Diffstat (limited to 'drivers/net/ipa')
-rw-r--r--  drivers/net/ipa/Makefile               |   2
-rw-r--r--  drivers/net/ipa/data/ipa_data-v3.1.c   |  19
-rw-r--r--  drivers/net/ipa/data/ipa_data-v3.5.1.c |  27
-rw-r--r--  drivers/net/ipa/data/ipa_data-v4.11.c  |  17
-rw-r--r--  drivers/net/ipa/data/ipa_data-v4.2.c   |  17
-rw-r--r--  drivers/net/ipa/data/ipa_data-v4.5.c   |  17
-rw-r--r--  drivers/net/ipa/data/ipa_data-v4.7.c   | 405
-rw-r--r--  drivers/net/ipa/data/ipa_data-v4.9.c   |  17
-rw-r--r--  drivers/net/ipa/gsi_trans.c            |   7
-rw-r--r--  drivers/net/ipa/ipa.h                  |  32
-rw-r--r--  drivers/net/ipa/ipa_cmd.c              |  74
-rw-r--r--  drivers/net/ipa/ipa_cmd.h              |  16
-rw-r--r--  drivers/net/ipa/ipa_data.h             |   3
-rw-r--r--  drivers/net/ipa/ipa_endpoint.c         | 277
-rw-r--r--  drivers/net/ipa/ipa_endpoint.h         |   2
-rw-r--r--  drivers/net/ipa/ipa_interrupt.c        |  34
-rw-r--r--  drivers/net/ipa/ipa_main.c             | 112
-rw-r--r--  drivers/net/ipa/ipa_mem.c              |  19
-rw-r--r--  drivers/net/ipa/ipa_qmi.c              |   9
-rw-r--r--  drivers/net/ipa/ipa_qmi_msg.c          |  20
-rw-r--r--  drivers/net/ipa/ipa_qmi_msg.h          |  20
-rw-r--r--  drivers/net/ipa/ipa_reg.c              |   2
-rw-r--r--  drivers/net/ipa/ipa_reg.h              |   1
-rw-r--r--  drivers/net/ipa/ipa_sysfs.c            |   6
-rw-r--r--  drivers/net/ipa/ipa_table.c            | 350
-rw-r--r--  drivers/net/ipa/ipa_table.h            |  30
-rw-r--r--  drivers/net/ipa/ipa_version.h          |   4
-rw-r--r--  drivers/net/ipa/reg/ipa_reg-v3.1.c     |  13
-rw-r--r--  drivers/net/ipa/reg/ipa_reg-v3.5.1.c   |  13
-rw-r--r--  drivers/net/ipa/reg/ipa_reg-v4.11.c    |  13
-rw-r--r--  drivers/net/ipa/reg/ipa_reg-v4.2.c     |  13
-rw-r--r--  drivers/net/ipa/reg/ipa_reg-v4.5.c     |  13
-rw-r--r--  drivers/net/ipa/reg/ipa_reg-v4.7.c     | 507
-rw-r--r--  drivers/net/ipa/reg/ipa_reg-v4.9.c     |  13
34 files changed, 1612 insertions, 512 deletions
diff --git a/drivers/net/ipa/Makefile b/drivers/net/ipa/Makefile
index 48255fc4b25c..8cdcaaf58ae3 100644
--- a/drivers/net/ipa/Makefile
+++ b/drivers/net/ipa/Makefile
@@ -2,7 +2,7 @@
#
# Makefile for the Qualcomm IPA driver.
-IPA_VERSIONS := 3.1 3.5.1 4.2 4.5 4.9 4.11
+IPA_VERSIONS := 3.1 3.5.1 4.2 4.5 4.7 4.9 4.11
obj-$(CONFIG_QCOM_IPA) += ipa.o
diff --git a/drivers/net/ipa/data/ipa_data-v3.1.c b/drivers/net/ipa/data/ipa_data-v3.1.c
index e0d71f609272..3380fb3483b2 100644
--- a/drivers/net/ipa/data/ipa_data-v3.1.c
+++ b/drivers/net/ipa/data/ipa_data-v3.1.c
@@ -525,13 +525,14 @@ static const struct ipa_power_data ipa_power_data = {
/* Configuration data for an SoC having IPA v3.1 */
const struct ipa_data ipa_data_v3_1 = {
- .version = IPA_VERSION_3_1,
- .backward_compat = BIT(BCR_CMDQ_L_LACK_ONE_ENTRY),
- .qsb_count = ARRAY_SIZE(ipa_qsb_data),
- .qsb_data = ipa_qsb_data,
- .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
- .endpoint_data = ipa_gsi_endpoint_data,
- .resource_data = &ipa_resource_data,
- .mem_data = &ipa_mem_data,
- .power_data = &ipa_power_data,
+ .version = IPA_VERSION_3_1,
+ .backward_compat = BIT(BCR_CMDQ_L_LACK_ONE_ENTRY),
+ .qsb_count = ARRAY_SIZE(ipa_qsb_data),
+ .qsb_data = ipa_qsb_data,
+ .modem_route_count = 8,
+ .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
+ .endpoint_data = ipa_gsi_endpoint_data,
+ .resource_data = &ipa_resource_data,
+ .mem_data = &ipa_mem_data,
+ .power_data = &ipa_power_data,
};
diff --git a/drivers/net/ipa/data/ipa_data-v3.5.1.c b/drivers/net/ipa/data/ipa_data-v3.5.1.c
index 42f2c88a92d4..4287114b24db 100644
--- a/drivers/net/ipa/data/ipa_data-v3.5.1.c
+++ b/drivers/net/ipa/data/ipa_data-v3.5.1.c
@@ -406,17 +406,18 @@ static const struct ipa_power_data ipa_power_data = {
/* Configuration data for an SoC having IPA v3.5.1 */
const struct ipa_data ipa_data_v3_5_1 = {
- .version = IPA_VERSION_3_5_1,
- .backward_compat = BIT(BCR_CMDQ_L_LACK_ONE_ENTRY) |
- BIT(BCR_TX_NOT_USING_BRESP) |
- BIT(BCR_SUSPEND_L2_IRQ) |
- BIT(BCR_HOLB_DROP_L2_IRQ) |
- BIT(BCR_DUAL_TX),
- .qsb_count = ARRAY_SIZE(ipa_qsb_data),
- .qsb_data = ipa_qsb_data,
- .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
- .endpoint_data = ipa_gsi_endpoint_data,
- .resource_data = &ipa_resource_data,
- .mem_data = &ipa_mem_data,
- .power_data = &ipa_power_data,
+ .version = IPA_VERSION_3_5_1,
+ .backward_compat = BIT(BCR_CMDQ_L_LACK_ONE_ENTRY) |
+ BIT(BCR_TX_NOT_USING_BRESP) |
+ BIT(BCR_SUSPEND_L2_IRQ) |
+ BIT(BCR_HOLB_DROP_L2_IRQ) |
+ BIT(BCR_DUAL_TX),
+ .qsb_count = ARRAY_SIZE(ipa_qsb_data),
+ .qsb_data = ipa_qsb_data,
+ .modem_route_count = 8,
+ .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
+ .endpoint_data = ipa_gsi_endpoint_data,
+ .resource_data = &ipa_resource_data,
+ .mem_data = &ipa_mem_data,
+ .power_data = &ipa_power_data,
};
diff --git a/drivers/net/ipa/data/ipa_data-v4.11.c b/drivers/net/ipa/data/ipa_data-v4.11.c
index a204e439c23d..1b4b52501ee3 100644
--- a/drivers/net/ipa/data/ipa_data-v4.11.c
+++ b/drivers/net/ipa/data/ipa_data-v4.11.c
@@ -394,12 +394,13 @@ static const struct ipa_power_data ipa_power_data = {
/* Configuration data for an SoC having IPA v4.11 */
const struct ipa_data ipa_data_v4_11 = {
- .version = IPA_VERSION_4_11,
- .qsb_count = ARRAY_SIZE(ipa_qsb_data),
- .qsb_data = ipa_qsb_data,
- .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
- .endpoint_data = ipa_gsi_endpoint_data,
- .resource_data = &ipa_resource_data,
- .mem_data = &ipa_mem_data,
- .power_data = &ipa_power_data,
+ .version = IPA_VERSION_4_11,
+ .qsb_count = ARRAY_SIZE(ipa_qsb_data),
+ .qsb_data = ipa_qsb_data,
+ .modem_route_count = 8,
+ .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
+ .endpoint_data = ipa_gsi_endpoint_data,
+ .resource_data = &ipa_resource_data,
+ .mem_data = &ipa_mem_data,
+ .power_data = &ipa_power_data,
};
diff --git a/drivers/net/ipa/data/ipa_data-v4.2.c b/drivers/net/ipa/data/ipa_data-v4.2.c
index 04f574fe006f..199ed0ed868b 100644
--- a/drivers/net/ipa/data/ipa_data-v4.2.c
+++ b/drivers/net/ipa/data/ipa_data-v4.2.c
@@ -372,13 +372,14 @@ static const struct ipa_power_data ipa_power_data = {
/* Configuration data for an SoC having IPA v4.2 */
const struct ipa_data ipa_data_v4_2 = {
- .version = IPA_VERSION_4_2,
+ .version = IPA_VERSION_4_2,
/* backward_compat value is 0 */
- .qsb_count = ARRAY_SIZE(ipa_qsb_data),
- .qsb_data = ipa_qsb_data,
- .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
- .endpoint_data = ipa_gsi_endpoint_data,
- .resource_data = &ipa_resource_data,
- .mem_data = &ipa_mem_data,
- .power_data = &ipa_power_data,
+ .qsb_count = ARRAY_SIZE(ipa_qsb_data),
+ .qsb_data = ipa_qsb_data,
+ .modem_route_count = 8,
+ .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
+ .endpoint_data = ipa_gsi_endpoint_data,
+ .resource_data = &ipa_resource_data,
+ .mem_data = &ipa_mem_data,
+ .power_data = &ipa_power_data,
};
diff --git a/drivers/net/ipa/data/ipa_data-v4.5.c b/drivers/net/ipa/data/ipa_data-v4.5.c
index 684239e71f46..19b549f2998b 100644
--- a/drivers/net/ipa/data/ipa_data-v4.5.c
+++ b/drivers/net/ipa/data/ipa_data-v4.5.c
@@ -450,12 +450,13 @@ static const struct ipa_power_data ipa_power_data = {
/* Configuration data for an SoC having IPA v4.5 */
const struct ipa_data ipa_data_v4_5 = {
- .version = IPA_VERSION_4_5,
- .qsb_count = ARRAY_SIZE(ipa_qsb_data),
- .qsb_data = ipa_qsb_data,
- .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
- .endpoint_data = ipa_gsi_endpoint_data,
- .resource_data = &ipa_resource_data,
- .mem_data = &ipa_mem_data,
- .power_data = &ipa_power_data,
+ .version = IPA_VERSION_4_5,
+ .qsb_count = ARRAY_SIZE(ipa_qsb_data),
+ .qsb_data = ipa_qsb_data,
+ .modem_route_count = 8,
+ .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
+ .endpoint_data = ipa_gsi_endpoint_data,
+ .resource_data = &ipa_resource_data,
+ .mem_data = &ipa_mem_data,
+ .power_data = &ipa_power_data,
};
diff --git a/drivers/net/ipa/data/ipa_data-v4.7.c b/drivers/net/ipa/data/ipa_data-v4.7.c
new file mode 100644
index 000000000000..7552c400961e
--- /dev/null
+++ b/drivers/net/ipa/data/ipa_data-v4.7.c
@@ -0,0 +1,405 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2022 Linaro Ltd. */
+
+#include <linux/log2.h>
+
+#include "../gsi.h"
+#include "../ipa_data.h"
+#include "../ipa_endpoint.h"
+#include "../ipa_mem.h"
+
+/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.7 */
+enum ipa_resource_type {
+ /* Source resource types; first must have value 0 */
+ IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS = 0,
+ IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS,
+ IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF,
+ IPA_RESOURCE_TYPE_SRC_HPS_DMARS,
+ IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES,
+
+ /* Destination resource types; first must have value 0 */
+ IPA_RESOURCE_TYPE_DST_DATA_SECTORS = 0,
+ IPA_RESOURCE_TYPE_DST_DPS_DMARS,
+};
+
+/* Resource groups used for an SoC having IPA v4.7 */
+enum ipa_rsrc_group_id {
+ /* Source resource group identifiers */
+ IPA_RSRC_GROUP_SRC_UL_DL = 0,
+ IPA_RSRC_GROUP_SRC_UC_RX_Q,
+ IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */
+
+ /* Destination resource group identifiers */
+ IPA_RSRC_GROUP_DST_UL_DL_DPL = 0,
+ IPA_RSRC_GROUP_DST_UNUSED_1,
+ IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */
+};
+
+/* QSB configuration data for an SoC having IPA v4.7 */
+static const struct ipa_qsb_data ipa_qsb_data[] = {
+ [IPA_QSB_MASTER_DDR] = {
+ .max_writes = 8,
+ .max_reads = 0, /* no limit (hardware max) */
+ .max_reads_beats = 120,
+ },
+};
+
+/* Endpoint configuration data for an SoC having IPA v4.7 */
+static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
+ [IPA_ENDPOINT_AP_COMMAND_TX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 5,
+ .endpoint_id = 7,
+ .toward_ipa = true,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 20,
+ },
+ .endpoint = {
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
+ .dma_mode = true,
+ .dma_endpoint = IPA_ENDPOINT_AP_LAN_RX,
+ .tx = {
+ .seq_type = IPA_SEQ_DMA,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_LAN_RX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 14,
+ .endpoint_id = 9,
+ .toward_ipa = false,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 9,
+ },
+ .endpoint = {
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
+ .aggregation = true,
+ .status_enable = true,
+ .rx = {
+ .buffer_size = 8192,
+ .pad_align = ilog2(sizeof(u32)),
+ .aggr_time_limit = 500,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_MODEM_TX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 2,
+ .endpoint_id = 2,
+ .toward_ipa = true,
+ .channel = {
+ .tre_count = 512,
+ .event_count = 512,
+ .tlv_count = 16,
+ },
+ .endpoint = {
+ .filter_support = true,
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
+ .qmap = true,
+ .status_enable = true,
+ .tx = {
+ .seq_type = IPA_SEQ_2_PASS_SKIP_LAST_UC,
+ .status_endpoint =
+ IPA_ENDPOINT_MODEM_AP_RX,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_MODEM_RX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 7,
+ .endpoint_id = 16,
+ .toward_ipa = false,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 9,
+ },
+ .endpoint = {
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
+ .qmap = true,
+ .aggregation = true,
+ .rx = {
+ .buffer_size = 8192,
+ .aggr_time_limit = 500,
+ .aggr_close_eof = true,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_MODEM_AP_TX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 0,
+ .endpoint_id = 5,
+ .toward_ipa = true,
+ .endpoint = {
+ .filter_support = true,
+ },
+ },
+ [IPA_ENDPOINT_MODEM_AP_RX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 7,
+ .endpoint_id = 14,
+ .toward_ipa = false,
+ },
+ [IPA_ENDPOINT_MODEM_DL_NLO_TX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 2,
+ .endpoint_id = 8,
+ .toward_ipa = true,
+ .endpoint = {
+ .filter_support = true,
+ },
+ },
+};
+
+/* Source resource configuration data for an SoC having IPA v4.7 */
+static const struct ipa_resource ipa_resource_src[] = {
+ [IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 8, .max = 8,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 8, .max = 8,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 18, .max = 18,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_HPS_DMARS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 2, .max = 2,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
+ .min = 15, .max = 15,
+ },
+ },
+};
+
+/* Destination resource configuration data for an SoC having IPA v4.7 */
+static const struct ipa_resource ipa_resource_dst[] = {
+ [IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = {
+ .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
+ .min = 7, .max = 7,
+ },
+ },
+ [IPA_RESOURCE_TYPE_DST_DPS_DMARS] = {
+ .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
+ .min = 2, .max = 2,
+ },
+ },
+};
+
+/* Resource configuration data for an SoC having IPA v4.7 */
+static const struct ipa_resource_data ipa_resource_data = {
+ .rsrc_group_dst_count = IPA_RSRC_GROUP_DST_COUNT,
+ .rsrc_group_src_count = IPA_RSRC_GROUP_SRC_COUNT,
+ .resource_src_count = ARRAY_SIZE(ipa_resource_src),
+ .resource_src = ipa_resource_src,
+ .resource_dst_count = ARRAY_SIZE(ipa_resource_dst),
+ .resource_dst = ipa_resource_dst,
+};
+
+/* IPA-resident memory region data for an SoC having IPA v4.7 */
+static const struct ipa_mem ipa_mem_local_data[] = {
+ {
+ .id = IPA_MEM_UC_SHARED,
+ .offset = 0x0000,
+ .size = 0x0080,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_UC_INFO,
+ .offset = 0x0080,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_V4_FILTER_HASHED,
+ .offset = 0x0288,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V4_FILTER,
+ .offset = 0x0308,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_FILTER_HASHED,
+ .offset = 0x0388,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_FILTER,
+ .offset = 0x0408,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V4_ROUTE_HASHED,
+ .offset = 0x0488,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V4_ROUTE,
+ .offset = 0x0508,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_ROUTE_HASHED,
+ .offset = 0x0588,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_ROUTE,
+ .offset = 0x0608,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_MODEM_HEADER,
+ .offset = 0x0688,
+ .size = 0x0240,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_AP_HEADER,
+ .offset = 0x08c8,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_MODEM_PROC_CTX,
+ .offset = 0x0ad0,
+ .size = 0x0200,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_AP_PROC_CTX,
+ .offset = 0x0cd0,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_NAT_TABLE,
+ .offset = 0x0ee0,
+ .size = 0x0d00,
+ .canary_count = 4,
+ },
+ {
+ .id = IPA_MEM_PDN_CONFIG,
+ .offset = 0x1be8,
+ .size = 0x0050,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_STATS_QUOTA_MODEM,
+ .offset = 0x1c40,
+ .size = 0x0030,
+ .canary_count = 4,
+ },
+ {
+ .id = IPA_MEM_STATS_QUOTA_AP,
+ .offset = 0x1c70,
+ .size = 0x0048,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_STATS_TETHERING,
+ .offset = 0x1cb8,
+ .size = 0x0238,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_STATS_DROP,
+ .offset = 0x1ef0,
+ .size = 0x0020,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_MODEM,
+ .offset = 0x1f18,
+ .size = 0x100c,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_END_MARKER,
+ .offset = 0x3000,
+ .size = 0x0000,
+ .canary_count = 1,
+ },
+};
+
+/* Memory configuration data for an SoC having IPA v4.7 */
+static const struct ipa_mem_data ipa_mem_data = {
+ .local_count = ARRAY_SIZE(ipa_mem_local_data),
+ .local = ipa_mem_local_data,
+ .imem_addr = 0x146a9000,
+ .imem_size = 0x00002000,
+ .smem_id = 497,
+ .smem_size = 0x00009000,
+};
+
+/* Interconnect rates are in 1000 byte/second units */
+static const struct ipa_interconnect_data ipa_interconnect_data[] = {
+ {
+ .name = "memory",
+ .peak_bandwidth = 600000, /* 600 MBps */
+ .average_bandwidth = 150000, /* 150 MBps */
+ },
+ /* Average rate is unused for the next two interconnects */
+ {
+ .name = "imem",
+ .peak_bandwidth = 450000, /* 450 MBps */
+ .average_bandwidth = 75000, /* 75 MBps (unused?) */
+ },
+ {
+ .name = "config",
+ .peak_bandwidth = 171400, /* 171.4 MBps */
+ .average_bandwidth = 0, /* unused */
+ },
+};
+
+/* Clock and interconnect configuration data for an SoC having IPA v4.7 */
+static const struct ipa_power_data ipa_power_data = {
+ /* XXX Downstream code says 150 MHz (DT SVS2), 60 MHz (code) */
+ .core_clock_rate = 100 * 1000 * 1000, /* Hz (150? 60?) */
+ .interconnect_count = ARRAY_SIZE(ipa_interconnect_data),
+ .interconnect_data = ipa_interconnect_data,
+};
+
+/* Configuration data for an SoC having IPA v4.7 */
+const struct ipa_data ipa_data_v4_7 = {
+ .version = IPA_VERSION_4_7,
+ .qsb_count = ARRAY_SIZE(ipa_qsb_data),
+ .qsb_data = ipa_qsb_data,
+ .modem_route_count = 8,
+ .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
+ .endpoint_data = ipa_gsi_endpoint_data,
+ .resource_data = &ipa_resource_data,
+ .mem_data = &ipa_mem_data,
+ .power_data = &ipa_power_data,
+};
diff --git a/drivers/net/ipa/data/ipa_data-v4.9.c b/drivers/net/ipa/data/ipa_data-v4.9.c
index 2333e15f9533..d30fc1fe6ca2 100644
--- a/drivers/net/ipa/data/ipa_data-v4.9.c
+++ b/drivers/net/ipa/data/ipa_data-v4.9.c
@@ -444,12 +444,13 @@ static const struct ipa_power_data ipa_power_data = {
/* Configuration data for an SoC having IPA v4.9. */
const struct ipa_data ipa_data_v4_9 = {
- .version = IPA_VERSION_4_9,
- .qsb_count = ARRAY_SIZE(ipa_qsb_data),
- .qsb_data = ipa_qsb_data,
- .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
- .endpoint_data = ipa_gsi_endpoint_data,
- .resource_data = &ipa_resource_data,
- .mem_data = &ipa_mem_data,
- .power_data = &ipa_power_data,
+ .version = IPA_VERSION_4_9,
+ .qsb_count = ARRAY_SIZE(ipa_qsb_data),
+ .qsb_data = ipa_qsb_data,
+ .modem_route_count = 8,
+ .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
+ .endpoint_data = ipa_gsi_endpoint_data,
+ .resource_data = &ipa_resource_data,
+ .mem_data = &ipa_mem_data,
+ .power_data = &ipa_power_data,
};
diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c
index 26b7f683a3e1..0f52c068c46d 100644
--- a/drivers/net/ipa/gsi_trans.c
+++ b/drivers/net/ipa/gsi_trans.c
@@ -87,6 +87,7 @@ struct gsi_tre {
int gsi_trans_pool_init(struct gsi_trans_pool *pool, size_t size, u32 count,
u32 max_alloc)
{
+ size_t alloc_size;
void *virt;
if (!size)
@@ -103,13 +104,15 @@ int gsi_trans_pool_init(struct gsi_trans_pool *pool, size_t size, u32 count,
* If there aren't enough entries starting at the free index,
* we just allocate free entries from the beginning of the pool.
*/
- virt = kcalloc(count + max_alloc - 1, size, GFP_KERNEL);
+ alloc_size = size_mul(count + max_alloc - 1, size);
+ alloc_size = kmalloc_size_roundup(alloc_size);
+ virt = kzalloc(alloc_size, GFP_KERNEL);
if (!virt)
return -ENOMEM;
pool->base = virt;
/* If the allocator gave us any extra memory, use it */
- pool->count = ksize(pool->base) / size;
+ pool->count = alloc_size / size;
pool->free = 0;
pool->max_alloc = max_alloc;
pool->size = size;
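The gsi_trans.c hunk above replaces the kcalloc()/ksize() pairing with an up-front size computation: size_mul() guards the multiplication against overflow, and kmalloc_size_roundup() reports how much memory a kmalloc() of that size will actually return, so the pool can keep using the allocator's slack without probing the live pointer with ksize(). A minimal sketch of the same pattern, with a simplified pool structure standing in for the driver's gsi_trans_pool:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_pool {
        void *base;     /* allocated memory */
        u32 count;      /* entries actually usable */
        u32 size;       /* size of one entry */
};

static int demo_pool_init(struct demo_pool *pool, size_t size, u32 count,
                          u32 max_alloc)
{
        size_t alloc_size;

        /* saturating multiply: never silently wraps */
        alloc_size = size_mul(count + max_alloc - 1, size);
        /* ask what kmalloc() will really hand back for this size */
        alloc_size = kmalloc_size_roundup(alloc_size);

        pool->base = kzalloc(alloc_size, GFP_KERNEL);
        if (!pool->base)
                return -ENOMEM;

        /* use every whole entry that fits in the rounded-up block */
        pool->count = alloc_size / size;
        pool->size = size;

        return 0;
}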
diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h
index 09ead433ec38..5372db58b5bd 100644
--- a/drivers/net/ipa/ipa.h
+++ b/drivers/net/ipa/ipa.h
@@ -39,6 +39,9 @@ struct ipa_interrupt;
* @power: IPA power information
* @table_addr: DMA address of filter/route table content
* @table_virt: Virtual address of filter/route table content
+ * @route_count: Total number of entries in a routing table
+ * @modem_route_count: Number of modem entries in a routing table
+ * @filter_count: Maximum number of entries in a filter table
* @interrupt: IPA Interrupt information
* @uc_powered: true if power is active by proxy for microcontroller
* @uc_loaded: true after microcontroller has reported it's ready
@@ -58,11 +61,13 @@ struct ipa_interrupt;
* @zero_addr: DMA address of preallocated zero-filled memory
* @zero_virt: Virtual address of preallocated zero-filled memory
* @zero_size: Size (bytes) of preallocated zero-filled memory
- * @available: Bit mask indicating endpoints hardware supports
- * @filter_map: Bit mask indicating endpoints that support filtering
- * @initialized: Bit mask indicating endpoints initialized
- * @set_up: Bit mask indicating endpoints set up
- * @enabled: Bit mask indicating endpoints enabled
+ * @endpoint_count: Number of defined bits in most bitmaps below
+ * @available_count: Number of defined bits in the available bitmap
+ * @defined: Bitmap of endpoints defined in config data
+ * @available: Bitmap of endpoints supported by hardware
+ * @filtered: Bitmap of endpoints that support filtering
+ * @set_up: Bitmap of endpoints that are set up for use
+ * @enabled: Bitmap of currently enabled endpoints
* @modem_tx_count: Number of defined modem TX endoints
* @endpoint: Array of endpoint information
* @channel_map: Mapping of GSI channel to IPA endpoint
@@ -84,6 +89,9 @@ struct ipa {
dma_addr_t table_addr;
__le64 *table_virt;
+ u32 route_count;
+ u32 modem_route_count;
+ u32 filter_count;
struct ipa_interrupt *interrupt;
bool uc_powered;
@@ -110,12 +118,14 @@ struct ipa {
void *zero_virt;
size_t zero_size;
- /* Bit masks indicating endpoint state */
- u32 available; /* supported by hardware */
- u32 filter_map;
- u32 initialized;
- u32 set_up;
- u32 enabled;
+ /* Bitmaps indicating endpoint state */
+ u32 endpoint_count;
+ u32 available_count;
+ unsigned long *defined; /* Defined in configuration data */
+ unsigned long *available; /* Supported by hardware */
+ u64 filtered; /* Support filtering (AP and modem) */
+ unsigned long *set_up;
+ unsigned long *enabled;
u32 modem_tx_count;
struct ipa_endpoint endpoint[IPA_ENDPOINT_MAX];
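The ipa.h change above turns the fixed u32 endpoint masks into dynamically allocated bitmaps, so endpoint state is no longer capped at 32 endpoints. A minimal sketch of the bitmap API playing the same role (the demo_* function and its printouts are illustrative, not driver symbols):

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/printk.h>
#include <linux/types.h>

/* Track per-endpoint state without a 32-endpoint ceiling */
static int demo_track_endpoints(u32 endpoint_count)
{
        unsigned long *enabled;
        u32 endpoint_id;

        enabled = bitmap_zalloc(endpoint_count, GFP_KERNEL);
        if (!enabled)
                return -ENOMEM;

        __set_bit(endpoint_count - 1, enabled);         /* mark one endpoint */
        if (test_bit(endpoint_count - 1, enabled))      /* cheap membership test */
                pr_info("highest endpoint is enabled\n");

        /* walk whatever is set, however many endpoints exist */
        for_each_set_bit(endpoint_id, enabled, endpoint_count)
                pr_info("endpoint %u enabled\n", endpoint_id);

        bitmap_free(enabled);

        return 0;
}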
diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
index 26c3db9f52b1..bb3dfa9a2bc8 100644
--- a/drivers/net/ipa/ipa_cmd.c
+++ b/drivers/net/ipa/ipa_cmd.c
@@ -145,20 +145,12 @@ union ipa_cmd_payload {
static void ipa_cmd_validate_build(void)
{
- /* The sizes of a filter and route tables need to fit into fields
- * in the ipa_cmd_hw_ip_fltrt_init structure. Although hashed tables
+ /* The size of a filter table needs to fit into fields in the
+ * ipa_cmd_hw_ip_fltrt_init structure. Although hashed tables
* might not be used, non-hashed and hashed tables have the same
* maximum size. IPv4 and IPv6 filter tables have the same number
- * of entries, as and IPv4 and IPv6 route tables have the same number
* of entries.
*/
-#define TABLE_SIZE (TABLE_COUNT_MAX * sizeof(__le64))
-#define TABLE_COUNT_MAX max_t(u32, IPA_ROUTE_COUNT_MAX, IPA_FILTER_COUNT_MAX)
- BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_HASH_SIZE_FMASK));
- BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK));
-#undef TABLE_COUNT_MAX
-#undef TABLE_SIZE
-
/* Hashed and non-hashed fields are assumed to be the same size */
BUILD_BUG_ON(field_max(IP_FLTRT_FLAGS_HASH_SIZE_FMASK) !=
field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK));
@@ -171,18 +163,22 @@ static void ipa_cmd_validate_build(void)
}
/* Validate a memory region holding a table */
-bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem, bool route)
+bool ipa_cmd_table_init_valid(struct ipa *ipa, const struct ipa_mem *mem,
+ bool route)
{
u32 offset_max = field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
u32 size_max = field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);
const char *table = route ? "route" : "filter";
struct device *dev = &ipa->pdev->dev;
+ u32 size;
+
+ size = route ? ipa->route_count : ipa->filter_count + 1;
+ size *= sizeof(__le64);
/* Size must fit in the immediate command field that holds it */
- if (mem->size > size_max) {
+ if (size > size_max) {
dev_err(dev, "%s table region size too large\n", table);
- dev_err(dev, " (0x%04x > 0x%04x)\n",
- mem->size, size_max);
+ dev_err(dev, " (0x%04x > 0x%04x)\n", size, size_max);
return false;
}
@@ -197,21 +193,11 @@ bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem, bool route)
return false;
}
- /* Entire memory range must fit within IPA-local memory */
- if (mem->offset > ipa->mem_size ||
- mem->size > ipa->mem_size - mem->offset) {
- dev_err(dev, "%s table region out of range\n", table);
- dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n",
- mem->offset, mem->size, ipa->mem_size);
-
- return false;
- }
-
return true;
}
/* Validate the memory region that holds headers */
-static bool ipa_cmd_header_valid(struct ipa *ipa)
+static bool ipa_cmd_header_init_local_valid(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
const struct ipa_mem *mem;
@@ -257,15 +243,6 @@ static bool ipa_cmd_header_valid(struct ipa *ipa)
return false;
}
- /* Make sure the entire combined area fits in IPA memory */
- if (size > ipa->mem_size || offset > ipa->mem_size - size) {
- dev_err(dev, "header table region out of range\n");
- dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n",
- offset, size, ipa->mem_size);
-
- return false;
- }
-
return true;
}
@@ -336,26 +313,11 @@ static bool ipa_cmd_register_write_valid(struct ipa *ipa)
return true;
}
-bool ipa_cmd_data_valid(struct ipa *ipa)
-{
- if (!ipa_cmd_header_valid(ipa))
- return false;
-
- if (!ipa_cmd_register_write_valid(ipa))
- return false;
-
- return true;
-}
-
-
int ipa_cmd_pool_init(struct gsi_channel *channel, u32 tre_max)
{
struct gsi_trans_info *trans_info = &channel->trans_info;
struct device *dev = channel->gsi->dev;
- /* This is as good a place as any to validate build constants */
- ipa_cmd_validate_build();
-
/* Command payloads are allocated one at a time, but a single
* transaction can require up to the maximum supported by the
* channel; treat them as if they were allocated all at once.
@@ -655,3 +617,17 @@ struct gsi_trans *ipa_cmd_trans_alloc(struct ipa *ipa, u32 tre_count)
return gsi_channel_trans_alloc(&ipa->gsi, endpoint->channel_id,
tre_count, DMA_NONE);
}
+
+/* Init function for immediate commands; there is no ipa_cmd_exit() */
+int ipa_cmd_init(struct ipa *ipa)
+{
+ ipa_cmd_validate_build();
+
+ if (!ipa_cmd_header_init_local_valid(ipa))
+ return -EINVAL;
+
+ if (!ipa_cmd_register_write_valid(ipa))
+ return -EINVAL;
+
+ return 0;
+}
diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h
index 8e4243c1f0bb..e2cf1c2b0ef2 100644
--- a/drivers/net/ipa/ipa_cmd.h
+++ b/drivers/net/ipa/ipa_cmd.h
@@ -47,15 +47,15 @@ enum ipa_cmd_opcode {
};
/**
- * ipa_cmd_table_valid() - Validate a memory region holding a table
+ * ipa_cmd_table_init_valid() - Validate a memory region holding a table
* @ipa: - IPA pointer
* @mem: - IPA memory region descriptor
* @route: - Whether the region holds a route or filter table
*
* Return: true if region is valid, false otherwise
*/
-bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
- bool route);
+bool ipa_cmd_table_init_valid(struct ipa *ipa, const struct ipa_mem *mem,
+ bool route);
/**
* ipa_cmd_data_valid() - Validate command-realted configuration is valid
@@ -162,4 +162,14 @@ void ipa_cmd_pipeline_clear_wait(struct ipa *ipa);
*/
struct gsi_trans *ipa_cmd_trans_alloc(struct ipa *ipa, u32 tre_count);
+/**
+ * ipa_cmd_init() - Initialize IPA immediate commands
+ * @ipa: - IPA pointer
+ *
+ * Return: 0 if successful, or a negative error code
+ *
+ * There is no need for a matching ipa_cmd_exit() function.
+ */
+int ipa_cmd_init(struct ipa *ipa);
+
#endif /* _IPA_CMD_H_ */
diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h
index e5a6ce75c7dd..818e64114ed5 100644
--- a/drivers/net/ipa/ipa_data.h
+++ b/drivers/net/ipa/ipa_data.h
@@ -222,6 +222,7 @@ struct ipa_power_data {
* @backward_compat: BCR register value (prior to IPA v4.5 only)
* @qsb_count: number of entries in the qsb_data array
* @qsb_data: Qualcomm System Bus configuration data
+ * @modem_route_count: number of modem entries in a routing table
* @endpoint_count: number of entries in the endpoint_data array
* @endpoint_data: IPA endpoint/GSI channel data
* @resource_data: IPA resource configuration data
@@ -233,6 +234,7 @@ struct ipa_data {
u32 backward_compat;
u32 qsb_count; /* number of entries in qsb_data[] */
const struct ipa_qsb_data *qsb_data;
+ u32 modem_route_count;
u32 endpoint_count; /* number of entries in endpoint_data[] */
const struct ipa_gsi_endpoint_data *endpoint_data;
const struct ipa_resource_data *resource_data;
@@ -244,6 +246,7 @@ extern const struct ipa_data ipa_data_v3_1;
extern const struct ipa_data ipa_data_v3_5_1;
extern const struct ipa_data ipa_data_v4_2;
extern const struct ipa_data ipa_data_v4_5;
+extern const struct ipa_data ipa_data_v4_7;
extern const struct ipa_data ipa_data_v4_9;
extern const struct ipa_data ipa_data_v4_11;
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index 093e11ec7c2d..136932464261 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -243,42 +243,47 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
return true;
}
-static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
- const struct ipa_gsi_endpoint_data *data)
+/* Validate endpoint configuration data. Return max defined endpoint ID */
+static u32 ipa_endpoint_max(struct ipa *ipa, u32 count,
+ const struct ipa_gsi_endpoint_data *data)
{
const struct ipa_gsi_endpoint_data *dp = data;
struct device *dev = &ipa->pdev->dev;
enum ipa_endpoint_name name;
+ u32 max;
if (count > IPA_ENDPOINT_COUNT) {
dev_err(dev, "too many endpoints specified (%u > %u)\n",
count, IPA_ENDPOINT_COUNT);
- return false;
+ return 0;
}
/* Make sure needed endpoints have defined data */
if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
dev_err(dev, "command TX endpoint not defined\n");
- return false;
+ return 0;
}
if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
dev_err(dev, "LAN RX endpoint not defined\n");
- return false;
+ return 0;
}
if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
dev_err(dev, "AP->modem TX endpoint not defined\n");
- return false;
+ return 0;
}
if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
dev_err(dev, "AP<-modem RX endpoint not defined\n");
- return false;
+ return 0;
}
- for (name = 0; name < count; name++, dp++)
+ max = 0;
+ for (name = 0; name < count; name++, dp++) {
if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
- return false;
+ return 0;
+ max = max_t(u32, max, dp->endpoint_id);
+ }
- return true;
+ return max;
}
/* Allocate a transaction to use on a non-command endpoint */
@@ -345,29 +350,32 @@ ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
{
- u32 mask = BIT(endpoint->endpoint_id);
+ u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
+ u32 unit = endpoint_id / 32;
const struct ipa_reg *reg;
u32 val;
- WARN_ON(!(mask & ipa->available));
+ WARN_ON(!test_bit(endpoint_id, ipa->available));
reg = ipa_reg(ipa, STATE_AGGR_ACTIVE);
- val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));
+ val = ioread32(ipa->reg_virt + ipa_reg_n_offset(reg, unit));
- return !!(val & mask);
+ return !!(val & BIT(endpoint_id % 32));
}
static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
{
- u32 mask = BIT(endpoint->endpoint_id);
+ u32 endpoint_id = endpoint->endpoint_id;
+ u32 mask = BIT(endpoint_id % 32);
struct ipa *ipa = endpoint->ipa;
+ u32 unit = endpoint_id / 32;
const struct ipa_reg *reg;
- WARN_ON(!(mask & ipa->available));
+ WARN_ON(!test_bit(endpoint_id, ipa->available));
reg = ipa_reg(ipa, AGGR_FORCE_CLOSE);
- iowrite32(mask, ipa->reg_virt + ipa_reg_offset(reg));
+ iowrite32(mask, ipa->reg_virt + ipa_reg_n_offset(reg, unit));
}
/**
@@ -426,10 +434,10 @@ ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
*/
void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
{
- u32 endpoint_id;
+ u32 endpoint_id = 0;
- for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) {
- struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];
+ while (endpoint_id < ipa->endpoint_count) {
+ struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id++];
if (endpoint->ee_id != GSI_EE_MODEM)
continue;
@@ -448,8 +456,8 @@ void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
/* Reset all modem endpoints to use the default exception endpoint */
int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
{
- u32 initialized = ipa->initialized;
struct gsi_trans *trans;
+ u32 endpoint_id;
u32 count;
/* We need one command per modem TX endpoint, plus the commands
@@ -463,14 +471,11 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
return -EBUSY;
}
- while (initialized) {
- u32 endpoint_id = __ffs(initialized);
+ for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) {
struct ipa_endpoint *endpoint;
const struct ipa_reg *reg;
u32 offset;
- initialized ^= BIT(endpoint_id);
-
/* We only reset modem TX endpoints */
endpoint = &ipa->endpoint[endpoint_id];
if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
@@ -1008,10 +1013,10 @@ static void ipa_endpoint_init_hol_block_disable(struct ipa_endpoint *endpoint)
void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
{
- u32 i;
+ u32 endpoint_id = 0;
- for (i = 0; i < IPA_ENDPOINT_MAX; i++) {
- struct ipa_endpoint *endpoint = &ipa->endpoint[i];
+ while (endpoint_id < ipa->endpoint_count) {
+ struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id++];
if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
continue;
@@ -1661,6 +1666,7 @@ static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
{
+ u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
struct gsi *gsi = &ipa->gsi;
int ret;
@@ -1670,37 +1676,35 @@ int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
dev_err(&ipa->pdev->dev,
"error %d starting %cX channel %u for endpoint %u\n",
ret, endpoint->toward_ipa ? 'T' : 'R',
- endpoint->channel_id, endpoint->endpoint_id);
+ endpoint->channel_id, endpoint_id);
return ret;
}
if (!endpoint->toward_ipa) {
- ipa_interrupt_suspend_enable(ipa->interrupt,
- endpoint->endpoint_id);
+ ipa_interrupt_suspend_enable(ipa->interrupt, endpoint_id);
ipa_endpoint_replenish_enable(endpoint);
}
- ipa->enabled |= BIT(endpoint->endpoint_id);
+ __set_bit(endpoint_id, ipa->enabled);
return 0;
}
void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
{
- u32 mask = BIT(endpoint->endpoint_id);
+ u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
struct gsi *gsi = &ipa->gsi;
int ret;
- if (!(ipa->enabled & mask))
+ if (!test_bit(endpoint_id, ipa->enabled))
return;
- ipa->enabled ^= mask;
+ __clear_bit(endpoint_id, endpoint->ipa->enabled);
if (!endpoint->toward_ipa) {
ipa_endpoint_replenish_disable(endpoint);
- ipa_interrupt_suspend_disable(ipa->interrupt,
- endpoint->endpoint_id);
+ ipa_interrupt_suspend_disable(ipa->interrupt, endpoint_id);
}
/* Note that if stop fails, the channel's state is not well-defined */
@@ -1708,7 +1712,7 @@ void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
if (ret)
dev_err(&ipa->pdev->dev,
"error %d attempting to stop endpoint %u\n", ret,
- endpoint->endpoint_id);
+ endpoint_id);
}
void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
@@ -1717,7 +1721,7 @@ void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
struct gsi *gsi = &endpoint->ipa->gsi;
int ret;
- if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
+ if (!test_bit(endpoint->endpoint_id, endpoint->ipa->enabled))
return;
if (!endpoint->toward_ipa) {
@@ -1737,7 +1741,7 @@ void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
struct gsi *gsi = &endpoint->ipa->gsi;
int ret;
- if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
+ if (!test_bit(endpoint->endpoint_id, endpoint->ipa->enabled))
return;
if (!endpoint->toward_ipa)
@@ -1797,12 +1801,12 @@ static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
ipa_endpoint_program(endpoint);
- endpoint->ipa->set_up |= BIT(endpoint->endpoint_id);
+ __set_bit(endpoint->endpoint_id, endpoint->ipa->set_up);
}
static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
{
- endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id);
+ __clear_bit(endpoint->endpoint_id, endpoint->ipa->set_up);
if (!endpoint->toward_ipa)
cancel_delayed_work_sync(&endpoint->replenish_work);
@@ -1812,45 +1816,39 @@ static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
void ipa_endpoint_setup(struct ipa *ipa)
{
- u32 initialized = ipa->initialized;
-
- ipa->set_up = 0;
- while (initialized) {
- u32 endpoint_id = __ffs(initialized);
-
- initialized ^= BIT(endpoint_id);
+ u32 endpoint_id;
+ for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count)
ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
- }
}
void ipa_endpoint_teardown(struct ipa *ipa)
{
- u32 set_up = ipa->set_up;
-
- while (set_up) {
- u32 endpoint_id = __fls(set_up);
-
- set_up ^= BIT(endpoint_id);
+ u32 endpoint_id;
+ for_each_set_bit(endpoint_id, ipa->set_up, ipa->endpoint_count)
ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
- }
- ipa->set_up = 0;
+}
+
+void ipa_endpoint_deconfig(struct ipa *ipa)
+{
+ ipa->available_count = 0;
+ bitmap_free(ipa->available);
+ ipa->available = NULL;
}
int ipa_endpoint_config(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
const struct ipa_reg *reg;
- u32 initialized;
+ u32 endpoint_id;
+ u32 tx_count;
+ u32 rx_count;
u32 rx_base;
- u32 rx_mask;
- u32 tx_mask;
- int ret = 0;
- u32 max;
+ u32 limit;
u32 val;
- /* Prior to IPAv3.5, the FLAVOR_0 register was not supported.
+ /* Prior to IPA v3.5, the FLAVOR_0 register was not supported.
* Furthermore, the endpoints were not grouped such that TX
* endpoint numbers started with 0 and RX endpoints had numbers
* higher than all TX endpoints, so we can't do the simple
@@ -1861,61 +1859,78 @@ int ipa_endpoint_config(struct ipa *ipa)
* assume the configuration is valid.
*/
if (ipa->version < IPA_VERSION_3_5) {
- ipa->available = ~0;
+ ipa->available = bitmap_zalloc(IPA_ENDPOINT_MAX, GFP_KERNEL);
+ if (!ipa->available)
+ return -ENOMEM;
+ ipa->available_count = IPA_ENDPOINT_MAX;
+
+ bitmap_set(ipa->available, 0, IPA_ENDPOINT_MAX);
+
return 0;
}
/* Find out about the endpoints supplied by the hardware, and ensure
- * the highest one doesn't exceed the number we support.
+ * the highest one doesn't exceed the number supported by software.
*/
reg = ipa_reg(ipa, FLAVOR_0);
val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));
- /* Our RX is an IPA producer */
+ /* Our RX is an IPA producer; our TX is an IPA consumer. */
+ tx_count = ipa_reg_decode(reg, MAX_CONS_PIPES, val);
+ rx_count = ipa_reg_decode(reg, MAX_PROD_PIPES, val);
rx_base = ipa_reg_decode(reg, PROD_LOWEST, val);
- max = rx_base + ipa_reg_decode(reg, MAX_PROD_PIPES, val);
- if (max > IPA_ENDPOINT_MAX) {
- dev_err(dev, "too many endpoints (%u > %u)\n",
- max, IPA_ENDPOINT_MAX);
+
+ limit = rx_base + rx_count;
+ if (limit > IPA_ENDPOINT_MAX) {
+ dev_err(dev, "too many endpoints, %u > %u\n",
+ limit, IPA_ENDPOINT_MAX);
return -EINVAL;
}
- rx_mask = GENMASK(max - 1, rx_base);
- /* Our TX is an IPA consumer */
- max = ipa_reg_decode(reg, MAX_CONS_PIPES, val);
- tx_mask = GENMASK(max - 1, 0);
-
- ipa->available = rx_mask | tx_mask;
+ /* Allocate and initialize the available endpoint bitmap */
+ ipa->available = bitmap_zalloc(limit, GFP_KERNEL);
+ if (!ipa->available)
+ return -ENOMEM;
+ ipa->available_count = limit;
- /* Check for initialized endpoints not supported by the hardware */
- if (ipa->initialized & ~ipa->available) {
- dev_err(dev, "unavailable endpoint id(s) 0x%08x\n",
- ipa->initialized & ~ipa->available);
- ret = -EINVAL; /* Report other errors too */
- }
+ /* Mark all supported RX and TX endpoints as available */
+ bitmap_set(ipa->available, 0, tx_count);
+ bitmap_set(ipa->available, rx_base, rx_count);
- initialized = ipa->initialized;
- while (initialized) {
- u32 endpoint_id = __ffs(initialized);
+ for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) {
struct ipa_endpoint *endpoint;
- initialized ^= BIT(endpoint_id);
+ if (endpoint_id >= limit) {
+ dev_err(dev, "invalid endpoint id, %u > %u\n",
+ endpoint_id, limit - 1);
+ goto err_free_bitmap;
+ }
+
+ if (!test_bit(endpoint_id, ipa->available)) {
+ dev_err(dev, "unavailable endpoint id %u\n",
+ endpoint_id);
+ goto err_free_bitmap;
+ }
/* Make sure it's pointing in the right direction */
endpoint = &ipa->endpoint[endpoint_id];
- if ((endpoint_id < rx_base) != endpoint->toward_ipa) {
- dev_err(dev, "endpoint id %u wrong direction\n",
- endpoint_id);
- ret = -EINVAL;
+ if (endpoint->toward_ipa) {
+ if (endpoint_id < tx_count)
+ continue;
+ } else if (endpoint_id >= rx_base) {
+ continue;
}
+
+ dev_err(dev, "endpoint id %u wrong direction\n", endpoint_id);
+ goto err_free_bitmap;
}
- return ret;
-}
+ return 0;
-void ipa_endpoint_deconfig(struct ipa *ipa)
-{
- ipa->available = 0; /* Nothing more to do */
+err_free_bitmap:
+ ipa_endpoint_deconfig(ipa);
+
+ return -EINVAL;
}
static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
@@ -1936,46 +1951,64 @@ static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
endpoint->toward_ipa = data->toward_ipa;
endpoint->config = data->endpoint.config;
- ipa->initialized |= BIT(endpoint->endpoint_id);
+ __set_bit(endpoint->endpoint_id, ipa->defined);
}
static void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
{
- endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id);
+ __clear_bit(endpoint->endpoint_id, endpoint->ipa->defined);
memset(endpoint, 0, sizeof(*endpoint));
}
void ipa_endpoint_exit(struct ipa *ipa)
{
- u32 initialized = ipa->initialized;
-
- while (initialized) {
- u32 endpoint_id = __fls(initialized);
+ u32 endpoint_id;
- initialized ^= BIT(endpoint_id);
+ ipa->filtered = 0;
+ for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count)
ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
- }
+
+ bitmap_free(ipa->enabled);
+ ipa->enabled = NULL;
+ bitmap_free(ipa->set_up);
+ ipa->set_up = NULL;
+ bitmap_free(ipa->defined);
+ ipa->defined = NULL;
+
memset(ipa->name_map, 0, sizeof(ipa->name_map));
memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
}
/* Returns a bitmask of endpoints that support filtering, or 0 on error */
-u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
+int ipa_endpoint_init(struct ipa *ipa, u32 count,
const struct ipa_gsi_endpoint_data *data)
{
enum ipa_endpoint_name name;
- u32 filter_map;
+ u32 filtered;
BUILD_BUG_ON(!IPA_REPLENISH_BATCH);
- if (!ipa_endpoint_data_valid(ipa, count, data))
- return 0; /* Error */
+ /* Number of endpoints is one more than the maximum ID */
+ ipa->endpoint_count = ipa_endpoint_max(ipa, count, data) + 1;
+ if (!ipa->endpoint_count)
+ return -EINVAL;
+
+ /* Initialize endpoint state bitmaps */
+ ipa->defined = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
+ if (!ipa->defined)
+ return -ENOMEM;
+
+ ipa->set_up = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
+ if (!ipa->set_up)
+ goto err_free_defined;
- ipa->initialized = 0;
+ ipa->enabled = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
+ if (!ipa->enabled)
+ goto err_free_set_up;
- filter_map = 0;
+ filtered = 0;
for (name = 0; name < count; name++, data++) {
if (ipa_gsi_endpoint_data_empty(data))
continue; /* Skip over empty slots */
@@ -1983,18 +2016,28 @@ u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
ipa_endpoint_init_one(ipa, name, data);
if (data->endpoint.filter_support)
- filter_map |= BIT(data->endpoint_id);
+ filtered |= BIT(data->endpoint_id);
if (data->ee_id == GSI_EE_MODEM && data->toward_ipa)
ipa->modem_tx_count++;
}
- if (!ipa_filter_map_valid(ipa, filter_map))
- goto err_endpoint_exit;
+ /* Make sure the set of filtered endpoints is valid */
+ if (!ipa_filtered_valid(ipa, filtered)) {
+ ipa_endpoint_exit(ipa);
- return filter_map; /* Non-zero bitmask */
+ return -EINVAL;
+ }
-err_endpoint_exit:
- ipa_endpoint_exit(ipa);
+ ipa->filtered = filtered;
- return 0; /* Error */
+ return 0;
+
+err_free_set_up:
+ bitmap_free(ipa->set_up);
+ ipa->set_up = NULL;
+err_free_defined:
+ bitmap_free(ipa->defined);
+ ipa->defined = NULL;
+
+ return -ENOMEM;
}
diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h
index d8dfa24f5214..4a5c3bc549df 100644
--- a/drivers/net/ipa/ipa_endpoint.h
+++ b/drivers/net/ipa/ipa_endpoint.h
@@ -195,7 +195,7 @@ void ipa_endpoint_deconfig(struct ipa *ipa);
void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id);
void ipa_endpoint_default_route_clear(struct ipa *ipa);
-u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
+int ipa_endpoint_init(struct ipa *ipa, u32 count,
const struct ipa_gsi_endpoint_data *data);
void ipa_endpoint_exit(struct ipa *ipa);
diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c
index c269432f9c2e..a49f66efacb8 100644
--- a/drivers/net/ipa/ipa_interrupt.c
+++ b/drivers/net/ipa/ipa_interrupt.c
@@ -132,24 +132,28 @@ static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt,
u32 endpoint_id, bool enable)
{
struct ipa *ipa = interrupt->ipa;
- u32 mask = BIT(endpoint_id);
+ u32 unit = endpoint_id / 32;
const struct ipa_reg *reg;
u32 offset;
+ u32 mask;
u32 val;
- WARN_ON(!(mask & ipa->available));
+ WARN_ON(!test_bit(endpoint_id, ipa->available));
/* IPA version 3.0 does not support TX_SUSPEND interrupt control */
if (ipa->version == IPA_VERSION_3_0)
return;
reg = ipa_reg(ipa, IRQ_SUSPEND_EN);
- offset = ipa_reg_offset(reg);
+ offset = ipa_reg_n_offset(reg, unit);
val = ioread32(ipa->reg_virt + offset);
+
+ mask = BIT(endpoint_id);
if (enable)
val |= mask;
else
val &= ~mask;
+
iowrite32(val, ipa->reg_virt + offset);
}
@@ -171,18 +175,24 @@ ipa_interrupt_suspend_disable(struct ipa_interrupt *interrupt, u32 endpoint_id)
void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt)
{
struct ipa *ipa = interrupt->ipa;
- const struct ipa_reg *reg;
- u32 val;
+ u32 unit_count;
+ u32 unit;
- reg = ipa_reg(ipa, IRQ_SUSPEND_INFO);
- val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));
+ unit_count = roundup(ipa->endpoint_count, 32);
+ for (unit = 0; unit < unit_count; unit++) {
+ const struct ipa_reg *reg;
+ u32 val;
- /* SUSPEND interrupt status isn't cleared on IPA version 3.0 */
- if (ipa->version == IPA_VERSION_3_0)
- return;
+ reg = ipa_reg(ipa, IRQ_SUSPEND_INFO);
+ val = ioread32(ipa->reg_virt + ipa_reg_n_offset(reg, unit));
- reg = ipa_reg(ipa, IRQ_SUSPEND_CLR);
- iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+ /* SUSPEND interrupt status isn't cleared on IPA version 3.0 */
+ if (ipa->version == IPA_VERSION_3_0)
+ continue;
+
+ reg = ipa_reg(ipa, IRQ_SUSPEND_CLR);
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, unit));
+ }
}
/* Simulate arrival of an IPA TX_SUSPEND interrupt */
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index 49537fccf6ad..4fb92f771974 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -82,6 +82,23 @@
#define IPA_XO_CLOCK_DIVIDER 192 /* 1 is subtracted where used */
/**
+ * enum ipa_firmware_loader: How GSI firmware gets loaded
+ *
+ * @IPA_LOADER_DEFER: System not ready; try again later
+ * @IPA_LOADER_SELF: AP loads GSI firmware
+ * @IPA_LOADER_MODEM: Modem loads GSI firmware, signals when done
+ * @IPA_LOADER_SKIP: Neither AP nor modem need to load GSI firmware
+ * @IPA_LOADER_INVALID: GSI firmware loader specification is invalid
+ */
+enum ipa_firmware_loader {
+ IPA_LOADER_DEFER,
+ IPA_LOADER_SELF,
+ IPA_LOADER_MODEM,
+ IPA_LOADER_SKIP,
+ IPA_LOADER_INVALID,
+};
+
+/**
* ipa_setup() - Set up IPA hardware
* @ipa: IPA pointer
*
@@ -646,6 +663,10 @@ static const struct of_device_id ipa_match[] = {
.data = &ipa_data_v4_5,
},
{
+ .compatible = "qcom,sm6350-ipa",
+ .data = &ipa_data_v4_7,
+ },
+ {
.compatible = "qcom,sm8350-ipa",
.data = &ipa_data_v4_9,
},
@@ -696,6 +717,50 @@ static void ipa_validate_build(void)
BUILD_BUG_ON(!ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY));
}
+static enum ipa_firmware_loader ipa_firmware_loader(struct device *dev)
+{
+ bool modem_init;
+ const char *str;
+ int ret;
+
+ /* Look up the old and new properties by name */
+ modem_init = of_property_read_bool(dev->of_node, "modem-init");
+ ret = of_property_read_string(dev->of_node, "qcom,gsi-loader", &str);
+
+ /* If the new property doesn't exist, it's legacy behavior */
+ if (ret == -EINVAL) {
+ if (modem_init)
+ return IPA_LOADER_MODEM;
+ goto out_self;
+ }
+
+ /* Any other error on the new property means it's poorly defined */
+ if (ret)
+ return IPA_LOADER_INVALID;
+
+ /* New property value exists; if old one does too, that's invalid */
+ if (modem_init)
+ return IPA_LOADER_INVALID;
+
+ /* Modem loads GSI firmware for "modem" */
+ if (!strcmp(str, "modem"))
+ return IPA_LOADER_MODEM;
+
+ /* No GSI firmware load is needed for "skip" */
+ if (!strcmp(str, "skip"))
+ return IPA_LOADER_SKIP;
+
+ /* Any value other than "self" is an error */
+ if (strcmp(str, "self"))
+ return IPA_LOADER_INVALID;
+out_self:
+ /* We need Trust Zone to load firmware; make sure it's available */
+ if (qcom_scm_is_available())
+ return IPA_LOADER_SELF;
+
+ return IPA_LOADER_DEFER;
+}
+
/**
* ipa_probe() - IPA platform driver probe function
* @pdev: Platform device pointer
@@ -722,9 +787,9 @@ static void ipa_validate_build(void)
static int ipa_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ enum ipa_firmware_loader loader;
const struct ipa_data *data;
struct ipa_power *power;
- bool modem_init;
struct ipa *ipa;
int ret;
@@ -742,11 +807,16 @@ static int ipa_probe(struct platform_device *pdev)
return -EINVAL;
}
- /* If we need Trust Zone, make sure it's available */
- modem_init = of_property_read_bool(dev->of_node, "modem-init");
- if (!modem_init)
- if (!qcom_scm_is_available())
- return -EPROBE_DEFER;
+ if (!data->modem_route_count) {
+ dev_err(dev, "modem_route_count cannot be zero\n");
+ return -EINVAL;
+ }
+
+ loader = ipa_firmware_loader(dev);
+ if (loader == IPA_LOADER_INVALID)
+ return -EINVAL;
+ if (loader == IPA_LOADER_DEFER)
+ return -EPROBE_DEFER;
/* The clock and interconnects might not be ready when we're
* probed, so might return -EPROBE_DEFER.
@@ -766,6 +836,7 @@ static int ipa_probe(struct platform_device *pdev)
dev_set_drvdata(dev, ipa);
ipa->power = power;
ipa->version = data->version;
+ ipa->modem_route_count = data->modem_route_count;
init_completion(&ipa->completion);
ret = ipa_reg_init(ipa);
@@ -782,18 +853,15 @@ static int ipa_probe(struct platform_device *pdev)
goto err_mem_exit;
/* Result is a non-zero mask of endpoints that support filtering */
- ipa->filter_map = ipa_endpoint_init(ipa, data->endpoint_count,
- data->endpoint_data);
- if (!ipa->filter_map) {
- ret = -EINVAL;
+ ret = ipa_endpoint_init(ipa, data->endpoint_count, data->endpoint_data);
+ if (ret)
goto err_gsi_exit;
- }
ret = ipa_table_init(ipa);
if (ret)
goto err_endpoint_exit;
- ret = ipa_smp2p_init(ipa, modem_init);
+ ret = ipa_smp2p_init(ipa, loader == IPA_LOADER_MODEM);
if (ret)
goto err_table_exit;
@@ -808,20 +876,20 @@ static int ipa_probe(struct platform_device *pdev)
dev_info(dev, "IPA driver initialized");
- /* If the modem is doing early initialization, it will trigger a
- * call to ipa_setup() when it has finished. In that case we're
- * done here.
+ /* If the modem is loading GSI firmware, it will trigger a call to
+ * ipa_setup() when it has finished. In that case we're done here.
*/
- if (modem_init)
+ if (loader == IPA_LOADER_MODEM)
goto done;
- /* Otherwise we need to load the firmware and have Trust Zone validate
- * and install it. If that succeeds we can proceed with setup.
- */
- ret = ipa_firmware_load(dev);
- if (ret)
- goto err_deconfig;
+ if (loader == IPA_LOADER_SELF) {
+ /* The AP is loading GSI firmware; do so now */
+ ret = ipa_firmware_load(dev);
+ if (ret)
+ goto err_deconfig;
+ } /* Otherwise loader == IPA_LOADER_SKIP */
+ /* GSI firmware is loaded; proceed to setup */
ret = ipa_setup(ipa);
if (ret)
goto err_deconfig;
diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
index f84c6830495a..9ec5af323f73 100644
--- a/drivers/net/ipa/ipa_mem.c
+++ b/drivers/net/ipa/ipa_mem.c
@@ -198,9 +198,12 @@ static bool ipa_mem_id_required(struct ipa *ipa, enum ipa_mem_id mem_id)
case IPA_MEM_PDN_CONFIG:
case IPA_MEM_STATS_QUOTA_MODEM:
- case IPA_MEM_STATS_TETHERING:
return ipa->version >= IPA_VERSION_4_0;
+ case IPA_MEM_STATS_TETHERING:
+ return ipa->version >= IPA_VERSION_4_0 &&
+ ipa->version != IPA_VERSION_5_0;
+
default:
return false; /* Anything else is optional */
}
@@ -366,14 +369,6 @@ int ipa_mem_config(struct ipa *ipa)
while (--canary_count);
}
- /* Make sure filter and route table memory regions are valid */
- if (!ipa_table_valid(ipa))
- goto err_dma_free;
-
- /* Validate memory-related properties relevant to immediate commands */
- if (!ipa_cmd_data_valid(ipa))
- goto err_dma_free;
-
/* Verify the microcontroller ring alignment (if defined) */
mem = ipa_mem_find(ipa, IPA_MEM_UC_EVENT_RING);
if (mem && mem->offset % 1024) {
@@ -625,6 +620,12 @@ int ipa_mem_init(struct ipa *ipa, const struct ipa_mem_data *mem_data)
ipa->mem_count = mem_data->local_count;
ipa->mem = mem_data->local;
+ /* Check the route and filter table memory regions */
+ if (!ipa_table_mem_valid(ipa, false))
+ return -EINVAL;
+ if (!ipa_table_mem_valid(ipa, true))
+ return -EINVAL;
+
ret = dma_set_mask_and_coherent(&ipa->pdev->dev, DMA_BIT_MASK(64));
if (ret) {
dev_err(dev, "error %d setting DMA mask\n", ret);
diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c
index 8295fd4b70d1..f70f0a1d1cda 100644
--- a/drivers/net/ipa/ipa_qmi.c
+++ b/drivers/net/ipa/ipa_qmi.c
@@ -284,6 +284,7 @@ static const struct ipa_init_modem_driver_req *
init_modem_driver_req(struct ipa_qmi *ipa_qmi)
{
struct ipa *ipa = container_of(ipa_qmi, struct ipa, qmi);
+ u32 modem_route_count = ipa->modem_route_count;
static struct ipa_init_modem_driver_req req;
const struct ipa_mem *mem;
@@ -308,12 +309,12 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
mem = ipa_mem_find(ipa, IPA_MEM_V4_ROUTE);
req.v4_route_tbl_info_valid = 1;
req.v4_route_tbl_info.start = ipa->mem_offset + mem->offset;
- req.v4_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
+ req.v4_route_tbl_info.end = modem_route_count - 1;
mem = ipa_mem_find(ipa, IPA_MEM_V6_ROUTE);
req.v6_route_tbl_info_valid = 1;
req.v6_route_tbl_info.start = ipa->mem_offset + mem->offset;
- req.v6_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
+ req.v6_route_tbl_info.end = modem_route_count - 1;
mem = ipa_mem_find(ipa, IPA_MEM_V4_FILTER);
req.v4_filter_tbl_start_valid = 1;
@@ -352,7 +353,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
req.v4_hash_route_tbl_info_valid = 1;
req.v4_hash_route_tbl_info.start =
ipa->mem_offset + mem->offset;
- req.v4_hash_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
+ req.v4_hash_route_tbl_info.end = modem_route_count - 1;
}
mem = ipa_mem_find(ipa, IPA_MEM_V6_ROUTE_HASHED);
@@ -360,7 +361,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
req.v6_hash_route_tbl_info_valid = 1;
req.v6_hash_route_tbl_info.start =
ipa->mem_offset + mem->offset;
- req.v6_hash_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
+ req.v6_hash_route_tbl_info.end = modem_route_count - 1;
}
mem = ipa_mem_find(ipa, IPA_MEM_V4_FILTER_HASHED);
diff --git a/drivers/net/ipa/ipa_qmi_msg.c b/drivers/net/ipa/ipa_qmi_msg.c
index 97c0befe8d86..894f99517233 100644
--- a/drivers/net/ipa/ipa_qmi_msg.c
+++ b/drivers/net/ipa/ipa_qmi_msg.c
@@ -9,7 +9,7 @@
#include "ipa_qmi_msg.h"
/* QMI message structure definition for struct ipa_indication_register_req */
-struct qmi_elem_info ipa_indication_register_req_ei[] = {
+const struct qmi_elem_info ipa_indication_register_req_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
@@ -116,7 +116,7 @@ struct qmi_elem_info ipa_indication_register_req_ei[] = {
};
/* QMI message structure definition for struct ipa_indication_register_rsp */
-struct qmi_elem_info ipa_indication_register_rsp_ei[] = {
+const struct qmi_elem_info ipa_indication_register_rsp_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -134,7 +134,7 @@ struct qmi_elem_info ipa_indication_register_rsp_ei[] = {
};
/* QMI message structure definition for struct ipa_driver_init_complete_req */
-struct qmi_elem_info ipa_driver_init_complete_req_ei[] = {
+const struct qmi_elem_info ipa_driver_init_complete_req_ei[] = {
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
@@ -151,7 +151,7 @@ struct qmi_elem_info ipa_driver_init_complete_req_ei[] = {
};
/* QMI message structure definition for struct ipa_driver_init_complete_rsp */
-struct qmi_elem_info ipa_driver_init_complete_rsp_ei[] = {
+const struct qmi_elem_info ipa_driver_init_complete_rsp_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -169,7 +169,7 @@ struct qmi_elem_info ipa_driver_init_complete_rsp_ei[] = {
};
/* QMI message structure definition for struct ipa_init_complete_ind */
-struct qmi_elem_info ipa_init_complete_ind_ei[] = {
+const struct qmi_elem_info ipa_init_complete_ind_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -187,7 +187,7 @@ struct qmi_elem_info ipa_init_complete_ind_ei[] = {
};
/* QMI message structure definition for struct ipa_mem_bounds */
-struct qmi_elem_info ipa_mem_bounds_ei[] = {
+const struct qmi_elem_info ipa_mem_bounds_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -208,7 +208,7 @@ struct qmi_elem_info ipa_mem_bounds_ei[] = {
};
/* QMI message structure definition for struct ipa_mem_array */
-struct qmi_elem_info ipa_mem_array_ei[] = {
+const struct qmi_elem_info ipa_mem_array_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -229,7 +229,7 @@ struct qmi_elem_info ipa_mem_array_ei[] = {
};
/* QMI message structure definition for struct ipa_mem_range */
-struct qmi_elem_info ipa_mem_range_ei[] = {
+const struct qmi_elem_info ipa_mem_range_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -250,7 +250,7 @@ struct qmi_elem_info ipa_mem_range_ei[] = {
};
/* QMI message structure definition for struct ipa_init_modem_driver_req */
-struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
+const struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
@@ -645,7 +645,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
};
/* QMI message structure definition for struct ipa_init_modem_driver_rsp */
-struct qmi_elem_info ipa_init_modem_driver_rsp_ei[] = {
+const struct qmi_elem_info ipa_init_modem_driver_rsp_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
diff --git a/drivers/net/ipa/ipa_qmi_msg.h b/drivers/net/ipa/ipa_qmi_msg.h
index e29663965f43..b73503552c4d 100644
--- a/drivers/net/ipa/ipa_qmi_msg.h
+++ b/drivers/net/ipa/ipa_qmi_msg.h
@@ -247,15 +247,15 @@ struct ipa_init_modem_driver_rsp {
};
/* Message structure definitions defined in "ipa_qmi_msg.c" */
-extern struct qmi_elem_info ipa_indication_register_req_ei[];
-extern struct qmi_elem_info ipa_indication_register_rsp_ei[];
-extern struct qmi_elem_info ipa_driver_init_complete_req_ei[];
-extern struct qmi_elem_info ipa_driver_init_complete_rsp_ei[];
-extern struct qmi_elem_info ipa_init_complete_ind_ei[];
-extern struct qmi_elem_info ipa_mem_bounds_ei[];
-extern struct qmi_elem_info ipa_mem_array_ei[];
-extern struct qmi_elem_info ipa_mem_range_ei[];
-extern struct qmi_elem_info ipa_init_modem_driver_req_ei[];
-extern struct qmi_elem_info ipa_init_modem_driver_rsp_ei[];
+extern const struct qmi_elem_info ipa_indication_register_req_ei[];
+extern const struct qmi_elem_info ipa_indication_register_rsp_ei[];
+extern const struct qmi_elem_info ipa_driver_init_complete_req_ei[];
+extern const struct qmi_elem_info ipa_driver_init_complete_rsp_ei[];
+extern const struct qmi_elem_info ipa_init_complete_ind_ei[];
+extern const struct qmi_elem_info ipa_mem_bounds_ei[];
+extern const struct qmi_elem_info ipa_mem_array_ei[];
+extern const struct qmi_elem_info ipa_mem_range_ei[];
+extern const struct qmi_elem_info ipa_init_modem_driver_req_ei[];
+extern const struct qmi_elem_info ipa_init_modem_driver_rsp_ei[];
#endif /* !_IPA_QMI_MSG_H_ */
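[Editor's sketch: constifying the element-info arrays lets them live in read-only data, and a QMI message handler can reference them directly. The message ID, decoded size, and callback below are assumptions for illustration, not taken from the patch.]

static void ipa_server_init_driver(struct qmi_handle *qmi,
				   struct sockaddr_qrtr *sq,
				   struct qmi_txn *txn, const void *decoded)
{
	/* Handle the decoded ipa_init_modem_driver_req here */
}

static const struct qmi_msg_handler ipa_server_msg_handlers[] = {
	{
		.type		= QMI_REQUEST,
		.msg_id		= 0x21,	/* assumed init-driver message ID */
		.ei		= ipa_init_modem_driver_req_ei,
		.decoded_size	= sizeof(struct ipa_init_modem_driver_req),
		.fn		= ipa_server_init_driver,
	},
	{ },
};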
diff --git a/drivers/net/ipa/ipa_reg.c b/drivers/net/ipa/ipa_reg.c
index 22f067741d9b..ddd529153e15 100644
--- a/drivers/net/ipa/ipa_reg.c
+++ b/drivers/net/ipa/ipa_reg.c
@@ -86,6 +86,8 @@ static const struct ipa_regs *ipa_regs(enum ipa_version version)
return &ipa_regs_v4_2;
case IPA_VERSION_4_5:
return &ipa_regs_v4_5;
+ case IPA_VERSION_4_7:
+ return &ipa_regs_v4_7;
case IPA_VERSION_4_9:
return &ipa_regs_v4_9;
case IPA_VERSION_4_11:
diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h
index 7bf70f70f63f..ff64b19a4df8 100644
--- a/drivers/net/ipa/ipa_reg.h
+++ b/drivers/net/ipa/ipa_reg.h
@@ -658,6 +658,7 @@ extern const struct ipa_regs ipa_regs_v3_1;
extern const struct ipa_regs ipa_regs_v3_5_1;
extern const struct ipa_regs ipa_regs_v4_2;
extern const struct ipa_regs ipa_regs_v4_5;
+extern const struct ipa_regs ipa_regs_v4_7;
extern const struct ipa_regs ipa_regs_v4_9;
extern const struct ipa_regs ipa_regs_v4_11;
diff --git a/drivers/net/ipa/ipa_sysfs.c b/drivers/net/ipa/ipa_sysfs.c
index 5cbc15a971f9..14bd2f903045 100644
--- a/drivers/net/ipa/ipa_sysfs.c
+++ b/drivers/net/ipa/ipa_sysfs.c
@@ -46,7 +46,7 @@ version_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct ipa *ipa = dev_get_drvdata(dev);
- return scnprintf(buf, PAGE_SIZE, "%s\n", ipa_version_string(ipa));
+ return sysfs_emit(buf, "%s\n", ipa_version_string(ipa));
}
static DEVICE_ATTR_RO(version);
@@ -70,7 +70,7 @@ static ssize_t rx_offload_show(struct device *dev,
{
struct ipa *ipa = dev_get_drvdata(dev);
- return scnprintf(buf, PAGE_SIZE, "%s\n", ipa_offload_string(ipa));
+ return sysfs_emit(buf, "%s\n", ipa_offload_string(ipa));
}
static DEVICE_ATTR_RO(rx_offload);
@@ -80,7 +80,7 @@ static ssize_t tx_offload_show(struct device *dev,
{
struct ipa *ipa = dev_get_drvdata(dev);
- return scnprintf(buf, PAGE_SIZE, "%s\n", ipa_offload_string(ipa));
+ return sysfs_emit(buf, "%s\n", ipa_offload_string(ipa));
}
static DEVICE_ATTR_RO(tx_offload);
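[Editor's sketch: sysfs_emit() is the preferred helper for sysfs show callbacks; it knows the buffer is PAGE_SIZE and page-aligned, so the caller no longer passes a length. A minimal example of the pattern, assuming the usual linux/device.h and linux/sysfs.h includes:]

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	/* sysfs_emit() bounds the output to PAGE_SIZE internally */
	return sysfs_emit(buf, "%s\n", "example");
}
static DEVICE_ATTR_RO(example);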
diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
index 510ff2dc8999..b81e27b61354 100644
--- a/drivers/net/ipa/ipa_table.c
+++ b/drivers/net/ipa/ipa_table.c
@@ -32,8 +32,8 @@
* endian 64-bit "slot" that holds the address of a rule definition. (The
* size of these slots is 64 bits regardless of the host DMA address size.)
*
- * Separate tables (both filter and route) used for IPv4 and IPv6. There
- * are normally another set of "hashed" filter and route tables, which are
+ * Separate tables (both filter and route) are used for IPv4 and IPv6. There
+ * is normally another set of "hashed" filter and route tables, which are
* used with a hash of message metadata. Hashed operation is not supported
* by all IPA hardware (IPA v4.2 doesn't support hashed tables).
*
@@ -51,19 +51,32 @@
* Each filter rule is associated with an AP or modem TX endpoint, though
* not all TX endpoints support filtering. The first 64-bit slot in a
* filter table is a bitmap indicating which endpoints have entries in
- * the table. The low-order bit (bit 0) in this bitmap represents a
- * special global filter, which applies to all traffic. This is not
- * used in the current code. Bit 1, if set, indicates that there is an
- * entry (i.e. slot containing a system address referring to a rule) for
- * endpoint 0 in the table. Bit 3, if set, indicates there is an entry
- * for endpoint 2, and so on. Space is set aside in IPA local memory to
- * hold as many filter table entries as might be required, but typically
- * they are not all used.
+ * the table. Each set bit in this bitmap indicates the presence of the
+ * address of a filter rule in the memory following the bitmap. Until IPA
+ * v5.0, the low-order bit (bit 0) in this bitmap represents a special
+ * global filter, which applies to all traffic. Otherwise the position of
+ * each set bit represents an endpoint for which a filter rule is defined.
+ *
+ * The global rule is not used in current code, and support for it is
+ * removed starting at IPA v5.0. For IPA v5.0+, the endpoint bitmap
+ * position defines the endpoint ID--i.e. if bit 1 is set in the endpoint
+ * bitmap, endpoint 1 has a filter rule. Older versions of IPA represent
+ * the presence of a filter rule for endpoint X by bit (X + 1) being set.
+ * I.e., bit 1 set indicates the presence of a filter rule for endpoint 0,
+ * and bit 3 set means there is a filter rule present for endpoint 2.
+ *
+ * Each filter table entry has the address of a set of equations that
+ * implement a filter rule. So following the endpoint bitmap there
+ * will be such an address/entry for each endpoint with a set bit in
+ * the bitmap.
*
* The AP initializes all entries in a filter table to refer to a "zero"
- * entry. Once initialized the modem and AP update the entries for
- * endpoints they "own" directly. Currently the AP does not use the
- * IPA filtering functionality.
+ * rule. Once initialized, the modem and AP update the entries for
+ * endpoints they "own" directly. Currently the AP does not use the IPA
+ * filtering functionality.
+ *
+ * This diagram shows an example of a filter table with an endpoint
+ * bitmap as defined prior to IPA v5.0.
*
* IPA Filter Table
* ----------------------
@@ -106,12 +119,6 @@
* ----------------------
*/
-/* Assignment of route table entries to the modem and AP */
-#define IPA_ROUTE_MODEM_MIN 0
-#define IPA_ROUTE_AP_MIN IPA_ROUTE_MODEM_COUNT
-#define IPA_ROUTE_AP_COUNT \
- (IPA_ROUTE_COUNT_MAX - IPA_ROUTE_MODEM_COUNT)
-
/* Filter or route rules consist of a set of 32-bit values followed by a
* 32-bit all-zero rule list terminator. The "zero rule" is simply an
* all-zero rule followed by the list terminator.
@@ -135,85 +142,40 @@ static void ipa_table_validate_build(void)
* assumes that it can be written using a pointer to __le64.
*/
BUILD_BUG_ON(IPA_ZERO_RULE_SIZE != sizeof(__le64));
-
- /* Impose a practical limit on the number of routes */
- BUILD_BUG_ON(IPA_ROUTE_COUNT_MAX > 32);
- /* The modem must be allotted at least one route table entry */
- BUILD_BUG_ON(!IPA_ROUTE_MODEM_COUNT);
- /* But it can't have more than what is available */
- BUILD_BUG_ON(IPA_ROUTE_MODEM_COUNT > IPA_ROUTE_COUNT_MAX);
-
-}
-
-static bool
-ipa_table_valid_one(struct ipa *ipa, enum ipa_mem_id mem_id, bool route)
-{
- const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);
- struct device *dev = &ipa->pdev->dev;
- u32 size;
-
- if (route)
- size = IPA_ROUTE_COUNT_MAX * sizeof(__le64);
- else
- size = (1 + IPA_FILTER_COUNT_MAX) * sizeof(__le64);
-
- if (!ipa_cmd_table_valid(ipa, mem, route))
- return false;
-
- /* mem->size >= size is sufficient, but we'll demand more */
- if (mem->size == size)
- return true;
-
- /* Hashed table regions can be zero size if hashing is not supported */
- if (ipa_table_hash_support(ipa) && !mem->size)
- return true;
-
- dev_err(dev, "%s table region %u size 0x%02x, expected 0x%02x\n",
- route ? "route" : "filter", mem_id, mem->size, size);
-
- return false;
}
-/* Verify the filter and route table memory regions are the expected size */
-bool ipa_table_valid(struct ipa *ipa)
+static const struct ipa_mem *
+ipa_table_mem(struct ipa *ipa, bool filter, bool hashed, bool ipv6)
{
- bool valid;
-
- valid = ipa_table_valid_one(ipa, IPA_MEM_V4_FILTER, false);
- valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_FILTER, false);
- valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V4_ROUTE, true);
- valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_ROUTE, true);
-
- if (!ipa_table_hash_support(ipa))
- return valid;
-
- valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V4_FILTER_HASHED,
- false);
- valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_FILTER_HASHED,
- false);
- valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V4_ROUTE_HASHED,
- true);
- valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_ROUTE_HASHED,
- true);
-
- return valid;
+ enum ipa_mem_id mem_id;
+
+ mem_id = filter ? hashed ? ipv6 ? IPA_MEM_V6_FILTER_HASHED
+ : IPA_MEM_V4_FILTER_HASHED
+ : ipv6 ? IPA_MEM_V6_FILTER
+ : IPA_MEM_V4_FILTER
+ : hashed ? ipv6 ? IPA_MEM_V6_ROUTE_HASHED
+ : IPA_MEM_V4_ROUTE_HASHED
+ : ipv6 ? IPA_MEM_V6_ROUTE
+ : IPA_MEM_V4_ROUTE;
+
+ return ipa_mem_find(ipa, mem_id);
}
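[Editor's sketch: the nested conditional above maps three booleans onto the eight table memory region IDs. Shown another way, purely for illustration, it is equivalent to a small lookup table.]

static const enum ipa_mem_id ipa_table_mem_ids[2][2][2] = {
	/* [filter][hashed][ipv6] */
	[0][0][0] = IPA_MEM_V4_ROUTE,
	[0][0][1] = IPA_MEM_V6_ROUTE,
	[0][1][0] = IPA_MEM_V4_ROUTE_HASHED,
	[0][1][1] = IPA_MEM_V6_ROUTE_HASHED,
	[1][0][0] = IPA_MEM_V4_FILTER,
	[1][0][1] = IPA_MEM_V6_FILTER,
	[1][1][0] = IPA_MEM_V4_FILTER_HASHED,
	[1][1][1] = IPA_MEM_V6_FILTER_HASHED,
};
/* ... return ipa_mem_find(ipa, ipa_table_mem_ids[filter][hashed][ipv6]); */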
-bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_map)
+bool ipa_filtered_valid(struct ipa *ipa, u64 filtered)
{
struct device *dev = &ipa->pdev->dev;
u32 count;
- if (!filter_map) {
+ if (!filtered) {
dev_err(dev, "at least one filtering endpoint is required\n");
return false;
}
- count = hweight32(filter_map);
- if (count > IPA_FILTER_COUNT_MAX) {
- dev_err(dev, "too many filtering endpoints (%u, max %u)\n",
- count, IPA_FILTER_COUNT_MAX);
+ count = hweight64(filtered);
+ if (count > ipa->filter_count) {
+ dev_err(dev, "too many filtering endpoints (%u > %u)\n",
+ count, ipa->filter_count);
return false;
}
@@ -229,7 +191,7 @@ static dma_addr_t ipa_table_addr(struct ipa *ipa, bool filter_mask, u16 count)
if (!count)
return 0;
- WARN_ON(count > max_t(u32, IPA_FILTER_COUNT_MAX, IPA_ROUTE_COUNT_MAX));
+ WARN_ON(count > max_t(u32, ipa->filter_count, ipa->route_count));
/* Skip over the zero rule and possibly the filter mask */
skip = filter_mask ? 1 : 2;
@@ -238,16 +200,17 @@ static dma_addr_t ipa_table_addr(struct ipa *ipa, bool filter_mask, u16 count)
}
static void ipa_table_reset_add(struct gsi_trans *trans, bool filter,
- u16 first, u16 count, enum ipa_mem_id mem_id)
+ bool hashed, bool ipv6, u16 first, u16 count)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
- const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);
+ const struct ipa_mem *mem;
dma_addr_t addr;
u32 offset;
u16 size;
- /* Nothing to do if the table memory region is empty */
- if (!mem->size)
+ /* Nothing to do if the memory region doesn't exist or is empty */
+ mem = ipa_table_mem(ipa, filter, hashed, ipv6);
+ if (!mem || !mem->size)
return;
if (filter)
@@ -265,14 +228,13 @@ static void ipa_table_reset_add(struct gsi_trans *trans, bool filter,
* for the IPv4 and IPv6 non-hashed and hashed filter tables.
*/
static int
-ipa_filter_reset_table(struct ipa *ipa, enum ipa_mem_id mem_id, bool modem)
+ipa_filter_reset_table(struct ipa *ipa, bool hashed, bool ipv6, bool modem)
{
- u32 ep_mask = ipa->filter_map;
- u32 count = hweight32(ep_mask);
+ u64 ep_mask = ipa->filtered;
struct gsi_trans *trans;
enum gsi_ee_id ee_id;
- trans = ipa_cmd_trans_alloc(ipa, count);
+ trans = ipa_cmd_trans_alloc(ipa, hweight64(ep_mask));
if (!trans) {
dev_err(&ipa->pdev->dev,
"no transaction for %s filter reset\n",
@@ -291,7 +253,7 @@ ipa_filter_reset_table(struct ipa *ipa, enum ipa_mem_id mem_id, bool modem)
if (endpoint->ee_id != ee_id)
continue;
- ipa_table_reset_add(trans, true, endpoint_id, 1, mem_id);
+ ipa_table_reset_add(trans, true, hashed, ipv6, endpoint_id, 1);
}
gsi_trans_commit_wait(trans);
@@ -307,18 +269,18 @@ static int ipa_filter_reset(struct ipa *ipa, bool modem)
{
int ret;
- ret = ipa_filter_reset_table(ipa, IPA_MEM_V4_FILTER, modem);
+ ret = ipa_filter_reset_table(ipa, false, false, modem);
if (ret)
return ret;
- ret = ipa_filter_reset_table(ipa, IPA_MEM_V4_FILTER_HASHED, modem);
+ ret = ipa_filter_reset_table(ipa, true, false, modem);
if (ret)
return ret;
- ret = ipa_filter_reset_table(ipa, IPA_MEM_V6_FILTER, modem);
+ ret = ipa_filter_reset_table(ipa, false, true, modem);
if (ret)
return ret;
- ret = ipa_filter_reset_table(ipa, IPA_MEM_V6_FILTER_HASHED, modem);
+ ret = ipa_filter_reset_table(ipa, true, true, modem);
return ret;
}
@@ -329,6 +291,7 @@ static int ipa_filter_reset(struct ipa *ipa, bool modem)
* */
static int ipa_route_reset(struct ipa *ipa, bool modem)
{
+ u32 modem_route_count = ipa->modem_route_count;
struct gsi_trans *trans;
u16 first;
u16 count;
@@ -342,20 +305,18 @@ static int ipa_route_reset(struct ipa *ipa, bool modem)
}
if (modem) {
- first = IPA_ROUTE_MODEM_MIN;
- count = IPA_ROUTE_MODEM_COUNT;
+ first = 0;
+ count = modem_route_count;
} else {
- first = IPA_ROUTE_AP_MIN;
- count = IPA_ROUTE_AP_COUNT;
+ first = modem_route_count;
+ count = ipa->route_count - modem_route_count;
}
- ipa_table_reset_add(trans, false, first, count, IPA_MEM_V4_ROUTE);
- ipa_table_reset_add(trans, false, first, count,
- IPA_MEM_V4_ROUTE_HASHED);
+ ipa_table_reset_add(trans, false, false, false, first, count);
+ ipa_table_reset_add(trans, false, true, false, first, count);
- ipa_table_reset_add(trans, false, first, count, IPA_MEM_V6_ROUTE);
- ipa_table_reset_add(trans, false, first, count,
- IPA_MEM_V6_ROUTE_HASHED);
+ ipa_table_reset_add(trans, false, false, true, first, count);
+ ipa_table_reset_add(trans, false, true, true, first, count);
gsi_trans_commit_wait(trans);
@@ -413,16 +374,15 @@ int ipa_table_hash_flush(struct ipa *ipa)
return 0;
}
-static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
- enum ipa_cmd_opcode opcode,
- enum ipa_mem_id mem_id,
- enum ipa_mem_id hash_mem_id)
+static void ipa_table_init_add(struct gsi_trans *trans, bool filter, bool ipv6)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
- const struct ipa_mem *hash_mem = ipa_mem_find(ipa, hash_mem_id);
- const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);
+ const struct ipa_mem *hash_mem;
+ enum ipa_cmd_opcode opcode;
+ const struct ipa_mem *mem;
dma_addr_t hash_addr;
dma_addr_t addr;
+ u32 hash_offset;
u32 zero_offset;
u16 hash_count;
u32 zero_size;
@@ -430,6 +390,16 @@ static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
u16 count;
u16 size;
+ opcode = filter ? ipv6 ? IPA_CMD_IP_V6_FILTER_INIT
+ : IPA_CMD_IP_V4_FILTER_INIT
+ : ipv6 ? IPA_CMD_IP_V6_ROUTING_INIT
+ : IPA_CMD_IP_V4_ROUTING_INIT;
+
+ /* The non-hashed region will exist (see ipa_table_mem_valid()) */
+ mem = ipa_table_mem(ipa, filter, false, ipv6);
+ hash_mem = ipa_table_mem(ipa, filter, true, ipv6);
+ hash_offset = hash_mem ? hash_mem->offset : 0;
+
/* Compute the number of table entries to initialize */
if (filter) {
/* The number of filtering endpoints determines number of
@@ -437,14 +407,14 @@ static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
* to hold the bitmap itself. The size of the hashed filter
* table is either the same as the non-hashed one, or zero.
*/
- count = 1 + hweight32(ipa->filter_map);
- hash_count = hash_mem->size ? count : 0;
+ count = 1 + hweight64(ipa->filtered);
+ hash_count = hash_mem && hash_mem->size ? count : 0;
} else {
/* The size of a route table region determines the number
* of entries it has.
*/
count = mem->size / sizeof(__le64);
- hash_count = hash_mem->size / sizeof(__le64);
+ hash_count = hash_mem ? hash_mem->size / sizeof(__le64) : 0;
}
size = count * sizeof(__le64);
hash_size = hash_count * sizeof(__le64);
@@ -453,7 +423,7 @@ static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
hash_addr = ipa_table_addr(ipa, filter, hash_count);
ipa_cmd_table_init_add(trans, opcode, size, mem->offset, addr,
- hash_size, hash_mem->offset, hash_addr);
+ hash_size, hash_offset, hash_addr);
if (!filter)
return;
@@ -466,7 +436,7 @@ static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
return;
/* Zero the unused space in the hashed filter table */
- zero_offset = hash_mem->offset + hash_size;
+ zero_offset = hash_offset + hash_size;
zero_size = hash_mem->size - hash_size;
ipa_cmd_dma_shared_mem_add(trans, zero_offset, zero_size,
ipa->zero_addr, true);
@@ -495,17 +465,10 @@ int ipa_table_setup(struct ipa *ipa)
return -EBUSY;
}
- ipa_table_init_add(trans, false, IPA_CMD_IP_V4_ROUTING_INIT,
- IPA_MEM_V4_ROUTE, IPA_MEM_V4_ROUTE_HASHED);
-
- ipa_table_init_add(trans, false, IPA_CMD_IP_V6_ROUTING_INIT,
- IPA_MEM_V6_ROUTE, IPA_MEM_V6_ROUTE_HASHED);
-
- ipa_table_init_add(trans, true, IPA_CMD_IP_V4_FILTER_INIT,
- IPA_MEM_V4_FILTER, IPA_MEM_V4_FILTER_HASHED);
-
- ipa_table_init_add(trans, true, IPA_CMD_IP_V6_FILTER_INIT,
- IPA_MEM_V6_FILTER, IPA_MEM_V6_FILTER_HASHED);
+ ipa_table_init_add(trans, false, false);
+ ipa_table_init_add(trans, false, true);
+ ipa_table_init_add(trans, true, false);
+ ipa_table_init_add(trans, true, true);
gsi_trans_commit_wait(trans);
@@ -542,7 +505,7 @@ static void ipa_filter_tuple_zero(struct ipa_endpoint *endpoint)
static void ipa_filter_config(struct ipa *ipa, bool modem)
{
enum gsi_ee_id ee_id = modem ? GSI_EE_MODEM : GSI_EE_AP;
- u32 ep_mask = ipa->filter_map;
+ u64 ep_mask = ipa->filtered;
if (!ipa_table_hash_support(ipa))
return;
@@ -559,10 +522,9 @@ static void ipa_filter_config(struct ipa *ipa, bool modem)
}
}
-static bool ipa_route_id_modem(u32 route_id)
+static bool ipa_route_id_modem(struct ipa *ipa, u32 route_id)
{
- return route_id >= IPA_ROUTE_MODEM_MIN &&
- route_id <= IPA_ROUTE_MODEM_MIN + IPA_ROUTE_MODEM_COUNT - 1;
+ return route_id < ipa->modem_route_count;
}
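[Editor's sketch: with the fixed IPA_ROUTE_MODEM_* defines gone, the route table is partitioned purely by the per-SoC count: modem entries occupy IDs 0 .. modem_route_count - 1 and the AP owns the remainder. The complementary helper below is hypothetical, shown only to illustrate the layout.]

static bool ipa_route_id_ap(struct ipa *ipa, u32 route_id)
{
	/* AP-owned route IDs follow the modem's block */
	return route_id >= ipa->modem_route_count &&
	       route_id < ipa->route_count;
}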
/**
@@ -597,8 +559,8 @@ static void ipa_route_config(struct ipa *ipa, bool modem)
if (!ipa_table_hash_support(ipa))
return;
- for (route_id = 0; route_id < IPA_ROUTE_COUNT_MAX; route_id++)
- if (ipa_route_id_modem(route_id) == modem)
+ for (route_id = 0; route_id < ipa->route_count; route_id++)
+ if (ipa_route_id_modem(ipa, route_id) == modem)
ipa_route_tuple_zero(ipa, route_id);
}
@@ -611,14 +573,94 @@ void ipa_table_config(struct ipa *ipa)
ipa_route_config(ipa, true);
}
-/*
- * Initialize a coherent DMA allocation containing initialized filter and
+/* Verify that the sizes of the IPA filter or routing table memory regions
+ * are valid.  If valid, record the number of entries for that table type.
+ */
+bool ipa_table_mem_valid(struct ipa *ipa, bool filter)
+{
+ bool hash_support = ipa_table_hash_support(ipa);
+ const struct ipa_mem *mem_hashed;
+ const struct ipa_mem *mem_ipv4;
+ const struct ipa_mem *mem_ipv6;
+ u32 count;
+
+ /* IPv4 and IPv6 non-hashed tables are expected to be defined and
+ * have the same size. Both must have at least two entries (and
+ * would normally have more than that).
+ */
+ mem_ipv4 = ipa_table_mem(ipa, filter, false, false);
+ if (!mem_ipv4)
+ return false;
+
+ mem_ipv6 = ipa_table_mem(ipa, filter, false, true);
+ if (!mem_ipv6)
+ return false;
+
+ if (mem_ipv4->size != mem_ipv6->size)
+ return false;
+
+ /* Compute and record the number of entries for each table type */
+ count = mem_ipv4->size / sizeof(__le64);
+ if (count < 2)
+ return false;
+ if (filter)
+ ipa->filter_count = count - 1; /* Filter map in first entry */
+ else
+ ipa->route_count = count;
+
+ /* Table offset and size must fit in TABLE_INIT command fields */
+ if (!ipa_cmd_table_init_valid(ipa, mem_ipv4, !filter))
+ return false;
+
+ /* Make sure the regions are big enough */
+ if (filter) {
+ * Filter tables must be able to hold the endpoint bitmap plus
+ * an entry for each endpoint that supports filtering
+ */
+ if (count < 1 + hweight64(ipa->filtered))
+ return false;
+ } else {
+ /* Routing tables must be able to hold all modem entries,
+ * plus at least one entry for the AP.
+ */
+ if (count < ipa->modem_route_count + 1)
+ return false;
+ }
+
+ /* If hashing is supported, hashed tables are expected to be defined,
+ * and have the same size as non-hashed tables. If hashing is not
+ * supported, hashed tables are expected to have zero size (or not
+ * be defined).
+ */
+ mem_hashed = ipa_table_mem(ipa, filter, true, false);
+ if (hash_support) {
+ if (!mem_hashed || mem_hashed->size != mem_ipv4->size)
+ return false;
+ } else {
+ if (mem_hashed && mem_hashed->size)
+ return false;
+ }
+
+ /* Same check for IPv6 tables */
+ mem_hashed = ipa_table_mem(ipa, filter, true, true);
+ if (hash_support) {
+ if (!mem_hashed || mem_hashed->size != mem_ipv6->size)
+ return false;
+ } else {
+ if (mem_hashed && mem_hashed->size)
+ return false;
+ }
+
+ return true;
+}
+
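[Editor's sketch: one way the new check might be invoked, validating both table types during early setup. The wrapper name and call site are assumptions; the patch wires the actual calls up elsewhere.]

static bool ipa_table_regions_valid(struct ipa *ipa)
{
	/* Routing tables first (records route_count), then filter */
	return ipa_table_mem_valid(ipa, false) &&
	       ipa_table_mem_valid(ipa, true);
}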
+/* Initialize a coherent DMA allocation containing initialized filter and
* route table data. This is used when initializing or resetting the IPA
* filter or route table.
*
* The first entry in a filter table contains a bitmap indicating which
* endpoints contain entries in the table. In addition to that first entry,
- * there are at most IPA_FILTER_COUNT_MAX entries that follow. Filter table
+ * there is a fixed maximum number of entries that follow. Filter table
* entries are 64 bits wide, and (other than the bitmap) contain the DMA
* address of a filter rule. A "zero rule" indicates no filtering, and
* consists of 64 bits of zeroes. When a filter table is initialized (or
@@ -629,12 +671,6 @@ void ipa_table_config(struct ipa *ipa)
* when a route table is initialized or reset, its entries are made to refer
* to the zero rule. The zero rule is shared for route and filter tables.
*
- * Note that the IPA hardware requires a filter or route rule address to be
- * aligned on a 128 byte boundary. The coherent DMA buffer we allocate here
- * has a minimum alignment, and we place the zero rule at the base of that
- * allocated space. In ipa_table_init() we verify the minimum DMA allocation
- * meets our requirement.
- *
* +-------------------+
* --> | zero rule |
* / |-------------------|
@@ -642,8 +678,8 @@ void ipa_table_config(struct ipa *ipa)
* |\ |-------------------|
* | ---- zero rule address | \
* |\ |-------------------| |
- * | ---- zero rule address | | IPA_FILTER_COUNT_MAX
- * | |-------------------| > or IPA_ROUTE_COUNT_MAX,
+ * | ---- zero rule address | | Max IPA filter count
+ * | |-------------------| > or IPA route count,
* | ... | whichever is greater
* \ |-------------------| |
* ---- zero rule address | /
@@ -651,15 +687,17 @@ void ipa_table_config(struct ipa *ipa)
*/
int ipa_table_init(struct ipa *ipa)
{
- u32 count = max_t(u32, IPA_FILTER_COUNT_MAX, IPA_ROUTE_COUNT_MAX);
struct device *dev = &ipa->pdev->dev;
dma_addr_t addr;
__le64 le_addr;
__le64 *virt;
size_t size;
+ u32 count;
ipa_table_validate_build();
+ count = max_t(u32, ipa->filter_count, ipa->route_count);
+
/* The IPA hardware requires route and filter table rules to be
* aligned on a 128-byte boundary. We put the "zero rule" at the
* base of the table area allocated here. The DMA address returned
@@ -677,12 +715,16 @@ int ipa_table_init(struct ipa *ipa)
/* First slot is the zero rule */
*virt++ = 0;
- /* Next is the filter table bitmap. The "soft" bitmap value
- * must be converted to the hardware representation by shifting
- * it left one position. (Bit 0 repesents global filtering,
- * which is possible but not used.)
+ /* Next is the filter table bitmap. The "soft" bitmap value might
+ * need to be converted to the hardware representation by shifting
+ * it left one position. Prior to IPA v5.0, bit 0 represents global
+ * filtering, which is possible but not used. IPA v5.0+ eliminated
+ * that option, so there's no shifting required.
*/
- *virt++ = cpu_to_le64((u64)ipa->filter_map << 1);
+ if (ipa->version < IPA_VERSION_5_0)
+ *virt++ = cpu_to_le64(ipa->filtered << 1);
+ else
+ *virt++ = cpu_to_le64(ipa->filtered);
/* All the rest contain the DMA address of the zero rule */
le_addr = cpu_to_le64(addr);
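[Editor's sketch: the shift above exists only because pre-v5.0 hardware reserves bit 0 for the unused global filter, so endpoint X is represented by bit X + 1. Factored out for illustration (the helper is not part of the patch):]

static __le64 ipa_filtered_to_hw(struct ipa *ipa)
{
	u64 filtered = ipa->filtered;

	/* Before IPA v5.0, endpoint X is represented by bit X + 1 */
	if (ipa->version < IPA_VERSION_5_0)
		filtered <<= 1;

	return cpu_to_le64(filtered);
}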
@@ -694,7 +736,7 @@ int ipa_table_init(struct ipa *ipa)
void ipa_table_exit(struct ipa *ipa)
{
- u32 count = max_t(u32, 1 + IPA_FILTER_COUNT_MAX, IPA_ROUTE_COUNT_MAX);
+ u32 count = max_t(u32, 1 + ipa->filter_count, ipa->route_count);
struct device *dev = &ipa->pdev->dev;
size_t size;
diff --git a/drivers/net/ipa/ipa_table.h b/drivers/net/ipa/ipa_table.h
index 395189f75d78..7cc951904bb4 100644
--- a/drivers/net/ipa/ipa_table.h
+++ b/drivers/net/ipa/ipa_table.h
@@ -10,31 +10,14 @@
struct ipa;
-/* The maximum number of filter table entries (IPv4, IPv6; hashed or not) */
-#define IPA_FILTER_COUNT_MAX 14
-
-/* The number of route table entries allotted to the modem */
-#define IPA_ROUTE_MODEM_COUNT 8
-
-/* The maximum number of route table entries (IPv4, IPv6; hashed or not) */
-#define IPA_ROUTE_COUNT_MAX 15
-
/**
- * ipa_table_valid() - Validate route and filter table memory regions
+ * ipa_filtered_valid() - Validate a filter table endpoint bitmap
* @ipa: IPA pointer
+ * @filtered: Filter table endpoint bitmap to check
*
 * Return: true if the endpoint bitmap is valid, false otherwise
*/
-bool ipa_table_valid(struct ipa *ipa);
-
-/**
- * ipa_filter_map_valid() - Validate a filter table endpoint bitmap
- * @ipa: IPA pointer
- * @filter_mask: Filter table endpoint bitmap to check
- *
- * Return: true if all regions are valid, false otherwise
- */
-bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_mask);
+bool ipa_filtered_valid(struct ipa *ipa, u64 filtered);
/**
* ipa_table_hash_support() - Return true if hashed tables are supported
@@ -86,4 +69,11 @@ int ipa_table_init(struct ipa *ipa);
*/
void ipa_table_exit(struct ipa *ipa);
+/**
+ * ipa_table_mem_valid() - Validate sizes of table memory regions
+ * @ipa: IPA pointer
+ * @filter: Whether to check filter or routing tables
+ *
+ * Return: true if the table memory regions are valid, false otherwise
+ */
+bool ipa_table_mem_valid(struct ipa *ipa, bool filter);
+
#endif /* _IPA_TABLE_H_ */
diff --git a/drivers/net/ipa/ipa_version.h b/drivers/net/ipa/ipa_version.h
index 7870e0cc3d7c..d15821467743 100644
--- a/drivers/net/ipa/ipa_version.h
+++ b/drivers/net/ipa/ipa_version.h
@@ -19,6 +19,7 @@
* @IPA_VERSION_4_7: IPA version 4.7/GSI version 2.7
* @IPA_VERSION_4_9: IPA version 4.9/GSI version 2.9
* @IPA_VERSION_4_11: IPA version 4.11/GSI version 2.11 (2.1.1)
+ * @IPA_VERSION_5_0: IPA version 5.0/GSI version 3.0
* @IPA_VERSION_COUNT: Number of defined IPA versions
*
* Defines the version of IPA (and GSI) hardware present on the platform.
@@ -36,6 +37,7 @@ enum ipa_version {
IPA_VERSION_4_7,
IPA_VERSION_4_9,
IPA_VERSION_4_11,
+ IPA_VERSION_5_0,
IPA_VERSION_COUNT, /* Last; not a version */
};
@@ -46,8 +48,10 @@ static inline bool ipa_version_supported(enum ipa_version version)
case IPA_VERSION_3_5_1:
case IPA_VERSION_4_2:
case IPA_VERSION_4_5:
+ case IPA_VERSION_4_7:
case IPA_VERSION_4_9:
case IPA_VERSION_4_11:
+ case IPA_VERSION_5_0:
return true;
default:
return false;
diff --git a/drivers/net/ipa/reg/ipa_reg-v3.1.c b/drivers/net/ipa/reg/ipa_reg-v3.1.c
index 0d002c3c38a2..677ece3bce9e 100644
--- a/drivers/net/ipa/reg/ipa_reg-v3.1.c
+++ b/drivers/net/ipa/reg/ipa_reg-v3.1.c
@@ -103,7 +103,7 @@ static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x0000090);
/* Valid bits defined by ipa->available */
-IPA_REG(STATE_AGGR_ACTIVE, state_aggr_active, 0x0000010c);
+IPA_REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x0000010c, 0x0004);
IPA_REG(IPA_BCR, ipa_bcr, 0x000001d0);
@@ -116,7 +116,7 @@ static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
/* Valid bits defined by ipa->available */
-IPA_REG(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec);
+IPA_REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
static const u32 ipa_reg_counter_cfg_fmask[] = {
[EOT_COAL_GRANULARITY] = GENMASK(3, 0),
@@ -386,13 +386,16 @@ static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */
-IPA_REG(IRQ_SUSPEND_INFO, irq_suspend_info, 0x00003030 + 0x1000 * GSI_EE_AP);
+IPA_REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
+ 0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG(IRQ_SUSPEND_EN, irq_suspend_en, 0x00003034 + 0x1000 * GSI_EE_AP);
+IPA_REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
+ 0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG(IRQ_SUSPEND_CLR, irq_suspend_clr, 0x00003038 + 0x1000 * GSI_EE_AP);
+IPA_REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
+ 0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
static const struct ipa_reg *ipa_reg_array[] = {
[COMP_CFG] = &ipa_reg_comp_cfg,
diff --git a/drivers/net/ipa/reg/ipa_reg-v3.5.1.c b/drivers/net/ipa/reg/ipa_reg-v3.5.1.c
index 6e2f939b18f1..b9c6a50de243 100644
--- a/drivers/net/ipa/reg/ipa_reg-v3.5.1.c
+++ b/drivers/net/ipa/reg/ipa_reg-v3.5.1.c
@@ -108,7 +108,7 @@ static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x0000090);
/* Valid bits defined by ipa->available */
-IPA_REG(STATE_AGGR_ACTIVE, state_aggr_active, 0x0000010c);
+IPA_REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x0000010c, 0x0004);
IPA_REG(IPA_BCR, ipa_bcr, 0x000001d0);
@@ -121,7 +121,7 @@ static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
/* Valid bits defined by ipa->available */
-IPA_REG(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec);
+IPA_REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
static const u32 ipa_reg_counter_cfg_fmask[] = {
/* Bits 0-3 reserved */
@@ -397,13 +397,16 @@ static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */
-IPA_REG(IRQ_SUSPEND_INFO, irq_suspend_info, 0x00003030 + 0x1000 * GSI_EE_AP);
+IPA_REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
+ 0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG(IRQ_SUSPEND_EN, irq_suspend_en, 0x00003034 + 0x1000 * GSI_EE_AP);
+IPA_REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
+ 0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG(IRQ_SUSPEND_CLR, irq_suspend_clr, 0x00003038 + 0x1000 * GSI_EE_AP);
+IPA_REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
+ 0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
static const struct ipa_reg *ipa_reg_array[] = {
[COMP_CFG] = &ipa_reg_comp_cfg,
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.11.c b/drivers/net/ipa/reg/ipa_reg-v4.11.c
index 8fd36569bb9f..9a315130530d 100644
--- a/drivers/net/ipa/reg/ipa_reg-v4.11.c
+++ b/drivers/net/ipa/reg/ipa_reg-v4.11.c
@@ -140,7 +140,7 @@ static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
/* Valid bits defined by ipa->available */
-IPA_REG(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4);
+IPA_REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
[IPA_BASE_ADDR] = GENMASK(17, 0),
@@ -151,7 +151,7 @@ static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
/* Valid bits defined by ipa->available */
-IPA_REG(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec);
+IPA_REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
/* Bits 0-1 reserved */
@@ -453,13 +453,16 @@ static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000401c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */
-IPA_REG(IRQ_SUSPEND_INFO, irq_suspend_info, 0x00004030 + 0x1000 * GSI_EE_AP);
+IPA_REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
+ 0x00004030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG(IRQ_SUSPEND_EN, irq_suspend_en, 0x00004034 + 0x1000 * GSI_EE_AP);
+IPA_REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
+ 0x00004034 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG(IRQ_SUSPEND_CLR, irq_suspend_clr, 0x00004038 + 0x1000 * GSI_EE_AP);
+IPA_REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
+ 0x00004038 + 0x1000 * GSI_EE_AP, 0x0004);
static const struct ipa_reg *ipa_reg_array[] = {
[COMP_CFG] = &ipa_reg_comp_cfg,
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.2.c b/drivers/net/ipa/reg/ipa_reg-v4.2.c
index f8e78e1907c8..7a95149f8ec7 100644
--- a/drivers/net/ipa/reg/ipa_reg-v4.2.c
+++ b/drivers/net/ipa/reg/ipa_reg-v4.2.c
@@ -132,7 +132,7 @@ static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
/* Valid bits defined by ipa->available */
-IPA_REG(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4);
+IPA_REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
IPA_REG(IPA_BCR, ipa_bcr, 0x000001d0);
@@ -145,7 +145,7 @@ static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
/* Valid bits defined by ipa->available */
-IPA_REG(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec);
+IPA_REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
static const u32 ipa_reg_counter_cfg_fmask[] = {
/* Bits 0-3 reserved */
@@ -399,13 +399,16 @@ static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */
-IPA_REG(IRQ_SUSPEND_INFO, irq_suspend_info, 0x00003030 + 0x1000 * GSI_EE_AP);
+IPA_REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
+ 0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG(IRQ_SUSPEND_EN, irq_suspend_en, 0x00003034 + 0x1000 * GSI_EE_AP);
+IPA_REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
+ 0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG(IRQ_SUSPEND_CLR, irq_suspend_clr, 0x00003038 + 0x1000 * GSI_EE_AP);
+IPA_REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
+ 0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
static const struct ipa_reg *ipa_reg_array[] = {
[COMP_CFG] = &ipa_reg_comp_cfg,
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.5.c b/drivers/net/ipa/reg/ipa_reg-v4.5.c
index d32b805abb11..587eb8d4e00f 100644
--- a/drivers/net/ipa/reg/ipa_reg-v4.5.c
+++ b/drivers/net/ipa/reg/ipa_reg-v4.5.c
@@ -134,7 +134,7 @@ static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
/* Valid bits defined by ipa->available */
-IPA_REG(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4);
+IPA_REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
[IPA_BASE_ADDR] = GENMASK(17, 0),
@@ -145,7 +145,7 @@ static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
/* Valid bits defined by ipa->available */
-IPA_REG(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec);
+IPA_REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
/* Bits 0-1 reserved */
@@ -472,13 +472,16 @@ static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */
-IPA_REG(IRQ_SUSPEND_INFO, irq_suspend_info, 0x00003030 + 0x1000 * GSI_EE_AP);
+IPA_REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
+ 0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG(IRQ_SUSPEND_EN, irq_suspend_en, 0x00003034 + 0x1000 * GSI_EE_AP);
+IPA_REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
+ 0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG(IRQ_SUSPEND_CLR, irq_suspend_clr, 0x00003038 + 0x1000 * GSI_EE_AP);
+IPA_REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
+ 0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
static const struct ipa_reg *ipa_reg_array[] = {
[COMP_CFG] = &ipa_reg_comp_cfg,
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.7.c b/drivers/net/ipa/reg/ipa_reg-v4.7.c
new file mode 100644
index 000000000000..21f8a58e59a0
--- /dev/null
+++ b/drivers/net/ipa/reg/ipa_reg-v4.7.c
@@ -0,0 +1,507 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2022 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../ipa.h"
+#include "../ipa_reg.h"
+
+static const u32 ipa_reg_comp_cfg_fmask[] = {
+ [RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS] = BIT(0),
+ [GSI_SNOC_BYPASS_DIS] = BIT(1),
+ [GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
+ [GEN_QMB_1_SNOC_BYPASS_DIS] = BIT(3),
+ /* Bit 4 reserved */
+ [IPA_QMB_SELECT_CONS_EN] = BIT(5),
+ [IPA_QMB_SELECT_PROD_EN] = BIT(6),
+ [GSI_MULTI_INORDER_RD_DIS] = BIT(7),
+ [GSI_MULTI_INORDER_WR_DIS] = BIT(8),
+ [GEN_QMB_0_MULTI_INORDER_RD_DIS] = BIT(9),
+ [GEN_QMB_1_MULTI_INORDER_RD_DIS] = BIT(10),
+ [GEN_QMB_0_MULTI_INORDER_WR_DIS] = BIT(11),
+ [GEN_QMB_1_MULTI_INORDER_WR_DIS] = BIT(12),
+ [GEN_QMB_0_SNOC_CNOC_LOOP_PROT_DIS] = BIT(13),
+ [GSI_SNOC_CNOC_LOOP_PROT_DISABLE] = BIT(14),
+ [GSI_MULTI_AXI_MASTERS_DIS] = BIT(15),
+ [IPA_QMB_SELECT_GLOBAL_EN] = BIT(16),
+ [ATOMIC_FETCHER_ARB_LOCK_DIS] = GENMASK(20, 17),
+ [FULL_FLUSH_WAIT_RS_CLOSURE_EN] = BIT(21),
+ /* Bits 22-31 reserved */
+};
+
+IPA_REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+
+static const u32 ipa_reg_clkon_cfg_fmask[] = {
+ [CLKON_RX] = BIT(0),
+ [CLKON_PROC] = BIT(1),
+ [TX_WRAPPER] = BIT(2),
+ [CLKON_MISC] = BIT(3),
+ [RAM_ARB] = BIT(4),
+ [FTCH_HPS] = BIT(5),
+ [FTCH_DPS] = BIT(6),
+ [CLKON_HPS] = BIT(7),
+ [CLKON_DPS] = BIT(8),
+ [RX_HPS_CMDQS] = BIT(9),
+ [HPS_DPS_CMDQS] = BIT(10),
+ [DPS_TX_CMDQS] = BIT(11),
+ [RSRC_MNGR] = BIT(12),
+ [CTX_HANDLER] = BIT(13),
+ [ACK_MNGR] = BIT(14),
+ [D_DCPH] = BIT(15),
+ [H_DCPH] = BIT(16),
+ [CLKON_DCMP] = BIT(17),
+ [NTF_TX_CMDQS] = BIT(18),
+ [CLKON_TX_0] = BIT(19),
+ [CLKON_TX_1] = BIT(20),
+ [CLKON_FNR] = BIT(21),
+ [QSB2AXI_CMDQ_L] = BIT(22),
+ [AGGR_WRAPPER] = BIT(23),
+ [RAM_SLAVEWAY] = BIT(24),
+ [CLKON_QMB] = BIT(25),
+ [WEIGHT_ARB] = BIT(26),
+ [GSI_IF] = BIT(27),
+ [CLKON_GLOBAL] = BIT(28),
+ [GLOBAL_2X_CLK] = BIT(29),
+ [DPL_FIFO] = BIT(30),
+ [DRBIP] = BIT(31),
+};
+
+IPA_REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+
+static const u32 ipa_reg_route_fmask[] = {
+ [ROUTE_DIS] = BIT(0),
+ [ROUTE_DEF_PIPE] = GENMASK(5, 1),
+ [ROUTE_DEF_HDR_TABLE] = BIT(6),
+ [ROUTE_DEF_HDR_OFST] = GENMASK(16, 7),
+ [ROUTE_FRAG_DEF_PIPE] = GENMASK(21, 17),
+ /* Bits 22-23 reserved */
+ [ROUTE_DEF_RETAIN_HDR] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+IPA_REG_FIELDS(ROUTE, route, 0x00000048);
+
+static const u32 ipa_reg_shared_mem_size_fmask[] = {
+ [MEM_SIZE] = GENMASK(15, 0),
+ [MEM_BADDR] = GENMASK(31, 16),
+};
+
+IPA_REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+
+static const u32 ipa_reg_qsb_max_writes_fmask[] = {
+ [GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
+ /* Bits 8-31 reserved */
+};
+
+IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+
+static const u32 ipa_reg_qsb_max_reads_fmask[] = {
+ [GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
+ /* Bits 8-15 reserved */
+ [GEN_QMB_0_MAX_READS_BEATS] = GENMASK(23, 16),
+ [GEN_QMB_1_MAX_READS_BEATS] = GENMASK(31, 24),
+};
+
+IPA_REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+
+static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
+
+static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
+
+/* Valid bits defined by ipa->available */
+IPA_REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
+
+static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
+ [IPA_BASE_ADDR] = GENMASK(17, 0),
+ /* Bits 18-31 reserved */
+};
+
+/* Offset must be a multiple of 8 */
+IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+
+/* Valid bits defined by ipa->available */
+IPA_REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
+
+static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
+ /* Bits 0-1 reserved */
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX0] = GENMASK(5, 2),
+ [DMAW_SCND_OUTSD_PRED_THRESHOLD] = GENMASK(9, 6),
+ [DMAW_SCND_OUTSD_PRED_EN] = BIT(10),
+ [DMAW_MAX_BEATS_256_DIS] = BIT(11),
+ [PA_MASK_EN] = BIT(12),
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX1] = GENMASK(16, 13),
+ [DUAL_TX_ENABLE] = BIT(17),
+ [SSPND_PA_NO_START_STATE] = BIT(18),
+ /* Bits 19-31 reserved */
+};
+
+IPA_REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
+
+static const u32 ipa_reg_flavor_0_fmask[] = {
+ [MAX_PIPES] = GENMASK(3, 0),
+ /* Bits 4-7 reserved */
+ [MAX_CONS_PIPES] = GENMASK(12, 8),
+ /* Bits 13-15 reserved */
+ [MAX_PROD_PIPES] = GENMASK(20, 16),
+ /* Bits 21-23 reserved */
+ [PROD_LOWEST] = GENMASK(27, 24),
+ /* Bits 28-31 reserved */
+};
+
+IPA_REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
+
+static const u32 ipa_reg_idle_indication_cfg_fmask[] = {
+ [ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
+ [CONST_NON_IDLE_ENABLE] = BIT(16),
+ /* Bits 17-31 reserved */
+};
+
+IPA_REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
+
+static const u32 ipa_reg_qtime_timestamp_cfg_fmask[] = {
+ [DPL_TIMESTAMP_LSB] = GENMASK(4, 0),
+ /* Bits 5-6 reserved */
+ [DPL_TIMESTAMP_SEL] = BIT(7),
+ [TAG_TIMESTAMP_LSB] = GENMASK(12, 8),
+ /* Bits 13-15 reserved */
+ [NAT_TIMESTAMP_LSB] = GENMASK(20, 16),
+ /* Bits 21-31 reserved */
+};
+
+IPA_REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x0000024c);
+
+static const u32 ipa_reg_timers_xo_clk_div_cfg_fmask[] = {
+ [DIV_VALUE] = GENMASK(8, 0),
+ /* Bits 9-30 reserved */
+ [DIV_ENABLE] = BIT(31),
+};
+
+IPA_REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x00000250);
+
+static const u32 ipa_reg_timers_pulse_gran_cfg_fmask[] = {
+ [PULSE_GRAN_0] = GENMASK(2, 0),
+ [PULSE_GRAN_1] = GENMASK(5, 3),
+ [PULSE_GRAN_2] = GENMASK(8, 6),
+};
+
+IPA_REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x00000254);
+
+static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
+
+static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
+
+static const u32 ipa_reg_endp_init_cfg_fmask[] = {
+ [FRAG_OFFLOAD_EN] = BIT(0),
+ [CS_OFFLOAD_EN] = GENMASK(2, 1),
+ [CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
+ /* Bit 7 reserved */
+ [CS_GEN_QMB_MASTER_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+
+static const u32 ipa_reg_endp_init_nat_fmask[] = {
+ [NAT_EN] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_fmask[] = {
+ [HDR_LEN] = GENMASK(5, 0),
+ [HDR_OFST_METADATA_VALID] = BIT(6),
+ [HDR_OFST_METADATA] = GENMASK(12, 7),
+ [HDR_ADDITIONAL_CONST_LEN] = GENMASK(18, 13),
+ [HDR_OFST_PKT_SIZE_VALID] = BIT(19),
+ [HDR_OFST_PKT_SIZE] = GENMASK(25, 20),
+ [HDR_A5_MUX] = BIT(26),
+ [HDR_LEN_INC_DEAGG_HDR] = BIT(27),
+ [HDR_LEN_MSB] = GENMASK(29, 28),
+ [HDR_OFST_METADATA_MSB] = GENMASK(31, 30),
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
+ [HDR_ENDIANNESS] = BIT(0),
+ [HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
+ [HDR_TOTAL_LEN_OR_PAD] = BIT(2),
+ [HDR_PAYLOAD_LEN_INC_PADDING] = BIT(3),
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET] = GENMASK(9, 4),
+ [HDR_PAD_TO_ALIGNMENT] = GENMASK(13, 10),
+ /* Bits 14-15 reserved */
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB] = GENMASK(17, 16),
+ [HDR_OFST_PKT_SIZE_MSB] = GENMASK(19, 18),
+ [HDR_ADDITIONAL_CONST_LEN_MSB] = GENMASK(21, 20),
+ /* Bits 22-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+
+IPA_REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
+
+static const u32 ipa_reg_endp_init_mode_fmask[] = {
+ [ENDP_MODE] = GENMASK(2, 0),
+ [DCPH_ENABLE] = BIT(3),
+ [DEST_PIPE_INDEX] = GENMASK(8, 4),
+ /* Bits 9-11 reserved */
+ [BYTE_THRESHOLD] = GENMASK(27, 12),
+ [PIPE_REPLICATION_EN] = BIT(28),
+ [PAD_EN] = BIT(29),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+
+static const u32 ipa_reg_endp_init_aggr_fmask[] = {
+ [AGGR_EN] = GENMASK(1, 0),
+ [AGGR_TYPE] = GENMASK(4, 2),
+ [BYTE_LIMIT] = GENMASK(10, 5),
+ /* Bit 11 reserved */
+ [TIME_LIMIT] = GENMASK(16, 12),
+ [PKT_LIMIT] = GENMASK(22, 17),
+ [SW_EOF_ACTIVE] = BIT(23),
+ [FORCE_CLOSE] = BIT(24),
+ /* Bit 25 reserved */
+ [HARD_BYTE_LIMIT_EN] = BIT(26),
+ [AGGR_GRAN_SEL] = BIT(27),
+ /* Bits 28-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+
+static const u32 ipa_reg_endp_init_hol_block_en_fmask[] = {
+ [HOL_BLOCK_EN] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
+
+static const u32 ipa_reg_endp_init_hol_block_timer_fmask[] = {
+ [TIMER_LIMIT] = GENMASK(4, 0),
+ /* Bits 5-7 reserved */
+ [TIMER_GRAN_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
+
+static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
+ [DEAGGR_HDR_LEN] = GENMASK(5, 0),
+ [SYSPIPE_ERR_DETECTION] = BIT(6),
+ [PACKET_OFFSET_VALID] = BIT(7),
+ [PACKET_OFFSET_LOCATION] = GENMASK(13, 8),
+ [IGNORE_MIN_PKT_ERR] = BIT(14),
+ /* Bit 15 reserved */
+ [MAX_PACKET_LEN] = GENMASK(31, 16),
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+
+static const u32 ipa_reg_endp_init_rsrc_grp_fmask[] = {
+ [ENDP_RSRC_GRP] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp,
+ 0x00000838, 0x0070);
+
+static const u32 ipa_reg_endp_init_seq_fmask[] = {
+ [SEQ_TYPE] = GENMASK(7, 0),
+ /* Bits 8-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+
+static const u32 ipa_reg_endp_status_fmask[] = {
+ [STATUS_EN] = BIT(0),
+ [STATUS_ENDP] = GENMASK(5, 1),
+ /* Bits 6-8 reserved */
+ [STATUS_PKT_SUPPRESS] = BIT(9),
+ /* Bits 10-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+
+static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
+ [FILTER_HASH_MSK_SRC_ID] = BIT(0),
+ [FILTER_HASH_MSK_SRC_IP] = BIT(1),
+ [FILTER_HASH_MSK_DST_IP] = BIT(2),
+ [FILTER_HASH_MSK_SRC_PORT] = BIT(3),
+ [FILTER_HASH_MSK_DST_PORT] = BIT(4),
+ [FILTER_HASH_MSK_PROTOCOL] = BIT(5),
+ [FILTER_HASH_MSK_METADATA] = BIT(6),
+ [FILTER_HASH_MSK_ALL] = GENMASK(6, 0),
+ /* Bits 7-15 reserved */
+ [ROUTER_HASH_MSK_SRC_ID] = BIT(16),
+ [ROUTER_HASH_MSK_SRC_IP] = BIT(17),
+ [ROUTER_HASH_MSK_DST_IP] = BIT(18),
+ [ROUTER_HASH_MSK_SRC_PORT] = BIT(19),
+ [ROUTER_HASH_MSK_DST_PORT] = BIT(20),
+ [ROUTER_HASH_MSK_PROTOCOL] = BIT(21),
+ [ROUTER_HASH_MSK_METADATA] = BIT(22),
+ [ROUTER_HASH_MSK_ALL] = GENMASK(22, 16),
+ /* Bits 23-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
+ 0x0000085c, 0x0070);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
+
+static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
+ [UC_INTR] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
+ 0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
+
+/* Valid bits defined by ipa->available */
+IPA_REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
+ 0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
+
+/* Valid bits defined by ipa->available */
+IPA_REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
+ 0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
+
+static const struct ipa_reg *ipa_reg_array[] = {
+ [COMP_CFG] = &ipa_reg_comp_cfg,
+ [CLKON_CFG] = &ipa_reg_clkon_cfg,
+ [ROUTE] = &ipa_reg_route,
+ [SHARED_MEM_SIZE] = &ipa_reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &ipa_reg_qsb_max_writes,
+ [QSB_MAX_READS] = &ipa_reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &ipa_reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &ipa_reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &ipa_reg_state_aggr_active,
+ [LOCAL_PKT_PROC_CNTXT] = &ipa_reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &ipa_reg_aggr_force_close,
+ [IPA_TX_CFG] = &ipa_reg_ipa_tx_cfg,
+ [FLAVOR_0] = &ipa_reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &ipa_reg_idle_indication_cfg,
+ [QTIME_TIMESTAMP_CFG] = &ipa_reg_qtime_timestamp_cfg,
+ [TIMERS_XO_CLK_DIV_CFG] = &ipa_reg_timers_xo_clk_div_cfg,
+ [TIMERS_PULSE_GRAN_CFG] = &ipa_reg_timers_pulse_gran_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_23_rsrc_type,
+ [ENDP_INIT_CFG] = &ipa_reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &ipa_reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &ipa_reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &ipa_reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &ipa_reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &ipa_reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &ipa_reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &ipa_reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &ipa_reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &ipa_reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &ipa_reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &ipa_reg_endp_init_seq,
+ [ENDP_STATUS] = &ipa_reg_endp_status,
+ [ENDP_FILTER_ROUTER_HSH_CFG] = &ipa_reg_endp_filter_router_hsh_cfg,
+ [IPA_IRQ_STTS] = &ipa_reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &ipa_reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &ipa_reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &ipa_reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &ipa_reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &ipa_reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &ipa_reg_irq_suspend_clr,
+};
+
+const struct ipa_regs ipa_regs_v4_7 = {
+ .reg_count = ARRAY_SIZE(ipa_reg_array),
+ .reg = ipa_reg_array,
+};
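[Editor's sketch: the IPA_REG_STRIDE() entries in this new file, and the conversions in the other register files above, describe registers that repeat at a fixed spacing, such as the per-32-endpoint suspend and aggregation bitmaps. The offset of instance n follows from the base offset plus n strides; the field names below are assumed to mirror struct ipa_reg.]

static u32 ipa_reg_nth_offset(const struct ipa_reg *reg, u32 n)
{
	/* Instance n lives at the base offset plus n strides */
	return reg->offset + n * reg->stride;
}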
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.9.c b/drivers/net/ipa/reg/ipa_reg-v4.9.c
index eabbc5451937..1f67a03fe599 100644
--- a/drivers/net/ipa/reg/ipa_reg-v4.9.c
+++ b/drivers/net/ipa/reg/ipa_reg-v4.9.c
@@ -139,7 +139,7 @@ static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
/* Valid bits defined by ipa->available */
-IPA_REG(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4);
+IPA_REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
[IPA_BASE_ADDR] = GENMASK(17, 0),
@@ -150,7 +150,7 @@ static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
/* Valid bits defined by ipa->available */
-IPA_REG(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec);
+IPA_REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
/* Bits 0-1 reserved */
@@ -450,13 +450,16 @@ static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000401c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */
-IPA_REG(IRQ_SUSPEND_INFO, irq_suspend_info, 0x00004030 + 0x1000 * GSI_EE_AP);
+IPA_REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
+ 0x00004030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG(IRQ_SUSPEND_EN, irq_suspend_en, 0x00004034 + 0x1000 * GSI_EE_AP);
+IPA_REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
+ 0x00004034 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG(IRQ_SUSPEND_CLR, irq_suspend_clr, 0x00004038 + 0x1000 * GSI_EE_AP);
+IPA_REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
+ 0x00004038 + 0x1000 * GSI_EE_AP, 0x0004);
static const struct ipa_reg *ipa_reg_array[] = {
[COMP_CFG] = &ipa_reg_comp_cfg,