author     Linus Torvalds <torvalds@linux-foundation.org>   2021-07-01 01:51:09 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>   2021-07-01 01:51:09 +0300
commit     dbe69e43372212527abf48609aba7fc39a6daa27 (patch)
tree       96cfafdf70f5325ceeac1054daf7deca339c9730 /drivers/net/ipa
parent     a6eaf3850cb171c328a8b0db6d3c79286a1eba9d (diff)
parent     b6df00789e2831fff7a2c65aa7164b2a4dcbe599 (diff)
download   linux-dbe69e43372212527abf48609aba7fc39a6daa27.tar.xz
Merge tag 'net-next-5.14' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next
Pull networking updates from Jakub Kicinski:

 "Core:

   - BPF:
      - add syscall program type and libbpf support for generating instructions and bindings for in-kernel BPF loaders (BPF loaders for BPF), this is a stepping stone for signed BPF programs

   - infrastructure to migrate TCP child sockets from one listener to another in the same reuseport group/map to improve flexibility of service hand-off/restart

   - add broadcast support to XDP redirect

   - allow bypass of the lockless qdisc to improve performance (for pktgen: +23% with one thread, +44% with 2 threads)

   - add a simpler version of "DO_ONCE()" which does not require jump labels, intended for slow-path usage

   - virtio/vsock: introduce SOCK_SEQPACKET support

   - add getsockopt to retrieve netns cookie

   - ip: treat lowest address of an IPv4 subnet as ordinary unicast address, allowing reclaiming of precious IPv4 addresses

   - ipv6: use prandom_u32() for ID generation

   - ip: add support for more flexible field selection for hashing across multi-path routes (w/ offload to mlxsw)

   - icmp: add support for extended RFC 8335 PROBE (ping)

   - seg6: add support for SRv6 End.DT46 behavior

   - mptcp:
      - DSS checksum support (RFC 8684) to detect middlebox meddling
      - support Connection-time 'C' flag
      - time stamping support

   - sctp: Packetization Layer Path MTU Discovery (RFC 8899)

   - xfrm: speed up state addition with seq set

   - WiFi:
      - hidden AP discovery on 6 GHz and other HE 6 GHz improvements
      - aggregation handling improvements for some drivers
      - minstrel improvements for no-ack frames
      - deferred rate control for TXQs to improve reaction times
      - switch from round robin to virtual time-based airtime scheduler

   - add trace points:
      - tcp checksum errors
      - openvswitch - action execution, upcalls
      - socket errors via sk_error_report

  Device APIs:

   - devlink: add rate API for hierarchical control of max egress rate of virtual devices (VFs, SFs etc.)

   - don't require RCU read lock to be held around BPF hooks in NAPI context

   - page_pool: generic buffer recycling

  New hardware/drivers:

   - mobile:
      - iosm: PCIe Driver for Intel M.2 Modem
      - support for Qualcomm MSM8998 (ipa)

   - WiFi: Qualcomm QCN9074 and WCN6855 PCI devices

   - sparx5: Microchip SparX-5 family of Enterprise Ethernet switches

   - Mellanox BlueField Gigabit Ethernet (control NIC of the DPU)

   - NXP SJA1110 Automotive Ethernet 10-port switch

   - Qualcomm QCA8327 switch support (qca8k)

   - Mikrotik 10/25G NIC (atl1c)

  Driver changes:

   - ACPI support for some MDIO, MAC and PHY devices from Marvell and NXP (our first foray into MAC/PHY description via ACPI)

   - HW timestamping (PTP) support: bnxt_en, ice, sja1105, hns3, tja11xx

   - Mellanox/Nvidia NIC (mlx5):
      - NIC VF offload of L2 bridging
      - support IRQ distribution to Sub-functions

   - Marvell (prestera):
      - add flower and match all
      - devlink trap
      - link aggregation

   - Netronome (nfp): connection tracking offload

   - Intel 1GE (igc): add AF_XDP support

   - Marvell DPU (octeontx2): ingress ratelimit offload

   - Google vNIC (gve): new ring/descriptor format support

   - Qualcomm mobile (rmnet & ipa): inline checksum offload support

   - MediaTek WiFi (mt76):
      - mt7915 MSI support
      - mt7915 Tx status reporting
      - mt7915 thermal sensors support
      - mt7921 decapsulation offload
      - mt7921 enable runtime pm and deep sleep

   - Realtek WiFi (rtw88):
      - beacon filter support
      - Tx antenna path diversity support
      - firmware crash information via devcoredump

   - Qualcomm WiFi (wcn36xx):
      - Wake-on-WLAN support with magic packets and GTK rekeying

   - Micrel PHY (ksz886x/ksz8081): add cable test support"

* tag 'net-next-5.14' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (2168 commits)
  tcp: change ICSK_CA_PRIV_SIZE definition
  tcp_yeah: check struct yeah size at compile time
  gve: DQO: Fix off by one in gve_rx_dqo()
  stmmac: intel: set PCI_D3hot in suspend
  stmmac: intel: Enable PHY WOL option in EHL
  net: stmmac: option to enable PHY WOL with PMT enabled
  net: say "local" instead of "static" addresses in ndo_dflt_fdb_{add,del}
  net: use netdev_info in ndo_dflt_fdb_{add,del}
  ptp: Set lookup cookie when creating a PTP PPS source.
  net: sock: add trace for socket errors
  net: sock: introduce sk_error_report
  net: dsa: replay the local bridge FDB entries pointing to the bridge dev too
  net: dsa: ensure during dsa_fdb_offload_notify that dev_hold and dev_put are on the same dev
  net: dsa: include fdb entries pointing to bridge in the host fdb list
  net: dsa: include bridge addresses which are local in the host fdb list
  net: dsa: sync static FDB entries on foreign interfaces to hardware
  net: dsa: install the host MDB and FDB entries in the master's RX filter
  net: dsa: reference count the FDB addresses at the cross-chip notifier level
  net: dsa: introduce a separate cross-chip notifier type for host FDBs
  net: dsa: reference count the MDB entries at the cross-chip notifier level
  ...
Diffstat (limited to 'drivers/net/ipa')
-rw-r--r--   drivers/net/ipa/Makefile            |    9
-rw-r--r--   drivers/net/ipa/gsi.c               |   90
-rw-r--r--   drivers/net/ipa/gsi.h               |    2
-rw-r--r--   drivers/net/ipa/gsi_reg.h           |    3
-rw-r--r--   drivers/net/ipa/ipa_cmd.c           |   40
-rw-r--r--   drivers/net/ipa/ipa_data-v3.1.c     |  533
-rw-r--r--   drivers/net/ipa/ipa_data-v3.5.1.c   |   45
-rw-r--r--   drivers/net/ipa/ipa_data-v4.11.c    |   66
-rw-r--r--   drivers/net/ipa/ipa_data-v4.2.c     |   54
-rw-r--r--   drivers/net/ipa/ipa_data-v4.5.c     |   69
-rw-r--r--   drivers/net/ipa/ipa_data-v4.9.c     |   70
-rw-r--r--   drivers/net/ipa/ipa_data.h          |    1
-rw-r--r--   drivers/net/ipa/ipa_endpoint.c      |   90
-rw-r--r--   drivers/net/ipa/ipa_main.c          |   55
-rw-r--r--   drivers/net/ipa/ipa_mem.c           |  264
-rw-r--r--   drivers/net/ipa/ipa_mem.h           |   26
-rw-r--r--   drivers/net/ipa/ipa_qmi.c           |   32
-rw-r--r--   drivers/net/ipa/ipa_reg.h           |    1
-rw-r--r--   drivers/net/ipa/ipa_smp2p.c         |    5
-rw-r--r--   drivers/net/ipa/ipa_sysfs.c         |  136
-rw-r--r--   drivers/net/ipa/ipa_sysfs.h         |   15
-rw-r--r--   drivers/net/ipa/ipa_table.c         |   94
-rw-r--r--   drivers/net/ipa/ipa_uc.c            |    3
-rw-r--r--   drivers/net/ipa/ipa_version.h       |    2
24 files changed, 1366 insertions(+), 339 deletions(-)
diff --git a/drivers/net/ipa/Makefile b/drivers/net/ipa/Makefile
index 1efe1a88104b..506f8d5cd4ee 100644
--- a/drivers/net/ipa/Makefile
+++ b/drivers/net/ipa/Makefile
@@ -7,8 +7,9 @@ ipa-y := ipa_main.o ipa_clock.o ipa_reg.o ipa_mem.o \
ipa_table.o ipa_interrupt.o gsi.o gsi_trans.o \
ipa_gsi.o ipa_smp2p.o ipa_uc.o \
ipa_endpoint.o ipa_cmd.o ipa_modem.o \
- ipa_resource.o ipa_qmi.o ipa_qmi_msg.o
+ ipa_resource.o ipa_qmi.o ipa_qmi_msg.o \
+ ipa_sysfs.o
-ipa-y += ipa_data-v3.5.1.o ipa_data-v4.2.o \
- ipa_data-v4.5.o ipa_data-v4.9.o \
- ipa_data-v4.11.o
+ipa-y += ipa_data-v3.1.o ipa_data-v3.5.1.o \
+ ipa_data-v4.2.o ipa_data-v4.5.o \
+ ipa_data-v4.9.o ipa_data-v4.11.o
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index e374079603cf..427c68b2ad8f 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -210,13 +210,65 @@ static void gsi_irq_setup(struct gsi *gsi)
iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
- /* The inter-EE registers are in the non-adjusted address range */
- iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET);
- iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET);
+ /* The inter-EE interrupts are not supported for IPA v3.0-v3.1 */
+ if (gsi->version > IPA_VERSION_3_1) {
+ u32 offset;
+
+ /* These registers are in the non-adjusted address range */
+ offset = GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET;
+ iowrite32(0, gsi->virt_raw + offset);
+ offset = GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET;
+ iowrite32(0, gsi->virt_raw + offset);
+ }
iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
}
+/* Get # supported channel and event rings; there is no gsi_ring_teardown() */
+static int gsi_ring_setup(struct gsi *gsi)
+{
+ struct device *dev = gsi->dev;
+ u32 count;
+ u32 val;
+
+ if (gsi->version < IPA_VERSION_3_5_1) {
+ /* No HW_PARAM_2 register prior to IPA v3.5.1, assume the max */
+ gsi->channel_count = GSI_CHANNEL_COUNT_MAX;
+ gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX;
+
+ return 0;
+ }
+
+ val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);
+
+ count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
+ if (!count) {
+ dev_err(dev, "GSI reports zero channels supported\n");
+ return -EINVAL;
+ }
+ if (count > GSI_CHANNEL_COUNT_MAX) {
+ dev_warn(dev, "limiting to %u channels; hardware supports %u\n",
+ GSI_CHANNEL_COUNT_MAX, count);
+ count = GSI_CHANNEL_COUNT_MAX;
+ }
+ gsi->channel_count = count;
+
+ count = u32_get_bits(val, NUM_EV_PER_EE_FMASK);
+ if (!count) {
+ dev_err(dev, "GSI reports zero event rings supported\n");
+ return -EINVAL;
+ }
+ if (count > GSI_EVT_RING_COUNT_MAX) {
+ dev_warn(dev,
+ "limiting to %u event rings; hardware supports %u\n",
+ GSI_EVT_RING_COUNT_MAX, count);
+ count = GSI_EVT_RING_COUNT_MAX;
+ }
+ gsi->evt_ring_count = count;
+
+ return 0;
+}
+
/* Event ring commands are performed one at a time. Their completion
* is signaled by the event ring control GSI interrupt type, which is
* only enabled when we issue an event ring command. Only the event
@@ -1827,43 +1879,21 @@ static void gsi_channel_teardown(struct gsi *gsi)
/* Setup function for GSI. GSI firmware must be loaded and initialized */
int gsi_setup(struct gsi *gsi)
{
- struct device *dev = gsi->dev;
u32 val;
+ int ret;
/* Here is where we first touch the GSI hardware */
val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
if (!(val & ENABLED_FMASK)) {
- dev_err(dev, "GSI has not been enabled\n");
+ dev_err(gsi->dev, "GSI has not been enabled\n");
return -EIO;
}
gsi_irq_setup(gsi); /* No matching teardown required */
- val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);
-
- gsi->channel_count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
- if (!gsi->channel_count) {
- dev_err(dev, "GSI reports zero channels supported\n");
- return -EINVAL;
- }
- if (gsi->channel_count > GSI_CHANNEL_COUNT_MAX) {
- dev_warn(dev,
- "limiting to %u channels; hardware supports %u\n",
- GSI_CHANNEL_COUNT_MAX, gsi->channel_count);
- gsi->channel_count = GSI_CHANNEL_COUNT_MAX;
- }
-
- gsi->evt_ring_count = u32_get_bits(val, NUM_EV_PER_EE_FMASK);
- if (!gsi->evt_ring_count) {
- dev_err(dev, "GSI reports zero event rings supported\n");
- return -EINVAL;
- }
- if (gsi->evt_ring_count > GSI_EVT_RING_COUNT_MAX) {
- dev_warn(dev,
- "limiting to %u event rings; hardware supports %u\n",
- GSI_EVT_RING_COUNT_MAX, gsi->evt_ring_count);
- gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX;
- }
+ ret = gsi_ring_setup(gsi); /* No matching teardown required */
+ if (ret)
+ return ret;
/* Initialize the error log */
iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
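The new gsi_ring_setup() above relies on the kernel bitfield helpers: NUM_CH_PER_EE_FMASK and NUM_EV_PER_EE_FMASK are GENMASK() constants, and u32_get_bits() extracts the field a mask covers. A minimal sketch of the same decode-and-clamp pattern follows; the field position used here is illustrative, not the real HW_PARAM_2 layout.

        #include <linux/bitfield.h>
        #include <linux/bits.h>
        #include <linux/minmax.h>

        #define NUM_CH_PER_EE_FMASK     GENMASK(7, 0)   /* position is illustrative */
        #define GSI_CHANNEL_COUNT_MAX   23

        /* Decode the channel-count field and clamp it to what the driver supports */
        static u32 decode_channel_count(u32 hw_param_2)
        {
                u32 count = u32_get_bits(hw_param_2, NUM_CH_PER_EE_FMASK);

                return min_t(u32, count, GSI_CHANNEL_COUNT_MAX);
        }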
diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h
index d5996bdb20ef..81cd7b07f6e1 100644
--- a/drivers/net/ipa/gsi.h
+++ b/drivers/net/ipa/gsi.h
@@ -17,7 +17,7 @@
/* Maximum number of channels and event rings supported by the driver */
#define GSI_CHANNEL_COUNT_MAX 23
-#define GSI_EVT_RING_COUNT_MAX 20
+#define GSI_EVT_RING_COUNT_MAX 24
/* Maximum TLV FIFO size for a channel; 64 here is arbitrary (and high) */
#define GSI_TLV_MAX 64
diff --git a/drivers/net/ipa/gsi_reg.h b/drivers/net/ipa/gsi_reg.h
index cb42c5ae86fa..bf9593d9eaea 100644
--- a/drivers/net/ipa/gsi_reg.h
+++ b/drivers/net/ipa/gsi_reg.h
@@ -52,7 +52,8 @@
*/
#define GSI_EE_REG_ADJUST 0x0000d000 /* IPA v4.5+ */
-/* The two inter-EE IRQ register offsets are relative to gsi->virt_raw */
+/* The inter-EE IRQ registers are relative to gsi->virt_raw (IPA v3.5+) */
+
#define GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET \
GSI_INTER_EE_N_SRC_CH_IRQ_MSK_OFFSET(GSI_EE_AP)
#define GSI_INTER_EE_N_SRC_CH_IRQ_MSK_OFFSET(ee) \
diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
index 525cdf28d9ea..af44ca41189e 100644
--- a/drivers/net/ipa/ipa_cmd.c
+++ b/drivers/net/ipa/ipa_cmd.c
@@ -200,41 +200,55 @@ bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
/* Validate the memory region that holds headers */
static bool ipa_cmd_header_valid(struct ipa *ipa)
{
- const struct ipa_mem *mem = &ipa->mem[IPA_MEM_MODEM_HEADER];
struct device *dev = &ipa->pdev->dev;
+ const struct ipa_mem *mem;
u32 offset_max;
u32 size_max;
+ u32 offset;
u32 size;
- /* In ipa_cmd_hdr_init_local_add() we record the offset and size
- * of the header table memory area. Make sure the offset and size
- * fit in the fields that need to hold them, and that the entire
- * range is within the overall IPA memory range.
+ /* In ipa_cmd_hdr_init_local_add() we record the offset and size of
+ * the header table memory area in an immediate command. Make sure
+ * the offset and size fit in the fields that need to hold them, and
+ * that the entire range is within the overall IPA memory range.
*/
offset_max = field_max(HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
- if (mem->offset > offset_max ||
- ipa->mem_offset > offset_max - mem->offset) {
+ size_max = field_max(HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);
+
+ /* The header memory area contains both the modem and AP header
+ * regions. The modem portion defines the address of the region.
+ */
+ mem = ipa_mem_find(ipa, IPA_MEM_MODEM_HEADER);
+ offset = mem->offset;
+ size = mem->size;
+
+ /* Make sure the offset fits in the IPA command */
+ if (offset > offset_max || ipa->mem_offset > offset_max - offset) {
dev_err(dev, "header table region offset too large\n");
dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n",
- ipa->mem_offset, mem->offset, offset_max);
+ ipa->mem_offset, offset, offset_max);
return false;
}
- size_max = field_max(HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);
- size = ipa->mem[IPA_MEM_MODEM_HEADER].size;
- size += ipa->mem[IPA_MEM_AP_HEADER].size;
+ /* Add the size of the AP portion (if defined) to the combined size */
+ mem = ipa_mem_find(ipa, IPA_MEM_AP_HEADER);
+ if (mem)
+ size += mem->size;
+ /* Make sure the combined size fits in the IPA command */
if (size > size_max) {
dev_err(dev, "header table region size too large\n");
dev_err(dev, " (0x%04x > 0x%08x)\n", size, size_max);
return false;
}
- if (size > ipa->mem_size || mem->offset > ipa->mem_size - size) {
+
+ /* Make sure the entire combined area fits in IPA memory */
+ if (size > ipa->mem_size || offset > ipa->mem_size - size) {
dev_err(dev, "header table region out of range\n");
dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n",
- mem->offset, size, ipa->mem_size);
+ offset, size, ipa->mem_size);
return false;
}
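The reworked checks in ipa_cmd_header_valid() deliberately compare against "offset_max - offset" instead of summing the two offsets, so an oversized value cannot wrap around before the comparison happens. The same idiom in isolation, as a sketch rather than driver code:

        #include <linux/types.h>

        /* Overflow-safe "a + b <= max": never form a + b directly */
        static bool range_fits(u32 a, u32 b, u32 max)
        {
                if (a > max)
                        return false;   /* a alone already exceeds the limit */

                return b <= max - a;    /* max - a cannot underflow here */
        }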
diff --git a/drivers/net/ipa/ipa_data-v3.1.c b/drivers/net/ipa/ipa_data-v3.1.c
new file mode 100644
index 000000000000..4c28189462a7
--- /dev/null
+++ b/drivers/net/ipa/ipa_data-v3.1.c
@@ -0,0 +1,533 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2021 Linaro Ltd.
+ */
+
+#include <linux/log2.h>
+
+#include "gsi.h"
+#include "ipa_data.h"
+#include "ipa_endpoint.h"
+#include "ipa_mem.h"
+
+/** enum ipa_resource_type - IPA resource types for an SoC having IPA v3.1 */
+enum ipa_resource_type {
+ /* Source resource types; first must have value 0 */
+ IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS = 0,
+ IPA_RESOURCE_TYPE_SRC_HDR_SECTORS,
+ IPA_RESOURCE_TYPE_SRC_HDRI1_BUFFER,
+ IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS,
+ IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF,
+ IPA_RESOURCE_TYPE_SRC_HDRI2_BUFFERS,
+ IPA_RESOURCE_TYPE_SRC_HPS_DMARS,
+ IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES,
+
+ /* Destination resource types; first must have value 0 */
+ IPA_RESOURCE_TYPE_DST_DATA_SECTORS = 0,
+ IPA_RESOURCE_TYPE_DST_DATA_SECTOR_LISTS,
+ IPA_RESOURCE_TYPE_DST_DPS_DMARS,
+};
+
+/* Resource groups used for an SoC having IPA v3.1 */
+enum ipa_rsrc_group_id {
+ /* Source resource group identifiers */
+ IPA_RSRC_GROUP_SRC_UL = 0,
+ IPA_RSRC_GROUP_SRC_DL,
+ IPA_RSRC_GROUP_SRC_DIAG,
+ IPA_RSRC_GROUP_SRC_DMA,
+ IPA_RSRC_GROUP_SRC_UNUSED,
+ IPA_RSRC_GROUP_SRC_UC_RX_Q,
+ IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */
+
+ /* Destination resource group identifiers */
+ IPA_RSRC_GROUP_DST_UL = 0,
+ IPA_RSRC_GROUP_DST_DL,
+ IPA_RSRC_GROUP_DST_DIAG_DPL,
+ IPA_RSRC_GROUP_DST_DMA,
+ IPA_RSRC_GROUP_DST_Q6ZIP_GENERAL,
+ IPA_RSRC_GROUP_DST_Q6ZIP_ENGINE,
+ IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */
+};
+
+/* QSB configuration data for an SoC having IPA v3.1 */
+static const struct ipa_qsb_data ipa_qsb_data[] = {
+ [IPA_QSB_MASTER_DDR] = {
+ .max_writes = 8,
+ .max_reads = 8,
+ },
+ [IPA_QSB_MASTER_PCIE] = {
+ .max_writes = 2,
+ .max_reads = 8,
+ },
+};
+
+/* Endpoint data for an SoC having IPA v3.1 */
+static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
+ [IPA_ENDPOINT_AP_COMMAND_TX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 6,
+ .endpoint_id = 22,
+ .toward_ipa = true,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 18,
+ },
+ .endpoint = {
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_SRC_UL,
+ .dma_mode = true,
+ .dma_endpoint = IPA_ENDPOINT_AP_LAN_RX,
+ .tx = {
+ .seq_type = IPA_SEQ_DMA,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_LAN_RX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 7,
+ .endpoint_id = 15,
+ .toward_ipa = false,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 8,
+ },
+ .endpoint = {
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_SRC_UL,
+ .aggregation = true,
+ .status_enable = true,
+ .rx = {
+ .pad_align = ilog2(sizeof(u32)),
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_MODEM_TX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 5,
+ .endpoint_id = 3,
+ .toward_ipa = true,
+ .channel = {
+ .tre_count = 512,
+ .event_count = 512,
+ .tlv_count = 16,
+ },
+ .endpoint = {
+ .filter_support = true,
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_SRC_UL,
+ .checksum = true,
+ .qmap = true,
+ .status_enable = true,
+ .tx = {
+ .seq_type = IPA_SEQ_2_PASS_SKIP_LAST_UC,
+ .status_endpoint =
+ IPA_ENDPOINT_MODEM_AP_RX,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_MODEM_RX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 8,
+ .endpoint_id = 16,
+ .toward_ipa = false,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 8,
+ },
+ .endpoint = {
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_DST_DL,
+ .checksum = true,
+ .qmap = true,
+ .aggregation = true,
+ .rx = {
+ .aggr_close_eof = true,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_MODEM_LAN_TX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 4,
+ .endpoint_id = 9,
+ .toward_ipa = true,
+ .endpoint = {
+ .filter_support = true,
+ },
+ },
+ [IPA_ENDPOINT_MODEM_AP_TX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 0,
+ .endpoint_id = 5,
+ .toward_ipa = true,
+ .endpoint = {
+ .filter_support = true,
+ },
+ },
+ [IPA_ENDPOINT_MODEM_AP_RX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 5,
+ .endpoint_id = 18,
+ .toward_ipa = false,
+ },
+};
+
+/* Source resource configuration data for an SoC having IPA v3.1 */
+static const struct ipa_resource ipa_resource_src[] = {
+ [IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL] = {
+ .min = 3, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DL] = {
+ .min = 3, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DIAG] = {
+ .min = 1, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DMA] = {
+ .min = 1, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 2, .max = 255,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_HDR_SECTORS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DL] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DIAG] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DMA] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 0, .max = 255,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_HDRI1_BUFFER] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DL] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DIAG] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DMA] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 0, .max = 255,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL] = {
+ .min = 14, .max = 14,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DL] = {
+ .min = 16, .max = 16,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DIAG] = {
+ .min = 5, .max = 5,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DMA] = {
+ .min = 5, .max = 5,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 8, .max = 8,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL] = {
+ .min = 19, .max = 19,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DL] = {
+ .min = 26, .max = 26,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DIAG] = {
+ .min = 5, .max = 5, /* 3 downstream */
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DMA] = {
+ .min = 5, .max = 5, /* 7 downstream */
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 8, .max = 8,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_HDRI2_BUFFERS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DL] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DIAG] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DMA] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 0, .max = 255,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_HPS_DMARS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DL] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DIAG] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DMA] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 0, .max = 255,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL] = {
+ .min = 19, .max = 19,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DL] = {
+ .min = 26, .max = 26,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DIAG] = {
+ .min = 5, .max = 5,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DMA] = {
+ .min = 5, .max = 5,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
+ .min = 8, .max = 8,
+ },
+ },
+};
+
+/* Destination resource configuration data for an SoC having IPA v3.1 */
+static const struct ipa_resource ipa_resource_dst[] = {
+ [IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = {
+ .limits[IPA_RSRC_GROUP_DST_UL] = {
+ .min = 3, .max = 3, /* 2 downstream */
+ },
+ .limits[IPA_RSRC_GROUP_DST_DL] = {
+ .min = 3, .max = 3,
+ },
+ .limits[IPA_RSRC_GROUP_DST_DIAG_DPL] = {
+ .min = 1, .max = 1, /* 0 downstream */
+ },
+ /* IPA_RSRC_GROUP_DST_DMA uses 2 downstream */
+ .limits[IPA_RSRC_GROUP_DST_Q6ZIP_GENERAL] = {
+ .min = 3, .max = 3,
+ },
+ .limits[IPA_RSRC_GROUP_DST_Q6ZIP_ENGINE] = {
+ .min = 3, .max = 3,
+ },
+ },
+ [IPA_RESOURCE_TYPE_DST_DATA_SECTOR_LISTS] = {
+ .limits[IPA_RSRC_GROUP_DST_UL] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_DST_DL] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_DST_DIAG_DPL] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_DST_DMA] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_DST_Q6ZIP_GENERAL] = {
+ .min = 0, .max = 255,
+ },
+ .limits[IPA_RSRC_GROUP_DST_Q6ZIP_ENGINE] = {
+ .min = 0, .max = 255,
+ },
+ },
+ [IPA_RESOURCE_TYPE_DST_DPS_DMARS] = {
+ .limits[IPA_RSRC_GROUP_DST_UL] = {
+ .min = 1, .max = 1,
+ },
+ .limits[IPA_RSRC_GROUP_DST_DL] = {
+ .min = 1, .max = 1,
+ },
+ .limits[IPA_RSRC_GROUP_DST_DIAG_DPL] = {
+ .min = 1, .max = 1,
+ },
+ .limits[IPA_RSRC_GROUP_DST_DMA] = {
+ .min = 1, .max = 1,
+ },
+ .limits[IPA_RSRC_GROUP_DST_Q6ZIP_GENERAL] = {
+ .min = 1, .max = 1,
+ },
+ },
+};
+
+/* Resource configuration data for an SoC having IPA v3.1 */
+static const struct ipa_resource_data ipa_resource_data = {
+ .rsrc_group_src_count = IPA_RSRC_GROUP_SRC_COUNT,
+ .rsrc_group_dst_count = IPA_RSRC_GROUP_DST_COUNT,
+ .resource_src_count = ARRAY_SIZE(ipa_resource_src),
+ .resource_src = ipa_resource_src,
+ .resource_dst_count = ARRAY_SIZE(ipa_resource_dst),
+ .resource_dst = ipa_resource_dst,
+};
+
+/* IPA-resident memory region data for an SoC having IPA v3.1 */
+static const struct ipa_mem ipa_mem_local_data[] = {
+ {
+ .id = IPA_MEM_UC_SHARED,
+ .offset = 0x0000,
+ .size = 0x0080,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_UC_INFO,
+ .offset = 0x0080,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_V4_FILTER_HASHED,
+ .offset = 0x0288,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V4_FILTER,
+ .offset = 0x0308,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_FILTER_HASHED,
+ .offset = 0x0388,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_FILTER,
+ .offset = 0x0408,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V4_ROUTE_HASHED,
+ .offset = 0x0488,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V4_ROUTE,
+ .offset = 0x0508,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_ROUTE_HASHED,
+ .offset = 0x0588,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_ROUTE,
+ .offset = 0x0608,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_MODEM_HEADER,
+ .offset = 0x0688,
+ .size = 0x0140,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_MODEM_PROC_CTX,
+ .offset = 0x07d0,
+ .size = 0x0200,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_AP_PROC_CTX,
+ .offset = 0x09d0,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_MODEM,
+ .offset = 0x0bd8,
+ .size = 0x1424,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_END_MARKER,
+ .offset = 0x2000,
+ .size = 0,
+ .canary_count = 1,
+ },
+};
+
+/* Memory configuration data for an SoC having IPA v3.1 */
+static const struct ipa_mem_data ipa_mem_data = {
+ .local_count = ARRAY_SIZE(ipa_mem_local_data),
+ .local = ipa_mem_local_data,
+ .imem_addr = 0x146bd000,
+ .imem_size = 0x00002000,
+ .smem_id = 497,
+ .smem_size = 0x00002000,
+};
+
+/* Interconnect bandwidths are in 1000 byte/second units */
+static const struct ipa_interconnect_data ipa_interconnect_data[] = {
+ {
+ .name = "memory",
+ .peak_bandwidth = 640000, /* 640 MBps */
+ .average_bandwidth = 80000, /* 80 MBps */
+ },
+ {
+ .name = "imem",
+ .peak_bandwidth = 640000, /* 640 MBps */
+ .average_bandwidth = 80000, /* 80 MBps */
+ },
+ /* Average bandwidth is unused for the next interconnect */
+ {
+ .name = "config",
+ .peak_bandwidth = 80000, /* 80 MBps */
+ .average_bandwidth = 0, /* unused */
+ },
+};
+
+/* Clock and interconnect configuration data for an SoC having IPA v3.1 */
+static const struct ipa_clock_data ipa_clock_data = {
+ .core_clock_rate = 16 * 1000 * 1000, /* Hz */
+ .interconnect_count = ARRAY_SIZE(ipa_interconnect_data),
+ .interconnect_data = ipa_interconnect_data,
+};
+
+/* Configuration data for an SoC having IPA v3.1 */
+const struct ipa_data ipa_data_v3_1 = {
+ .version = IPA_VERSION_3_1,
+ .backward_compat = BCR_CMDQ_L_LACK_ONE_ENTRY_FMASK,
+ .qsb_count = ARRAY_SIZE(ipa_qsb_data),
+ .qsb_data = ipa_qsb_data,
+ .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
+ .endpoint_data = ipa_gsi_endpoint_data,
+ .resource_data = &ipa_resource_data,
+ .mem_data = &ipa_mem_data,
+ .clock_data = &ipa_clock_data,
+};
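The new v3.1 table lists only the regions this SoC actually provides, in increasing offset order, and closes with the IPA_MEM_END_MARKER pseudo region whose offset marks the end of IPA local memory. A sketch of the kind of ordering check such a table permits; the struct below is a stand-in, not the driver's struct ipa_mem or its validation code:

        #include <linux/types.h>

        struct region {                 /* stand-in for struct ipa_mem */
                u32 id;
                u32 offset;
                u32 size;
        };

        /* Regions must not overlap; canary words live in the gaps between them */
        static bool regions_ordered(const struct region *r, size_t count)
        {
                size_t i;

                for (i = 1; i < count; i++)
                        if (r[i].offset < r[i - 1].offset + r[i - 1].size)
                                return false;

                return true;
        }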
diff --git a/drivers/net/ipa/ipa_data-v3.5.1.c b/drivers/net/ipa/ipa_data-v3.5.1.c
index ead1a82f32f5..af536ef8c120 100644
--- a/drivers/net/ipa/ipa_data-v3.5.1.c
+++ b/drivers/net/ipa/ipa_data-v3.5.1.c
@@ -271,77 +271,92 @@ static const struct ipa_resource_data ipa_resource_data = {
/* IPA-resident memory region data for an SoC having IPA v3.5.1 */
static const struct ipa_mem ipa_mem_local_data[] = {
- [IPA_MEM_UC_SHARED] = {
+ {
+ .id = IPA_MEM_UC_SHARED,
.offset = 0x0000,
.size = 0x0080,
.canary_count = 0,
},
- [IPA_MEM_UC_INFO] = {
+ {
+ .id = IPA_MEM_UC_INFO,
.offset = 0x0080,
.size = 0x0200,
.canary_count = 0,
},
- [IPA_MEM_V4_FILTER_HASHED] = {
+ {
+ .id = IPA_MEM_V4_FILTER_HASHED,
.offset = 0x0288,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V4_FILTER] = {
+ {
+ .id = IPA_MEM_V4_FILTER,
.offset = 0x0308,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_FILTER_HASHED] = {
+ {
+ .id = IPA_MEM_V6_FILTER_HASHED,
.offset = 0x0388,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_FILTER] = {
+ {
+ .id = IPA_MEM_V6_FILTER,
.offset = 0x0408,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V4_ROUTE_HASHED] = {
+ {
+ .id = IPA_MEM_V4_ROUTE_HASHED,
.offset = 0x0488,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V4_ROUTE] = {
+ {
+ .id = IPA_MEM_V4_ROUTE,
.offset = 0x0508,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_ROUTE_HASHED] = {
+ {
+ .id = IPA_MEM_V6_ROUTE_HASHED,
.offset = 0x0588,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_ROUTE] = {
+ {
+ .id = IPA_MEM_V6_ROUTE,
.offset = 0x0608,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_MODEM_HEADER] = {
+ {
+ .id = IPA_MEM_MODEM_HEADER,
.offset = 0x0688,
.size = 0x0140,
.canary_count = 2,
},
- [IPA_MEM_MODEM_PROC_CTX] = {
+ {
+ .id = IPA_MEM_MODEM_PROC_CTX,
.offset = 0x07d0,
.size = 0x0200,
.canary_count = 2,
},
- [IPA_MEM_AP_PROC_CTX] = {
+ {
+ .id = IPA_MEM_AP_PROC_CTX,
.offset = 0x09d0,
.size = 0x0200,
.canary_count = 0,
},
- [IPA_MEM_MODEM] = {
+ {
+ .id = IPA_MEM_MODEM,
.offset = 0x0bd8,
.size = 0x1024,
.canary_count = 0,
},
- [IPA_MEM_UC_EVENT_RING] = {
+ {
+ .id = IPA_MEM_UC_EVENT_RING,
.offset = 0x1c00,
.size = 0x0400,
.canary_count = 1,
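The mechanical conversion above, from designated array indices to an explicit .id member, is what makes per-SoC tables like the new v3.1 file practical: a [IPA_MEM_xxx] = { ... } initializer spans every index up to the largest enumerator used and silently zero-fills regions the SoC lacks, while the flat form lists only what exists and is searched by ipa_mem_find(). A contrast in miniature, using made-up identifiers:

        #include <linux/types.h>

        enum example_id { ID_A, ID_B, ID_C };   /* hypothetical region ids */

        /* Designated indices: the array spans the enum, holes become zero entries */
        static const struct { u32 offset; } by_index[] = {
                [ID_A] = { .offset = 0x100 },
                [ID_C] = { .offset = 0x300 },   /* ID_B remains as an all-zero slot */
        };      /* ARRAY_SIZE(by_index) == 3 */

        /* Explicit .id member: only defined regions appear; entries are looked up by id */
        static const struct { u32 id; u32 offset; } by_id[] = {
                { .id = ID_A, .offset = 0x100 },
                { .id = ID_C, .offset = 0x300 },
        };      /* ARRAY_SIZE(by_id) == 2 */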
diff --git a/drivers/net/ipa/ipa_data-v4.11.c b/drivers/net/ipa/ipa_data-v4.11.c
index 05806ceae8b5..9353efbd504f 100644
--- a/drivers/net/ipa/ipa_data-v4.11.c
+++ b/drivers/net/ipa/ipa_data-v4.11.c
@@ -220,112 +220,134 @@ static const struct ipa_resource_data ipa_resource_data = {
/* IPA-resident memory region data for an SoC having IPA v4.11 */
static const struct ipa_mem ipa_mem_local_data[] = {
- [IPA_MEM_UC_SHARED] = {
+ {
+ .id = IPA_MEM_UC_SHARED,
.offset = 0x0000,
.size = 0x0080,
.canary_count = 0,
},
- [IPA_MEM_UC_INFO] = {
+ {
+ .id = IPA_MEM_UC_INFO,
.offset = 0x0080,
.size = 0x0200,
.canary_count = 0,
},
- [IPA_MEM_V4_FILTER_HASHED] = {
+ {
+ .id = IPA_MEM_V4_FILTER_HASHED,
.offset = 0x0288,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V4_FILTER] = {
+ {
+ .id = IPA_MEM_V4_FILTER,
.offset = 0x0308,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_FILTER_HASHED] = {
+ {
+ .id = IPA_MEM_V6_FILTER_HASHED,
.offset = 0x0388,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_FILTER] = {
+ {
+ .id = IPA_MEM_V6_FILTER,
.offset = 0x0408,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V4_ROUTE_HASHED] = {
+ {
+ .id = IPA_MEM_V4_ROUTE_HASHED,
.offset = 0x0488,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V4_ROUTE] = {
+ {
+ .id = IPA_MEM_V4_ROUTE,
.offset = 0x0508,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_ROUTE_HASHED] = {
+ {
+ .id = IPA_MEM_V6_ROUTE_HASHED,
.offset = 0x0588,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_ROUTE] = {
+ {
+ .id = IPA_MEM_V6_ROUTE,
.offset = 0x0608,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_MODEM_HEADER] = {
+ {
+ .id = IPA_MEM_MODEM_HEADER,
.offset = 0x0688,
.size = 0x0240,
.canary_count = 2,
},
- [IPA_MEM_AP_HEADER] = {
+ {
+ .id = IPA_MEM_AP_HEADER,
.offset = 0x08c8,
.size = 0x0200,
.canary_count = 0,
},
- [IPA_MEM_MODEM_PROC_CTX] = {
+ {
+ .id = IPA_MEM_MODEM_PROC_CTX,
.offset = 0x0ad0,
.size = 0x0200,
.canary_count = 2,
},
- [IPA_MEM_AP_PROC_CTX] = {
+ {
+ .id = IPA_MEM_AP_PROC_CTX,
.offset = 0x0cd0,
.size = 0x0200,
.canary_count = 0,
},
- [IPA_MEM_NAT_TABLE] = {
+ {
+ .id = IPA_MEM_NAT_TABLE,
.offset = 0x0ee0,
.size = 0x0d00,
.canary_count = 4,
},
- [IPA_MEM_PDN_CONFIG] = {
+ {
+ .id = IPA_MEM_PDN_CONFIG,
.offset = 0x1be8,
.size = 0x0050,
.canary_count = 0,
},
- [IPA_MEM_STATS_QUOTA_MODEM] = {
+ {
+ .id = IPA_MEM_STATS_QUOTA_MODEM,
.offset = 0x1c40,
.size = 0x0030,
.canary_count = 4,
},
- [IPA_MEM_STATS_QUOTA_AP] = {
+ {
+ .id = IPA_MEM_STATS_QUOTA_AP,
.offset = 0x1c70,
.size = 0x0048,
.canary_count = 0,
},
- [IPA_MEM_STATS_TETHERING] = {
+ {
+ .id = IPA_MEM_STATS_TETHERING,
.offset = 0x1cb8,
.size = 0x0238,
.canary_count = 0,
},
- [IPA_MEM_STATS_DROP] = {
+ {
+ .id = IPA_MEM_STATS_DROP,
.offset = 0x1ef0,
.size = 0x0020,
.canary_count = 0,
},
- [IPA_MEM_MODEM] = {
+ {
+ .id = IPA_MEM_MODEM,
.offset = 0x1f18,
.size = 0x100c,
.canary_count = 2,
},
- [IPA_MEM_UC_EVENT_RING] = {
+ {
+ .id = IPA_MEM_END_MARKER,
.offset = 0x3000,
.size = 0x0000,
.canary_count = 1,
diff --git a/drivers/net/ipa/ipa_data-v4.2.c b/drivers/net/ipa/ipa_data-v4.2.c
index 8744f19c6401..3b09b7baa95f 100644
--- a/drivers/net/ipa/ipa_data-v4.2.c
+++ b/drivers/net/ipa/ipa_data-v4.2.c
@@ -219,92 +219,110 @@ static const struct ipa_resource_data ipa_resource_data = {
/* IPA-resident memory region data for an SoC having IPA v4.2 */
static const struct ipa_mem ipa_mem_local_data[] = {
- [IPA_MEM_UC_SHARED] = {
+ {
+ .id = IPA_MEM_UC_SHARED,
.offset = 0x0000,
.size = 0x0080,
.canary_count = 0,
},
- [IPA_MEM_UC_INFO] = {
+ {
+ .id = IPA_MEM_UC_INFO,
.offset = 0x0080,
.size = 0x0200,
.canary_count = 0,
},
- [IPA_MEM_V4_FILTER_HASHED] = {
+ {
+ .id = IPA_MEM_V4_FILTER_HASHED,
.offset = 0x0288,
.size = 0,
.canary_count = 2,
},
- [IPA_MEM_V4_FILTER] = {
+ {
+ .id = IPA_MEM_V4_FILTER,
.offset = 0x0290,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_FILTER_HASHED] = {
+ {
+ .id = IPA_MEM_V6_FILTER_HASHED,
.offset = 0x0310,
.size = 0,
.canary_count = 2,
},
- [IPA_MEM_V6_FILTER] = {
+ {
+ .id = IPA_MEM_V6_FILTER,
.offset = 0x0318,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V4_ROUTE_HASHED] = {
+ {
+ .id = IPA_MEM_V4_ROUTE_HASHED,
.offset = 0x0398,
.size = 0,
.canary_count = 2,
},
- [IPA_MEM_V4_ROUTE] = {
+ {
+ .id = IPA_MEM_V4_ROUTE,
.offset = 0x03a0,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_ROUTE_HASHED] = {
+ {
+ .id = IPA_MEM_V6_ROUTE_HASHED,
.offset = 0x0420,
.size = 0,
.canary_count = 2,
},
- [IPA_MEM_V6_ROUTE] = {
+ {
+ .id = IPA_MEM_V6_ROUTE,
.offset = 0x0428,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_MODEM_HEADER] = {
+ {
+ .id = IPA_MEM_MODEM_HEADER,
.offset = 0x04a8,
.size = 0x0140,
.canary_count = 2,
},
- [IPA_MEM_MODEM_PROC_CTX] = {
+ {
+ .id = IPA_MEM_MODEM_PROC_CTX,
.offset = 0x05f0,
.size = 0x0200,
.canary_count = 2,
},
- [IPA_MEM_AP_PROC_CTX] = {
+ {
+ .id = IPA_MEM_AP_PROC_CTX,
.offset = 0x07f0,
.size = 0x0200,
.canary_count = 0,
},
- [IPA_MEM_PDN_CONFIG] = {
+ {
+ .id = IPA_MEM_PDN_CONFIG,
.offset = 0x09f8,
.size = 0x0050,
.canary_count = 2,
},
- [IPA_MEM_STATS_QUOTA_MODEM] = {
+ {
+ .id = IPA_MEM_STATS_QUOTA_MODEM,
.offset = 0x0a50,
.size = 0x0060,
.canary_count = 2,
},
- [IPA_MEM_STATS_TETHERING] = {
+ {
+ .id = IPA_MEM_STATS_TETHERING,
.offset = 0x0ab0,
.size = 0x0140,
.canary_count = 0,
},
- [IPA_MEM_MODEM] = {
+ {
+ .id = IPA_MEM_MODEM,
.offset = 0x0bf0,
.size = 0x140c,
.canary_count = 0,
},
- [IPA_MEM_UC_EVENT_RING] = {
+ {
+ .id = IPA_MEM_END_MARKER,
.offset = 0x2000,
.size = 0,
.canary_count = 1,
diff --git a/drivers/net/ipa/ipa_data-v4.5.c b/drivers/net/ipa/ipa_data-v4.5.c
index 5f67a3a909ee..a99b6478fa3a 100644
--- a/drivers/net/ipa/ipa_data-v4.5.c
+++ b/drivers/net/ipa/ipa_data-v4.5.c
@@ -265,117 +265,140 @@ static const struct ipa_resource_data ipa_resource_data = {
/* IPA-resident memory region data for an SoC having IPA v4.5 */
static const struct ipa_mem ipa_mem_local_data[] = {
- [IPA_MEM_UC_SHARED] = {
+ {
+ .id = IPA_MEM_UC_SHARED,
.offset = 0x0000,
.size = 0x0080,
.canary_count = 0,
},
- [IPA_MEM_UC_INFO] = {
+ {
+ .id = IPA_MEM_UC_INFO,
.offset = 0x0080,
.size = 0x0200,
.canary_count = 0,
},
- [IPA_MEM_V4_FILTER_HASHED] = {
+ {
+ .id = IPA_MEM_V4_FILTER_HASHED,
.offset = 0x0288,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V4_FILTER] = {
+ {
+ .id = IPA_MEM_V4_FILTER,
.offset = 0x0308,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_FILTER_HASHED] = {
+ {
+ .id = IPA_MEM_V6_FILTER_HASHED,
.offset = 0x0388,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_FILTER] = {
+ {
+ .id = IPA_MEM_V6_FILTER,
.offset = 0x0408,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V4_ROUTE_HASHED] = {
+ {
+ .id = IPA_MEM_V4_ROUTE_HASHED,
.offset = 0x0488,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V4_ROUTE] = {
+ {
+ .id = IPA_MEM_V4_ROUTE,
.offset = 0x0508,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_ROUTE_HASHED] = {
+ {
+ .id = IPA_MEM_V6_ROUTE_HASHED,
.offset = 0x0588,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_ROUTE] = {
+ {
+ .id = IPA_MEM_V6_ROUTE,
.offset = 0x0608,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_MODEM_HEADER] = {
+ {
+ .id = IPA_MEM_MODEM_HEADER,
.offset = 0x0688,
.size = 0x0240,
.canary_count = 2,
},
- [IPA_MEM_AP_HEADER] = {
+ {
+ .id = IPA_MEM_AP_HEADER,
.offset = 0x08c8,
.size = 0x0200,
.canary_count = 0,
},
- [IPA_MEM_MODEM_PROC_CTX] = {
+ {
+ .id = IPA_MEM_MODEM_PROC_CTX,
.offset = 0x0ad0,
.size = 0x0b20,
.canary_count = 2,
},
- [IPA_MEM_AP_PROC_CTX] = {
+ {
+ .id = IPA_MEM_AP_PROC_CTX,
.offset = 0x15f0,
.size = 0x0200,
.canary_count = 0,
},
- [IPA_MEM_NAT_TABLE] = {
+ {
+ .id = IPA_MEM_NAT_TABLE,
.offset = 0x1800,
.size = 0x0d00,
.canary_count = 4,
},
- [IPA_MEM_STATS_QUOTA_MODEM] = {
+ {
+ .id = IPA_MEM_STATS_QUOTA_MODEM,
.offset = 0x2510,
.size = 0x0030,
.canary_count = 4,
},
- [IPA_MEM_STATS_QUOTA_AP] = {
+ {
+ .id = IPA_MEM_STATS_QUOTA_AP,
.offset = 0x2540,
.size = 0x0048,
.canary_count = 0,
},
- [IPA_MEM_STATS_TETHERING] = {
+ {
+ .id = IPA_MEM_STATS_TETHERING,
.offset = 0x2588,
.size = 0x0238,
.canary_count = 0,
},
- [IPA_MEM_STATS_FILTER_ROUTE] = {
+ {
+ .id = IPA_MEM_STATS_FILTER_ROUTE,
.offset = 0x27c0,
.size = 0x0800,
.canary_count = 0,
},
- [IPA_MEM_STATS_DROP] = {
+ {
+ .id = IPA_MEM_STATS_DROP,
.offset = 0x2fc0,
.size = 0x0020,
.canary_count = 0,
},
- [IPA_MEM_MODEM] = {
+ {
+ .id = IPA_MEM_MODEM,
.offset = 0x2fe8,
.size = 0x0800,
.canary_count = 2,
},
- [IPA_MEM_UC_EVENT_RING] = {
+ {
+ .id = IPA_MEM_UC_EVENT_RING,
.offset = 0x3800,
.size = 0x1000,
.canary_count = 1,
},
- [IPA_MEM_PDN_CONFIG] = {
+ {
+ .id = IPA_MEM_PDN_CONFIG,
.offset = 0x4800,
.size = 0x0050,
.canary_count = 0,
diff --git a/drivers/net/ipa/ipa_data-v4.9.c b/drivers/net/ipa/ipa_data-v4.9.c
index e41be790f45e..798d43e1eb13 100644
--- a/drivers/net/ipa/ipa_data-v4.9.c
+++ b/drivers/net/ipa/ipa_data-v4.9.c
@@ -263,116 +263,140 @@ static const struct ipa_resource_data ipa_resource_data = {
/* IPA-resident memory region data for an SoC having IPA v4.9 */
static const struct ipa_mem ipa_mem_local_data[] = {
- [IPA_MEM_UC_SHARED] = {
+ {
+ .id = IPA_MEM_UC_SHARED,
.offset = 0x0000,
.size = 0x0080,
.canary_count = 0,
},
- [IPA_MEM_UC_INFO] = {
+ {
+ .id = IPA_MEM_UC_INFO,
.offset = 0x0080,
.size = 0x0200,
.canary_count = 0,
},
- [IPA_MEM_V4_FILTER_HASHED] = { .offset = 0x0288,
+ {
+ .id = IPA_MEM_V4_FILTER_HASHED,
+ .offset = 0x0288,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V4_FILTER] = {
+ {
+ .id = IPA_MEM_V4_FILTER,
.offset = 0x0308,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_FILTER_HASHED] = {
+ {
+ .id = IPA_MEM_V6_FILTER_HASHED,
.offset = 0x0388,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_FILTER] = {
+ {
+ .id = IPA_MEM_V6_FILTER,
.offset = 0x0408,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V4_ROUTE_HASHED] = {
+ {
+ .id = IPA_MEM_V4_ROUTE_HASHED,
.offset = 0x0488,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V4_ROUTE] = {
+ {
+ .id = IPA_MEM_V4_ROUTE,
.offset = 0x0508,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_ROUTE_HASHED] = {
+ {
+ .id = IPA_MEM_V6_ROUTE_HASHED,
.offset = 0x0588,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_ROUTE] = {
+ {
+ .id = IPA_MEM_V6_ROUTE,
.offset = 0x0608,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_MODEM_HEADER] = {
+ {
+ .id = IPA_MEM_MODEM_HEADER,
.offset = 0x0688,
.size = 0x0240,
.canary_count = 2,
},
- [IPA_MEM_AP_HEADER] = {
+ {
+ .id = IPA_MEM_AP_HEADER,
.offset = 0x08c8,
.size = 0x0200,
.canary_count = 0,
},
- [IPA_MEM_MODEM_PROC_CTX] = {
+ {
+ .id = IPA_MEM_MODEM_PROC_CTX,
.offset = 0x0ad0,
.size = 0x0b20,
.canary_count = 2,
},
- [IPA_MEM_AP_PROC_CTX] = {
+ {
+ .id = IPA_MEM_AP_PROC_CTX,
.offset = 0x15f0,
.size = 0x0200,
.canary_count = 0,
},
- [IPA_MEM_NAT_TABLE] = {
+ {
+ .id = IPA_MEM_NAT_TABLE,
.offset = 0x1800,
.size = 0x0d00,
.canary_count = 4,
},
- [IPA_MEM_STATS_QUOTA_MODEM] = {
+ {
+ .id = IPA_MEM_STATS_QUOTA_MODEM,
.offset = 0x2510,
.size = 0x0030,
.canary_count = 4,
},
- [IPA_MEM_STATS_QUOTA_AP] = {
+ {
+ .id = IPA_MEM_STATS_QUOTA_AP,
.offset = 0x2540,
.size = 0x0048,
.canary_count = 0,
},
- [IPA_MEM_STATS_TETHERING] = {
+ {
+ .id = IPA_MEM_STATS_TETHERING,
.offset = 0x2588,
.size = 0x0238,
.canary_count = 0,
},
- [IPA_MEM_STATS_FILTER_ROUTE] = {
+ {
+ .id = IPA_MEM_STATS_FILTER_ROUTE,
.offset = 0x27c0,
.size = 0x0800,
.canary_count = 0,
},
- [IPA_MEM_STATS_DROP] = {
+ {
+ .id = IPA_MEM_STATS_DROP,
.offset = 0x2fc0,
.size = 0x0020,
.canary_count = 0,
},
- [IPA_MEM_MODEM] = {
+ {
+ .id = IPA_MEM_MODEM,
.offset = 0x2fe8,
.size = 0x0800,
.canary_count = 2,
},
- [IPA_MEM_UC_EVENT_RING] = {
+ {
+ .id = IPA_MEM_UC_EVENT_RING,
.offset = 0x3800,
.size = 0x1000,
.canary_count = 1,
},
- [IPA_MEM_PDN_CONFIG] = {
+ {
+ .id = IPA_MEM_PDN_CONFIG,
.offset = 0x4800,
.size = 0x0050,
.canary_count = 0,
diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h
index 5c4c8d72d7d8..5bc244c8f94e 100644
--- a/drivers/net/ipa/ipa_data.h
+++ b/drivers/net/ipa/ipa_data.h
@@ -300,6 +300,7 @@ struct ipa_data {
const struct ipa_clock_data *clock_data;
};
+extern const struct ipa_data ipa_data_v3_1;
extern const struct ipa_data ipa_data_v3_5_1;
extern const struct ipa_data ipa_data_v4_2;
extern const struct ipa_data ipa_data_v4_5;
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index ccc99ad983eb..ab02669bae4e 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -75,8 +75,6 @@ struct ipa_status {
#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK GENMASK(31, 22)
#define IPA_STATUS_FLAGS2_TAG_FMASK GENMASK_ULL(63, 16)
-#ifdef IPA_VALIDATE
-
static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
const struct ipa_gsi_endpoint_data *all_data,
const struct ipa_gsi_endpoint_data *data)
@@ -88,11 +86,6 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
if (ipa_gsi_endpoint_data_empty(data))
return true;
- /* IPA v4.5+ uses checksum offload, not yet supported by RMNet */
- if (ipa->version >= IPA_VERSION_4_5)
- if (data->endpoint.config.checksum)
- return false;
-
if (!data->toward_ipa) {
if (data->endpoint.filter_support) {
dev_err(dev, "filtering not supported for "
@@ -230,27 +223,6 @@ static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
return true;
}
-#else /* !IPA_VALIDATE */
-
-static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
- const struct ipa_gsi_endpoint_data *data)
-{
- const struct ipa_gsi_endpoint_data *dp = data;
- enum ipa_endpoint_name name;
-
- if (ipa->version < IPA_VERSION_4_5)
- return true;
-
- /* IPA v4.5+ uses checksum offload, not yet supported by RMNet */
- for (name = 0; name < count; name++, dp++)
- if (data->endpoint.config.checksum)
- return false;
-
- return true;
-}
-
-#endif /* !IPA_VALIDATE */
-
/* Allocate a transaction to use on a non-command endpoint */
static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
u32 tre_count)
@@ -457,28 +429,34 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
{
u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id);
+ enum ipa_cs_offload_en enabled;
u32 val = 0;
/* FRAG_OFFLOAD_EN is 0 */
if (endpoint->data->checksum) {
+ enum ipa_version version = endpoint->ipa->version;
+
if (endpoint->toward_ipa) {
u32 checksum_offset;
- val |= u32_encode_bits(IPA_CS_OFFLOAD_UL,
- CS_OFFLOAD_EN_FMASK);
/* Checksum header offset is in 4-byte units */
checksum_offset = sizeof(struct rmnet_map_header);
checksum_offset /= sizeof(u32);
val |= u32_encode_bits(checksum_offset,
CS_METADATA_HDR_OFFSET_FMASK);
+
+ enabled = version < IPA_VERSION_4_5
+ ? IPA_CS_OFFLOAD_UL
+ : IPA_CS_OFFLOAD_INLINE;
} else {
- val |= u32_encode_bits(IPA_CS_OFFLOAD_DL,
- CS_OFFLOAD_EN_FMASK);
+ enabled = version < IPA_VERSION_4_5
+ ? IPA_CS_OFFLOAD_DL
+ : IPA_CS_OFFLOAD_INLINE;
}
} else {
- val |= u32_encode_bits(IPA_CS_OFFLOAD_NONE,
- CS_OFFLOAD_EN_FMASK);
+ enabled = IPA_CS_OFFLOAD_NONE;
}
+ val |= u32_encode_bits(enabled, CS_OFFLOAD_EN_FMASK);
/* CS_GEN_QMB_MASTER_SEL is 0 */
iowrite32(val, endpoint->ipa->reg_virt + offset);
@@ -498,6 +476,27 @@ static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint)
iowrite32(val, endpoint->ipa->reg_virt + offset);
}
+static u32
+ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint)
+{
+ u32 header_size = sizeof(struct rmnet_map_header);
+
+ /* Without checksum offload, we just have the MAP header */
+ if (!endpoint->data->checksum)
+ return header_size;
+
+ if (version < IPA_VERSION_4_5) {
+ /* Checksum header inserted for AP TX endpoints only */
+ if (endpoint->toward_ipa)
+ header_size += sizeof(struct rmnet_map_ul_csum_header);
+ } else {
+ /* Checksum header is used in both directions */
+ header_size += sizeof(struct rmnet_map_v5_csum_header);
+ }
+
+ return header_size;
+}
+
/**
* ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
* @endpoint: Endpoint pointer
@@ -526,13 +525,11 @@ static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
u32 val = 0;
if (endpoint->data->qmap) {
- size_t header_size = sizeof(struct rmnet_map_header);
enum ipa_version version = ipa->version;
+ size_t header_size;
- /* We might supply a checksum header after the QMAP header */
- if (endpoint->toward_ipa && endpoint->data->checksum)
- header_size += sizeof(struct rmnet_map_ul_csum_header);
- val |= ipa_header_size_encoded(version, header_size);
+ header_size = ipa_qmap_header_size(version, endpoint);
+ val = ipa_header_size_encoded(version, header_size);
/* Define how to fill fields in a received QMAP header */
if (!endpoint->toward_ipa) {
@@ -1734,6 +1731,21 @@ int ipa_endpoint_config(struct ipa *ipa)
u32 max;
u32 val;
+ /* Prior to IPA v3.5, the FLAVOR_0 register was not supported.
+ * Furthermore, the endpoints were not grouped such that TX
+ * endpoint numbers started with 0 and RX endpoints had numbers
+ * higher than all TX endpoints, so we can't do the simple
+ * direction check used for newer hardware below.
+ *
+ * For hardware that doesn't support the FLAVOR_0 register,
+ * just set the available mask to support any endpoint, and
+ * assume the configuration is valid.
+ */
+ if (ipa->version < IPA_VERSION_3_5) {
+ ipa->available = ~0;
+ return 0;
+ }
+
/* Find out about the endpoints supplied by the hardware, and ensure
* the highest one doesn't exceed the number we support.
*/
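For hardware older than IPA v3.5 the patch simply sets ipa->available to ~0: "available" is a bitmask with one bit per endpoint ID, so all-ones declares every endpoint usable and the FLAVOR_0-based direction checks are skipped. A sketch of how such a mask is typically consulted; the driver's actual test may differ in detail:

        #include <linux/bits.h>
        #include <linux/types.h>

        /* With available == ~0 this returns true for any endpoint_id below 32 */
        static bool endpoint_available(u32 available, u32 endpoint_id)
        {
                return !!(available & BIT(endpoint_id));
        }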
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index 9915603ed10b..9810c61a0320 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -31,6 +31,7 @@
#include "ipa_uc.h"
#include "ipa_interrupt.h"
#include "gsi_trans.h"
+#include "ipa_sysfs.h"
/**
* DOC: The IP Accelerator
@@ -399,16 +400,20 @@ static void ipa_hardware_config(struct ipa *ipa, const struct ipa_data *data)
/* Implement some hardware workarounds */
if (version >= IPA_VERSION_4_0 && version < IPA_VERSION_4_5) {
- /* Enable open global clocks (not needed for IPA v4.5) */
- val = GLOBAL_FMASK;
- val |= GLOBAL_2X_CLK_FMASK;
- iowrite32(val, ipa->reg_virt + IPA_REG_CLKON_CFG_OFFSET);
-
/* Disable PA mask to allow HOLB drop */
val = ioread32(ipa->reg_virt + IPA_REG_TX_CFG_OFFSET);
val &= ~PA_MASK_EN_FMASK;
iowrite32(val, ipa->reg_virt + IPA_REG_TX_CFG_OFFSET);
+
+ /* Enable open global clocks in the CLKON configuration */
+ val = GLOBAL_FMASK | GLOBAL_2X_CLK_FMASK;
+ } else if (version == IPA_VERSION_3_1) {
+ val = MISC_FMASK; /* Disable MISC clock gating */
+ } else {
+ val = 0; /* No CLKON configuration needed */
}
+ if (val)
+ iowrite32(val, ipa->reg_virt + IPA_REG_CLKON_CFG_OFFSET);
ipa_hardware_config_comp(ipa);
@@ -529,6 +534,7 @@ static int ipa_firmware_load(struct device *dev)
}
ret = of_address_to_resource(node, 0, &res);
+ of_node_put(node);
if (ret) {
dev_err(dev, "error %d getting \"memory-region\" resource\n",
ret);
@@ -573,6 +579,10 @@ out_release_firmware:
static const struct of_device_id ipa_match[] = {
{
+ .compatible = "qcom,msm8998-ipa",
+ .data = &ipa_data_v3_1,
+ },
+ {
.compatible = "qcom,sdm845-ipa",
.data = &ipa_data_v3_5_1,
},
@@ -639,6 +649,27 @@ static void ipa_validate_build(void)
#endif /* IPA_VALIDATE */
}
+static bool ipa_version_valid(enum ipa_version version)
+{
+ switch (version) {
+ case IPA_VERSION_3_0:
+ case IPA_VERSION_3_1:
+ case IPA_VERSION_3_5:
+ case IPA_VERSION_3_5_1:
+ case IPA_VERSION_4_0:
+ case IPA_VERSION_4_1:
+ case IPA_VERSION_4_2:
+ case IPA_VERSION_4_5:
+ case IPA_VERSION_4_7:
+ case IPA_VERSION_4_9:
+ case IPA_VERSION_4_11:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
/**
* ipa_probe() - IPA platform driver probe function
* @pdev: Platform device pointer
@@ -676,11 +707,15 @@ static int ipa_probe(struct platform_device *pdev)
/* Get configuration data early; needed for clock initialization */
data = of_device_get_match_data(dev);
if (!data) {
- /* This is really IPA_VALIDATE (should never happen) */
dev_err(dev, "matched hardware not supported\n");
return -ENODEV;
}
+ if (!ipa_version_valid(data->version)) {
+ dev_err(dev, "invalid IPA version\n");
+ return -EINVAL;
+ }
+
/* If we need Trust Zone, make sure it's available */
modem_init = of_property_read_bool(dev->of_node, "modem-init");
if (!modem_init)
@@ -881,6 +916,13 @@ static const struct dev_pm_ops ipa_pm_ops = {
.resume = ipa_resume,
};
+static const struct attribute_group *ipa_attribute_groups[] = {
+ &ipa_attribute_group,
+ &ipa_feature_attribute_group,
+ &ipa_modem_attribute_group,
+ NULL,
+};
+
static struct platform_driver ipa_driver = {
.probe = ipa_probe,
.remove = ipa_remove,
@@ -889,6 +931,7 @@ static struct platform_driver ipa_driver = {
.name = "ipa",
.pm = &ipa_pm_ops,
.of_match_table = ipa_match,
+ .dev_groups = ipa_attribute_groups,
},
};
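The ipa_attribute_group, ipa_feature_attribute_group and ipa_modem_attribute_group referenced above come from the new ipa_sysfs.c (listed in the diffstat but not shown in this section); hooking them into .dev_groups lets the driver core create and remove the sysfs files together with the device. The usual shape of such a group is sketched below with illustrative names, not the actual ipa_sysfs.c contents:

        #include <linux/device.h>
        #include <linux/sysfs.h>

        static ssize_t version_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
        {
                /* A real attribute would derive its value from dev_get_drvdata(dev) */
                return sysfs_emit(buf, "example\n");
        }
        static DEVICE_ATTR_RO(version);

        static struct attribute *example_attrs[] = {
                &dev_attr_version.attr,
                NULL,
        };

        const struct attribute_group example_attribute_group = {
                .attrs = example_attrs,
        };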
diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
index 1624125e7459..4337b0920d3d 100644
--- a/drivers/net/ipa/ipa_mem.c
+++ b/drivers/net/ipa/ipa_mem.c
@@ -26,11 +26,26 @@
/* SMEM host id representing the modem. */
#define QCOM_SMEM_HOST_MODEM 1
+const struct ipa_mem *ipa_mem_find(struct ipa *ipa, enum ipa_mem_id mem_id)
+{
+ u32 i;
+
+ for (i = 0; i < ipa->mem_count; i++) {
+ const struct ipa_mem *mem = &ipa->mem[i];
+
+ if (mem->id == mem_id)
+ return mem;
+ }
+
+ return NULL;
+}
+
/* Add an immediate command to a transaction that zeroes a memory region */
static void
-ipa_mem_zero_region_add(struct gsi_trans *trans, const struct ipa_mem *mem)
+ipa_mem_zero_region_add(struct gsi_trans *trans, enum ipa_mem_id mem_id)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);
dma_addr_t addr = ipa->zero_addr;
if (!mem->size)
@@ -60,6 +75,7 @@ ipa_mem_zero_region_add(struct gsi_trans *trans, const struct ipa_mem *mem)
int ipa_mem_setup(struct ipa *ipa)
{
dma_addr_t addr = ipa->zero_addr;
+ const struct ipa_mem *mem;
struct gsi_trans *trans;
u32 offset;
u16 size;
@@ -74,39 +90,136 @@ int ipa_mem_setup(struct ipa *ipa)
return -EBUSY;
}
- /* Initialize IPA-local header memory. The modem and AP header
- * regions are contiguous, and initialized together.
+ /* Initialize IPA-local header memory. The AP header region, if
+ * present, is contiguous with and follows the modem header region,
+ * and they are initialized together.
*/
- offset = ipa->mem[IPA_MEM_MODEM_HEADER].offset;
- size = ipa->mem[IPA_MEM_MODEM_HEADER].size;
- size += ipa->mem[IPA_MEM_AP_HEADER].size;
+ mem = ipa_mem_find(ipa, IPA_MEM_MODEM_HEADER);
+ offset = mem->offset;
+ size = mem->size;
+ mem = ipa_mem_find(ipa, IPA_MEM_AP_HEADER);
+ if (mem)
+ size += mem->size;
ipa_cmd_hdr_init_local_add(trans, offset, size, addr);
- ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM_PROC_CTX]);
-
- ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_AP_PROC_CTX]);
-
- ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM]);
+ ipa_mem_zero_region_add(trans, IPA_MEM_MODEM_PROC_CTX);
+ ipa_mem_zero_region_add(trans, IPA_MEM_AP_PROC_CTX);
+ ipa_mem_zero_region_add(trans, IPA_MEM_MODEM);
gsi_trans_commit_wait(trans);
/* Tell the hardware where the processing context area is located */
- offset = ipa->mem_offset + ipa->mem[IPA_MEM_MODEM_PROC_CTX].offset;
+ mem = ipa_mem_find(ipa, IPA_MEM_MODEM_PROC_CTX);
+ offset = ipa->mem_offset + mem->offset;
val = proc_cntxt_base_addr_encoded(ipa->version, offset);
iowrite32(val, ipa->reg_virt + IPA_REG_LOCAL_PKT_PROC_CNTXT_OFFSET);
return 0;
}
-#ifdef IPA_VALIDATE
+/* Is the given memory region ID valid for the current IPA version? */
+static bool ipa_mem_id_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
+{
+ enum ipa_version version = ipa->version;
+
+ switch (mem_id) {
+ case IPA_MEM_UC_SHARED:
+ case IPA_MEM_UC_INFO:
+ case IPA_MEM_V4_FILTER_HASHED:
+ case IPA_MEM_V4_FILTER:
+ case IPA_MEM_V6_FILTER_HASHED:
+ case IPA_MEM_V6_FILTER:
+ case IPA_MEM_V4_ROUTE_HASHED:
+ case IPA_MEM_V4_ROUTE:
+ case IPA_MEM_V6_ROUTE_HASHED:
+ case IPA_MEM_V6_ROUTE:
+ case IPA_MEM_MODEM_HEADER:
+ case IPA_MEM_AP_HEADER:
+ case IPA_MEM_MODEM_PROC_CTX:
+ case IPA_MEM_AP_PROC_CTX:
+ case IPA_MEM_MODEM:
+ case IPA_MEM_UC_EVENT_RING:
+ case IPA_MEM_PDN_CONFIG:
+ case IPA_MEM_STATS_QUOTA_MODEM:
+ case IPA_MEM_STATS_QUOTA_AP:
+ case IPA_MEM_END_MARKER: /* pseudo region */
+ break;
+
+ case IPA_MEM_STATS_TETHERING:
+ case IPA_MEM_STATS_DROP:
+ if (version < IPA_VERSION_4_0)
+ return false;
+ break;
+
+ case IPA_MEM_STATS_V4_FILTER:
+ case IPA_MEM_STATS_V6_FILTER:
+ case IPA_MEM_STATS_V4_ROUTE:
+ case IPA_MEM_STATS_V6_ROUTE:
+ if (version < IPA_VERSION_4_0 || version > IPA_VERSION_4_2)
+ return false;
+ break;
+
+ case IPA_MEM_NAT_TABLE:
+ case IPA_MEM_STATS_FILTER_ROUTE:
+ if (version < IPA_VERSION_4_5)
+ return false;
+ break;
+
+ default:
+ return false;
+ }
+
+ return true;
+}
-static bool ipa_mem_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
+/* Must the given memory region be present in the configuration? */
+static bool ipa_mem_id_required(struct ipa *ipa, enum ipa_mem_id mem_id)
+{
+ switch (mem_id) {
+ case IPA_MEM_UC_SHARED:
+ case IPA_MEM_UC_INFO:
+ case IPA_MEM_V4_FILTER_HASHED:
+ case IPA_MEM_V4_FILTER:
+ case IPA_MEM_V6_FILTER_HASHED:
+ case IPA_MEM_V6_FILTER:
+ case IPA_MEM_V4_ROUTE_HASHED:
+ case IPA_MEM_V4_ROUTE:
+ case IPA_MEM_V6_ROUTE_HASHED:
+ case IPA_MEM_V6_ROUTE:
+ case IPA_MEM_MODEM_HEADER:
+ case IPA_MEM_MODEM_PROC_CTX:
+ case IPA_MEM_AP_PROC_CTX:
+ case IPA_MEM_MODEM:
+ return true;
+
+ case IPA_MEM_PDN_CONFIG:
+ case IPA_MEM_STATS_QUOTA_MODEM:
+ case IPA_MEM_STATS_TETHERING:
+ return ipa->version >= IPA_VERSION_4_0;
+
+ default:
+ return false; /* Anything else is optional */
+ }
+}
+
+static bool ipa_mem_valid_one(struct ipa *ipa, const struct ipa_mem *mem)
{
- const struct ipa_mem *mem = &ipa->mem[mem_id];
struct device *dev = &ipa->pdev->dev;
+ enum ipa_mem_id mem_id = mem->id;
u16 size_multiple;
+ /* Make sure the memory region is valid for this version of IPA */
+ if (!ipa_mem_id_valid(ipa, mem_id)) {
+ dev_err(dev, "region id %u not valid\n", mem_id);
+ return false;
+ }
+
+ if (!mem->size && !mem->canary_count) {
+ dev_err(dev, "empty memory region %u\n", mem_id);
+ return false;
+ }
+
/* Other than modem memory, sizes must be a multiple of 8 */
size_multiple = mem_id == IPA_MEM_MODEM ? 4 : 8;
if (mem->size % size_multiple)
@@ -117,23 +230,74 @@ static bool ipa_mem_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
else if (mem->offset < mem->canary_count * sizeof(__le32))
dev_err(dev, "region %u offset too small for %hu canaries\n",
mem_id, mem->canary_count);
- else if (mem->offset + mem->size > ipa->mem_size)
- dev_err(dev, "region %u ends beyond memory limit (0x%08x)\n",
- mem_id, ipa->mem_size);
+ else if (mem_id == IPA_MEM_END_MARKER && mem->size)
+ dev_err(dev, "non-zero end marker region size\n");
else
return true;
return false;
}
-#else /* !IPA_VALIDATE */
-
-static bool ipa_mem_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
+/* Verify each defined memory region is valid. */
+static bool ipa_mem_valid(struct ipa *ipa, const struct ipa_mem_data *mem_data)
{
+ DECLARE_BITMAP(regions, IPA_MEM_COUNT) = { };
+ struct device *dev = &ipa->pdev->dev;
+ enum ipa_mem_id mem_id;
+ u32 i;
+
+ if (mem_data->local_count > IPA_MEM_COUNT) {
+ dev_err(dev, "too many memory regions (%u > %u)\n",
+ mem_data->local_count, IPA_MEM_COUNT);
+ return false;
+ }
+
+ for (i = 0; i < mem_data->local_count; i++) {
+ const struct ipa_mem *mem = &mem_data->local[i];
+
+ if (__test_and_set_bit(mem->id, regions)) {
+ dev_err(dev, "duplicate memory region %u\n", mem->id);
+ return false;
+ }
+
+ /* Defined regions have non-zero size and/or canary count */
+ if (!ipa_mem_valid_one(ipa, mem))
+ return false;
+ }
+
+ /* Now see if any required regions are not defined */
+ for (mem_id = find_first_zero_bit(regions, IPA_MEM_COUNT);
+ mem_id < IPA_MEM_COUNT;
+ mem_id = find_next_zero_bit(regions, IPA_MEM_COUNT, mem_id + 1)) {
+ if (ipa_mem_id_required(ipa, mem_id))
+ dev_err(dev, "required memory region %u missing\n",
+ mem_id);
+ }
+
return true;
}
-#endif /*! IPA_VALIDATE */
+/* Do all memory regions fit within the IPA local memory? */
+static bool ipa_mem_size_valid(struct ipa *ipa)
+{
+ struct device *dev = &ipa->pdev->dev;
+ u32 limit = ipa->mem_size;
+ u32 i;
+
+ for (i = 0; i < ipa->mem_count; i++) {
+ const struct ipa_mem *mem = &ipa->mem[i];
+
+ if (mem->offset + mem->size <= limit)
+ continue;
+
+ dev_err(dev, "region %u ends beyond memory limit (0x%08x)\n",
+ mem->id, limit);
+
+ return false;
+ }
+
+ return true;
+}
/**
* ipa_mem_config() - Configure IPA shared memory
@@ -144,11 +308,12 @@ static bool ipa_mem_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
int ipa_mem_config(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
- enum ipa_mem_id mem_id;
+ const struct ipa_mem *mem;
dma_addr_t addr;
u32 mem_size;
void *virt;
u32 val;
+ u32 i;
/* Check the advertised location and size of the shared memory area */
val = ioread32(ipa->reg_virt + IPA_REG_SHARED_MEM_SIZE_OFFSET);
@@ -168,6 +333,10 @@ int ipa_mem_config(struct ipa *ipa)
mem_size);
}
+ /* We know our memory size; make sure regions are all in range */
+ if (!ipa_mem_size_valid(ipa))
+ return -EINVAL;
+
/* Prealloc DMA memory for zeroing regions */
virt = dma_alloc_coherent(dev, IPA_MEM_MAX, &addr, GFP_KERNEL);
if (!virt)
@@ -176,29 +345,18 @@ int ipa_mem_config(struct ipa *ipa)
ipa->zero_virt = virt;
ipa->zero_size = IPA_MEM_MAX;
- /* Verify each defined memory region is valid, and if indicated
- * for the region, write "canary" values in the space prior to
- * the region's base address.
+ /* For each defined region, write "canary" values in the
+ * space prior to the region's base address if indicated.
*/
- for (mem_id = 0; mem_id < ipa->mem_count; mem_id++) {
- const struct ipa_mem *mem = &ipa->mem[mem_id];
- u16 canary_count;
+ for (i = 0; i < ipa->mem_count; i++) {
+ u16 canary_count = ipa->mem[i].canary_count;
__le32 *canary;
- /* Validate all regions (even undefined ones) */
- if (!ipa_mem_valid(ipa, mem_id))
- goto err_dma_free;
-
- /* Skip over undefined regions */
- if (!mem->offset && !mem->size)
- continue;
-
- canary_count = mem->canary_count;
if (!canary_count)
continue;
/* Write canary values in the space before the region */
- canary = ipa->mem_virt + ipa->mem_offset + mem->offset;
+ canary = ipa->mem_virt + ipa->mem_offset + ipa->mem[i].offset;
do
*--canary = IPA_MEM_CANARY_VAL;
while (--canary_count);
@@ -212,8 +370,9 @@ int ipa_mem_config(struct ipa *ipa)
if (!ipa_cmd_data_valid(ipa))
goto err_dma_free;
- /* Verify the microcontroller ring alignment (0 is OK too) */
- if (ipa->mem[IPA_MEM_UC_EVENT_RING].offset % 1024) {
+ /* Verify the microcontroller ring alignment (if defined) */
+ mem = ipa_mem_find(ipa, IPA_MEM_UC_EVENT_RING);
+ if (mem && mem->offset % 1024) {
dev_err(dev, "microcontroller ring not 1024-byte aligned\n");
goto err_dma_free;
}
@@ -261,11 +420,9 @@ int ipa_mem_zero_modem(struct ipa *ipa)
return -EBUSY;
}
- ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM_HEADER]);
-
- ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM_PROC_CTX]);
-
- ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM]);
+ ipa_mem_zero_region_add(trans, IPA_MEM_MODEM_HEADER);
+ ipa_mem_zero_region_add(trans, IPA_MEM_MODEM_PROC_CTX);
+ ipa_mem_zero_region_add(trans, IPA_MEM_MODEM);
gsi_trans_commit_wait(trans);
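
The ipa_mem_zero_region_add() helper now takes a region ID rather than a pointer; its updated definition appears earlier in this patch and is not shown in this excerpt. A minimal sketch of the shape these callers rely on, assuming the existing ipa_cmd_dma_shared_mem_add() command helper and the driver's pre-zeroed DMA buffer, is:

	/* Sketch only -- the real helper is defined earlier in this patch.
	 * Look up the region by ID, skip it if it has no size, and otherwise
	 * queue a DMA command that fills it from the pre-zeroed buffer
	 * (ipa->zero_addr is set up in ipa_mem_config(), not shown here).
	 */
	static void ipa_mem_zero_region_add(struct gsi_trans *trans,
					    enum ipa_mem_id mem_id)
	{
		struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
		const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);

		if (!mem->size)
			return;

		ipa_cmd_dma_shared_mem_add(trans, mem->offset, mem->size,
					   ipa->zero_addr, true);
	}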
@@ -380,7 +537,7 @@ static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
* (in this case, the modem). An allocation from SMEM is persistent
* until the AP reboots; there is no way to free an allocated SMEM
* region. Allocation only reserves the space; to use it you need
- * to "get" a pointer it (this implies no reference counting).
+ * to "get" a pointer it (this does not imply reference counting).
* The item might have already been allocated, in which case we
* use it unless the size isn't what we expect.
*/
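
As background for the comment above, the SMEM usage it describes is a two-step allocate-then-get pattern. A self-contained sketch follows; the helper name, host argument, and error handling are illustrative, not the driver's code, and only qcom_smem_alloc()/qcom_smem_get() are real APIs here.

	#include <linux/err.h>
	#include <linux/soc/qcom/smem.h>

	/* Illustrative sketch of the allocate-then-get SMEM pattern described
	 * above.  The item stays allocated until the AP reboots; "get" only
	 * returns a pointer, it does not take a reference.
	 */
	static void *example_smem_setup(unsigned int host, unsigned int item,
					size_t size)
	{
		size_t actual;
		void *virt;
		int ret;

		ret = qcom_smem_alloc(host, item, size);
		if (ret && ret != -EEXIST)
			return NULL;		/* allocation failed */

		virt = qcom_smem_get(host, item, &actual);
		if (IS_ERR(virt) || actual != size)
			return NULL;		/* missing, or unexpected size */

		return virt;
	}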
@@ -457,11 +614,12 @@ int ipa_mem_init(struct ipa *ipa, const struct ipa_mem_data *mem_data)
struct resource *res;
int ret;
- if (mem_data->local_count > IPA_MEM_COUNT) {
- dev_err(dev, "to many memory regions (%u > %u)\n",
- mem_data->local_count, IPA_MEM_COUNT);
+ /* Make sure the set of defined memory regions is valid */
+ if (!ipa_mem_valid(ipa, mem_data))
return -EINVAL;
- }
+
+ ipa->mem_count = mem_data->local_count;
+ ipa->mem = mem_data->local;
ret = dma_set_mask_and_coherent(&ipa->pdev->dev, DMA_BIT_MASK(64));
if (ret) {
@@ -486,10 +644,6 @@ int ipa_mem_init(struct ipa *ipa, const struct ipa_mem_data *mem_data)
ipa->mem_addr = res->start;
ipa->mem_size = resource_size(res);
- /* The ipa->mem[] array is indexed by enum ipa_mem_id values */
- ipa->mem_count = mem_data->local_count;
- ipa->mem = mem_data->local;
-
ret = ipa_imem_init(ipa, mem_data->imem_addr, mem_data->imem_size);
if (ret)
goto err_unmap;
diff --git a/drivers/net/ipa/ipa_mem.h b/drivers/net/ipa/ipa_mem.h
index a422aec69e5d..570bfdd99bff 100644
--- a/drivers/net/ipa/ipa_mem.h
+++ b/drivers/net/ipa/ipa_mem.h
@@ -54,37 +54,43 @@ enum ipa_mem_id {
IPA_MEM_V6_ROUTE_HASHED, /* 2 canaries */
IPA_MEM_V6_ROUTE, /* 2 canaries */
IPA_MEM_MODEM_HEADER, /* 2 canaries */
- IPA_MEM_AP_HEADER, /* 0 canaries */
+ IPA_MEM_AP_HEADER, /* 0 canaries, optional */
IPA_MEM_MODEM_PROC_CTX, /* 2 canaries */
IPA_MEM_AP_PROC_CTX, /* 0 canaries */
- IPA_MEM_NAT_TABLE, /* 4 canaries (IPA v4.5 and above) */
- IPA_MEM_PDN_CONFIG, /* 0/2 canaries (IPA v4.0 and above) */
- IPA_MEM_STATS_QUOTA_MODEM, /* 2/4 canaries (IPA v4.0 and above) */
- IPA_MEM_STATS_QUOTA_AP, /* 0 canaries (IPA v4.0 and above) */
- IPA_MEM_STATS_TETHERING, /* 0 canaries (IPA v4.0 and above) */
+ IPA_MEM_MODEM, /* 0/2 canaries */
+ IPA_MEM_UC_EVENT_RING, /* 1 canary, optional */
+ IPA_MEM_PDN_CONFIG, /* 0/2 canaries (IPA v4.0+) */
+ IPA_MEM_STATS_QUOTA_MODEM, /* 2/4 canaries (IPA v4.0+) */
+ IPA_MEM_STATS_QUOTA_AP, /* 0 canaries, optional (IPA v4.0+) */
+ IPA_MEM_STATS_TETHERING, /* 0 canaries (IPA v4.0+) */
+ IPA_MEM_STATS_DROP, /* 0 canaries, optional (IPA v4.0+) */
+ /* The next 5 filter and route statistics regions are optional */
IPA_MEM_STATS_V4_FILTER, /* 0 canaries (IPA v4.0-v4.2) */
IPA_MEM_STATS_V6_FILTER, /* 0 canaries (IPA v4.0-v4.2) */
IPA_MEM_STATS_V4_ROUTE, /* 0 canaries (IPA v4.0-v4.2) */
IPA_MEM_STATS_V6_ROUTE, /* 0 canaries (IPA v4.0-v4.2) */
- IPA_MEM_STATS_FILTER_ROUTE, /* 0 canaries (IPA v4.5 and above) */
- IPA_MEM_STATS_DROP, /* 0 canaries (IPA v4.0 and above) */
- IPA_MEM_MODEM, /* 0/2 canaries */
- IPA_MEM_UC_EVENT_RING, /* 1 canary */
+ IPA_MEM_STATS_FILTER_ROUTE, /* 0 canaries (IPA v4.5+) */
+ IPA_MEM_NAT_TABLE, /* 4 canaries, optional (IPA v4.5+) */
+ IPA_MEM_END_MARKER, /* 1 canary (not a real region) */
IPA_MEM_COUNT, /* Number of regions (not an index) */
};
/**
* struct ipa_mem - IPA local memory region description
+ * @id: memory region identifier
* @offset: offset in IPA memory space to base of the region
* @size: size in bytes of the region
* @canary_count: Number of 32-bit "canary" values that precede region
*/
struct ipa_mem {
+ enum ipa_mem_id id;
u32 offset;
u16 size;
u16 canary_count;
};
+const struct ipa_mem *ipa_mem_find(struct ipa *ipa, enum ipa_mem_id mem_id);
+
int ipa_mem_config(struct ipa *ipa);
void ipa_mem_deconfig(struct ipa *ipa);
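
ipa_mem_find() is the accessor most of the hunks below switch to; its definition lives in ipa_mem.c earlier in this patch and is not part of this excerpt. The shape callers depend on, a linear scan that returns NULL for regions the platform does not define, is roughly:

	/* Assumed shape of the accessor declared above: return the entry in
	 * the platform's region array whose ID matches, or NULL if the
	 * region is not defined for this platform.
	 */
	const struct ipa_mem *ipa_mem_find(struct ipa *ipa, enum ipa_mem_id mem_id)
	{
		u32 i;

		for (i = 0; i < ipa->mem_count; i++) {
			const struct ipa_mem *mem = &ipa->mem[i];

			if (mem->id == mem_id)
				return mem;
		}

		return NULL;
	}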
diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c
index 593665efbcf9..4661105ce7ab 100644
--- a/drivers/net/ipa/ipa_qmi.c
+++ b/drivers/net/ipa/ipa_qmi.c
@@ -298,32 +298,32 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
req.platform_type_valid = 1;
req.platform_type = IPA_QMI_PLATFORM_TYPE_MSM_ANDROID;
- mem = &ipa->mem[IPA_MEM_MODEM_HEADER];
+ mem = ipa_mem_find(ipa, IPA_MEM_MODEM_HEADER);
if (mem->size) {
req.hdr_tbl_info_valid = 1;
req.hdr_tbl_info.start = ipa->mem_offset + mem->offset;
req.hdr_tbl_info.end = req.hdr_tbl_info.start + mem->size - 1;
}
- mem = &ipa->mem[IPA_MEM_V4_ROUTE];
+ mem = ipa_mem_find(ipa, IPA_MEM_V4_ROUTE);
req.v4_route_tbl_info_valid = 1;
req.v4_route_tbl_info.start = ipa->mem_offset + mem->offset;
req.v4_route_tbl_info.count = mem->size / sizeof(__le64);
- mem = &ipa->mem[IPA_MEM_V6_ROUTE];
+ mem = ipa_mem_find(ipa, IPA_MEM_V6_ROUTE);
req.v6_route_tbl_info_valid = 1;
req.v6_route_tbl_info.start = ipa->mem_offset + mem->offset;
req.v6_route_tbl_info.count = mem->size / sizeof(__le64);
- mem = &ipa->mem[IPA_MEM_V4_FILTER];
+ mem = ipa_mem_find(ipa, IPA_MEM_V4_FILTER);
req.v4_filter_tbl_start_valid = 1;
req.v4_filter_tbl_start = ipa->mem_offset + mem->offset;
- mem = &ipa->mem[IPA_MEM_V6_FILTER];
+ mem = ipa_mem_find(ipa, IPA_MEM_V6_FILTER);
req.v6_filter_tbl_start_valid = 1;
req.v6_filter_tbl_start = ipa->mem_offset + mem->offset;
- mem = &ipa->mem[IPA_MEM_MODEM];
+ mem = ipa_mem_find(ipa, IPA_MEM_MODEM);
if (mem->size) {
req.modem_mem_info_valid = 1;
req.modem_mem_info.start = ipa->mem_offset + mem->offset;
@@ -336,7 +336,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
/* skip_uc_load_valid and skip_uc_load are set above */
- mem = &ipa->mem[IPA_MEM_MODEM_PROC_CTX];
+ mem = ipa_mem_find(ipa, IPA_MEM_MODEM_PROC_CTX);
if (mem->size) {
req.hdr_proc_ctx_tbl_info_valid = 1;
req.hdr_proc_ctx_tbl_info.start =
@@ -347,7 +347,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
/* Nothing to report for the compression table (zip_tbl_info) */
- mem = &ipa->mem[IPA_MEM_V4_ROUTE_HASHED];
+ mem = ipa_mem_find(ipa, IPA_MEM_V4_ROUTE_HASHED);
if (mem->size) {
req.v4_hash_route_tbl_info_valid = 1;
req.v4_hash_route_tbl_info.start =
@@ -355,7 +355,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
req.v4_hash_route_tbl_info.count = mem->size / sizeof(__le64);
}
- mem = &ipa->mem[IPA_MEM_V6_ROUTE_HASHED];
+ mem = ipa_mem_find(ipa, IPA_MEM_V6_ROUTE_HASHED);
if (mem->size) {
req.v6_hash_route_tbl_info_valid = 1;
req.v6_hash_route_tbl_info.start =
@@ -363,22 +363,21 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
req.v6_hash_route_tbl_info.count = mem->size / sizeof(__le64);
}
- mem = &ipa->mem[IPA_MEM_V4_FILTER_HASHED];
+ mem = ipa_mem_find(ipa, IPA_MEM_V4_FILTER_HASHED);
if (mem->size) {
req.v4_hash_filter_tbl_start_valid = 1;
req.v4_hash_filter_tbl_start = ipa->mem_offset + mem->offset;
}
- mem = &ipa->mem[IPA_MEM_V6_FILTER_HASHED];
+ mem = ipa_mem_find(ipa, IPA_MEM_V6_FILTER_HASHED);
if (mem->size) {
req.v6_hash_filter_tbl_start_valid = 1;
req.v6_hash_filter_tbl_start = ipa->mem_offset + mem->offset;
}
- /* None of the stats fields are valid (IPA v4.0 and above) */
-
+ /* The stats fields are only valid for IPA v4.0+ */
if (ipa->version >= IPA_VERSION_4_0) {
- mem = &ipa->mem[IPA_MEM_STATS_QUOTA_MODEM];
+ mem = ipa_mem_find(ipa, IPA_MEM_STATS_QUOTA_MODEM);
if (mem->size) {
req.hw_stats_quota_base_addr_valid = 1;
req.hw_stats_quota_base_addr =
@@ -387,8 +386,9 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
req.hw_stats_quota_size = ipa->mem_offset + mem->size;
}
- mem = &ipa->mem[IPA_MEM_STATS_DROP];
- if (mem->size) {
+ /* If the DROP stats region is defined, include it */
+ mem = ipa_mem_find(ipa, IPA_MEM_STATS_DROP);
+ if (mem && mem->size) {
req.hw_stats_drop_base_addr_valid = 1;
req.hw_stats_drop_base_addr =
ipa->mem_offset + mem->offset;
diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h
index 286ea9634c49..b89dec5865a5 100644
--- a/drivers/net/ipa/ipa_reg.h
+++ b/drivers/net/ipa/ipa_reg.h
@@ -368,6 +368,7 @@ enum ipa_cs_offload_en {
IPA_CS_OFFLOAD_NONE = 0x0,
IPA_CS_OFFLOAD_UL = 0x1, /* Before IPA v4.5 (TX) */
IPA_CS_OFFLOAD_DL = 0x2, /* Before IPA v4.5 (RX) */
+ IPA_CS_OFFLOAD_INLINE = 0x1, /* IPA v4.5 (TX and RX) */
};
/* Valid only for TX (IPA consumer) endpoints */
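
The new IPA_CS_OFFLOAD_INLINE value reuses encoding 0x1, so the choice must be made by IPA version rather than by direction alone. A sketch of how an endpoint might pick the value (the helper name is hypothetical):

	/* Hypothetical helper: starting with IPA v4.5 a single "inline" value
	 * covers both directions; earlier versions use UL for TX and DL for RX.
	 */
	static enum ipa_cs_offload_en
	example_cs_offload(enum ipa_version version, bool toward_ipa)
	{
		if (version >= IPA_VERSION_4_5)
			return IPA_CS_OFFLOAD_INLINE;

		return toward_ipa ? IPA_CS_OFFLOAD_UL : IPA_CS_OFFLOAD_DL;
	}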
diff --git a/drivers/net/ipa/ipa_smp2p.c b/drivers/net/ipa/ipa_smp2p.c
index a5f7a79a1923..cf709df70d28 100644
--- a/drivers/net/ipa/ipa_smp2p.c
+++ b/drivers/net/ipa/ipa_smp2p.c
@@ -176,11 +176,8 @@ static int ipa_smp2p_irq_init(struct ipa_smp2p *smp2p, const char *name,
int ret;
ret = platform_get_irq_byname(smp2p->ipa->pdev, name);
- if (ret <= 0) {
- dev_err(dev, "DT error %d getting \"%s\" IRQ property\n",
- ret, name);
+ if (ret <= 0)
return ret ? : -EINVAL;
- }
irq = ret;
ret = request_threaded_irq(irq, NULL, handler, 0, name, smp2p);
diff --git a/drivers/net/ipa/ipa_sysfs.c b/drivers/net/ipa/ipa_sysfs.c
new file mode 100644
index 000000000000..ff61dbdd70d8
--- /dev/null
+++ b/drivers/net/ipa/ipa_sysfs.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2021 Linaro Ltd. */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/sysfs.h>
+
+#include "ipa.h"
+#include "ipa_version.h"
+#include "ipa_sysfs.h"
+
+static const char *ipa_version_string(struct ipa *ipa)
+{
+ switch (ipa->version) {
+ case IPA_VERSION_3_0:
+ return "3.0";
+ case IPA_VERSION_3_1:
+ return "3.1";
+ case IPA_VERSION_3_5:
+ return "3.5";
+ case IPA_VERSION_3_5_1:
+ return "3.5.1";
+ case IPA_VERSION_4_0:
+ return "4.0";
+ case IPA_VERSION_4_1:
+ return "4.1";
+ case IPA_VERSION_4_2:
+ return "4.2";
+ case IPA_VERSION_4_5:
+ return "4.5";
+ case IPA_VERSION_4_7:
+ return "4.7";
+ case IPA_VERSION_4_9:
+ return "4.9";
+ case IPA_VERSION_4_11:
+ return "4.11";
+ default:
+ return "0.0"; /* Won't happen (checked at probe time) */
+ }
+}
+
+static ssize_t
+version_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct ipa *ipa = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", ipa_version_string(ipa));
+}
+
+static DEVICE_ATTR_RO(version);
+
+static struct attribute *ipa_attrs[] = {
+ &dev_attr_version.attr,
+ NULL
+};
+
+const struct attribute_group ipa_attribute_group = {
+ .attrs = ipa_attrs,
+};
+
+static const char *ipa_offload_string(struct ipa *ipa)
+{
+ return ipa->version < IPA_VERSION_4_5 ? "MAPv4" : "MAPv5";
+}
+
+static ssize_t rx_offload_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ipa *ipa = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", ipa_offload_string(ipa));
+}
+
+static DEVICE_ATTR_RO(rx_offload);
+
+static ssize_t tx_offload_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ipa *ipa = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", ipa_offload_string(ipa));
+}
+
+static DEVICE_ATTR_RO(tx_offload);
+
+static struct attribute *ipa_feature_attrs[] = {
+ &dev_attr_rx_offload.attr,
+ &dev_attr_tx_offload.attr,
+ NULL
+};
+
+const struct attribute_group ipa_feature_attribute_group = {
+ .name = "feature",
+ .attrs = ipa_feature_attrs,
+};
+
+static ssize_t
+ipa_endpoint_id_show(struct ipa *ipa, char *buf, enum ipa_endpoint_name name)
+{
+ u32 endpoint_id = ipa->name_map[name]->endpoint_id;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", endpoint_id);
+}
+
+static ssize_t rx_endpoint_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ipa *ipa = dev_get_drvdata(dev);
+
+ return ipa_endpoint_id_show(ipa, buf, IPA_ENDPOINT_AP_MODEM_RX);
+}
+
+static DEVICE_ATTR_RO(rx_endpoint_id);
+
+static ssize_t tx_endpoint_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ipa *ipa = dev_get_drvdata(dev);
+
+ return ipa_endpoint_id_show(ipa, buf, IPA_ENDPOINT_AP_MODEM_TX);
+}
+
+static DEVICE_ATTR_RO(tx_endpoint_id);
+
+static struct attribute *ipa_modem_attrs[] = {
+ &dev_attr_rx_endpoint_id.attr,
+ &dev_attr_tx_endpoint_id.attr,
+ NULL
+};
+
+const struct attribute_group ipa_modem_attribute_group = {
+ .name = "modem",
+ .attrs = ipa_modem_attrs,
+};
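
These three attribute groups are presumably hooked up through the platform driver's dev_groups pointer elsewhere in the patch (that hunk is not shown here). A sketch of that wiring, with the probe/remove/match names assumed rather than quoted from the driver:

	/* Sketch of the assumed wiring in ipa_main.c (not shown in this
	 * excerpt): listing the groups in dev_groups makes the driver core
	 * create the sysfs files automatically when the device is bound.
	 */
	static const struct attribute_group *ipa_attribute_groups[] = {
		&ipa_attribute_group,
		&ipa_feature_attribute_group,
		&ipa_modem_attribute_group,
		NULL,
	};

	static struct platform_driver ipa_driver = {
		.probe		= ipa_probe,		/* assumed names */
		.remove		= ipa_remove,
		.driver		= {
			.name		= "ipa",
			.of_match_table	= ipa_match,
			.dev_groups	= ipa_attribute_groups,
		},
	};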
diff --git a/drivers/net/ipa/ipa_sysfs.h b/drivers/net/ipa/ipa_sysfs.h
new file mode 100644
index 000000000000..b34e5650bf8c
--- /dev/null
+++ b/drivers/net/ipa/ipa_sysfs.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2021 Linaro Ltd.
+ */
+#ifndef _IPA_SYSFS_H_
+#define _IPA_SYSFS_H_
+
+struct attribute_group;
+
+extern const struct attribute_group ipa_attribute_group;
+extern const struct attribute_group ipa_feature_attribute_group;
+extern const struct attribute_group ipa_modem_attribute_group;
+
+#endif /* _IPA_SYSFS_H_ */
diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
index 3168d72f4245..c617a9156f26 100644
--- a/drivers/net/ipa/ipa_table.c
+++ b/drivers/net/ipa/ipa_table.c
@@ -150,29 +150,16 @@ static void ipa_table_validate_build(void)
}
static bool
-ipa_table_valid_one(struct ipa *ipa, bool route, bool ipv6, bool hashed)
+ipa_table_valid_one(struct ipa *ipa, enum ipa_mem_id mem_id, bool route)
{
+ const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);
struct device *dev = &ipa->pdev->dev;
- const struct ipa_mem *mem;
u32 size;
- if (route) {
- if (ipv6)
- mem = hashed ? &ipa->mem[IPA_MEM_V6_ROUTE_HASHED]
- : &ipa->mem[IPA_MEM_V6_ROUTE];
- else
- mem = hashed ? &ipa->mem[IPA_MEM_V4_ROUTE_HASHED]
- : &ipa->mem[IPA_MEM_V4_ROUTE];
+ if (route)
size = IPA_ROUTE_COUNT_MAX * sizeof(__le64);
- } else {
- if (ipv6)
- mem = hashed ? &ipa->mem[IPA_MEM_V6_FILTER_HASHED]
- : &ipa->mem[IPA_MEM_V6_FILTER];
- else
- mem = hashed ? &ipa->mem[IPA_MEM_V4_FILTER_HASHED]
- : &ipa->mem[IPA_MEM_V4_FILTER];
+ else
size = (1 + IPA_FILTER_COUNT_MAX) * sizeof(__le64);
- }
if (!ipa_cmd_table_valid(ipa, mem, route, ipv6, hashed))
return false;
@@ -185,9 +172,8 @@ ipa_table_valid_one(struct ipa *ipa, bool route, bool ipv6, bool hashed)
if (hashed && !mem->size)
return true;
- dev_err(dev, "IPv%c %s%s table region size 0x%02x, expected 0x%02x\n",
- ipv6 ? '6' : '4', hashed ? "hashed " : "",
- route ? "route" : "filter", mem->size, size);
+ dev_err(dev, "%s table region %u size 0x%02x, expected 0x%02x\n",
+ route ? "route" : "filter", mem_id, mem->size, size);
return false;
}
@@ -195,16 +181,16 @@ ipa_table_valid_one(struct ipa *ipa, bool route, bool ipv6, bool hashed)
/* Verify the filter and route table memory regions are the expected size */
bool ipa_table_valid(struct ipa *ipa)
{
- bool valid = true;
+ bool valid;
- valid = valid && ipa_table_valid_one(ipa, false, false, false);
- valid = valid && ipa_table_valid_one(ipa, false, false, true);
- valid = valid && ipa_table_valid_one(ipa, false, true, false);
- valid = valid && ipa_table_valid_one(ipa, false, true, true);
- valid = valid && ipa_table_valid_one(ipa, true, false, false);
- valid = valid && ipa_table_valid_one(ipa, true, false, true);
- valid = valid && ipa_table_valid_one(ipa, true, true, false);
- valid = valid && ipa_table_valid_one(ipa, true, true, true);
+ valid = ipa_table_valid_one(ipa, IPA_MEM_V4_FILTER, false);
+ valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V4_FILTER_HASHED, false);
+ valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_FILTER, false);
+ valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_FILTER_HASHED, false);
+ valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V4_ROUTE, true);
+ valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V4_ROUTE_HASHED, true);
+ valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_ROUTE, true);
+ valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_ROUTE_HASHED, true);
return valid;
}
@@ -256,14 +242,15 @@ static dma_addr_t ipa_table_addr(struct ipa *ipa, bool filter_mask, u16 count)
}
static void ipa_table_reset_add(struct gsi_trans *trans, bool filter,
- u16 first, u16 count, const struct ipa_mem *mem)
+ u16 first, u16 count, enum ipa_mem_id mem_id)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);
dma_addr_t addr;
u32 offset;
u16 size;
- /* Nothing to do if the table memory regions is empty */
+ /* Nothing to do if the table memory region is empty */
if (!mem->size)
return;
@@ -282,16 +269,13 @@ static void ipa_table_reset_add(struct gsi_trans *trans, bool filter,
* for the IPv4 and IPv6 non-hashed and hashed filter tables.
*/
static int
-ipa_filter_reset_table(struct ipa *ipa, const struct ipa_mem *mem, bool modem)
+ipa_filter_reset_table(struct ipa *ipa, enum ipa_mem_id mem_id, bool modem)
{
u32 ep_mask = ipa->filter_map;
u32 count = hweight32(ep_mask);
struct gsi_trans *trans;
enum gsi_ee_id ee_id;
- if (!mem->size)
- return 0;
-
trans = ipa_cmd_trans_alloc(ipa, count);
if (!trans) {
dev_err(&ipa->pdev->dev,
@@ -311,7 +295,7 @@ ipa_filter_reset_table(struct ipa *ipa, const struct ipa_mem *mem, bool modem)
if (endpoint->ee_id != ee_id)
continue;
- ipa_table_reset_add(trans, true, endpoint_id, 1, mem);
+ ipa_table_reset_add(trans, true, endpoint_id, 1, mem_id);
}
gsi_trans_commit_wait(trans);
@@ -327,20 +311,18 @@ static int ipa_filter_reset(struct ipa *ipa, bool modem)
{
int ret;
- ret = ipa_filter_reset_table(ipa, &ipa->mem[IPA_MEM_V4_FILTER], modem);
+ ret = ipa_filter_reset_table(ipa, IPA_MEM_V4_FILTER, modem);
if (ret)
return ret;
- ret = ipa_filter_reset_table(ipa, &ipa->mem[IPA_MEM_V4_FILTER_HASHED],
- modem);
+ ret = ipa_filter_reset_table(ipa, IPA_MEM_V4_FILTER_HASHED, modem);
if (ret)
return ret;
- ret = ipa_filter_reset_table(ipa, &ipa->mem[IPA_MEM_V6_FILTER], modem);
+ ret = ipa_filter_reset_table(ipa, IPA_MEM_V6_FILTER, modem);
if (ret)
return ret;
- ret = ipa_filter_reset_table(ipa, &ipa->mem[IPA_MEM_V6_FILTER_HASHED],
- modem);
+ ret = ipa_filter_reset_table(ipa, IPA_MEM_V6_FILTER_HASHED, modem);
return ret;
}
@@ -371,15 +353,13 @@ static int ipa_route_reset(struct ipa *ipa, bool modem)
count = IPA_ROUTE_AP_COUNT;
}
+ ipa_table_reset_add(trans, false, first, count, IPA_MEM_V4_ROUTE);
ipa_table_reset_add(trans, false, first, count,
- &ipa->mem[IPA_MEM_V4_ROUTE]);
- ipa_table_reset_add(trans, false, first, count,
- &ipa->mem[IPA_MEM_V4_ROUTE_HASHED]);
+ IPA_MEM_V4_ROUTE_HASHED);
+ ipa_table_reset_add(trans, false, first, count, IPA_MEM_V6_ROUTE);
ipa_table_reset_add(trans, false, first, count,
- &ipa->mem[IPA_MEM_V6_ROUTE]);
- ipa_table_reset_add(trans, false, first, count,
- &ipa->mem[IPA_MEM_V6_ROUTE_HASHED]);
+ IPA_MEM_V6_ROUTE_HASHED);
gsi_trans_commit_wait(trans);
@@ -433,10 +413,12 @@ int ipa_table_hash_flush(struct ipa *ipa)
static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
enum ipa_cmd_opcode opcode,
- const struct ipa_mem *mem,
- const struct ipa_mem *hash_mem)
+ enum ipa_mem_id mem_id,
+ enum ipa_mem_id hash_mem_id)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ const struct ipa_mem *hash_mem = ipa_mem_find(ipa, hash_mem_id);
+ const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);
dma_addr_t hash_addr;
dma_addr_t addr;
u16 hash_count;
@@ -477,20 +459,16 @@ int ipa_table_setup(struct ipa *ipa)
}
ipa_table_init_add(trans, false, IPA_CMD_IP_V4_ROUTING_INIT,
- &ipa->mem[IPA_MEM_V4_ROUTE],
- &ipa->mem[IPA_MEM_V4_ROUTE_HASHED]);
+ IPA_MEM_V4_ROUTE, IPA_MEM_V4_ROUTE_HASHED);
ipa_table_init_add(trans, false, IPA_CMD_IP_V6_ROUTING_INIT,
- &ipa->mem[IPA_MEM_V6_ROUTE],
- &ipa->mem[IPA_MEM_V6_ROUTE_HASHED]);
+ IPA_MEM_V6_ROUTE, IPA_MEM_V6_ROUTE_HASHED);
ipa_table_init_add(trans, true, IPA_CMD_IP_V4_FILTER_INIT,
- &ipa->mem[IPA_MEM_V4_FILTER],
- &ipa->mem[IPA_MEM_V4_FILTER_HASHED]);
+ IPA_MEM_V4_FILTER, IPA_MEM_V4_FILTER_HASHED);
ipa_table_init_add(trans, true, IPA_CMD_IP_V6_FILTER_INIT,
- &ipa->mem[IPA_MEM_V6_FILTER],
- &ipa->mem[IPA_MEM_V6_FILTER_HASHED]);
+ IPA_MEM_V6_FILTER, IPA_MEM_V6_FILTER_HASHED);
gsi_trans_commit_wait(trans);
diff --git a/drivers/net/ipa/ipa_uc.c b/drivers/net/ipa/ipa_uc.c
index 2756363e6938..fd9219863234 100644
--- a/drivers/net/ipa/ipa_uc.c
+++ b/drivers/net/ipa/ipa_uc.c
@@ -116,7 +116,8 @@ enum ipa_uc_event {
static struct ipa_uc_mem_area *ipa_uc_shared(struct ipa *ipa)
{
- u32 offset = ipa->mem_offset + ipa->mem[IPA_MEM_UC_SHARED].offset;
+ const struct ipa_mem *mem = ipa_mem_find(ipa, IPA_MEM_UC_SHARED);
+ u32 offset = ipa->mem_offset + mem->offset;
return ipa->mem_virt + offset;
}
diff --git a/drivers/net/ipa/ipa_version.h b/drivers/net/ipa/ipa_version.h
index ee2b3d02f3cd..6c16c895d842 100644
--- a/drivers/net/ipa/ipa_version.h
+++ b/drivers/net/ipa/ipa_version.h
@@ -21,6 +21,8 @@
* @IPA_VERSION_4_11: IPA version 4.11/GSI version 2.11 (2.1.1)
*
* Defines the version of IPA (and GSI) hardware present on the platform.
+ * Please update ipa_version_valid() and ipa_version_string() whenever a
+ * new version is added.
*/
enum ipa_version {
IPA_VERSION_3_0,