Diffstat (limited to 'drivers/net/ethernet/marvell')
-rw-r--r--  drivers/net/ethernet/marvell/Kconfig | 3
-rw-r--r--  drivers/net/ethernet/marvell/Makefile | 1
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c | 6
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 56
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2.h | 28
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 482
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/Kconfig | 17
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/Makefile | 6
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/Makefile | 11
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 721
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.h | 111
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h | 186
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/common.h | 211
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.c | 303
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.h | 525
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/npc.h | 262
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h | 5709
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 1772
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 368
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c | 515
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 1959
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c | 472
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c | 816
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c | 71
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h | 502
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h | 917
-rw-r--r--  drivers/net/ethernet/marvell/pxa168_eth.c | 7
27 files changed, 15811 insertions, 226 deletions
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index f33fd22b351c..3238aa7f5dac 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -167,4 +167,7 @@ config SKY2_DEBUG
If unsure, say N.
+
+source "drivers/net/ethernet/marvell/octeontx2/Kconfig"
+
endif # NET_VENDOR_MARVELL
diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile
index 55d4d10aa7d3..89dea7284d5b 100644
--- a/drivers/net/ethernet/marvell/Makefile
+++ b/drivers/net/ethernet/marvell/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_MVPP2) += mvpp2/
obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
obj-$(CONFIG_SKGE) += skge.o
obj-$(CONFIG_SKY2) += sky2.o
+obj-y += octeontx2/
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 62f204f32316..1e9bcbdc6a90 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2733,17 +2733,17 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
memset(&res, 0, sizeof(res));
if (of_irq_to_resource(pnp, 0, &res) <= 0) {
- dev_err(&pdev->dev, "missing interrupt on %s\n", pnp->name);
+ dev_err(&pdev->dev, "missing interrupt on %pOFn\n", pnp);
return -EINVAL;
}
if (of_property_read_u32(pnp, "reg", &ppd.port_number)) {
- dev_err(&pdev->dev, "missing reg property on %s\n", pnp->name);
+ dev_err(&pdev->dev, "missing reg property on %pOFn\n", pnp);
return -EINVAL;
}
if (ppd.port_number >= 3) {
- dev_err(&pdev->dev, "invalid reg property on %s\n", pnp->name);
+ dev_err(&pdev->dev, "invalid reg property on %pOFn\n", pnp);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index bc80a678abc3..5bfd349bf41a 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -221,6 +221,8 @@
#define MVNETA_GMAC_AN_FLOW_CTRL_EN BIT(11)
#define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
#define MVNETA_GMAC_AN_DUPLEX_EN BIT(13)
+#define MVNETA_GMAC_CTRL_4 0x2c90
+#define MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE BIT(1)
#define MVNETA_MIB_COUNTERS_BASE 0x3000
#define MVNETA_MIB_LATE_COLLISION 0x7c
#define MVNETA_DA_FILT_SPEC_MCAST 0x3400
@@ -1890,8 +1892,8 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
if (!data || !(rx_desc->buf_phys_addr))
continue;
- dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
- MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
+ dma_unmap_page(pp->dev->dev.parent, rx_desc->buf_phys_addr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
__free_page(data);
}
}
@@ -2008,8 +2010,8 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
skb_add_rx_frag(rxq->skb, frag_num, page,
frag_offset, frag_size,
PAGE_SIZE);
- dma_unmap_single(dev->dev.parent, phys_addr,
- PAGE_SIZE, DMA_FROM_DEVICE);
+ dma_unmap_page(dev->dev.parent, phys_addr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
rxq->left_size -= frag_size;
}
} else {
@@ -2039,9 +2041,8 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
frag_offset, frag_size,
PAGE_SIZE);
- dma_unmap_single(dev->dev.parent, phys_addr,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
+ dma_unmap_page(dev->dev.parent, phys_addr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
rxq->left_size -= frag_size;
}
@@ -2065,10 +2066,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
/* Linux processing */
rxq->skb->protocol = eth_type_trans(rxq->skb, dev);
- if (dev->features & NETIF_F_GRO)
- napi_gro_receive(napi, rxq->skb);
- else
- netif_receive_skb(rxq->skb);
+ napi_gro_receive(napi, rxq->skb);
/* clean uncomplete skb pointer in queue */
rxq->skb = NULL;
@@ -2396,7 +2394,7 @@ error:
}
/* Main tx processing */
-static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
{
struct mvneta_port *pp = netdev_priv(dev);
u16 txq_id = skb_get_queue_mapping(skb);
@@ -2510,12 +2508,13 @@ static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
{
struct mvneta_tx_queue *txq;
struct netdev_queue *nq;
+ int cpu = smp_processor_id();
while (cause_tx_done) {
txq = mvneta_tx_done_policy(pp, cause_tx_done);
nq = netdev_get_tx_queue(pp->dev, txq->id);
- __netif_tx_lock(nq, smp_processor_id());
+ __netif_tx_lock(nq, cpu);
if (txq->count)
mvneta_txq_done(pp, txq);
@@ -3344,6 +3343,7 @@ static void mvneta_validate(struct net_device *ndev, unsigned long *supported,
if (state->interface != PHY_INTERFACE_MODE_NA &&
state->interface != PHY_INTERFACE_MODE_QSGMII &&
state->interface != PHY_INTERFACE_MODE_SGMII &&
+ state->interface != PHY_INTERFACE_MODE_2500BASEX &&
!phy_interface_mode_is_8023z(state->interface) &&
!phy_interface_mode_is_rgmii(state->interface)) {
bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
@@ -3356,9 +3356,15 @@ static void mvneta_validate(struct net_device *ndev, unsigned long *supported,
/* Asymmetric pause is unsupported */
phylink_set(mask, Pause);
- /* Half-duplex at speeds higher than 100Mbit is unsupported */
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
+
+ /* We cannot use 1Gbps when using the 2.5G interface. */
+ if (state->interface == PHY_INTERFACE_MODE_2500BASEX) {
+ phylink_set(mask, 2500baseT_Full);
+ phylink_set(mask, 2500baseX_Full);
+ } else {
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+ }
if (!phy_interface_mode_is_8023z(state->interface)) {
/* 10M and 100M are only supported in non-802.3z mode */
@@ -3419,12 +3425,14 @@ static void mvneta_mac_config(struct net_device *ndev, unsigned int mode,
struct mvneta_port *pp = netdev_priv(ndev);
u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
+ u32 new_ctrl4, gmac_ctrl4 = mvreg_read(pp, MVNETA_GMAC_CTRL_4);
u32 new_clk, gmac_clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
u32 new_an, gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
new_ctrl0 = gmac_ctrl0 & ~MVNETA_GMAC0_PORT_1000BASE_X;
new_ctrl2 = gmac_ctrl2 & ~(MVNETA_GMAC2_INBAND_AN_ENABLE |
MVNETA_GMAC2_PORT_RESET);
+ new_ctrl4 = gmac_ctrl4 & ~(MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE);
new_clk = gmac_clk & ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
new_an = gmac_an & ~(MVNETA_GMAC_INBAND_AN_ENABLE |
MVNETA_GMAC_INBAND_RESTART_AN |
@@ -3457,7 +3465,7 @@ static void mvneta_mac_config(struct net_device *ndev, unsigned int mode,
if (state->duplex)
new_an |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
- if (state->speed == SPEED_1000)
+ if (state->speed == SPEED_1000 || state->speed == SPEED_2500)
new_an |= MVNETA_GMAC_CONFIG_GMII_SPEED;
else if (state->speed == SPEED_100)
new_an |= MVNETA_GMAC_CONFIG_MII_SPEED;
@@ -3496,10 +3504,18 @@ static void mvneta_mac_config(struct net_device *ndev, unsigned int mode,
MVNETA_GMAC_FORCE_LINK_DOWN);
}
+ /* When at 2.5G, the link partner can send frames with shortened
+ * preambles.
+ */
+ if (state->speed == SPEED_2500)
+ new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE;
+
if (new_ctrl0 != gmac_ctrl0)
mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0);
if (new_ctrl2 != gmac_ctrl2)
mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2);
+ if (new_ctrl4 != gmac_ctrl4)
+ mvreg_write(pp, MVNETA_GMAC_CTRL_4, new_ctrl4);
if (new_clk != gmac_clk)
mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, new_clk);
if (new_an != gmac_an)
@@ -3793,9 +3809,6 @@ static int mvneta_open(struct net_device *dev)
goto err_free_online_hp;
}
- /* In default link is down */
- netif_carrier_off(pp->dev);
-
ret = mvneta_mdio_probe(pp);
if (ret < 0) {
netdev_err(dev, "cannot probe MDIO bus\n");
@@ -4598,7 +4611,8 @@ static int mvneta_probe(struct platform_device *pdev)
}
}
- dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO;
+ dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO | NETIF_F_RXCSUM;
dev->hw_features |= dev->features;
dev->vlan_features |= dev->features;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index 67b9e81b7c02..176c6b56fdcc 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -253,7 +253,8 @@
#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff)
#define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000)
#define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port))
-#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
+#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(version) \
+ ((version) == MVPP21 ? 0xffff : 0xff)
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET 16
#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24)
@@ -330,6 +331,7 @@
#define MVPP2_TXP_SCHED_ENQ_MASK 0xff
#define MVPP2_TXP_SCHED_DISQ_OFFSET 8
#define MVPP2_TXP_SCHED_CMD_1_REG 0x8010
+#define MVPP2_TXP_SCHED_FIXED_PRIO_REG 0x8014
#define MVPP2_TXP_SCHED_PERIOD_REG 0x8018
#define MVPP2_TXP_SCHED_MTU_REG 0x801c
#define MVPP2_TXP_MTU_MAX 0x7FFFF
@@ -613,6 +615,7 @@
/* Port flags */
#define MVPP2_F_LOOPBACK BIT(0)
+#define MVPP2_F_DT_COMPAT BIT(1)
/* Marvell tag types */
enum mvpp2_tag_type {
@@ -662,7 +665,7 @@ enum mvpp2_prs_l3_cast {
#define MVPP21_ADDR_SPACE_SZ 0
#define MVPP22_ADDR_SPACE_SZ SZ_64K
-#define MVPP2_MAX_THREADS 8
+#define MVPP2_MAX_THREADS 9
#define MVPP2_MAX_QVECS MVPP2_MAX_THREADS
/* GMAC MIB Counters register definitions */
@@ -734,6 +737,11 @@ struct mvpp2 {
int port_count;
struct mvpp2_port *port_list[MVPP2_MAX_PORTS];
+ /* Number of Tx threads used */
+ unsigned int nthreads;
+ /* Map of threads needing locking */
+ unsigned long lock_map;
+
/* Aggregated TXQs */
struct mvpp2_tx_queue *aggr_txqs;
@@ -823,6 +831,12 @@ struct mvpp2_port {
/* Per-CPU port control */
struct mvpp2_port_pcpu __percpu *pcpu;
+ /* Protect the BM refills and the Tx paths when a thread is used on more
+ * than a single CPU.
+ */
+ spinlock_t bm_lock[MVPP2_MAX_THREADS];
+ spinlock_t tx_lock[MVPP2_MAX_THREADS];
+
/* Flags */
unsigned long flags;
@@ -969,7 +983,7 @@ struct mvpp2_txq_pcpu_buf {
/* Per-CPU Tx queue control */
struct mvpp2_txq_pcpu {
- int cpu;
+ unsigned int thread;
/* Number of Tx DMA descriptors in the descriptor ring */
int size;
@@ -1095,14 +1109,6 @@ struct mvpp2_bm_pool {
void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data);
u32 mvpp2_read(struct mvpp2 *priv, u32 offset);
-u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset);
-
-void mvpp2_percpu_write(struct mvpp2 *priv, int cpu, u32 offset, u32 data);
-u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu, u32 offset);
-
-void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu, u32 offset,
- u32 data);
-
void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name);
void mvpp2_dbgfs_cleanup(struct mvpp2 *priv);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 32d785b616e1..14f9679c957c 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -58,6 +58,8 @@ static struct {
*/
static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
const struct phylink_link_state *state);
+static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode,
+ phy_interface_t interface, struct phy_device *phy);
/* Queue modes */
#define MVPP2_QDIST_SINGLE_MODE 0
@@ -80,13 +82,19 @@ u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
return readl(priv->swth_base[0] + offset);
}
-u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
+static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
{
return readl_relaxed(priv->swth_base[0] + offset);
}
+
+static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu)
+{
+ return cpu % priv->nthreads;
+}
+
/* These accessors should be used to access:
*
- * - per-CPU registers, where each CPU has its own copy of the
+ * - per-thread registers, where each thread has its own copy of the
* register.
*
* MVPP2_BM_VIRT_ALLOC_REG
@@ -102,8 +110,8 @@ u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
* MVPP2_TXQ_SENT_REG
* MVPP2_RXQ_NUM_REG
*
- * - global registers that must be accessed through a specific CPU
- * window, because they are related to an access to a per-CPU
+ * - global registers that must be accessed through a specific thread
+ * window, because they are related to an access to a per-thread
* register
*
* MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG)
@@ -120,28 +128,28 @@ u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
* MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
* MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
*/
-void mvpp2_percpu_write(struct mvpp2 *priv, int cpu,
+static void mvpp2_thread_write(struct mvpp2 *priv, unsigned int thread,
u32 offset, u32 data)
{
- writel(data, priv->swth_base[cpu] + offset);
+ writel(data, priv->swth_base[thread] + offset);
}
-u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu,
+static u32 mvpp2_thread_read(struct mvpp2 *priv, unsigned int thread,
u32 offset)
{
- return readl(priv->swth_base[cpu] + offset);
+ return readl(priv->swth_base[thread] + offset);
}
-void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu,
+static void mvpp2_thread_write_relaxed(struct mvpp2 *priv, unsigned int thread,
u32 offset, u32 data)
{
- writel_relaxed(data, priv->swth_base[cpu] + offset);
+ writel_relaxed(data, priv->swth_base[thread] + offset);
}
-static u32 mvpp2_percpu_read_relaxed(struct mvpp2 *priv, int cpu,
+static u32 mvpp2_thread_read_relaxed(struct mvpp2 *priv, unsigned int thread,
u32 offset)
{
- return readl_relaxed(priv->swth_base[cpu] + offset);
+ return readl_relaxed(priv->swth_base[thread] + offset);
}
static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
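Illustration (not part of the patch): mvpp2_cpu_to_thread() folds any CPU id into the [0, nthreads) range, so on systems with more CPUs than s/w threads several CPUs share one register window. A minimal sketch of a caller, assuming nthreads = 4 and given a struct mvpp2 *priv and a struct mvpp2_port *port:

/* CPUs 0..3 map to threads 0..3; CPU 4 wraps back to thread 0 */
unsigned int thread = mvpp2_cpu_to_thread(priv, smp_processor_id());

/* mask this thread's Rx/Tx interrupts through its own window */
mvpp2_thread_write(priv, thread, MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);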
@@ -383,17 +391,17 @@ static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
dma_addr_t *dma_addr,
phys_addr_t *phys_addr)
{
- int cpu = get_cpu();
+ unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());
- *dma_addr = mvpp2_percpu_read(priv, cpu,
+ *dma_addr = mvpp2_thread_read(priv, thread,
MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
- *phys_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_VIRT_ALLOC_REG);
+ *phys_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_VIRT_ALLOC_REG);
if (priv->hw_version == MVPP22) {
u32 val;
u32 dma_addr_highbits, phys_addr_highbits;
- val = mvpp2_percpu_read(priv, cpu, MVPP22_BM_ADDR_HIGH_ALLOC);
+ val = mvpp2_thread_read(priv, thread, MVPP22_BM_ADDR_HIGH_ALLOC);
dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;
@@ -624,7 +632,11 @@ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
dma_addr_t buf_dma_addr,
phys_addr_t buf_phys_addr)
{
- int cpu = get_cpu();
+ unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
+ unsigned long flags = 0;
+
+ if (test_bit(thread, &port->priv->lock_map))
+ spin_lock_irqsave(&port->bm_lock[thread], flags);
if (port->priv->hw_version == MVPP22) {
u32 val = 0;
@@ -638,7 +650,7 @@ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
<< MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;
- mvpp2_percpu_write_relaxed(port->priv, cpu,
+ mvpp2_thread_write_relaxed(port->priv, thread,
MVPP22_BM_ADDR_HIGH_RLS_REG, val);
}
@@ -647,11 +659,14 @@ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
* descriptor. Instead of storing the virtual address, we
* store the physical address
*/
- mvpp2_percpu_write_relaxed(port->priv, cpu,
+ mvpp2_thread_write_relaxed(port->priv, thread,
MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
- mvpp2_percpu_write_relaxed(port->priv, cpu,
+ mvpp2_thread_write_relaxed(port->priv, thread,
MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
+ if (test_bit(thread, &port->priv->lock_map))
+ spin_unlock_irqrestore(&port->bm_lock[thread], flags);
+
put_cpu();
}
@@ -884,7 +899,7 @@ static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask));
}
-/* Mask the current CPU's Rx/Tx interrupts
+/* Mask the current thread's Rx/Tx interrupts
* Called by on_each_cpu(), guaranteed to run with migration disabled,
* using smp_processor_id() is OK.
*/
@@ -892,11 +907,16 @@ static void mvpp2_interrupts_mask(void *arg)
{
struct mvpp2_port *port = arg;
- mvpp2_percpu_write(port->priv, smp_processor_id(),
+ /* If the thread isn't used, don't do anything */
+ if (smp_processor_id() > port->priv->nthreads)
+ return;
+
+ mvpp2_thread_write(port->priv,
+ mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
}
-/* Unmask the current CPU's Rx/Tx interrupts.
+/* Unmask the current thread's Rx/Tx interrupts.
* Called by on_each_cpu(), guaranteed to run with migration disabled,
* using smp_processor_id() is OK.
*/
@@ -905,12 +925,17 @@ static void mvpp2_interrupts_unmask(void *arg)
struct mvpp2_port *port = arg;
u32 val;
+ /* If the thread isn't used, don't do anything */
+ if (smp_processor_id() > port->priv->nthreads)
+ return;
+
val = MVPP2_CAUSE_MISC_SUM_MASK |
- MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
+ MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
if (port->has_tx_irqs)
val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
- mvpp2_percpu_write(port->priv, smp_processor_id(),
+ mvpp2_thread_write(port->priv,
+ mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
}
@@ -926,7 +951,7 @@ mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
if (mask)
val = 0;
else
- val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
+ val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(MVPP22);
for (i = 0; i < port->nqvecs; i++) {
struct mvpp2_queue_vector *v = port->qvecs + i;
@@ -934,7 +959,7 @@ mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
if (v->type != MVPP2_QUEUE_VECTOR_SHARED)
continue;
- mvpp2_percpu_write(port->priv, v->sw_thread_id,
+ mvpp2_thread_write(port->priv, v->sw_thread_id,
MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
}
}
@@ -1423,6 +1448,9 @@ static void mvpp2_defaults_set(struct mvpp2_port *port)
tx_port_num);
mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
+ /* Set TXQ scheduling to Round-Robin */
+ mvpp2_write(port->priv, MVPP2_TXP_SCHED_FIXED_PRIO_REG, 0);
+
/* Close bandwidth for all queues */
for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
ptxq = mvpp2_txq_phys(port->id, queue);
@@ -1622,7 +1650,8 @@ mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
{
/* aggregated access - relevant TXQ number is written in TX desc */
- mvpp2_percpu_write(port->priv, smp_processor_id(),
+ mvpp2_thread_write(port->priv,
+ mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
MVPP2_AGGR_TXQ_UPDATE_REG, pending);
}
@@ -1632,14 +1661,15 @@ static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
* Called only from mvpp2_tx(), so migration is disabled, using
* smp_processor_id() is OK.
*/
-static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
+static int mvpp2_aggr_desc_num_check(struct mvpp2_port *port,
struct mvpp2_tx_queue *aggr_txq, int num)
{
if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) {
/* Update number of occupied aggregated Tx descriptors */
- int cpu = smp_processor_id();
- u32 val = mvpp2_read_relaxed(priv,
- MVPP2_AGGR_TXQ_STATUS_REG(cpu));
+ unsigned int thread =
+ mvpp2_cpu_to_thread(port->priv, smp_processor_id());
+ u32 val = mvpp2_read_relaxed(port->priv,
+ MVPP2_AGGR_TXQ_STATUS_REG(thread));
aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
@@ -1655,16 +1685,17 @@ static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
* only by mvpp2_tx(), so migration is disabled, using
* smp_processor_id() is OK.
*/
-static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
+static int mvpp2_txq_alloc_reserved_desc(struct mvpp2_port *port,
struct mvpp2_tx_queue *txq, int num)
{
+ unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
+ struct mvpp2 *priv = port->priv;
u32 val;
- int cpu = smp_processor_id();
val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
- mvpp2_percpu_write_relaxed(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val);
+ mvpp2_thread_write_relaxed(priv, thread, MVPP2_TXQ_RSVD_REQ_REG, val);
- val = mvpp2_percpu_read_relaxed(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG);
+ val = mvpp2_thread_read_relaxed(priv, thread, MVPP2_TXQ_RSVD_RSLT_REG);
return val & MVPP2_TXQ_RSVD_RSLT_MASK;
}
@@ -1672,12 +1703,13 @@ static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
/* Check if there are enough reserved descriptors for transmission.
* If not, request chunk of reserved descriptors and check again.
*/
-static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
+static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2_port *port,
struct mvpp2_tx_queue *txq,
struct mvpp2_txq_pcpu *txq_pcpu,
int num)
{
- int req, cpu, desc_count;
+ int req, desc_count;
+ unsigned int thread;
if (txq_pcpu->reserved_num >= num)
return 0;
@@ -1688,10 +1720,10 @@ static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
desc_count = 0;
/* Compute total of used descriptors */
- for_each_present_cpu(cpu) {
+ for (thread = 0; thread < port->priv->nthreads; thread++) {
struct mvpp2_txq_pcpu *txq_pcpu_aux;
- txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
+ txq_pcpu_aux = per_cpu_ptr(txq->pcpu, thread);
desc_count += txq_pcpu_aux->count;
desc_count += txq_pcpu_aux->reserved_num;
}
@@ -1700,10 +1732,10 @@ static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
desc_count += req;
if (desc_count >
- (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
+ (txq->size - (MVPP2_MAX_THREADS * MVPP2_CPU_DESC_CHUNK)))
return -ENOMEM;
- txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);
+ txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(port, txq, req);
/* OK, the descriptor could have been updated: check again. */
if (txq_pcpu->reserved_num < num)
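Illustration (not part of the patch): the bound above now reserves a fixed headroom of MVPP2_MAX_THREADS * MVPP2_CPU_DESC_CHUNK descriptors instead of one chunk per present CPU. With hypothetical values txq->size = 1024 and MVPP2_CPU_DESC_CHUNK = 64, the headroom is 9 * 64 = 576, so the sum of per-thread counts, existing reservations and the new chunk must stay at or below 1024 - 576 = 448, otherwise the function fails with -ENOMEM.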
@@ -1723,7 +1755,7 @@ static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
}
/* Set Tx descriptors fields relevant for CSUM calculation */
-static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
+static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto,
int ip_hdr_len, int l4_proto)
{
u32 command;
@@ -1757,7 +1789,7 @@ static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
/* Get number of sent descriptors and decrement counter.
* The number of sent descriptors is returned.
- * Per-CPU access
+ * Per-thread access
*
* Called only from mvpp2_txq_done(), called from mvpp2_tx()
* (migration disabled) and from the TX completion tasklet (migration
@@ -1769,7 +1801,8 @@ static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
u32 val;
/* Reading status reg resets transmitted descriptor counter */
- val = mvpp2_percpu_read_relaxed(port->priv, smp_processor_id(),
+ val = mvpp2_thread_read_relaxed(port->priv,
+ mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
MVPP2_TXQ_SENT_REG(txq->id));
return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
@@ -1784,10 +1817,15 @@ static void mvpp2_txq_sent_counter_clear(void *arg)
struct mvpp2_port *port = arg;
int queue;
+ /* If the thread isn't used, don't do anything */
+ if (smp_processor_id() > port->priv->nthreads)
+ return;
+
for (queue = 0; queue < port->ntxqs; queue++) {
int id = port->txqs[queue]->id;
- mvpp2_percpu_read(port->priv, smp_processor_id(),
+ mvpp2_thread_read(port->priv,
+ mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
MVPP2_TXQ_SENT_REG(id));
}
}
@@ -1847,13 +1885,13 @@ static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
struct mvpp2_rx_queue *rxq)
{
- int cpu = get_cpu();
+ unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
- mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
- mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG,
+ mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
+ mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_THRESH_REG,
rxq->pkts_coal);
put_cpu();
@@ -1863,15 +1901,15 @@ static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
struct mvpp2_tx_queue *txq)
{
- int cpu = get_cpu();
+ unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
u32 val;
if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_THRESH_REG, val);
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);
put_cpu();
}
@@ -1972,7 +2010,7 @@ static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
int tx_done;
- if (txq_pcpu->cpu != smp_processor_id())
+ if (txq_pcpu->thread != mvpp2_cpu_to_thread(port->priv, smp_processor_id()))
netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");
tx_done = mvpp2_txq_sent_desc_proc(port, txq);
@@ -1988,7 +2026,7 @@ static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
}
static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
- int cpu)
+ unsigned int thread)
{
struct mvpp2_tx_queue *txq;
struct mvpp2_txq_pcpu *txq_pcpu;
@@ -1999,7 +2037,7 @@ static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
if (!txq)
break;
- txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+ txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
if (txq_pcpu->count) {
mvpp2_txq_done(port, txq, txq_pcpu);
@@ -2015,8 +2053,8 @@ static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
/* Allocate and initialize descriptors for aggr TXQ */
static int mvpp2_aggr_txq_init(struct platform_device *pdev,
- struct mvpp2_tx_queue *aggr_txq, int cpu,
- struct mvpp2 *priv)
+ struct mvpp2_tx_queue *aggr_txq,
+ unsigned int thread, struct mvpp2 *priv)
{
u32 txq_dma;
@@ -2031,7 +2069,7 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev,
/* Aggr TXQ no reset WA */
aggr_txq->next_desc_to_proc = mvpp2_read(priv,
- MVPP2_AGGR_TXQ_INDEX_REG(cpu));
+ MVPP2_AGGR_TXQ_INDEX_REG(thread));
/* Set Tx descriptors queue starting address indirect
* access
@@ -2042,8 +2080,8 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev,
txq_dma = aggr_txq->descs_dma >>
MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
- mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
- mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu),
+ mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(thread), txq_dma);
+ mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(thread),
MVPP2_AGGR_TXQ_SIZE);
return 0;
@@ -2054,8 +2092,8 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
struct mvpp2_rx_queue *rxq)
{
+ unsigned int thread;
u32 rxq_dma;
- int cpu;
rxq->size = port->rx_ring_size;
@@ -2072,15 +2110,15 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
/* Set Rx descriptors queue starting address - indirect access */
- cpu = get_cpu();
- mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
+ thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
+ mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
if (port->priv->hw_version == MVPP21)
rxq_dma = rxq->descs_dma;
else
rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
- mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
- mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
- mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0);
+ mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
+ mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
+ mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_INDEX_REG, 0);
put_cpu();
/* Set Offset */
@@ -2125,7 +2163,7 @@ static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
static void mvpp2_rxq_deinit(struct mvpp2_port *port,
struct mvpp2_rx_queue *rxq)
{
- int cpu;
+ unsigned int thread;
mvpp2_rxq_drop_pkts(port, rxq);
@@ -2144,10 +2182,10 @@ static void mvpp2_rxq_deinit(struct mvpp2_port *port,
* free descriptor number
*/
mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
- cpu = get_cpu();
- mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
- mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0);
- mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0);
+ thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
+ mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
+ mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, 0);
+ mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, 0);
put_cpu();
}
@@ -2156,7 +2194,8 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
struct mvpp2_tx_queue *txq)
{
u32 val;
- int cpu, desc, desc_per_txq, tx_port_num;
+ unsigned int thread;
+ int desc, desc_per_txq, tx_port_num;
struct mvpp2_txq_pcpu *txq_pcpu;
txq->size = port->tx_ring_size;
@@ -2171,18 +2210,18 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
txq->last_desc = txq->size - 1;
/* Set Tx descriptors queue starting address - indirect access */
- cpu = get_cpu();
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG,
+ thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG,
txq->descs_dma);
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG,
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG,
txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0);
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG,
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_INDEX_REG, 0);
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_RSVD_CLR_REG,
txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
- val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG);
+ val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PENDING_REG);
val &= ~MVPP2_TXQ_PENDING_MASK;
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val);
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PENDING_REG, val);
/* Calculate base address in prefetch buffer. We reserve 16 descriptors
* for each existing TXQ.
@@ -2193,7 +2232,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
(txq->log_id * desc_per_txq);
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG,
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG,
MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
put_cpu();
@@ -2212,8 +2251,8 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
val);
- for_each_present_cpu(cpu) {
- txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+ for (thread = 0; thread < port->priv->nthreads; thread++) {
+ txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
txq_pcpu->size = txq->size;
txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
sizeof(*txq_pcpu->buffs),
@@ -2247,10 +2286,10 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
struct mvpp2_tx_queue *txq)
{
struct mvpp2_txq_pcpu *txq_pcpu;
- int cpu;
+ unsigned int thread;
- for_each_present_cpu(cpu) {
- txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+ for (thread = 0; thread < port->priv->nthreads; thread++) {
+ txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
kfree(txq_pcpu->buffs);
if (txq_pcpu->tso_headers)
@@ -2276,10 +2315,10 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
/* Set Tx descriptors queue starting address and size */
- cpu = get_cpu();
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0);
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0);
+ thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, 0);
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, 0);
put_cpu();
}
@@ -2287,14 +2326,14 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
{
struct mvpp2_txq_pcpu *txq_pcpu;
- int delay, pending, cpu;
+ int delay, pending;
+ unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
u32 val;
- cpu = get_cpu();
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
- val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG);
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
+ val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG);
val |= MVPP2_TXQ_DRAIN_EN_MASK;
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);
/* The napi queue has been stopped so wait for all packets
* to be transmitted.
@@ -2310,17 +2349,17 @@ static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
mdelay(1);
delay++;
- pending = mvpp2_percpu_read(port->priv, cpu,
+ pending = mvpp2_thread_read(port->priv, thread,
MVPP2_TXQ_PENDING_REG);
pending &= MVPP2_TXQ_PENDING_MASK;
} while (pending);
val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);
put_cpu();
- for_each_present_cpu(cpu) {
- txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+ for (thread = 0; thread < port->priv->nthreads; thread++) {
+ txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
/* Release all packets */
mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
@@ -2387,13 +2426,17 @@ err_cleanup:
static int mvpp2_setup_txqs(struct mvpp2_port *port)
{
struct mvpp2_tx_queue *txq;
- int queue, err;
+ int queue, err, cpu;
for (queue = 0; queue < port->ntxqs; queue++) {
txq = port->txqs[queue];
err = mvpp2_txq_init(port, txq);
if (err)
goto err_cleanup;
+
+ /* Assign this queue to a CPU */
+ cpu = queue % num_present_cpus();
+ netif_set_xps_queue(port->dev, cpumask_of(cpu), queue);
}
if (port->has_tx_irqs) {
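Illustration (not part of the patch): the XPS assignment above spreads Tx queues round-robin over CPUs. With a hypothetical ntxqs = 8 on a 4-CPU system, queue % num_present_cpus() pins queues 0..3 to CPUs 0..3 and queues 4..7 wrap to the same CPUs, e.g. queue 5 transmits from CPU 1.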
@@ -2501,16 +2544,20 @@ static void mvpp2_tx_proc_cb(unsigned long data)
{
struct net_device *dev = (struct net_device *)data;
struct mvpp2_port *port = netdev_priv(dev);
- struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
+ struct mvpp2_port_pcpu *port_pcpu;
unsigned int tx_todo, cause;
+ port_pcpu = per_cpu_ptr(port->pcpu,
+ mvpp2_cpu_to_thread(port->priv, smp_processor_id()));
+
if (!netif_running(dev))
return;
port_pcpu->timer_scheduled = false;
/* Process all the Tx queues */
cause = (1 << port->ntxqs) - 1;
- tx_todo = mvpp2_tx_done(port, cause, smp_processor_id());
+ tx_todo = mvpp2_tx_done(port, cause,
+ mvpp2_cpu_to_thread(port->priv, smp_processor_id()));
/* Set the timer in case not all the packets were processed */
if (tx_todo)
@@ -2598,14 +2645,15 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
if (skb->ip_summed == CHECKSUM_PARTIAL) {
int ip_hdr_len = 0;
u8 l4_proto;
+ __be16 l3_proto = vlan_get_protocol(skb);
- if (skb->protocol == htons(ETH_P_IP)) {
+ if (l3_proto == htons(ETH_P_IP)) {
struct iphdr *ip4h = ip_hdr(skb);
/* Calculate IPv4 checksum and L4 checksum */
ip_hdr_len = ip4h->ihl;
l4_proto = ip4h->protocol;
- } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ } else if (l3_proto == htons(ETH_P_IPV6)) {
struct ipv6hdr *ip6h = ipv6_hdr(skb);
/* Read l4_protocol from one of IPv6 extra headers */
@@ -2617,7 +2665,7 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
}
return mvpp2_txq_desc_csum(skb_network_offset(skb),
- skb->protocol, ip_hdr_len, l4_proto);
+ l3_proto, ip_hdr_len, l4_proto);
}
return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
@@ -2726,7 +2774,8 @@ static inline void
tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
struct mvpp2_tx_desc *desc)
{
- struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
+ unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
+ struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
dma_addr_t buf_dma_addr =
mvpp2_txdesc_dma_addr_get(port, desc);
@@ -2743,7 +2792,8 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
struct mvpp2_tx_queue *aggr_txq,
struct mvpp2_tx_queue *txq)
{
- struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
+ unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
+ struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
struct mvpp2_tx_desc *tx_desc;
int i;
dma_addr_t buf_dma_addr;
@@ -2862,9 +2912,8 @@ static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev,
int i, len, descs = 0;
/* Check number of available descriptors */
- if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq,
- tso_count_descs(skb)) ||
- mvpp2_txq_reserved_desc_num_proc(port->priv, txq, txq_pcpu,
+ if (mvpp2_aggr_desc_num_check(port, aggr_txq, tso_count_descs(skb)) ||
+ mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu,
tso_count_descs(skb)))
return 0;
@@ -2904,21 +2953,28 @@ release:
}
/* Main tx processing */
-static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
{
struct mvpp2_port *port = netdev_priv(dev);
struct mvpp2_tx_queue *txq, *aggr_txq;
struct mvpp2_txq_pcpu *txq_pcpu;
struct mvpp2_tx_desc *tx_desc;
dma_addr_t buf_dma_addr;
+ unsigned long flags = 0;
+ unsigned int thread;
int frags = 0;
u16 txq_id;
u32 tx_cmd;
+ thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
+
txq_id = skb_get_queue_mapping(skb);
txq = port->txqs[txq_id];
- txq_pcpu = this_cpu_ptr(txq->pcpu);
- aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];
+ txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
+ aggr_txq = &port->priv->aggr_txqs[thread];
+
+ if (test_bit(thread, &port->priv->lock_map))
+ spin_lock_irqsave(&port->tx_lock[thread], flags);
if (skb_is_gso(skb)) {
frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu);
@@ -2927,9 +2983,8 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
frags = skb_shinfo(skb)->nr_frags + 1;
/* Check number of available descriptors */
- if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
- mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
- txq_pcpu, frags)) {
+ if (mvpp2_aggr_desc_num_check(port, aggr_txq, frags) ||
+ mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, frags)) {
frags = 0;
goto out;
}
@@ -2971,7 +3026,7 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
out:
if (frags > 0) {
- struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
+ struct mvpp2_pcpu_stats *stats = per_cpu_ptr(port->stats, thread);
struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
txq_pcpu->reserved_num -= frags;
@@ -3001,11 +3056,14 @@ out:
/* Set the timer in case not all frags were processed */
if (!port->has_tx_irqs && txq_pcpu->count <= frags &&
txq_pcpu->count > 0) {
- struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
+ struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu, thread);
mvpp2_timer_set(port_pcpu);
}
+ if (test_bit(thread, &port->priv->lock_map))
+ spin_unlock_irqrestore(&port->tx_lock[thread], flags);
+
return NETDEV_TX_OK;
}
@@ -3025,7 +3083,7 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
int rx_done = 0;
struct mvpp2_port *port = netdev_priv(napi->dev);
struct mvpp2_queue_vector *qv;
- int cpu = smp_processor_id();
+ unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
qv = container_of(napi, struct mvpp2_queue_vector, napi);
@@ -3039,7 +3097,7 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
*
* Each CPU has its own Rx/Tx cause register
*/
- cause_rx_tx = mvpp2_percpu_read_relaxed(port->priv, qv->sw_thread_id,
+ cause_rx_tx = mvpp2_thread_read_relaxed(port->priv, qv->sw_thread_id,
MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
@@ -3048,19 +3106,22 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
/* Clear the cause register */
mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
- mvpp2_percpu_write(port->priv, cpu,
+ mvpp2_thread_write(port->priv, thread,
MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
}
- cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
- if (cause_tx) {
- cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
- mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
+ if (port->has_tx_irqs) {
+ cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
+ if (cause_tx) {
+ cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
+ mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
+ }
}
/* Process RX packets */
- cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
+ cause_rx = cause_rx_tx &
+ MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
cause_rx <<= qv->first_rxq;
cause_rx |= qv->pending_cause_rx;
while (cause_rx && budget > 0) {
@@ -3135,7 +3196,7 @@ static void mvpp2_start_dev(struct mvpp2_port *port)
for (i = 0; i < port->nqvecs; i++)
napi_enable(&port->qvecs[i].napi);
- /* Enable interrupts on all CPUs */
+ /* Enable interrupts on all threads */
mvpp2_interrupts_enable(port);
if (port->priv->hw_version == MVPP22)
@@ -3150,9 +3211,10 @@ static void mvpp2_start_dev(struct mvpp2_port *port)
*/
struct phylink_link_state state = {
.interface = port->phy_interface,
- .link = 1,
};
mvpp2_mac_config(port->dev, MLO_AN_INBAND, &state);
+ mvpp2_mac_link_up(port->dev, MLO_AN_INBAND, port->phy_interface,
+ NULL);
}
netif_tx_start_all_queues(port->dev);
@@ -3163,7 +3225,7 @@ static void mvpp2_stop_dev(struct mvpp2_port *port)
{
int i;
- /* Disable interrupts on all CPUs */
+ /* Disable interrupts on all threads */
mvpp2_interrupts_disable(port);
for (i = 0; i < port->nqvecs; i++)
@@ -3243,9 +3305,18 @@ static int mvpp2_irqs_init(struct mvpp2_port *port)
if (err)
goto err;
- if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
- irq_set_affinity_hint(qv->irq,
- cpumask_of(qv->sw_thread_id));
+ if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
+ unsigned long mask = 0;
+ unsigned int cpu;
+
+ for_each_present_cpu(cpu) {
+ if (mvpp2_cpu_to_thread(port->priv, cpu) ==
+ qv->sw_thread_id)
+ mask |= BIT(cpu);
+ }
+
+ irq_set_affinity_hint(qv->irq, to_cpumask(&mask));
+ }
}
return 0;
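Illustration (not part of the patch): with a hypothetical 16 present CPUs and nthreads = 9, both CPU 0 and CPU 9 satisfy mvpp2_cpu_to_thread(priv, cpu) == 0, so the affinity hint for sw_thread_id 0 becomes BIT(0) | BIT(9) and the private interrupt may fire on either CPU, which is why this patch also introduces the lock_map handling.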
@@ -3389,11 +3460,11 @@ static int mvpp2_stop(struct net_device *dev)
{
struct mvpp2_port *port = netdev_priv(dev);
struct mvpp2_port_pcpu *port_pcpu;
- int cpu;
+ unsigned int thread;
mvpp2_stop_dev(port);
- /* Mask interrupts on all CPUs */
+ /* Mask interrupts on all threads */
on_each_cpu(mvpp2_interrupts_mask, port, 1);
mvpp2_shared_interrupt_mask_unmask(port, true);
@@ -3404,8 +3475,8 @@ static int mvpp2_stop(struct net_device *dev)
mvpp2_irqs_deinit(port);
if (!port->has_tx_irqs) {
- for_each_present_cpu(cpu) {
- port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+ for (thread = 0; thread < port->priv->nthreads; thread++) {
+ port_pcpu = per_cpu_ptr(port->pcpu, thread);
hrtimer_cancel(&port_pcpu->tx_done_timer);
port_pcpu->timer_scheduled = false;
@@ -3550,7 +3621,7 @@ mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct mvpp2_port *port = netdev_priv(dev);
unsigned int start;
- int cpu;
+ unsigned int cpu;
for_each_possible_cpu(cpu) {
struct mvpp2_pcpu_stats *cpu_stats;
@@ -3977,12 +4048,18 @@ static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port,
static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
struct device_node *port_node)
{
+ struct mvpp2 *priv = port->priv;
struct mvpp2_queue_vector *v;
int i, ret;
- port->nqvecs = num_possible_cpus();
- if (queue_mode == MVPP2_QDIST_SINGLE_MODE)
- port->nqvecs += 1;
+ switch (queue_mode) {
+ case MVPP2_QDIST_SINGLE_MODE:
+ port->nqvecs = priv->nthreads + 1;
+ break;
+ case MVPP2_QDIST_MULTI_MODE:
+ port->nqvecs = priv->nthreads;
+ break;
+ }
for (i = 0; i < port->nqvecs; i++) {
char irqname[16];
@@ -3994,7 +4071,10 @@ static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
v->sw_thread_id = i;
v->sw_thread_mask = BIT(i);
- snprintf(irqname, sizeof(irqname), "tx-cpu%d", i);
+ if (port->flags & MVPP2_F_DT_COMPAT)
+ snprintf(irqname, sizeof(irqname), "tx-cpu%d", i);
+ else
+ snprintf(irqname, sizeof(irqname), "hif%d", i);
if (queue_mode == MVPP2_QDIST_MULTI_MODE) {
v->first_rxq = i * MVPP2_DEFAULT_RXQ;
@@ -4004,7 +4084,9 @@ static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
v->first_rxq = 0;
v->nrxqs = port->nrxqs;
v->type = MVPP2_QUEUE_VECTOR_SHARED;
- strncpy(irqname, "rx-shared", sizeof(irqname));
+
+ if (port->flags & MVPP2_F_DT_COMPAT)
+ strncpy(irqname, "rx-shared", sizeof(irqname));
}
if (port_node)
@@ -4081,7 +4163,8 @@ static int mvpp2_port_init(struct mvpp2_port *port)
struct device *dev = port->dev->dev.parent;
struct mvpp2 *priv = port->priv;
struct mvpp2_txq_pcpu *txq_pcpu;
- int queue, cpu, err;
+ unsigned int thread;
+ int queue, err;
/* Checks for hardware constraints */
if (port->first_rxq + port->nrxqs >
@@ -4125,9 +4208,9 @@ static int mvpp2_port_init(struct mvpp2_port *port)
txq->id = queue_phy_id;
txq->log_id = queue;
txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
- for_each_present_cpu(cpu) {
- txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
- txq_pcpu->cpu = cpu;
+ for (thread = 0; thread < priv->nthreads; thread++) {
+ txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
+ txq_pcpu->thread = thread;
}
port->txqs[queue] = txq;
@@ -4200,24 +4283,51 @@ err_free_percpu:
return err;
}
-/* Checks if the port DT description has the TX interrupts
- * described. On PPv2.1, there are no such interrupts. On PPv2.2,
- * there are available, but we need to keep support for old DTs.
+static bool mvpp22_port_has_legacy_tx_irqs(struct device_node *port_node,
+ unsigned long *flags)
+{
+ char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", "tx-cpu2",
+ "tx-cpu3" };
+ int i;
+
+ for (i = 0; i < 5; i++)
+ if (of_property_match_string(port_node, "interrupt-names",
+ irqs[i]) < 0)
+ return false;
+
+ *flags |= MVPP2_F_DT_COMPAT;
+ return true;
+}
+
+/* Checks if the port dt description has the required Tx interrupts:
+ * - PPv2.1: there are no such interrupts.
+ * - PPv2.2:
+ * - The old DTs have: "rx-shared", "tx-cpuX" with X in [0...3]
+ * - The new ones have: "hifX" with X in [0..8]
+ *
+ * All those variants are supported to keep the backward compatibility.
*/
-static bool mvpp2_port_has_tx_irqs(struct mvpp2 *priv,
- struct device_node *port_node)
+static bool mvpp2_port_has_irqs(struct mvpp2 *priv,
+ struct device_node *port_node,
+ unsigned long *flags)
{
- char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1",
- "tx-cpu2", "tx-cpu3" };
- int ret, i;
+ char name[5];
+ int i;
+
+ /* ACPI */
+ if (!port_node)
+ return true;
if (priv->hw_version == MVPP21)
return false;
- for (i = 0; i < 5; i++) {
- ret = of_property_match_string(port_node, "interrupt-names",
- irqs[i]);
- if (ret < 0)
+ if (mvpp22_port_has_legacy_tx_irqs(port_node, flags))
+ return true;
+
+ for (i = 0; i < MVPP2_MAX_THREADS; i++) {
+ snprintf(name, 5, "hif%d", i);
+ if (of_property_match_string(port_node, "interrupt-names",
+ name) < 0)
return false;
}
@@ -4495,10 +4605,6 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
return;
}
- netif_tx_stop_all_queues(port->dev);
- if (!port->has_phy)
- netif_carrier_off(port->dev);
-
/* Make sure the port is disabled when reconfiguring the mode */
mvpp2_port_disable(port);
@@ -4523,16 +4629,7 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK)
mvpp2_port_loopback_set(port, state);
- /* If the port already was up, make sure it's still in the same state */
- if (state->link || !port->has_phy) {
- mvpp2_port_enable(port);
-
- mvpp2_egress_enable(port);
- mvpp2_ingress_enable(port);
- if (!port->has_phy)
- netif_carrier_on(dev);
- netif_tx_wake_all_queues(dev);
- }
+ mvpp2_port_enable(port);
}
static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode,
@@ -4607,23 +4704,21 @@ static int mvpp2_port_probe(struct platform_device *pdev,
struct resource *res;
struct phylink *phylink;
char *mac_from = "";
- unsigned int ntxqs, nrxqs;
+ unsigned int ntxqs, nrxqs, thread;
+ unsigned long flags = 0;
bool has_tx_irqs;
u32 id;
int features;
int phy_mode;
- int err, i, cpu;
+ int err, i;
- if (port_node) {
- has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node);
- } else {
- has_tx_irqs = true;
- queue_mode = MVPP2_QDIST_MULTI_MODE;
+ has_tx_irqs = mvpp2_port_has_irqs(priv, port_node, &flags);
+ if (!has_tx_irqs && queue_mode == MVPP2_QDIST_MULTI_MODE) {
+ dev_err(&pdev->dev,
+ "not enough IRQs to support multi queue mode\n");
+ return -EINVAL;
}
- if (!has_tx_irqs)
- queue_mode = MVPP2_QDIST_SINGLE_MODE;
-
ntxqs = MVPP2_MAX_TXQ;
if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_MULTI_MODE)
nrxqs = MVPP2_DEFAULT_RXQ * num_possible_cpus();
@@ -4671,6 +4766,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
port->nrxqs = nrxqs;
port->priv = priv;
port->has_tx_irqs = has_tx_irqs;
+ port->flags = flags;
err = mvpp2_queue_vectors_init(port, port_node);
if (err)
@@ -4767,8 +4863,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
}
if (!port->has_tx_irqs) {
- for_each_present_cpu(cpu) {
- port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+ for (thread = 0; thread < priv->nthreads; thread++) {
+ port_pcpu = per_cpu_ptr(port->pcpu, thread);
hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL_PINNED);
@@ -4803,6 +4899,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
dev->min_mtu = ETH_MIN_MTU;
/* 9704 == 9728 - 20 and rounding to 8 */
dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
+ dev->dev.of_node = port_node;
/* Phylink isn't used w/ ACPI as of now */
if (port_node) {
@@ -5051,13 +5148,13 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
}
/* Allocate and initialize aggregated TXQs */
- priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
+ priv->aggr_txqs = devm_kcalloc(&pdev->dev, MVPP2_MAX_THREADS,
sizeof(*priv->aggr_txqs),
GFP_KERNEL);
if (!priv->aggr_txqs)
return -ENOMEM;
- for_each_present_cpu(i) {
+ for (i = 0; i < MVPP2_MAX_THREADS; i++) {
priv->aggr_txqs[i].id = i;
priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv);
@@ -5104,7 +5201,7 @@ static int mvpp2_probe(struct platform_device *pdev)
struct mvpp2 *priv;
struct resource *res;
void __iomem *base;
- int i;
+ int i, shared;
int err;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
@@ -5169,6 +5266,15 @@ static int mvpp2_probe(struct platform_device *pdev)
mvpp2_setup_bm_pool();
+
+ priv->nthreads = min_t(unsigned int, num_present_cpus(),
+ MVPP2_MAX_THREADS);
+
+ shared = num_present_cpus() - priv->nthreads;
+ if (shared > 0)
+ bitmap_fill(&priv->lock_map,
+ min_t(int, shared, MVPP2_MAX_THREADS));
+
for (i = 0; i < MVPP2_MAX_THREADS; i++) {
u32 addr_space_sz;
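Illustration (not part of the patch): with a hypothetical num_present_cpus() = 16, nthreads = min(16, MVPP2_MAX_THREADS) = 9 and shared = 16 - 9 = 7, so bits 0..6 of lock_map get set. Those are exactly the threads the modulo mapping assigns to two CPUs (CPU 0 and CPU 9 both land on thread 0, and so on through thread 6), and they take the bm_lock/tx_lock spinlocks in the hot paths; threads 7 and 8 remain lock-free.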
@@ -5343,7 +5449,7 @@ static int mvpp2_remove(struct platform_device *pdev)
mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
}
- for_each_present_cpu(i) {
+ for (i = 0; i < MVPP2_MAX_THREADS; i++) {
struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];
dma_free_coherent(&pdev->dev,
diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig
new file mode 100644
index 000000000000..35827bdf1878
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig
@@ -0,0 +1,17 @@
+#
+# Marvell OcteonTX2 drivers configuration
+#
+
+config OCTEONTX2_MBOX
+ tristate
+
+config OCTEONTX2_AF
+ tristate "Marvell OcteonTX2 RVU Admin Function driver"
+ select OCTEONTX2_MBOX
+ depends on (64BIT && COMPILE_TEST) || ARM64
+ depends on PCI
+ help
+ This driver supports Marvell's OcteonTX2 Resource Virtualization
+ Unit's admin function manager which manages all RVU HW resources
+ and provides a medium to other PF/VFs to configure HW. Should be
+ enabled for other RVU device drivers to work.
diff --git a/drivers/net/ethernet/marvell/octeontx2/Makefile b/drivers/net/ethernet/marvell/octeontx2/Makefile
new file mode 100644
index 000000000000..e579dcd54c97
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Marvell OcteonTX2 device drivers.
+#
+
+obj-$(CONFIG_OCTEONTX2_AF) += af/
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
new file mode 100644
index 000000000000..06329acf9c2c
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Marvell's OcteonTX2 RVU Admin Function driver
+#
+
+obj-$(CONFIG_OCTEONTX2_MBOX) += octeontx2_mbox.o
+obj-$(CONFIG_OCTEONTX2_AF) += octeontx2_af.o
+
+octeontx2_mbox-y := mbox.o
+octeontx2_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
+ rvu_reg.o rvu_npc.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
new file mode 100644
index 000000000000..12db256c8c9f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -0,0 +1,721 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 CGX driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+
+#include "cgx.h"
+
+#define DRV_NAME "octeontx2-cgx"
+#define DRV_STRING "Marvell OcteonTX2 CGX/MAC Driver"
+
+/**
+ * struct lmac
+ * @wq_cmd_cmplt: waitq to keep the process blocked until cmd completion
+ * @cmd_lock: Lock to serialize the command interface
+ * @resp: command response
+ * @link_info: link related information
+ * @event_cb: callback for linkchange events
+ * @cmd_pend: flag set before new command is started
+ * flag cleared after command response is received
+ * @cgx: parent cgx port
+ * @lmac_id: lmac port id
+ * @name: lmac port name
+ */
+struct lmac {
+ wait_queue_head_t wq_cmd_cmplt;
+ struct mutex cmd_lock;
+ u64 resp;
+ struct cgx_link_user_info link_info;
+ struct cgx_event_cb event_cb;
+ bool cmd_pend;
+ struct cgx *cgx;
+ u8 lmac_id;
+ char *name;
+};
+
+struct cgx {
+ void __iomem *reg_base;
+ struct pci_dev *pdev;
+ u8 cgx_id;
+ u8 lmac_count;
+ struct lmac *lmac_idmap[MAX_LMAC_PER_CGX];
+ struct list_head cgx_list;
+};
+
+static LIST_HEAD(cgx_list);
+
+/* Convert firmware speed encoding to user format (Mbps) */
+static u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX];
+
+/* Convert firmware lmac type encoding to string */
+static char *cgx_lmactype_string[LMAC_MODE_MAX];
+
+/* Supported devices */
+static const struct pci_device_id cgx_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
+ { 0, } /* end of table */
+};
+
+MODULE_DEVICE_TABLE(pci, cgx_id_table);
+
+static void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val)
+{
+ writeq(val, cgx->reg_base + (lmac << 18) + offset);
+}
+
+static u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
+{
+ return readq(cgx->reg_base + (lmac << 18) + offset);
+}
+
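+/* Example (values worked from the accessors above): each LMAC gets its own
+ * 256KB CSR window, so a read of offset 0x00 (CGXX_CMRX_CFG) for lmac 1
+ * resolves to reg_base + (1 << 18) + 0x00.
+ */
+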
+static inline struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
+{
+ if (!cgx || lmac_id >= MAX_LMAC_PER_CGX)
+ return NULL;
+
+ return cgx->lmac_idmap[lmac_id];
+}
+
+int cgx_get_cgx_cnt(void)
+{
+ struct cgx *cgx_dev;
+ int count = 0;
+
+ list_for_each_entry(cgx_dev, &cgx_list, cgx_list)
+ count++;
+
+ return count;
+}
+EXPORT_SYMBOL(cgx_get_cgx_cnt);
+
+int cgx_get_lmac_cnt(void *cgxd)
+{
+ struct cgx *cgx = cgxd;
+
+ if (!cgx)
+ return -ENODEV;
+
+ return cgx->lmac_count;
+}
+EXPORT_SYMBOL(cgx_get_lmac_cnt);
+
+void *cgx_get_pdata(int cgx_id)
+{
+ struct cgx *cgx_dev;
+
+ list_for_each_entry(cgx_dev, &cgx_list, cgx_list) {
+ if (cgx_dev->cgx_id == cgx_id)
+ return cgx_dev;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(cgx_get_pdata);
+
+/* Ensure the required lock for the event queue (where asynchronous events are
+ * posted) is acquired before calling this API. Otherwise an asynchronous
+ * event (with the latest link status) can reach the destination before this
+ * function returns and make the link status appear wrong.
+ */
+int cgx_get_link_info(void *cgxd, int lmac_id,
+ struct cgx_link_user_info *linfo)
+{
+ struct lmac *lmac = lmac_pdata(lmac_id, cgxd);
+
+ if (!lmac)
+ return -ENODEV;
+
+ *linfo = lmac->link_info;
+ return 0;
+}
+EXPORT_SYMBOL(cgx_get_link_info);
+
+static u64 mac2u64(u8 *mac_addr)
+{
+ u64 mac = 0;
+ int index;
+
+ for (index = ETH_ALEN - 1; index >= 0; index--)
+ mac |= ((u64)*mac_addr++) << (8 * index);
+ return mac;
+}
+
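+/* Example: for a hypothetical address 00:11:22:33:44:55, mac2u64() returns
+ * 0x001122334455 - the first octet lands in bits 47:40 of the result.
+ */
+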
+int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ u64 cfg;
+
+ /* Pack the 6-byte MAC address into the CAM register layout */
+ cfg = mac2u64(mac_addr);
+
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (lmac_id * 0x8)),
+ cfg | CGX_DMAC_CAM_ADDR_ENABLE | ((u64)lmac_id << 49));
+
+ cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+ cfg |= CGX_DMAC_CTL0_CAM_ENABLE;
+ cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+
+ return 0;
+}
+EXPORT_SYMBOL(cgx_lmac_addr_set);
+
+u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ u64 cfg;
+
+ cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8);
+ return cfg & CGX_RX_DMAC_ADR_MASK;
+}
+EXPORT_SYMBOL(cgx_lmac_addr_get);
+
+int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind)
+{
+ struct cgx *cgx = cgxd;
+
+ if (!cgx || lmac_id >= cgx->lmac_count)
+ return -ENODEV;
+
+ cgx_write(cgx, lmac_id, CGXX_CMRX_RX_ID_MAP, (pkind & 0x3F));
+ return 0;
+}
+EXPORT_SYMBOL(cgx_set_pkind);
+
+static inline u8 cgx_get_lmac_type(struct cgx *cgx, int lmac_id)
+{
+ u64 cfg;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
+ return (cfg >> CGX_LMAC_TYPE_SHIFT) & CGX_LMAC_TYPE_MASK;
+}
+
+/* Configure CGX LMAC in internal loopback mode */
+int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
+{
+ struct cgx *cgx = cgxd;
+ u8 lmac_type;
+ u64 cfg;
+
+ if (!cgx || lmac_id >= cgx->lmac_count)
+ return -ENODEV;
+
+ lmac_type = cgx_get_lmac_type(cgx, lmac_id);
+ if (lmac_type == LMAC_MODE_SGMII || lmac_type == LMAC_MODE_QSGMII) {
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL);
+ if (enable)
+ cfg |= CGXX_GMP_PCS_MRX_CTL_LBK;
+ else
+ cfg &= ~CGXX_GMP_PCS_MRX_CTL_LBK;
+ cgx_write(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL, cfg);
+ } else {
+ cfg = cgx_read(cgx, lmac_id, CGXX_SPUX_CONTROL1);
+ if (enable)
+ cfg |= CGXX_SPUX_CONTROL1_LBK;
+ else
+ cfg &= ~CGXX_SPUX_CONTROL1_LBK;
+ cgx_write(cgx, lmac_id, CGXX_SPUX_CONTROL1, cfg);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(cgx_lmac_internal_loopback);
+
+void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
+{
+ struct cgx *cgx = cgx_get_pdata(cgx_id);
+ u64 cfg = 0;
+
+ if (!cgx)
+ return;
+
+ if (enable) {
+ /* Enable promiscuous mode on LMAC */
+ cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+ cfg &= ~(CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE);
+ cfg |= CGX_DMAC_BCAST_MODE;
+ cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+
+ cfg = cgx_read(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
+ cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE;
+ cgx_write(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
+ } else {
+ /* Disable promiscuous mode */
+ cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+ cfg |= CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE;
+ cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+ cfg = cgx_read(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
+ cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
+ cgx_write(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
+ }
+}
+EXPORT_SYMBOL(cgx_lmac_promisc_config);
+
+int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
+{
+ struct cgx *cgx = cgxd;
+
+ if (!cgx || lmac_id >= cgx->lmac_count)
+ return -ENODEV;
+ *rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8));
+ return 0;
+}
+EXPORT_SYMBOL(cgx_get_rx_stats);
+
+int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat)
+{
+ struct cgx *cgx = cgxd;
+
+ if (!cgx || lmac_id >= cgx->lmac_count)
+ return -ENODEV;
+ *tx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (idx * 8));
+ return 0;
+}
+EXPORT_SYMBOL(cgx_get_tx_stats);
+
+int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (!cgx || lmac_id >= cgx->lmac_count)
+ return -ENODEV;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
+ if (enable)
+ cfg |= CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN;
+ else
+ cfg &= ~(CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN);
+ cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
+ return 0;
+}
+EXPORT_SYMBOL(cgx_lmac_rx_tx_enable);
+
+/* CGX Firmware interface low level support */
+static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
+{
+ struct cgx *cgx = lmac->cgx;
+ struct device *dev;
+ int err = 0;
+ u64 cmd;
+
+ /* Ensure no other command is in progress */
+ err = mutex_lock_interruptible(&lmac->cmd_lock);
+ if (err)
+ return err;
+
+ /* Ensure command register is free */
+ cmd = cgx_read(cgx, lmac->lmac_id, CGX_COMMAND_REG);
+ if (FIELD_GET(CMDREG_OWN, cmd) != CGX_CMD_OWN_NS) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ /* Update ownership in command request */
+ req = FIELD_SET(CMDREG_OWN, CGX_CMD_OWN_FIRMWARE, req);
+
+ /* Mark this lmac as pending, before we start */
+ lmac->cmd_pend = true;
+
+ /* Start command in hardware */
+ cgx_write(cgx, lmac->lmac_id, CGX_COMMAND_REG, req);
+
+ /* Ensure command is completed without errors */
+ if (!wait_event_timeout(lmac->wq_cmd_cmplt, !lmac->cmd_pend,
+ msecs_to_jiffies(CGX_CMD_TIMEOUT))) {
+ dev = &cgx->pdev->dev;
+ dev_err(dev, "cgx port %d:%d cmd timeout\n",
+ cgx->cgx_id, lmac->lmac_id);
+ err = -EIO;
+ goto unlock;
+ }
+
+ /* we have a valid command response */
+ smp_rmb(); /* Ensure the latest updates are visible */
+ *resp = lmac->resp;
+
+unlock:
+ mutex_unlock(&lmac->cmd_lock);
+
+ return err;
+}
+
+static inline int cgx_fwi_cmd_generic(u64 req, u64 *resp,
+ struct cgx *cgx, int lmac_id)
+{
+ struct lmac *lmac;
+ int err;
+
+ lmac = lmac_pdata(lmac_id, cgx);
+ if (!lmac)
+ return -ENODEV;
+
+ err = cgx_fwi_cmd_send(req, resp, lmac);
+
+ /* Check for valid response */
+ if (!err) {
+ if (FIELD_GET(EVTREG_STAT, *resp) == CGX_STAT_FAIL)
+ return -EIO;
+ else
+ return 0;
+ }
+
+ return err;
+}
+
+static inline void cgx_link_usertable_init(void)
+{
+ cgx_speed_mbps[CGX_LINK_NONE] = 0;
+ cgx_speed_mbps[CGX_LINK_10M] = 10;
+ cgx_speed_mbps[CGX_LINK_100M] = 100;
+ cgx_speed_mbps[CGX_LINK_1G] = 1000;
+ cgx_speed_mbps[CGX_LINK_2HG] = 2500;
+ cgx_speed_mbps[CGX_LINK_5G] = 5000;
+ cgx_speed_mbps[CGX_LINK_10G] = 10000;
+ cgx_speed_mbps[CGX_LINK_20G] = 20000;
+ cgx_speed_mbps[CGX_LINK_25G] = 25000;
+ cgx_speed_mbps[CGX_LINK_40G] = 40000;
+ cgx_speed_mbps[CGX_LINK_50G] = 50000;
+ cgx_speed_mbps[CGX_LINK_100G] = 100000;
+
+ cgx_lmactype_string[LMAC_MODE_SGMII] = "SGMII";
+ cgx_lmactype_string[LMAC_MODE_XAUI] = "XAUI";
+ cgx_lmactype_string[LMAC_MODE_RXAUI] = "RXAUI";
+ cgx_lmactype_string[LMAC_MODE_10G_R] = "10G_R";
+ cgx_lmactype_string[LMAC_MODE_40G_R] = "40G_R";
+ cgx_lmactype_string[LMAC_MODE_QSGMII] = "QSGMII";
+ cgx_lmactype_string[LMAC_MODE_25G_R] = "25G_R";
+ cgx_lmactype_string[LMAC_MODE_50G_R] = "50G_R";
+ cgx_lmactype_string[LMAC_MODE_100G_R] = "100G_R";
+ cgx_lmactype_string[LMAC_MODE_USXGMII] = "USXGMII";
+}
+
+static inline void link_status_user_format(u64 lstat,
+ struct cgx_link_user_info *linfo,
+ struct cgx *cgx, u8 lmac_id)
+{
+ char *lmac_string;
+
+ linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat);
+ linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat);
+ linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)];
+ linfo->lmac_type_id = cgx_get_lmac_type(cgx, lmac_id);
+ lmac_string = cgx_lmactype_string[linfo->lmac_type_id];
+ strncpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN - 1);
+}
+
+/* Hardware event handlers */
+static inline void cgx_link_change_handler(u64 lstat,
+ struct lmac *lmac)
+{
+ struct cgx_link_user_info *linfo;
+ struct cgx *cgx = lmac->cgx;
+ struct cgx_link_event event;
+ struct device *dev;
+ int err_type;
+
+ dev = &cgx->pdev->dev;
+
+ link_status_user_format(lstat, &event.link_uinfo, cgx, lmac->lmac_id);
+ err_type = FIELD_GET(RESP_LINKSTAT_ERRTYPE, lstat);
+
+ event.cgx_id = cgx->cgx_id;
+ event.lmac_id = lmac->lmac_id;
+
+ /* update the local copy of link status */
+ lmac->link_info = event.link_uinfo;
+ linfo = &lmac->link_info;
+
+ if (!lmac->event_cb.notify_link_chg) {
+ dev_dbg(dev, "cgx port %d:%d Link change handler null",
+ cgx->cgx_id, lmac->lmac_id);
+ if (err_type != CGX_ERR_NONE) {
+ dev_err(dev, "cgx port %d:%d Link error %d\n",
+ cgx->cgx_id, lmac->lmac_id, err_type);
+ }
+ dev_info(dev, "cgx port %d:%d Link is %s %d Mbps\n",
+ cgx->cgx_id, lmac->lmac_id,
+ linfo->link_up ? "UP" : "DOWN", linfo->speed);
+ return;
+ }
+
+ if (lmac->event_cb.notify_link_chg(&event, lmac->event_cb.data))
+ dev_err(dev, "event notification failure\n");
+}
+
+static inline bool cgx_cmdresp_is_linkevent(u64 event)
+{
+ u8 id;
+
+ id = FIELD_GET(EVTREG_ID, event);
+ if (id == CGX_CMD_LINK_BRING_UP ||
+ id == CGX_CMD_LINK_BRING_DOWN)
+ return true;
+ else
+ return false;
+}
+
+static inline bool cgx_event_is_linkevent(u64 event)
+{
+ if (FIELD_GET(EVTREG_ID, event) == CGX_EVT_LINK_CHANGE)
+ return true;
+ else
+ return false;
+}
+
+static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
+{
+ struct lmac *lmac = data;
+ struct cgx *cgx;
+ u64 event;
+
+ cgx = lmac->cgx;
+
+ event = cgx_read(cgx, lmac->lmac_id, CGX_EVENT_REG);
+
+ if (!FIELD_GET(EVTREG_ACK, event))
+ return IRQ_NONE;
+
+ switch (FIELD_GET(EVTREG_EVT_TYPE, event)) {
+ case CGX_EVT_CMD_RESP:
+ /* Copy the response. Since only one command is active at a
+ * time, there is no way a response can get overwritten
+ */
+ lmac->resp = event;
+ /* Ensure response is updated before thread context starts */
+ smp_wmb();
+
+ /* There won't be separate events for link change initiated from
+ * software; Hence report the command responses as events
+ */
+ if (cgx_cmdresp_is_linkevent(event))
+ cgx_link_change_handler(event, lmac);
+
+ /* Release thread waiting for completion */
+ lmac->cmd_pend = false;
+ wake_up_interruptible(&lmac->wq_cmd_cmplt);
+ break;
+ case CGX_EVT_ASYNC:
+ if (cgx_event_is_linkevent(event))
+ cgx_link_change_handler(event, lmac);
+ break;
+ }
+
+ /* Any new event or command response will be posted by firmware
+ * only after the current status is acked.
+ * Ack the interrupt register as well.
+ */
+ cgx_write(lmac->cgx, lmac->lmac_id, CGX_EVENT_REG, 0);
+ cgx_write(lmac->cgx, lmac->lmac_id, CGXX_CMRX_INT, FW_CGX_INT);
+
+ return IRQ_HANDLED;
+}
+
+/* APIs for PHY management using CGX firmware interface */
+
+/* callback registration for hardware events like link change */
+int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+ struct lmac *lmac;
+
+ lmac = lmac_pdata(lmac_id, cgx);
+ if (!lmac)
+ return -ENODEV;
+
+ lmac->event_cb = *cb;
+
+ return 0;
+}
+EXPORT_SYMBOL(cgx_lmac_evh_register);
+
+static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx)
+{
+ u64 req = 0;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
+ return cgx_fwi_cmd_generic(req, resp, cgx, 0);
+}
+
+static int cgx_lmac_verify_fwi_version(struct cgx *cgx)
+{
+ struct device *dev = &cgx->pdev->dev;
+ int major_ver, minor_ver;
+ u64 resp;
+ int err;
+
+ if (!cgx->lmac_count)
+ return 0;
+
+ err = cgx_fwi_read_version(&resp, cgx);
+ if (err)
+ return err;
+
+ major_ver = FIELD_GET(RESP_MAJOR_VER, resp);
+ minor_ver = FIELD_GET(RESP_MINOR_VER, resp);
+ dev_dbg(dev, "Firmware command interface version = %d.%d\n",
+ major_ver, minor_ver);
+ if (major_ver != CGX_FIRMWARE_MAJOR_VER ||
+ minor_ver != CGX_FIRMWARE_MINOR_VER)
+ return -EIO;
+ else
+ return 0;
+}
+
+static int cgx_lmac_init(struct cgx *cgx)
+{
+ struct lmac *lmac;
+ int i, err;
+
+ cgx->lmac_count = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0x7;
+ if (cgx->lmac_count > MAX_LMAC_PER_CGX)
+ cgx->lmac_count = MAX_LMAC_PER_CGX;
+
+ for (i = 0; i < cgx->lmac_count; i++) {
+ lmac = kzalloc(sizeof(struct lmac), GFP_KERNEL);
+ if (!lmac)
+ return -ENOMEM;
+ lmac->name = kzalloc(sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL);
+ if (!lmac->name) {
+ kfree(lmac);
+ return -ENOMEM;
+ }
+ sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i);
+ lmac->lmac_id = i;
+ lmac->cgx = cgx;
+ init_waitqueue_head(&lmac->wq_cmd_cmplt);
+ mutex_init(&lmac->cmd_lock);
+ err = request_irq(pci_irq_vector(cgx->pdev,
+ CGX_LMAC_FWI + i * 9),
+ cgx_fwi_event_handler, 0, lmac->name, lmac);
+ if (err) {
+ kfree(lmac->name);
+ kfree(lmac);
+ return err;
+ }
+
+ /* Enable interrupt */
+ cgx_write(cgx, lmac->lmac_id, CGXX_CMRX_INT_ENA_W1S,
+ FW_CGX_INT);
+
+ /* Add reference */
+ cgx->lmac_idmap[i] = lmac;
+ }
+
+ return cgx_lmac_verify_fwi_version(cgx);
+}
+
+static int cgx_lmac_exit(struct cgx *cgx)
+{
+ struct lmac *lmac;
+ int i;
+
+ /* Free all lmac related resources */
+ for (i = 0; i < cgx->lmac_count; i++) {
+ lmac = cgx->lmac_idmap[i];
+ if (!lmac)
+ continue;
+ free_irq(pci_irq_vector(cgx->pdev, CGX_LMAC_FWI + i * 9), lmac);
+ kfree(lmac->name);
+ kfree(lmac);
+ }
+
+ return 0;
+}
+
+static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct cgx *cgx;
+ int err, nvec;
+
+ cgx = devm_kzalloc(dev, sizeof(*cgx), GFP_KERNEL);
+ if (!cgx)
+ return -ENOMEM;
+ cgx->pdev = pdev;
+
+ pci_set_drvdata(pdev, cgx);
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ pci_set_drvdata(pdev, NULL);
+ return err;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto err_disable_device;
+ }
+
+ /* MAP configuration registers */
+ cgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+ if (!cgx->reg_base) {
+ dev_err(dev, "CGX: Cannot map CSR memory space, aborting\n");
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ nvec = CGX_NVEC;
+ err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
+ if (err < 0 || err != nvec) {
+ dev_err(dev, "Request for %d msix vectors failed, err %d\n",
+ nvec, err);
+ goto err_release_regions;
+ }
+
+ list_add(&cgx->cgx_list, &cgx_list);
+ cgx->cgx_id = cgx_get_cgx_cnt() - 1;
+
+ cgx_link_usertable_init();
+
+ err = cgx_lmac_init(cgx);
+ if (err)
+ goto err_release_lmac;
+
+ return 0;
+
+err_release_lmac:
+ cgx_lmac_exit(cgx);
+ list_del(&cgx->cgx_list);
+err_release_regions:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static void cgx_remove(struct pci_dev *pdev)
+{
+ struct cgx *cgx = pci_get_drvdata(pdev);
+
+ cgx_lmac_exit(cgx);
+ list_del(&cgx->cgx_list);
+ pci_free_irq_vectors(pdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+struct pci_driver cgx_driver = {
+ .name = DRV_NAME,
+ .id_table = cgx_id_table,
+ .probe = cgx_probe,
+ .remove = cgx_remove,
+};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
new file mode 100644
index 000000000000..0a66d2717442
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 CGX driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef CGX_H
+#define CGX_H
+
+#include "mbox.h"
+#include "cgx_fw_if.h"
+
+/* PCI device IDs */
+#define PCI_DEVID_OCTEONTX2_CGX 0xA059
+
+/* PCI BAR nos */
+#define PCI_CFG_REG_BAR_NUM 0
+
+#define MAX_CGX 3
+#define MAX_LMAC_PER_CGX 4
+#define CGX_OFFSET(x) ((x) * MAX_LMAC_PER_CGX)
+
+/* Registers */
+#define CGXX_CMRX_CFG 0x00
+#define CMR_EN BIT_ULL(55)
+#define DATA_PKT_TX_EN BIT_ULL(53)
+#define DATA_PKT_RX_EN BIT_ULL(54)
+#define CGX_LMAC_TYPE_SHIFT 40
+#define CGX_LMAC_TYPE_MASK 0xF
+#define CGXX_CMRX_INT 0x040
+#define FW_CGX_INT BIT_ULL(1)
+#define CGXX_CMRX_INT_ENA_W1S 0x058
+#define CGXX_CMRX_RX_ID_MAP 0x060
+#define CGXX_CMRX_RX_STAT0 0x070
+#define CGXX_CMRX_RX_LMACS 0x128
+#define CGXX_CMRX_RX_DMAC_CTL0 0x1F8
+#define CGX_DMAC_CTL0_CAM_ENABLE BIT_ULL(3)
+#define CGX_DMAC_CAM_ACCEPT BIT_ULL(3)
+#define CGX_DMAC_MCAST_MODE BIT_ULL(1)
+#define CGX_DMAC_BCAST_MODE BIT_ULL(0)
+#define CGXX_CMRX_RX_DMAC_CAM0 0x200
+#define CGX_DMAC_CAM_ADDR_ENABLE BIT_ULL(48)
+#define CGXX_CMRX_RX_DMAC_CAM1 0x400
+#define CGX_RX_DMAC_ADR_MASK GENMASK_ULL(47, 0)
+#define CGXX_CMRX_TX_STAT0 0x700
+#define CGXX_SCRATCH0_REG 0x1050
+#define CGXX_SCRATCH1_REG 0x1058
+#define CGX_CONST 0x2000
+#define CGXX_SPUX_CONTROL1 0x10000
+#define CGXX_SPUX_CONTROL1_LBK BIT_ULL(14)
+#define CGXX_GMP_PCS_MRX_CTL 0x30000
+#define CGXX_GMP_PCS_MRX_CTL_LBK BIT_ULL(14)
+
+#define CGX_COMMAND_REG CGXX_SCRATCH1_REG
+#define CGX_EVENT_REG CGXX_SCRATCH0_REG
+#define CGX_CMD_TIMEOUT 2200 /* msecs */
+
+#define CGX_NVEC 37
+#define CGX_LMAC_FWI 0
+
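+/* The firmware interface IRQ of each LMAC is taken from its own block of
+ * MSI-X vectors, at vector CGX_LMAC_FWI + lmac_id * 9 (see cgx_lmac_init()).
+ */
+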
+enum LMAC_TYPE {
+ LMAC_MODE_SGMII = 0,
+ LMAC_MODE_XAUI = 1,
+ LMAC_MODE_RXAUI = 2,
+ LMAC_MODE_10G_R = 3,
+ LMAC_MODE_40G_R = 4,
+ LMAC_MODE_QSGMII = 6,
+ LMAC_MODE_25G_R = 7,
+ LMAC_MODE_50G_R = 8,
+ LMAC_MODE_100G_R = 9,
+ LMAC_MODE_USXGMII = 10,
+ LMAC_MODE_MAX,
+};
+
+struct cgx_link_event {
+ struct cgx_link_user_info link_uinfo;
+ u8 cgx_id;
+ u8 lmac_id;
+};
+
+/**
+ * struct cgx_event_cb
+ * @notify_link_chg: callback for link change notification
+ * @data: data passed to callback function
+ */
+struct cgx_event_cb {
+ int (*notify_link_chg)(struct cgx_link_event *event, void *data);
+ void *data;
+};
+
+extern struct pci_driver cgx_driver;
+
+int cgx_get_cgx_cnt(void);
+int cgx_get_lmac_cnt(void *cgxd);
+void *cgx_get_pdata(int cgx_id);
+int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind);
+int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id);
+int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat);
+int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat);
+int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable);
+int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr);
+u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id);
+void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable);
+int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable);
+int cgx_get_link_info(void *cgxd, int lmac_id,
+ struct cgx_link_user_info *linfo);
+#endif /* CGX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
new file mode 100644
index 000000000000..fa17af3f4ba7
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 CGX driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __CGX_FW_INTF_H__
+#define __CGX_FW_INTF_H__
+
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+
+#define CGX_FIRMWARE_MAJOR_VER 1
+#define CGX_FIRMWARE_MINOR_VER 0
+
+#define CGX_EVENT_ACK 1UL
+
+/* CGX error types. set for cmd response status as CGX_STAT_FAIL */
+enum cgx_error_type {
+ CGX_ERR_NONE,
+ CGX_ERR_LMAC_NOT_ENABLED,
+ CGX_ERR_LMAC_MODE_INVALID,
+ CGX_ERR_REQUEST_ID_INVALID,
+ CGX_ERR_PREV_ACK_NOT_CLEAR,
+ CGX_ERR_PHY_LINK_DOWN,
+ CGX_ERR_PCS_RESET_FAIL,
+ CGX_ERR_AN_CPT_FAIL,
+ CGX_ERR_TX_NOT_IDLE,
+ CGX_ERR_RX_NOT_IDLE,
+ CGX_ERR_SPUX_BR_BLKLOCK_FAIL,
+ CGX_ERR_SPUX_RX_ALIGN_FAIL,
+ CGX_ERR_SPUX_TX_FAULT,
+ CGX_ERR_SPUX_RX_FAULT,
+ CGX_ERR_SPUX_RESET_FAIL,
+ CGX_ERR_SPUX_AN_RESET_FAIL,
+ CGX_ERR_SPUX_USX_AN_RESET_FAIL,
+ CGX_ERR_SMUX_RX_LINK_NOT_OK,
+ CGX_ERR_PCS_RECV_LINK_FAIL,
+ CGX_ERR_TRAINING_FAIL,
+ CGX_ERR_RX_EQU_FAIL,
+ CGX_ERR_SPUX_BER_FAIL,
+ CGX_ERR_SPUX_RSFEC_ALGN_FAIL, /* = 22 */
+};
+
+/* LINK speed types */
+enum cgx_link_speed {
+ CGX_LINK_NONE,
+ CGX_LINK_10M,
+ CGX_LINK_100M,
+ CGX_LINK_1G,
+ CGX_LINK_2HG,
+ CGX_LINK_5G,
+ CGX_LINK_10G,
+ CGX_LINK_20G,
+ CGX_LINK_25G,
+ CGX_LINK_40G,
+ CGX_LINK_50G,
+ CGX_LINK_100G,
+ CGX_LINK_SPEED_MAX,
+};
+
+/* REQUEST ID types. Input to firmware */
+enum cgx_cmd_id {
+ CGX_CMD_NONE,
+ CGX_CMD_GET_FW_VER,
+ CGX_CMD_GET_MAC_ADDR,
+ CGX_CMD_SET_MTU,
+ CGX_CMD_GET_LINK_STS, /* optional to user */
+ CGX_CMD_LINK_BRING_UP,
+ CGX_CMD_LINK_BRING_DOWN,
+ CGX_CMD_INTERNAL_LBK,
+ CGX_CMD_EXTERNAL_LBK,
+ CGX_CMD_HIGIG,
+ CGX_CMD_LINK_STATE_CHANGE,
+ CGX_CMD_MODE_CHANGE, /* hot plug support */
+ CGX_CMD_INTF_SHUTDOWN,
+ CGX_CMD_IRQ_ENABLE,
+ CGX_CMD_IRQ_DISABLE,
+};
+
+/* async event ids */
+enum cgx_evt_id {
+ CGX_EVT_NONE,
+ CGX_EVT_LINK_CHANGE,
+};
+
+/* event types - cause of interrupt */
+enum cgx_evt_type {
+ CGX_EVT_ASYNC,
+ CGX_EVT_CMD_RESP
+};
+
+enum cgx_stat {
+ CGX_STAT_SUCCESS,
+ CGX_STAT_FAIL
+};
+
+enum cgx_cmd_own {
+ CGX_CMD_OWN_NS,
+ CGX_CMD_OWN_FIRMWARE,
+};
+
+/* m - bit mask
+ * y - value to be written in the bit range
+ * x - input value whose bit range is to be modified
+ */
+#define FIELD_SET(m, y, x) \
+ (((x) & ~(m)) | \
+ FIELD_PREP((m), (y)))
+
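+/* Example: FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, 0) clears bits 7:2 of
+ * the input and writes command ID 1 into them, yielding 0x4.
+ */
+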
+/* scratchx(0) CSR used for ATF->non-secure SW communication.
+ * This acts as the status register and provides details on
+ * command ack/status, command response and error details.
+ */
+#define EVTREG_ACK BIT_ULL(0)
+#define EVTREG_EVT_TYPE BIT_ULL(1)
+#define EVTREG_STAT BIT_ULL(2)
+#define EVTREG_ID GENMASK_ULL(8, 3)
+
+/* Response to command IDs with command status as CGX_STAT_FAIL
+ *
+ * Not applicable for commands:
+ * CGX_CMD_LINK_BRING_UP/DOWN/CGX_EVT_LINK_CHANGE
+ */
+#define EVTREG_ERRTYPE GENMASK_ULL(18, 9)
+
+/* Response to cmd ID as CGX_CMD_GET_FW_VER with cmd status as
+ * CGX_STAT_SUCCESS
+ */
+#define RESP_MAJOR_VER GENMASK_ULL(12, 9)
+#define RESP_MINOR_VER GENMASK_ULL(16, 13)
+
+/* Response to cmd ID as CGX_CMD_GET_MAC_ADDR with cmd status as
+ * CGX_STAT_SUCCESS
+ */
+#define RESP_MAC_ADDR GENMASK_ULL(56, 9)
+
+/* Response to cmd ID - CGX_CMD_LINK_BRING_UP/DOWN, event ID CGX_EVT_LINK_CHANGE
+ * status can be either CGX_STAT_FAIL or CGX_STAT_SUCCESS
+ *
+ * In case of CGX_STAT_FAIL, it indicates CGX configuration failed
+ * when processing link up/down/change command.
+ * Both err_type and current link status will be updated
+ *
+ * In case of CGX_STAT_SUCCESS, err_type will be CGX_ERR_NONE and current
+ * link status will be updated
+ */
+struct cgx_lnk_sts {
+ uint64_t reserved1:9;
+ uint64_t link_up:1;
+ uint64_t full_duplex:1;
+ uint64_t speed:4; /* cgx_link_speed */
+ uint64_t err_type:10;
+ uint64_t reserved2:39;
+};
+
+#define RESP_LINKSTAT_UP GENMASK_ULL(9, 9)
+#define RESP_LINKSTAT_FDUPLEX GENMASK_ULL(10, 10)
+#define RESP_LINKSTAT_SPEED GENMASK_ULL(14, 11)
+#define RESP_LINKSTAT_ERRTYPE GENMASK_ULL(24, 15)
+
+/* scratchx(1) CSR used for non-secure SW->ATF communication
+ * This CSR acts as a command register
+ */
+#define CMDREG_OWN BIT_ULL(0)
+#define CMDREG_ID GENMASK_ULL(7, 2)
+
+/* Any command using enable/disable as an argument need
+ * to set this bitfield.
+ * Ex: Loopback, HiGig...
+ */
+#define CMDREG_ENABLE BIT_ULL(8)
+
+/* command argument to be passed for cmd ID - CGX_CMD_SET_MTU */
+#define CMDMTU_SIZE GENMASK_ULL(23, 8)
+
+/* command argument to be passed for cmd ID - CGX_CMD_LINK_CHANGE */
+#define CMDLINKCHANGE_LINKUP BIT_ULL(8)
+#define CMDLINKCHANGE_FULLDPLX BIT_ULL(9)
+#define CMDLINKCHANGE_SPEED GENMASK_ULL(13, 10)
+
+#endif /* __CGX_FW_INTF_H__ */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
new file mode 100644
index 000000000000..d39ada404c8f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -0,0 +1,211 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef COMMON_H
+#define COMMON_H
+
+#include "rvu_struct.h"
+
+#define OTX2_ALIGN 128 /* Align to cacheline */
+
+#define Q_SIZE_16 0ULL /* 16 entries */
+#define Q_SIZE_64 1ULL /* 64 entries */
+#define Q_SIZE_256 2ULL
+#define Q_SIZE_1K 3ULL
+#define Q_SIZE_4K 4ULL
+#define Q_SIZE_16K 5ULL
+#define Q_SIZE_64K 6ULL
+#define Q_SIZE_256K 7ULL
+#define Q_SIZE_1M 8ULL /* Million entries */
+#define Q_SIZE_MIN Q_SIZE_16
+#define Q_SIZE_MAX Q_SIZE_1M
+
+#define Q_COUNT(x) (16ULL << (2 * (x)))
+#define Q_SIZE(x, n) ((ilog2(x) - (n)) / 2)
+
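+/* Q_SIZE() is the inverse of Q_COUNT() for n = ilog2(16) = 4, e.g.
+ * Q_COUNT(Q_SIZE_256) = 16 << 4 = 256 and Q_SIZE(256, 4) = Q_SIZE_256.
+ */
+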
+/* Admin queue info */
+
+/* Since we intend to add only one instruction at a time,
+ * keep the queue size at its minimum.
+ */
+#define AQ_SIZE Q_SIZE_16
+/* HW head & tail pointer mask */
+#define AQ_PTR_MASK 0xFFFFF
+
+struct qmem {
+ void *base;
+ dma_addr_t iova;
+ int alloc_sz;
+ u8 entry_sz;
+ u8 align;
+ u32 qsize;
+};
+
+static inline int qmem_alloc(struct device *dev, struct qmem **q,
+ int qsize, int entry_sz)
+{
+ struct qmem *qmem;
+ u64 aligned_addr;
+
+ if (!qsize)
+ return -EINVAL;
+
+ *q = devm_kzalloc(dev, sizeof(*qmem), GFP_KERNEL);
+ if (!*q)
+ return -ENOMEM;
+ qmem = *q;
+
+ qmem->entry_sz = entry_sz;
+ qmem->alloc_sz = (qsize * entry_sz) + OTX2_ALIGN;
+ qmem->base = dma_zalloc_coherent(dev, qmem->alloc_sz,
+ &qmem->iova, GFP_KERNEL);
+ if (!qmem->base)
+ return -ENOMEM;
+
+ qmem->qsize = qsize;
+
+ aligned_addr = ALIGN((u64)qmem->iova, OTX2_ALIGN);
+ qmem->align = (aligned_addr - qmem->iova);
+ qmem->base += qmem->align;
+ qmem->iova += qmem->align;
+ return 0;
+}
+
+static inline void qmem_free(struct device *dev, struct qmem *qmem)
+{
+ if (!qmem)
+ return;
+
+ if (qmem->base)
+ dma_free_coherent(dev, qmem->alloc_sz,
+ qmem->base - qmem->align,
+ qmem->iova - qmem->align);
+ devm_kfree(dev, qmem);
+}
+
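+/* Minimal usage sketch ('dev' and the 128-byte entry size are illustrative,
+ * not mandated by this API):
+ *
+ *   struct qmem *q;
+ *   int err;
+ *
+ *   err = qmem_alloc(dev, &q, Q_COUNT(AQ_SIZE), 128);
+ *   if (err)
+ *           return err;
+ *   // entries are at q->base, device-visible at q->iova
+ *   qmem_free(dev, q);
+ */
+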
+struct admin_queue {
+ struct qmem *inst;
+ struct qmem *res;
+ spinlock_t lock; /* Serialize inst enqueue from PFs */
+};
+
+/* NPA aura count */
+enum npa_aura_sz {
+ NPA_AURA_SZ_0,
+ NPA_AURA_SZ_128,
+ NPA_AURA_SZ_256,
+ NPA_AURA_SZ_512,
+ NPA_AURA_SZ_1K,
+ NPA_AURA_SZ_2K,
+ NPA_AURA_SZ_4K,
+ NPA_AURA_SZ_8K,
+ NPA_AURA_SZ_16K,
+ NPA_AURA_SZ_32K,
+ NPA_AURA_SZ_64K,
+ NPA_AURA_SZ_128K,
+ NPA_AURA_SZ_256K,
+ NPA_AURA_SZ_512K,
+ NPA_AURA_SZ_1M,
+ NPA_AURA_SZ_MAX,
+};
+
+#define NPA_AURA_COUNT(x) (1ULL << ((x) + 6))
+
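+/* Example: NPA_AURA_COUNT(NPA_AURA_SZ_128) = 1 << (1 + 6) = 128 auras;
+ * the top of the range, NPA_AURA_SZ_1M, gives 1 << 20.
+ */
+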
+/* NPA AQ result structure for init/read/write of aura HW contexts */
+struct npa_aq_aura_res {
+ struct npa_aq_res_s res;
+ struct npa_aura_s aura_ctx;
+ struct npa_aura_s ctx_mask;
+};
+
+/* NPA AQ result structure for init/read/write of pool HW contexts */
+struct npa_aq_pool_res {
+ struct npa_aq_res_s res;
+ struct npa_pool_s pool_ctx;
+ struct npa_pool_s ctx_mask;
+};
+
+/* NIX Transmit schedulers */
+enum nix_scheduler {
+ NIX_TXSCH_LVL_SMQ = 0x0,
+ NIX_TXSCH_LVL_MDQ = 0x0,
+ NIX_TXSCH_LVL_TL4 = 0x1,
+ NIX_TXSCH_LVL_TL3 = 0x2,
+ NIX_TXSCH_LVL_TL2 = 0x3,
+ NIX_TXSCH_LVL_TL1 = 0x4,
+ NIX_TXSCH_LVL_CNT = 0x5,
+};
+
+/* NIX RX action operation */
+#define NIX_RX_ACTIONOP_DROP (0x0ull)
+#define NIX_RX_ACTIONOP_UCAST (0x1ull)
+#define NIX_RX_ACTIONOP_UCAST_IPSEC (0x2ull)
+#define NIX_RX_ACTIONOP_MCAST (0x3ull)
+#define NIX_RX_ACTIONOP_RSS (0x4ull)
+
+/* NIX TX action operation */
+#define NIX_TX_ACTIONOP_DROP (0x0ull)
+#define NIX_TX_ACTIONOP_UCAST_DEFAULT (0x1ull)
+#define NIX_TX_ACTIONOP_UCAST_CHAN (0x2ull)
+#define NIX_TX_ACTIONOP_MCAST (0x3ull)
+#define NIX_TX_ACTIONOP_DROP_VIOL (0x5ull)
+
+#define NPC_MCAM_KEY_X1 0
+#define NPC_MCAM_KEY_X2 1
+#define NPC_MCAM_KEY_X4 2
+
+#define NIX_INTF_RX 0
+#define NIX_INTF_TX 1
+
+#define NIX_INTF_TYPE_CGX 0
+#define NIX_INTF_TYPE_LBK 1
+
+#define MAX_LMAC_PKIND 12
+#define NIX_LINK_CGX_LMAC(a, b) (0 + 4 * (a) + (b))
+#define NIX_CHAN_CGX_LMAC_CHX(a, b, c) (0x800 + 0x100 * (a) + 0x10 * (b) + (c))
+
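+/* Example: cgx 1, lmac 2 maps to NIX link NIX_LINK_CGX_LMAC(1, 2) = 6, and
+ * its channel 0 to NIX_CHAN_CGX_LMAC_CHX(1, 2, 0) = 0x920.
+ */
+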
+/* NIX LSO format indices.
+ * As of now TSO is the only one using, so statically assigning indices.
+ */
+#define NIX_LSO_FORMAT_IDX_TSOV4 0
+#define NIX_LSO_FORMAT_IDX_TSOV6 1
+
+/* RSS info */
+#define MAX_RSS_GROUPS 8
+/* Group 0 has to be used in default pkt forwarding MCAM entries
+ * reserved for NIXLFs. Groups 1-7 can be used for RSS for ntuple
+ * filters.
+ */
+#define DEFAULT_RSS_CONTEXT_GROUP 0
+#define MAX_RSS_INDIR_TBL_SIZE 256 /* 1 << Max adder bits */
+
+/* NIX flow tag, key type flags */
+#define FLOW_KEY_TYPE_PORT BIT(0)
+#define FLOW_KEY_TYPE_IPV4 BIT(1)
+#define FLOW_KEY_TYPE_IPV6 BIT(2)
+#define FLOW_KEY_TYPE_TCP BIT(3)
+#define FLOW_KEY_TYPE_UDP BIT(4)
+#define FLOW_KEY_TYPE_SCTP BIT(5)
+
+/* NIX flow tag algorithm indices, max is 31 */
+enum {
+ FLOW_KEY_ALG_PORT,
+ FLOW_KEY_ALG_IP,
+ FLOW_KEY_ALG_TCP,
+ FLOW_KEY_ALG_UDP,
+ FLOW_KEY_ALG_SCTP,
+ FLOW_KEY_ALG_TCP_UDP,
+ FLOW_KEY_ALG_TCP_SCTP,
+ FLOW_KEY_ALG_UDP_SCTP,
+ FLOW_KEY_ALG_TCP_UDP_SCTP,
+ FLOW_KEY_ALG_MAX,
+};
+
+#endif /* COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
new file mode 100644
index 000000000000..85ba24a05774
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
@@ -0,0 +1,303 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+
+#include "rvu_reg.h"
+#include "mbox.h"
+
+static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+
+void otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ struct mbox_hdr *tx_hdr, *rx_hdr;
+
+ tx_hdr = mdev->mbase + mbox->tx_start;
+ rx_hdr = mdev->mbase + mbox->rx_start;
+
+ spin_lock(&mdev->mbox_lock);
+ mdev->msg_size = 0;
+ mdev->rsp_size = 0;
+ tx_hdr->num_msgs = 0;
+ rx_hdr->num_msgs = 0;
+ spin_unlock(&mdev->mbox_lock);
+}
+EXPORT_SYMBOL(otx2_mbox_reset);
+
+void otx2_mbox_destroy(struct otx2_mbox *mbox)
+{
+ mbox->reg_base = NULL;
+ mbox->hwbase = NULL;
+
+ kfree(mbox->dev);
+ mbox->dev = NULL;
+}
+EXPORT_SYMBOL(otx2_mbox_destroy);
+
+int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev,
+ void *reg_base, int direction, int ndevs)
+{
+ struct otx2_mbox_dev *mdev;
+ int devid;
+
+ switch (direction) {
+ case MBOX_DIR_AFPF:
+ case MBOX_DIR_PFVF:
+ mbox->tx_start = MBOX_DOWN_TX_START;
+ mbox->rx_start = MBOX_DOWN_RX_START;
+ mbox->tx_size = MBOX_DOWN_TX_SIZE;
+ mbox->rx_size = MBOX_DOWN_RX_SIZE;
+ break;
+ case MBOX_DIR_PFAF:
+ case MBOX_DIR_VFPF:
+ mbox->tx_start = MBOX_DOWN_RX_START;
+ mbox->rx_start = MBOX_DOWN_TX_START;
+ mbox->tx_size = MBOX_DOWN_RX_SIZE;
+ mbox->rx_size = MBOX_DOWN_TX_SIZE;
+ break;
+ case MBOX_DIR_AFPF_UP:
+ case MBOX_DIR_PFVF_UP:
+ mbox->tx_start = MBOX_UP_TX_START;
+ mbox->rx_start = MBOX_UP_RX_START;
+ mbox->tx_size = MBOX_UP_TX_SIZE;
+ mbox->rx_size = MBOX_UP_RX_SIZE;
+ break;
+ case MBOX_DIR_PFAF_UP:
+ case MBOX_DIR_VFPF_UP:
+ mbox->tx_start = MBOX_UP_RX_START;
+ mbox->rx_start = MBOX_UP_TX_START;
+ mbox->tx_size = MBOX_UP_RX_SIZE;
+ mbox->rx_size = MBOX_UP_TX_SIZE;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ switch (direction) {
+ case MBOX_DIR_AFPF:
+ case MBOX_DIR_AFPF_UP:
+ mbox->trigger = RVU_AF_AFPF_MBOX0;
+ mbox->tr_shift = 4;
+ break;
+ case MBOX_DIR_PFAF:
+ case MBOX_DIR_PFAF_UP:
+ mbox->trigger = RVU_PF_PFAF_MBOX1;
+ mbox->tr_shift = 0;
+ break;
+ case MBOX_DIR_PFVF:
+ case MBOX_DIR_PFVF_UP:
+ mbox->trigger = RVU_PF_VFX_PFVF_MBOX0;
+ mbox->tr_shift = 12;
+ break;
+ case MBOX_DIR_VFPF:
+ case MBOX_DIR_VFPF_UP:
+ mbox->trigger = RVU_VF_VFPF_MBOX1;
+ mbox->tr_shift = 0;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ mbox->reg_base = reg_base;
+ mbox->hwbase = hwbase;
+ mbox->pdev = pdev;
+
+ mbox->dev = kcalloc(ndevs, sizeof(struct otx2_mbox_dev), GFP_KERNEL);
+ if (!mbox->dev) {
+ otx2_mbox_destroy(mbox);
+ return -ENOMEM;
+ }
+
+ mbox->ndevs = ndevs;
+ for (devid = 0; devid < ndevs; devid++) {
+ mdev = &mbox->dev[devid];
+ mdev->mbase = mbox->hwbase + (devid * MBOX_SIZE);
+ spin_lock_init(&mdev->mbox_lock);
+ /* Init header to reset value */
+ otx2_mbox_reset(mbox, devid);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(otx2_mbox_init);
+
+int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ int timeout = 0, sleep = 1;
+
+ while (mdev->num_msgs != mdev->msgs_acked) {
+ msleep(sleep);
+ timeout += sleep;
+ if (timeout >= MBOX_RSP_TIMEOUT)
+ return -EIO;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(otx2_mbox_wait_for_rsp);
+
+int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ unsigned long timeout = jiffies + 1 * HZ;
+
+ while (!time_after(jiffies, timeout)) {
+ if (mdev->num_msgs == mdev->msgs_acked)
+ return 0;
+ cpu_relax();
+ }
+ return -EIO;
+}
+EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp);
+
+void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ struct mbox_hdr *tx_hdr, *rx_hdr;
+
+ tx_hdr = mdev->mbase + mbox->tx_start;
+ rx_hdr = mdev->mbase + mbox->rx_start;
+
+ spin_lock(&mdev->mbox_lock);
+ /* Reset header for next messages */
+ mdev->msg_size = 0;
+ mdev->rsp_size = 0;
+ mdev->msgs_acked = 0;
+
+ /* Sync mbox data into memory */
+ smp_wmb();
+
+ /* num_msgs != 0 signals to the peer that the buffer has a number of
+ * messages. So this should be written after writing all the messages
+ * to the shared memory.
+ */
+ tx_hdr->num_msgs = mdev->num_msgs;
+ rx_hdr->num_msgs = 0;
+ spin_unlock(&mdev->mbox_lock);
+
+ /* The interrupt should be fired after num_msgs is written
+ * to the shared memory
+ */
+ writeq(1, (void __iomem *)mbox->reg_base +
+ (mbox->trigger | (devid << mbox->tr_shift)));
+}
+EXPORT_SYMBOL(otx2_mbox_msg_send);
+
+struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
+ int size, int size_rsp)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ struct mbox_msghdr *msghdr = NULL;
+
+ spin_lock(&mdev->mbox_lock);
+ size = ALIGN(size, MBOX_MSG_ALIGN);
+ size_rsp = ALIGN(size_rsp, MBOX_MSG_ALIGN);
+ /* Check if there is space in mailbox */
+ if ((mdev->msg_size + size) > mbox->tx_size - msgs_offset)
+ goto exit;
+ if ((mdev->rsp_size + size_rsp) > mbox->rx_size - msgs_offset)
+ goto exit;
+
+ if (mdev->msg_size == 0)
+ mdev->num_msgs = 0;
+ mdev->num_msgs++;
+
+ msghdr = mdev->mbase + mbox->tx_start + msgs_offset + mdev->msg_size;
+
+ /* Clear the whole msg region */
+ memset(msghdr, 0, sizeof(*msghdr) + size);
+ /* Init message header with reset values */
+ msghdr->ver = OTX2_MBOX_VERSION;
+ mdev->msg_size += size;
+ mdev->rsp_size += size_rsp;
+ msghdr->next_msgoff = mdev->msg_size + msgs_offset;
+exit:
+ spin_unlock(&mdev->mbox_lock);
+
+ return msghdr;
+}
+EXPORT_SYMBOL(otx2_mbox_alloc_msg_rsp);
+
+struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
+ struct mbox_msghdr *msg)
+{
+ unsigned long imsg = mbox->tx_start + msgs_offset;
+ unsigned long irsp = mbox->rx_start + msgs_offset;
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ u16 msgs;
+
+ if (mdev->num_msgs != mdev->msgs_acked)
+ return ERR_PTR(-ENODEV);
+
+ for (msgs = 0; msgs < mdev->msgs_acked; msgs++) {
+ struct mbox_msghdr *pmsg = mdev->mbase + imsg;
+ struct mbox_msghdr *prsp = mdev->mbase + irsp;
+
+ if (msg == pmsg) {
+ if (pmsg->id != prsp->id)
+ return ERR_PTR(-ENODEV);
+ return prsp;
+ }
+
+ imsg = pmsg->next_msgoff;
+ irsp = prsp->next_msgoff;
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL(otx2_mbox_get_rsp);
+
+int
+otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid, u16 pcifunc, u16 id)
+{
+ struct msg_rsp *rsp;
+
+ rsp = (struct msg_rsp *)
+ otx2_mbox_alloc_msg(mbox, devid, sizeof(*rsp));
+ if (!rsp)
+ return -ENOMEM;
+ rsp->hdr.id = id;
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
+ rsp->hdr.rc = MBOX_MSG_INVALID;
+ rsp->hdr.pcifunc = pcifunc;
+ return 0;
+}
+EXPORT_SYMBOL(otx2_reply_invalid_msg);
+
+bool otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ bool ret;
+
+ spin_lock(&mdev->mbox_lock);
+ ret = mdev->num_msgs != 0;
+ spin_unlock(&mdev->mbox_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(otx2_mbox_nonempty);
+
+const char *otx2_mbox_id2name(u16 id)
+{
+ switch (id) {
+#define M(_name, _id, _1, _2) case _id: return # _name;
+ MBOX_MESSAGES
+#undef M
+ default:
+ return "INVALID ID";
+ }
+}
+EXPORT_SYMBOL(otx2_mbox_id2name);
+
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
new file mode 100644
index 000000000000..a15a59c9a239
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -0,0 +1,525 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef MBOX_H
+#define MBOX_H
+
+#include <linux/etherdevice.h>
+#include <linux/sizes.h>
+
+#include "rvu_struct.h"
+#include "common.h"
+
+#define MBOX_SIZE SZ_64K
+
+/* AF/PF mbox: PF initiated; PF/VF mbox: VF initiated */
+#define MBOX_DOWN_RX_START 0
+#define MBOX_DOWN_RX_SIZE (46 * SZ_1K)
+#define MBOX_DOWN_TX_START (MBOX_DOWN_RX_START + MBOX_DOWN_RX_SIZE)
+#define MBOX_DOWN_TX_SIZE (16 * SZ_1K)
+/* AF/PF mbox: AF initiated; PF/VF mbox: PF initiated */
+#define MBOX_UP_RX_START (MBOX_DOWN_TX_START + MBOX_DOWN_TX_SIZE)
+#define MBOX_UP_RX_SIZE SZ_1K
+#define MBOX_UP_TX_START (MBOX_UP_RX_START + MBOX_UP_RX_SIZE)
+#define MBOX_UP_TX_SIZE SZ_1K
+
+#if MBOX_UP_TX_SIZE + MBOX_UP_TX_START != MBOX_SIZE
+# error "incorrect mailbox area sizes"
+#endif
+
+#define INTR_MASK(pfvfs) ((pfvfs < 64) ? (BIT_ULL(pfvfs) - 1) : (~0ull))
+
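+/* Example: INTR_MASK(4) = 0xF, i.e. one interrupt bit per PF/VF; with 64 or
+ * more peers the full 64-bit mask ~0ull is used.
+ */
+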
+#define MBOX_RSP_TIMEOUT 1000 /* in ms, Time to wait for mbox response */
+
+#define MBOX_MSG_ALIGN 16 /* Align mbox msg start to 16bytes */
+
+/* Mailbox directions */
+#define MBOX_DIR_AFPF 0 /* AF replies to PF */
+#define MBOX_DIR_PFAF 1 /* PF sends messages to AF */
+#define MBOX_DIR_PFVF 2 /* PF replies to VF */
+#define MBOX_DIR_VFPF 3 /* VF sends messages to PF */
+#define MBOX_DIR_AFPF_UP 4 /* AF sends messages to PF */
+#define MBOX_DIR_PFAF_UP 5 /* PF replies to AF */
+#define MBOX_DIR_PFVF_UP 6 /* PF sends messages to VF */
+#define MBOX_DIR_VFPF_UP 7 /* VF replies to PF */
+
+struct otx2_mbox_dev {
+ void *mbase; /* This dev's mbox region */
+ spinlock_t mbox_lock;
+ u16 msg_size; /* Total msg size to be sent */
+ u16 rsp_size; /* Total rsp size, to check replies fit in the Rx region */
+ u16 num_msgs; /* No of msgs sent or waiting for response */
+ u16 msgs_acked; /* No of msgs for which response is received */
+};
+
+struct otx2_mbox {
+ struct pci_dev *pdev;
+ void *hwbase; /* Mbox region advertised by HW */
+ void *reg_base;/* CSR base for this dev */
+ u64 trigger; /* Trigger mbox notification */
+ u16 tr_shift; /* Mbox trigger shift */
+ u64 rx_start; /* Offset of Rx region in mbox memory */
+ u64 tx_start; /* Offset of Tx region in mbox memory */
+ u16 rx_size; /* Size of Rx region */
+ u16 tx_size; /* Size of Tx region */
+ u16 ndevs; /* The number of peers */
+ struct otx2_mbox_dev *dev;
+};
+
+/* Header which precedes all mbox messages */
+struct mbox_hdr {
+ u16 num_msgs; /* No of msgs embedded */
+};
+
+/* Header which precedes every msg and is also part of it */
+struct mbox_msghdr {
+ u16 pcifunc; /* Who's sending this msg */
+ u16 id; /* Mbox message ID */
+#define OTX2_MBOX_REQ_SIG (0xdead)
+#define OTX2_MBOX_RSP_SIG (0xbeef)
+ u16 sig; /* Signature, for validating corrupted msgs */
+#define OTX2_MBOX_VERSION (0x0001)
+ u16 ver; /* Version of msg's structure for this ID */
+ u16 next_msgoff; /* Offset of next msg within mailbox region */
+ int rc; /* Msg process'ed response code */
+};
+
+void otx2_mbox_reset(struct otx2_mbox *mbox, int devid);
+void otx2_mbox_destroy(struct otx2_mbox *mbox);
+int otx2_mbox_init(struct otx2_mbox *mbox, void __force *hwbase,
+ struct pci_dev *pdev, void __force *reg_base,
+ int direction, int ndevs);
+void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid);
+int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid);
+int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid);
+struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
+ int size, int size_rsp);
+struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
+ struct mbox_msghdr *msg);
+int otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid,
+ u16 pcifunc, u16 id);
+bool otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid);
+const char *otx2_mbox_id2name(u16 id);
+static inline struct mbox_msghdr *otx2_mbox_alloc_msg(struct otx2_mbox *mbox,
+ int devid, int size)
+{
+ return otx2_mbox_alloc_msg_rsp(mbox, devid, size, 0);
+}
+
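+/* Typical sender flow, as a minimal sketch ('mbox', 'devid' and the READY
+ * message are illustrative; error handling trimmed):
+ *
+ *   struct msg_req *req;
+ *   int err;
+ *
+ *   req = (struct msg_req *)otx2_mbox_alloc_msg(mbox, devid, sizeof(*req));
+ *   if (!req)
+ *           return -ENOMEM;
+ *   req->hdr.id = MBOX_MSG_READY;
+ *   req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ *   otx2_mbox_msg_send(mbox, devid);
+ *   err = otx2_mbox_wait_for_rsp(mbox, devid);
+ */
+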
+/* Mailbox message types */
+#define MBOX_MSG_MASK 0xFFFF
+#define MBOX_MSG_INVALID 0xFFFE
+#define MBOX_MSG_MAX 0xFFFF
+
+#define MBOX_MESSAGES \
+/* Generic mbox IDs (range 0x000 - 0x1FF) */ \
+M(READY, 0x001, msg_req, ready_msg_rsp) \
+M(ATTACH_RESOURCES, 0x002, rsrc_attach, msg_rsp) \
+M(DETACH_RESOURCES, 0x003, rsrc_detach, msg_rsp) \
+M(MSIX_OFFSET, 0x004, msg_req, msix_offset_rsp) \
+/* CGX mbox IDs (range 0x200 - 0x3FF) */ \
+M(CGX_START_RXTX, 0x200, msg_req, msg_rsp) \
+M(CGX_STOP_RXTX, 0x201, msg_req, msg_rsp) \
+M(CGX_STATS, 0x202, msg_req, cgx_stats_rsp) \
+M(CGX_MAC_ADDR_SET, 0x203, cgx_mac_addr_set_or_get, \
+ cgx_mac_addr_set_or_get) \
+M(CGX_MAC_ADDR_GET, 0x204, cgx_mac_addr_set_or_get, \
+ cgx_mac_addr_set_or_get) \
+M(CGX_PROMISC_ENABLE, 0x205, msg_req, msg_rsp) \
+M(CGX_PROMISC_DISABLE, 0x206, msg_req, msg_rsp) \
+M(CGX_START_LINKEVENTS, 0x207, msg_req, msg_rsp) \
+M(CGX_STOP_LINKEVENTS, 0x208, msg_req, msg_rsp) \
+M(CGX_GET_LINKINFO, 0x209, msg_req, cgx_link_info_msg) \
+M(CGX_INTLBK_ENABLE, 0x20A, msg_req, msg_rsp) \
+M(CGX_INTLBK_DISABLE, 0x20B, msg_req, msg_rsp) \
+/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
+M(NPA_LF_ALLOC, 0x400, npa_lf_alloc_req, npa_lf_alloc_rsp) \
+M(NPA_LF_FREE, 0x401, msg_req, msg_rsp) \
+M(NPA_AQ_ENQ, 0x402, npa_aq_enq_req, npa_aq_enq_rsp) \
+M(NPA_HWCTX_DISABLE, 0x403, hwctx_disable_req, msg_rsp) \
+/* SSO/SSOW mbox IDs (range 0x600 - 0x7FF) */ \
+/* TIM mbox IDs (range 0x800 - 0x9FF) */ \
+/* CPT mbox IDs (range 0xA00 - 0xBFF) */ \
+/* NPC mbox IDs (range 0x6000 - 0x7FFF) */ \
+/* NIX mbox IDs (range 0x8000 - 0xFFFF) */ \
+M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc_req, nix_lf_alloc_rsp) \
+M(NIX_LF_FREE, 0x8001, msg_req, msg_rsp) \
+M(NIX_AQ_ENQ, 0x8002, nix_aq_enq_req, nix_aq_enq_rsp) \
+M(NIX_HWCTX_DISABLE, 0x8003, hwctx_disable_req, msg_rsp) \
+M(NIX_TXSCH_ALLOC, 0x8004, nix_txsch_alloc_req, nix_txsch_alloc_rsp) \
+M(NIX_TXSCH_FREE, 0x8005, nix_txsch_free_req, msg_rsp) \
+M(NIX_TXSCHQ_CFG, 0x8006, nix_txschq_config, msg_rsp) \
+M(NIX_STATS_RST, 0x8007, msg_req, msg_rsp) \
+M(NIX_VTAG_CFG, 0x8008, nix_vtag_config, msg_rsp) \
+M(NIX_RSS_FLOWKEY_CFG, 0x8009, nix_rss_flowkey_cfg, msg_rsp) \
+M(NIX_SET_MAC_ADDR, 0x800a, nix_set_mac_addr, msg_rsp) \
+M(NIX_SET_RX_MODE, 0x800b, nix_rx_mode, msg_rsp)
+
+/* Messages initiated by AF (range 0xC00 - 0xDFF) */
+#define MBOX_UP_CGX_MESSAGES \
+M(CGX_LINK_EVENT, 0xC00, cgx_link_info_msg, msg_rsp)
+
+enum {
+#define M(_name, _id, _1, _2) MBOX_MSG_ ## _name = _id,
+MBOX_MESSAGES
+MBOX_UP_CGX_MESSAGES
+#undef M
+};
+
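+/* The M() expansion above turns every table entry into an enum constant,
+ * e.g. M(READY, 0x001, ...) becomes MBOX_MSG_READY = 0x001.
+ */
+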
+/* Mailbox message formats */
+
+#define RVU_DEFAULT_PF_FUNC 0xFFFF
+
+/* Generic request msg used for those mbox messages which
+ * don't send any data in the request.
+ */
+struct msg_req {
+ struct mbox_msghdr hdr;
+};
+
+/* Generic response msg used as an ack or response for those mbox
+ * messages which don't have a specific rsp msg format.
+ */
+struct msg_rsp {
+ struct mbox_msghdr hdr;
+};
+
+struct ready_msg_rsp {
+ struct mbox_msghdr hdr;
+ u16 sclk_feq; /* SCLK frequency */
+};
+
+/* Structure for requesting resource provisioning.
+ * 'modify' flag is to be used when either requesting more
+ * resources or detaching part of a certain resource type.
+ * The rest of the fields specify how many of each type are
+ * to be attached.
+ */
+struct rsrc_attach {
+ struct mbox_msghdr hdr;
+ u8 modify:1;
+ u8 npalf:1;
+ u8 nixlf:1;
+ u16 sso;
+ u16 ssow;
+ u16 timlfs;
+ u16 cptlfs;
+};
+
+/* Structure for relinquishing resources.
+ * 'partial' flag is to be used when relinquishing all resources
+ * of only certain types. If not set, all resources of all
+ * types provisioned to the RVU function will be detached.
+ */
+struct rsrc_detach {
+ struct mbox_msghdr hdr;
+ u8 partial:1;
+ u8 npalf:1;
+ u8 nixlf:1;
+ u8 sso:1;
+ u8 ssow:1;
+ u8 timlfs:1;
+ u8 cptlfs:1;
+};
+
+#define MSIX_VECTOR_INVALID 0xFFFF
+#define MAX_RVU_BLKLF_CNT 256
+
+struct msix_offset_rsp {
+ struct mbox_msghdr hdr;
+ u16 npa_msixoff;
+ u16 nix_msixoff;
+ u8 sso;
+ u8 ssow;
+ u8 timlfs;
+ u8 cptlfs;
+ u16 sso_msixoff[MAX_RVU_BLKLF_CNT];
+ u16 ssow_msixoff[MAX_RVU_BLKLF_CNT];
+ u16 timlf_msixoff[MAX_RVU_BLKLF_CNT];
+ u16 cptlf_msixoff[MAX_RVU_BLKLF_CNT];
+};
+
+/* CGX mbox message formats */
+
+struct cgx_stats_rsp {
+ struct mbox_msghdr hdr;
+#define CGX_RX_STATS_COUNT 13
+#define CGX_TX_STATS_COUNT 18
+ u64 rx_stats[CGX_RX_STATS_COUNT];
+ u64 tx_stats[CGX_TX_STATS_COUNT];
+};
+
+/* Structure for requesting the operation for
+ * setting/getting mac address in the CGX interface
+ */
+struct cgx_mac_addr_set_or_get {
+ struct mbox_msghdr hdr;
+ u8 mac_addr[ETH_ALEN];
+};
+
+struct cgx_link_user_info {
+ uint64_t link_up:1;
+ uint64_t full_duplex:1;
+ uint64_t lmac_type_id:4;
+ uint64_t speed:20; /* speed in Mbps */
+#define LMACTYPE_STR_LEN 16
+ char lmac_type[LMACTYPE_STR_LEN];
+};
+
+struct cgx_link_info_msg {
+ struct mbox_msghdr hdr;
+ struct cgx_link_user_info link_info;
+};
+
+/* NPA mbox message formats */
+
+/* NPA mailbox error codes
+ * Range 301 - 400.
+ */
+enum npa_af_status {
+ NPA_AF_ERR_PARAM = -301,
+ NPA_AF_ERR_AQ_FULL = -302,
+ NPA_AF_ERR_AQ_ENQUEUE = -303,
+ NPA_AF_ERR_AF_LF_INVALID = -304,
+ NPA_AF_ERR_AF_LF_ALLOC = -305,
+ NPA_AF_ERR_LF_RESET = -306,
+};
+
+/* For NPA LF context alloc and init */
+struct npa_lf_alloc_req {
+ struct mbox_msghdr hdr;
+ int node;
+ int aura_sz; /* No of auras */
+ u32 nr_pools; /* No of pools */
+};
+
+struct npa_lf_alloc_rsp {
+ struct mbox_msghdr hdr;
+ u32 stack_pg_ptrs; /* No of ptrs per stack page */
+ u32 stack_pg_bytes; /* Size of stack page */
+ u16 qints; /* NPA_AF_CONST::QINTS */
+};
+
+/* NPA AQ enqueue msg */
+struct npa_aq_enq_req {
+ struct mbox_msghdr hdr;
+ u32 aura_id;
+ u8 ctype;
+ u8 op;
+ union {
+ /* Valid when op == WRITE/INIT and ctype == AURA.
+ * LF fills the pool_id in aura.pool_addr. AF will translate
+ * the pool_id to pool context pointer.
+ */
+ struct npa_aura_s aura;
+ /* Valid when op == WRITE/INIT and ctype == POOL */
+ struct npa_pool_s pool;
+ };
+ /* Mask data when op == WRITE (1=write, 0=don't write) */
+ union {
+ /* Valid when op == WRITE and ctype == AURA */
+ struct npa_aura_s aura_mask;
+ /* Valid when op == WRITE and ctype == POOL */
+ struct npa_pool_s pool_mask;
+ };
+};
+
+struct npa_aq_enq_rsp {
+ struct mbox_msghdr hdr;
+ union {
+ /* Valid when op == READ and ctype == AURA */
+ struct npa_aura_s aura;
+ /* Valid when op == READ and ctype == POOL */
+ struct npa_pool_s pool;
+ };
+};
+
+/* Disable all contexts of type 'ctype' */
+struct hwctx_disable_req {
+ struct mbox_msghdr hdr;
+ u8 ctype;
+};
+
+/* NIX mailbox error codes
+ * Range 401 - 500.
+ */
+enum nix_af_status {
+ NIX_AF_ERR_PARAM = -401,
+ NIX_AF_ERR_AQ_FULL = -402,
+ NIX_AF_ERR_AQ_ENQUEUE = -403,
+ NIX_AF_ERR_AF_LF_INVALID = -404,
+ NIX_AF_ERR_AF_LF_ALLOC = -405,
+ NIX_AF_ERR_TLX_ALLOC_FAIL = -406,
+ NIX_AF_ERR_TLX_INVALID = -407,
+ NIX_AF_ERR_RSS_SIZE_INVALID = -408,
+ NIX_AF_ERR_RSS_GRPS_INVALID = -409,
+ NIX_AF_ERR_FRS_INVALID = -410,
+ NIX_AF_ERR_RX_LINK_INVALID = -411,
+ NIX_AF_INVAL_TXSCHQ_CFG = -412,
+ NIX_AF_SMQ_FLUSH_FAILED = -413,
+ NIX_AF_ERR_LF_RESET = -414,
+};
+
+/* For NIX LF context alloc and init */
+struct nix_lf_alloc_req {
+ struct mbox_msghdr hdr;
+ int node;
+ u32 rq_cnt; /* No of receive queues */
+ u32 sq_cnt; /* No of send queues */
+ u32 cq_cnt; /* No of completion queues */
+ u8 xqe_sz;
+ u16 rss_sz;
+ u8 rss_grps;
+ u16 npa_func;
+ u16 sso_func;
+ u64 rx_cfg; /* See NIX_AF_LF(0..127)_RX_CFG */
+};
+
+struct nix_lf_alloc_rsp {
+ struct mbox_msghdr hdr;
+ u16 sqb_size;
+ u16 rx_chan_base;
+ u16 tx_chan_base;
+ u8 rx_chan_cnt; /* total number of RX channels */
+ u8 tx_chan_cnt; /* total number of TX channels */
+ u8 lso_tsov4_idx;
+ u8 lso_tsov6_idx;
+ u8 mac_addr[ETH_ALEN];
+};
+
+/* NIX AQ enqueue msg */
+struct nix_aq_enq_req {
+ struct mbox_msghdr hdr;
+ u32 qidx;
+ u8 ctype;
+ u8 op;
+ union {
+ struct nix_rq_ctx_s rq;
+ struct nix_sq_ctx_s sq;
+ struct nix_cq_ctx_s cq;
+ struct nix_rsse_s rss;
+ struct nix_rx_mce_s mce;
+ };
+ union {
+ struct nix_rq_ctx_s rq_mask;
+ struct nix_sq_ctx_s sq_mask;
+ struct nix_cq_ctx_s cq_mask;
+ struct nix_rsse_s rss_mask;
+ struct nix_rx_mce_s mce_mask;
+ };
+};
+
+struct nix_aq_enq_rsp {
+ struct mbox_msghdr hdr;
+ union {
+ struct nix_rq_ctx_s rq;
+ struct nix_sq_ctx_s sq;
+ struct nix_cq_ctx_s cq;
+ struct nix_rsse_s rss;
+ struct nix_rx_mce_s mce;
+ };
+};
+
+/* Tx scheduler/shaper mailbox messages */
+
+#define MAX_TXSCHQ_PER_FUNC 128
+
+struct nix_txsch_alloc_req {
+ struct mbox_msghdr hdr;
+ /* Scheduler queue count request at each level */
+ u16 schq_contig[NIX_TXSCH_LVL_CNT]; /* No of contiguous queues */
+ u16 schq[NIX_TXSCH_LVL_CNT]; /* No of non-contiguous queues */
+};
+
+struct nix_txsch_alloc_rsp {
+ struct mbox_msghdr hdr;
+ /* Scheduler queue count allocated at each level */
+ u16 schq_contig[NIX_TXSCH_LVL_CNT];
+ u16 schq[NIX_TXSCH_LVL_CNT];
+ /* Scheduler queue list allocated at each level */
+ u16 schq_contig_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+ u16 schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+};
+
+struct nix_txsch_free_req {
+ struct mbox_msghdr hdr;
+#define TXSCHQ_FREE_ALL BIT_ULL(0)
+ u16 flags;
+ /* Scheduler queue level to be freed */
+ u16 schq_lvl;
+ /* List of scheduler queues to be freed */
+ u16 schq;
+};
+
+struct nix_txschq_config {
+ struct mbox_msghdr hdr;
+ u8 lvl; /* SMQ/MDQ/TL4/TL3/TL2/TL1 */
+#define TXSCHQ_IDX_SHIFT 16
+#define TXSCHQ_IDX_MASK (BIT_ULL(10) - 1)
+#define TXSCHQ_IDX(reg, shift) (((reg) >> (shift)) & TXSCHQ_IDX_MASK)
+ u8 num_regs;
+#define MAX_REGS_PER_MBOX_MSG 20
+ u64 reg[MAX_REGS_PER_MBOX_MSG];
+ u64 regval[MAX_REGS_PER_MBOX_MSG];
+};
+
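+/* TXSCHQ_IDX() pulls the scheduler queue index out of a register offset in
+ * 'reg' above, e.g. a reg with bits 25:16 set to 5 gives
+ * TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT) = 5.
+ */
+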
+struct nix_vtag_config {
+ struct mbox_msghdr hdr;
+ u8 vtag_size;
+ /* cfg_type is '0' for tx vlan cfg
+ * cfg_type is '1' for rx vlan cfg
+ */
+ u8 cfg_type;
+ union {
+ /* valid when cfg_type is '0' */
+ struct {
+ /* tx vlan0 tag(C-VLAN) */
+ u64 vlan0;
+ /* tx vlan1 tag(S-VLAN) */
+ u64 vlan1;
+ /* insert tx vlan tag */
+ u8 insert_vlan :1;
+ /* insert tx double vlan tag */
+ u8 double_vlan :1;
+ } tx;
+
+ /* valid when cfg_type is '1' */
+ struct {
+ /* rx vtag type index */
+ u8 vtag_type;
+ /* rx vtag strip */
+ u8 strip_vtag :1;
+ /* rx vtag capture */
+ u8 capture_vtag :1;
+ } rx;
+ };
+};
+
+struct nix_rss_flowkey_cfg {
+ struct mbox_msghdr hdr;
+ int mcam_index; /* MCAM entry index to modify */
+ u32 flowkey_cfg; /* Flowkey types selected */
+ u8 group; /* RSS context or group */
+};
+
+struct nix_set_mac_addr {
+ struct mbox_msghdr hdr;
+ u8 mac_addr[ETH_ALEN]; /* MAC address to be set for this pcifunc */
+};
+
+struct nix_rx_mode {
+ struct mbox_msghdr hdr;
+#define NIX_RX_MODE_UCAST BIT(0)
+#define NIX_RX_MODE_PROMISC BIT(1)
+#define NIX_RX_MODE_ALLMULTI BIT(2)
+ u16 mode;
+};
+
+#endif /* MBOX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
new file mode 100644
index 000000000000..f98b0113def3
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -0,0 +1,262 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef NPC_H
+#define NPC_H
+
+enum NPC_LID_E {
+ NPC_LID_LA = 0,
+ NPC_LID_LB,
+ NPC_LID_LC,
+ NPC_LID_LD,
+ NPC_LID_LE,
+ NPC_LID_LF,
+ NPC_LID_LG,
+ NPC_LID_LH,
+};
+
+#define NPC_LT_NA 0
+
+enum npc_kpu_la_ltype {
+ NPC_LT_LA_8023 = 1,
+ NPC_LT_LA_ETHER,
+};
+
+enum npc_kpu_lb_ltype {
+ NPC_LT_LB_ETAG = 1,
+ NPC_LT_LB_CTAG,
+ NPC_LT_LB_STAG,
+ NPC_LT_LB_BTAG,
+ NPC_LT_LB_QINQ,
+ NPC_LT_LB_ITAG,
+};
+
+enum npc_kpu_lc_ltype {
+ NPC_LT_LC_IP = 1,
+ NPC_LT_LC_IP6,
+ NPC_LT_LC_ARP,
+ NPC_LT_LC_RARP,
+ NPC_LT_LC_MPLS,
+ NPC_LT_LC_NSH,
+ NPC_LT_LC_PTP,
+ NPC_LT_LC_FCOE,
+};
+
+/* Don't modify Ltypes up to SCTP, otherwise it will
+ * affect flow tag calculation and thus RSS.
+ */
+enum npc_kpu_ld_ltype {
+ NPC_LT_LD_TCP = 1,
+ NPC_LT_LD_UDP,
+ NPC_LT_LD_ICMP,
+ NPC_LT_LD_SCTP,
+ NPC_LT_LD_IGMP,
+ NPC_LT_LD_ICMP6,
+ NPC_LT_LD_ESP,
+ NPC_LT_LD_AH,
+ NPC_LT_LD_GRE,
+ NPC_LT_LD_GRE_MPLS,
+ NPC_LT_LD_GRE_NSH,
+ NPC_LT_LD_TU_MPLS,
+};
+
+enum npc_kpu_le_ltype {
+ NPC_LT_LE_TU_ETHER = 1,
+ NPC_LT_LE_TU_PPP,
+ NPC_LT_LE_TU_MPLS_IN_NSH,
+ NPC_LT_LE_TU_3RD_NSH,
+};
+
+enum npc_kpu_lf_ltype {
+ NPC_LT_LF_TU_IP = 1,
+ NPC_LT_LF_TU_IP6,
+ NPC_LT_LF_TU_ARP,
+ NPC_LT_LF_TU_MPLS_IP,
+ NPC_LT_LF_TU_MPLS_IP6,
+ NPC_LT_LF_TU_MPLS_ETHER,
+};
+
+enum npc_kpu_lg_ltype {
+ NPC_LT_LG_TU_TCP = 1,
+ NPC_LT_LG_TU_UDP,
+ NPC_LT_LG_TU_SCTP,
+ NPC_LT_LG_TU_ICMP,
+ NPC_LT_LG_TU_IGMP,
+ NPC_LT_LG_TU_ICMP6,
+ NPC_LT_LG_TU_ESP,
+ NPC_LT_LG_TU_AH,
+};
+
+enum npc_kpu_lh_ltype {
+ NPC_LT_LH_TCP_DATA = 1,
+ NPC_LT_LH_HTTP_DATA,
+ NPC_LT_LH_HTTPS_DATA,
+ NPC_LT_LH_PPTP_DATA,
+ NPC_LT_LH_UDP_DATA,
+};
+
+struct npc_kpu_profile_cam {
+ u8 state;
+ u8 state_mask;
+ u16 dp0;
+ u16 dp0_mask;
+ u16 dp1;
+ u16 dp1_mask;
+ u16 dp2;
+ u16 dp2_mask;
+};
+
+struct npc_kpu_profile_action {
+ u8 errlev;
+ u8 errcode;
+ u8 dp0_offset;
+ u8 dp1_offset;
+ u8 dp2_offset;
+ u8 bypass_count;
+ u8 parse_done;
+ u8 next_state;
+ u8 ptr_advance;
+ u8 cap_ena;
+ u8 lid;
+ u8 ltype;
+ u8 flags;
+ u8 offset;
+ u8 mask;
+ u8 right;
+ u8 shift;
+};
+
+struct npc_kpu_profile {
+ int cam_entries;
+ int action_entries;
+ struct npc_kpu_profile_cam *cam;
+ struct npc_kpu_profile_action *action;
+};
+
+/* NPC KPU register formats */
+struct npc_kpu_cam {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 rsvd_63_56 : 8;
+ u64 state : 8;
+ u64 dp2_data : 16;
+ u64 dp1_data : 16;
+ u64 dp0_data : 16;
+#else
+ u64 dp0_data : 16;
+ u64 dp1_data : 16;
+ u64 dp2_data : 16;
+ u64 state : 8;
+ u64 rsvd_63_56 : 8;
+#endif
+};
+
+struct npc_kpu_action0 {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 rsvd_63_57 : 7;
+ u64 byp_count : 3;
+ u64 capture_ena : 1;
+ u64 parse_done : 1;
+ u64 next_state : 8;
+ u64 rsvd_43 : 1;
+ u64 capture_lid : 3;
+ u64 capture_ltype : 4;
+ u64 capture_flags : 8;
+ u64 ptr_advance : 8;
+ u64 var_len_offset : 8;
+ u64 var_len_mask : 8;
+ u64 var_len_right : 1;
+ u64 var_len_shift : 3;
+#else
+ u64 var_len_shift : 3;
+ u64 var_len_right : 1;
+ u64 var_len_mask : 8;
+ u64 var_len_offset : 8;
+ u64 ptr_advance : 8;
+ u64 capture_flags : 8;
+ u64 capture_ltype : 4;
+ u64 capture_lid : 3;
+ u64 rsvd_43 : 1;
+ u64 next_state : 8;
+ u64 parse_done : 1;
+ u64 capture_ena : 1;
+ u64 byp_count : 3;
+ u64 rsvd_63_57 : 7;
+#endif
+};
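+/* A reading of the var_len_* fields (an inference from the names, not
+ * from spec text): the advance past a variable-length header appears to
+ * be ptr_advance plus a value taken from the packet, roughly
+ *
+ *	advance = ptr_advance + ((pkt[var_len_offset] & var_len_mask)
+ *				 shifted by var_len_shift, left or right
+ *				 per var_len_right);
+ *
+ * e.g. the IPv4 IHL nibble scaled to a byte count.
+ */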
+
+struct npc_kpu_action1 {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 rsvd_63_36 : 28;
+ u64 errlev : 4;
+ u64 errcode : 8;
+ u64 dp2_offset : 8;
+ u64 dp1_offset : 8;
+ u64 dp0_offset : 8;
+#else
+ u64 dp0_offset : 8;
+ u64 dp1_offset : 8;
+ u64 dp2_offset : 8;
+ u64 errcode : 8;
+ u64 errlev : 4;
+ u64 rsvd_63_36 : 28;
+#endif
+};
+
+struct npc_kpu_pkind_cpi_def {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 ena : 1;
+ u64 rsvd_62_59 : 4;
+ u64 lid : 3;
+ u64 ltype_match : 4;
+ u64 ltype_mask : 4;
+ u64 flags_match : 8;
+ u64 flags_mask : 8;
+ u64 add_offset : 8;
+ u64 add_mask : 8;
+ u64 rsvd_15 : 1;
+ u64 add_shift : 3;
+ u64 rsvd_11_10 : 2;
+ u64 cpi_base : 10;
+#else
+ u64 cpi_base : 10;
+ u64 rsvd_11_10 : 2;
+ u64 add_shift : 3;
+ u64 rsvd_15 : 1;
+ u64 add_mask : 8;
+ u64 add_offset : 8;
+ u64 flags_mask : 8;
+ u64 flags_match : 8;
+ u64 ltype_mask : 4;
+ u64 ltype_match : 4;
+ u64 lid : 3;
+ u64 rsvd_62_59 : 4;
+ u64 ena : 1;
+#endif
+};
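+/* Presumed semantics: when 'ena' is set and a parsed layer matches
+ * lid/ltype_match/flags_match under their masks, the channel parse
+ * index becomes cpi_base plus a value extracted from the packet via
+ * add_offset/add_mask and shifted by add_shift.
+ */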
+
+struct nix_rx_action {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 rsvd_63_61 :3;
+ u64 flow_key_alg :5;
+ u64 match_id :16;
+ u64 index :20;
+ u64 pf_func :16;
+ u64 op :4;
+#else
+ u64 op :4;
+ u64 pf_func :16;
+ u64 index :20;
+ u64 match_id :16;
+ u64 flow_key_alg :5;
+ u64 rsvd_63_61 :3;
+#endif
+};
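+/* NIX RX action programmed into an NPC MCAM entry.  Reading the field
+ * names: 'op' selects the receive operation (e.g. unicast queue vs RSS),
+ * 'pf_func' the target PF/VF, 'index' the RQ index or RSS group,
+ * 'match_id' an opaque tag reported with the packet and 'flow_key_alg'
+ * the flow-key algorithm used for RSS hashing.
+ */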
+
+#endif /* NPC_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
new file mode 100644
index 000000000000..b2ce957605bb
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
@@ -0,0 +1,5709 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef NPC_PROFILE_H
+#define NPC_PROFILE_H
+
+#define NPC_ETYPE_IP 0x0800
+#define NPC_ETYPE_IP6 0x86dd
+#define NPC_ETYPE_ARP 0x0806
+#define NPC_ETYPE_RARP 0x8035
+#define NPC_ETYPE_MPLSU 0x8847
+#define NPC_ETYPE_MPLSM 0x8848
+#define NPC_ETYPE_ETAG 0x893f
+#define NPC_ETYPE_CTAG 0x8100
+#define NPC_ETYPE_SBTAG 0x88a8
+#define NPC_ETYPE_ITAG 0x88e7
+#define NPC_ETYPE_PTP 0x88f7
+#define NPC_ETYPE_FCOE 0x8906
+#define NPC_ETYPE_QINQ 0x9100
+#define NPC_ETYPE_TRANS_ETH_BR 0x6558
+#define NPC_ETYPE_PPP 0x880b
+#define NPC_ETYPE_NSH 0x894f
+
+#define NPC_IPNH_HOP 0
+#define NPC_IPNH_ICMP 1
+#define NPC_IPNH_IGMP 2
+#define NPC_IPNH_IP 4
+#define NPC_IPNH_TCP 6
+#define NPC_IPNH_UDP 17
+#define NPC_IPNH_IP6 41
+#define NPC_IPNH_ROUT 43
+#define NPC_IPNH_FRAG 44
+#define NPC_IPNH_GRE 47
+#define NPC_IPNH_ESP 50
+#define NPC_IPNH_AH 51
+#define NPC_IPNH_ICMP6 58
+#define NPC_IPNH_NONH 59
+#define NPC_IPNH_DEST 60
+#define NPC_IPNH_SCTP 132
+#define NPC_IPNH_MPLS 137
+
+#define NPC_UDP_PORT_GTPC 2123
+#define NPC_UDP_PORT_GTPU 2152
+#define NPC_UDP_PORT_VXLAN 4789
+#define NPC_UDP_PORT_VXLANGPE 4790
+#define NPC_UDP_PORT_GENEVE 6081
+
+#define NPC_VXLANGPE_NP_IP 0x1
+#define NPC_VXLANGPE_NP_IP6 0x2
+#define NPC_VXLANGPE_NP_ETH 0x3
+#define NPC_VXLANGPE_NP_NSH 0x4
+#define NPC_VXLANGPE_NP_MPLS 0x5
+#define NPC_VXLANGPE_NP_GBP 0x6
+#define NPC_VXLANGPE_NP_VBNG 0x7
+
+#define NPC_NSH_NP_IP 0x1
+#define NPC_NSH_NP_IP6 0x2
+#define NPC_NSH_NP_ETH 0x3
+#define NPC_NSH_NP_NSH 0x4
+#define NPC_NSH_NP_MPLS 0x5
+
+#define NPC_TCP_PORT_HTTP 80
+#define NPC_TCP_PORT_HTTPS 443
+#define NPC_TCP_PORT_PPTP 1723
+
+#define NPC_MPLS_S 0x0100
+
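+/* The NPC_IP_* values below match the first half-word of an IP header:
+ * the version nibble in bits <15:12> and, for IPv4, the IHL nibble in
+ * bits <11:8> (NPC_IP_HDR_LEN_5 == a 5-word, i.e. 20-byte, header).
+ */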
+#define NPC_IP_VER_4 0x4000
+#define NPC_IP_VER_6 0x6000
+#define NPC_IP_VER_MASK 0xf000
+#define NPC_IP_HDR_LEN_5 0x0500
+#define NPC_IP_HDR_LEN_MASK 0x0f00
+
+#define NPC_GRE_F_CSUM (0x1 << 15)
+#define NPC_GRE_F_ROUTE (0x1 << 14)
+#define NPC_GRE_F_KEY (0x1 << 13)
+#define NPC_GRE_F_SEQ (0x1 << 12)
+#define NPC_GRE_F_ACK (0x1 << 7)
+#define NPC_GRE_FLAG_MASK (NPC_GRE_F_CSUM | NPC_GRE_F_ROUTE | \
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ | NPC_GRE_F_ACK)
+#define NPC_GRE_VER_MASK 0x0003
+#define NPC_GRE_VER_1 0x0001
+
+#define NPC_VXLAN_I 0x0800
+
+#define NPC_VXLANGPE_VER (0x3 << 12)
+#define NPC_VXLANGPE_I (0x1 << 11)
+#define NPC_VXLANGPE_P (0x1 << 10)
+#define NPC_VXLANGPE_B (0x1 << 9)
+#define NPC_VXLANGPE_NP_MASK 0x00ff
+
+#define NPC_NSH_NP_MASK 0x00ff
+
+#define NPC_GENEVE_F_OAM (0x1 << 7)
+#define NPC_GENEVE_F_CRI_OPT (0x1 << 6)
+
+#define NPC_GTP_PT_GTP (0x1 << 12)
+#define NPC_GTP_PT_MASK (0x1 << 12)
+#define NPC_GTP_VER1 (0x1 << 13)
+#define NPC_GTP_VER_MASK (0x7 << 13)
+#define NPC_GTP_MT_G_PDU 0xff
+#define NPC_GTP_MT_MASK 0xff
+
+#define NPC_TCP_DATA_OFFSET_5 0x5000
+#define NPC_TCP_DATA_OFFSET_MASK 0xf000
+
+enum npc_kpu_parser_state {
+ NPC_S_NA = 0,
+ NPC_S_KPU1_ETHER,
+ NPC_S_KPU1_PKI,
+ NPC_S_KPU2_CTAG,
+ NPC_S_KPU2_SBTAG,
+ NPC_S_KPU2_QINQ,
+ NPC_S_KPU2_ETAG,
+ NPC_S_KPU2_ITAG,
+ NPC_S_KPU3_CTAG,
+ NPC_S_KPU3_STAG,
+ NPC_S_KPU3_QINQ,
+ NPC_S_KPU3_ITAG,
+ NPC_S_KPU4_MPLS,
+ NPC_S_KPU4_NSH,
+ NPC_S_KPU5_IP,
+ NPC_S_KPU5_IP6,
+ NPC_S_KPU5_ARP,
+ NPC_S_KPU5_RARP,
+ NPC_S_KPU5_PTP,
+ NPC_S_KPU5_FCOE,
+ NPC_S_KPU5_MPLS,
+ NPC_S_KPU5_MPLS_PL,
+ NPC_S_KPU5_NSH,
+ NPC_S_KPU6_IP6_EXT,
+ NPC_S_KPU7_IP6_EXT,
+ NPC_S_KPU8_TCP,
+ NPC_S_KPU8_UDP,
+ NPC_S_KPU8_SCTP,
+ NPC_S_KPU8_ICMP,
+ NPC_S_KPU8_IGMP,
+ NPC_S_KPU8_ICMP6,
+ NPC_S_KPU8_GRE,
+ NPC_S_KPU8_ESP,
+ NPC_S_KPU8_AH,
+ NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN,
+ NPC_S_KPU9_TU_MPLS,
+ NPC_S_KPU9_TU_NSH,
+ NPC_S_KPU10_TU_MPLS_PL,
+ NPC_S_KPU10_TU_MPLS,
+ NPC_S_KPU10_TU_NSH,
+ NPC_S_KPU11_TU_ETHER,
+ NPC_S_KPU11_TU_PPP,
+ NPC_S_KPU11_TU_MPLS_IN_NSH,
+ NPC_S_KPU11_TU_3RD_NSH,
+ NPC_S_KPU12_TU_IP,
+ NPC_S_KPU12_TU_IP6,
+ NPC_S_KPU12_TU_ARP,
+ NPC_S_KPU13_TU_IP6_EXT,
+ NPC_S_KPU14_TU_IP6_EXT,
+ NPC_S_KPU15_TU_TCP,
+ NPC_S_KPU15_TU_UDP,
+ NPC_S_KPU15_TU_SCTP,
+ NPC_S_KPU15_TU_ICMP,
+ NPC_S_KPU15_TU_IGMP,
+ NPC_S_KPU15_TU_ICMP6,
+ NPC_S_KPU15_TU_ESP,
+ NPC_S_KPU15_TU_AH,
+ NPC_S_KPU16_HTTP_DATA,
+ NPC_S_KPU16_HTTPS_DATA,
+ NPC_S_KPU16_PPTP_DATA,
+ NPC_S_KPU16_TCP_DATA,
+ NPC_S_KPU16_UDP_DATA,
+ NPC_S_LAST /* has to be the last item */
+};
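+/* Parser states are grouped by KPU stage (NPC_S_KPU<n>_*): KPU1-3 walk
+ * Ethernet and its CTAG/STAG/QinQ/E-TAG/I-TAG variants, KPU4 MPLS/NSH,
+ * KPU5-7 L3 and IPv6 extension headers, KPU8 L4 and tunnel headers,
+ * KPU9-15 the tunneled (TU_*) inner layers and KPU16 payload classes.
+ */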
+
+enum npc_kpu_parser_flag {
+ NPC_F_NA = 0,
+ NPC_F_PKI,
+ NPC_F_PKI_VLAN,
+ NPC_F_PKI_ETAG,
+ NPC_F_PKI_ITAG,
+ NPC_F_PKI_MPLS,
+ NPC_F_PKI_NSH,
+ NPC_F_ETYPE_UNK,
+ NPC_F_ETHER_VLAN,
+ NPC_F_ETHER_ETAG,
+ NPC_F_ETHER_ITAG,
+ NPC_F_ETHER_MPLS,
+ NPC_F_ETHER_NSH,
+ NPC_F_STAG_CTAG,
+ NPC_F_STAG_CTAG_UNK,
+ NPC_F_STAG_STAG_CTAG,
+ NPC_F_STAG_STAG_STAG,
+ NPC_F_QINQ_CTAG,
+ NPC_F_QINQ_CTAG_UNK,
+ NPC_F_QINQ_QINQ_CTAG,
+ NPC_F_QINQ_QINQ_QINQ,
+ NPC_F_BTAG_ITAG,
+ NPC_F_BTAG_ITAG_STAG,
+ NPC_F_BTAG_ITAG_CTAG,
+ NPC_F_BTAG_ITAG_UNK,
+ NPC_F_ETAG_CTAG,
+ NPC_F_ETAG_BTAG_ITAG,
+ NPC_F_ETAG_STAG,
+ NPC_F_ETAG_QINQ,
+ NPC_F_ETAG_ITAG,
+ NPC_F_ETAG_ITAG_STAG,
+ NPC_F_ETAG_ITAG_CTAG,
+ NPC_F_ETAG_ITAG_UNK,
+ NPC_F_ITAG_STAG_CTAG,
+ NPC_F_ITAG_STAG,
+ NPC_F_ITAG_CTAG,
+ NPC_F_MPLS_4_LABELS,
+ NPC_F_MPLS_3_LABELS,
+ NPC_F_MPLS_2_LABELS,
+ NPC_F_IP_HAS_OPTIONS,
+ NPC_F_IP_IP_IN_IP,
+ NPC_F_IP_6TO4,
+ NPC_F_IP_MPLS_IN_IP,
+ NPC_F_IP_UNK_PROTO,
+ NPC_F_IP_IP_IN_IP_HAS_OPTIONS,
+ NPC_F_IP_6TO4_HAS_OPTIONS,
+ NPC_F_IP_MPLS_IN_IP_HAS_OPTIONS,
+ NPC_F_IP_UNK_PROTO_HAS_OPTIONS,
+ NPC_F_IP6_HAS_EXT,
+ NPC_F_IP6_TUN_IP6,
+ NPC_F_IP6_MPLS_IN_IP,
+ NPC_F_TCP_HAS_OPTIONS,
+ NPC_F_TCP_HTTP,
+ NPC_F_TCP_HTTPS,
+ NPC_F_TCP_PPTP,
+ NPC_F_TCP_UNK_PORT,
+ NPC_F_TCP_HTTP_HAS_OPTIONS,
+ NPC_F_TCP_HTTPS_HAS_OPTIONS,
+ NPC_F_TCP_PPTP_HAS_OPTIONS,
+ NPC_F_TCP_UNK_PORT_HAS_OPTIONS,
+ NPC_F_UDP_VXLAN,
+ NPC_F_UDP_VXLAN_NOVNI,
+ NPC_F_UDP_VXLAN_NOVNI_NSH,
+ NPC_F_UDP_VXLANGPE,
+ NPC_F_UDP_VXLANGPE_NSH,
+ NPC_F_UDP_VXLANGPE_MPLS,
+ NPC_F_UDP_VXLANGPE_NOVNI,
+ NPC_F_UDP_VXLANGPE_NOVNI_NSH,
+ NPC_F_UDP_VXLANGPE_NOVNI_MPLS,
+ NPC_F_UDP_VXLANGPE_UNK,
+ NPC_F_UDP_VXLANGPE_NONP,
+ NPC_F_UDP_GTP_GTPC,
+ NPC_F_UDP_GTP_GTPU_G_PDU,
+ NPC_F_UDP_GTP_GTPU_UNK,
+ NPC_F_UDP_UNK_PORT,
+ NPC_F_UDP_GENEVE,
+ NPC_F_UDP_GENEVE_OAM,
+ NPC_F_UDP_GENEVE_CRI_OPT,
+ NPC_F_UDP_GENEVE_OAM_CRI_OPT,
+ NPC_F_GRE_NVGRE,
+ NPC_F_GRE_HAS_SRE,
+ NPC_F_GRE_HAS_CSUM,
+ NPC_F_GRE_HAS_KEY,
+ NPC_F_GRE_HAS_SEQ,
+ NPC_F_GRE_HAS_CSUM_KEY,
+ NPC_F_GRE_HAS_CSUM_SEQ,
+ NPC_F_GRE_HAS_KEY_SEQ,
+ NPC_F_GRE_HAS_CSUM_KEY_SEQ,
+ NPC_F_GRE_HAS_ROUTE,
+ NPC_F_GRE_UNK_PROTO,
+ NPC_F_GRE_VER1,
+ NPC_F_GRE_VER1_HAS_SEQ,
+ NPC_F_GRE_VER1_HAS_ACK,
+ NPC_F_GRE_VER1_HAS_SEQ_ACK,
+ NPC_F_GRE_VER1_UNK_PROTO,
+ NPC_F_TU_ETHER_UNK,
+ NPC_F_TU_ETHER_CTAG,
+ NPC_F_TU_ETHER_CTAG_UNK,
+ NPC_F_TU_ETHER_STAG_CTAG,
+ NPC_F_TU_ETHER_STAG_CTAG_UNK,
+ NPC_F_TU_ETHER_STAG,
+ NPC_F_TU_ETHER_STAG_UNK,
+ NPC_F_TU_ETHER_QINQ_CTAG,
+ NPC_F_TU_ETHER_QINQ_CTAG_UNK,
+ NPC_F_TU_ETHER_QINQ,
+ NPC_F_TU_ETHER_QINQ_UNK,
+ NPC_F_LAST /* has to be the last item */
+};
+
+enum npc_kpu_err_code {
+ NPC_EC_NOERR = 0, /* has to be zero */
+ NPC_EC_UNK,
+ NPC_EC_L2_K1,
+ NPC_EC_L2_K2,
+ NPC_EC_L2_K3,
+ NPC_EC_L2_K3_ETYPE_UNK,
+ NPC_EC_L2_MPLS_2MANY,
+ NPC_EC_L2_K4,
+ NPC_EC_IP_VER,
+ NPC_EC_IP6_VER,
+ NPC_EC_VXLAN,
+ NPC_EC_NVGRE,
+ NPC_EC_GRE,
+ NPC_EC_GRE_VER1,
+ NPC_EC_L4,
+ NPC_EC_LAST /* has to be the last item */
+};
+
+enum NPC_ERRLEV_E {
+ NPC_ERRLEV_RE = 0,
+ NPC_ERRLEV_LA = 1,
+ NPC_ERRLEV_LB = 2,
+ NPC_ERRLEV_LC = 3,
+ NPC_ERRLEV_LD = 4,
+ NPC_ERRLEV_LE = 5,
+ NPC_ERRLEV_LF = 6,
+ NPC_ERRLEV_LG = 7,
+ NPC_ERRLEV_LH = 8,
+ NPC_ERRLEV_R9 = 9,
+ NPC_ERRLEV_R10 = 10,
+ NPC_ERRLEV_R11 = 11,
+ NPC_ERRLEV_R12 = 12,
+ NPC_ERRLEV_R13 = 13,
+ NPC_ERRLEV_R14 = 14,
+ NPC_ERRLEV_NIX = 15,
+ NPC_ERRLEV_ENUM_LAST = 16,
+};
+
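+/* Initial (per-pkind) actions: one row per port kind, all defaulting to
+ * state KPU1_ETHER with dp0/dp1/dp2 pre-loaded from packet offsets 12,
+ * 14 and 16 (the EtherType and the two half-words after it); only the
+ * last row differs, in its trailing var-len offset/mask fields.
+ */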
+static struct npc_kpu_profile_action ikpu_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 1, 0xff,
+ 0, 0,
+ },
+};
+
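+/* CAM tables: each row matches the current state and the dp0..dp2
+ * half-words against value/mask pairs; rows are ordered most specific
+ * first and an all-zero data/mask pair acts as a wildcard, so the
+ * trailing 0x0000/0x0000 rows are catch-alls.  A minimal sketch of the
+ * presumed first-match resolution (illustrative only, assuming match
+ * values are pre-masked as in these tables; not driver or hardware
+ * code):
+ *
+ *	static int npc_kpu_cam_match(const struct npc_kpu_profile_cam *cam,
+ *				     int n, u8 state, u16 dp0, u16 dp1,
+ *				     u16 dp2)
+ *	{
+ *		int i;
+ *
+ *		for (i = 0; i < n; i++) {
+ *			if ((state & cam[i].state_mask) == cam[i].state &&
+ *			    (dp0 & cam[i].dp0_mask) == cam[i].dp0 &&
+ *			    (dp1 & cam[i].dp1_mask) == cam[i].dp1 &&
+ *			    (dp2 & cam[i].dp2_mask) == cam[i].dp2)
+ *				return i;	/* index selects action row */
+ *		}
+ *		return -1;
+ *	}
+ */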
+static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
+ {
+ NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_IP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_IP6, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_ARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_RARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_PTP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_FCOE, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_ETAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_NSH, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, 0x0000, 0xfc00,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, 0x0400, 0xfe00,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_IP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_IP6, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_ARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_RARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_PTP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_FCOE, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_QINQ, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_ETAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ 0x0010, 0x0010, 0x0000, 0xffff,
+ },
+ {
+ NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ 0x0010, 0x0010, 0x0000, 0xffff,
+ },
+ {
+ NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_NSH, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_PKI, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
+ {
+ NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_IP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_IP6, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_ARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_RARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_PTP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_FCOE, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_NSH, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_RARP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_PTP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_FCOE, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_MPLSU, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_MPLSM, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_NSH, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_SBTAG, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_IP, 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_IP6, 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_ARP, 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_RARP, 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_PTP, 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_FCOE, 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_MPLSU, 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_MPLSM, 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_NSH, 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_SBTAG, 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_CTAG, 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_IP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_IP6, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_RARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_PTP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_FCOE, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_NSH, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_RARP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_PTP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_FCOE, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_MPLSU, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_MPLSM, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_NSH, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_QINQ, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_QINQ, 0xffff,
+ NPC_ETYPE_QINQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_IP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_IP6, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_ARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_RARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_PTP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_FCOE, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_NSH, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_IP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_IP6, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_RARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_PTP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_FCOE, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_NSH, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_ITAG, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_QINQ, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_IP, 0xffff,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_IP6, 0xffff,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_ARP, 0xffff,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_SBTAG, 0xffff,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_CTAG, 0xffff,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_IP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_IP6, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_ARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_RARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP, 0xffff,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP6, 0xffff,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_ARP, 0xffff,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu3_cam_entries[] = {
+ {
+ NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_IP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_IP6, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_ARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_RARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_PTP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_FCOE, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_NSH, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_RARP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_PTP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_FCOE, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_MPLSU, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_MPLSM, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_NSH, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_IP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_IP6, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_ARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_RARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_NSH, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_RARP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_PTP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_FCOE, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_MPLSU, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_MPLSM, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_NSH, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_IP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_IP6, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_ARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_RARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_PTP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_FCOE, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_NSH, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_IP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_IP6, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_ARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_RARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP, 0xffff,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP6, 0xffff,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_ARP, 0xffff,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+};
+
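+/* KPU4 counts MPLS labels via the bottom-of-stack bit (NPC_MPLS_S) of
+ * successive labels sampled in dp0..dp2: S set in the first half-word
+ * means one label, clear-then-set two, and so on (an inference from the
+ * NPC_F_MPLS_*_LABELS flags above).
+ */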
+static struct npc_kpu_profile_cam kpu4_cam_entries[] = {
+ {
+ NPC_S_KPU4_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU4_MPLS, 0xff, 0x0000, NPC_MPLS_S,
+ NPC_MPLS_S, NPC_MPLS_S, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU4_MPLS, 0xff, 0x0000, NPC_MPLS_S,
+ 0x0000, NPC_MPLS_S, NPC_MPLS_S, NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU4_MPLS, 0xff, 0x0000, NPC_MPLS_S,
+ 0x0000, NPC_MPLS_S, 0x0000, NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU4_NSH, 0xff, NPC_NSH_NP_IP, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU4_NSH, 0xff, NPC_NSH_NP_IP6, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU4_NSH, 0xff, NPC_NSH_NP_ETH, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU4_NSH, 0xff, NPC_NSH_NP_NSH, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU4_NSH, 0xff, NPC_NSH_NP_MPLS, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+};
+
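+/* KPU5 (L3): for NPC_S_KPU5_IP the protocol byte sits in dp0's low byte
+ * and the version/IHL half-word in dp1, so a plain 20-byte IPv4 header
+ * (IHL == 5) is told apart from one carrying options; for
+ * NPC_S_KPU5_IP6 the next-header byte sits in dp0's high byte.
+ */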
+static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_TCP, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_UDP, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_SCTP, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_ICMP, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_IGMP, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_ESP, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_AH, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_GRE, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_IP, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_IP6, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_MPLS, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, 0x0000, 0x0000,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_TCP, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_UDP, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_SCTP, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_ICMP, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_IGMP, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_ESP, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_AH, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_GRE, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_IP, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_IP6, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_MPLS, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, 0x0000, 0x0000,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_ARP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_RARP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_PTP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_FCOE, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff, NPC_IPNH_TCP << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff, NPC_IPNH_UDP << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff, NPC_IPNH_SCTP << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff, NPC_IPNH_ICMP << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff, NPC_IPNH_ICMP6 << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff, NPC_IPNH_ESP << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff, NPC_IPNH_AH << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff, NPC_IPNH_GRE << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff, NPC_IPNH_IP6 << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff, NPC_IPNH_MPLS << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff, 0x0000, 0x0000,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
+ 0x0000, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS, 0xff, 0x0000, NPC_MPLS_S,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS_PL, 0xff, NPC_IP_VER_4, NPC_IP_VER_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS_PL, 0xff, NPC_IP_VER_6, NPC_IP_VER_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS_PL, 0xff, 0x0000, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS_PL, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_NSH, 0xff, NPC_NSH_NP_IP, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_NSH, 0xff, NPC_NSH_NP_IP6, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_NSH, 0xff, NPC_NSH_NP_ETH, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_NSH, 0xff, NPC_NSH_NP_NSH, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_NSH, 0xff, NPC_NSH_NP_MPLS, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
+ {
+ NPC_S_KPU6_IP6_EXT, 0xff, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu7_cam_entries[] = {
+ {
+ NPC_S_KPU7_IP6_EXT, 0xff, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000,
+ },
+};
+
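+/* KPU8 (L4/tunnels): TCP rows key on well-known ports (HTTP/HTTPS/PPTP)
+ * and the data-offset nibble to spot options; UDP rows on tunnel ports
+ * (VXLAN, VXLAN-GPE, GENEVE, GTP) plus tunnel-header flags; GRE rows
+ * enumerate the C/K/S flag combinations and the version-1 (PPTP) forms.
+ */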
+static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
+ {
+ NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_HTTP, 0xffff,
+ NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_HTTPS, 0xffff,
+ NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_PPTP, 0xffff,
+ NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff, 0x0000, 0x0000,
+ NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_HTTP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_HTTPS, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_PPTP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLAN, 0xffff,
+ NPC_VXLAN_I, NPC_VXLAN_I, 0x0000, 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLAN, 0xffff,
+ 0x0000, 0xffff, 0x0000, 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLAN, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_IP, NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_IP6, NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_ETH, NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_NSH, NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_MPLS, NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+ NPC_VXLANGPE_P, NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_IP, NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+ NPC_VXLANGPE_P, NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_IP6, NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+ NPC_VXLANGPE_P, NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_ETH, NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+ NPC_VXLANGPE_P, NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_NSH, NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+ NPC_VXLANGPE_P, NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_MPLS, NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+ NPC_VXLANGPE_P, NPC_VXLANGPE_P, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+ 0x0000, NPC_VXLANGPE_P, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+ 0x0000, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_TRANS_ETH_BR, 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+ NPC_GENEVE_F_OAM, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_TRANS_ETH_BR, 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+ NPC_GENEVE_F_CRI_OPT, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_TRANS_ETH_BR, 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_TRANS_ETH_BR, 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+ 0x0000, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP, 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+ NPC_GENEVE_F_OAM, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP, 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+ NPC_GENEVE_F_CRI_OPT, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP, 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, NPC_ETYPE_IP, 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+ 0x0000, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP6, 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+ NPC_GENEVE_F_OAM, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP6, 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+ NPC_GENEVE_F_CRI_OPT,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, NPC_ETYPE_IP6, 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, NPC_ETYPE_IP6, 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GTPC, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GTPU, 0xffff,
+ NPC_GTP_PT_GTP | NPC_GTP_VER1 | NPC_GTP_MT_G_PDU,
+ NPC_GTP_PT_MASK | NPC_GTP_VER_MASK | NPC_GTP_MT_MASK,
+ 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GTPU, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_SCTP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_ICMP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_IGMP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_ICMP6, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_ESP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_AH, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_TRANS_ETH_BR, 0xffff,
+ NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_TRANS_ETH_BR, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ 0x0000, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ NPC_GRE_F_CSUM, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+ 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ 0x0000, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ NPC_GRE_F_CSUM, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+ 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
+ 0x0000, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
+ NPC_GRE_F_CSUM, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
+ NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
+ NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+ 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
+ 0x0000, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
+ NPC_GRE_F_CSUM, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
+ NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
+ NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+ 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
+ 0x0000, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
+ NPC_GRE_F_CSUM, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
+ NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
+ NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+ 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, 0x0000, 0xffff,
+ NPC_GRE_F_ROUTE, 0x4fff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, 0x0000, 0xffff,
+ 0x0000, 0x4fff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, 0x0000, 0xffff,
+ 0x0000, 0x0003, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_PPP, 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_VER_1, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_PPP, 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ | NPC_GRE_VER_1,
+ 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_PPP, 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_F_ACK | NPC_GRE_VER_1,
+ 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_PPP, 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ | NPC_GRE_F_ACK | NPC_GRE_VER_1,
+ 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, 0x0000, 0xffff,
+ 0x2001, 0xef7f, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, 0x0000, 0xffff,
+ 0x0001, 0x0003, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+};
+
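+/* KPU9/KPU10 repeat the KPU4/KPU5-style MPLS label-count and NSH
+ * next-protocol matching for the tunneled (TU_*) copies of those
+ * layers.
+ */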
+static struct npc_kpu_profile_cam kpu9_cam_entries[] = {
+ {
+ NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 0xff, NPC_MPLS_S, NPC_MPLS_S,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 0xff, 0x0000, NPC_MPLS_S,
+ NPC_MPLS_S, NPC_MPLS_S, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 0xff, 0x0000, NPC_MPLS_S,
+ 0x0000, NPC_MPLS_S, NPC_MPLS_S, NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 0xff, 0x0000, NPC_MPLS_S,
+ 0x0000, NPC_MPLS_S, 0x0000, NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS, 0xff, 0x0000, NPC_MPLS_S,
+ NPC_MPLS_S, NPC_MPLS_S, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS, 0xff, 0x0000, NPC_MPLS_S,
+ 0x0000, NPC_MPLS_S, NPC_MPLS_S, NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS, 0xff, 0x0000, NPC_MPLS_S,
+ 0x0000, NPC_MPLS_S, 0x0000, NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_NSH, 0xff, NPC_NSH_NP_IP, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_NSH, 0xff, NPC_NSH_NP_IP6, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_NSH, 0xff, NPC_NSH_NP_ETH, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_NSH, 0xff, NPC_NSH_NP_NSH, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_NSH, 0xff, NPC_NSH_NP_MPLS, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+		NPC_S_NA, 0x00, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+};
+
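+/* KPU10: continuation of deeper MPLS stacks (NPC_S_KPU10_TU_MPLS) and
+ * the MPLS payload peek (NPC_S_KPU10_TU_MPLS_PL, keying on the IP
+ * version nibble), plus NSH next-protocol dispatch.
+ */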
+static struct npc_kpu_profile_cam kpu10_cam_entries[] = {
+ {
+ NPC_S_KPU10_TU_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
+ 0x0000, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS, 0xff, 0x0000, NPC_MPLS_S,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_PL, 0xff, NPC_IP_VER_4, NPC_IP_VER_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_PL, 0xff, NPC_IP_VER_6, NPC_IP_VER_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_PL, 0xff, 0x0000, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_PL, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_NSH, 0xff, NPC_NSH_NP_IP, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_NSH, 0xff, NPC_NSH_NP_IP6, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_NSH, 0xff, NPC_NSH_NP_ETH, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_NSH, 0xff, NPC_NSH_NP_NSH, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_NSH, 0xff, NPC_NSH_NP_MPLS, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+		NPC_S_NA, 0x00, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+};
+
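+/* KPU11: tunnelled Ethernet, with the same single- and double-tag
+ * ladder as the outer header (CTAG, SBTAG+CTAG, QINQ+CTAG), plus the
+ * PPP and NSH-carried payload states.
+ */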
+static struct npc_kpu_profile_cam kpu11_cam_entries[] = {
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_IP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_IP6, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_ARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP, 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP6, 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_ARP, 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP, 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP6, 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_ARP, 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
+ NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
+ NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
+ NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_PPP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_MPLS_IN_NSH, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_3RD_NSH, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+		NPC_S_NA, 0x00, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+};
+
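+/* KPU12: tunnelled (inner) IP. IPv4 entries carry the protocol in the
+ * low byte of dp0 and separate IHL == 5 from headers with options via
+ * dp1; IPv6 entries carry the next header in the high byte of dp0.
+ */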
+static struct npc_kpu_profile_cam kpu12_cam_entries[] = {
+ {
+ NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_TCP, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_UDP, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_SCTP, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_ICMP, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_IGMP, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_ESP, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_AH, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, 0x0000, 0x0000,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_TCP, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_UDP, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_SCTP, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_ICMP, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_IGMP, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_ESP, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_AH, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, 0x0000, 0x0000,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_ARP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_TCP << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_UDP << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_SCTP << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_ICMP << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_ICMP6 << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_ESP << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_AH << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff, 0x0000, 0x0000,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+		NPC_S_NA, 0x00, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+};
+
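+/* KPU13 and KPU14: single catch-all entries for tunnelled IPv6
+ * extension headers.
+ */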
+static struct npc_kpu_profile_cam kpu13_cam_entries[] = {
+ {
+ NPC_S_KPU13_TU_IP6_EXT, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu14_cam_entries[] = {
+ {
+ NPC_S_KPU14_TU_IP6_EXT, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+};
+
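+/* KPU15: tunnelled L4. TCP is split by well-known port
+ * (HTTP/HTTPS/PPTP) and by data offset 5 versus options; the remaining
+ * protocols are matched on state alone.
+ */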
+static struct npc_kpu_profile_cam kpu15_cam_entries[] = {
+ {
+ NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_HTTP, 0xffff,
+ NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_HTTPS, 0xffff,
+ NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_PPTP, 0xffff,
+ NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff, 0x0000, 0x0000,
+ NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_HTTP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_HTTPS, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_PPTP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_UDP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_SCTP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_ICMP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_IGMP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_ICMP6, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_ESP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_AH, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+		NPC_S_NA, 0x00, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+};
+
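+/* KPU16: terminal payload states; nothing is left to match, so every
+ * data window is wildcarded.
+ */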
+static struct npc_kpu_profile_cam kpu16_cam_entries[] = {
+ {
+ NPC_S_KPU16_TCP_DATA, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU16_HTTP_DATA, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU16_HTTPS_DATA, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU16_PPTP_DATA, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU16_UDP_DATA, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+};
+
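+/* Action tables. Each entry follows struct npc_kpu_profile_action
+ * (npc.h): errlev, errcode, the dp0/dp1/dp2 offsets loaded into the
+ * next KPU's data windows, bypass_count, parse_done, next_state,
+ * ptr_advance, cap_ena, then lid/ltype/flags and an
+ * offset/mask/right/shift quad for extracting variable header lengths.
+ *
+ * Going by those field names, the first KPU1 entry captures layer A as
+ * NPC_LT_LA_ETHER, advances the parse pointer 14 bytes past the
+ * Ethernet header and bypasses the next three KPUs so that KPU5
+ * (NPC_S_KPU5_IP) sees the IPv4 header directly. The second half of
+ * the table repeats the ladder for PKI-prefixed frames (NPC_F_PKI*).
+ */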
+static struct npc_kpu_profile_action kpu1_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU5_IP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU5_IP6, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 3, 0, NPC_S_KPU5_ARP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 3, 0, NPC_S_KPU5_RARP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 3, 0, NPC_S_KPU5_PTP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 3, 0, NPC_S_KPU5_FCOE, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
+ 0, 0, NPC_S_KPU2_CTAG, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_VLAN, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 20,
+ 0, 0, NPC_S_KPU2_SBTAG, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_VLAN, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
+ 0, 0, NPC_S_KPU2_QINQ, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_VLAN, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 10, 24,
+ 0, 0, NPC_S_KPU2_ETAG, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_ETAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 16, 20, 24,
+ 0, 0, NPC_S_KPU2_ITAG, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_ITAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 2, 0, NPC_S_KPU4_MPLS, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_MPLS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 2, 0, NPC_S_KPU4_MPLS, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_MPLS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 2, 0, NPC_S_KPU4_NSH, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_NSH, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_8023, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_8023, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETYPE_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU5_IP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU5_IP6, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 3, 0, NPC_S_KPU5_ARP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 3, 0, NPC_S_KPU5_RARP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 3, 0, NPC_S_KPU5_PTP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 3, 0, NPC_S_KPU5_FCOE, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
+ 0, 0, NPC_S_KPU2_CTAG, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_VLAN, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 20,
+ 0, 0, NPC_S_KPU2_SBTAG, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_VLAN, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
+ 0, 0, NPC_S_KPU2_QINQ, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_VLAN, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 10, 24,
+ 0, 0, NPC_S_KPU2_ETAG, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_ETAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 16, 20, 24,
+ 0, 0, NPC_S_KPU2_ITAG, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_ITAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 2, 0, NPC_S_KPU4_MPLS, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_MPLS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 2, 0, NPC_S_KPU4_MPLS, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_MPLS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 2, 0, NPC_S_KPU4_NSH, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_NSH, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETYPE_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LA, NPC_EC_L2_K1, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
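+/* KPU2: advance past the tag stack just matched (ptr_advance 4 for a
+ * single tag, 8 for two, 18..26 for the ITAG forms) and dispatch to
+ * the L3 KPUs, or fall through to KPU3 when a further tag remains.
+ */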
+static struct npc_kpu_profile_action kpu2_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 2, 0, NPC_S_KPU5_IP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 2, 0, NPC_S_KPU5_IP6, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_ARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_RARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_PTP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_FCOE, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 1, 0, NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 1, 0, NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 1, 0, NPC_S_KPU4_NSH, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG, NPC_F_ETYPE_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 2, 0, NPC_S_KPU5_IP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 2, 0, NPC_S_KPU5_IP6, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_ARP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_RARP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_PTP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_FCOE, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 1, 0, NPC_S_KPU4_MPLS, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 1, 0, NPC_S_KPU4_MPLS, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 1, 0, NPC_S_KPU4_NSH, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
+ 0, 0, NPC_S_KPU3_CTAG, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
+ 0, 0, NPC_S_KPU3_STAG, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_STAG_STAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 2, 0, NPC_S_KPU5_IP, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 2, 0, NPC_S_KPU5_IP6, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_ARP, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_RARP, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_PTP, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_FCOE, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 1, 0, NPC_S_KPU4_MPLS, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 1, 0, NPC_S_KPU4_MPLS, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 1, 0, NPC_S_KPU4_NSH, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU3_STAG, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG_STAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU3_CTAG, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 2, 0, NPC_S_KPU5_IP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 2, 0, NPC_S_KPU5_IP6, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_ARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_RARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_PTP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_FCOE, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 1, 0, NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 1, 0, NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 1, 0, NPC_S_KPU4_NSH, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_ETYPE_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 2, 0, NPC_S_KPU5_IP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 2, 0, NPC_S_KPU5_IP6, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_ARP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_RARP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_PTP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_FCOE, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 1, 0, NPC_S_KPU4_MPLS, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 1, 0, NPC_S_KPU4_MPLS, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 1, 0, NPC_S_KPU4_NSH, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
+ 0, 0, NPC_S_KPU3_CTAG, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_QINQ_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
+ 0, 0, NPC_S_KPU3_QINQ, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_QINQ_QINQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 2, 0, NPC_S_KPU5_IP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 2, 0, NPC_S_KPU5_IP6, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_ARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_RARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_PTP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_FCOE, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 1, 0, NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 1, 0, NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 1, 0, NPC_S_KPU4_NSH, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_ETYPE_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 2, 0, NPC_S_KPU5_IP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 2, 0, NPC_S_KPU5_IP6, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_ARP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_RARP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_PTP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_FCOE, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 1, 0, NPC_S_KPU4_MPLS, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, 1, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 1, 0, NPC_S_KPU4_MPLS, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, 2, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 1, 0, NPC_S_KPU4_NSH, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, 2, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU3_CTAG, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 16, 20, 24,
+ 0, 0, NPC_S_KPU3_ITAG, 12, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_BTAG_ITAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
+ 0, 0, NPC_S_KPU3_STAG, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_STAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
+ 0, 0, NPC_S_KPU3_QINQ, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_QINQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 2, 0, NPC_S_KPU5_IP, 26, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 2, 0, NPC_S_KPU5_IP6, 26, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_ARP, 26, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU3_STAG, 26, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG_STAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU3_CTAG, 26, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETYPE_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 2, 0, NPC_S_KPU5_IP, 18, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 2, 0, NPC_S_KPU5_IP6, 18, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_ARP, 18, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_RARP, 18, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 2, 0, NPC_S_KPU5_IP, 26, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 2, 0, NPC_S_KPU5_IP6, 26, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_ARP, 26, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 2, 0, NPC_S_KPU5_IP, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 2, 0, NPC_S_KPU5_IP6, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_ARP, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 2, 0, NPC_S_KPU5_IP, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 2, 0, NPC_S_KPU5_IP6, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_ARP, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
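+/* KPU3: consume the innermost tag without recording a new layer
+ * (ltype stays NPC_LT_NA and cap_ena is 0); layer B was already
+ * captured by KPU2.
+ */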
+static struct npc_kpu_profile_action kpu3_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 1, 0, NPC_S_KPU5_IP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 1, 0, NPC_S_KPU5_IP6, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_ARP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_RARP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_PTP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_FCOE, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU4_MPLS, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU4_MPLS, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU4_NSH, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 1, 0, NPC_S_KPU5_IP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 1, 0, NPC_S_KPU5_IP6, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_ARP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_RARP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_PTP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_FCOE, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU4_MPLS, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU4_MPLS, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU4_NSH, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 1, 0, NPC_S_KPU5_IP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 1, 0, NPC_S_KPU5_IP6, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_ARP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_RARP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU4_MPLS, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU4_MPLS, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU4_NSH, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 1, 0, NPC_S_KPU5_IP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 1, 0, NPC_S_KPU5_IP6, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_ARP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_RARP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_PTP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_FCOE, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU4_MPLS, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU4_MPLS, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU4_NSH, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 1, 0, NPC_S_KPU5_IP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 1, 0, NPC_S_KPU5_IP6, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_ARP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_RARP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_PTP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_FCOE, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU4_MPLS, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU4_MPLS, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU4_NSH, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 2, 0, NPC_S_KPU5_IP, 18, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 2, 0, NPC_S_KPU5_IP6, 18, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_ARP, 18, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_RARP, 18, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 1, 0, NPC_S_KPU5_IP, 26, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 1, 0, NPC_S_KPU5_IP6, 26, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_ARP, 26, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 1, 0, NPC_S_KPU5_IP, 22, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 1, 0, NPC_S_KPU5_IP6, 22, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_ARP, 22, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 1, 0, NPC_S_KPU5_IP, 22, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 1, 0, NPC_S_KPU5_IP6, 22, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_ARP, 22, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
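+/* KPU4: MPLS label-stack accounting (one to four labels, flagged via
+ * NPC_F_MPLS_*_LABELS) and NSH dispatch straight to the inner-header
+ * KPUs, skipping ahead to KPU11/KPU12 where the payload type is
+ * already known.
+ */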
+static struct npc_kpu_profile_action kpu4_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU5_MPLS_PL, 4, 1,
+ NPC_LID_LC, NPC_LT_LC_MPLS, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU5_MPLS_PL, 8, 1,
+ NPC_LID_LC, NPC_LT_LC_MPLS, NPC_F_MPLS_2_LABELS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU5_MPLS_PL, 12, 1,
+ NPC_LID_LC, NPC_LT_LC_MPLS, NPC_F_MPLS_3_LABELS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 4, 0,
+ 0, 0, NPC_S_KPU5_MPLS, 12, 1,
+ NPC_LID_LC, NPC_LT_LC_MPLS, NPC_F_MPLS_4_LABELS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 7, 0, NPC_S_KPU12_TU_IP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NSH, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 7, 0, NPC_S_KPU12_TU_IP6, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NSH, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 6, 0, NPC_S_KPU11_TU_ETHER, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NSH, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU5_NSH, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NSH, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 4, 0, NPC_S_KPU9_TU_MPLS, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NSH, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K4, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
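+/* KPU5: outer L3. IPv4 dispatches on protocol twice, once for
+ * IHL == 5 and once for headers with options (NPC_F_IP_*_HAS_OPTIONS,
+ * using the trailing offset/mask/right/shift fields to pick up the
+ * real header length); IPv6 dispatches on next header; ARP, RARP, PTP
+ * and FCoE terminate parsing.
+ */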
+static struct npc_kpu_profile_action kpu5_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0,
+ 2, 0, NPC_S_KPU8_TCP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 8, 10,
+ 2, 0, NPC_S_KPU8_UDP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU8_SCTP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU8_ICMP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU8_IGMP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU8_ESP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU8_AH, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 2, 0, NPC_S_KPU8_GRE, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 6, 0, NPC_S_KPU12_TU_IP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_IP_IN_IP, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 6, 0, NPC_S_KPU12_TU_IP6, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_6TO4, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 3, 0, NPC_S_KPU9_TU_MPLS, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_MPLS_IN_IP, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_UNK_PROTO, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0,
+ 2, 0, NPC_S_KPU8_TCP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 8, 10,
+ 2, 0, NPC_S_KPU8_UDP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU8_SCTP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU8_ICMP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU8_IGMP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU8_ESP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU8_AH, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 2, 0, NPC_S_KPU8_GRE, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 6, 0, NPC_S_KPU12_TU_IP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_IP_IN_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 6, 0, NPC_S_KPU12_TU_IP6, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_6TO4_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 3, 0, NPC_S_KPU9_TU_MPLS, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_MPLS_IN_IP_HAS_OPTIONS,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_UNK_PROTO_HAS_OPTIONS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_IP_VER, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_ARP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_RARP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_PTP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_FCOE, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0,
+ 2, 0, NPC_S_KPU8_TCP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 8, 10,
+ 2, 0, NPC_S_KPU8_UDP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU8_SCTP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU8_ICMP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU8_ICMP6, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU8_ESP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU8_AH, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU8_GRE, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 6, 0, NPC_S_KPU12_TU_IP6, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6, NPC_F_IP6_TUN_IP6, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 3, 0, NPC_S_KPU9_TU_MPLS, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6, NPC_F_IP6_MPLS_IN_IP, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU6_IP6_EXT, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6, NPC_F_IP6_HAS_EXT, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_IP6_VER, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 6, 0, NPC_S_KPU12_TU_IP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 6, 0, NPC_S_KPU12_TU_IP6, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 5, 0, NPC_S_KPU11_TU_ETHER, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 5, 0, NPC_S_KPU11_TU_ETHER, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_MPLS_2MANY, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 6, 0, NPC_S_KPU12_TU_IP, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 6, 0, NPC_S_KPU12_TU_IP6, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 5, 0, NPC_S_KPU11_TU_ETHER, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 5, 0, NPC_S_KPU11_TU_ETHER, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 6, 0, NPC_S_KPU12_TU_IP, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 6, 0, NPC_S_KPU12_TU_IP6, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 5, 0, NPC_S_KPU11_TU_ETHER, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 5, 0, NPC_S_KPU11_TU_3RD_NSH, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 3, 0, NPC_S_KPU9_TU_MPLS, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
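+/* KPU6 and KPU7: placeholder terminators for the outer IPv6
+ * extension-header states; parsing simply stops here.
+ */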
+static struct npc_kpu_profile_action kpu6_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_action kpu7_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
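+/* KPU8: L4 and tunnel dispatch. TCP selects a payload state by port,
+ * UDP demuxes the encapsulations (VXLAN, VXLAN-GPE, Geneve, GTP), and
+ * GRE advances 4..16 bytes depending on which of the csum/key/seq
+ * options are present before handing off to the inner parsers.
+ */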
+static struct npc_kpu_profile_action kpu8_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 7, 0, NPC_S_KPU16_HTTP_DATA, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_HTTP, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 7, 0, NPC_S_KPU16_HTTPS_DATA, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_HTTPS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 7, 0, NPC_S_KPU16_PPTP_DATA, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_PPTP, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 7, 0, NPC_S_KPU16_TCP_DATA, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_UNK_PORT, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 7, 0, NPC_S_KPU16_HTTP_DATA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_HTTP_HAS_OPTIONS,
+ 12, 0xf0, 1, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 7, 0, NPC_S_KPU16_HTTPS_DATA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_HTTPS_HAS_OPTIONS,
+ 12, 0xf0, 1, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 7, 0, NPC_S_KPU16_PPTP_DATA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_PPTP_HAS_OPTIONS,
+ 12, 0xf0, 1, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 7, 0, NPC_S_KPU16_TCP_DATA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_UNK_PORT_HAS_OPTIONS,
+ 12, 0xf0, 1, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLAN, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLAN_NOVNI, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LD, NPC_EC_VXLAN, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU9_TU_NSH, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NSH, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_MPLS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NOVNI, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NOVNI, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NOVNI, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU9_TU_NSH, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NOVNI_NSH, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NOVNI_MPLS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NONP, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE, 8, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM, 8, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_CRI_OPT, 8, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM_CRI_OPT,
+ 8, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE, 8, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM,
+ 8, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_CRI_OPT,
+ 8, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM_CRI_OPT,
+ 8, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE, 8, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM, 8, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_CRI_OPT,
+ 8, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM_CRI_OPT,
+ 8, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GTP_GTPC, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GTP_GTPU_G_PDU, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GTP_GTPU_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 7, 0, NPC_S_KPU16_UDP_DATA, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_UNK_PORT, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_SCTP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_ICMP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_IGMP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_ICMP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_ESP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_AH, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 2, 0, NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_NVGRE, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LD, NPC_EC_NVGRE, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_KEY, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_KEY, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_KEY_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_KEY_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_KEY, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_KEY, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_KEY_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_KEY_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU9_TU_NSH, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_NSH, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU9_TU_NSH, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_CSUM, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU9_TU_NSH, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_KEY, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU9_TU_NSH, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU9_TU_NSH, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_CSUM_KEY, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU9_TU_NSH, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_CSUM_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU9_TU_NSH, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_KEY_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU9_TU_NSH, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_CSUM_KEY_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_KEY, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_KEY, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_KEY_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_KEY_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP6, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_KEY, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP6, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_KEY, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP6, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP6, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_KEY_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_KEY_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_ROUTE, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_UNK_PROTO, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LD, NPC_EC_GRE, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU11_TU_PPP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_VER1, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU11_TU_PPP, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_VER1_HAS_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU11_TU_PPP, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_VER1_HAS_ACK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU11_TU_PPP, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_VER1_HAS_SEQ_ACK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_VER1_UNK_PROTO, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LD, NPC_EC_GRE_VER1, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LD, NPC_EC_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
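+/* KPU9: walk tunnel MPLS label stacks and NSH headers seen after
+ * GRE/VXLAN, handing off to KPU10-KPU12 based on the inner payload.
+ */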
+static struct npc_kpu_profile_action kpu9_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU10_TU_MPLS_PL, 4, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU10_TU_MPLS_PL, 8, 0,
+ NPC_LID_LD, NPC_LT_NA, NPC_F_MPLS_2_LABELS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU10_TU_MPLS_PL, 12, 0,
+ NPC_LID_LD, NPC_LT_NA, NPC_F_MPLS_3_LABELS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 4, 0,
+ 0, 0, NPC_S_KPU10_TU_MPLS, 12, 0,
+ NPC_LID_LD, NPC_LT_NA, NPC_F_MPLS_4_LABELS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU10_TU_MPLS_PL, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU10_TU_MPLS_PL, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS, NPC_F_MPLS_2_LABELS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU10_TU_MPLS_PL, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS, NPC_F_MPLS_3_LABELS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 4, 0,
+ 0, 0, NPC_S_KPU10_TU_MPLS, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS, NPC_F_MPLS_4_LABELS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 2, 0, NPC_S_KPU12_TU_IP, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 2, 0, NPC_S_KPU12_TU_IP6, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 1, 0, NPC_S_KPU11_TU_ETHER, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU10_TU_NSH, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU11_TU_MPLS_IN_NSH, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_LE, NPC_EC_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
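+/* KPU10: continue past the tunnel MPLS stack or NSH header towards
+ * the inner Ethernet/IP headers; an over-long label stack is flagged
+ * via NPC_EC_L2_MPLS_2MANY.
+ */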
+static struct npc_kpu_profile_action kpu10_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 1, 0, NPC_S_KPU12_TU_IP, 4, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 1, 0, NPC_S_KPU12_TU_IP6, 4, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 0, 0, NPC_S_KPU11_TU_ETHER, 8, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 0, 0, NPC_S_KPU11_TU_ETHER, 4, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_MPLS_2MANY, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 1, 0, NPC_S_KPU12_TU_IP, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 1, 0, NPC_S_KPU12_TU_IP6, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 0, 0, NPC_S_KPU11_TU_ETHER, 4, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 0, 0, NPC_S_KPU11_TU_ETHER, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 1, 0, NPC_S_KPU12_TU_IP, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 1, 0, NPC_S_KPU12_TU_IP6, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 0, 0, NPC_S_KPU11_TU_ETHER, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU11_TU_3RD_NSH, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU11_TU_MPLS_IN_NSH, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_LE, NPC_EC_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
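+/* KPU11: parse the inner (tunnelled) Ethernet header as layer LE,
+ * including CTAG, STAG and QinQ tagged variants, before handing the
+ * inner IP/ARP off to KPU12.
+ */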
+static struct npc_kpu_profile_action kpu11_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_IP, 14, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_IP6, 14, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_ARP, 14, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_IP, 18, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_IP6, 18, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_ARP, 18, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_CTAG_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_IP, 22, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_IP6, 22, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_ARP, 22, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER,
+ NPC_F_TU_ETHER_STAG_CTAG_UNK, 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_IP, 18, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_IP6, 18, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_ARP, 18, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_IP, 22, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_IP6, 22, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_ARP, 22, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER,
+ NPC_F_TU_ETHER_QINQ_CTAG_UNK, 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_IP, 18, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_IP6, 18, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_ARP, 18, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_PPP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_NSH, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_3RD_NSH, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LE, NPC_EC_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LE, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
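+/* KPU12: parse the inner IPv4/IPv6/ARP headers as layer LF and hand
+ * the inner L4 protocols off to KPU15 (KPU13 handles IPv6 extension
+ * headers).
+ */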
+static struct npc_kpu_profile_action kpu12_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0,
+ 2, 0, NPC_S_KPU15_TU_TCP, 20, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_UDP, 20, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_SCTP, 20, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_ICMP, 20, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_IGMP, 20, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_ESP, 20, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_AH, 20, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_UNK_PROTO, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0,
+ 2, 0, NPC_S_KPU15_TU_TCP, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_UDP, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_SCTP, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_ICMP, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_IGMP, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_ESP, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_AH, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP,
+ NPC_F_IP_UNK_PROTO_HAS_OPTIONS, 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LF, NPC_EC_IP_VER, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ARP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0,
+ 2, 0, NPC_S_KPU15_TU_TCP, 40, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_UDP, 40, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_SCTP, 40, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_ICMP, 40, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_ICMP6, 40, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_ESP, 40, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_AH, 40, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU13_TU_IP6_EXT, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP6, NPC_F_IP6_HAS_EXT, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LF, NPC_EC_IP6_VER, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LF, NPC_EC_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LF, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_action kpu13_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_action kpu14_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
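+/* KPU15: classify the inner L4 header as layer LG and steer TCP
+ * flows on well-known ports (HTTP/HTTPS/PPTP) to the corresponding
+ * KPU16 data states.
+ */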
+static struct npc_kpu_profile_action kpu15_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU16_HTTP_DATA, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_HTTP, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU16_HTTPS_DATA, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_HTTPS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU16_PPTP_DATA, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_PPTP, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU16_TCP_DATA, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_UNK_PORT, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU16_HTTP_DATA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_HTTP_HAS_OPTIONS,
+ 12, 0xf0, 1, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU16_HTTPS_DATA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_HTTPS_HAS_OPTIONS,
+ 12, 0xf0, 1, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU16_PPTP_DATA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_PPTP_HAS_OPTIONS,
+ 12, 0xf0, 1, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU16_TCP_DATA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_UNK_PORT_HAS_OPTIONS,
+ 12, 0xf0, 1, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU16_UDP_DATA, 8, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_UDP, NPC_F_UDP_UNK_PORT, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_SCTP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_ICMP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IGMP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_ICMP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_ESP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_AH, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LG, NPC_EC_L4, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LG, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
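+/* KPU16: terminal states which tag the remaining payload as layer LH
+ * data.
+ */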
+static struct npc_kpu_profile_action kpu16_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TCP_DATA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_HTTP_DATA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_HTTPS_DATA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_PPTP_DATA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_UDP_DATA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
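+/* Per-KPU profile: CAM and action entry counts plus pointers to the
+ * tables above. Each CAM match row pairs with the action row at the
+ * same index, so the two counts are expected to match.
+ */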
+static struct npc_kpu_profile npc_kpu_profiles[] = {
+ {
+ ARRAY_SIZE(kpu1_cam_entries),
+ ARRAY_SIZE(kpu1_action_entries),
+ &kpu1_cam_entries[0],
+ &kpu1_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu2_cam_entries),
+ ARRAY_SIZE(kpu2_action_entries),
+ &kpu2_cam_entries[0],
+ &kpu2_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu3_cam_entries),
+ ARRAY_SIZE(kpu3_action_entries),
+ &kpu3_cam_entries[0],
+ &kpu3_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu4_cam_entries),
+ ARRAY_SIZE(kpu4_action_entries),
+ &kpu4_cam_entries[0],
+ &kpu4_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu5_cam_entries),
+ ARRAY_SIZE(kpu5_action_entries),
+ &kpu5_cam_entries[0],
+ &kpu5_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu6_cam_entries),
+ ARRAY_SIZE(kpu6_action_entries),
+ &kpu6_cam_entries[0],
+ &kpu6_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu7_cam_entries),
+ ARRAY_SIZE(kpu7_action_entries),
+ &kpu7_cam_entries[0],
+ &kpu7_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu8_cam_entries),
+ ARRAY_SIZE(kpu8_action_entries),
+ &kpu8_cam_entries[0],
+ &kpu8_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu9_cam_entries),
+ ARRAY_SIZE(kpu9_action_entries),
+ &kpu9_cam_entries[0],
+ &kpu9_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu10_cam_entries),
+ ARRAY_SIZE(kpu10_action_entries),
+ &kpu10_cam_entries[0],
+ &kpu10_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu11_cam_entries),
+ ARRAY_SIZE(kpu11_action_entries),
+ &kpu11_cam_entries[0],
+ &kpu11_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu12_cam_entries),
+ ARRAY_SIZE(kpu12_action_entries),
+ &kpu12_cam_entries[0],
+ &kpu12_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu13_cam_entries),
+ ARRAY_SIZE(kpu13_action_entries),
+ &kpu13_cam_entries[0],
+ &kpu13_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu14_cam_entries),
+ ARRAY_SIZE(kpu14_action_entries),
+ &kpu14_cam_entries[0],
+ &kpu14_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu15_cam_entries),
+ ARRAY_SIZE(kpu15_action_entries),
+ &kpu15_cam_entries[0],
+ &kpu15_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu16_cam_entries),
+ ARRAY_SIZE(kpu16_action_entries),
+ &kpu16_cam_entries[0],
+ &kpu16_action_entries[0],
+ },
+};
+
+#endif /* NPC_PROFILE_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
new file mode 100644
index 000000000000..dc28fa2b9481
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -0,0 +1,1772 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/pci.h>
+#include <linux/sysfs.h>
+
+#include "cgx.h"
+#include "rvu.h"
+#include "rvu_reg.h"
+
+#define DRV_NAME "octeontx2-af"
+#define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver"
+#define DRV_VERSION "1.0"
+
+static int rvu_get_hwvf(struct rvu *rvu, int pcifunc);
+
+static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct rvu_block *block, int lf);
+static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct rvu_block *block, int lf);
+
+/* Supported devices */
+static const struct pci_device_id rvu_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AF) },
+ { 0, } /* end of table */
+};
+
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_DESCRIPTION(DRV_STRING);
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, rvu_id_table);
+
+/* Poll an RVU block's register 'offset' until the bits selected by
+ * 'mask' read back as zero (when 'zero' is true) or nonzero.
+ */
+int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
+{
+ unsigned long timeout = jiffies + usecs_to_jiffies(100);
+ void __iomem *reg;
+ u64 reg_val;
+
+ reg = rvu->afreg_base + ((block << 28) | offset);
+ while (time_before(jiffies, timeout)) {
+ reg_val = readq(reg);
+ if (zero && !(reg_val & mask))
+ return 0;
+ if (!zero && (reg_val & mask))
+ return 0;
+ usleep_range(1, 5);
+ timeout--;
+ }
+ return -EBUSY;
+}
+
+int rvu_alloc_rsrc(struct rsrc_bmap *rsrc)
+{
+ int id;
+
+ if (!rsrc->bmap)
+ return -EINVAL;
+
+ id = find_first_zero_bit(rsrc->bmap, rsrc->max);
+ if (id >= rsrc->max)
+ return -ENOSPC;
+
+ __set_bit(id, rsrc->bmap);
+
+ return id;
+}
+
+int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc)
+{
+ int start;
+
+ if (!rsrc->bmap)
+ return -EINVAL;
+
+ start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
+ if (start >= rsrc->max)
+ return -ENOSPC;
+
+ bitmap_set(rsrc->bmap, start, nrsrc);
+ return start;
+}
+
+static void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start)
+{
+ if (!rsrc->bmap)
+ return;
+ if (start >= rsrc->max)
+ return;
+
+ bitmap_clear(rsrc->bmap, start, nrsrc);
+}
+
+bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc)
+{
+ int start;
+
+ if (!rsrc->bmap)
+ return false;
+
+ start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
+ if (start >= rsrc->max)
+ return false;
+
+ return true;
+}
+
+void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id)
+{
+ if (!rsrc->bmap)
+ return;
+
+ __clear_bit(id, rsrc->bmap);
+}
+
+int rvu_rsrc_free_count(struct rsrc_bmap *rsrc)
+{
+ int used;
+
+ if (!rsrc->bmap)
+ return 0;
+
+ used = bitmap_weight(rsrc->bmap, rsrc->max);
+ return (rsrc->max - used);
+}
+
+int rvu_alloc_bitmap(struct rsrc_bmap *rsrc)
+{
+ rsrc->bmap = kcalloc(BITS_TO_LONGS(rsrc->max),
+ sizeof(long), GFP_KERNEL);
+ if (!rsrc->bmap)
+ return -ENOMEM;
+ return 0;
+}
+
+/* Get block LF's HW index from a PF_FUNC's block slot number */
+int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
+{
+ u16 match = 0;
+ int lf;
+
+ spin_lock(&rvu->rsrc_lock);
+ for (lf = 0; lf < block->lf.max; lf++) {
+ if (block->fn_map[lf] == pcifunc) {
+ if (slot == match) {
+ spin_unlock(&rvu->rsrc_lock);
+ return lf;
+ }
+ match++;
+ }
+ }
+ spin_unlock(&rvu->rsrc_lock);
+ return -ENODEV;
+}
+
+/* Convert BLOCK_TYPE_E to a BLOCK_ADDR_E.
+ * Some silicon variants of OcteonTX2 support
+ * multiple blocks of the same type.
+ *
+ * @pcifunc has to be zero when no LF is yet attached.
+ */
+int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
+{
+ int devnum, blkaddr = -ENODEV;
+ u64 cfg, reg;
+ bool is_pf;
+
+ switch (blktype) {
+ case BLKTYPE_NPC:
+ blkaddr = BLKADDR_NPC;
+ goto exit;
+ case BLKTYPE_NPA:
+ blkaddr = BLKADDR_NPA;
+ goto exit;
+ case BLKTYPE_NIX:
+ /* For now assume NIX0 */
+ if (!pcifunc) {
+ blkaddr = BLKADDR_NIX0;
+ goto exit;
+ }
+ break;
+ case BLKTYPE_SSO:
+ blkaddr = BLKADDR_SSO;
+ goto exit;
+ case BLKTYPE_SSOW:
+ blkaddr = BLKADDR_SSOW;
+ goto exit;
+ case BLKTYPE_TIM:
+ blkaddr = BLKADDR_TIM;
+ goto exit;
+ case BLKTYPE_CPT:
+ /* For now assume CPT0 */
+ if (!pcifunc) {
+ blkaddr = BLKADDR_CPT0;
+ goto exit;
+ }
+ break;
+ }
+
+ /* Check if this is an RVU PF or VF */
+ if (pcifunc & RVU_PFVF_FUNC_MASK) {
+ is_pf = false;
+ devnum = rvu_get_hwvf(rvu, pcifunc);
+ } else {
+ is_pf = true;
+ devnum = rvu_get_pf(pcifunc);
+ }
+
+ /* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' */
+ if (blktype == BLKTYPE_NIX) {
+ reg = is_pf ? RVU_PRIV_PFX_NIX0_CFG : RVU_PRIV_HWVFX_NIX0_CFG;
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
+ if (cfg)
+ blkaddr = BLKADDR_NIX0;
+ }
+
+ /* Check if the 'pcifunc' has a CPT LF from 'BLKADDR_CPT0' */
+ if (blktype == BLKTYPE_CPT) {
+ reg = is_pf ? RVU_PRIV_PFX_CPT0_CFG : RVU_PRIV_HWVFX_CPT0_CFG;
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
+ if (cfg)
+ blkaddr = BLKADDR_CPT0;
+ }
+
+exit:
+ if (is_block_implemented(rvu->hw, blkaddr))
+ return blkaddr;
+ return -ENODEV;
+}
+
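+/* Update the SW view of which LFs a pcifunc owns and mirror the
+ * per-func LF count of this block type into the corresponding
+ * PF/HWVF provisioning register.
+ */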
+static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct rvu_block *block, u16 pcifunc,
+ u16 lf, bool attach)
+{
+ int devnum, num_lfs = 0;
+ bool is_pf;
+ u64 reg;
+
+ if (lf >= block->lf.max) {
+ dev_err(&rvu->pdev->dev,
+ "%s: FATAL: LF %d is >= %s's max lfs i.e %d\n",
+ __func__, lf, block->name, block->lf.max);
+ return;
+ }
+
+ /* Check if this is for an RVU PF or VF */
+ if (pcifunc & RVU_PFVF_FUNC_MASK) {
+ is_pf = false;
+ devnum = rvu_get_hwvf(rvu, pcifunc);
+ } else {
+ is_pf = true;
+ devnum = rvu_get_pf(pcifunc);
+ }
+
+ block->fn_map[lf] = attach ? pcifunc : 0;
+
+ switch (block->type) {
+ case BLKTYPE_NPA:
+ pfvf->npalf = attach;
+ num_lfs = pfvf->npalf;
+ break;
+ case BLKTYPE_NIX:
+ pfvf->nixlf = attach;
+ num_lfs = pfvf->nixlf;
+ break;
+ case BLKTYPE_SSO:
+ attach ? pfvf->sso++ : pfvf->sso--;
+ num_lfs = pfvf->sso;
+ break;
+ case BLKTYPE_SSOW:
+ attach ? pfvf->ssow++ : pfvf->ssow--;
+ num_lfs = pfvf->ssow;
+ break;
+ case BLKTYPE_TIM:
+ attach ? pfvf->timlfs++ : pfvf->timlfs--;
+ num_lfs = pfvf->timlfs;
+ break;
+ case BLKTYPE_CPT:
+ attach ? pfvf->cptlfs++ : pfvf->cptlfs--;
+ num_lfs = pfvf->cptlfs;
+ break;
+ }
+
+ reg = is_pf ? block->pf_lfcnt_reg : block->vf_lfcnt_reg;
+ rvu_write64(rvu, BLKADDR_RVUM, reg | (devnum << 16), num_lfs);
+}
+
+inline int rvu_get_pf(u16 pcifunc)
+{
+ return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
+}
+
+void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
+{
+ u64 cfg;
+
+ /* Get numVFs attached to this PF and first HWVF */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+ *numvfs = (cfg >> 12) & 0xFF;
+ *hwvf = cfg & 0xFFF;
+}
+
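+/* Map a pcifunc to its global HWVF index. FUNC 0 denotes the PF
+ * itself, so VF 'n' of a PF maps to the PF's first HWVF plus (n - 1).
+ */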
+static int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
+{
+ int pf, func;
+ u64 cfg;
+
+ pf = rvu_get_pf(pcifunc);
+ func = pcifunc & RVU_PFVF_FUNC_MASK;
+
+ /* Get first HWVF attached to this PF */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+
+ return ((cfg & 0xFFF) + func - 1);
+}
+
+struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc)
+{
+ /* Check if it is a PF or VF */
+ if (pcifunc & RVU_PFVF_FUNC_MASK)
+ return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)];
+ else
+ return &rvu->pf[rvu_get_pf(pcifunc)];
+}
+
+bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr)
+{
+ struct rvu_block *block;
+
+ if (blkaddr < BLKADDR_RVUM || blkaddr >= BLK_COUNT)
+ return false;
+
+ block = &hw->block[blkaddr];
+ return block->implemented;
+}
+
+static void rvu_check_block_implemented(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkid;
+ u64 cfg;
+
+ /* For each block check if 'implemented' bit is set */
+ for (blkid = 0; blkid < BLK_COUNT; blkid++) {
+ block = &hw->block[blkid];
+ cfg = rvupf_read64(rvu, RVU_PF_BLOCK_ADDRX_DISC(blkid));
+ if (cfg & BIT_ULL(11))
+ block->implemented = true;
+ }
+}
+
+int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf)
+{
+ int err;
+
+ if (!block->implemented)
+ return 0;
+
+ rvu_write64(rvu, block->addr, block->lfreset_reg, lf | BIT_ULL(12));
+ err = rvu_poll_reg(rvu, block->addr, block->lfreset_reg, BIT_ULL(12),
+ true);
+ return err;
+}
+
+static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg)
+{
+ struct rvu_block *block = &rvu->hw->block[blkaddr];
+
+ if (!block->implemented)
+ return;
+
+ rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0));
+ rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true);
+}
+
+static void rvu_reset_all_blocks(struct rvu *rvu)
+{
+ /* Do a HW reset of all RVU blocks */
+ rvu_block_reset(rvu, BLKADDR_NPA, NPA_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NIX0, NIX_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NPC, NPC_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NDC0, NDC_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NDC1, NDC_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NDC2, NDC_AF_BLK_RST);
+}
+
+static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block)
+{
+ struct rvu_pfvf *pfvf;
+ u64 cfg;
+ int lf;
+
+ for (lf = 0; lf < block->lf.max; lf++) {
+ cfg = rvu_read64(rvu, block->addr,
+ block->lfcfg_reg | (lf << block->lfshift));
+ if (!(cfg & BIT_ULL(63)))
+ continue;
+
+ /* Set this resource as being used */
+ __set_bit(lf, block->lf.bmap);
+
+ /* Find out to whom this LF is attached */
+ pfvf = rvu_get_pfvf(rvu, (cfg >> 8) & 0xFFFF);
+ rvu_update_rsrc_map(rvu, pfvf, block,
+ (cfg >> 8) & 0xFFFF, lf, true);
+
+ /* Set start MSIX vector for this LF within this PF/VF */
+ rvu_set_msix_offset(rvu, pfvf, block, lf);
+ }
+}
+
+static void rvu_check_min_msix_vec(struct rvu *rvu, int nvecs, int pf, int vf)
+{
+ int min_vecs;
+
+ if (!vf)
+ goto check_pf;
+
+ if (!nvecs) {
+ dev_warn(rvu->dev,
+ "PF%d:VF%d is configured with zero msix vectors, %d\n",
+ pf, vf - 1, nvecs);
+ }
+ return;
+
+check_pf:
+ if (pf == 0)
+ min_vecs = RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT;
+ else
+ min_vecs = RVU_PF_INT_VEC_CNT;
+
+ if (!(nvecs < min_vecs))
+ return;
+ dev_warn(rvu->dev,
+ "PF%d is configured with too few vectors, %d, min is %d\n",
+ pf, nvecs, min_vecs);
+}
+
+static int rvu_setup_msix_resources(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int pf, vf, numvfs, hwvf, err;
+ int nvecs, offset, max_msix;
+ struct rvu_pfvf *pfvf;
+ u64 cfg, phy_addr;
+ dma_addr_t iova;
+
+ for (pf = 0; pf < hw->total_pfs; pf++) {
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+ /* If PF is not enabled, nothing to do */
+ if (!((cfg >> 20) & 0x01))
+ continue;
+
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
+
+ pfvf = &rvu->pf[pf];
+ /* Get num of MSIX vectors attached to this PF */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_MSIX_CFG(pf));
+ pfvf->msix.max = ((cfg >> 32) & 0xFFF) + 1;
+ rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, 0);
+
+ /* Alloc msix bitmap for this PF */
+ err = rvu_alloc_bitmap(&pfvf->msix);
+ if (err)
+ return err;
+
+ /* Allocate memory for MSIX vector to RVU block LF mapping */
+ pfvf->msix_lfmap = devm_kcalloc(rvu->dev, pfvf->msix.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!pfvf->msix_lfmap)
+ return -ENOMEM;
+
+ /* For PF0 (AF) firmware will set msix vector offsets for
+ * AF, block AF and PF0_INT vectors, so jump to VFs.
+ */
+ if (!pf)
+ goto setup_vfmsix;
+
+ /* Set MSIX offset for PF's 'RVU_PF_INT_VEC' vectors.
+ * These are allocated on driver init and never freed,
+ * so no need to set 'msix_lfmap' for these.
+ */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(pf));
+ nvecs = (cfg >> 12) & 0xFF;
+ cfg &= ~0x7FFULL;
+ offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_PRIV_PFX_INT_CFG(pf), cfg | offset);
+setup_vfmsix:
+ /* Alloc msix bitmap for VFs */
+ for (vf = 0; vf < numvfs; vf++) {
+ pfvf = &rvu->hwvf[hwvf + vf];
+ /* Get num of MSIX vectors attached to this VF */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM,
+ RVU_PRIV_PFX_MSIX_CFG(pf));
+ pfvf->msix.max = (cfg & 0xFFF) + 1;
+ rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, vf + 1);
+
+ /* Alloc msix bitmap for this VF */
+ err = rvu_alloc_bitmap(&pfvf->msix);
+ if (err)
+ return err;
+
+ pfvf->msix_lfmap =
+ devm_kcalloc(rvu->dev, pfvf->msix.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!pfvf->msix_lfmap)
+ return -ENOMEM;
+
+ /* Set MSIX offset for HWVF's 'RVU_VF_INT_VEC' vectors.
+ * These are allocated on driver init and never freed,
+ * so no need to set 'msix_lfmap' for these.
+ */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM,
+ RVU_PRIV_HWVFX_INT_CFG(hwvf + vf));
+ nvecs = (cfg >> 12) & 0xFF;
+ cfg &= ~0x7FFULL;
+ offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_PRIV_HWVFX_INT_CFG(hwvf + vf),
+ cfg | offset);
+ }
+ }
+
+ /* HW interprets RVU_AF_MSIXTR_BASE address as an IOVA, hence
+ * create an IOMMU mapping for the physical address configured by
+ * firmware and reprogram RVU_AF_MSIXTR_BASE with the IOVA.
+ */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
+ max_msix = cfg & 0xFFFFF;
+ phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE);
+ iova = dma_map_resource(rvu->dev, phy_addr,
+ max_msix * PCI_MSIX_ENTRY_SIZE,
+ DMA_BIDIRECTIONAL, 0);
+
+ if (dma_mapping_error(rvu->dev, iova))
+ return -ENOMEM;
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova);
+ rvu->msix_base_iova = iova;
+
+ return 0;
+}
+
+static void rvu_free_hw_resources(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ struct rvu_pfvf *pfvf;
+ int id, max_msix;
+ u64 cfg;
+
+ rvu_npa_freemem(rvu);
+ rvu_npc_freemem(rvu);
+ rvu_nix_freemem(rvu);
+
+ /* Free block LF bitmaps */
+ for (id = 0; id < BLK_COUNT; id++) {
+ block = &hw->block[id];
+ kfree(block->lf.bmap);
+ }
+
+ /* Free MSIX bitmaps */
+ for (id = 0; id < hw->total_pfs; id++) {
+ pfvf = &rvu->pf[id];
+ kfree(pfvf->msix.bmap);
+ }
+
+ for (id = 0; id < hw->total_vfs; id++) {
+ pfvf = &rvu->hwvf[id];
+ kfree(pfvf->msix.bmap);
+ }
+
+ /* Unmap MSIX vector base IOVA mapping */
+ if (!rvu->msix_base_iova)
+ return;
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
+ max_msix = cfg & 0xFFFFF;
+ dma_unmap_resource(rvu->dev, rvu->msix_base_iova,
+ max_msix * PCI_MSIX_ENTRY_SIZE,
+ DMA_BIDIRECTIONAL, 0);
+}
+
+static int rvu_setup_hw_resources(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkid, err;
+ u64 cfg;
+
+ /* Get HW supported max RVU PF & VF count */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
+ hw->total_pfs = (cfg >> 32) & 0xFF;
+ hw->total_vfs = (cfg >> 20) & 0xFFF;
+ hw->max_vfs_per_pf = (cfg >> 40) & 0xFF;
+
+ /* Init NPA LF's bitmap */
+ block = &hw->block[BLKADDR_NPA];
+ if (!block->implemented)
+ goto nix;
+ cfg = rvu_read64(rvu, BLKADDR_NPA, NPA_AF_CONST);
+ block->lf.max = (cfg >> 16) & 0xFFF;
+ block->addr = BLKADDR_NPA;
+ block->type = BLKTYPE_NPA;
+ block->lfshift = 8;
+ block->lookup_reg = NPA_AF_RVU_LF_CFG_DEBUG;
+ block->pf_lfcnt_reg = RVU_PRIV_PFX_NPA_CFG;
+ block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NPA_CFG;
+ block->lfcfg_reg = NPA_PRIV_LFX_CFG;
+ block->msixcfg_reg = NPA_PRIV_LFX_INT_CFG;
+ block->lfreset_reg = NPA_AF_LF_RST;
+ sprintf(block->name, "NPA");
+ err = rvu_alloc_bitmap(&block->lf);
+ if (err)
+ return err;
+
+nix:
+ /* Init NIX LF's bitmap */
+ block = &hw->block[BLKADDR_NIX0];
+ if (!block->implemented)
+ goto sso;
+ cfg = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST2);
+ block->lf.max = cfg & 0xFFF;
+ block->addr = BLKADDR_NIX0;
+ block->type = BLKTYPE_NIX;
+ block->lfshift = 8;
+ block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG;
+ block->pf_lfcnt_reg = RVU_PRIV_PFX_NIX0_CFG;
+ block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIX0_CFG;
+ block->lfcfg_reg = NIX_PRIV_LFX_CFG;
+ block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG;
+ block->lfreset_reg = NIX_AF_LF_RST;
+ sprintf(block->name, "NIX");
+ err = rvu_alloc_bitmap(&block->lf);
+ if (err)
+ return err;
+
+sso:
+ /* Init SSO group's bitmap */
+ block = &hw->block[BLKADDR_SSO];
+ if (!block->implemented)
+ goto ssow;
+ cfg = rvu_read64(rvu, BLKADDR_SSO, SSO_AF_CONST);
+ block->lf.max = cfg & 0xFFFF;
+ block->addr = BLKADDR_SSO;
+ block->type = BLKTYPE_SSO;
+ block->multislot = true;
+ block->lfshift = 3;
+ block->lookup_reg = SSO_AF_RVU_LF_CFG_DEBUG;
+ block->pf_lfcnt_reg = RVU_PRIV_PFX_SSO_CFG;
+ block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSO_CFG;
+ block->lfcfg_reg = SSO_PRIV_LFX_HWGRP_CFG;
+ block->msixcfg_reg = SSO_PRIV_LFX_HWGRP_INT_CFG;
+ block->lfreset_reg = SSO_AF_LF_HWGRP_RST;
+ sprintf(block->name, "SSO GROUP");
+ err = rvu_alloc_bitmap(&block->lf);
+ if (err)
+ return err;
+
+ssow:
+ /* Init SSO workslot's bitmap */
+ block = &hw->block[BLKADDR_SSOW];
+ if (!block->implemented)
+ goto tim;
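+ /* Workslot count comes from SSO_AF_CONST (read above), bits 63:56 */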
+ block->lf.max = (cfg >> 56) & 0xFF;
+ block->addr = BLKADDR_SSOW;
+ block->type = BLKTYPE_SSOW;
+ block->multislot = true;
+ block->lfshift = 3;
+ block->lookup_reg = SSOW_AF_RVU_LF_HWS_CFG_DEBUG;
+ block->pf_lfcnt_reg = RVU_PRIV_PFX_SSOW_CFG;
+ block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSOW_CFG;
+ block->lfcfg_reg = SSOW_PRIV_LFX_HWS_CFG;
+ block->msixcfg_reg = SSOW_PRIV_LFX_HWS_INT_CFG;
+ block->lfreset_reg = SSOW_AF_LF_HWS_RST;
+ sprintf(block->name, "SSOWS");
+ err = rvu_alloc_bitmap(&block->lf);
+ if (err)
+ return err;
+
+tim:
+ /* Init TIM LF's bitmap */
+ block = &hw->block[BLKADDR_TIM];
+ if (!block->implemented)
+ goto cpt;
+ cfg = rvu_read64(rvu, BLKADDR_TIM, TIM_AF_CONST);
+ block->lf.max = cfg & 0xFFFF;
+ block->addr = BLKADDR_TIM;
+ block->type = BLKTYPE_TIM;
+ block->multislot = true;
+ block->lfshift = 3;
+ block->lookup_reg = TIM_AF_RVU_LF_CFG_DEBUG;
+ block->pf_lfcnt_reg = RVU_PRIV_PFX_TIM_CFG;
+ block->vf_lfcnt_reg = RVU_PRIV_HWVFX_TIM_CFG;
+ block->lfcfg_reg = TIM_PRIV_LFX_CFG;
+ block->msixcfg_reg = TIM_PRIV_LFX_INT_CFG;
+ block->lfreset_reg = TIM_AF_LF_RST;
+ sprintf(block->name, "TIM");
+ err = rvu_alloc_bitmap(&block->lf);
+ if (err)
+ return err;
+
+cpt:
+ /* Init CPT LF's bitmap */
+ block = &hw->block[BLKADDR_CPT0];
+ if (!block->implemented)
+ goto init;
+ cfg = rvu_read64(rvu, BLKADDR_CPT0, CPT_AF_CONSTANTS0);
+ block->lf.max = cfg & 0xFF;
+ block->addr = BLKADDR_CPT0;
+ block->type = BLKTYPE_CPT;
+ block->multislot = true;
+ block->lfshift = 3;
+ block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG;
+ block->pf_lfcnt_reg = RVU_PRIV_PFX_CPT0_CFG;
+ block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPT0_CFG;
+ block->lfcfg_reg = CPT_PRIV_LFX_CFG;
+ block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG;
+ block->lfreset_reg = CPT_AF_LF_RST;
+ sprintf(block->name, "CPT");
+ err = rvu_alloc_bitmap(&block->lf);
+ if (err)
+ return err;
+
+init:
+ /* Allocate memory for PFVF data */
+ rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs,
+ sizeof(struct rvu_pfvf), GFP_KERNEL);
+ if (!rvu->pf)
+ return -ENOMEM;
+
+ rvu->hwvf = devm_kcalloc(rvu->dev, hw->total_vfs,
+ sizeof(struct rvu_pfvf), GFP_KERNEL);
+ if (!rvu->hwvf)
+ return -ENOMEM;
+
+ spin_lock_init(&rvu->rsrc_lock);
+
+ err = rvu_setup_msix_resources(rvu);
+ if (err)
+ return err;
+
+ for (blkid = 0; blkid < BLK_COUNT; blkid++) {
+ block = &hw->block[blkid];
+ if (!block->lf.bmap)
+ continue;
+
+ /* Allocate memory for block LF/slot to pcifunc mapping info */
+ block->fn_map = devm_kcalloc(rvu->dev, block->lf.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!block->fn_map)
+ return -ENOMEM;
+
+ /* Scan all blocks to check if low-level firmware has
+ * already provisioned any of the resources to a PF/VF.
+ */
+ rvu_scan_block(rvu, block);
+ }
+
+ err = rvu_npc_init(rvu);
+ if (err)
+ return err;
+
+ err = rvu_npa_init(rvu);
+ if (err)
+ return err;
+
+ err = rvu_nix_init(rvu);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/* NPA and NIX admin queue APIs */
+void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq)
+{
+ if (!aq)
+ return;
+
+ qmem_free(rvu->dev, aq->inst);
+ qmem_free(rvu->dev, aq->res);
+ devm_kfree(rvu->dev, aq);
+}
+
+int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
+ int qsize, int inst_size, int res_size)
+{
+ struct admin_queue *aq;
+ int err;
+
+ *ad_queue = devm_kzalloc(rvu->dev, sizeof(*aq), GFP_KERNEL);
+ if (!*ad_queue)
+ return -ENOMEM;
+ aq = *ad_queue;
+
+ /* Alloc memory for instructions, i.e. the AQ itself */
+ err = qmem_alloc(rvu->dev, &aq->inst, qsize, inst_size);
+ if (err) {
+ devm_kfree(rvu->dev, aq);
+ return err;
+ }
+
+ /* Alloc memory for results */
+ err = qmem_alloc(rvu->dev, &aq->res, qsize, res_size);
+ if (err) {
+ rvu_aq_free(rvu, aq);
+ return err;
+ }
+
+ spin_lock_init(&aq->lock);
+ return 0;
+}
+
+static int rvu_mbox_handler_READY(struct rvu *rvu, struct msg_req *req,
+ struct ready_msg_rsp *rsp)
+{
+ return 0;
+}
+
+/* Get current count of an RVU block's LFs/slots
+ * provisioned to a given RVU func.
+ */
+static u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blktype)
+{
+ switch (blktype) {
+ case BLKTYPE_NPA:
+ return pfvf->npalf ? 1 : 0;
+ case BLKTYPE_NIX:
+ return pfvf->nixlf ? 1 : 0;
+ case BLKTYPE_SSO:
+ return pfvf->sso;
+ case BLKTYPE_SSOW:
+ return pfvf->ssow;
+ case BLKTYPE_TIM:
+ return pfvf->timlfs;
+ case BLKTYPE_CPT:
+ return pfvf->cptlfs;
+ }
+ return 0;
+}
+
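+/* Look up the HW LF index backing a given slot of a pcifunc via the
+ * block's lookup register: writing bit 13 triggers the lookup (and it
+ * reads back as busy), bit 12 reports 'LF valid' and bits 11:0 hold
+ * the LF index.
+ */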
+static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
+ int pcifunc, int slot)
+{
+ u64 val;
+
+ val = ((u64)pcifunc << 24) | (slot << 16) | (1ULL << 13);
+ rvu_write64(rvu, block->addr, block->lookup_reg, val);
+ /* Wait for the lookup to finish */
+ /* TODO: put some timeout here */
+ while (rvu_read64(rvu, block->addr, block->lookup_reg) & (1ULL << 13))
+ ;
+
+ val = rvu_read64(rvu, block->addr, block->lookup_reg);
+
+ /* Check LF valid bit */
+ if (!(val & (1ULL << 12)))
+ return -1;
+
+ return (val & 0xFFF);
+}
+
+static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int slot, lf, num_lfs;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, blktype, pcifunc);
+ if (blkaddr < 0)
+ return;
+
+ block = &hw->block[blkaddr];
+
+ num_lfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+ if (!num_lfs)
+ return;
+
+ for (slot = 0; slot < num_lfs; slot++) {
+ lf = rvu_lookup_rsrc(rvu, block, pcifunc, slot);
+ if (lf < 0) /* This should never happen */
+ continue;
+
+ /* Disable the LF */
+ rvu_write64(rvu, blkaddr, block->lfcfg_reg |
+ (lf << block->lfshift), 0x00ULL);
+
+ /* Update SW maintained mapping info as well */
+ rvu_update_rsrc_map(rvu, pfvf, block,
+ pcifunc, lf, false);
+
+ /* Free the resource */
+ rvu_free_rsrc(&block->lf, lf);
+
+ /* Clear MSIX vector offset for this LF */
+ rvu_clear_msix_offset(rvu, pfvf, block, lf);
+ }
+}
+
+static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
+ u16 pcifunc)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ bool detach_all = true;
+ struct rvu_block *block;
+ int blkid;
+
+ spin_lock(&rvu->rsrc_lock);
+
+ /* Check for partial resource detach */
+ if (detach && detach->partial)
+ detach_all = false;
+
+ /* Check for RVU block's LFs attached to this func,
+ * if so, detach them.
+ */
+ for (blkid = 0; blkid < BLK_COUNT; blkid++) {
+ block = &hw->block[blkid];
+ if (!block->lf.bmap)
+ continue;
+ if (!detach_all && detach) {
+ if (blkid == BLKADDR_NPA && !detach->npalf)
+ continue;
+ else if ((blkid == BLKADDR_NIX0) && !detach->nixlf)
+ continue;
+ else if ((blkid == BLKADDR_SSO) && !detach->sso)
+ continue;
+ else if ((blkid == BLKADDR_SSOW) && !detach->ssow)
+ continue;
+ else if ((blkid == BLKADDR_TIM) && !detach->timlfs)
+ continue;
+ else if ((blkid == BLKADDR_CPT0) && !detach->cptlfs)
+ continue;
+ }
+ rvu_detach_block(rvu, pcifunc, block->type);
+ }
+
+ spin_unlock(&rvu->rsrc_lock);
+ return 0;
+}
+
+static int rvu_mbox_handler_DETACH_RESOURCES(struct rvu *rvu,
+ struct rsrc_detach *detach,
+ struct msg_rsp *rsp)
+{
+ return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc);
+}
+
+static void rvu_attach_block(struct rvu *rvu, int pcifunc,
+ int blktype, int num_lfs)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int slot, lf;
+ int blkaddr;
+ u64 cfg;
+
+ if (!num_lfs)
+ return;
+
+ blkaddr = rvu_get_blkaddr(rvu, blktype, 0);
+ if (blkaddr < 0)
+ return;
+
+ block = &hw->block[blkaddr];
+ if (!block->lf.bmap)
+ return;
+
+ for (slot = 0; slot < num_lfs; slot++) {
+ /* Allocate the resource */
+ lf = rvu_alloc_rsrc(&block->lf);
+ if (lf < 0)
+ return;
+
+ cfg = (1ULL << 63) | (pcifunc << 8) | slot;
+ rvu_write64(rvu, blkaddr, block->lfcfg_reg |
+ (lf << block->lfshift), cfg);
+ rvu_update_rsrc_map(rvu, pfvf, block,
+ pcifunc, lf, true);
+
+ /* Set start MSIX vector for this LF within this PF/VF */
+ rvu_set_msix_offset(rvu, pfvf, block, lf);
+ }
+}
+
+static int rvu_check_rsrc_availability(struct rvu *rvu,
+ struct rsrc_attach *req, u16 pcifunc)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int free_lfs, mappedlfs;
+
+ /* Only one NPA LF can be attached */
+ if (req->npalf && !rvu_get_rsrc_mapcount(pfvf, BLKTYPE_NPA)) {
+ block = &hw->block[BLKADDR_NPA];
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ if (!free_lfs)
+ goto fail;
+ } else if (req->npalf) {
+ dev_err(&rvu->pdev->dev,
+ "Func 0x%x: Invalid req, already has NPA\n",
+ pcifunc);
+ return -EINVAL;
+ }
+
+ /* Only one NIX LF can be attached */
+ if (req->nixlf && !rvu_get_rsrc_mapcount(pfvf, BLKTYPE_NIX)) {
+ block = &hw->block[BLKADDR_NIX0];
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ if (!free_lfs)
+ goto fail;
+ } else if (req->nixlf) {
+ dev_err(&rvu->pdev->dev,
+ "Func 0x%x: Invalid req, already has NIX\n",
+ pcifunc);
+ return -EINVAL;
+ }
+
+ if (req->sso) {
+ block = &hw->block[BLKADDR_SSO];
+ /* Is request within limits ? */
+ if (req->sso > block->lf.max) {
+ dev_err(&rvu->pdev->dev,
+ "Func 0x%x: Invalid SSO req, %d > max %d\n",
+ pcifunc, req->sso, block->lf.max);
+ return -EINVAL;
+ }
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ /* Check if additional resources are available */
+ if (req->sso > mappedlfs &&
+ ((req->sso - mappedlfs) > free_lfs))
+ goto fail;
+ }
+
+ if (req->ssow) {
+ block = &hw->block[BLKADDR_SSOW];
+ if (req->ssow > block->lf.max) {
+ dev_err(&rvu->pdev->dev,
+ "Func 0x%x: Invalid SSOW req, %d > max %d\n",
+ pcifunc, req->ssow, block->lf.max);
+ return -EINVAL;
+ }
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ if (req->ssow > mappedlfs &&
+ ((req->ssow - mappedlfs) > free_lfs))
+ goto fail;
+ }
+
+ if (req->timlfs) {
+ block = &hw->block[BLKADDR_TIM];
+ if (req->timlfs > block->lf.max) {
+ dev_err(&rvu->pdev->dev,
+ "Func 0x%x: Invalid TIMLF req, %d > max %d\n",
+ pcifunc, req->timlfs, block->lf.max);
+ return -EINVAL;
+ }
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ if (req->timlfs > mappedlfs &&
+ ((req->timlfs - mappedlfs) > free_lfs))
+ goto fail;
+ }
+
+ if (req->cptlfs) {
+ block = &hw->block[BLKADDR_CPT0];
+ if (req->cptlfs > block->lf.max) {
+ dev_err(&rvu->pdev->dev,
+ "Func 0x%x: Invalid CPTLF req, %d > max %d\n",
+ pcifunc, req->cptlfs, block->lf.max);
+ return -EINVAL;
+ }
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ if (req->cptlfs > mappedlfs &&
+ ((req->cptlfs - mappedlfs) > free_lfs))
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ dev_info(rvu->dev, "Request for %s failed\n", block->name);
+ return -ENOSPC;
+}
+
+static int rvu_mbox_handler_ATTACH_RESOURCES(struct rvu *rvu,
+ struct rsrc_attach *attach,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = attach->hdr.pcifunc;
+ int err;
+
+ /* If first request, detach all existing attached resources */
+ if (!attach->modify)
+ rvu_detach_rsrcs(rvu, NULL, pcifunc);
+
+ spin_lock(&rvu->rsrc_lock);
+
+ /* Check if the request can be accommodated */
+ err = rvu_check_rsrc_availability(rvu, attach, pcifunc);
+ if (err)
+ goto exit;
+
+ /* Now attach the requested resources */
+ if (attach->npalf)
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1);
+
+ if (attach->nixlf)
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1);
+
+ if (attach->sso) {
+ /* An RVU func doesn't know which exact LF or slot is attached
+ * to it; it always sees them as slots 0, 1, 2 and so on. So for
+ * a 'modify' request, simply detach all existing attached
+ * LFs/slots and attach afresh.
+ */
+ if (attach->modify)
+ rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO, attach->sso);
+ }
+
+ if (attach->ssow) {
+ if (attach->modify)
+ rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW, attach->ssow);
+ }
+
+ if (attach->timlfs) {
+ if (attach->modify)
+ rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM, attach->timlfs);
+ }
+
+ if (attach->cptlfs) {
+ if (attach->modify)
+ rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT, attach->cptlfs);
+ }
+
+exit:
+ spin_unlock(&rvu->rsrc_lock);
+ return err;
+}
+
+static u16 rvu_get_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ int blkaddr, int lf)
+{
+ u16 vec;
+
+ if (lf < 0)
+ return MSIX_VECTOR_INVALID;
+
+ for (vec = 0; vec < pfvf->msix.max; vec++) {
+ if (pfvf->msix_lfmap[vec] == MSIX_BLKLF(blkaddr, lf))
+ return vec;
+ }
+ return MSIX_VECTOR_INVALID;
+}
+
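+/* Reserve a contiguous range of MSIX vectors for this LF and program
+ * its offset: in the per-LF MSIX config register, bits 19:12 hold the
+ * vector count and bits 10:0 the starting offset.
+ */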
+static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct rvu_block *block, int lf)
+{
+ u16 nvecs, vec, offset;
+ u64 cfg;
+
+ cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
+ (lf << block->lfshift));
+ nvecs = (cfg >> 12) & 0xFF;
+
+ /* Check and alloc MSIX vectors, must be contiguous */
+ if (!rvu_rsrc_check_contig(&pfvf->msix, nvecs))
+ return;
+
+ offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
+
+ /* Config MSIX offset in LF */
+ rvu_write64(rvu, block->addr, block->msixcfg_reg |
+ (lf << block->lfshift), (cfg & ~0x7FFULL) | offset);
+
+ /* Update the bitmap as well */
+ for (vec = 0; vec < nvecs; vec++)
+ pfvf->msix_lfmap[offset + vec] = MSIX_BLKLF(block->addr, lf);
+}
+
+static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct rvu_block *block, int lf)
+{
+ u16 nvecs, vec, offset;
+ u64 cfg;
+
+ cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
+ (lf << block->lfshift));
+ nvecs = (cfg >> 12) & 0xFF;
+
+ /* Clear MSIX offset in LF */
+ rvu_write64(rvu, block->addr, block->msixcfg_reg |
+ (lf << block->lfshift), cfg & ~0x7FFULL);
+
+ offset = rvu_get_msix_offset(rvu, pfvf, block->addr, lf);
+
+ /* Update the mapping */
+ for (vec = 0; vec < nvecs; vec++)
+ pfvf->msix_lfmap[offset + vec] = 0;
+
+ /* Free the same in MSIX bitmap */
+ rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset);
+}
+
+static int rvu_mbox_handler_MSIX_OFFSET(struct rvu *rvu, struct msg_req *req,
+ struct msix_offset_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+ int lf, slot;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ if (!pfvf->msix.bmap)
+ return 0;
+
+ /* Set MSIX offsets for each block's LFs attached to this PF/VF */
+ lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0);
+ rsp->npa_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NPA, lf);
+
+ lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NIX0], pcifunc, 0);
+ rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NIX0, lf);
+
+ rsp->sso = pfvf->sso;
+ for (slot = 0; slot < rsp->sso; slot++) {
+ lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSO], pcifunc, slot);
+ rsp->sso_msixoff[slot] =
+ rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSO, lf);
+ }
+
+ rsp->ssow = pfvf->ssow;
+ for (slot = 0; slot < rsp->ssow; slot++) {
+ lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSOW], pcifunc, slot);
+ rsp->ssow_msixoff[slot] =
+ rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSOW, lf);
+ }
+
+ rsp->timlfs = pfvf->timlfs;
+ for (slot = 0; slot < rsp->timlfs; slot++) {
+ lf = rvu_get_lf(rvu, &hw->block[BLKADDR_TIM], pcifunc, slot);
+ rsp->timlf_msixoff[slot] =
+ rvu_get_msix_offset(rvu, pfvf, BLKADDR_TIM, lf);
+ }
+
+ rsp->cptlfs = pfvf->cptlfs;
+ for (slot = 0; slot < rsp->cptlfs; slot++) {
+ lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT0], pcifunc, slot);
+ rsp->cptlf_msixoff[slot] =
+ rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT0, lf);
+ }
+ return 0;
+}
+
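+/* Dispatch a single mbox request. The M() macro below is expanded
+ * once per message via the MBOX_MESSAGES X-macro list; e.g. the READY
+ * entry expands to a case which allocates a ready_msg_rsp, stamps its
+ * header and invokes rvu_mbox_handler_READY().
+ */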
+static int rvu_process_mbox_msg(struct rvu *rvu, int devid,
+ struct mbox_msghdr *req)
+{
+ /* Check if valid; if not, reply with an invalid msg */
+ if (req->sig != OTX2_MBOX_REQ_SIG)
+ goto bad_message;
+
+ switch (req->id) {
+#define M(_name, _id, _req_type, _rsp_type) \
+ case _id: { \
+ struct _rsp_type *rsp; \
+ int err; \
+ \
+ rsp = (struct _rsp_type *)otx2_mbox_alloc_msg( \
+ &rvu->mbox, devid, \
+ sizeof(struct _rsp_type)); \
+ if (rsp) { \
+ rsp->hdr.id = _id; \
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG; \
+ rsp->hdr.pcifunc = req->pcifunc; \
+ rsp->hdr.rc = 0; \
+ } \
+ \
+ err = rvu_mbox_handler_ ## _name(rvu, \
+ (struct _req_type *)req, \
+ rsp); \
+ if (rsp && err) \
+ rsp->hdr.rc = err; \
+ \
+ return rsp ? err : -ENOMEM; \
+ }
+MBOX_MESSAGES
+#undef M
+ break;
+bad_message:
+ default:
+ otx2_reply_invalid_msg(&rvu->mbox, devid, req->pcifunc,
+ req->id);
+ return -ENODEV;
+ }
+}
+
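+/* Workqueue handler for AF<->PF mailbox requests. The position of the
+ * work struct within the mbox_wrk array identifies which PF's mailbox
+ * is being serviced.
+ */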
+static void rvu_mbox_handler(struct work_struct *work)
+{
+ struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+ struct rvu *rvu = mwork->rvu;
+ struct otx2_mbox_dev *mdev;
+ struct mbox_hdr *req_hdr;
+ struct mbox_msghdr *msg;
+ struct otx2_mbox *mbox;
+ int offset, id, err;
+ u16 pf;
+
+ mbox = &rvu->mbox;
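+	/* The work item's index within mbox_wrk[] identifies the PF */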
+ pf = mwork - rvu->mbox_wrk;
+ mdev = &mbox->dev[pf];
+
+ /* Process received mbox messages */
+ req_hdr = mdev->mbase + mbox->rx_start;
+ if (req_hdr->num_msgs == 0)
+ return;
+
+ offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
+
+ for (id = 0; id < req_hdr->num_msgs; id++) {
+ msg = mdev->mbase + offset;
+
+ /* Set which PF sent this message based on mbox IRQ */
+ msg->pcifunc &= ~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT);
+ msg->pcifunc |= (pf << RVU_PFVF_PF_SHIFT);
+ err = rvu_process_mbox_msg(rvu, pf, msg);
+ if (!err) {
+ offset = mbox->rx_start + msg->next_msgoff;
+ continue;
+ }
+
+ if (msg->pcifunc & RVU_PFVF_FUNC_MASK)
+ dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n",
+ err, otx2_mbox_id2name(msg->id), msg->id, pf,
+ (msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1);
+ else
+ dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n",
+ err, otx2_mbox_id2name(msg->id), msg->id, pf);
+ }
+
+ /* Send mbox responses to PF */
+ otx2_mbox_msg_send(mbox, pf);
+}
+
+static void rvu_mbox_up_handler(struct work_struct *work)
+{
+ struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+ struct rvu *rvu = mwork->rvu;
+ struct otx2_mbox_dev *mdev;
+ struct mbox_hdr *rsp_hdr;
+ struct mbox_msghdr *msg;
+ struct otx2_mbox *mbox;
+ int offset, id;
+ u16 pf;
+
+ mbox = &rvu->mbox_up;
+ pf = mwork - rvu->mbox_wrk_up;
+ mdev = &mbox->dev[pf];
+
+ rsp_hdr = mdev->mbase + mbox->rx_start;
+ if (rsp_hdr->num_msgs == 0) {
+ dev_warn(rvu->dev, "mbox up handler: num_msgs = 0\n");
+ return;
+ }
+
+ offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+
+ for (id = 0; id < rsp_hdr->num_msgs; id++) {
+ msg = mdev->mbase + offset;
+
+ if (msg->id >= MBOX_MSG_MAX) {
+ dev_err(rvu->dev,
+ "Mbox msg with unknown ID 0x%x\n", msg->id);
+ goto end;
+ }
+
+ if (msg->sig != OTX2_MBOX_RSP_SIG) {
+ dev_err(rvu->dev,
+ "Mbox msg with wrong signature %x, ID 0x%x\n",
+ msg->sig, msg->id);
+ goto end;
+ }
+
+ switch (msg->id) {
+ case MBOX_MSG_CGX_LINK_EVENT:
+ break;
+ default:
+ if (msg->rc)
+ dev_err(rvu->dev,
+ "Mbox msg response has err %d, ID 0x%x\n",
+ msg->rc, msg->id);
+ break;
+ }
+end:
+ offset = mbox->rx_start + msg->next_msgoff;
+ mdev->msgs_acked++;
+ }
+
+ otx2_mbox_reset(mbox, 0);
+}
+
+static int rvu_mbox_init(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ void __iomem *hwbase = NULL;
+ struct rvu_work *mwork;
+ u64 bar4_addr;
+ int err, pf;
+
+ rvu->mbox_wq = alloc_workqueue("rvu_afpf_mailbox",
+ WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
+ hw->total_pfs);
+ if (!rvu->mbox_wq)
+ return -ENOMEM;
+
+ rvu->mbox_wrk = devm_kcalloc(rvu->dev, hw->total_pfs,
+ sizeof(struct rvu_work), GFP_KERNEL);
+ if (!rvu->mbox_wrk) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ rvu->mbox_wrk_up = devm_kcalloc(rvu->dev, hw->total_pfs,
+ sizeof(struct rvu_work), GFP_KERNEL);
+ if (!rvu->mbox_wrk_up) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ /* Map mbox region shared with PFs */
+ bar4_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PF_BAR4_ADDR);
+	/* The mailbox is a reserved region of RAM shared between RVU
+	 * devices; don't map it as device memory, so that unaligned
+	 * accesses remain possible.
+	 */
+ hwbase = ioremap_wc(bar4_addr, MBOX_SIZE * hw->total_pfs);
+ if (!hwbase) {
+ dev_err(rvu->dev, "Unable to map mailbox region\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+
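+	/* Both mbox directions (AFPF and AFPF_UP) share this region;
+	 * carving it into per-PF channels is presumably handled inside
+	 * otx2_mbox_init() based on the MBOX_DIR_* argument.
+	 */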
+ err = otx2_mbox_init(&rvu->mbox, hwbase, rvu->pdev, rvu->afreg_base,
+ MBOX_DIR_AFPF, hw->total_pfs);
+ if (err)
+ goto exit;
+
+ err = otx2_mbox_init(&rvu->mbox_up, hwbase, rvu->pdev, rvu->afreg_base,
+ MBOX_DIR_AFPF_UP, hw->total_pfs);
+ if (err)
+ goto exit;
+
+ for (pf = 0; pf < hw->total_pfs; pf++) {
+ mwork = &rvu->mbox_wrk[pf];
+ mwork->rvu = rvu;
+ INIT_WORK(&mwork->work, rvu_mbox_handler);
+ }
+
+ for (pf = 0; pf < hw->total_pfs; pf++) {
+ mwork = &rvu->mbox_wrk_up[pf];
+ mwork->rvu = rvu;
+ INIT_WORK(&mwork->work, rvu_mbox_up_handler);
+ }
+
+ return 0;
+exit:
+ if (hwbase)
+ iounmap((void __iomem *)hwbase);
+ destroy_workqueue(rvu->mbox_wq);
+ return err;
+}
+
+static void rvu_mbox_destroy(struct rvu *rvu)
+{
+ if (rvu->mbox_wq) {
+ flush_workqueue(rvu->mbox_wq);
+ destroy_workqueue(rvu->mbox_wq);
+ rvu->mbox_wq = NULL;
+ }
+
+ if (rvu->mbox.hwbase)
+ iounmap((void __iomem *)rvu->mbox.hwbase);
+
+ otx2_mbox_destroy(&rvu->mbox);
+ otx2_mbox_destroy(&rvu->mbox_up);
+}
+
+static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu *rvu = (struct rvu *)rvu_irq;
+ struct otx2_mbox_dev *mdev;
+ struct otx2_mbox *mbox;
+ struct mbox_hdr *hdr;
+ u64 intr;
+ u8 pf;
+
+ intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
+ /* Clear interrupts */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr);
+
+ /* Sync with mbox memory region */
+ smp_wmb();
+
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ if (intr & (1ULL << pf)) {
+ mbox = &rvu->mbox;
+ mdev = &mbox->dev[pf];
+ hdr = mdev->mbase + mbox->rx_start;
+ if (hdr->num_msgs)
+ queue_work(rvu->mbox_wq,
+ &rvu->mbox_wrk[pf].work);
+ mbox = &rvu->mbox_up;
+ mdev = &mbox->dev[pf];
+ hdr = mdev->mbase + mbox->rx_start;
+ if (hdr->num_msgs)
+ queue_work(rvu->mbox_wq,
+ &rvu->mbox_wrk_up[pf].work);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_enable_mbox_intr(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ /* Clear spurious irqs, if any */
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_AF_PFAF_MBOX_INT, INTR_MASK(hw->total_pfs));
+
+	/* Enable mailbox interrupt for all PFs except PF0, i.e. the AF itself */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1S,
+ INTR_MASK(hw->total_pfs) & ~1ULL);
+}
+
+static void rvu_unregister_interrupts(struct rvu *rvu)
+{
+ int irq;
+
+ /* Disable the Mbox interrupt */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
+ INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
+ for (irq = 0; irq < rvu->num_vec; irq++) {
+ if (rvu->irq_allocated[irq])
+ free_irq(pci_irq_vector(rvu->pdev, irq), rvu);
+ }
+
+ pci_free_irq_vectors(rvu->pdev);
+ rvu->num_vec = 0;
+}
+
+static int rvu_register_interrupts(struct rvu *rvu)
+{
+ int ret;
+
+ rvu->num_vec = pci_msix_vec_count(rvu->pdev);
+
+ rvu->irq_name = devm_kmalloc_array(rvu->dev, rvu->num_vec,
+ NAME_SIZE, GFP_KERNEL);
+ if (!rvu->irq_name)
+ return -ENOMEM;
+
+ rvu->irq_allocated = devm_kcalloc(rvu->dev, rvu->num_vec,
+ sizeof(bool), GFP_KERNEL);
+ if (!rvu->irq_allocated)
+ return -ENOMEM;
+
+ /* Enable MSI-X */
+ ret = pci_alloc_irq_vectors(rvu->pdev, rvu->num_vec,
+ rvu->num_vec, PCI_IRQ_MSIX);
+ if (ret < 0) {
+ dev_err(rvu->dev,
+ "RVUAF: Request for %d msix vectors failed, ret %d\n",
+ rvu->num_vec, ret);
+ return ret;
+ }
+
+ /* Register mailbox interrupt handler */
+ sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox");
+ ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX),
+ rvu_mbox_intr_handler, 0,
+ &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for mbox irq\n");
+ goto fail;
+ }
+
+ rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true;
+
+ /* Enable mailbox interrupts from all PFs */
+ rvu_enable_mbox_intr(rvu);
+
+ return 0;
+
+fail:
+ pci_free_irq_vectors(rvu->pdev);
+ return ret;
+}
+
+static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct rvu *rvu;
+ int err;
+
+ rvu = devm_kzalloc(dev, sizeof(*rvu), GFP_KERNEL);
+ if (!rvu)
+ return -ENOMEM;
+
+ rvu->hw = devm_kzalloc(dev, sizeof(struct rvu_hwinfo), GFP_KERNEL);
+ if (!rvu->hw) {
+ devm_kfree(dev, rvu);
+ return -ENOMEM;
+ }
+
+ pci_set_drvdata(pdev, rvu);
+ rvu->pdev = pdev;
+ rvu->dev = &pdev->dev;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ goto err_freemem;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto err_disable_device;
+ }
+
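+	/* The device uses a 48-bit DMA address space */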
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "Unable to set DMA mask\n");
+ goto err_release_regions;
+ }
+
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "Unable to set consistent DMA mask\n");
+ goto err_release_regions;
+ }
+
+ /* Map Admin function CSRs */
+ rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0);
+ rvu->pfreg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
+ if (!rvu->afreg_base || !rvu->pfreg_base) {
+ dev_err(dev, "Unable to map admin function CSRs, aborting\n");
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ /* Check which blocks the HW supports */
+ rvu_check_block_implemented(rvu);
+
+ rvu_reset_all_blocks(rvu);
+
+ err = rvu_setup_hw_resources(rvu);
+ if (err)
+ goto err_release_regions;
+
+ err = rvu_mbox_init(rvu);
+ if (err)
+ goto err_hwsetup;
+
+ err = rvu_cgx_probe(rvu);
+ if (err)
+ goto err_mbox;
+
+ err = rvu_register_interrupts(rvu);
+ if (err)
+ goto err_cgx;
+
+ return 0;
+err_cgx:
+ rvu_cgx_wq_destroy(rvu);
+err_mbox:
+ rvu_mbox_destroy(rvu);
+err_hwsetup:
+ rvu_reset_all_blocks(rvu);
+ rvu_free_hw_resources(rvu);
+err_release_regions:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+err_freemem:
+ pci_set_drvdata(pdev, NULL);
+ devm_kfree(&pdev->dev, rvu->hw);
+ devm_kfree(dev, rvu);
+ return err;
+}
+
+static void rvu_remove(struct pci_dev *pdev)
+{
+ struct rvu *rvu = pci_get_drvdata(pdev);
+
+ rvu_unregister_interrupts(rvu);
+ rvu_cgx_wq_destroy(rvu);
+ rvu_mbox_destroy(rvu);
+ rvu_reset_all_blocks(rvu);
+ rvu_free_hw_resources(rvu);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+ devm_kfree(&pdev->dev, rvu->hw);
+ devm_kfree(&pdev->dev, rvu);
+}
+
+static struct pci_driver rvu_driver = {
+ .name = DRV_NAME,
+ .id_table = rvu_id_table,
+ .probe = rvu_probe,
+ .remove = rvu_remove,
+};
+
+static int __init rvu_init_module(void)
+{
+ int err;
+
+ pr_info("%s: %s\n", DRV_NAME, DRV_STRING);
+
+ err = pci_register_driver(&cgx_driver);
+ if (err < 0)
+ return err;
+
+ err = pci_register_driver(&rvu_driver);
+ if (err < 0)
+ pci_unregister_driver(&cgx_driver);
+
+ return err;
+}
+
+static void __exit rvu_cleanup_module(void)
+{
+ pci_unregister_driver(&rvu_driver);
+ pci_unregister_driver(&cgx_driver);
+}
+
+module_init(rvu_init_module);
+module_exit(rvu_cleanup_module);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
new file mode 100644
index 000000000000..2c0580cd2807
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -0,0 +1,368 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef RVU_H
+#define RVU_H
+
+#include "rvu_struct.h"
+#include "common.h"
+#include "mbox.h"
+
+/* PCI device IDs */
+#define PCI_DEVID_OCTEONTX2_RVU_AF 0xA065
+
+/* PCI BAR nos */
+#define PCI_AF_REG_BAR_NUM 0
+#define PCI_PF_REG_BAR_NUM 2
+#define PCI_MBOX_BAR_NUM 4
+
+#define NAME_SIZE 32
+
+/* PF_FUNC */
+#define RVU_PFVF_PF_SHIFT 10
+#define RVU_PFVF_PF_MASK 0x3F
+#define RVU_PFVF_FUNC_SHIFT 0
+#define RVU_PFVF_FUNC_MASK 0x3FF
+
+struct rvu_work {
+ struct work_struct work;
+ struct rvu *rvu;
+};
+
+struct rsrc_bmap {
+ unsigned long *bmap; /* Pointer to resource bitmap */
+ u16 max; /* Max resource id or count */
+};
+
+struct rvu_block {
+ struct rsrc_bmap lf;
+ struct admin_queue *aq; /* NIX/NPA AQ */
+ u16 *fn_map; /* LF to pcifunc mapping */
+ bool multislot;
+ bool implemented;
+ u8 addr; /* RVU_BLOCK_ADDR_E */
+ u8 type; /* RVU_BLOCK_TYPE_E */
+ u8 lfshift;
+ u64 lookup_reg;
+ u64 pf_lfcnt_reg;
+ u64 vf_lfcnt_reg;
+ u64 lfcfg_reg;
+ u64 msixcfg_reg;
+ u64 lfreset_reg;
+ unsigned char name[NAME_SIZE];
+};
+
+struct nix_mcast {
+ struct qmem *mce_ctx;
+ struct qmem *mcast_buf;
+ int replay_pkind;
+ int next_free_mce;
+ spinlock_t mce_lock; /* Serialize MCE updates */
+};
+
+struct nix_mce_list {
+ struct hlist_head head;
+ int count;
+ int max;
+};
+
+struct npc_mcam {
+ spinlock_t lock; /* MCAM entries and counters update lock */
+ u8 keysize; /* MCAM keysize 112/224/448 bits */
+ u8 banks; /* Number of MCAM banks */
+ u8 banks_per_entry;/* Number of keywords in key */
+ u16 banksize; /* Number of MCAM entries in each bank */
+ u16 total_entries; /* Total number of MCAM entries */
+ u16 entries; /* Total minus reserved for NIX LFs */
+ u16 nixlf_offset; /* Offset of nixlf rsvd uncast entries */
+ u16 pf_offset; /* Offset of PF's rsvd bcast, promisc entries */
+};
+
+/* Structure for per-RVU-func (i.e. PF/VF) info */
+struct rvu_pfvf {
+ bool npalf; /* Only one NPALF per RVU_FUNC */
+ bool nixlf; /* Only one NIXLF per RVU_FUNC */
+ u16 sso;
+ u16 ssow;
+ u16 cptlfs;
+ u16 timlfs;
+ u8 cgx_lmac;
+
+ /* Block LF's MSIX vector info */
+ struct rsrc_bmap msix; /* Bitmap for MSIX vector alloc */
+#define MSIX_BLKLF(blkaddr, lf) (((blkaddr) << 8) | ((lf) & 0xFF))
+ u16 *msix_lfmap; /* Vector to block LF mapping */
+
+ /* NPA contexts */
+ struct qmem *aura_ctx;
+ struct qmem *pool_ctx;
+ struct qmem *npa_qints_ctx;
+ unsigned long *aura_bmap;
+ unsigned long *pool_bmap;
+
+ /* NIX contexts */
+ struct qmem *rq_ctx;
+ struct qmem *sq_ctx;
+ struct qmem *cq_ctx;
+ struct qmem *rss_ctx;
+ struct qmem *cq_ints_ctx;
+ struct qmem *nix_qints_ctx;
+ unsigned long *sq_bmap;
+ unsigned long *rq_bmap;
+ unsigned long *cq_bmap;
+
+ u16 rx_chan_base;
+ u16 tx_chan_base;
+ u8 rx_chan_cnt; /* total number of RX channels */
+ u8 tx_chan_cnt; /* total number of TX channels */
+
+ u8 mac_addr[ETH_ALEN]; /* MAC address of this PF/VF */
+
+ /* Broadcast pkt replication info */
+ u16 bcast_mce_idx;
+ struct nix_mce_list bcast_mce_list;
+};
+
+struct nix_txsch {
+ struct rsrc_bmap schq;
+ u8 lvl;
+ u16 *pfvf_map;
+};
+
+struct npc_pkind {
+ struct rsrc_bmap rsrc;
+ u32 *pfchan_map;
+};
+
+struct nix_hw {
+ struct nix_txsch txsch[NIX_TXSCH_LVL_CNT]; /* Tx schedulers */
+ struct nix_mcast mcast;
+};
+
+struct rvu_hwinfo {
+ u8 total_pfs; /* MAX RVU PFs HW supports */
+ u16 total_vfs; /* Max RVU VFs HW supports */
+ u16 max_vfs_per_pf; /* Max VFs that can be attached to a PF */
+ u8 cgx;
+ u8 lmac_per_cgx;
+ u8 cgx_links;
+ u8 lbk_links;
+ u8 sdp_links;
+ u8 npc_kpus; /* No of parser units */
+
+ struct rvu_block block[BLK_COUNT]; /* Block info */
+ struct nix_hw *nix0;
+ struct npc_pkind pkind;
+ struct npc_mcam mcam;
+};
+
+struct rvu {
+ void __iomem *afreg_base;
+ void __iomem *pfreg_base;
+ struct pci_dev *pdev;
+ struct device *dev;
+ struct rvu_hwinfo *hw;
+ struct rvu_pfvf *pf;
+ struct rvu_pfvf *hwvf;
+ spinlock_t rsrc_lock; /* Serialize resource alloc/free */
+
+ /* Mbox */
+ struct otx2_mbox mbox;
+ struct rvu_work *mbox_wrk;
+ struct otx2_mbox mbox_up;
+ struct rvu_work *mbox_wrk_up;
+ struct workqueue_struct *mbox_wq;
+
+ /* MSI-X */
+ u16 num_vec;
+ char *irq_name;
+ bool *irq_allocated;
+ dma_addr_t msix_base_iova;
+
+ /* CGX */
+#define PF_CGXMAP_BASE 1 /* PF 0 is reserved for RVU PF */
+ u8 cgx_mapped_pfs;
+ u8 cgx_cnt; /* available cgx ports */
+ u8 *pf2cgxlmac_map; /* pf to cgx_lmac map */
+ u16 *cgxlmac2pf_map; /* bitmap of mapped pfs for
+ * every cgx lmac port
+ */
+ unsigned long pf_notify_bmap; /* Flags for PF notification */
+ void **cgx_idmap; /* cgx id to cgx data map table */
+ struct work_struct cgx_evh_work;
+ struct workqueue_struct *cgx_evh_wq;
+ spinlock_t cgx_evq_lock; /* cgx event queue lock */
+ struct list_head cgx_evq_head; /* cgx event queue head */
+};
+
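+/* AF CSR access helpers: the RVU block address is encoded at bit
+ * position 28 of the register offset within the AF BAR.
+ */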
+static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
+{
+ writeq(val, rvu->afreg_base + ((block << 28) | offset));
+}
+
+static inline u64 rvu_read64(struct rvu *rvu, u64 block, u64 offset)
+{
+ return readq(rvu->afreg_base + ((block << 28) | offset));
+}
+
+static inline void rvupf_write64(struct rvu *rvu, u64 offset, u64 val)
+{
+ writeq(val, rvu->pfreg_base + offset);
+}
+
+static inline u64 rvupf_read64(struct rvu *rvu, u64 offset)
+{
+ return readq(rvu->pfreg_base + offset);
+}
+
+/* Function Prototypes
+ * RVU
+ */
+int rvu_alloc_bitmap(struct rsrc_bmap *rsrc);
+int rvu_alloc_rsrc(struct rsrc_bmap *rsrc);
+void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id);
+int rvu_rsrc_free_count(struct rsrc_bmap *rsrc);
+int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc);
+bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc);
+int rvu_get_pf(u16 pcifunc);
+struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc);
+void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf);
+bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr);
+int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot);
+int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf);
+int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc);
+int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero);
+
+/* RVU HW reg validation */
+enum regmap_block {
+ TXSCHQ_HWREGMAP = 0,
+ MAX_HWREGMAP,
+};
+
+bool rvu_check_valid_reg(int regmap, int regblk, u64 reg);
+
+/* NPA/NIX AQ APIs */
+int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
+ int qsize, int inst_size, int res_size);
+void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq);
+
+/* CGX APIs */
+static inline bool is_pf_cgxmapped(struct rvu *rvu, u8 pf)
+{
+ return (pf >= PF_CGXMAP_BASE && pf <= rvu->cgx_mapped_pfs);
+}
+
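+/* pf2cgxlmac_map entries pack both ids into a single byte:
+ * bits [7:4] = CGX id, bits [3:0] = LMAC id (see cgxlmac_id_to_bmap()
+ * in rvu_cgx.c).
+ */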
+static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id)
+{
+ *cgx_id = (map >> 4) & 0xF;
+ *lmac_id = (map & 0xF);
+}
+
+int rvu_cgx_probe(struct rvu *rvu);
+void rvu_cgx_wq_destroy(struct rvu *rvu);
+void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu);
+int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start);
+int rvu_mbox_handler_CGX_START_RXTX(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_CGX_STOP_RXTX(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_CGX_STATS(struct rvu *rvu, struct msg_req *req,
+ struct cgx_stats_rsp *rsp);
+int rvu_mbox_handler_CGX_MAC_ADDR_SET(struct rvu *rvu,
+ struct cgx_mac_addr_set_or_get *req,
+ struct cgx_mac_addr_set_or_get *rsp);
+int rvu_mbox_handler_CGX_MAC_ADDR_GET(struct rvu *rvu,
+ struct cgx_mac_addr_set_or_get *req,
+ struct cgx_mac_addr_set_or_get *rsp);
+int rvu_mbox_handler_CGX_PROMISC_ENABLE(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_CGX_PROMISC_DISABLE(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_CGX_START_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_CGX_STOP_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_CGX_GET_LINKINFO(struct rvu *rvu, struct msg_req *req,
+ struct cgx_link_info_msg *rsp);
+int rvu_mbox_handler_CGX_INTLBK_ENABLE(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_CGX_INTLBK_DISABLE(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp);
+
+/* NPA APIs */
+int rvu_npa_init(struct rvu *rvu);
+void rvu_npa_freemem(struct rvu *rvu);
+int rvu_mbox_handler_NPA_AQ_ENQ(struct rvu *rvu,
+ struct npa_aq_enq_req *req,
+ struct npa_aq_enq_rsp *rsp);
+int rvu_mbox_handler_NPA_HWCTX_DISABLE(struct rvu *rvu,
+ struct hwctx_disable_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_NPA_LF_ALLOC(struct rvu *rvu,
+ struct npa_lf_alloc_req *req,
+ struct npa_lf_alloc_rsp *rsp);
+int rvu_mbox_handler_NPA_LF_FREE(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp);
+
+/* NIX APIs */
+int rvu_nix_init(struct rvu *rvu);
+void rvu_nix_freemem(struct rvu *rvu);
+int rvu_get_nixlf_count(struct rvu *rvu);
+int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu,
+ struct nix_lf_alloc_req *req,
+ struct nix_lf_alloc_rsp *rsp);
+int rvu_mbox_handler_NIX_LF_FREE(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_NIX_AQ_ENQ(struct rvu *rvu,
+ struct nix_aq_enq_req *req,
+ struct nix_aq_enq_rsp *rsp);
+int rvu_mbox_handler_NIX_HWCTX_DISABLE(struct rvu *rvu,
+ struct hwctx_disable_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu,
+ struct nix_txsch_alloc_req *req,
+ struct nix_txsch_alloc_rsp *rsp);
+int rvu_mbox_handler_NIX_TXSCH_FREE(struct rvu *rvu,
+ struct nix_txsch_free_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_NIX_TXSCHQ_CFG(struct rvu *rvu,
+ struct nix_txschq_config *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_NIX_STATS_RST(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_NIX_VTAG_CFG(struct rvu *rvu,
+ struct nix_vtag_config *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_NIX_RSS_FLOWKEY_CFG(struct rvu *rvu,
+ struct nix_rss_flowkey_cfg *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_NIX_SET_MAC_ADDR(struct rvu *rvu,
+ struct nix_set_mac_addr *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_NIX_SET_RX_MODE(struct rvu *rvu, struct nix_rx_mode *req,
+ struct msg_rsp *rsp);
+
+/* NPC APIs */
+int rvu_npc_init(struct rvu *rvu);
+void rvu_npc_freemem(struct rvu *rvu);
+int rvu_npc_get_pkind(struct rvu *rvu, u16 pf);
+void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf);
+void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, u64 chan, u8 *mac_addr);
+void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, u64 chan, bool allmulti);
+void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
+void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, u64 chan);
+void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
+void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
+ int group, int alg_idx, int mcam_index);
+#endif /* RVU_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
new file mode 100644
index 000000000000..188185c15b4a
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -0,0 +1,515 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "rvu.h"
+#include "cgx.h"
+
+struct cgx_evq_entry {
+ struct list_head evq_node;
+ struct cgx_link_event link_event;
+};
+
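+/* For each AF-to-PF "up" direction message in MBOX_UP_CGX_MESSAGES,
+ * generate an otx2_mbox_alloc_msg_<name>() helper that allocates the
+ * request and fills in its header.
+ */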
+#define M(_name, _id, _req_type, _rsp_type) \
+static struct _req_type __maybe_unused \
+*otx2_mbox_alloc_msg_ ## _name(struct rvu *rvu, int devid) \
+{ \
+ struct _req_type *req; \
+ \
+ req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \
+ &rvu->mbox_up, devid, sizeof(struct _req_type), \
+ sizeof(struct _rsp_type)); \
+ if (!req) \
+ return NULL; \
+ req->hdr.sig = OTX2_MBOX_REQ_SIG; \
+ req->hdr.id = _id; \
+ return req; \
+}
+
+MBOX_UP_CGX_MESSAGES
+#undef M
+
+/* Returns bitmap of mapped PFs */
+static inline u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
+{
+ return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
+}
+
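+/* Encode CGX and LMAC ids into the single byte layout decoded by
+ * rvu_get_cgx_lmac_id(): bits [7:4] = cgx_id, bits [3:0] = lmac_id.
+ */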
+static inline u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
+{
+ return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
+}
+
+void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
+{
+ if (cgx_id >= rvu->cgx_cnt)
+ return NULL;
+
+ return rvu->cgx_idmap[cgx_id];
+}
+
+static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
+{
+ struct npc_pkind *pkind = &rvu->hw->pkind;
+ int cgx_cnt = rvu->cgx_cnt;
+ int cgx, lmac_cnt, lmac;
+ int pf = PF_CGXMAP_BASE;
+ int size, free_pkind;
+
+ if (!cgx_cnt)
+ return 0;
+
+ if (cgx_cnt > 0xF || MAX_LMAC_PER_CGX > 0xF)
+ return -EINVAL;
+
+ /* Alloc map table
+ * An additional entry is required since PF id starts from 1 and
+ * hence entry at offset 0 is invalid.
+ */
+ size = (cgx_cnt * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
+ rvu->pf2cgxlmac_map = devm_kzalloc(rvu->dev, size, GFP_KERNEL);
+ if (!rvu->pf2cgxlmac_map)
+ return -ENOMEM;
+
+ /* Initialize offset 0 with an invalid cgx and lmac id */
+ rvu->pf2cgxlmac_map[0] = 0xFF;
+
+ /* Reverse map table */
+ rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev,
+ cgx_cnt * MAX_LMAC_PER_CGX * sizeof(u16),
+ GFP_KERNEL);
+ if (!rvu->cgxlmac2pf_map)
+ return -ENOMEM;
+
+ rvu->cgx_mapped_pfs = 0;
+ for (cgx = 0; cgx < cgx_cnt; cgx++) {
+ lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
+ for (lmac = 0; lmac < lmac_cnt; lmac++, pf++) {
+ rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
+ rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
+ free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
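+			/* Channel map entry carries the PF id (6 bits)
+			 * at bit position 16.
+			 */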
+ pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
+ rvu->cgx_mapped_pfs++;
+ }
+ }
+ return 0;
+}
+
+static int rvu_cgx_send_link_info(int cgx_id, int lmac_id, struct rvu *rvu)
+{
+ struct cgx_evq_entry *qentry;
+ unsigned long flags;
+ int err;
+
+ qentry = kmalloc(sizeof(*qentry), GFP_KERNEL);
+ if (!qentry)
+ return -ENOMEM;
+
+ /* Lock the event queue before we read the local link status */
+ spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
+ err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
+ &qentry->link_event.link_uinfo);
+ qentry->link_event.cgx_id = cgx_id;
+ qentry->link_event.lmac_id = lmac_id;
+ if (err)
+ goto skip_add;
+ list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
+skip_add:
+ spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
+
+ /* start worker to process the events */
+ queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);
+
+ return 0;
+}
+
+/* This is called from interrupt context and is expected to be atomic */
+static int cgx_lmac_postevent(struct cgx_link_event *event, void *data)
+{
+ struct cgx_evq_entry *qentry;
+ struct rvu *rvu = data;
+
+ /* post event to the event queue */
+ qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
+ if (!qentry)
+ return -ENOMEM;
+ qentry->link_event = *event;
+ spin_lock(&rvu->cgx_evq_lock);
+ list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
+ spin_unlock(&rvu->cgx_evq_lock);
+
+ /* start worker to process the events */
+ queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);
+
+ return 0;
+}
+
+static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
+{
+ struct cgx_link_user_info *linfo;
+ struct cgx_link_info_msg *msg;
+ unsigned long pfmap;
+ int err, pfid;
+
+ linfo = &event->link_uinfo;
+ pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);
+
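+	/* pfmap has one bit set per PF mapped to this CGX LMAC;
+	 * walk the bits and notify each PF.
+	 */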
+ do {
+ pfid = find_first_bit(&pfmap, 16);
+ clear_bit(pfid, &pfmap);
+
+ /* check if notification is enabled */
+ if (!test_bit(pfid, &rvu->pf_notify_bmap)) {
+ dev_info(rvu->dev, "cgx %d: lmac %d Link status %s\n",
+ event->cgx_id, event->lmac_id,
+ linfo->link_up ? "UP" : "DOWN");
+ continue;
+ }
+
+ /* Send mbox message to PF */
+ msg = otx2_mbox_alloc_msg_CGX_LINK_EVENT(rvu, pfid);
+ if (!msg)
+ continue;
+ msg->link_info = *linfo;
+ otx2_mbox_msg_send(&rvu->mbox_up, pfid);
+ err = otx2_mbox_wait_for_rsp(&rvu->mbox_up, pfid);
+ if (err)
+ dev_warn(rvu->dev, "notification to pf %d failed\n",
+ pfid);
+ } while (pfmap);
+}
+
+static void cgx_evhandler_task(struct work_struct *work)
+{
+ struct rvu *rvu = container_of(work, struct rvu, cgx_evh_work);
+ struct cgx_evq_entry *qentry;
+ struct cgx_link_event *event;
+ unsigned long flags;
+
+ do {
+ /* Dequeue an event */
+ spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
+ qentry = list_first_entry_or_null(&rvu->cgx_evq_head,
+ struct cgx_evq_entry,
+ evq_node);
+ if (qentry)
+ list_del(&qentry->evq_node);
+ spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
+ if (!qentry)
+ break; /* nothing more to process */
+
+ event = &qentry->link_event;
+
+ /* process event */
+ cgx_notify_pfs(event, rvu);
+ kfree(qentry);
+ } while (1);
+}
+
+static void cgx_lmac_event_handler_init(struct rvu *rvu)
+{
+ struct cgx_event_cb cb;
+ int cgx, lmac, err;
+ void *cgxd;
+
+ spin_lock_init(&rvu->cgx_evq_lock);
+ INIT_LIST_HEAD(&rvu->cgx_evq_head);
+ INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task);
+ rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
+ if (!rvu->cgx_evh_wq) {
+		dev_err(rvu->dev, "alloc workqueue failed\n");
+ return;
+ }
+
+ cb.notify_link_chg = cgx_lmac_postevent; /* link change call back */
+ cb.data = rvu;
+
+ for (cgx = 0; cgx < rvu->cgx_cnt; cgx++) {
+ cgxd = rvu_cgx_pdata(cgx, rvu);
+ for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++) {
+ err = cgx_lmac_evh_register(&cb, cgxd, lmac);
+ if (err)
+ dev_err(rvu->dev,
+ "%d:%d handler register failed\n",
+ cgx, lmac);
+ }
+ }
+}
+
+void rvu_cgx_wq_destroy(struct rvu *rvu)
+{
+ if (rvu->cgx_evh_wq) {
+ flush_workqueue(rvu->cgx_evh_wq);
+ destroy_workqueue(rvu->cgx_evh_wq);
+ rvu->cgx_evh_wq = NULL;
+ }
+}
+
+int rvu_cgx_probe(struct rvu *rvu)
+{
+ int i, err;
+
+ /* find available cgx ports */
+ rvu->cgx_cnt = cgx_get_cgx_cnt();
+ if (!rvu->cgx_cnt) {
+ dev_info(rvu->dev, "No CGX devices found!\n");
+ return -ENODEV;
+ }
+
+ rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt * sizeof(void *),
+ GFP_KERNEL);
+ if (!rvu->cgx_idmap)
+ return -ENOMEM;
+
+ /* Initialize the cgxdata table */
+ for (i = 0; i < rvu->cgx_cnt; i++)
+ rvu->cgx_idmap[i] = cgx_get_pdata(i);
+
+ /* Map CGX LMAC interfaces to RVU PFs */
+ err = rvu_map_cgx_lmac_pf(rvu);
+ if (err)
+ return err;
+
+ /* Register for CGX events */
+ cgx_lmac_event_handler_init(rvu);
+ return 0;
+}
+
+int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
+{
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id, lmac_id;
+
+	/* This msg is expected only from PFs that are mapped to CGX LMACs;
+	 * if received from any other PF/VF, simply ACK, nothing to do.
+ */
+ if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ cgx_lmac_rx_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, start);
+
+ return 0;
+}
+
+int rvu_mbox_handler_CGX_START_RXTX(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, true);
+ return 0;
+}
+
+int rvu_mbox_handler_CGX_STOP_RXTX(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, false);
+ return 0;
+}
+
+int rvu_mbox_handler_CGX_STATS(struct rvu *rvu, struct msg_req *req,
+ struct cgx_stats_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ int stat = 0, err = 0;
+ u64 tx_stat, rx_stat;
+ u8 cgx_idx, lmac;
+ void *cgxd;
+
+ if ((req->hdr.pcifunc & RVU_PFVF_FUNC_MASK) ||
+ !is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
+ cgxd = rvu_cgx_pdata(cgx_idx, rvu);
+
+ /* Rx stats */
+ while (stat < CGX_RX_STATS_COUNT) {
+ err = cgx_get_rx_stats(cgxd, lmac, stat, &rx_stat);
+ if (err)
+ return err;
+ rsp->rx_stats[stat] = rx_stat;
+ stat++;
+ }
+
+ /* Tx stats */
+ stat = 0;
+ while (stat < CGX_TX_STATS_COUNT) {
+ err = cgx_get_tx_stats(cgxd, lmac, stat, &tx_stat);
+ if (err)
+ return err;
+ rsp->tx_stats[stat] = tx_stat;
+ stat++;
+ }
+ return 0;
+}
+
+int rvu_mbox_handler_CGX_MAC_ADDR_SET(struct rvu *rvu,
+ struct cgx_mac_addr_set_or_get *req,
+ struct cgx_mac_addr_set_or_get *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);
+
+ return 0;
+}
+
+int rvu_mbox_handler_CGX_MAC_ADDR_GET(struct rvu *rvu,
+ struct cgx_mac_addr_set_or_get *req,
+ struct cgx_mac_addr_set_or_get *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+ int rc = 0, i;
+ u64 cfg;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ rsp->hdr.rc = rc;
+ cfg = cgx_lmac_addr_get(cgx_id, lmac_id);
+	/* copy the 48-bit MAC address into rsp->mac_addr */
+ for (i = 0; i < ETH_ALEN; i++)
+ rsp->mac_addr[i] = cfg >> (ETH_ALEN - 1 - i) * 8;
+ return 0;
+}
+
+int rvu_mbox_handler_CGX_PROMISC_ENABLE(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id, lmac_id;
+
+	/* This msg is expected only from PFs that are mapped to CGX LMACs;
+	 * if received from any other PF/VF, simply ACK, nothing to do.
+ */
+ if ((req->hdr.pcifunc & RVU_PFVF_FUNC_MASK) ||
+ !is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ cgx_lmac_promisc_config(cgx_id, lmac_id, true);
+ return 0;
+}
+
+int rvu_mbox_handler_CGX_PROMISC_DISABLE(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id, lmac_id;
+
+	/* This msg is expected only from PFs that are mapped to CGX LMACs;
+	 * if received from any other PF/VF, simply ACK, nothing to do.
+ */
+ if ((req->hdr.pcifunc & RVU_PFVF_FUNC_MASK) ||
+ !is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ cgx_lmac_promisc_config(cgx_id, lmac_id, false);
+ return 0;
+}
+
+static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
+{
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id, lmac_id;
+
+	/* This msg is expected only from PFs that are mapped to CGX LMACs;
+	 * if received from any other PF/VF, simply ACK, nothing to do.
+ */
+ if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ if (en) {
+ set_bit(pf, &rvu->pf_notify_bmap);
+ /* Send the current link status to PF */
+ rvu_cgx_send_link_info(cgx_id, lmac_id, rvu);
+ } else {
+ clear_bit(pf, &rvu->pf_notify_bmap);
+ }
+
+ return 0;
+}
+
+int rvu_mbox_handler_CGX_START_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, true);
+ return 0;
+}
+
+int rvu_mbox_handler_CGX_STOP_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, false);
+ return 0;
+}
+
+int rvu_mbox_handler_CGX_GET_LINKINFO(struct rvu *rvu, struct msg_req *req,
+ struct cgx_link_info_msg *rsp)
+{
+ u8 cgx_id, lmac_id;
+ int pf, err;
+
+ pf = rvu_get_pf(req->hdr.pcifunc);
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
+ &rsp->link_info);
+ return err;
+}
+
+static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
+{
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id, lmac_id;
+
+	/* This msg is expected only from PFs that are mapped to CGX LMACs;
+	 * if received from any other PF/VF, simply ACK, nothing to do.
+ */
+ if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ return cgx_lmac_internal_loopback(rvu_cgx_pdata(cgx_id, rvu),
+ lmac_id, en);
+}
+
+int rvu_mbox_handler_CGX_INTLBK_ENABLE(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, true);
+ return 0;
+}
+
+int rvu_mbox_handler_CGX_INTLBK_DISABLE(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false);
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
new file mode 100644
index 000000000000..8890c95831ca
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -0,0 +1,1959 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "rvu.h"
+#include "npc.h"
+#include "cgx.h"
+
+static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add);
+
+enum mc_tbl_sz {
+ MC_TBL_SZ_256,
+ MC_TBL_SZ_512,
+ MC_TBL_SZ_1K,
+ MC_TBL_SZ_2K,
+ MC_TBL_SZ_4K,
+ MC_TBL_SZ_8K,
+ MC_TBL_SZ_16K,
+ MC_TBL_SZ_32K,
+ MC_TBL_SZ_64K,
+};
+
+enum mc_buf_cnt {
+ MC_BUF_CNT_8,
+ MC_BUF_CNT_16,
+ MC_BUF_CNT_32,
+ MC_BUF_CNT_64,
+ MC_BUF_CNT_128,
+ MC_BUF_CNT_256,
+ MC_BUF_CNT_512,
+ MC_BUF_CNT_1024,
+ MC_BUF_CNT_2048,
+};
+
+/* For now considering MC resources needed for broadcast
+ * pkt replication only, i.e. 256 HWVFs + 12 PFs.
+ */
+#define MC_TBL_SIZE MC_TBL_SZ_512
+#define MC_BUF_CNT MC_BUF_CNT_128
+
+struct mce {
+ struct hlist_node node;
+ u16 idx;
+ u16 pcifunc;
+};
+
+int rvu_get_nixlf_count(struct rvu *rvu)
+{
+ struct rvu_block *block;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return 0;
+ block = &rvu->hw->block[blkaddr];
+ return block->lf.max;
+}
+
+static void nix_mce_list_init(struct nix_mce_list *list, int max)
+{
+ INIT_HLIST_HEAD(&list->head);
+ list->count = 0;
+ list->max = max;
+}
+
+static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
+{
+ int idx;
+
+ if (!mcast)
+ return 0;
+
+ idx = mcast->next_free_mce;
+ mcast->next_free_mce += count;
+ return idx;
+}
+
+static inline struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
+{
+ if (blkaddr == BLKADDR_NIX0 && hw->nix0)
+ return hw->nix0;
+
+ return NULL;
+}
+
+static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
+ int lvl, u16 pcifunc, u16 schq)
+{
+ struct nix_txsch *txsch;
+ struct nix_hw *nix_hw;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return false;
+
+ txsch = &nix_hw->txsch[lvl];
+ /* Check out of bounds */
+ if (schq >= txsch->schq.max)
+ return false;
+
+ spin_lock(&rvu->rsrc_lock);
+ if (txsch->pfvf_map[schq] != pcifunc) {
+ spin_unlock(&rvu->rsrc_lock);
+ return false;
+ }
+ spin_unlock(&rvu->rsrc_lock);
+ return true;
+}
+
+static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ u8 cgx_id, lmac_id;
+ int pkind, pf;
+ int err;
+
+ pf = rvu_get_pf(pcifunc);
+ if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
+ return 0;
+
+ switch (type) {
+ case NIX_INTF_TYPE_CGX:
+ pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
+ rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
+
+ pkind = rvu_npc_get_pkind(rvu, pf);
+ if (pkind < 0) {
+ dev_err(rvu->dev,
+ "PF_Func 0x%x: Invalid pkind\n", pcifunc);
+ return -EINVAL;
+ }
+ pfvf->rx_chan_base = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0);
+ pfvf->tx_chan_base = pfvf->rx_chan_base;
+ pfvf->rx_chan_cnt = 1;
+ pfvf->tx_chan_cnt = 1;
+ cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
+ rvu_npc_set_pkind(rvu, pkind, pfvf);
+ break;
+ case NIX_INTF_TYPE_LBK:
+ break;
+ }
+
+	/* Add a UCAST forwarding rule in MCAM matching the MAC address
+	 * of the RVU PF/VF this NIXLF is attached to.
+	 */
+ rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
+ pfvf->rx_chan_base, pfvf->mac_addr);
+
+ /* Add this PF_FUNC to bcast pkt replication list */
+ err = nix_update_bcast_mce_list(rvu, pcifunc, true);
+ if (err) {
+ dev_err(rvu->dev,
+ "Bcast list, failed to enable PF_FUNC 0x%x\n",
+ pcifunc);
+ return err;
+ }
+
+ rvu_npc_install_bcast_match_entry(rvu, pcifunc,
+ nixlf, pfvf->rx_chan_base);
+
+ return 0;
+}
+
+static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
+{
+ int err;
+
+ /* Remove this PF_FUNC from bcast pkt replication list */
+ err = nix_update_bcast_mce_list(rvu, pcifunc, false);
+ if (err) {
+ dev_err(rvu->dev,
+ "Bcast list, failed to disable PF_FUNC 0x%x\n",
+ pcifunc);
+ }
+
+ /* Free and disable any MCAM entries used by this NIX LF */
+ rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
+}
+
+static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
+ u64 format, bool v4, u64 *fidx)
+{
+ struct nix_lso_format field = {0};
+
+ /* IP's Length field */
+ field.layer = NIX_TXLAYER_OL3;
+	/* In IPv4 the length field is at byte offset 2; for IPv6 it's 4 */
+	field.offset = v4 ? 2 : 4;
+	field.sizem1 = 1; /* i.e. 2 bytes */
+ field.alg = NIX_LSOALG_ADD_PAYLEN;
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
+ *(u64 *)&field);
+
+ /* No ID field in IPv6 header */
+ if (!v4)
+ return;
+
+ /* IP's ID field */
+ field.layer = NIX_TXLAYER_OL3;
+ field.offset = 4;
+	field.sizem1 = 1; /* i.e. 2 bytes */
+ field.alg = NIX_LSOALG_ADD_SEGNUM;
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
+ *(u64 *)&field);
+}
+
+static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
+ u64 format, u64 *fidx)
+{
+ struct nix_lso_format field = {0};
+
+ /* TCP's sequence number field */
+ field.layer = NIX_TXLAYER_OL4;
+ field.offset = 4;
+	field.sizem1 = 3; /* i.e. 4 bytes */
+ field.alg = NIX_LSOALG_ADD_OFFSET;
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
+ *(u64 *)&field);
+
+ /* TCP's flags field */
+ field.layer = NIX_TXLAYER_OL4;
+ field.offset = 12;
+ field.sizem1 = 0; /* not needed */
+ field.alg = NIX_LSOALG_TCP_FLAGS;
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
+ *(u64 *)&field);
+}
+
+static void nix_setup_lso(struct rvu *rvu, int blkaddr)
+{
+ u64 cfg, idx, fidx = 0;
+
+ /* Enable LSO */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
+ /* For TSO, set first and middle segment flags to
+ * mask out PSH, RST & FIN flags in TCP packet
+ */
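+	/* 0xFFF2 keeps every TCP flag bit except FIN (bit 0), RST (bit 2)
+	 * and PSH (bit 3).
+	 */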
+ cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
+ cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
+ rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
+
+ /* Configure format fields for TCPv4 segmentation offload */
+ idx = NIX_LSO_FORMAT_IDX_TSOV4;
+ nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
+ nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
+
+ /* Set rest of the fields to NOP */
+ for (; fidx < 8; fidx++) {
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
+ }
+
+ /* Configure format fields for TCPv6 segmentation offload */
+ idx = NIX_LSO_FORMAT_IDX_TSOV6;
+ fidx = 0;
+ nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
+ nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
+
+ /* Set rest of the fields to NOP */
+ for (; fidx < 8; fidx++) {
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
+ }
+}
+
+static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
+{
+ kfree(pfvf->rq_bmap);
+ kfree(pfvf->sq_bmap);
+ kfree(pfvf->cq_bmap);
+ if (pfvf->rq_ctx)
+ qmem_free(rvu->dev, pfvf->rq_ctx);
+ if (pfvf->sq_ctx)
+ qmem_free(rvu->dev, pfvf->sq_ctx);
+ if (pfvf->cq_ctx)
+ qmem_free(rvu->dev, pfvf->cq_ctx);
+ if (pfvf->rss_ctx)
+ qmem_free(rvu->dev, pfvf->rss_ctx);
+ if (pfvf->nix_qints_ctx)
+ qmem_free(rvu->dev, pfvf->nix_qints_ctx);
+ if (pfvf->cq_ints_ctx)
+ qmem_free(rvu->dev, pfvf->cq_ints_ctx);
+
+ pfvf->rq_bmap = NULL;
+ pfvf->cq_bmap = NULL;
+ pfvf->sq_bmap = NULL;
+ pfvf->rq_ctx = NULL;
+ pfvf->sq_ctx = NULL;
+ pfvf->cq_ctx = NULL;
+ pfvf->rss_ctx = NULL;
+ pfvf->nix_qints_ctx = NULL;
+ pfvf->cq_ints_ctx = NULL;
+}
+
+static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
+ struct rvu_pfvf *pfvf, int nixlf,
+ int rss_sz, int rss_grps, int hwctx_size)
+{
+ int err, grp, num_indices;
+
+ /* RSS is not requested for this NIXLF */
+ if (!rss_sz)
+ return 0;
+ num_indices = rss_sz * rss_grps;
+
+ /* Alloc NIX RSS HW context memory and config the base */
+ err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
+ if (err)
+ return err;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
+ (u64)pfvf->rss_ctx->iova);
+
+ /* Config full RSS table size, enable RSS and caching */
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
+ BIT_ULL(36) | BIT_ULL(4) |
+ ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE));
+ /* Config RSS group offset and sizes */
+ for (grp = 0; grp < rss_grps; grp++)
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
+ ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
+ return 0;
+}
+
+static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
+ struct nix_aq_inst_s *inst)
+{
+ struct admin_queue *aq = block->aq;
+ struct nix_aq_res_s *result;
+ int timeout = 1000;
+ u64 reg, head;
+
+ result = (struct nix_aq_res_s *)aq->res->base;
+
+ /* Get current head pointer where to append this instruction */
+ reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
+ head = (reg >> 4) & AQ_PTR_MASK;
+
+ memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
+ (void *)inst, aq->inst->entry_sz);
+ memset(result, 0, sizeof(*result));
+ /* sync into memory */
+ wmb();
+
+ /* Ring the doorbell and wait for result */
+ rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
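+	/* Poll in 1us steps; with timeout = 1000 the worst-case wait is ~1ms */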
+ while (result->compcode == NIX_AQ_COMP_NOTDONE) {
+ cpu_relax();
+ udelay(1);
+ timeout--;
+ if (!timeout)
+ return -EBUSY;
+ }
+
+ if (result->compcode != NIX_AQ_COMP_GOOD)
+ /* TODO: Replace this with some error code */
+ return -EBUSY;
+
+ return 0;
+}
+
+static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
+ struct nix_aq_enq_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int nixlf, blkaddr, rc = 0;
+ struct nix_aq_inst_s inst;
+ struct rvu_block *block;
+ struct admin_queue *aq;
+ struct rvu_pfvf *pfvf;
+ void *ctx, *mask;
+ bool ena;
+ u64 cfg;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ block = &hw->block[blkaddr];
+ aq = block->aq;
+ if (!aq) {
+ dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
+ return NIX_AF_ERR_AQ_ENQUEUE;
+ }
+
+ nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ switch (req->ctype) {
+ case NIX_AQ_CTYPE_RQ:
+ /* Check if index exceeds max no of queues */
+ if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
+ rc = NIX_AF_ERR_AQ_ENQUEUE;
+ break;
+ case NIX_AQ_CTYPE_SQ:
+ if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
+ rc = NIX_AF_ERR_AQ_ENQUEUE;
+ break;
+ case NIX_AQ_CTYPE_CQ:
+ if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
+ rc = NIX_AF_ERR_AQ_ENQUEUE;
+ break;
+ case NIX_AQ_CTYPE_RSS:
+ /* Check if RSS is enabled and qidx is within range */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
+ if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
+ (req->qidx >= (256UL << (cfg & 0xF))))
+ rc = NIX_AF_ERR_AQ_ENQUEUE;
+ break;
+ case NIX_AQ_CTYPE_MCE:
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
+ /* Check if index exceeds MCE list length */
+ if (!hw->nix0->mcast.mce_ctx ||
+ (req->qidx >= (256UL << (cfg & 0xF))))
+ rc = NIX_AF_ERR_AQ_ENQUEUE;
+
+ /* Adding multicast lists for requests from PF/VFs is not
+ * yet supported, so ignore this.
+ */
+ if (rsp)
+ rc = NIX_AF_ERR_AQ_ENQUEUE;
+ break;
+ default:
+ rc = NIX_AF_ERR_AQ_ENQUEUE;
+ }
+
+ if (rc)
+ return rc;
+
+ /* Check if SQ pointed SMQ belongs to this PF/VF or not */
+ if (req->ctype == NIX_AQ_CTYPE_SQ &&
+ req->op != NIX_AQ_INSTOP_WRITE) {
+ if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
+ pcifunc, req->sq.smq))
+ return NIX_AF_ERR_AQ_ENQUEUE;
+ }
+
+ memset(&inst, 0, sizeof(struct nix_aq_inst_s));
+ inst.lf = nixlf;
+ inst.cindex = req->qidx;
+ inst.ctype = req->ctype;
+ inst.op = req->op;
+	/* Enqueuing multiple instructions is not yet supported, so always
+	 * choose the first entry in result memory.
+ */
+ inst.res_addr = (u64)aq->res->iova;
+
+ /* Clean result + context memory */
+ memset(aq->res->base, 0, aq->res->entry_sz);
+ /* Context needs to be written at RES_ADDR + 128 */
+ ctx = aq->res->base + 128;
+ /* Mask needs to be written at RES_ADDR + 256 */
+ mask = aq->res->base + 256;
+
+ switch (req->op) {
+ case NIX_AQ_INSTOP_WRITE:
+ if (req->ctype == NIX_AQ_CTYPE_RQ)
+ memcpy(mask, &req->rq_mask,
+ sizeof(struct nix_rq_ctx_s));
+ else if (req->ctype == NIX_AQ_CTYPE_SQ)
+ memcpy(mask, &req->sq_mask,
+ sizeof(struct nix_sq_ctx_s));
+ else if (req->ctype == NIX_AQ_CTYPE_CQ)
+ memcpy(mask, &req->cq_mask,
+ sizeof(struct nix_cq_ctx_s));
+ else if (req->ctype == NIX_AQ_CTYPE_RSS)
+ memcpy(mask, &req->rss_mask,
+ sizeof(struct nix_rsse_s));
+ else if (req->ctype == NIX_AQ_CTYPE_MCE)
+ memcpy(mask, &req->mce_mask,
+ sizeof(struct nix_rx_mce_s));
+ /* Fall through */
+ case NIX_AQ_INSTOP_INIT:
+ if (req->ctype == NIX_AQ_CTYPE_RQ)
+ memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
+ else if (req->ctype == NIX_AQ_CTYPE_SQ)
+ memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
+ else if (req->ctype == NIX_AQ_CTYPE_CQ)
+ memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
+ else if (req->ctype == NIX_AQ_CTYPE_RSS)
+ memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
+ else if (req->ctype == NIX_AQ_CTYPE_MCE)
+ memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
+ break;
+ case NIX_AQ_INSTOP_NOP:
+ case NIX_AQ_INSTOP_READ:
+ case NIX_AQ_INSTOP_LOCK:
+ case NIX_AQ_INSTOP_UNLOCK:
+ break;
+ default:
+ rc = NIX_AF_ERR_AQ_ENQUEUE;
+ return rc;
+ }
+
+ spin_lock(&aq->lock);
+
+ /* Submit the instruction to AQ */
+ rc = nix_aq_enqueue_wait(rvu, block, &inst);
+ if (rc) {
+ spin_unlock(&aq->lock);
+ return rc;
+ }
+
+ /* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
+ if (req->op == NIX_AQ_INSTOP_INIT) {
+ if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
+ __set_bit(req->qidx, pfvf->rq_bmap);
+ if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
+ __set_bit(req->qidx, pfvf->sq_bmap);
+ if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
+ __set_bit(req->qidx, pfvf->cq_bmap);
+ }
+
+ if (req->op == NIX_AQ_INSTOP_WRITE) {
+ if (req->ctype == NIX_AQ_CTYPE_RQ) {
+ ena = (req->rq.ena & req->rq_mask.ena) |
+ (test_bit(req->qidx, pfvf->rq_bmap) &
+ ~req->rq_mask.ena);
+ if (ena)
+ __set_bit(req->qidx, pfvf->rq_bmap);
+ else
+ __clear_bit(req->qidx, pfvf->rq_bmap);
+ }
+ if (req->ctype == NIX_AQ_CTYPE_SQ) {
+			ena = (req->sq.ena & req->sq_mask.ena) |
+ (test_bit(req->qidx, pfvf->sq_bmap) &
+ ~req->sq_mask.ena);
+ if (ena)
+ __set_bit(req->qidx, pfvf->sq_bmap);
+ else
+ __clear_bit(req->qidx, pfvf->sq_bmap);
+ }
+ if (req->ctype == NIX_AQ_CTYPE_CQ) {
+			ena = (req->cq.ena & req->cq_mask.ena) |
+ (test_bit(req->qidx, pfvf->cq_bmap) &
+ ~req->cq_mask.ena);
+ if (ena)
+ __set_bit(req->qidx, pfvf->cq_bmap);
+ else
+ __clear_bit(req->qidx, pfvf->cq_bmap);
+ }
+ }
+
+ if (rsp) {
+ /* Copy read context into mailbox */
+ if (req->op == NIX_AQ_INSTOP_READ) {
+ if (req->ctype == NIX_AQ_CTYPE_RQ)
+ memcpy(&rsp->rq, ctx,
+ sizeof(struct nix_rq_ctx_s));
+ else if (req->ctype == NIX_AQ_CTYPE_SQ)
+ memcpy(&rsp->sq, ctx,
+ sizeof(struct nix_sq_ctx_s));
+ else if (req->ctype == NIX_AQ_CTYPE_CQ)
+ memcpy(&rsp->cq, ctx,
+ sizeof(struct nix_cq_ctx_s));
+ else if (req->ctype == NIX_AQ_CTYPE_RSS)
+ memcpy(&rsp->rss, ctx,
+				       sizeof(struct nix_rsse_s));
+ else if (req->ctype == NIX_AQ_CTYPE_MCE)
+ memcpy(&rsp->mce, ctx,
+ sizeof(struct nix_rx_mce_s));
+ }
+ }
+
+ spin_unlock(&aq->lock);
+ return 0;
+}
+
+static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+ struct nix_aq_enq_req aq_req;
+ unsigned long *bmap;
+ int qidx, q_cnt = 0;
+ int err = 0, rc;
+
+ if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
+ return NIX_AF_ERR_AQ_ENQUEUE;
+
+ memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
+ aq_req.hdr.pcifunc = req->hdr.pcifunc;
+
+ if (req->ctype == NIX_AQ_CTYPE_CQ) {
+ aq_req.cq.ena = 0;
+ aq_req.cq_mask.ena = 1;
+ q_cnt = pfvf->cq_ctx->qsize;
+ bmap = pfvf->cq_bmap;
+ }
+ if (req->ctype == NIX_AQ_CTYPE_SQ) {
+ aq_req.sq.ena = 0;
+ aq_req.sq_mask.ena = 1;
+ q_cnt = pfvf->sq_ctx->qsize;
+ bmap = pfvf->sq_bmap;
+ }
+ if (req->ctype == NIX_AQ_CTYPE_RQ) {
+ aq_req.rq.ena = 0;
+ aq_req.rq_mask.ena = 1;
+ q_cnt = pfvf->rq_ctx->qsize;
+ bmap = pfvf->rq_bmap;
+ }
+
+ aq_req.ctype = req->ctype;
+ aq_req.op = NIX_AQ_INSTOP_WRITE;
+
+ for (qidx = 0; qidx < q_cnt; qidx++) {
+ if (!test_bit(qidx, bmap))
+ continue;
+ aq_req.qidx = qidx;
+ rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
+ if (rc) {
+ err = rc;
+ dev_err(rvu->dev, "Failed to disable %s:%d context\n",
+ (req->ctype == NIX_AQ_CTYPE_CQ) ?
+ "CQ" : ((req->ctype == NIX_AQ_CTYPE_RQ) ?
+ "RQ" : "SQ"), qidx);
+ }
+ }
+
+ return err;
+}
+
+int rvu_mbox_handler_NIX_AQ_ENQ(struct rvu *rvu,
+ struct nix_aq_enq_req *req,
+ struct nix_aq_enq_rsp *rsp)
+{
+ return rvu_nix_aq_enq_inst(rvu, req, rsp);
+}
+
+int rvu_mbox_handler_NIX_HWCTX_DISABLE(struct rvu *rvu,
+ struct hwctx_disable_req *req,
+ struct msg_rsp *rsp)
+{
+ return nix_lf_hwctx_disable(rvu, req);
+}
+
+int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu,
+ struct nix_lf_alloc_req *req,
+ struct nix_lf_alloc_rsp *rsp)
+{
+ int nixlf, qints, hwctx_size, err, rc = 0;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+ struct rvu_pfvf *pfvf;
+ u64 cfg, ctx_cfg;
+ int blkaddr;
+
+ if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
+ return NIX_AF_ERR_PARAM;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ block = &hw->block[blkaddr];
+ nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ /* If RSS is being enabled, check if requested config is valid.
+	 * The RSS table size must be a power of two; otherwise
+	 * RSS_GRP::OFFSET + adder might go beyond that group, or the
+	 * entire table can't be used.
+ */
+ if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
+ !is_power_of_2(req->rss_sz)))
+ return NIX_AF_ERR_RSS_SIZE_INVALID;
+
+ if (req->rss_sz &&
+ (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
+ return NIX_AF_ERR_RSS_GRPS_INVALID;
+
+ /* Reset this NIX LF */
+ err = rvu_lf_reset(rvu, block, nixlf);
+ if (err) {
+ dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
+ block->addr - BLKADDR_NIX0, nixlf);
+ return NIX_AF_ERR_LF_RESET;
+ }
+
+ ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
+
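+	/* NIX_AF_CONST3 appears to pack log2 of each HW context size into
+	 * 4-bit fields, given the shifts used below: SQ at bits [3:0],
+	 * RQ at [7:4], CQ at [11:8], RSS at [15:12], QINT at [23:20] and
+	 * CINT at [27:24].
+	 */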
+ /* Alloc NIX RQ HW context memory and config the base */
+ hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
+ if (err)
+ goto free_mem;
+
+ pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
+ if (!pfvf->rq_bmap)
+ goto free_mem;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
+ (u64)pfvf->rq_ctx->iova);
+
+ /* Set caching and queue count in HW */
+ cfg = BIT_ULL(36) | (req->rq_cnt - 1);
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
+
+ /* Alloc NIX SQ HW context memory and config the base */
+ hwctx_size = 1UL << (ctx_cfg & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
+ if (err)
+ goto free_mem;
+
+ pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
+ if (!pfvf->sq_bmap)
+ goto free_mem;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
+ (u64)pfvf->sq_ctx->iova);
+ cfg = BIT_ULL(36) | (req->sq_cnt - 1);
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
+
+ /* Alloc NIX CQ HW context memory and config the base */
+ hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
+ if (err)
+ goto free_mem;
+
+ pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
+ if (!pfvf->cq_bmap)
+ goto free_mem;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
+ (u64)pfvf->cq_ctx->iova);
+ cfg = BIT_ULL(36) | (req->cq_cnt - 1);
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
+
+ /* Initialize receive side scaling (RSS) */
+ hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
+ err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf,
+ req->rss_sz, req->rss_grps, hwctx_size);
+ if (err)
+ goto free_mem;
+
+ /* Alloc memory for CQINT's HW contexts */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
+ qints = (cfg >> 24) & 0xFFF;
+ hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
+ if (err)
+ goto free_mem;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
+ (u64)pfvf->cq_ints_ctx->iova);
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf), BIT_ULL(36));
+
+ /* Alloc memory for QINT's HW contexts */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
+ qints = (cfg >> 12) & 0xFFF;
+ hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
+ if (err)
+ goto free_mem;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
+ (u64)pfvf->nix_qints_ctx->iova);
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), BIT_ULL(36));
+
+ /* Enable LMTST for this NIX LF */
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
+
+	/* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC.
+	 * If the requester sent 'RVU_DEFAULT_PF_FUNC', use this NIX LF's
+	 * own PCIFUNC.
+ */
+ if (req->npa_func == RVU_DEFAULT_PF_FUNC)
+ cfg = pcifunc;
+ else
+ cfg = req->npa_func;
+
+ if (req->sso_func == RVU_DEFAULT_PF_FUNC)
+ cfg |= (u64)pcifunc << 16;
+ else
+ cfg |= (u64)req->sso_func << 16;
+
+ cfg |= (u64)req->xqe_sz << 33;
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);
+
+ /* Config Rx pkt length, csum checks and apad enable / disable */
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
+
+ err = nix_interface_init(rvu, pcifunc, NIX_INTF_TYPE_CGX, nixlf);
+ if (err)
+ goto free_mem;
+
+ goto exit;
+
+free_mem:
+ nix_ctx_free(rvu, pfvf);
+ rc = -ENOMEM;
+
+exit:
+ /* Set macaddr of this PF/VF */
+ ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
+
+ /* set SQB size info */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
+ rsp->sqb_size = (cfg >> 34) & 0xFFFF;
+ rsp->rx_chan_base = pfvf->rx_chan_base;
+ rsp->tx_chan_base = pfvf->tx_chan_base;
+ rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
+ rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
+ rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
+ rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
+ return rc;
+}
+
+int rvu_mbox_handler_NIX_LF_FREE(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+ int blkaddr, nixlf, err;
+ struct rvu_pfvf *pfvf;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ block = &hw->block[blkaddr];
+ nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nix_interface_deinit(rvu, pcifunc, nixlf);
+
+ /* Reset this NIX LF */
+ err = rvu_lf_reset(rvu, block, nixlf);
+ if (err) {
+ dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
+ block->addr - BLKADDR_NIX0, nixlf);
+ return NIX_AF_ERR_LF_RESET;
+ }
+
+ nix_ctx_free(rvu, pfvf);
+
+ return 0;
+}
+
+/* Disable shaping of pkts by a scheduler queue
+ * at a given scheduler level.
+ */
+static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
+ int lvl, int schq)
+{
+ u64 cir_reg = 0, pir_reg = 0;
+ u64 cfg;
+
+ switch (lvl) {
+ case NIX_TXSCH_LVL_TL1:
+ cir_reg = NIX_AF_TL1X_CIR(schq);
+ pir_reg = 0; /* PIR not available at TL1 */
+ break;
+ case NIX_TXSCH_LVL_TL2:
+ cir_reg = NIX_AF_TL2X_CIR(schq);
+ pir_reg = NIX_AF_TL2X_PIR(schq);
+ break;
+ case NIX_TXSCH_LVL_TL3:
+ cir_reg = NIX_AF_TL3X_CIR(schq);
+ pir_reg = NIX_AF_TL3X_PIR(schq);
+ break;
+ case NIX_TXSCH_LVL_TL4:
+ cir_reg = NIX_AF_TL4X_CIR(schq);
+ pir_reg = NIX_AF_TL4X_PIR(schq);
+ break;
+ }
+
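+	/* Bit0 of the CIR/PIR registers is the rate-limit enable; clearing
+	 * it disables committed/peak shaping on this scheduler queue.
+	 */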
+ if (!cir_reg)
+ return;
+ cfg = rvu_read64(rvu, blkaddr, cir_reg);
+ rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));
+
+ if (!pir_reg)
+ return;
+ cfg = rvu_read64(rvu, blkaddr, pir_reg);
+ rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
+}
+
+static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
+ int lvl, int schq)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int link;
+
+ /* Reset TL4's SDP link config */
+ if (lvl == NIX_TXSCH_LVL_TL4)
+ rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
+
+ if (lvl != NIX_TXSCH_LVL_TL2)
+ return;
+
+ /* Reset TL2's CGX or LBK link config */
+ for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
+}
+
+int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu,
+ struct nix_txsch_alloc_req *req,
+ struct nix_txsch_alloc_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct nix_txsch *txsch;
+ int lvl, idx, req_schq;
+ struct rvu_pfvf *pfvf;
+ struct nix_hw *nix_hw;
+ int blkaddr, rc = 0;
+ u16 schq;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return -EINVAL;
+
+ spin_lock(&rvu->rsrc_lock);
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ txsch = &nix_hw->txsch[lvl];
+ req_schq = req->schq_contig[lvl] + req->schq[lvl];
+
+ /* There are only 28 TL1s */
+ if (lvl == NIX_TXSCH_LVL_TL1 && req_schq > txsch->schq.max)
+ goto err;
+
+ /* Check if request is valid */
+ if (!req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
+ goto err;
+
+ /* If contiguous queues are needed, check for availability */
+ if (req->schq_contig[lvl] &&
+ !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
+ goto err;
+
+ /* Check if full request can be accommodated */
+		if (req_schq > rvu_rsrc_free_count(&txsch->schq))
+ goto err;
+ }
+
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ txsch = &nix_hw->txsch[lvl];
+ rsp->schq_contig[lvl] = req->schq_contig[lvl];
+ rsp->schq[lvl] = req->schq[lvl];
+
+ schq = 0;
+ /* Alloc contiguous queues first */
+ if (req->schq_contig[lvl]) {
+ schq = rvu_alloc_rsrc_contig(&txsch->schq,
+ req->schq_contig[lvl]);
+
+ for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
+ txsch->pfvf_map[schq] = pcifunc;
+ nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
+ nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
+ rsp->schq_contig_list[lvl][idx] = schq;
+ schq++;
+ }
+ }
+
+ /* Alloc non-contiguous queues */
+ for (idx = 0; idx < req->schq[lvl]; idx++) {
+ schq = rvu_alloc_rsrc(&txsch->schq);
+ txsch->pfvf_map[schq] = pcifunc;
+ nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
+ nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
+ rsp->schq_list[lvl][idx] = schq;
+ }
+ }
+ goto exit;
+err:
+ rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
+exit:
+ spin_unlock(&rvu->rsrc_lock);
+ return rc;
+}
+
+static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
+{
+ int blkaddr, nixlf, lvl, schq, err;
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct nix_txsch *txsch;
+ struct nix_hw *nix_hw;
+ u64 cfg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return -EINVAL;
+
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+	/* Disable TL2/TL4 queue links before SMQ flush */
+ spin_lock(&rvu->rsrc_lock);
+ for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
+ continue;
+
+ txsch = &nix_hw->txsch[lvl];
+ for (schq = 0; schq < txsch->schq.max; schq++) {
+ if (txsch->pfvf_map[schq] != pcifunc)
+ continue;
+ nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
+ }
+ }
+
+ /* Flush SMQs */
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
+ for (schq = 0; schq < txsch->schq.max; schq++) {
+ if (txsch->pfvf_map[schq] != pcifunc)
+ continue;
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
+ /* Do SMQ flush and set enqueue xoff */
+ cfg |= BIT_ULL(50) | BIT_ULL(49);
+ rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
+
+ /* Wait for flush to complete */
+ err = rvu_poll_reg(rvu, blkaddr,
+ NIX_AF_SMQX_CFG(schq), BIT_ULL(49), true);
+ if (err) {
+ dev_err(rvu->dev,
+ "NIXLF%d: SMQ%d flush failed\n", nixlf, schq);
+ }
+ }
+
+ /* Now free scheduler queues to free pool */
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ txsch = &nix_hw->txsch[lvl];
+ for (schq = 0; schq < txsch->schq.max; schq++) {
+ if (txsch->pfvf_map[schq] != pcifunc)
+ continue;
+ rvu_free_rsrc(&txsch->schq, schq);
+ txsch->pfvf_map[schq] = 0;
+ }
+ }
+ spin_unlock(&rvu->rsrc_lock);
+
+ /* Sync cached info for this LF in NDC-TX to LLC/DRAM */
+ rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
+ err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
+ if (err)
+ dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
+
+ return 0;
+}
+
+int rvu_mbox_handler_NIX_TXSCH_FREE(struct rvu *rvu,
+ struct nix_txsch_free_req *req,
+ struct msg_rsp *rsp)
+{
+ return nix_txschq_free(rvu, req->hdr.pcifunc);
+}
+
+static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
+ int lvl, u64 reg, u64 regval)
+{
+ u64 regbase = reg & 0xFFFF;
+ u16 schq, parent;
+
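+	/* The low 16 bits of the offset identify the register; the schq
+	 * index is encoded in the upper bits and recovered via TXSCHQ_IDX().
+	 */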
+ if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
+ return false;
+
+ schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
+ /* Check if this schq belongs to this PF/VF or not */
+ if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
+ return false;
+
+ parent = (regval >> 16) & 0x1FF;
+ /* Validate MDQ's TL4 parent */
+ if (regbase == NIX_AF_MDQX_PARENT(0) &&
+ !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
+ return false;
+
+ /* Validate TL4's TL3 parent */
+ if (regbase == NIX_AF_TL4X_PARENT(0) &&
+ !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
+ return false;
+
+ /* Validate TL3's TL2 parent */
+ if (regbase == NIX_AF_TL3X_PARENT(0) &&
+ !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
+ return false;
+
+ /* Validate TL2's TL1 parent */
+ if (regbase == NIX_AF_TL2X_PARENT(0) &&
+ !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
+ return false;
+
+ return true;
+}
+
+int rvu_mbox_handler_NIX_TXSCHQ_CFG(struct rvu *rvu,
+ struct nix_txschq_config *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ u64 reg, regval, schq_regbase;
+ struct nix_txsch *txsch;
+ struct nix_hw *nix_hw;
+ int blkaddr, idx, err;
+ int nixlf;
+
+ if (req->lvl >= NIX_TXSCH_LVL_CNT ||
+ req->num_regs > MAX_REGS_PER_MBOX_MSG)
+ return NIX_AF_INVAL_TXSCHQ_CFG;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return -EINVAL;
+
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ txsch = &nix_hw->txsch[req->lvl];
+ for (idx = 0; idx < req->num_regs; idx++) {
+ reg = req->reg[idx];
+ regval = req->regval[idx];
+ schq_regbase = reg & 0xFFFF;
+
+ if (!is_txschq_config_valid(rvu, pcifunc, blkaddr,
+ txsch->lvl, reg, regval))
+ return NIX_AF_INVAL_TXSCHQ_CFG;
+
+ /* Replace PF/VF visible NIXLF slot with HW NIXLF id */
+ if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
+ pcifunc, 0);
+ regval &= ~(0x7FULL << 24);
+ regval |= ((u64)nixlf << 24);
+ }
+
+ rvu_write64(rvu, blkaddr, reg, regval);
+
+ /* Check for SMQ flush, if so, poll for its completion */
+ if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
+ (regval & BIT_ULL(49))) {
+ err = rvu_poll_reg(rvu, blkaddr,
+ reg, BIT_ULL(49), true);
+ if (err)
+ return NIX_AF_SMQ_FLUSH_FAILED;
+ }
+ }
+ return 0;
+}
+
+static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
+ struct nix_vtag_config *req)
+{
+ u64 regval = 0;
+
+#define NIX_VTAGTYPE_MAX 0x8ull
+#define NIX_VTAGSIZE_MASK 0x7ull
+#define NIX_VTAGSTRIP_CAP_MASK 0x30ull
+
+ if (req->rx.vtag_type >= NIX_VTAGTYPE_MAX ||
+ req->vtag_size > VTAGSIZE_T8)
+ return -EINVAL;
+
+ regval = rvu_read64(rvu, blkaddr,
+ NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type));
+
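+	/* Bit4 enables vtag stripping and bit5 capture (together
+	 * NIX_VTAGSTRIP_CAP_MASK); a capture-only request ends up
+	 * clearing both bits below.
+	 */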
+ if (req->rx.strip_vtag && req->rx.capture_vtag)
+ regval |= BIT_ULL(4) | BIT_ULL(5);
+ else if (req->rx.strip_vtag)
+ regval |= BIT_ULL(4);
+ else
+ regval &= ~(BIT_ULL(4) | BIT_ULL(5));
+
+ regval &= ~NIX_VTAGSIZE_MASK;
+ regval |= req->vtag_size & NIX_VTAGSIZE_MASK;
+
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
+ return 0;
+}
+
+int rvu_mbox_handler_NIX_VTAG_CFG(struct rvu *rvu,
+ struct nix_vtag_config *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, nixlf, err;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ if (req->cfg_type) {
+ err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
+ if (err)
+ return NIX_AF_ERR_PARAM;
+ } else {
+ /* TODO: handle tx vtag configuration */
+ return 0;
+ }
+
+ return 0;
+}
+
+static int nix_setup_mce(struct rvu *rvu, int mce, u8 op,
+ u16 pcifunc, int next, bool eol)
+{
+ struct nix_aq_enq_req aq_req;
+ int err;
+
+	/* Zero the request so unset MCE fields don't carry stack garbage
+	 * into HW, since the write mask below marks all fields valid.
+	 */
+	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
+	aq_req.hdr.pcifunc = pcifunc;
+ aq_req.ctype = NIX_AQ_CTYPE_MCE;
+ aq_req.op = op;
+ aq_req.qidx = mce;
+
+ /* Forward bcast pkts to RQ0, RSS not needed */
+ aq_req.mce.op = 0;
+ aq_req.mce.index = 0;
+ aq_req.mce.eol = eol;
+ aq_req.mce.pf_func = pcifunc;
+ aq_req.mce.next = next;
+
+ /* All fields valid */
+ *(u64 *)(&aq_req.mce_mask) = ~0ULL;
+
+ err = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
+ if (err) {
+ dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
+ rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
+ return err;
+ }
+ return 0;
+}
+
+static int nix_update_mce_list(struct nix_mce_list *mce_list,
+ u16 pcifunc, int idx, bool add)
+{
+ struct mce *mce, *tail = NULL;
+ bool delete = false;
+
+ /* Scan through the current list */
+ hlist_for_each_entry(mce, &mce_list->head, node) {
+		/* If the entry already exists and this is a removal, delete it */
+ if (mce->pcifunc == pcifunc && !add) {
+ delete = true;
+ break;
+ }
+ tail = mce;
+ }
+
+ if (delete) {
+ hlist_del(&mce->node);
+ kfree(mce);
+ mce_list->count--;
+ return 0;
+ }
+
+ if (!add)
+ return 0;
+
+ /* Add a new one to the list, at the tail */
+ mce = kzalloc(sizeof(*mce), GFP_KERNEL);
+ if (!mce)
+ return -ENOMEM;
+ mce->idx = idx;
+ mce->pcifunc = pcifunc;
+ if (!tail)
+ hlist_add_head(&mce->node, &mce_list->head);
+ else
+ hlist_add_behind(&mce->node, &tail->node);
+ mce_list->count++;
+ return 0;
+}
+
+static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
+{
+ int err = 0, idx, next_idx, count;
+ struct nix_mce_list *mce_list;
+ struct mce *mce, *next_mce;
+ struct nix_mcast *mcast;
+ struct nix_hw *nix_hw;
+ struct rvu_pfvf *pfvf;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return 0;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return 0;
+
+ mcast = &nix_hw->mcast;
+
+ /* Get this PF/VF func's MCE index */
+ pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
+ idx = pfvf->bcast_mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
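+	/* Each PF owns a contiguous block of numVFs + 1 MCEs, entry 0 for
+	 * the PF itself followed by its VFs (see nix_setup_bcast_tables()),
+	 * hence base + FUNC above.
+	 */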
+
+ mce_list = &pfvf->bcast_mce_list;
+ if (idx > (pfvf->bcast_mce_idx + mce_list->max)) {
+ dev_err(rvu->dev,
+ "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
+ __func__, idx, mce_list->max,
+ pcifunc >> RVU_PFVF_PF_SHIFT);
+ return -EINVAL;
+ }
+
+ spin_lock(&mcast->mce_lock);
+
+ err = nix_update_mce_list(mce_list, pcifunc, idx, add);
+ if (err)
+ goto end;
+
+	/* TODO: Disable the bcast MCAM entry in NPC when the list empties */
+	if (!mce_list->count)
+		goto end;
+ count = mce_list->count;
+
+ /* Dump the updated list to HW */
+ hlist_for_each_entry(mce, &mce_list->head, node) {
+ next_idx = 0;
+ count--;
+ if (count) {
+ next_mce = hlist_entry(mce->node.next,
+ struct mce, node);
+ next_idx = next_mce->idx;
+ }
+ /* EOL should be set in last MCE */
+ err = nix_setup_mce(rvu, mce->idx,
+ NIX_AQ_INSTOP_WRITE, mce->pcifunc,
+				    next_idx, !count);
+ if (err)
+ goto end;
+ }
+
+end:
+ spin_unlock(&mcast->mce_lock);
+ return err;
+}
+
+static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw)
+{
+ struct nix_mcast *mcast = &nix_hw->mcast;
+ int err, pf, numvfs, idx;
+ struct rvu_pfvf *pfvf;
+ u16 pcifunc;
+ u64 cfg;
+
+ /* Skip PF0 (i.e AF) */
+ for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+ /* If PF is not enabled, nothing to do */
+ if (!((cfg >> 20) & 0x01))
+ continue;
+ /* Get numVFs attached to this PF */
+ numvfs = (cfg >> 12) & 0xFF;
+
+ pfvf = &rvu->pf[pf];
+ /* Save the start MCE */
+ pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
+
+ nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
+
+ for (idx = 0; idx < (numvfs + 1); idx++) {
+ /* idx-0 is for PF, followed by VFs */
+ pcifunc = (pf << RVU_PFVF_PF_SHIFT);
+ pcifunc |= idx;
+ /* Add dummy entries now, so that we don't have to check
+ * for whether AQ_OP should be INIT/WRITE later on.
+ * Will be updated when a NIXLF is attached/detached to
+ * these PF/VFs.
+ */
+ err = nix_setup_mce(rvu, pfvf->bcast_mce_idx + idx,
+ NIX_AQ_INSTOP_INIT,
+ pcifunc, 0, true);
+ if (err)
+ return err;
+ }
+ }
+ return 0;
+}
+
+static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
+{
+ struct nix_mcast *mcast = &nix_hw->mcast;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int err, size;
+
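+	/* MCE HW context size is 2^NIX_AF_CONST3[19:16] bytes */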
+ size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
+ size = (1ULL << size);
+
+ /* Alloc memory for multicast/mirror replication entries */
+ err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
+ (256UL << MC_TBL_SIZE), size);
+ if (err)
+ return -ENOMEM;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
+ (u64)mcast->mce_ctx->iova);
+
+ /* Set max list length equal to max no of VFs per PF + PF itself */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
+ BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
+
+ /* Alloc memory for multicast replication buffers */
+ size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
+ err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
+ (8UL << MC_BUF_CNT), size);
+ if (err)
+ return -ENOMEM;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
+ (u64)mcast->mcast_buf->iova);
+
+ /* Alloc pkind for NIX internal RX multicast/mirror replay */
+ mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);
+
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
+ BIT_ULL(63) | (mcast->replay_pkind << 24) |
+ BIT_ULL(20) | MC_BUF_CNT);
+
+ spin_lock_init(&mcast->mce_lock);
+
+ return nix_setup_bcast_tables(rvu, nix_hw);
+}
+
+static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
+{
+ struct nix_txsch *txsch;
+ u64 cfg, reg;
+ int err, lvl;
+
+ /* Get scheduler queue count of each type and alloc
+ * bitmap for each for alloc/free/attach operations.
+ */
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ txsch = &nix_hw->txsch[lvl];
+ txsch->lvl = lvl;
+ switch (lvl) {
+ case NIX_TXSCH_LVL_SMQ:
+ reg = NIX_AF_MDQ_CONST;
+ break;
+ case NIX_TXSCH_LVL_TL4:
+ reg = NIX_AF_TL4_CONST;
+ break;
+ case NIX_TXSCH_LVL_TL3:
+ reg = NIX_AF_TL3_CONST;
+ break;
+ case NIX_TXSCH_LVL_TL2:
+ reg = NIX_AF_TL2_CONST;
+ break;
+ case NIX_TXSCH_LVL_TL1:
+ reg = NIX_AF_TL1_CONST;
+ break;
+ }
+ cfg = rvu_read64(rvu, blkaddr, reg);
+ txsch->schq.max = cfg & 0xFFFF;
+ err = rvu_alloc_bitmap(&txsch->schq);
+ if (err)
+ return err;
+
+ /* Allocate memory for scheduler queues to
+ * PF/VF pcifunc mapping info.
+ */
+ txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!txsch->pfvf_map)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+int rvu_mbox_handler_NIX_STATS_RST(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int i, nixlf, blkaddr;
+ u64 stats;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ /* Get stats count supported by HW */
+ stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
+
+ /* Reset tx stats */
+ for (i = 0; i < ((stats >> 24) & 0xFF); i++)
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);
+
+ /* Reset rx stats */
+ for (i = 0; i < ((stats >> 32) & 0xFF); i++)
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
+
+ return 0;
+}
+
+/* Returns the ALG index to be set into NPC_RX_ACTION */
+static int get_flowkey_alg_idx(u32 flow_cfg)
+{
+ u32 ip_cfg;
+
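+	/* Map the requested config to one of the algorithm indices
+	 * programmed by nix_rx_flowkey_alg_cfg(); the two must stay
+	 * in sync. Port-only hashing is the fallback.
+	 */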
+ flow_cfg &= ~FLOW_KEY_TYPE_PORT;
+ ip_cfg = FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_IPV6;
+ if (flow_cfg == ip_cfg)
+ return FLOW_KEY_ALG_IP;
+ else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP))
+ return FLOW_KEY_ALG_TCP;
+ else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_UDP))
+ return FLOW_KEY_ALG_UDP;
+ else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_SCTP))
+ return FLOW_KEY_ALG_SCTP;
+ else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_UDP))
+ return FLOW_KEY_ALG_TCP_UDP;
+ else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_SCTP))
+ return FLOW_KEY_ALG_TCP_SCTP;
+ else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP))
+ return FLOW_KEY_ALG_UDP_SCTP;
+ else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP |
+ FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP))
+ return FLOW_KEY_ALG_TCP_UDP_SCTP;
+
+ return FLOW_KEY_ALG_PORT;
+}
+
+int rvu_mbox_handler_NIX_RSS_FLOWKEY_CFG(struct rvu *rvu,
+ struct nix_rss_flowkey_cfg *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int alg_idx, nixlf, blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ alg_idx = get_flowkey_alg_idx(req->flowkey_cfg);
+
+ rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
+ alg_idx, req->mcam_index);
+ return 0;
+}
+
+static void set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
+{
+ struct nix_rx_flowkey_alg *field = NULL;
+ int idx, key_type;
+
+ if (!alg)
+ return;
+
+ /* FIELD0: IPv4
+ * FIELD1: IPv6
+ * FIELD2: TCP/UDP/SCTP/ALL
+ * FIELD3: Unused
+ * FIELD4: Unused
+ *
+	 * Each of the 32 possible flow key algorithm definitions should
+	 * fall into the above incremental config (except ALG0); otherwise a
+	 * single NPC MCAM entry is not sufficient for supporting RSS.
+	 *
+	 * If a different definition or combination is needed then the NPC
+	 * MCAM has to be programmed to filter such pkts and its action
+	 * should point to this definition to calculate the flowtag or hash.
+ */
+ for (idx = 0; idx < 32; idx++) {
+ key_type = flow_cfg & BIT_ULL(idx);
+ if (!key_type)
+ continue;
+ switch (key_type) {
+ case FLOW_KEY_TYPE_PORT:
+ field = &alg[0];
+ field->sel_chan = true;
+ /* This should be set to 1, when SEL_CHAN is set */
+ field->bytesm1 = 1;
+ break;
+ case FLOW_KEY_TYPE_IPV4:
+ field = &alg[0];
+ field->lid = NPC_LID_LC;
+ field->ltype_match = NPC_LT_LC_IP;
+ field->hdr_offset = 12; /* SIP offset */
+ field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
+ field->ltype_mask = 0xF; /* Match only IPv4 */
+ break;
+ case FLOW_KEY_TYPE_IPV6:
+ field = &alg[1];
+ field->lid = NPC_LID_LC;
+ field->ltype_match = NPC_LT_LC_IP6;
+ field->hdr_offset = 8; /* SIP offset */
+ field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
+ field->ltype_mask = 0xF; /* Match only IPv6 */
+ break;
+ case FLOW_KEY_TYPE_TCP:
+ case FLOW_KEY_TYPE_UDP:
+ case FLOW_KEY_TYPE_SCTP:
+ field = &alg[2];
+ field->lid = NPC_LID_LD;
+ field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
+ if (key_type == FLOW_KEY_TYPE_TCP)
+ field->ltype_match |= NPC_LT_LD_TCP;
+ else if (key_type == FLOW_KEY_TYPE_UDP)
+ field->ltype_match |= NPC_LT_LD_UDP;
+ else if (key_type == FLOW_KEY_TYPE_SCTP)
+ field->ltype_match |= NPC_LT_LD_SCTP;
+ field->key_offset = 32; /* After IPv4/v6 SIP, DIP */
+ field->ltype_mask = ~field->ltype_match;
+ break;
+ }
+ if (field)
+ field->ena = 1;
+ field = NULL;
+ }
+}
+
+static void nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
+{
+#define FIELDS_PER_ALG 5
+ u64 field[FLOW_KEY_ALG_MAX][FIELDS_PER_ALG];
+ u32 flowkey_cfg, minkey_cfg;
+ int alg, fid;
+
+ memset(&field, 0, sizeof(u64) * FLOW_KEY_ALG_MAX * FIELDS_PER_ALG);
+
+ /* Only incoming channel number */
+ flowkey_cfg = FLOW_KEY_TYPE_PORT;
+ set_flowkey_fields((void *)&field[FLOW_KEY_ALG_PORT], flowkey_cfg);
+
+	/* For an incoming pkt, if none of the fields match then the flowkey
+	 * will be zero, hence the tag generated will also be zero.
+	 * The RSS entry at rsse_index = NIX_AF_LF()_RSS_GRP()[OFFSET] will
+ * be used to queue the packet.
+ */
+
+ /* IPv4/IPv6 SIP/DIPs */
+ flowkey_cfg = FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_IPV6;
+ set_flowkey_fields((void *)&field[FLOW_KEY_ALG_IP], flowkey_cfg);
+
+ /* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
+ minkey_cfg = flowkey_cfg;
+ flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP;
+ set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP], flowkey_cfg);
+
+ /* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
+ flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_UDP;
+ set_flowkey_fields((void *)&field[FLOW_KEY_ALG_UDP], flowkey_cfg);
+
+ /* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
+ flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_SCTP;
+ set_flowkey_fields((void *)&field[FLOW_KEY_ALG_SCTP], flowkey_cfg);
+
+ /* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
+ flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_UDP;
+ set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP_UDP], flowkey_cfg);
+
+ /* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
+ flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_SCTP;
+ set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP_SCTP], flowkey_cfg);
+
+ /* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
+ flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP;
+ set_flowkey_fields((void *)&field[FLOW_KEY_ALG_UDP_SCTP], flowkey_cfg);
+
+ /* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
+ flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP |
+ FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP;
+ set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP_UDP_SCTP],
+ flowkey_cfg);
+
+ for (alg = 0; alg < FLOW_KEY_ALG_MAX; alg++) {
+ for (fid = 0; fid < FIELDS_PER_ALG; fid++)
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
+ field[alg][fid]);
+ }
+}
+
+int rvu_mbox_handler_NIX_SET_MAC_ADDR(struct rvu *rvu,
+ struct nix_set_mac_addr *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+ int blkaddr, nixlf;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ ether_addr_copy(pfvf->mac_addr, req->mac_addr);
+
+ rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
+ pfvf->rx_chan_base, req->mac_addr);
+ return 0;
+}
+
+int rvu_mbox_handler_NIX_SET_RX_MODE(struct rvu *rvu, struct nix_rx_mode *req,
+ struct msg_rsp *rsp)
+{
+ bool allmulti = false, disable_promisc = false;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+ int blkaddr, nixlf;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ if (req->mode & NIX_RX_MODE_PROMISC)
+ allmulti = false;
+ else if (req->mode & NIX_RX_MODE_ALLMULTI)
+ allmulti = true;
+ else
+ disable_promisc = true;
+
+ if (disable_promisc)
+ rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);
+ else
+ rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
+ pfvf->rx_chan_base, allmulti);
+ return 0;
+}
+
+static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
+{
+ int idx, err;
+ u64 status;
+
+ /* Start X2P bus calibration */
+ rvu_write64(rvu, blkaddr, NIX_AF_CFG,
+ rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
+ /* Wait for calibration to complete */
+ err = rvu_poll_reg(rvu, blkaddr,
+ NIX_AF_STATUS, BIT_ULL(10), false);
+ if (err) {
+ dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
+ return err;
+ }
+
+ status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
+ /* Check if CGX devices are ready */
+ for (idx = 0; idx < cgx_get_cgx_cnt(); idx++) {
+ if (status & (BIT_ULL(16 + idx)))
+ continue;
+ dev_err(rvu->dev,
+ "CGX%d didn't respond to NIX X2P calibration\n", idx);
+ err = -EBUSY;
+ }
+
+ /* Check if LBK is ready */
+ if (!(status & BIT_ULL(19))) {
+ dev_err(rvu->dev,
+ "LBK didn't respond to NIX X2P calibration\n");
+ err = -EBUSY;
+ }
+
+ /* Clear 'calibrate_x2p' bit */
+ rvu_write64(rvu, blkaddr, NIX_AF_CFG,
+ rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
+ if (err || (status & 0x3FFULL))
+ dev_err(rvu->dev,
+ "NIX X2P calibration failed, status 0x%llx\n", status);
+ if (err)
+ return err;
+ return 0;
+}
+
+static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
+{
+ u64 cfg;
+ int err;
+
+ /* Set admin queue endianness */
+ cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
+#ifdef __BIG_ENDIAN
+ cfg |= BIT_ULL(1);
+ rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
+#else
+ cfg &= ~BIT_ULL(1);
+ rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
+#endif
+
+ /* Do not bypass NDC cache */
+ cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
+ cfg &= ~0x3FFEULL;
+ rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
+
+ /* Result structure can be followed by RQ/SQ/CQ context at
+	 * RES + 128 bytes and a write mask at RES + 256 bytes, depending on
+ * operation type. Alloc sufficient result memory for all operations.
+ */
+ err = rvu_aq_alloc(rvu, &block->aq,
+ Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
+ ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
+ if (err)
+ return err;
+
+ rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
+ rvu_write64(rvu, block->addr,
+ NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
+ return 0;
+}
+
+int rvu_nix_init(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkaddr, err;
+ u64 cfg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return 0;
+ block = &hw->block[blkaddr];
+
+ /* Calibrate X2P bus to check if CGX/LBK links are fine */
+ err = nix_calibrate_x2p(rvu, blkaddr);
+ if (err)
+ return err;
+
+ /* Set num of links of each type */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
+ hw->cgx = (cfg >> 12) & 0xF;
+ hw->lmac_per_cgx = (cfg >> 8) & 0xF;
+ hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
+ hw->lbk_links = 1;
+ hw->sdp_links = 1;
+
+ /* Initialize admin queue */
+ err = nix_aq_init(rvu, block);
+ if (err)
+ return err;
+
+ /* Restore CINT timer delay to HW reset values */
+ rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
+
+ /* Configure segmentation offload formats */
+ nix_setup_lso(rvu, blkaddr);
+
+ if (blkaddr == BLKADDR_NIX0) {
+ hw->nix0 = devm_kzalloc(rvu->dev,
+ sizeof(struct nix_hw), GFP_KERNEL);
+ if (!hw->nix0)
+ return -ENOMEM;
+
+ err = nix_setup_txschq(rvu, hw->nix0, blkaddr);
+ if (err)
+ return err;
+
+ err = nix_setup_mcast(rvu, hw->nix0, blkaddr);
+ if (err)
+ return err;
+
+ /* Config Outer L2, IP, TCP and UDP's NPC layer info.
+ * This helps HW protocol checker to identify headers
+ * and validate length and checksums.
+ */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
+ (NPC_LID_LA << 8) | (NPC_LT_LA_ETHER << 4) | 0x0F);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
+ (NPC_LID_LD << 8) | (NPC_LT_LD_UDP << 4) | 0x0F);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
+ (NPC_LID_LD << 8) | (NPC_LT_LD_TCP << 4) | 0x0F);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
+ (NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F);
+
+ nix_rx_flowkey_alg_cfg(rvu, blkaddr);
+ }
+ return 0;
+}
+
+void rvu_nix_freemem(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ struct nix_txsch *txsch;
+ struct nix_mcast *mcast;
+ struct nix_hw *nix_hw;
+ int blkaddr, lvl;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return;
+
+ block = &hw->block[blkaddr];
+ rvu_aq_free(rvu, block->aq);
+
+ if (blkaddr == BLKADDR_NIX0) {
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return;
+
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ txsch = &nix_hw->txsch[lvl];
+ kfree(txsch->schq.bmap);
+ }
+
+ mcast = &nix_hw->mcast;
+ qmem_free(rvu->dev, mcast->mce_ctx);
+ qmem_free(rvu->dev, mcast->mcast_buf);
+ }
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
new file mode 100644
index 000000000000..7531fdc54fa1
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
@@ -0,0 +1,472 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "rvu.h"
+
+static int npa_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
+ struct npa_aq_inst_s *inst)
+{
+ struct admin_queue *aq = block->aq;
+ struct npa_aq_res_s *result;
+ int timeout = 1000;
+ u64 reg, head;
+
+ result = (struct npa_aq_res_s *)aq->res->base;
+
+	/* Get the current AQ head, i.e. the slot where this instruction is appended */
+ reg = rvu_read64(rvu, block->addr, NPA_AF_AQ_STATUS);
+ head = (reg >> 4) & AQ_PTR_MASK;
+
+ memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
+ (void *)inst, aq->inst->entry_sz);
+ memset(result, 0, sizeof(*result));
+ /* sync into memory */
+ wmb();
+
+ /* Ring the doorbell and wait for result */
+ rvu_write64(rvu, block->addr, NPA_AF_AQ_DOOR, 1);
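+	/* Poll the completion code for up to ~1ms (1000 x 1us) */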
+ while (result->compcode == NPA_AQ_COMP_NOTDONE) {
+ cpu_relax();
+ udelay(1);
+ timeout--;
+ if (!timeout)
+ return -EBUSY;
+ }
+
+ if (result->compcode != NPA_AQ_COMP_GOOD)
+ /* TODO: Replace this with some error code */
+ return -EBUSY;
+
+ return 0;
+}
+
+static int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
+ struct npa_aq_enq_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, npalf, rc = 0;
+ struct npa_aq_inst_s inst;
+ struct rvu_block *block;
+ struct admin_queue *aq;
+ struct rvu_pfvf *pfvf;
+ void *ctx, *mask;
+ bool ena;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ if (!pfvf->aura_ctx || req->aura_id >= pfvf->aura_ctx->qsize)
+ return NPA_AF_ERR_AQ_ENQUEUE;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
+ if (!pfvf->npalf || blkaddr < 0)
+ return NPA_AF_ERR_AF_LF_INVALID;
+
+ block = &hw->block[blkaddr];
+ aq = block->aq;
+ if (!aq) {
+ dev_warn(rvu->dev, "%s: NPA AQ not initialized\n", __func__);
+ return NPA_AF_ERR_AQ_ENQUEUE;
+ }
+
+ npalf = rvu_get_lf(rvu, block, pcifunc, 0);
+ if (npalf < 0)
+ return NPA_AF_ERR_AF_LF_INVALID;
+
+ memset(&inst, 0, sizeof(struct npa_aq_inst_s));
+ inst.cindex = req->aura_id;
+ inst.lf = npalf;
+ inst.ctype = req->ctype;
+ inst.op = req->op;
+	/* Enqueuing multiple instructions at once is not supported yet,
+	 * so always use the first entry in result memory.
+ */
+ inst.res_addr = (u64)aq->res->iova;
+
+ /* Clean result + context memory */
+ memset(aq->res->base, 0, aq->res->entry_sz);
+ /* Context needs to be written at RES_ADDR + 128 */
+ ctx = aq->res->base + 128;
+ /* Mask needs to be written at RES_ADDR + 256 */
+ mask = aq->res->base + 256;
+
+ switch (req->op) {
+ case NPA_AQ_INSTOP_WRITE:
+ /* Copy context and write mask */
+ if (req->ctype == NPA_AQ_CTYPE_AURA) {
+ memcpy(mask, &req->aura_mask,
+ sizeof(struct npa_aura_s));
+ memcpy(ctx, &req->aura, sizeof(struct npa_aura_s));
+ } else {
+ memcpy(mask, &req->pool_mask,
+ sizeof(struct npa_pool_s));
+ memcpy(ctx, &req->pool, sizeof(struct npa_pool_s));
+ }
+ break;
+ case NPA_AQ_INSTOP_INIT:
+ if (req->ctype == NPA_AQ_CTYPE_AURA) {
+ if (req->aura.pool_addr >= pfvf->pool_ctx->qsize) {
+ rc = NPA_AF_ERR_AQ_FULL;
+ break;
+ }
+ /* Set pool's context address */
+ req->aura.pool_addr = pfvf->pool_ctx->iova +
+ (req->aura.pool_addr * pfvf->pool_ctx->entry_sz);
+ memcpy(ctx, &req->aura, sizeof(struct npa_aura_s));
+ } else { /* POOL's context */
+ memcpy(ctx, &req->pool, sizeof(struct npa_pool_s));
+ }
+ break;
+ case NPA_AQ_INSTOP_NOP:
+ case NPA_AQ_INSTOP_READ:
+ case NPA_AQ_INSTOP_LOCK:
+ case NPA_AQ_INSTOP_UNLOCK:
+ break;
+ default:
+ rc = NPA_AF_ERR_AQ_FULL;
+ break;
+ }
+
+ if (rc)
+ return rc;
+
+ spin_lock(&aq->lock);
+
+ /* Submit the instruction to AQ */
+ rc = npa_aq_enqueue_wait(rvu, block, &inst);
+ if (rc) {
+ spin_unlock(&aq->lock);
+ return rc;
+ }
+
+ /* Set aura bitmap if aura hw context is enabled */
+ if (req->ctype == NPA_AQ_CTYPE_AURA) {
+ if (req->op == NPA_AQ_INSTOP_INIT && req->aura.ena)
+ __set_bit(req->aura_id, pfvf->aura_bmap);
+ if (req->op == NPA_AQ_INSTOP_WRITE) {
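+			/* Take the new ENA bit only where the write mask
+			 * selects it; otherwise keep the cached state.
+			 */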
+ ena = (req->aura.ena & req->aura_mask.ena) |
+ (test_bit(req->aura_id, pfvf->aura_bmap) &
+ ~req->aura_mask.ena);
+ if (ena)
+ __set_bit(req->aura_id, pfvf->aura_bmap);
+ else
+ __clear_bit(req->aura_id, pfvf->aura_bmap);
+ }
+ }
+
+ /* Set pool bitmap if pool hw context is enabled */
+ if (req->ctype == NPA_AQ_CTYPE_POOL) {
+ if (req->op == NPA_AQ_INSTOP_INIT && req->pool.ena)
+ __set_bit(req->aura_id, pfvf->pool_bmap);
+ if (req->op == NPA_AQ_INSTOP_WRITE) {
+ ena = (req->pool.ena & req->pool_mask.ena) |
+ (test_bit(req->aura_id, pfvf->pool_bmap) &
+ ~req->pool_mask.ena);
+ if (ena)
+ __set_bit(req->aura_id, pfvf->pool_bmap);
+ else
+ __clear_bit(req->aura_id, pfvf->pool_bmap);
+ }
+ }
+ spin_unlock(&aq->lock);
+
+ if (rsp) {
+ /* Copy read context into mailbox */
+ if (req->op == NPA_AQ_INSTOP_READ) {
+ if (req->ctype == NPA_AQ_CTYPE_AURA)
+ memcpy(&rsp->aura, ctx,
+ sizeof(struct npa_aura_s));
+ else
+ memcpy(&rsp->pool, ctx,
+ sizeof(struct npa_pool_s));
+ }
+ }
+
+ return 0;
+}
+
+static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+ struct npa_aq_enq_req aq_req;
+ unsigned long *bmap;
+ int id, cnt = 0;
+ int err = 0, rc;
+
+ if (!pfvf->pool_ctx || !pfvf->aura_ctx)
+ return NPA_AF_ERR_AQ_ENQUEUE;
+
+ memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
+ aq_req.hdr.pcifunc = req->hdr.pcifunc;
+
+ if (req->ctype == NPA_AQ_CTYPE_POOL) {
+ aq_req.pool.ena = 0;
+ aq_req.pool_mask.ena = 1;
+ cnt = pfvf->pool_ctx->qsize;
+ bmap = pfvf->pool_bmap;
+ } else if (req->ctype == NPA_AQ_CTYPE_AURA) {
+ aq_req.aura.ena = 0;
+ aq_req.aura_mask.ena = 1;
+ cnt = pfvf->aura_ctx->qsize;
+ bmap = pfvf->aura_bmap;
+ }
+
+ aq_req.ctype = req->ctype;
+ aq_req.op = NPA_AQ_INSTOP_WRITE;
+
+ for (id = 0; id < cnt; id++) {
+ if (!test_bit(id, bmap))
+ continue;
+ aq_req.aura_id = id;
+ rc = rvu_npa_aq_enq_inst(rvu, &aq_req, NULL);
+ if (rc) {
+ err = rc;
+ dev_err(rvu->dev, "Failed to disable %s:%d context\n",
+ (req->ctype == NPA_AQ_CTYPE_AURA) ?
+ "Aura" : "Pool", id);
+ }
+ }
+
+ return err;
+}
+
+int rvu_mbox_handler_NPA_AQ_ENQ(struct rvu *rvu,
+ struct npa_aq_enq_req *req,
+ struct npa_aq_enq_rsp *rsp)
+{
+ return rvu_npa_aq_enq_inst(rvu, req, rsp);
+}
+
+int rvu_mbox_handler_NPA_HWCTX_DISABLE(struct rvu *rvu,
+ struct hwctx_disable_req *req,
+ struct msg_rsp *rsp)
+{
+ return npa_lf_hwctx_disable(rvu, req);
+}
+
+static void npa_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
+{
+ kfree(pfvf->aura_bmap);
+ pfvf->aura_bmap = NULL;
+
+ qmem_free(rvu->dev, pfvf->aura_ctx);
+ pfvf->aura_ctx = NULL;
+
+ kfree(pfvf->pool_bmap);
+ pfvf->pool_bmap = NULL;
+
+ qmem_free(rvu->dev, pfvf->pool_ctx);
+ pfvf->pool_ctx = NULL;
+
+ qmem_free(rvu->dev, pfvf->npa_qints_ctx);
+ pfvf->npa_qints_ctx = NULL;
+}
+
+int rvu_mbox_handler_NPA_LF_ALLOC(struct rvu *rvu,
+ struct npa_lf_alloc_req *req,
+ struct npa_lf_alloc_rsp *rsp)
+{
+ int npalf, qints, hwctx_size, err, rc = 0;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+ struct rvu_pfvf *pfvf;
+ u64 cfg, ctx_cfg;
+ int blkaddr;
+
+ if (req->aura_sz > NPA_AURA_SZ_MAX ||
+ req->aura_sz == NPA_AURA_SZ_0 || !req->nr_pools)
+ return NPA_AF_ERR_PARAM;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
+ if (!pfvf->npalf || blkaddr < 0)
+ return NPA_AF_ERR_AF_LF_INVALID;
+
+ block = &hw->block[blkaddr];
+ npalf = rvu_get_lf(rvu, block, pcifunc, 0);
+ if (npalf < 0)
+ return NPA_AF_ERR_AF_LF_INVALID;
+
+ /* Reset this NPA LF */
+ err = rvu_lf_reset(rvu, block, npalf);
+ if (err) {
+ dev_err(rvu->dev, "Failed to reset NPALF%d\n", npalf);
+ return NPA_AF_ERR_LF_RESET;
+ }
+
+ ctx_cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST1);
+
+ /* Alloc memory for aura HW contexts */
+ hwctx_size = 1UL << (ctx_cfg & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->aura_ctx,
+ NPA_AURA_COUNT(req->aura_sz), hwctx_size);
+ if (err)
+ goto free_mem;
+
+ pfvf->aura_bmap = kcalloc(NPA_AURA_COUNT(req->aura_sz), sizeof(long),
+ GFP_KERNEL);
+ if (!pfvf->aura_bmap)
+ goto free_mem;
+
+ /* Alloc memory for pool HW contexts */
+ hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->pool_ctx, req->nr_pools, hwctx_size);
+ if (err)
+ goto free_mem;
+
+ pfvf->pool_bmap = kcalloc(NPA_AURA_COUNT(req->aura_sz), sizeof(long),
+ GFP_KERNEL);
+ if (!pfvf->pool_bmap)
+ goto free_mem;
+
+ /* Get no of queue interrupts supported */
+ cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST);
+ qints = (cfg >> 28) & 0xFFF;
+
+ /* Alloc memory for Qints HW contexts */
+ hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->npa_qints_ctx, qints, hwctx_size);
+ if (err)
+ goto free_mem;
+
+ cfg = rvu_read64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf));
+ /* Clear way partition mask and set aura offset to '0' */
+ cfg &= ~(BIT_ULL(34) - 1);
+ /* Set aura size & enable caching of contexts */
+ cfg |= (req->aura_sz << 16) | BIT_ULL(34);
+ rvu_write64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf), cfg);
+
+ /* Configure aura HW context's base */
+ rvu_write64(rvu, blkaddr, NPA_AF_LFX_LOC_AURAS_BASE(npalf),
+ (u64)pfvf->aura_ctx->iova);
+
+ /* Enable caching of qints hw context */
+ rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_CFG(npalf), BIT_ULL(36));
+ rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_BASE(npalf),
+ (u64)pfvf->npa_qints_ctx->iova);
+
+ goto exit;
+
+free_mem:
+ npa_ctx_free(rvu, pfvf);
+ rc = -ENOMEM;
+
+exit:
+ /* set stack page info */
+ cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST);
+ rsp->stack_pg_ptrs = (cfg >> 8) & 0xFF;
+ rsp->stack_pg_bytes = cfg & 0xFF;
+ rsp->qints = (cfg >> 28) & 0xFFF;
+ return rc;
+}
+
+int rvu_mbox_handler_NPA_LF_FREE(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+ struct rvu_pfvf *pfvf;
+ int npalf, err;
+ int blkaddr;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
+ if (!pfvf->npalf || blkaddr < 0)
+ return NPA_AF_ERR_AF_LF_INVALID;
+
+ block = &hw->block[blkaddr];
+ npalf = rvu_get_lf(rvu, block, pcifunc, 0);
+ if (npalf < 0)
+ return NPA_AF_ERR_AF_LF_INVALID;
+
+ /* Reset this NPA LF */
+ err = rvu_lf_reset(rvu, block, npalf);
+ if (err) {
+ dev_err(rvu->dev, "Failed to reset NPALF%d\n", npalf);
+ return NPA_AF_ERR_LF_RESET;
+ }
+
+ npa_ctx_free(rvu, pfvf);
+
+ return 0;
+}
+
+static int npa_aq_init(struct rvu *rvu, struct rvu_block *block)
+{
+ u64 cfg;
+ int err;
+
+ /* Set admin queue endianness */
+ cfg = rvu_read64(rvu, block->addr, NPA_AF_GEN_CFG);
+#ifdef __BIG_ENDIAN
+ cfg |= BIT_ULL(1);
+ rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg);
+#else
+ cfg &= ~BIT_ULL(1);
+ rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg);
+#endif
+
+ /* Do not bypass NDC cache */
+ cfg = rvu_read64(rvu, block->addr, NPA_AF_NDC_CFG);
+ cfg &= ~0x03DULL;
+ rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg);
+
+ /* Result structure can be followed by Aura/Pool context at
+	 * RES + 128 bytes and a write mask at RES + 256 bytes, depending on
+ * operation type. Alloc sufficient result memory for all operations.
+ */
+ err = rvu_aq_alloc(rvu, &block->aq,
+ Q_COUNT(AQ_SIZE), sizeof(struct npa_aq_inst_s),
+ ALIGN(sizeof(struct npa_aq_res_s), 128) + 256);
+ if (err)
+ return err;
+
+ rvu_write64(rvu, block->addr, NPA_AF_AQ_CFG, AQ_SIZE);
+ rvu_write64(rvu, block->addr,
+ NPA_AF_AQ_BASE, (u64)block->aq->inst->iova);
+ return 0;
+}
+
+int rvu_npa_init(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int blkaddr, err;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return 0;
+
+ /* Initialize admin queue */
+ err = npa_aq_init(rvu, &hw->block[blkaddr]);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+void rvu_npa_freemem(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return;
+
+ block = &hw->block[blkaddr];
+ rvu_aq_free(rvu, block->aq);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
new file mode 100644
index 000000000000..23ff47f7efc5
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -0,0 +1,816 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "rvu.h"
+#include "npc.h"
+#include "npc_profile.h"
+
+#define RSVD_MCAM_ENTRIES_PER_PF 2 /* Bcast & Promisc */
+#define RSVD_MCAM_ENTRIES_PER_NIXLF 1 /* Ucast for LFs */
+
+#define NIXLF_UCAST_ENTRY 0
+#define NIXLF_BCAST_ENTRY 1
+#define NIXLF_PROMISC_ENTRY 2
+
+#define NPC_PARSE_RESULT_DMAC_OFFSET 8
+
+struct mcam_entry {
+#define NPC_MAX_KWS_IN_KEY 7 /* Number of keywords in max keywidth */
+ u64 kw[NPC_MAX_KWS_IN_KEY];
+ u64 kw_mask[NPC_MAX_KWS_IN_KEY];
+ u64 action;
+ u64 vtag_action;
+};
+
+void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf)
+{
+ int blkaddr;
+ u64 val = 0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Config CPI base for the PKIND */
+ val = pkind | 1ULL << 62;
+ rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_CPI_DEFX(pkind, 0), val);
+}
+
+int rvu_npc_get_pkind(struct rvu *rvu, u16 pf)
+{
+ struct npc_pkind *pkind = &rvu->hw->pkind;
+ u32 map;
+ int i;
+
+ for (i = 0; i < pkind->rsrc.max; i++) {
+ map = pkind->pfchan_map[i];
+ if (((map >> 16) & 0x3F) == pf)
+ return i;
+ }
+ return -1;
+}
+
+static int npc_get_nixlf_mcam_index(struct npc_mcam *mcam,
+ u16 pcifunc, int nixlf, int type)
+{
+ int pf = rvu_get_pf(pcifunc);
+ int index;
+
+ /* Check if this is for a PF */
+ if (pf && !(pcifunc & RVU_PFVF_FUNC_MASK)) {
+ /* Reserved entries exclude PF0 */
+ pf--;
+ index = mcam->pf_offset + (pf * RSVD_MCAM_ENTRIES_PER_PF);
+ /* Broadcast address matching entry should be first so
+ * that the packet can be replicated to all VFs.
+ */
+ if (type == NIXLF_BCAST_ENTRY)
+ return index;
+ else if (type == NIXLF_PROMISC_ENTRY)
+ return index + 1;
+ }
+
+ return (mcam->nixlf_offset + (nixlf * RSVD_MCAM_ENTRIES_PER_NIXLF));
+}
+
+static int npc_get_bank(struct npc_mcam *mcam, int index)
+{
+ int bank = index / mcam->banksize;
+
+ /* 0,1 & 2,3 banks are combined for this keysize */
+ if (mcam->keysize == NPC_MCAM_KEY_X2)
+ return bank ? 2 : 0;
+
+ return bank;
+}
+
+static bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index)
+{
+ int bank = npc_get_bank(mcam, index);
+ u64 cfg;
+
+ index &= (mcam->banksize - 1);
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(index, bank));
+ return (cfg & 1);
+}
+
+static void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index, bool enable)
+{
+ int bank = npc_get_bank(mcam, index);
+ int actbank = bank;
+
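+	/* An entry spans 'banks_per_entry' consecutive banks (e.g. two for
+	 * the X2 keysize), so toggle the CFG bit in each of them.
+	 */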
+ index &= (mcam->banksize - 1);
+ for (; bank < (actbank + mcam->banks_per_entry); bank++) {
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CFG(index, bank),
+ enable ? 1 : 0);
+ }
+}
+
+static void npc_get_keyword(struct mcam_entry *entry, int idx,
+ u64 *cam0, u64 *cam1)
+{
+ u64 kw_mask = 0x00;
+
+#define CAM_MASK(n) (BIT_ULL(n) - 1)
+
+ /* 0, 2, 4, 6 indices refer to BANKX_CAMX_W0 and
+ * 1, 3, 5, 7 indices refer to BANKX_CAMX_W1.
+ *
+ * Also, only 48 bits of BANKX_CAMX_W1 are valid.
+ */
+ switch (idx) {
+ case 0:
+ /* BANK(X)_CAM_W0<63:0> = MCAM_KEY[KW0]<63:0> */
+ *cam1 = entry->kw[0];
+ kw_mask = entry->kw_mask[0];
+ break;
+ case 1:
+ /* BANK(X)_CAM_W1<47:0> = MCAM_KEY[KW1]<47:0> */
+ *cam1 = entry->kw[1] & CAM_MASK(48);
+ kw_mask = entry->kw_mask[1] & CAM_MASK(48);
+ break;
+ case 2:
+ /* BANK(X + 1)_CAM_W0<15:0> = MCAM_KEY[KW1]<63:48>
+ * BANK(X + 1)_CAM_W0<63:16> = MCAM_KEY[KW2]<47:0>
+ */
+ *cam1 = (entry->kw[1] >> 48) & CAM_MASK(16);
+ *cam1 |= ((entry->kw[2] & CAM_MASK(48)) << 16);
+ kw_mask = (entry->kw_mask[1] >> 48) & CAM_MASK(16);
+ kw_mask |= ((entry->kw_mask[2] & CAM_MASK(48)) << 16);
+ break;
+ case 3:
+ /* BANK(X + 1)_CAM_W1<15:0> = MCAM_KEY[KW2]<63:48>
+ * BANK(X + 1)_CAM_W1<47:16> = MCAM_KEY[KW3]<31:0>
+ */
+ *cam1 = (entry->kw[2] >> 48) & CAM_MASK(16);
+ *cam1 |= ((entry->kw[3] & CAM_MASK(32)) << 16);
+ kw_mask = (entry->kw_mask[2] >> 48) & CAM_MASK(16);
+ kw_mask |= ((entry->kw_mask[3] & CAM_MASK(32)) << 16);
+ break;
+ case 4:
+ /* BANK(X + 2)_CAM_W0<31:0> = MCAM_KEY[KW3]<63:32>
+ * BANK(X + 2)_CAM_W0<63:32> = MCAM_KEY[KW4]<31:0>
+ */
+ *cam1 = (entry->kw[3] >> 32) & CAM_MASK(32);
+ *cam1 |= ((entry->kw[4] & CAM_MASK(32)) << 32);
+ kw_mask = (entry->kw_mask[3] >> 32) & CAM_MASK(32);
+ kw_mask |= ((entry->kw_mask[4] & CAM_MASK(32)) << 32);
+ break;
+ case 5:
+ /* BANK(X + 2)_CAM_W1<31:0> = MCAM_KEY[KW4]<63:32>
+ * BANK(X + 2)_CAM_W1<47:32> = MCAM_KEY[KW5]<15:0>
+ */
+ *cam1 = (entry->kw[4] >> 32) & CAM_MASK(32);
+ *cam1 |= ((entry->kw[5] & CAM_MASK(16)) << 32);
+ kw_mask = (entry->kw_mask[4] >> 32) & CAM_MASK(32);
+ kw_mask |= ((entry->kw_mask[5] & CAM_MASK(16)) << 32);
+ break;
+ case 6:
+ /* BANK(X + 3)_CAM_W0<47:0> = MCAM_KEY[KW5]<63:16>
+ * BANK(X + 3)_CAM_W0<63:48> = MCAM_KEY[KW6]<15:0>
+ */
+ *cam1 = (entry->kw[5] >> 16) & CAM_MASK(48);
+ *cam1 |= ((entry->kw[6] & CAM_MASK(16)) << 48);
+ kw_mask = (entry->kw_mask[5] >> 16) & CAM_MASK(48);
+ kw_mask |= ((entry->kw_mask[6] & CAM_MASK(16)) << 48);
+ break;
+ case 7:
+ /* BANK(X + 3)_CAM_W1<47:0> = MCAM_KEY[KW6]<63:16> */
+ *cam1 = (entry->kw[6] >> 16) & CAM_MASK(48);
+ kw_mask = (entry->kw_mask[6] >> 16) & CAM_MASK(48);
+ break;
+ }
+
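+	/* CAM1 holds the masked compare value and CAM0 its complement
+	 * within the mask, so bits outside kw_mask are 0 in both words,
+	 * i.e. 'dontcare' (see npc_config_mcam_entry()).
+	 */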
+ *cam1 &= kw_mask;
+ *cam0 = ~*cam1 & kw_mask;
+}
+
+static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index, u8 intf,
+ struct mcam_entry *entry, bool enable)
+{
+ int bank = npc_get_bank(mcam, index);
+ int kw = 0, actbank, actindex;
+ u64 cam0, cam1;
+
+ actbank = bank; /* Save bank id, to set action later on */
+ actindex = index;
+ index &= (mcam->banksize - 1);
+
+ /* CAM1 takes the comparison value and
+ * CAM0 specifies match for a bit in key being '0' or '1' or 'dontcare'.
+ * CAM1<n> = 0 & CAM0<n> = 1 => match if key<n> = 0
+ * CAM1<n> = 1 & CAM0<n> = 0 => match if key<n> = 1
+ * CAM1<n> = 0 & CAM0<n> = 0 => always match i.e dontcare.
+ */
+ for (; bank < (actbank + mcam->banks_per_entry); bank++, kw = kw + 2) {
+ /* Interface should be set in all banks */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 1),
+ intf);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 0),
+ ~intf & 0x3);
+
+ /* Set the match key */
+ npc_get_keyword(entry, kw, &cam0, &cam1);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 1), cam1);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 0), cam0);
+
+ npc_get_keyword(entry, kw + 1, &cam0, &cam1);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 1), cam1);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 0), cam0);
+ }
+
+ /* Set 'action' */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(index, actbank), entry->action);
+
+ /* Set TAG 'action' */
+ rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_TAG_ACT(index, actbank),
+ entry->vtag_action);
+
+	/* Enable or disable the entry as requested */
+	npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, enable);
+}
+
+static u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index)
+{
+ int bank = npc_get_bank(mcam, index);
+
+ index &= (mcam->banksize - 1);
+ return rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
+}
+
+void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, u64 chan, u8 *mac_addr)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct mcam_entry entry = { {0} };
+ struct nix_rx_action action;
+ int blkaddr, index, kwi;
+ u64 mac = 0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ for (index = ETH_ALEN - 1; index >= 0; index--)
+ mac |= ((u64)*mac_addr++) << (8 * index);
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+
+ /* Match ingress channel and DMAC */
+ entry.kw[0] = chan;
+ entry.kw_mask[0] = 0xFFFULL;
+
+ kwi = NPC_PARSE_RESULT_DMAC_OFFSET / sizeof(u64);
+ entry.kw[kwi] = mac;
+ entry.kw_mask[kwi] = BIT_ULL(48) - 1;
+
+ /* Don't change the action if entry is already enabled
+ * Otherwise RSS action may get overwritten.
+ */
+ if (is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) {
+ *(u64 *)&action = npc_get_mcam_action(rvu, mcam,
+ blkaddr, index);
+ } else {
+ *(u64 *)&action = 0x00;
+ action.op = NIX_RX_ACTIONOP_UCAST;
+ action.pf_func = pcifunc;
+ }
+
+ entry.action = *(u64 *)&action;
+ npc_config_mcam_entry(rvu, mcam, blkaddr, index,
+ NIX_INTF_RX, &entry, true);
+}
+
+void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, u64 chan, bool allmulti)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct mcam_entry entry = { {0} };
+ struct nix_rx_action action;
+ int blkaddr, index, kwi;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Only PF or AF VF can add a promiscuous entry */
+ if (pcifunc & RVU_PFVF_FUNC_MASK)
+ return;
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_PROMISC_ENTRY);
+
+ entry.kw[0] = chan;
+ entry.kw_mask[0] = 0xFFFULL;
+
+ if (allmulti) {
+ kwi = NPC_PARSE_RESULT_DMAC_OFFSET / sizeof(u64);
+ entry.kw[kwi] = BIT_ULL(40); /* LSB bit of 1st byte in DMAC */
+ entry.kw_mask[kwi] = BIT_ULL(40);
+ }
+
+ *(u64 *)&action = 0x00;
+ action.op = NIX_RX_ACTIONOP_UCAST;
+ action.pf_func = pcifunc;
+
+ entry.action = *(u64 *)&action;
+ npc_config_mcam_entry(rvu, mcam, blkaddr, index,
+ NIX_INTF_RX, &entry, true);
+}
+
+void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int blkaddr, index;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+	/* Only PFs have a promiscuous entry */
+ if (pcifunc & RVU_PFVF_FUNC_MASK)
+ return;
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_PROMISC_ENTRY);
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false);
+}
+
+void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, u64 chan)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct mcam_entry entry = { {0} };
+ struct nix_rx_action action;
+#ifdef MCAST_MCE
+ struct rvu_pfvf *pfvf;
+#endif
+ int blkaddr, index;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Only PF can add a bcast match entry */
+ if (pcifunc & RVU_PFVF_FUNC_MASK)
+ return;
+#ifdef MCAST_MCE
+ pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
+#endif
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_BCAST_ENTRY);
+
+ /* Check for L2B bit and LMAC channel */
+ entry.kw[0] = BIT_ULL(25) | chan;
+ entry.kw_mask[0] = BIT_ULL(25) | 0xFFFULL;
+
+	*(u64 *)&action = 0x00;
+#ifdef MCAST_MCE
+	action.op = NIX_RX_ACTIONOP_MCAST;
+	action.pf_func = pcifunc;
+	action.index = pfvf->bcast_mce_idx;
+#else
+	/* Early silicon doesn't support pkt replication,
+	 * so install the entry with a UCAST action so that the PF
+	 * receives all broadcast packets.
+	 */
+	action.op = NIX_RX_ACTIONOP_UCAST;
+	action.pf_func = pcifunc;
+#endif
+
+ entry.action = *(u64 *)&action;
+ npc_config_mcam_entry(rvu, mcam, blkaddr, index,
+ NIX_INTF_RX, &entry, true);
+}
+
+void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
+ int group, int alg_idx, int mcam_index)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct nix_rx_action action;
+ int blkaddr, index, bank;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Check if this is for reserved default entry */
+ if (mcam_index < 0) {
+ if (group != DEFAULT_RSS_CONTEXT_GROUP)
+ return;
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+ } else {
+ /* TODO: validate this mcam index */
+ index = mcam_index;
+ }
+
+ if (index >= mcam->total_entries)
+ return;
+
+ bank = npc_get_bank(mcam, index);
+ index &= (mcam->banksize - 1);
+
+ *(u64 *)&action = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
+ /* Ignore if no action was set earlier */
+ if (!*(u64 *)&action)
+ return;
+
+ action.op = NIX_RX_ACTIONOP_RSS;
+ action.pf_func = pcifunc;
+ action.index = group;
+ action.flow_key_alg = alg_idx;
+
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(index, bank), *(u64 *)&action);
+}
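
The index arithmetic above is a plain bank split: a global MCAM index
selects a bank plus an offset inside it, with banksize a power of two.
A sketch of the helper this relies on (npc_get_bank() is not shown in
this hunk, so its body here is an assumption; note it cannot be a bare
division, since entries span multiple banks for the wider key sizes):

	static int npc_get_bank_sketch(struct npc_mcam *mcam, int index)
	{
		int bank = index / mcam->banksize;

		/* For X2 keys an entry spans two banks, so only even
		 * bank numbers can hold an entry start (assumption).
		 */
		if (mcam->keysize == NPC_MCAM_KEY_X2)
			bank = bank ? 2 : 0;
		return bank;
	}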
+
+void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct nix_rx_action action;
+ int blkaddr, index, bank;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Disable ucast MCAM match entry of this PF/VF */
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false);
+
+ /* For PF, disable promisc and bcast MCAM match entries */
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK)) {
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_BCAST_ENTRY);
+		/* For bcast, disable only if its action is not
+		 * packet replication. If the action is replication,
+		 * this PF's nixlf has instead been removed from the
+		 * bcast replication list.
+		 */
+ bank = npc_get_bank(mcam, index);
+ index &= (mcam->banksize - 1);
+ *(u64 *)&action = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
+ if (action.op != NIX_RX_ACTIONOP_MCAST)
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false);
+
+ rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);
+ }
+}
+
+#define LDATA_EXTRACT_CONFIG(intf, lid, ltype, ld, cfg) \
+ rvu_write64(rvu, blkaddr, \
+ NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, ltype, ld), cfg)
+
+#define LDATA_FLAGS_CONFIG(intf, ld, flags, cfg) \
+ rvu_write64(rvu, blkaddr, \
+ NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, flags), cfg)
+
+static void npc_config_ldata_extract(struct rvu *rvu, int blkaddr)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int lid, ltype;
+ int lid_count;
+ u64 cfg;
+
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
+ lid_count = (cfg >> 4) & 0xF;
+
+	/* First clear any existing config, i.e.
+	 * disable LDATA and FLAGS extraction.
+	 */
+ for (lid = 0; lid < lid_count; lid++) {
+ for (ltype = 0; ltype < 16; ltype++) {
+ LDATA_EXTRACT_CONFIG(NIX_INTF_RX, lid, ltype, 0, 0ULL);
+ LDATA_EXTRACT_CONFIG(NIX_INTF_RX, lid, ltype, 1, 0ULL);
+ LDATA_EXTRACT_CONFIG(NIX_INTF_TX, lid, ltype, 0, 0ULL);
+ LDATA_EXTRACT_CONFIG(NIX_INTF_TX, lid, ltype, 1, 0ULL);
+
+ LDATA_FLAGS_CONFIG(NIX_INTF_RX, 0, ltype, 0ULL);
+ LDATA_FLAGS_CONFIG(NIX_INTF_RX, 1, ltype, 0ULL);
+ LDATA_FLAGS_CONFIG(NIX_INTF_TX, 0, ltype, 0ULL);
+ LDATA_FLAGS_CONFIG(NIX_INTF_TX, 1, ltype, 0ULL);
+ }
+ }
+
+	/* If we plan to extract the outer IPv4 tuple for TCP/UDP pkts,
+	 * then a 112-bit key is not sufficient.
+	 */
+ if (mcam->keysize != NPC_MCAM_KEY_X2)
+ return;
+
+	/* For now, start placing extracted data/flags from bit 64 onwards */
+	/* Extract the DMAC from the packet */
+ cfg = (0x05 << 16) | BIT_ULL(7) | NPC_PARSE_RESULT_DMAC_OFFSET;
+ LDATA_EXTRACT_CONFIG(NIX_INTF_RX, NPC_LID_LA, NPC_LT_LA_ETHER, 0, cfg);
+}
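
One reading of the cfg word above (the field layout is inferred from the
values used, not from the hardware manual): a bytes-minus-one count in
bits 16..19 (0x05 means "extract 6 bytes", a full DMAC), a header offset
of 0 in bits 8..15, the enable bit at bit 7, and the destination byte
offset within the MCAM key in the low bits. Under that assumption the
write could also be expressed as:

	/* Hypothetical helper; field names and layout are assumptions */
	#define KEX_LD_CFG(bytesm1, hdr_ofs, ena, key_ofs)		\
		(((u64)(bytesm1) << 16) | ((u64)(hdr_ofs) << 8) |	\
		 ((u64)(ena) << 7) | ((u64)(key_ofs) & 0x3F))

	cfg = KEX_LD_CFG(0x5, 0x0, 0x1, NPC_PARSE_RESULT_DMAC_OFFSET);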
+
+static void npc_config_kpuaction(struct rvu *rvu, int blkaddr,
+ struct npc_kpu_profile_action *kpuaction,
+ int kpu, int entry, bool pkind)
+{
+ struct npc_kpu_action0 action0 = {0};
+ struct npc_kpu_action1 action1 = {0};
+ u64 reg;
+
+ action1.errlev = kpuaction->errlev;
+ action1.errcode = kpuaction->errcode;
+ action1.dp0_offset = kpuaction->dp0_offset;
+ action1.dp1_offset = kpuaction->dp1_offset;
+ action1.dp2_offset = kpuaction->dp2_offset;
+
+ if (pkind)
+ reg = NPC_AF_PKINDX_ACTION1(entry);
+ else
+ reg = NPC_AF_KPUX_ENTRYX_ACTION1(kpu, entry);
+
+ rvu_write64(rvu, blkaddr, reg, *(u64 *)&action1);
+
+ action0.byp_count = kpuaction->bypass_count;
+ action0.capture_ena = kpuaction->cap_ena;
+ action0.parse_done = kpuaction->parse_done;
+ action0.next_state = kpuaction->next_state;
+ action0.capture_lid = kpuaction->lid;
+ action0.capture_ltype = kpuaction->ltype;
+ action0.capture_flags = kpuaction->flags;
+ action0.ptr_advance = kpuaction->ptr_advance;
+ action0.var_len_offset = kpuaction->offset;
+ action0.var_len_mask = kpuaction->mask;
+ action0.var_len_right = kpuaction->right;
+ action0.var_len_shift = kpuaction->shift;
+
+ if (pkind)
+ reg = NPC_AF_PKINDX_ACTION0(entry);
+ else
+ reg = NPC_AF_KPUX_ENTRYX_ACTION0(kpu, entry);
+
+ rvu_write64(rvu, blkaddr, reg, *(u64 *)&action0);
+}
+
+static void npc_config_kpucam(struct rvu *rvu, int blkaddr,
+ struct npc_kpu_profile_cam *kpucam,
+ int kpu, int entry)
+{
+ struct npc_kpu_cam cam0 = {0};
+ struct npc_kpu_cam cam1 = {0};
+
+ cam1.state = kpucam->state & kpucam->state_mask;
+ cam1.dp0_data = kpucam->dp0 & kpucam->dp0_mask;
+ cam1.dp1_data = kpucam->dp1 & kpucam->dp1_mask;
+ cam1.dp2_data = kpucam->dp2 & kpucam->dp2_mask;
+
+ cam0.state = ~kpucam->state & kpucam->state_mask;
+ cam0.dp0_data = ~kpucam->dp0 & kpucam->dp0_mask;
+ cam0.dp1_data = ~kpucam->dp1 & kpucam->dp1_mask;
+ cam0.dp2_data = ~kpucam->dp2 & kpucam->dp2_mask;
+
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_KPUX_ENTRYX_CAMX(kpu, entry, 0), *(u64 *)&cam0);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_KPUX_ENTRYX_CAMX(kpu, entry, 1), *(u64 *)&cam1);
+}
+
+static inline u64 enable_mask(int count)
+{
+ return (((count) < 64) ? ~(BIT_ULL(count) - 1) : (0x00ULL));
+}
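
Since the NPC_AF_KPUX_ENTRY_DISX registers hold per-entry *disable*
bits, enabling the first "count" entries means clearing the low "count"
bits and leaving the rest set. Worked values under that reading:

	/* enable_mask(0)  == ~0ULL               -> all 64 disabled
	 * enable_mask(3)  == 0xFFFFFFFFFFFFFFF8 -> entries 0..2 enabled
	 * enable_mask(64) == 0                  -> whole word enabled
	 */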
+
+static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu,
+ struct npc_kpu_profile *profile)
+{
+ int entry, num_entries, max_entries;
+
+ if (profile->cam_entries != profile->action_entries) {
+ dev_err(rvu->dev,
+ "KPU%d: CAM and action entries [%d != %d] not equal\n",
+ kpu, profile->cam_entries, profile->action_entries);
+ }
+
+ max_entries = rvu_read64(rvu, blkaddr, NPC_AF_CONST1) & 0xFFF;
+
+	/* Program CAM match entries for data extracted by the previous KPU */
+ num_entries = min_t(int, profile->cam_entries, max_entries);
+ for (entry = 0; entry < num_entries; entry++)
+ npc_config_kpucam(rvu, blkaddr,
+ &profile->cam[entry], kpu, entry);
+
+ /* Program this KPU's actions */
+ num_entries = min_t(int, profile->action_entries, max_entries);
+ for (entry = 0; entry < num_entries; entry++)
+ npc_config_kpuaction(rvu, blkaddr, &profile->action[entry],
+ kpu, entry, false);
+
+ /* Enable all programmed entries */
+ num_entries = min_t(int, profile->action_entries, profile->cam_entries);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_KPUX_ENTRY_DISX(kpu, 0), enable_mask(num_entries));
+ if (num_entries > 64) {
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_KPUX_ENTRY_DISX(kpu, 1),
+ enable_mask(num_entries - 64));
+ }
+
+ /* Enable this KPU */
+ rvu_write64(rvu, blkaddr, NPC_AF_KPUX_CFG(kpu), 0x01);
+}
+
+static void npc_parser_profile_init(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int num_pkinds, num_kpus, idx;
+ struct npc_pkind *pkind;
+
+ /* Get HW limits */
+ hw->npc_kpus = (rvu_read64(rvu, blkaddr, NPC_AF_CONST) >> 8) & 0x1F;
+
+ /* Disable all KPUs and their entries */
+ for (idx = 0; idx < hw->npc_kpus; idx++) {
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_KPUX_ENTRY_DISX(idx, 0), ~0ULL);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_KPUX_ENTRY_DISX(idx, 1), ~0ULL);
+ rvu_write64(rvu, blkaddr, NPC_AF_KPUX_CFG(idx), 0x00);
+ }
+
+	/* First program the IKPU profile, i.e. the PKIND configs.
+ * Check HW max count to avoid configuring junk or
+ * writing to unsupported CSR addresses.
+ */
+ pkind = &hw->pkind;
+ num_pkinds = ARRAY_SIZE(ikpu_action_entries);
+ num_pkinds = min_t(int, pkind->rsrc.max, num_pkinds);
+
+ for (idx = 0; idx < num_pkinds; idx++)
+ npc_config_kpuaction(rvu, blkaddr,
+ &ikpu_action_entries[idx], 0, idx, true);
+
+ /* Program KPU CAM and Action profiles */
+ num_kpus = ARRAY_SIZE(npc_kpu_profiles);
+ num_kpus = min_t(int, hw->npc_kpus, num_kpus);
+
+ for (idx = 0; idx < num_kpus; idx++)
+ npc_program_kpu_profile(rvu, blkaddr,
+ idx, &npc_kpu_profiles[idx]);
+}
+
+static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
+{
+ int nixlf_count = rvu_get_nixlf_count(rvu);
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int rsvd;
+ u64 cfg;
+
+ /* Get HW limits */
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
+ mcam->banks = (cfg >> 44) & 0xF;
+ mcam->banksize = (cfg >> 28) & 0xFFFF;
+
+	/* Actual number of MCAM entries varies with the entry size */
+ cfg = (rvu_read64(rvu, blkaddr,
+ NPC_AF_INTFX_KEX_CFG(0)) >> 32) & 0x07;
+ mcam->total_entries = (mcam->banks / BIT_ULL(cfg)) * mcam->banksize;
+ mcam->keysize = cfg;
+
+ /* Number of banks combined per MCAM entry */
+ if (cfg == NPC_MCAM_KEY_X4)
+ mcam->banks_per_entry = 4;
+ else if (cfg == NPC_MCAM_KEY_X2)
+ mcam->banks_per_entry = 2;
+ else
+ mcam->banks_per_entry = 1;
+
+	/* Reserve one MCAM entry for each NIX LF to
+	 * guarantee space to install a default matching DMAC rule.
+	 * Also reserve 2 MCAM entries for each PF for default
+	 * channel based matching or 'bcast & promisc' matching to
+	 * support BCAST and PROMISC modes of operation for PFs.
+	 * PF0 is excluded.
+	 */
+ rsvd = (nixlf_count * RSVD_MCAM_ENTRIES_PER_NIXLF) +
+ ((rvu->hw->total_pfs - 1) * RSVD_MCAM_ENTRIES_PER_PF);
+ if (mcam->total_entries <= rsvd) {
+ dev_warn(rvu->dev,
+ "Insufficient NPC MCAM size %d for pkt I/O, exiting\n",
+ mcam->total_entries);
+ return -ENOMEM;
+ }
+
+ mcam->entries = mcam->total_entries - rsvd;
+ mcam->nixlf_offset = mcam->entries;
+ mcam->pf_offset = mcam->nixlf_offset + nixlf_count;
+
+ spin_lock_init(&mcam->lock);
+
+ return 0;
+}
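
A worked example of the sizing above (the hardware constants are
illustrative; the reservation counts of one entry per NIX LF and two
per PF come from the comment in this function):

	/* banks = 8, banksize = 512, keysize = X2 (cfg == 1):
	 *   total_entries = (8 / 2) * 512  = 2048
	 * with nixlf_count = 16 and total_pfs = 16 (PF0 excluded):
	 *   rsvd          = 16 * 1 + 15 * 2 = 46
	 *   entries       = 2048 - 46       = 2002  free for other rules
	 *   nixlf_offset  = 2002  (16 reserved ucast entries)
	 *   pf_offset     = 2018  (30 reserved promisc/bcast entries)
	 */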
+
+int rvu_npc_init(struct rvu *rvu)
+{
+ struct npc_pkind *pkind = &rvu->hw->pkind;
+ u64 keyz = NPC_MCAM_KEY_X2;
+ int blkaddr, err;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0) {
+ dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+ return -ENODEV;
+ }
+
+	/* Allocate resource bitmap for pkind */
+ pkind->rsrc.max = (rvu_read64(rvu, blkaddr,
+ NPC_AF_CONST1) >> 12) & 0xFF;
+ err = rvu_alloc_bitmap(&pkind->rsrc);
+ if (err)
+ return err;
+
+ /* Allocate mem for pkind to PF and channel mapping info */
+ pkind->pfchan_map = devm_kcalloc(rvu->dev, pkind->rsrc.max,
+ sizeof(u32), GFP_KERNEL);
+ if (!pkind->pfchan_map)
+ return -ENOMEM;
+
+ /* Configure KPU profile */
+ npc_parser_profile_init(rvu, blkaddr);
+
+ /* Config Outer L2, IPv4's NPC layer info */
+ rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_OL2,
+ (NPC_LID_LA << 8) | (NPC_LT_LA_ETHER << 4) | 0x0F);
+ rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_OIP4,
+ (NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F);
+
+ /* Enable below for Rx pkts.
+ * - Outer IPv4 header checksum validation.
+ * - Detect outer L2 broadcast address and set NPC_RESULT_S[L2M].
+ */
+ rvu_write64(rvu, blkaddr, NPC_AF_PCK_CFG,
+ rvu_read64(rvu, blkaddr, NPC_AF_PCK_CFG) |
+ BIT_ULL(6) | BIT_ULL(2));
+
+	/* Set RX and TX side MCAM search key size.
+	 * Also enable parse key extract nibbles such that, except for
+	 * layers E to H, the rest of the key is included in the MCAM
+	 * search.
+	 */
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX),
+ ((keyz & 0x3) << 32) | ((1ULL << 20) - 1));
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX),
+ ((keyz & 0x3) << 32) | ((1ULL << 20) - 1));
+
+ err = npc_mcam_rsrcs_init(rvu, blkaddr);
+ if (err)
+ return err;
+
+ /* Config packet data and flags extraction into PARSE result */
+ npc_config_ldata_extract(rvu, blkaddr);
+
+	/* Set TX miss action to UCAST_DEFAULT, i.e.
+	 * transmit the packet on the NIX LF SQ's default channel.
+	 */
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_ACT(NIX_INTF_TX),
+ NIX_TX_ACTIONOP_UCAST_DEFAULT);
+
+ /* If MCAM lookup doesn't result in a match, drop the received packet */
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_ACT(NIX_INTF_RX),
+ NIX_RX_ACTIONOP_DROP);
+
+ return 0;
+}
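
For the KEX_CFG writes above, npc_mcam_rsrcs_init() reads the key size
back from bits 32 and up, and the low bits act as per-nibble extract
enables; (1ULL << 20) - 1 turns on twenty of them. A sketch, with the
exact field widths treated as assumptions:

	/* keyz (NPC_MCAM_KEY_X2) lands at bit 32; each of the low 20
	 * bits enables one 4-bit parse-result nibble for the search
	 * key, which is what leaves layers E..H out while keeping the
	 * key within the X2 size.
	 */
	u64 kex_cfg = ((keyz & 0x3) << 32) | ((1ULL << 20) - 1);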
+
+void rvu_npc_freemem(struct rvu *rvu)
+{
+ struct npc_pkind *pkind = &rvu->hw->pkind;
+
+ kfree(pkind->rsrc.bmap);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
new file mode 100644
index 000000000000..9d7c135c7965
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "rvu_struct.h"
+#include "common.h"
+#include "mbox.h"
+#include "rvu.h"
+
+struct reg_range {
+ u64 start;
+ u64 end;
+};
+
+struct hw_reg_map {
+ u8 regblk;
+ u8 num_ranges;
+ u64 mask;
+#define MAX_REG_RANGES 8
+ struct reg_range range[MAX_REG_RANGES];
+};
+
+static struct hw_reg_map txsch_reg_map[NIX_TXSCH_LVL_CNT] = {
+ {NIX_TXSCH_LVL_SMQ, 2, 0xFFFF, {{0x0700, 0x0708}, {0x1400, 0x14C8} } },
+ {NIX_TXSCH_LVL_TL4, 3, 0xFFFF, {{0x0B00, 0x0B08}, {0x0B10, 0x0B18},
+ {0x1200, 0x12E0} } },
+ {NIX_TXSCH_LVL_TL3, 3, 0xFFFF, {{0x1000, 0x10E0}, {0x1600, 0x1608},
+ {0x1610, 0x1618} } },
+ {NIX_TXSCH_LVL_TL2, 2, 0xFFFF, {{0x0E00, 0x0EE0}, {0x1700, 0x1768} } },
+ {NIX_TXSCH_LVL_TL1, 1, 0xFFFF, {{0x0C00, 0x0D98} } },
+};
+
+bool rvu_check_valid_reg(int regmap, int regblk, u64 reg)
+{
+ int idx;
+ struct hw_reg_map *map;
+
+	/* Only 64-bit aligned offsets are valid */
+ if (reg & 0x07)
+ return false;
+
+ if (regmap == TXSCHQ_HWREGMAP) {
+ if (regblk >= NIX_TXSCH_LVL_CNT)
+ return false;
+ map = &txsch_reg_map[regblk];
+ } else {
+ return false;
+ }
+
+ /* Should never happen */
+ if (map->regblk != regblk)
+ return false;
+
+ reg &= map->mask;
+
+ for (idx = 0; idx < map->num_ranges; idx++) {
+ if (reg >= map->range[idx].start &&
+ reg < map->range[idx].end)
+ return true;
+ }
+ return false;
+}
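
Example use from a hypothetical mbox handler that validates PF-supplied
scheduler register offsets before mirroring them to hardware (the error
handling is a placeholder):

	u64 reg = 0x0E20;	/* inside TL2's [0x0E00, 0x0EE0) range */

	if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, NIX_TXSCH_LVL_TL2, reg))
		return -EINVAL;	/* unaligned or out of range */

Note the half-open ranges in the loop above: an offset equal to a
range's end value is rejected, as is anything not 8-byte aligned.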
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
new file mode 100644
index 000000000000..09a8d61f3144
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -0,0 +1,502 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef RVU_REG_H
+#define RVU_REG_H
+
+/* Admin function registers */
+#define RVU_AF_MSIXTR_BASE (0x10)
+#define RVU_AF_ECO (0x20)
+#define RVU_AF_BLK_RST (0x30)
+#define RVU_AF_PF_BAR4_ADDR (0x40)
+#define RVU_AF_RAS (0x100)
+#define RVU_AF_RAS_W1S (0x108)
+#define RVU_AF_RAS_ENA_W1S (0x110)
+#define RVU_AF_RAS_ENA_W1C (0x118)
+#define RVU_AF_GEN_INT (0x120)
+#define RVU_AF_GEN_INT_W1S (0x128)
+#define RVU_AF_GEN_INT_ENA_W1S (0x130)
+#define RVU_AF_GEN_INT_ENA_W1C (0x138)
+#define RVU_AF_AFPF_MBOX0 (0x02000)
+#define RVU_AF_AFPF_MBOX1 (0x02008)
+#define RVU_AF_AFPFX_MBOXX(a, b) (0x2000 | (a) << 4 | (b) << 3)
+#define RVU_AF_PFME_STATUS (0x2800)
+#define RVU_AF_PFTRPEND (0x2810)
+#define RVU_AF_PFTRPEND_W1S (0x2820)
+#define RVU_AF_PF_RST (0x2840)
+#define RVU_AF_HWVF_RST (0x2850)
+#define RVU_AF_PFAF_MBOX_INT (0x2880)
+#define RVU_AF_PFAF_MBOX_INT_W1S (0x2888)
+#define RVU_AF_PFAF_MBOX_INT_ENA_W1S (0x2890)
+#define RVU_AF_PFAF_MBOX_INT_ENA_W1C (0x2898)
+#define RVU_AF_PFFLR_INT (0x28a0)
+#define RVU_AF_PFFLR_INT_W1S (0x28a8)
+#define RVU_AF_PFFLR_INT_ENA_W1S (0x28b0)
+#define RVU_AF_PFFLR_INT_ENA_W1C (0x28b8)
+#define RVU_AF_PFME_INT (0x28c0)
+#define RVU_AF_PFME_INT_W1S (0x28c8)
+#define RVU_AF_PFME_INT_ENA_W1S (0x28d0)
+#define RVU_AF_PFME_INT_ENA_W1C (0x28d8)
+
+/* Admin function's privileged PF/VF registers */
+#define RVU_PRIV_CONST (0x8000000)
+#define RVU_PRIV_GEN_CFG (0x8000010)
+#define RVU_PRIV_CLK_CFG (0x8000020)
+#define RVU_PRIV_ACTIVE_PC (0x8000030)
+#define RVU_PRIV_PFX_CFG(a) (0x8000100 | (a) << 16)
+#define RVU_PRIV_PFX_MSIX_CFG(a) (0x8000110 | (a) << 16)
+#define RVU_PRIV_PFX_ID_CFG(a) (0x8000120 | (a) << 16)
+#define RVU_PRIV_PFX_INT_CFG(a) (0x8000200 | (a) << 16)
+#define RVU_PRIV_PFX_NIX0_CFG (0x8000300)
+#define RVU_PRIV_PFX_NPA_CFG (0x8000310)
+#define RVU_PRIV_PFX_SSO_CFG (0x8000320)
+#define RVU_PRIV_PFX_SSOW_CFG (0x8000330)
+#define RVU_PRIV_PFX_TIM_CFG (0x8000340)
+#define RVU_PRIV_PFX_CPT0_CFG (0x8000350)
+#define RVU_PRIV_BLOCK_TYPEX_REV(a) (0x8000400 | (a) << 3)
+#define RVU_PRIV_HWVFX_INT_CFG(a) (0x8001280 | (a) << 16)
+#define RVU_PRIV_HWVFX_NIX0_CFG (0x8001300)
+#define RVU_PRIV_HWVFX_NPA_CFG (0x8001310)
+#define RVU_PRIV_HWVFX_SSO_CFG (0x8001320)
+#define RVU_PRIV_HWVFX_SSOW_CFG (0x8001330)
+#define RVU_PRIV_HWVFX_TIM_CFG (0x8001340)
+#define RVU_PRIV_HWVFX_CPT0_CFG (0x8001350)
+
+/* RVU PF registers */
+#define RVU_PF_VFX_PFVF_MBOX0 (0x00000)
+#define RVU_PF_VFX_PFVF_MBOX1 (0x00008)
+#define RVU_PF_VFX_PFVF_MBOXX(a, b) (0x0 | (a) << 12 | (b) << 3)
+#define RVU_PF_VF_BAR4_ADDR (0x10)
+#define RVU_PF_BLOCK_ADDRX_DISC(a) (0x200 | (a) << 3)
+#define RVU_PF_VFME_STATUSX(a) (0x800 | (a) << 3)
+#define RVU_PF_VFTRPENDX(a) (0x820 | (a) << 3)
+#define RVU_PF_VFTRPEND_W1SX(a) (0x840 | (a) << 3)
+#define RVU_PF_VFPF_MBOX_INTX(a) (0x880 | (a) << 3)
+#define RVU_PF_VFPF_MBOX_INT_W1SX(a) (0x8A0 | (a) << 3)
+#define RVU_PF_VFPF_MBOX_INT_ENA_W1SX(a) (0x8C0 | (a) << 3)
+#define RVU_PF_VFPF_MBOX_INT_ENA_W1CX(a) (0x8E0 | (a) << 3)
+#define RVU_PF_VFFLR_INTX(a) (0x900 | (a) << 3)
+#define RVU_PF_VFFLR_INT_W1SX(a) (0x920 | (a) << 3)
+#define RVU_PF_VFFLR_INT_ENA_W1SX(a) (0x940 | (a) << 3)
+#define RVU_PF_VFFLR_INT_ENA_W1CX(a) (0x960 | (a) << 3)
+#define RVU_PF_VFME_INTX(a) (0x980 | (a) << 3)
+#define RVU_PF_VFME_INT_W1SX(a) (0x9A0 | (a) << 3)
+#define RVU_PF_VFME_INT_ENA_W1SX(a) (0x9C0 | (a) << 3)
+#define RVU_PF_VFME_INT_ENA_W1CX(a) (0x9E0 | (a) << 3)
+#define RVU_PF_PFAF_MBOX0 (0xC00)
+#define RVU_PF_PFAF_MBOX1 (0xC08)
+#define RVU_PF_PFAF_MBOXX(a) (0xC00 | (a) << 3)
+#define RVU_PF_INT (0xc20)
+#define RVU_PF_INT_W1S (0xc28)
+#define RVU_PF_INT_ENA_W1S (0xc30)
+#define RVU_PF_INT_ENA_W1C (0xc38)
+#define RVU_PF_MSIX_VECX_ADDR(a) (0x000 | (a) << 4)
+#define RVU_PF_MSIX_VECX_CTL(a) (0x008 | (a) << 4)
+#define RVU_PF_MSIX_PBAX(a) (0xF0000 | (a) << 3)
+
+/* RVU VF registers */
+#define RVU_VF_VFPF_MBOX0 (0x00000)
+#define RVU_VF_VFPF_MBOX1 (0x00008)
+
+/* NPA block's admin function registers */
+#define NPA_AF_BLK_RST (0x0000)
+#define NPA_AF_CONST (0x0010)
+#define NPA_AF_CONST1 (0x0018)
+#define NPA_AF_LF_RST (0x0020)
+#define NPA_AF_GEN_CFG (0x0030)
+#define NPA_AF_NDC_CFG (0x0040)
+#define NPA_AF_INP_CTL (0x00D0)
+#define NPA_AF_ACTIVE_CYCLES_PC (0x00F0)
+#define NPA_AF_AVG_DELAY (0x0100)
+#define NPA_AF_GEN_INT (0x0140)
+#define NPA_AF_GEN_INT_W1S (0x0148)
+#define NPA_AF_GEN_INT_ENA_W1S (0x0150)
+#define NPA_AF_GEN_INT_ENA_W1C (0x0158)
+#define NPA_AF_RVU_INT (0x0160)
+#define NPA_AF_RVU_INT_W1S (0x0168)
+#define NPA_AF_RVU_INT_ENA_W1S (0x0170)
+#define NPA_AF_RVU_INT_ENA_W1C (0x0178)
+#define NPA_AF_ERR_INT (0x0180)
+#define NPA_AF_ERR_INT_W1S (0x0188)
+#define NPA_AF_ERR_INT_ENA_W1S (0x0190)
+#define NPA_AF_ERR_INT_ENA_W1C (0x0198)
+#define NPA_AF_RAS (0x01A0)
+#define NPA_AF_RAS_W1S (0x01A8)
+#define NPA_AF_RAS_ENA_W1S (0x01B0)
+#define NPA_AF_RAS_ENA_W1C (0x01B8)
+#define NPA_AF_BP_TEST (0x0200)
+#define NPA_AF_ECO (0x0300)
+#define NPA_AF_AQ_CFG (0x0600)
+#define NPA_AF_AQ_BASE (0x0610)
+#define NPA_AF_AQ_STATUS (0x0620)
+#define NPA_AF_AQ_DOOR (0x0630)
+#define NPA_AF_AQ_DONE_WAIT (0x0640)
+#define NPA_AF_AQ_DONE (0x0650)
+#define NPA_AF_AQ_DONE_ACK (0x0660)
+#define NPA_AF_AQ_DONE_INT (0x0680)
+#define NPA_AF_AQ_DONE_INT_W1S (0x0688)
+#define NPA_AF_AQ_DONE_ENA_W1S (0x0690)
+#define NPA_AF_AQ_DONE_ENA_W1C (0x0698)
+#define NPA_AF_LFX_AURAS_CFG(a) (0x4000 | (a) << 18)
+#define NPA_AF_LFX_LOC_AURAS_BASE(a) (0x4010 | (a) << 18)
+#define NPA_AF_LFX_QINTS_CFG(a) (0x4100 | (a) << 18)
+#define NPA_AF_LFX_QINTS_BASE(a) (0x4110 | (a) << 18)
+#define NPA_PRIV_AF_INT_CFG (0x10000)
+#define NPA_PRIV_LFX_CFG (0x10010)
+#define NPA_PRIV_LFX_INT_CFG (0x10020)
+#define NPA_AF_RVU_LF_CFG_DEBUG (0x10030)
+
+/* NIX block's admin function registers */
+#define NIX_AF_CFG (0x0000)
+#define NIX_AF_STATUS (0x0010)
+#define NIX_AF_NDC_CFG (0x0018)
+#define NIX_AF_CONST (0x0020)
+#define NIX_AF_CONST1 (0x0028)
+#define NIX_AF_CONST2 (0x0030)
+#define NIX_AF_CONST3 (0x0038)
+#define NIX_AF_SQ_CONST (0x0040)
+#define NIX_AF_CQ_CONST (0x0048)
+#define NIX_AF_RQ_CONST (0x0050)
+#define NIX_AF_PSE_CONST (0x0060)
+#define NIX_AF_TL1_CONST (0x0070)
+#define NIX_AF_TL2_CONST (0x0078)
+#define NIX_AF_TL3_CONST (0x0080)
+#define NIX_AF_TL4_CONST (0x0088)
+#define NIX_AF_MDQ_CONST (0x0090)
+#define NIX_AF_MC_MIRROR_CONST (0x0098)
+#define NIX_AF_LSO_CFG (0x00A8)
+#define NIX_AF_BLK_RST (0x00B0)
+#define NIX_AF_TX_TSTMP_CFG (0x00C0)
+#define NIX_AF_RX_CFG (0x00D0)
+#define NIX_AF_AVG_DELAY (0x00E0)
+#define NIX_AF_CINT_DELAY (0x00F0)
+#define NIX_AF_RX_MCAST_BASE (0x0100)
+#define NIX_AF_RX_MCAST_CFG (0x0110)
+#define NIX_AF_RX_MCAST_BUF_BASE (0x0120)
+#define NIX_AF_RX_MCAST_BUF_CFG (0x0130)
+#define NIX_AF_RX_MIRROR_BUF_BASE (0x0140)
+#define NIX_AF_RX_MIRROR_BUF_CFG (0x0148)
+#define NIX_AF_LF_RST (0x0150)
+#define NIX_AF_GEN_INT (0x0160)
+#define NIX_AF_GEN_INT_W1S (0x0168)
+#define NIX_AF_GEN_INT_ENA_W1S (0x0170)
+#define NIX_AF_GEN_INT_ENA_W1C (0x0178)
+#define NIX_AF_ERR_INT (0x0180)
+#define NIX_AF_ERR_INT_W1S (0x0188)
+#define NIX_AF_ERR_INT_ENA_W1S (0x0190)
+#define NIX_AF_ERR_INT_ENA_W1C (0x0198)
+#define NIX_AF_RAS (0x01A0)
+#define NIX_AF_RAS_W1S (0x01A8)
+#define NIX_AF_RAS_ENA_W1S (0x01B0)
+#define NIX_AF_RAS_ENA_W1C (0x01B8)
+#define NIX_AF_RVU_INT (0x01C0)
+#define NIX_AF_RVU_INT_W1S (0x01C8)
+#define NIX_AF_RVU_INT_ENA_W1S (0x01D0)
+#define NIX_AF_RVU_INT_ENA_W1C (0x01D8)
+#define NIX_AF_TCP_TIMER (0x01E0)
+#define NIX_AF_RX_WQE_TAG_CTL (0x01F0)
+#define NIX_AF_RX_DEF_OL2 (0x0200)
+#define NIX_AF_RX_DEF_OIP4 (0x0210)
+#define NIX_AF_RX_DEF_IIP4 (0x0220)
+#define NIX_AF_RX_DEF_OIP6 (0x0230)
+#define NIX_AF_RX_DEF_IIP6 (0x0240)
+#define NIX_AF_RX_DEF_OTCP (0x0250)
+#define NIX_AF_RX_DEF_ITCP (0x0260)
+#define NIX_AF_RX_DEF_OUDP (0x0270)
+#define NIX_AF_RX_DEF_IUDP (0x0280)
+#define NIX_AF_RX_DEF_OSCTP (0x0290)
+#define NIX_AF_RX_DEF_ISCTP (0x02A0)
+#define NIX_AF_RX_DEF_IPSECX (0x02B0)
+#define NIX_AF_RX_IPSEC_GEN_CFG (0x0300)
+#define NIX_AF_RX_CPTX_INST_ADDR (0x0310)
+#define NIX_AF_NDC_TX_SYNC (0x03F0)
+#define NIX_AF_AQ_CFG (0x0400)
+#define NIX_AF_AQ_BASE (0x0410)
+#define NIX_AF_AQ_STATUS (0x0420)
+#define NIX_AF_AQ_DOOR (0x0430)
+#define NIX_AF_AQ_DONE_WAIT (0x0440)
+#define NIX_AF_AQ_DONE (0x0450)
+#define NIX_AF_AQ_DONE_ACK (0x0460)
+#define NIX_AF_AQ_DONE_TIMER (0x0470)
+#define NIX_AF_AQ_DONE_INT (0x0480)
+#define NIX_AF_AQ_DONE_INT_W1S (0x0488)
+#define NIX_AF_AQ_DONE_ENA_W1S (0x0490)
+#define NIX_AF_AQ_DONE_ENA_W1C (0x0498)
+#define NIX_AF_RX_LINKX_SLX_SPKT_CNT (0x0500)
+#define NIX_AF_RX_LINKX_SLX_SXQE_CNT (0x0510)
+#define NIX_AF_RX_MCAST_JOBSX_SW_CNT (0x0520)
+#define NIX_AF_RX_MIRROR_JOBSX_SW_CNT (0x0530)
+#define NIX_AF_RX_LINKX_CFG(a) (0x0540 | (a) << 16)
+#define NIX_AF_RX_SW_SYNC (0x0550)
+#define NIX_AF_RX_SW_SYNC_DONE (0x0560)
+#define NIX_AF_SEB_ECO (0x0600)
+#define NIX_AF_SEB_TEST_BP (0x0610)
+#define NIX_AF_NORM_TX_FIFO_STATUS (0x0620)
+#define NIX_AF_EXPR_TX_FIFO_STATUS (0x0630)
+#define NIX_AF_SDP_TX_FIFO_STATUS (0x0640)
+#define NIX_AF_TX_NPC_CAPTURE_CONFIG (0x0660)
+#define NIX_AF_TX_NPC_CAPTURE_INFO (0x0670)
+
+#define NIX_AF_DEBUG_NPC_RESP_DATAX(a) (0x680 | (a) << 3)
+#define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16)
+#define NIX_AF_PSE_CHANNEL_LEVEL (0x800)
+#define NIX_AF_PSE_SHAPER_CFG (0x810)
+#define NIX_AF_TX_EXPR_CREDIT (0x830)
+#define NIX_AF_MARK_FORMATX_CTL(a) (0x900 | (a) << 18)
+#define NIX_AF_TX_LINKX_NORM_CREDIT(a) (0xA00 | (a) << 16)
+#define NIX_AF_TX_LINKX_EXPR_CREDIT(a) (0xA10 | (a) << 16)
+#define NIX_AF_TX_LINKX_SW_XOFF(a) (0xA20 | (a) << 16)
+#define NIX_AF_TX_LINKX_HW_XOFF(a) (0xA30 | (a) << 16)
+#define NIX_AF_SDP_LINK_CREDIT (0xa40)
+#define NIX_AF_SDP_SW_XOFFX(a) (0xA60 | (a) << 3)
+#define NIX_AF_SDP_HW_XOFFX(a) (0xAC0 | (a) << 3)
+#define NIX_AF_TL4X_BP_STATUS(a) (0xB00 | (a) << 16)
+#define NIX_AF_TL4X_SDP_LINK_CFG(a) (0xB10 | (a) << 16)
+#define NIX_AF_TL1X_SCHEDULE(a) (0xC00 | (a) << 16)
+#define NIX_AF_TL1X_SHAPE(a) (0xC10 | (a) << 16)
+#define NIX_AF_TL1X_CIR(a) (0xC20 | (a) << 16)
+#define NIX_AF_TL1X_SHAPE_STATE(a) (0xC50 | (a) << 16)
+#define NIX_AF_TL1X_SW_XOFF(a) (0xC70 | (a) << 16)
+#define NIX_AF_TL1X_TOPOLOGY(a) (0xC80 | (a) << 16)
+#define NIX_AF_TL1X_GREEN(a) (0xC90 | (a) << 16)
+#define NIX_AF_TL1X_YELLOW(a) (0xCA0 | (a) << 16)
+#define NIX_AF_TL1X_RED(a) (0xCB0 | (a) << 16)
+#define NIX_AF_TL1X_MD_DEBUG0(a) (0xCC0 | (a) << 16)
+#define NIX_AF_TL1X_MD_DEBUG1(a) (0xCC8 | (a) << 16)
+#define NIX_AF_TL1X_MD_DEBUG2(a) (0xCD0 | (a) << 16)
+#define NIX_AF_TL1X_MD_DEBUG3(a) (0xCD8 | (a) << 16)
+#define NIX_AF_TL1A_DEBUG (0xce0)
+#define NIX_AF_TL1B_DEBUG (0xcf0)
+#define NIX_AF_TL1_DEBUG_GREEN (0xd00)
+#define NIX_AF_TL1_DEBUG_NODE (0xd10)
+#define NIX_AF_TL1X_DROPPED_PACKETS(a) (0xD20 | (a) << 16)
+#define NIX_AF_TL1X_DROPPED_BYTES(a) (0xD30 | (a) << 16)
+#define NIX_AF_TL1X_RED_PACKETS(a) (0xD40 | (a) << 16)
+#define NIX_AF_TL1X_RED_BYTES(a) (0xD50 | (a) << 16)
+#define NIX_AF_TL1X_YELLOW_PACKETS(a) (0xD60 | (a) << 16)
+#define NIX_AF_TL1X_YELLOW_BYTES(a) (0xD70 | (a) << 16)
+#define NIX_AF_TL1X_GREEN_PACKETS(a) (0xD80 | (a) << 16)
+#define NIX_AF_TL1X_GREEN_BYTES(a) (0xD90 | (a) << 16)
+#define NIX_AF_TL2X_SCHEDULE(a) (0xE00 | (a) << 16)
+#define NIX_AF_TL2X_SHAPE(a) (0xE10 | (a) << 16)
+#define NIX_AF_TL2X_CIR(a) (0xE20 | (a) << 16)
+#define NIX_AF_TL2X_PIR(a) (0xE30 | (a) << 16)
+#define NIX_AF_TL2X_SCHED_STATE(a) (0xE40 | (a) << 16)
+#define NIX_AF_TL2X_SHAPE_STATE(a) (0xE50 | (a) << 16)
+#define NIX_AF_TL2X_POINTERS(a) (0xE60 | (a) << 16)
+#define NIX_AF_TL2X_SW_XOFF(a) (0xE70 | (a) << 16)
+#define NIX_AF_TL2X_TOPOLOGY(a) (0xE80 | (a) << 16)
+#define NIX_AF_TL2X_PARENT(a) (0xE88 | (a) << 16)
+#define NIX_AF_TL2X_GREEN(a) (0xE90 | (a) << 16)
+#define NIX_AF_TL2X_YELLOW(a) (0xEA0 | (a) << 16)
+#define NIX_AF_TL2X_RED(a) (0xEB0 | (a) << 16)
+#define NIX_AF_TL2X_MD_DEBUG0(a) (0xEC0 | (a) << 16)
+#define NIX_AF_TL2X_MD_DEBUG1(a) (0xEC8 | (a) << 16)
+#define NIX_AF_TL2X_MD_DEBUG2(a) (0xED0 | (a) << 16)
+#define NIX_AF_TL2X_MD_DEBUG3(a) (0xED8 | (a) << 16)
+#define NIX_AF_TL2A_DEBUG (0xee0)
+#define NIX_AF_TL2B_DEBUG (0xef0)
+#define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (a) << 16)
+#define NIX_AF_TL3X_SHAPE(a) (0x1010 | (a) << 16)
+#define NIX_AF_TL3X_CIR(a) (0x1020 | (a) << 16)
+#define NIX_AF_TL3X_PIR(a) (0x1030 | (a) << 16)
+#define NIX_AF_TL3X_SCHED_STATE(a) (0x1040 | (a) << 16)
+#define NIX_AF_TL3X_SHAPE_STATE(a) (0x1050 | (a) << 16)
+#define NIX_AF_TL3X_POINTERS(a) (0x1060 | (a) << 16)
+#define NIX_AF_TL3X_SW_XOFF(a) (0x1070 | (a) << 16)
+#define NIX_AF_TL3X_TOPOLOGY(a) (0x1080 | (a) << 16)
+#define NIX_AF_TL3X_PARENT(a) (0x1088 | (a) << 16)
+#define NIX_AF_TL3X_GREEN(a) (0x1090 | (a) << 16)
+#define NIX_AF_TL3X_YELLOW(a) (0x10A0 | (a) << 16)
+#define NIX_AF_TL3X_RED(a) (0x10B0 | (a) << 16)
+#define NIX_AF_TL3X_MD_DEBUG0(a) (0x10C0 | (a) << 16)
+#define NIX_AF_TL3X_MD_DEBUG1(a) (0x10C8 | (a) << 16)
+#define NIX_AF_TL3X_MD_DEBUG2(a) (0x10D0 | (a) << 16)
+#define NIX_AF_TL3X_MD_DEBUG3(a) (0x10D8 | (a) << 16)
+#define NIX_AF_TL3A_DEBUG (0x10e0)
+#define NIX_AF_TL3B_DEBUG (0x10f0)
+#define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (a) << 16)
+#define NIX_AF_TL4X_SHAPE(a) (0x1210 | (a) << 16)
+#define NIX_AF_TL4X_CIR(a) (0x1220 | (a) << 16)
+#define NIX_AF_TL4X_PIR(a) (0x1230 | (a) << 16)
+#define NIX_AF_TL4X_SCHED_STATE(a) (0x1240 | (a) << 16)
+#define NIX_AF_TL4X_SHAPE_STATE(a) (0x1250 | (a) << 16)
+#define NIX_AF_TL4X_POINTERS(a) (0x1260 | (a) << 16)
+#define NIX_AF_TL4X_SW_XOFF(a) (0x1270 | (a) << 16)
+#define NIX_AF_TL4X_TOPOLOGY(a) (0x1280 | (a) << 16)
+#define NIX_AF_TL4X_PARENT(a) (0x1288 | (a) << 16)
+#define NIX_AF_TL4X_GREEN(a) (0x1290 | (a) << 16)
+#define NIX_AF_TL4X_YELLOW(a) (0x12A0 | (a) << 16)
+#define NIX_AF_TL4X_RED(a) (0x12B0 | (a) << 16)
+#define NIX_AF_TL4X_MD_DEBUG0(a) (0x12C0 | (a) << 16)
+#define NIX_AF_TL4X_MD_DEBUG1(a) (0x12C8 | (a) << 16)
+#define NIX_AF_TL4X_MD_DEBUG2(a) (0x12D0 | (a) << 16)
+#define NIX_AF_TL4X_MD_DEBUG3(a) (0x12D8 | (a) << 16)
+#define NIX_AF_TL4A_DEBUG (0x12e0)
+#define NIX_AF_TL4B_DEBUG (0x12f0)
+#define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (a) << 16)
+#define NIX_AF_MDQX_SHAPE(a) (0x1410 | (a) << 16)
+#define NIX_AF_MDQX_CIR(a) (0x1420 | (a) << 16)
+#define NIX_AF_MDQX_PIR(a) (0x1430 | (a) << 16)
+#define NIX_AF_MDQX_SCHED_STATE(a) (0x1440 | (a) << 16)
+#define NIX_AF_MDQX_SHAPE_STATE(a) (0x1450 | (a) << 16)
+#define NIX_AF_MDQX_POINTERS(a) (0x1460 | (a) << 16)
+#define NIX_AF_MDQX_SW_XOFF(a) (0x1470 | (a) << 16)
+#define NIX_AF_MDQX_PARENT(a) (0x1480 | (a) << 16)
+#define NIX_AF_MDQX_MD_DEBUG(a) (0x14C0 | (a) << 16)
+#define NIX_AF_MDQX_PTR_FIFO(a) (0x14D0 | (a) << 16)
+#define NIX_AF_MDQA_DEBUG (0x14e0)
+#define NIX_AF_MDQB_DEBUG (0x14f0)
+#define NIX_AF_TL3_TL2X_CFG(a) (0x1600 | (a) << 18)
+#define NIX_AF_TL3_TL2X_BP_STATUS(a) (0x1610 | (a) << 16)
+#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (a) << 16 | (b) << 3)
+#define NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(a, b) (0x1800 | (a) << 18 | (b) << 3)
+#define NIX_AF_TX_MCASTX(a) (0x1900 | (a) << 15)
+#define NIX_AF_TX_VTAG_DEFX_CTL(a) (0x1A00 | (a) << 16)
+#define NIX_AF_TX_VTAG_DEFX_DATA(a) (0x1A10 | (a) << 16)
+#define NIX_AF_RX_BPIDX_STATUS(a) (0x1A20 | (a) << 17)
+#define NIX_AF_RX_CHANX_CFG(a) (0x1A30 | (a) << 15)
+#define NIX_AF_CINT_TIMERX(a) (0x1A40 | (a) << 18)
+#define NIX_AF_LSO_FORMATX_FIELDX(a, b) (0x1B00 | (a) << 16 | (b) << 3)
+#define NIX_AF_LFX_CFG(a) (0x4000 | (a) << 17)
+#define NIX_AF_LFX_SQS_CFG(a) (0x4020 | (a) << 17)
+#define NIX_AF_LFX_TX_CFG2(a) (0x4028 | (a) << 17)
+#define NIX_AF_LFX_SQS_BASE(a) (0x4030 | (a) << 17)
+#define NIX_AF_LFX_RQS_CFG(a) (0x4040 | (a) << 17)
+#define NIX_AF_LFX_RQS_BASE(a) (0x4050 | (a) << 17)
+#define NIX_AF_LFX_CQS_CFG(a) (0x4060 | (a) << 17)
+#define NIX_AF_LFX_CQS_BASE(a) (0x4070 | (a) << 17)
+#define NIX_AF_LFX_TX_CFG(a) (0x4080 | (a) << 17)
+#define NIX_AF_LFX_TX_PARSE_CFG(a) (0x4090 | (a) << 17)
+#define NIX_AF_LFX_RX_CFG(a) (0x40A0 | (a) << 17)
+#define NIX_AF_LFX_RSS_CFG(a) (0x40C0 | (a) << 17)
+#define NIX_AF_LFX_RSS_BASE(a) (0x40D0 | (a) << 17)
+#define NIX_AF_LFX_QINTS_CFG(a) (0x4100 | (a) << 17)
+#define NIX_AF_LFX_QINTS_BASE(a) (0x4110 | (a) << 17)
+#define NIX_AF_LFX_CINTS_CFG(a) (0x4120 | (a) << 17)
+#define NIX_AF_LFX_CINTS_BASE(a) (0x4130 | (a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_CFG0(a) (0x4140 | (a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_CFG1(a) (0x4148 | (a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_DYNO_CFG(a) (0x4150 | (a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_DYNO_BASE(a) (0x4158 | (a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_SA_BASE(a) (0x4170 | (a) << 17)
+#define NIX_AF_LFX_TX_STATUS(a) (0x4180 | (a) << 17)
+#define NIX_AF_LFX_RX_VTAG_TYPEX(a, b) (0x4200 | (a) << 17 | (b) << 3)
+#define NIX_AF_LFX_LOCKX(a, b) (0x4300 | (a) << 17 | (b) << 3)
+#define NIX_AF_LFX_TX_STATX(a, b) (0x4400 | (a) << 17 | (b) << 3)
+#define NIX_AF_LFX_RX_STATX(a, b) (0x4500 | (a) << 17 | (b) << 3)
+#define NIX_AF_LFX_RSS_GRPX(a, b) (0x4600 | (a) << 17 | (b) << 3)
+#define NIX_AF_RX_NPC_MC_RCV (0x4700)
+#define NIX_AF_RX_NPC_MC_DROP (0x4710)
+#define NIX_AF_RX_NPC_MIRROR_RCV (0x4720)
+#define NIX_AF_RX_NPC_MIRROR_DROP (0x4730)
+#define NIX_AF_RX_ACTIVE_CYCLES_PCX(a) (0x4800 | (a) << 16)
+
+#define NIX_PRIV_AF_INT_CFG (0x8000000)
+#define NIX_PRIV_LFX_CFG (0x8000010)
+#define NIX_PRIV_LFX_INT_CFG (0x8000020)
+#define NIX_AF_RVU_LF_CFG_DEBUG (0x8000030)
+
+/* SSO */
+#define SSO_AF_CONST (0x1000)
+#define SSO_AF_CONST1 (0x1008)
+#define SSO_AF_BLK_RST (0x10f8)
+#define SSO_AF_LF_HWGRP_RST (0x10e0)
+#define SSO_AF_RVU_LF_CFG_DEBUG (0x3800)
+#define SSO_PRIV_LFX_HWGRP_CFG (0x10000)
+#define SSO_PRIV_LFX_HWGRP_INT_CFG (0x20000)
+
+/* SSOW */
+#define SSOW_AF_RVU_LF_HWS_CFG_DEBUG (0x0010)
+#define SSOW_AF_LF_HWS_RST (0x0030)
+#define SSOW_PRIV_LFX_HWS_CFG (0x1000)
+#define SSOW_PRIV_LFX_HWS_INT_CFG (0x2000)
+
+/* TIM */
+#define TIM_AF_CONST (0x90)
+#define TIM_PRIV_LFX_CFG (0x20000)
+#define TIM_PRIV_LFX_INT_CFG (0x24000)
+#define TIM_AF_RVU_LF_CFG_DEBUG (0x30000)
+#define TIM_AF_BLK_RST (0x10)
+#define TIM_AF_LF_RST (0x20)
+
+/* CPT */
+#define CPT_AF_CONSTANTS0 (0x0000)
+#define CPT_PRIV_LFX_CFG (0x41000)
+#define CPT_PRIV_LFX_INT_CFG (0x43000)
+#define CPT_AF_RVU_LF_CFG_DEBUG (0x45000)
+#define CPT_AF_LF_RST (0x44000)
+#define CPT_AF_BLK_RST (0x46000)
+
+#define NDC_AF_BLK_RST (0x002F0)
+#define NPC_AF_BLK_RST (0x00040)
+
+/* NPC */
+#define NPC_AF_CFG (0x00000)
+#define NPC_AF_ACTIVE_PC (0x00010)
+#define NPC_AF_CONST (0x00020)
+#define NPC_AF_CONST1 (0x00030)
+#define NPC_AF_BLK_RST (0x00040)
+#define NPC_AF_MCAM_SCRUB_CTL (0x000a0)
+#define NPC_AF_KCAM_SCRUB_CTL (0x000b0)
+#define NPC_AF_KPUX_CFG(a) (0x00500 | (a) << 3)
+#define NPC_AF_PCK_CFG (0x00600)
+#define NPC_AF_PCK_DEF_OL2 (0x00610)
+#define NPC_AF_PCK_DEF_OIP4 (0x00620)
+#define NPC_AF_PCK_DEF_OIP6 (0x00630)
+#define NPC_AF_PCK_DEF_IIP4 (0x00640)
+#define NPC_AF_KEX_LDATAX_FLAGS_CFG(a) (0x00800 | (a) << 3)
+#define NPC_AF_INTFX_KEX_CFG(a) (0x01010 | (a) << 8)
+#define NPC_AF_PKINDX_ACTION0(a) (0x80000ull | (a) << 6)
+#define NPC_AF_PKINDX_ACTION1(a) (0x80008ull | (a) << 6)
+#define NPC_AF_PKINDX_CPI_DEFX(a, b) (0x80020ull | (a) << 6 | (b) << 3)
+#define NPC_AF_KPUX_ENTRYX_CAMX(a, b, c) \
+ (0x100000 | (a) << 14 | (b) << 6 | (c) << 3)
+#define NPC_AF_KPUX_ENTRYX_ACTION0(a, b) \
+ (0x100020 | (a) << 14 | (b) << 6)
+#define NPC_AF_KPUX_ENTRYX_ACTION1(a, b) \
+ (0x100028 | (a) << 14 | (b) << 6)
+#define NPC_AF_KPUX_ENTRY_DISX(a, b) (0x180000 | (a) << 6 | (b) << 3)
+#define NPC_AF_CPIX_CFG(a) (0x200000 | (a) << 3)
+#define NPC_AF_INTFX_LIDX_LTX_LDX_CFG(a, b, c, d) \
+ (0x900000 | (a) << 16 | (b) << 12 | (c) << 5 | (d) << 3)
+#define NPC_AF_INTFX_LDATAX_FLAGSX_CFG(a, b, c) \
+ (0x980000 | (a) << 16 | (b) << 12 | (c) << 3)
+#define NPC_AF_MCAMEX_BANKX_CAMX_INTF(a, b, c) \
+ (0x1000000ull | (a) << 10 | (b) << 6 | (c) << 3)
+#define NPC_AF_MCAMEX_BANKX_CAMX_W0(a, b, c) \
+ (0x1000010ull | (a) << 10 | (b) << 6 | (c) << 3)
+#define NPC_AF_MCAMEX_BANKX_CAMX_W1(a, b, c) \
+ (0x1000020ull | (a) << 10 | (b) << 6 | (c) << 3)
+#define NPC_AF_MCAMEX_BANKX_CFG(a, b) (0x1800000ull | (a) << 8 | (b) << 4)
+#define NPC_AF_MCAMEX_BANKX_STAT_ACT(a, b) \
+ (0x1880000 | (a) << 8 | (b) << 4)
+#define NPC_AF_MATCH_STATX(a) (0x1880008 | (a) << 8)
+#define NPC_AF_INTFX_MISS_STAT_ACT(a) (0x1880040 + (a) * 0x8)
+#define NPC_AF_MCAMEX_BANKX_ACTION(a, b) (0x1900000ull | (a) << 8 | (b) << 4)
+#define NPC_AF_MCAMEX_BANKX_TAG_ACT(a, b) \
+ (0x1900008 | (a) << 8 | (b) << 4)
+#define NPC_AF_INTFX_MISS_ACT(a) (0x1a00000 | (a) << 4)
+#define NPC_AF_INTFX_MISS_TAG_ACT(a) (0x1b00008 | (a) << 4)
+#define NPC_AF_MCAM_BANKX_HITX(a, b) (0x1c80000 | (a) << 8 | (b) << 4)
+#define NPC_AF_LKUP_CTL (0x2000000)
+#define NPC_AF_LKUP_DATAX(a) (0x2000200 | (a) << 4)
+#define NPC_AF_LKUP_RESULTX(a) (0x2000400 | (a) << 4)
+#define NPC_AF_INTFX_STAT(a) (0x2000800 | (a) << 4)
+#define NPC_AF_DBG_CTL (0x3000000)
+#define NPC_AF_DBG_STATUS (0x3000010)
+#define NPC_AF_KPUX_DBG(a) (0x3000020 | (a) << 8)
+#define NPC_AF_IKPU_ERR_CTL (0x3000080)
+#define NPC_AF_KPUX_ERR_CTL(a) (0x30000a0 | (a) << 8)
+#define NPC_AF_MCAM_DBG (0x3001000)
+#define NPC_AF_DBG_DATAX(a) (0x3001400 | (a) << 4)
+#define NPC_AF_DBG_RESULTX(a) (0x3001800 | (a) << 4)
+
+#endif /* RVU_REG_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
new file mode 100644
index 000000000000..f920dac74e6c
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -0,0 +1,917 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef RVU_STRUCT_H
+#define RVU_STRUCT_H
+
+/* RVU Block Address Enumeration */
+enum rvu_block_addr_e {
+ BLKADDR_RVUM = 0x0ULL,
+ BLKADDR_LMT = 0x1ULL,
+ BLKADDR_MSIX = 0x2ULL,
+ BLKADDR_NPA = 0x3ULL,
+ BLKADDR_NIX0 = 0x4ULL,
+ BLKADDR_NIX1 = 0x5ULL,
+ BLKADDR_NPC = 0x6ULL,
+ BLKADDR_SSO = 0x7ULL,
+ BLKADDR_SSOW = 0x8ULL,
+ BLKADDR_TIM = 0x9ULL,
+ BLKADDR_CPT0 = 0xaULL,
+ BLKADDR_CPT1 = 0xbULL,
+ BLKADDR_NDC0 = 0xcULL,
+ BLKADDR_NDC1 = 0xdULL,
+ BLKADDR_NDC2 = 0xeULL,
+ BLK_COUNT = 0xfULL,
+};
+
+/* RVU Block Type Enumeration */
+enum rvu_block_type_e {
+ BLKTYPE_RVUM = 0x0,
+ BLKTYPE_MSIX = 0x1,
+ BLKTYPE_LMT = 0x2,
+ BLKTYPE_NIX = 0x3,
+ BLKTYPE_NPA = 0x4,
+ BLKTYPE_NPC = 0x5,
+ BLKTYPE_SSO = 0x6,
+ BLKTYPE_SSOW = 0x7,
+ BLKTYPE_TIM = 0x8,
+ BLKTYPE_CPT = 0x9,
+ BLKTYPE_NDC = 0xa,
+ BLKTYPE_MAX = 0xa,
+};
+
+/* RVU Admin function Interrupt Vector Enumeration */
+enum rvu_af_int_vec_e {
+ RVU_AF_INT_VEC_POISON = 0x0,
+ RVU_AF_INT_VEC_PFFLR = 0x1,
+ RVU_AF_INT_VEC_PFME = 0x2,
+ RVU_AF_INT_VEC_GEN = 0x3,
+ RVU_AF_INT_VEC_MBOX = 0x4,
+ RVU_AF_INT_VEC_CNT = 0x5,
+};
+
+/* RVU PF Interrupt Vector Enumeration */
+enum rvu_pf_int_vec_e {
+ RVU_PF_INT_VEC_VFFLR0 = 0x0,
+ RVU_PF_INT_VEC_VFFLR1 = 0x1,
+ RVU_PF_INT_VEC_VFME0 = 0x2,
+ RVU_PF_INT_VEC_VFME1 = 0x3,
+ RVU_PF_INT_VEC_VFPF_MBOX0 = 0x4,
+ RVU_PF_INT_VEC_VFPF_MBOX1 = 0x5,
+ RVU_PF_INT_VEC_AFPF_MBOX = 0x6,
+ RVU_PF_INT_VEC_CNT = 0x7,
+};
+
+/* NPA admin queue completion enumeration */
+enum npa_aq_comp {
+ NPA_AQ_COMP_NOTDONE = 0x0,
+ NPA_AQ_COMP_GOOD = 0x1,
+ NPA_AQ_COMP_SWERR = 0x2,
+ NPA_AQ_COMP_CTX_POISON = 0x3,
+ NPA_AQ_COMP_CTX_FAULT = 0x4,
+ NPA_AQ_COMP_LOCKERR = 0x5,
+};
+
+/* NPA admin queue context types */
+enum npa_aq_ctype {
+ NPA_AQ_CTYPE_AURA = 0x0,
+ NPA_AQ_CTYPE_POOL = 0x1,
+};
+
+/* NPA admin queue instruction opcodes */
+enum npa_aq_instop {
+ NPA_AQ_INSTOP_NOP = 0x0,
+ NPA_AQ_INSTOP_INIT = 0x1,
+ NPA_AQ_INSTOP_WRITE = 0x2,
+ NPA_AQ_INSTOP_READ = 0x3,
+ NPA_AQ_INSTOP_LOCK = 0x4,
+ NPA_AQ_INSTOP_UNLOCK = 0x5,
+};
+
+/* NPA admin queue instruction structure */
+struct npa_aq_inst_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 doneint : 1; /* W0 */
+ u64 reserved_44_62 : 19;
+ u64 cindex : 20;
+ u64 reserved_17_23 : 7;
+ u64 lf : 9;
+ u64 ctype : 4;
+ u64 op : 4;
+#else
+ u64 op : 4;
+ u64 ctype : 4;
+ u64 lf : 9;
+ u64 reserved_17_23 : 7;
+ u64 cindex : 20;
+ u64 reserved_44_62 : 19;
+ u64 doneint : 1;
+#endif
+ u64 res_addr; /* W1 */
+};
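
Each of these hardware structures is declared twice so that the bit
layout seen in memory matches the device's little-endian view on either
host bitfield order. A sketch of packing one instruction (queueing and
doorbell plumbing omitted; res_iova is a placeholder for a DMA-mapped
struct npa_aq_res_s):

	struct npa_aq_inst_s inst = { 0 };

	inst.op       = NPA_AQ_INSTOP_INIT;	/* initialise a context */
	inst.ctype    = NPA_AQ_CTYPE_AURA;	/* ... of type aura     */
	inst.lf       = 0;			/* NPA LF slot 0        */
	inst.cindex   = 5;			/* aura number 5        */
	inst.res_addr = res_iova;		/* completion writeback */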
+
+/* NPA admin queue result structure */
+struct npa_aq_res_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_17_63 : 47; /* W0 */
+ u64 doneint : 1;
+ u64 compcode : 8;
+ u64 ctype : 4;
+ u64 op : 4;
+#else
+ u64 op : 4;
+ u64 ctype : 4;
+ u64 compcode : 8;
+ u64 doneint : 1;
+ u64 reserved_17_63 : 47;
+#endif
+ u64 reserved_64_127; /* W1 */
+};
+
+struct npa_aura_s {
+ u64 pool_addr; /* W0 */
+#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */
+ u64 avg_level : 8;
+ u64 reserved_118_119 : 2;
+ u64 shift : 6;
+ u64 aura_drop : 8;
+ u64 reserved_98_103 : 6;
+ u64 bp_ena : 2;
+ u64 aura_drop_ena : 1;
+ u64 pool_drop_ena : 1;
+ u64 reserved_93 : 1;
+ u64 avg_con : 9;
+ u64 pool_way_mask : 16;
+ u64 pool_caching : 1;
+ u64 reserved_65 : 2;
+ u64 ena : 1;
+#else
+ u64 ena : 1;
+ u64 reserved_65 : 2;
+ u64 pool_caching : 1;
+ u64 pool_way_mask : 16;
+ u64 avg_con : 9;
+ u64 reserved_93 : 1;
+ u64 pool_drop_ena : 1;
+ u64 aura_drop_ena : 1;
+ u64 bp_ena : 2;
+ u64 reserved_98_103 : 6;
+ u64 aura_drop : 8;
+ u64 shift : 6;
+ u64 reserved_118_119 : 2;
+ u64 avg_level : 8;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */
+ u64 reserved_189_191 : 3;
+ u64 nix1_bpid : 9;
+ u64 reserved_177_179 : 3;
+ u64 nix0_bpid : 9;
+ u64 reserved_164_167 : 4;
+ u64 count : 36;
+#else
+ u64 count : 36;
+ u64 reserved_164_167 : 4;
+ u64 nix0_bpid : 9;
+ u64 reserved_177_179 : 3;
+ u64 nix1_bpid : 9;
+ u64 reserved_189_191 : 3;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */
+ u64 reserved_252_255 : 4;
+ u64 fc_hyst_bits : 4;
+ u64 fc_stype : 2;
+ u64 fc_up_crossing : 1;
+ u64 fc_ena : 1;
+ u64 reserved_240_243 : 4;
+ u64 bp : 8;
+ u64 reserved_228_231 : 4;
+ u64 limit : 36;
+#else
+ u64 limit : 36;
+ u64 reserved_228_231 : 4;
+ u64 bp : 8;
+ u64 reserved_240_243 : 4;
+ u64 fc_ena : 1;
+ u64 fc_up_crossing : 1;
+ u64 fc_stype : 2;
+ u64 fc_hyst_bits : 4;
+ u64 reserved_252_255 : 4;
+#endif
+ u64 fc_addr; /* W4 */
+#if defined(__BIG_ENDIAN_BITFIELD) /* W5 */
+ u64 reserved_379_383 : 5;
+ u64 err_qint_idx : 7;
+ u64 reserved_371 : 1;
+ u64 thresh_qint_idx : 7;
+ u64 reserved_363 : 1;
+ u64 thresh_up : 1;
+ u64 thresh_int_ena : 1;
+ u64 thresh_int : 1;
+ u64 err_int_ena : 8;
+ u64 err_int : 8;
+ u64 update_time : 16;
+ u64 pool_drop : 8;
+#else
+ u64 pool_drop : 8;
+ u64 update_time : 16;
+ u64 err_int : 8;
+ u64 err_int_ena : 8;
+ u64 thresh_int : 1;
+ u64 thresh_int_ena : 1;
+ u64 thresh_up : 1;
+ u64 reserved_363 : 1;
+ u64 thresh_qint_idx : 7;
+ u64 reserved_371 : 1;
+ u64 err_qint_idx : 7;
+ u64 reserved_379_383 : 5;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W6 */
+ u64 reserved_420_447 : 28;
+ u64 thresh : 36;
+#else
+ u64 thresh : 36;
+ u64 reserved_420_447 : 28;
+#endif
+ u64 reserved_448_511; /* W7 */
+};
+
+struct npa_pool_s {
+ u64 stack_base; /* W0 */
+#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */
+ u64 reserved_115_127 : 13;
+ u64 buf_size : 11;
+ u64 reserved_100_103 : 4;
+ u64 buf_offset : 12;
+ u64 stack_way_mask : 16;
+ u64 reserved_70_71 : 3;
+ u64 stack_caching : 1;
+ u64 reserved_66_67 : 2;
+ u64 nat_align : 1;
+ u64 ena : 1;
+#else
+ u64 ena : 1;
+ u64 nat_align : 1;
+ u64 reserved_66_67 : 2;
+ u64 stack_caching : 1;
+ u64 reserved_70_71 : 3;
+ u64 stack_way_mask : 16;
+ u64 buf_offset : 12;
+ u64 reserved_100_103 : 4;
+ u64 buf_size : 11;
+ u64 reserved_115_127 : 13;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */
+ u64 stack_pages : 32;
+ u64 stack_max_pages : 32;
+#else
+ u64 stack_max_pages : 32;
+ u64 stack_pages : 32;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */
+ u64 reserved_240_255 : 16;
+ u64 op_pc : 48;
+#else
+ u64 op_pc : 48;
+ u64 reserved_240_255 : 16;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W4 */
+ u64 reserved_316_319 : 4;
+ u64 update_time : 16;
+ u64 reserved_297_299 : 3;
+ u64 fc_up_crossing : 1;
+ u64 fc_hyst_bits : 4;
+ u64 fc_stype : 2;
+ u64 fc_ena : 1;
+ u64 avg_con : 9;
+ u64 avg_level : 8;
+ u64 reserved_270_271 : 2;
+ u64 shift : 6;
+ u64 reserved_260_263 : 4;
+ u64 stack_offset : 4;
+#else
+ u64 stack_offset : 4;
+ u64 reserved_260_263 : 4;
+ u64 shift : 6;
+ u64 reserved_270_271 : 2;
+ u64 avg_level : 8;
+ u64 avg_con : 9;
+ u64 fc_ena : 1;
+ u64 fc_stype : 2;
+ u64 fc_hyst_bits : 4;
+ u64 fc_up_crossing : 1;
+ u64 reserved_297_299 : 3;
+ u64 update_time : 16;
+ u64 reserved_316_319 : 4;
+#endif
+ u64 fc_addr; /* W5 */
+ u64 ptr_start; /* W6 */
+ u64 ptr_end; /* W7 */
+#if defined(__BIG_ENDIAN_BITFIELD) /* W8 */
+ u64 reserved_571_575 : 5;
+ u64 err_qint_idx : 7;
+ u64 reserved_563 : 1;
+ u64 thresh_qint_idx : 7;
+ u64 reserved_555 : 1;
+ u64 thresh_up : 1;
+ u64 thresh_int_ena : 1;
+ u64 thresh_int : 1;
+ u64 err_int_ena : 8;
+ u64 err_int : 8;
+ u64 reserved_512_535 : 24;
+#else
+ u64 reserved_512_535 : 24;
+ u64 err_int : 8;
+ u64 err_int_ena : 8;
+ u64 thresh_int : 1;
+ u64 thresh_int_ena : 1;
+ u64 thresh_up : 1;
+ u64 reserved_555 : 1;
+ u64 thresh_qint_idx : 7;
+ u64 reserved_563 : 1;
+ u64 err_qint_idx : 7;
+ u64 reserved_571_575 : 5;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W9 */
+ u64 reserved_612_639 : 28;
+ u64 thresh : 36;
+#else
+ u64 thresh : 36;
+ u64 reserved_612_639 : 28;
+#endif
+ u64 reserved_640_703; /* W10 */
+ u64 reserved_704_767; /* W11 */
+ u64 reserved_768_831; /* W12 */
+ u64 reserved_832_895; /* W13 */
+ u64 reserved_896_959; /* W14 */
+ u64 reserved_960_1023; /* W15 */
+};
+
+/* NIX admin queue completion status */
+enum nix_aq_comp {
+ NIX_AQ_COMP_NOTDONE = 0x0,
+ NIX_AQ_COMP_GOOD = 0x1,
+ NIX_AQ_COMP_SWERR = 0x2,
+ NIX_AQ_COMP_CTX_POISON = 0x3,
+ NIX_AQ_COMP_CTX_FAULT = 0x4,
+ NIX_AQ_COMP_LOCKERR = 0x5,
+ NIX_AQ_COMP_SQB_ALLOC_FAIL = 0x6,
+};
+
+/* NIX admin queue context types */
+enum nix_aq_ctype {
+ NIX_AQ_CTYPE_RQ = 0x0,
+ NIX_AQ_CTYPE_SQ = 0x1,
+ NIX_AQ_CTYPE_CQ = 0x2,
+ NIX_AQ_CTYPE_MCE = 0x3,
+ NIX_AQ_CTYPE_RSS = 0x4,
+ NIX_AQ_CTYPE_DYNO = 0x5,
+};
+
+/* NIX admin queue instruction opcodes */
+enum nix_aq_instop {
+ NIX_AQ_INSTOP_NOP = 0x0,
+ NIX_AQ_INSTOP_INIT = 0x1,
+ NIX_AQ_INSTOP_WRITE = 0x2,
+ NIX_AQ_INSTOP_READ = 0x3,
+ NIX_AQ_INSTOP_LOCK = 0x4,
+ NIX_AQ_INSTOP_UNLOCK = 0x5,
+};
+
+/* NIX admin queue instruction structure */
+struct nix_aq_inst_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 doneint : 1; /* W0 */
+ u64 reserved_44_62 : 19;
+ u64 cindex : 20;
+ u64 reserved_15_23 : 9;
+ u64 lf : 7;
+ u64 ctype : 4;
+ u64 op : 4;
+#else
+ u64 op : 4;
+ u64 ctype : 4;
+ u64 lf : 7;
+ u64 reserved_15_23 : 9;
+ u64 cindex : 20;
+ u64 reserved_44_62 : 19;
+ u64 doneint : 1;
+#endif
+ u64 res_addr; /* W1 */
+};
+
+/* NIX admin queue result structure */
+struct nix_aq_res_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_17_63 : 47; /* W0 */
+ u64 doneint : 1;
+ u64 compcode : 8;
+ u64 ctype : 4;
+ u64 op : 4;
+#else
+ u64 op : 4;
+ u64 ctype : 4;
+ u64 compcode : 8;
+ u64 doneint : 1;
+ u64 reserved_17_63 : 47;
+#endif
+ u64 reserved_64_127; /* W1 */
+};
+
+/* NIX Completion queue context structure */
+struct nix_cq_ctx_s {
+ u64 base;
+#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */
+ u64 wrptr : 20;
+ u64 avg_con : 9;
+ u64 cint_idx : 7;
+ u64 cq_err : 1;
+ u64 qint_idx : 7;
+ u64 rsvd_81_83 : 3;
+ u64 bpid : 9;
+ u64 rsvd_69_71 : 3;
+ u64 bp_ena : 1;
+ u64 rsvd_64_67 : 4;
+#else
+ u64 rsvd_64_67 : 4;
+ u64 bp_ena : 1;
+ u64 rsvd_69_71 : 3;
+ u64 bpid : 9;
+ u64 rsvd_81_83 : 3;
+ u64 qint_idx : 7;
+ u64 cq_err : 1;
+ u64 cint_idx : 7;
+ u64 avg_con : 9;
+ u64 wrptr : 20;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */
+ u64 update_time : 16;
+ u64 avg_level : 8;
+ u64 head : 20;
+ u64 tail : 20;
+#else
+ u64 tail : 20;
+ u64 head : 20;
+ u64 avg_level : 8;
+ u64 update_time : 16;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */
+ u64 cq_err_int_ena : 8;
+ u64 cq_err_int : 8;
+ u64 qsize : 4;
+ u64 rsvd_233_235 : 3;
+ u64 caching : 1;
+ u64 substream : 20;
+ u64 rsvd_210_211 : 2;
+ u64 ena : 1;
+ u64 drop_ena : 1;
+ u64 drop : 8;
+ u64 dp : 8;
+#else
+ u64 dp : 8;
+ u64 drop : 8;
+ u64 drop_ena : 1;
+ u64 ena : 1;
+ u64 rsvd_210_211 : 2;
+ u64 substream : 20;
+ u64 caching : 1;
+ u64 rsvd_233_235 : 3;
+ u64 qsize : 4;
+ u64 cq_err_int : 8;
+ u64 cq_err_int_ena : 8;
+#endif
+};
+
+/* NIX Receive queue context structure */
+struct nix_rq_ctx_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* W0 */
+ u64 wqe_aura : 20;
+ u64 substream : 20;
+ u64 cq : 20;
+ u64 ena_wqwd : 1;
+ u64 ipsech_ena : 1;
+ u64 sso_ena : 1;
+ u64 ena : 1;
+#else
+ u64 ena : 1;
+ u64 sso_ena : 1;
+ u64 ipsech_ena : 1;
+ u64 ena_wqwd : 1;
+ u64 cq : 20;
+ u64 substream : 20;
+ u64 wqe_aura : 20;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */
+ u64 rsvd_127_122 : 6;
+ u64 lpb_drop_ena : 1;
+ u64 spb_drop_ena : 1;
+ u64 xqe_drop_ena : 1;
+ u64 wqe_caching : 1;
+ u64 pb_caching : 2;
+ u64 sso_tt : 2;
+ u64 sso_grp : 10;
+ u64 lpb_aura : 20;
+ u64 spb_aura : 20;
+#else
+ u64 spb_aura : 20;
+ u64 lpb_aura : 20;
+ u64 sso_grp : 10;
+ u64 sso_tt : 2;
+ u64 pb_caching : 2;
+ u64 wqe_caching : 1;
+ u64 xqe_drop_ena : 1;
+ u64 spb_drop_ena : 1;
+ u64 lpb_drop_ena : 1;
+ u64 rsvd_127_122 : 6;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */
+ u64 xqe_hdr_split : 1;
+ u64 xqe_imm_copy : 1;
+ u64 rsvd_189_184 : 6;
+ u64 xqe_imm_size : 6;
+ u64 later_skip : 6;
+ u64 rsvd_171 : 1;
+ u64 first_skip : 7;
+ u64 lpb_sizem1 : 12;
+ u64 spb_ena : 1;
+ u64 rsvd_150_148 : 3;
+ u64 wqe_skip : 2;
+ u64 spb_sizem1 : 6;
+ u64 rsvd_139_128 : 12;
+#else
+ u64 rsvd_139_128 : 12;
+ u64 spb_sizem1 : 6;
+ u64 wqe_skip : 2;
+ u64 rsvd_150_148 : 3;
+ u64 spb_ena : 1;
+ u64 lpb_sizem1 : 12;
+ u64 first_skip : 7;
+ u64 rsvd_171 : 1;
+ u64 later_skip : 6;
+ u64 xqe_imm_size : 6;
+ u64 rsvd_189_184 : 6;
+ u64 xqe_imm_copy : 1;
+ u64 xqe_hdr_split : 1;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */
+ u64 spb_pool_pass : 8;
+ u64 spb_pool_drop : 8;
+ u64 spb_aura_pass : 8;
+ u64 spb_aura_drop : 8;
+ u64 wqe_pool_pass : 8;
+ u64 wqe_pool_drop : 8;
+ u64 xqe_pass : 8;
+ u64 xqe_drop : 8;
+#else
+ u64 xqe_drop : 8;
+ u64 xqe_pass : 8;
+ u64 wqe_pool_drop : 8;
+ u64 wqe_pool_pass : 8;
+ u64 spb_aura_drop : 8;
+ u64 spb_aura_pass : 8;
+ u64 spb_pool_drop : 8;
+ u64 spb_pool_pass : 8;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W4 */
+ u64 rsvd_319_315 : 5;
+ u64 qint_idx : 7;
+ u64 rq_int_ena : 8;
+ u64 rq_int : 8;
+ u64 rsvd_291_288 : 4;
+ u64 lpb_pool_pass : 8;
+ u64 lpb_pool_drop : 8;
+ u64 lpb_aura_pass : 8;
+ u64 lpb_aura_drop : 8;
+#else
+ u64 lpb_aura_drop : 8;
+ u64 lpb_aura_pass : 8;
+ u64 lpb_pool_drop : 8;
+ u64 lpb_pool_pass : 8;
+ u64 rsvd_291_288 : 4;
+ u64 rq_int : 8;
+ u64 rq_int_ena : 8;
+ u64 qint_idx : 7;
+ u64 rsvd_319_315 : 5;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W5 */
+ u64 rsvd_383_366 : 18;
+ u64 flow_tagw : 6;
+ u64 bad_utag : 8;
+ u64 good_utag : 8;
+ u64 ltag : 24;
+#else
+ u64 ltag : 24;
+ u64 good_utag : 8;
+ u64 bad_utag : 8;
+ u64 flow_tagw : 6;
+ u64 rsvd_383_366 : 18;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W6 */
+ u64 rsvd_447_432 : 16;
+ u64 octs : 48;
+#else
+ u64 octs : 48;
+ u64 rsvd_447_432 : 16;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W7 */
+ u64 rsvd_511_496 : 16;
+ u64 pkts : 48;
+#else
+ u64 pkts : 48;
+ u64 rsvd_511_496 : 16;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W8 */
+ u64 rsvd_575_560 : 16;
+ u64 drop_octs : 48;
+#else
+ u64 drop_octs : 48;
+ u64 rsvd_575_560 : 16;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W9 */
+ u64 rsvd_639_624 : 16;
+ u64 drop_pkts : 48;
+#else
+ u64 drop_pkts : 48;
+ u64 rsvd_639_624 : 16;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W10 */
+ u64 rsvd_703_688 : 16;
+ u64 re_pkts : 48;
+#else
+ u64 re_pkts : 48;
+ u64 rsvd_703_688 : 16;
+#endif
+ u64 rsvd_767_704; /* W11 */
+ u64 rsvd_831_768; /* W12 */
+ u64 rsvd_895_832; /* W13 */
+ u64 rsvd_959_896; /* W14 */
+ u64 rsvd_1023_960; /* W15 */
+};
+
+/* NIX sqe sizes */
+enum nix_maxsqesz {
+ NIX_MAXSQESZ_W16 = 0x0,
+ NIX_MAXSQESZ_W8 = 0x1,
+};
+
+/* NIX SQB caching type */
+enum nix_stype {
+ NIX_STYPE_STF = 0x0,
+ NIX_STYPE_STT = 0x1,
+ NIX_STYPE_STP = 0x2,
+};
+
+/* NIX Send queue context structure */
+struct nix_sq_ctx_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* W0 */
+ u64 sqe_way_mask : 16;
+ u64 cq : 20;
+ u64 sdp_mcast : 1;
+ u64 substream : 20;
+ u64 qint_idx : 6;
+ u64 ena : 1;
+#else
+ u64 ena : 1;
+ u64 qint_idx : 6;
+ u64 substream : 20;
+ u64 sdp_mcast : 1;
+ u64 cq : 20;
+ u64 sqe_way_mask : 16;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */
+ u64 sqb_count : 16;
+ u64 default_chan : 12;
+ u64 smq_rr_quantum : 24;
+ u64 sso_ena : 1;
+ u64 xoff : 1;
+ u64 cq_ena : 1;
+ u64 smq : 9;
+#else
+ u64 smq : 9;
+ u64 cq_ena : 1;
+ u64 xoff : 1;
+ u64 sso_ena : 1;
+ u64 smq_rr_quantum : 24;
+ u64 default_chan : 12;
+ u64 sqb_count : 16;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */
+ u64 rsvd_191 : 1;
+ u64 sqe_stype : 2;
+ u64 sq_int_ena : 8;
+ u64 sq_int : 8;
+ u64 sqb_aura : 20;
+ u64 smq_rr_count : 25;
+#else
+ u64 smq_rr_count : 25;
+ u64 sqb_aura : 20;
+ u64 sq_int : 8;
+ u64 sq_int_ena : 8;
+ u64 sqe_stype : 2;
+ u64 rsvd_191 : 1;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */
+ u64 rsvd_255_253 : 3;
+ u64 smq_next_sq_vld : 1;
+ u64 smq_pend : 1;
+ u64 smenq_next_sqb_vld : 1;
+ u64 head_offset : 6;
+ u64 smenq_offset : 6;
+ u64 tail_offset : 6;
+ u64 smq_lso_segnum : 8;
+ u64 smq_next_sq : 20;
+ u64 mnq_dis : 1;
+ u64 lmt_dis : 1;
+ u64 cq_limit : 8;
+ u64 max_sqe_size : 2;
+#else
+ u64 max_sqe_size : 2;
+ u64 cq_limit : 8;
+ u64 lmt_dis : 1;
+ u64 mnq_dis : 1;
+ u64 smq_next_sq : 20;
+ u64 smq_lso_segnum : 8;
+ u64 tail_offset : 6;
+ u64 smenq_offset : 6;
+ u64 head_offset : 6;
+ u64 smenq_next_sqb_vld : 1;
+ u64 smq_pend : 1;
+ u64 smq_next_sq_vld : 1;
+ u64 rsvd_255_253 : 3;
+#endif
+ u64 next_sqb : 64;/* W4 */
+ u64 tail_sqb : 64;/* W5 */
+ u64 smenq_sqb : 64;/* W6 */
+ u64 smenq_next_sqb : 64;/* W7 */
+ u64 head_sqb : 64;/* W8 */
+#if defined(__BIG_ENDIAN_BITFIELD) /* W9 */
+ u64 rsvd_639_630 : 10;
+ u64 vfi_lso_vld : 1;
+ u64 vfi_lso_vlan1_ins_ena : 1;
+ u64 vfi_lso_vlan0_ins_ena : 1;
+ u64 vfi_lso_mps : 14;
+ u64 vfi_lso_sb : 8;
+ u64 vfi_lso_sizem1 : 3;
+ u64 vfi_lso_total : 18;
+ u64 rsvd_583_576 : 8;
+#else
+ u64 rsvd_583_576 : 8;
+ u64 vfi_lso_total : 18;
+ u64 vfi_lso_sizem1 : 3;
+ u64 vfi_lso_sb : 8;
+ u64 vfi_lso_mps : 14;
+ u64 vfi_lso_vlan0_ins_ena : 1;
+ u64 vfi_lso_vlan1_ins_ena : 1;
+ u64 vfi_lso_vld : 1;
+ u64 rsvd_639_630 : 10;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W10 */
+ u64 rsvd_703_658 : 46;
+ u64 scm_lso_rem : 18;
+#else
+ u64 scm_lso_rem : 18;
+ u64 rsvd_703_658 : 46;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W11 */
+ u64 rsvd_767_752 : 16;
+ u64 octs : 48;
+#else
+ u64 octs : 48;
+ u64 rsvd_767_752 : 16;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W12 */
+ u64 rsvd_831_816 : 16;
+ u64 pkts : 48;
+#else
+ u64 pkts : 48;
+ u64 rsvd_831_816 : 16;
+#endif
+ u64 rsvd_895_832 : 64;/* W13 */
+#if defined(__BIG_ENDIAN_BITFIELD) /* W14 */
+ u64 rsvd_959_944 : 16;
+ u64 dropped_octs : 48;
+#else
+ u64 dropped_octs : 48;
+ u64 rsvd_959_944 : 16;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W15 */
+ u64 rsvd_1023_1008 : 16;
+ u64 dropped_pkts : 48;
+#else
+ u64 dropped_pkts : 48;
+ u64 rsvd_1023_1008 : 16;
+#endif
+};
+
+/* NIX Receive side scaling entry structure */
+struct nix_rsse_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ uint32_t reserved_20_31 : 12;
+ uint32_t rq : 20;
+#else
+ uint32_t rq : 20;
+ uint32_t reserved_20_31 : 12;
+#endif
+};
+
+/* NIX receive multicast/mirror entry structure */
+struct nix_rx_mce_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* W0 */
+ uint64_t next : 16;
+ uint64_t pf_func : 16;
+ uint64_t rsvd_31_24 : 8;
+ uint64_t index : 20;
+ uint64_t eol : 1;
+ uint64_t rsvd_2 : 1;
+ uint64_t op : 2;
+#else
+ uint64_t op : 2;
+ uint64_t rsvd_2 : 1;
+ uint64_t eol : 1;
+ uint64_t index : 20;
+ uint64_t rsvd_31_24 : 8;
+ uint64_t pf_func : 16;
+ uint64_t next : 16;
+#endif
+};
+
+enum nix_lsoalg {
+ NIX_LSOALG_NOP,
+ NIX_LSOALG_ADD_SEGNUM,
+ NIX_LSOALG_ADD_PAYLEN,
+ NIX_LSOALG_ADD_OFFSET,
+ NIX_LSOALG_TCP_FLAGS,
+};
+
+enum nix_txlayer {
+ NIX_TXLAYER_OL3,
+ NIX_TXLAYER_OL4,
+ NIX_TXLAYER_IL3,
+ NIX_TXLAYER_IL4,
+};
+
+struct nix_lso_format {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 rsvd_19_63 : 45;
+ u64 alg : 3;
+ u64 rsvd_14_15 : 2;
+ u64 sizem1 : 2;
+ u64 rsvd_10_11 : 2;
+ u64 layer : 2;
+ u64 offset : 8;
+#else
+ u64 offset : 8;
+ u64 layer : 2;
+ u64 rsvd_10_11 : 2;
+ u64 sizem1 : 2;
+ u64 rsvd_14_15 : 2;
+ u64 alg : 3;
+ u64 rsvd_19_63 : 45;
+#endif
+};
+
+struct nix_rx_flowkey_alg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u64 reserved_35_63 : 29;
+	u64 ltype_match : 4;
+	u64 ltype_mask : 4;
+	u64 sel_chan : 1;
+	u64 ena : 1;
+	u64 reserved_24_24 : 1;
+	u64 lid : 3;
+	u64 bytesm1 : 5;
+	u64 hdr_offset : 8;
+	u64 fn_mask : 1;
+	u64 ln_mask : 1;
+	u64 key_offset : 6;
+#else
+	u64 key_offset : 6;
+	u64 ln_mask : 1;
+	u64 fn_mask : 1;
+	u64 hdr_offset : 8;
+	u64 bytesm1 : 5;
+	u64 lid : 3;
+	u64 reserved_24_24 : 1;
+	u64 ena : 1;
+	u64 sel_chan : 1;
+	u64 ltype_mask : 4;
+	u64 ltype_match : 4;
+	u64 reserved_35_63 : 29;
+#endif
+#endif
+};
+
+/* NIX VTAG size */
+enum nix_vtag_size {
+ VTAGSIZE_T4 = 0x0,
+ VTAGSIZE_T8 = 0x1,
+};
+#endif /* RVU_STRUCT_H */
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 3a9730612a70..0bd4351b2a49 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -988,8 +988,8 @@ static int pxa168_init_phy(struct net_device *dev)
cmd.base.phy_address = pep->phy_addr;
cmd.base.speed = pep->phy_speed;
cmd.base.duplex = pep->phy_duplex;
- ethtool_convert_legacy_u32_to_link_mode(cmd.link_modes.advertising,
- PHY_BASIC_FEATURES);
+ bitmap_copy(cmd.link_modes.advertising, PHY_BASIC_FEATURES,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
cmd.base.autoneg = AUTONEG_ENABLE;
if (cmd.base.speed != 0)
@@ -1260,7 +1260,8 @@ static int pxa168_rx_poll(struct napi_struct *napi, int budget)
return work_done;
}
-static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t
+pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct pxa168_eth_private *pep = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;