author    Linus Torvalds <torvalds@linux-foundation.org>  2016-10-05 20:11:24 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-10-05 20:11:24 +0300
commit    687ee0ad4e897e29f4b41f7a20c866d74c5e0660 (patch)
tree      b31a2af35c24a54823674cdd126993b80daeac67 /drivers/net/ethernet/cavium
parent    3ddf40e8c31964b744ff10abb48c8e36a83ec6e7 (diff)
parent    03a1eabc3f54469abd4f1784182851b2e29630cc (diff)
download  linux-687ee0ad4e897e29f4b41f7a20c866d74c5e0660.tar.xz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:

 1) BBR TCP congestion control, from Neal Cardwell, Yuchung Cheng and co. at Google. https://lwn.net/Articles/701165/
 2) Do TCP Small Queues for retransmits, from Eric Dumazet.
 3) Support collect_md mode for all IPV4 and IPV6 tunnels, from Alexei Starovoitov.
 4) Allow cls_flower to classify packets in ip tunnels, from Amir Vadai.
 5) Support DSA tagging in older mv88e6xxx switches, from Andrew Lunn.
 6) Support GMAC protocol in iwlwifi mvm, from Ayala Beker.
 7) Support ndo_poll_controller in mlx5, from Calvin Owens.
 8) Move VRF processing to an output hook and allow l3mdev to be loopback, from David Ahern.
 9) Support SOCK_DESTROY for UDP sockets. Also from David Ahern.
10) Congestion control in RXRPC, from David Howells.
11) Support geneve RX offload in ixgbe, from Emil Tantilov.
12) When hitting pressure for new incoming TCP data SKBs, perform a partial rather than a full purge of the OFO queue (which could be huge). From Eric Dumazet.
13) Convert XFRM state and policy lookups to RCU, from Florian Westphal.
14) Support RX network flow classification to igb, from Gangfeng Huang.
15) Hardware offloading of eBPF in nfp driver, from Jakub Kicinski.
16) New skbmod packet action, from Jamal Hadi Salim.
17) Remove some inefficiencies in snmp proc output, from Jia He.
18) Add FIB notifications to properly propagate route changes to hardware which is doing forwarding offloading. From Jiri Pirko.
19) New dsa driver for qca8xxx chips, from John Crispin.
20) Implement RFC7559 ipv6 router solicitation backoff, from Maciej Żenczykowski.
21) Add L3 mode to ipvlan, from Mahesh Bandewar.
22) Support 802.1ad in mlx4, from Moshe Shemesh.
23) Support hardware LRO in mediatek driver, from Nelson Chang.
24) Add TC offloading to mlx5, from Or Gerlitz.
25) Convert various drivers to ethtool ksettings interfaces, from Philippe Reynes.
26) TX max rate limiting for cxgb4, from Rahul Lakkireddy.
27) NAPI support for ath10k, from Rajkumar Manoharan.
28) Support XDP in mlx5, from Rana Shahout and Saeed Mahameed.
29) UDP replicast support in TIPC, from Richard Alpe.
30) Per-queue statistics for qed driver, from Sudarsana Reddy Kalluru.
31) Support BQL in thunderx driver, from Sunil Goutham.
32) TSO support in alx driver, from Tobias Regnery.
33) Add stream parser engine and use it in kcm.
34) Support async DHCP replies in ipconfig module, from Uwe Kleine-König.
35) DSA port fast aging for mv88e6xxx driver, from Vivien Didelot.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1715 commits)
  mlxsw: switchx2: Fix misuse of hard_header_len
  mlxsw: spectrum: Fix misuse of hard_header_len
  net/faraday: Stop NCSI device on shutdown
  net/ncsi: Introduce ncsi_stop_dev()
  net/ncsi: Rework the channel monitoring
  net/ncsi: Allow to extend NCSI request properties
  net/ncsi: Rework request index allocation
  net/ncsi: Don't probe on the reserved channel ID (0x1f)
  net/ncsi: Introduce NCSI_RESERVED_CHANNEL
  net/ncsi: Avoid unused-value build warning from ia64-linux-gcc
  net: Add netdev all_adj_list refcnt propagation to fix panic
  net: phy: Add Edge-rate driver for Microsemi PHYs.
  vmxnet3: Wake queue from reset work
  i40e: avoid NULL pointer dereference and recursive errors on early PCI error
  qed: Add RoCE ll2 & GSI support
  qed: Add support for memory registeration verbs
  qed: Add support for QP verbs
  qed: PD,PKEY and CQ verb support
  qed: Add support for RoCE hw init
  qede: Add qedr framework
  ...
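Of the items above, BBR (item 1) is the most user-visible: it is selected per socket through the standard TCP_CONGESTION socket option rather than any new API. A minimal, self-contained sketch, assuming the running kernel was built with CONFIG_TCP_CONG_BBR (otherwise the setsockopt() fails with ENOENT):

/* Minimal sketch: pick BBR for one TCP socket via TCP_CONGESTION.
 * Assumes the running kernel provides "bbr" (CONFIG_TCP_CONG_BBR).
 */
#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
		       "bbr", strlen("bbr")) < 0)
		perror("setsockopt(TCP_CONGESTION)");
	else
		printf("socket now uses bbr\n");
	close(fd);
	return 0;
}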
Diffstat (limited to 'drivers/net/ethernet/cavium')
-rw-r--r--  drivers/net/ethernet/cavium/Kconfig | 12
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/Makefile | 24
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c | 1237
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h | 59
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h | 604
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn66xx_device.c | 45
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn66xx_device.h | 7
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn68xx_device.c | 1
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_core.c | 266
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_ethtool.c | 513
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_main.c | 1128
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/liquidio_common.h | 34
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_config.h | 59
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_console.c | 117
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_device.c | 352
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_device.h | 114
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_droq.c | 46
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_droq.h | 2
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_iq.h | 2
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_main.h | 32
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c | 1
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_network.h | 12
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_nic.c | 35
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_nic.h | 6
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/request_manager.c | 170
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/response_manager.c | 9
-rw-r--r--  drivers/net/ethernet/cavium/thunder/Makefile | 1
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic.h | 87
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic_main.c | 433
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic_reg.h | 15
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c | 77
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 89
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.h | 5
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 460
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.h | 33
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_xcv.c | 235
36 files changed, 5236 insertions, 1086 deletions
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index 0ef232d3331e..92f411c9f0df 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -36,10 +36,20 @@ config THUNDER_NIC_BGX
depends on 64BIT
select PHYLIB
select MDIO_THUNDER
+ select THUNDER_NIC_RGX
---help---
This driver supports programming and controlling of MAC
interface from NIC physical function driver.
+config THUNDER_NIC_RGX
+ tristate "Thunder MAC interface driver (RGX)"
+ depends on 64BIT
+ select PHYLIB
+ select MDIO_THUNDER
+ ---help---
+ This driver supports configuring the XCV block of the RGX
+ interface present on the CN81XX chip.
+
config LIQUIDIO
tristate "Cavium LiquidIO support"
depends on 64BIT
@@ -48,7 +58,7 @@ config LIQUIDIO
select LIBCRC32C
---help---
This driver supports Cavium LiquidIO Intelligent Server Adapters
- based on CN66XX and CN68XX chips.
+ based on CN66XX, CN68XX and CN23XX chips.
To compile this driver as a module, choose M here: the module
will be called liquidio. This is recommended.
diff --git a/drivers/net/ethernet/cavium/liquidio/Makefile b/drivers/net/ethernet/cavium/liquidio/Makefile
index 2f366806835d..5a27b2a44039 100644
--- a/drivers/net/ethernet/cavium/liquidio/Makefile
+++ b/drivers/net/ethernet/cavium/liquidio/Makefile
@@ -3,14 +3,16 @@
#
obj-$(CONFIG_LIQUIDIO) += liquidio.o
-liquidio-objs := lio_main.o \
- lio_ethtool.o \
- request_manager.o \
- response_manager.o \
- octeon_device.o \
- cn66xx_device.o \
- cn68xx_device.o \
- octeon_mem_ops.o \
- octeon_droq.o \
- octeon_console.o \
- octeon_nic.o
+liquidio-$(CONFIG_LIQUIDIO) += lio_ethtool.o \
+ lio_core.o \
+ request_manager.o \
+ response_manager.o \
+ octeon_device.o \
+ cn66xx_device.o \
+ cn68xx_device.o \
+ cn23xx_pf_device.o \
+ octeon_mem_ops.o \
+ octeon_droq.o \
+ octeon_nic.o
+
+liquidio-objs := lio_main.o octeon_console.o $(liquidio-y)
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
new file mode 100644
index 000000000000..bddb198c0b74
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
@@ -0,0 +1,1237 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "cn23xx_pf_device.h"
+#include "octeon_main.h"
+
+#define RESET_NOTDONE 0
+#define RESET_DONE 1
+
+/* Change the value of SLI Packet Input Jabber Register to allow
+ * VXLAN TSO packets which can be 64424 bytes, exceeding the
+ * MAX_GSO_SIZE we supplied to the kernel
+ */
+#define CN23XX_INPUT_JABBER 64600
+
+#define LIOLUT_RING_DISTRIBUTION 9
+const int liolut_num_vfs_to_rings_per_vf[LIOLUT_RING_DISTRIBUTION] = {
+ 0, 8, 4, 2, 2, 2, 1, 1, 1
+};
+
+void cn23xx_dump_pf_initialized_regs(struct octeon_device *oct)
+{
+ int i = 0;
+ u32 regval = 0;
+ struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+
+ /*In cn23xx_soft_reset*/
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%llx\n",
+ "CN23XX_WIN_WR_MASK_REG", CVM_CAST64(CN23XX_WIN_WR_MASK_REG),
+ CVM_CAST64(octeon_read_csr64(oct, CN23XX_WIN_WR_MASK_REG)));
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_SCRATCH1", CVM_CAST64(CN23XX_SLI_SCRATCH1),
+ CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1)));
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+ "CN23XX_RST_SOFT_RST", CN23XX_RST_SOFT_RST,
+ lio_pci_readq(oct, CN23XX_RST_SOFT_RST));
+
+ /*In cn23xx_set_dpi_regs*/
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+ "CN23XX_DPI_DMA_CONTROL", CN23XX_DPI_DMA_CONTROL,
+ lio_pci_readq(oct, CN23XX_DPI_DMA_CONTROL));
+
+ for (i = 0; i < 6; i++) {
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_DPI_DMA_ENG_ENB", i,
+ CN23XX_DPI_DMA_ENG_ENB(i),
+ lio_pci_readq(oct, CN23XX_DPI_DMA_ENG_ENB(i)));
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_DPI_DMA_ENG_BUF", i,
+ CN23XX_DPI_DMA_ENG_BUF(i),
+ lio_pci_readq(oct, CN23XX_DPI_DMA_ENG_BUF(i)));
+ }
+
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n", "CN23XX_DPI_CTL",
+ CN23XX_DPI_CTL, lio_pci_readq(oct, CN23XX_DPI_CTL));
+
+ /*In cn23xx_setup_pcie_mps and cn23xx_setup_pcie_mrrs */
+ pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, &regval);
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+ "CN23XX_CONFIG_PCIE_DEVCTL",
+ CVM_CAST64(CN23XX_CONFIG_PCIE_DEVCTL), CVM_CAST64(regval));
+
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_DPI_SLI_PRTX_CFG", oct->pcie_port,
+ CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port),
+ lio_pci_readq(oct, CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port)));
+
+ /*In cn23xx_specific_regs_setup */
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_S2M_PORTX_CTL", oct->pcie_port,
+ CVM_CAST64(CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port)),
+ CVM_CAST64(octeon_read_csr64(
+ oct, CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port))));
+
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_RING_RST", CVM_CAST64(CN23XX_SLI_PKT_IOQ_RING_RST),
+ (u64)octeon_read_csr64(oct, CN23XX_SLI_PKT_IOQ_RING_RST));
+
+ /*In cn23xx_setup_global_mac_regs*/
+ for (i = 0; i < CN23XX_MAX_MACS; i++) {
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_PKT_MAC_RINFO64", i,
+ CVM_CAST64(CN23XX_SLI_PKT_MAC_RINFO64(i, oct->pf_num)),
+ CVM_CAST64(octeon_read_csr64
+ (oct, CN23XX_SLI_PKT_MAC_RINFO64
+ (i, oct->pf_num))));
+ }
+
+ /*In cn23xx_setup_global_input_regs*/
+ for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_IQ_PKT_CONTROL64", i,
+ CVM_CAST64(CN23XX_SLI_IQ_PKT_CONTROL64(i)),
+ CVM_CAST64(octeon_read_csr64
+ (oct, CN23XX_SLI_IQ_PKT_CONTROL64(i))));
+ }
+
+ /*In cn23xx_setup_global_output_regs*/
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_OQ_WMARK", CVM_CAST64(CN23XX_SLI_OQ_WMARK),
+ CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_OQ_WMARK)));
+
+ for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_OQ_PKT_CONTROL", i,
+ CVM_CAST64(CN23XX_SLI_OQ_PKT_CONTROL(i)),
+ CVM_CAST64(octeon_read_csr(
+ oct, CN23XX_SLI_OQ_PKT_CONTROL(i))));
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_OQ_PKT_INT_LEVELS", i,
+ CVM_CAST64(CN23XX_SLI_OQ_PKT_INT_LEVELS(i)),
+ CVM_CAST64(octeon_read_csr64(
+ oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(i))));
+ }
+
+ /*In cn23xx_enable_interrupt and cn23xx_disable_interrupt*/
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+ "cn23xx->intr_enb_reg64",
+ CVM_CAST64((long)(cn23xx->intr_enb_reg64)),
+ CVM_CAST64(readq(cn23xx->intr_enb_reg64)));
+
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+ "cn23xx->intr_sum_reg64",
+ CVM_CAST64((long)(cn23xx->intr_sum_reg64)),
+ CVM_CAST64(readq(cn23xx->intr_sum_reg64)));
+
+ /*In cn23xx_setup_iq_regs*/
+ for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_IQ_BASE_ADDR64", i,
+ CVM_CAST64(CN23XX_SLI_IQ_BASE_ADDR64(i)),
+ CVM_CAST64(octeon_read_csr64(
+ oct, CN23XX_SLI_IQ_BASE_ADDR64(i))));
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_IQ_SIZE", i,
+ CVM_CAST64(CN23XX_SLI_IQ_SIZE(i)),
+ CVM_CAST64(octeon_read_csr
+ (oct, CN23XX_SLI_IQ_SIZE(i))));
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_IQ_DOORBELL", i,
+ CVM_CAST64(CN23XX_SLI_IQ_DOORBELL(i)),
+ CVM_CAST64(octeon_read_csr64(
+ oct, CN23XX_SLI_IQ_DOORBELL(i))));
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_IQ_INSTR_COUNT64", i,
+ CVM_CAST64(CN23XX_SLI_IQ_INSTR_COUNT64(i)),
+ CVM_CAST64(octeon_read_csr64(
+ oct, CN23XX_SLI_IQ_INSTR_COUNT64(i))));
+ }
+
+ /*In cn23xx_setup_oq_regs*/
+ for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_OQ_BASE_ADDR64", i,
+ CVM_CAST64(CN23XX_SLI_OQ_BASE_ADDR64(i)),
+ CVM_CAST64(octeon_read_csr64(
+ oct, CN23XX_SLI_OQ_BASE_ADDR64(i))));
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_OQ_SIZE", i,
+ CVM_CAST64(CN23XX_SLI_OQ_SIZE(i)),
+ CVM_CAST64(octeon_read_csr
+ (oct, CN23XX_SLI_OQ_SIZE(i))));
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_OQ_BUFF_INFO_SIZE", i,
+ CVM_CAST64(CN23XX_SLI_OQ_BUFF_INFO_SIZE(i)),
+ CVM_CAST64(octeon_read_csr(
+ oct, CN23XX_SLI_OQ_BUFF_INFO_SIZE(i))));
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_OQ_PKTS_SENT", i,
+ CVM_CAST64(CN23XX_SLI_OQ_PKTS_SENT(i)),
+ CVM_CAST64(octeon_read_csr64(
+ oct, CN23XX_SLI_OQ_PKTS_SENT(i))));
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_OQ_PKTS_CREDIT", i,
+ CVM_CAST64(CN23XX_SLI_OQ_PKTS_CREDIT(i)),
+ CVM_CAST64(octeon_read_csr64(
+ oct, CN23XX_SLI_OQ_PKTS_CREDIT(i))));
+ }
+
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_PKT_TIME_INT",
+ CVM_CAST64(CN23XX_SLI_PKT_TIME_INT),
+ CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_PKT_TIME_INT)));
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_PKT_CNT_INT",
+ CVM_CAST64(CN23XX_SLI_PKT_CNT_INT),
+ CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_PKT_CNT_INT)));
+}
+
+static int cn23xx_pf_soft_reset(struct octeon_device *oct)
+{
+ octeon_write_csr64(oct, CN23XX_WIN_WR_MASK_REG, 0xFF);
+
+ dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: BIST enabled for CN23XX soft reset\n",
+ oct->octeon_id);
+
+ octeon_write_csr64(oct, CN23XX_SLI_SCRATCH1, 0x1234ULL);
+
+ /* Initiate chip-wide soft reset */
+ lio_pci_readq(oct, CN23XX_RST_SOFT_RST);
+ lio_pci_writeq(oct, 1, CN23XX_RST_SOFT_RST);
+
+ /* Wait for 100ms as Octeon resets. */
+ mdelay(100);
+
+ if (octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1) == 0x1234ULL) {
+ dev_err(&oct->pci_dev->dev, "OCTEON[%d]: Soft reset failed\n",
+ oct->octeon_id);
+ return 1;
+ }
+
+ dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: Reset completed\n",
+ oct->octeon_id);
+
+ /* restore the reset value*/
+ octeon_write_csr64(oct, CN23XX_WIN_WR_MASK_REG, 0xFF);
+
+ return 0;
+}
+
+static void cn23xx_enable_error_reporting(struct octeon_device *oct)
+{
+ u32 regval;
+ u32 uncorrectable_err_mask, corrtable_err_status;
+
+ pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, &regval);
+ if (regval & CN23XX_CONFIG_PCIE_DEVCTL_MASK) {
+ uncorrectable_err_mask = 0;
+ corrtable_err_status = 0;
+ pci_read_config_dword(oct->pci_dev,
+ CN23XX_CONFIG_PCIE_UNCORRECT_ERR_MASK,
+ &uncorrectable_err_mask);
+ pci_read_config_dword(oct->pci_dev,
+ CN23XX_CONFIG_PCIE_CORRECT_ERR_STATUS,
+ &corrtable_err_status);
+ dev_err(&oct->pci_dev->dev, "PCI-E Fatal error detected;\n"
+ "\tdev_ctl_status_reg = 0x%08x\n"
+ "\tuncorrectable_error_mask_reg = 0x%08x\n"
+ "\tcorrectable_error_status_reg = 0x%08x\n",
+ regval, uncorrectable_err_mask,
+ corrtable_err_status);
+ }
+
+ regval |= 0xf; /* Enable Link error reporting */
+
+ dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: Enabling PCI-E error reporting..\n",
+ oct->octeon_id);
+ pci_write_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, regval);
+}
+
+static u32 cn23xx_coprocessor_clock(struct octeon_device *oct)
+{
+ /* Bits 29:24 of RST_BOOT[PNR_MUL] hold the reference clock MULTIPLIER
+ * for SLI.
+ */
+
+ /* TBD: get the info in Hand-shake */
+ return (((lio_pci_readq(oct, CN23XX_RST_BOOT) >> 24) & 0x3f) * 50);
+}
+
+u32 cn23xx_pf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us)
+{
+ /* This gives the SLI clock per microsec */
+ u32 oqticks_per_us = cn23xx_coprocessor_clock(oct);
+
+ oct->pfvf_hsword.coproc_tics_per_us = oqticks_per_us;
+
+ /* This gives the clock cycles per millisecond */
+ oqticks_per_us *= 1000;
+
+ /* This gives the oq ticks (1024 core clock cycles) per millisecond */
+ oqticks_per_us /= 1024;
+
+ /* time_intr is in microseconds. The next 2 steps give the oq ticks
+ * corresponding to time_intr.
+ */
+ oqticks_per_us *= time_intr_in_us;
+ oqticks_per_us /= 1000;
+
+ return oqticks_per_us;
+}
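
A worked example of the conversion above, as a standalone sketch: one OQ tick is 1024 SLI clock cycles, so a 100 us interval at 1600 ticks/us comes out to 156 OQ ticks. The PNR_MUL value of 32 (i.e. a 32 * 50 = 1600 MHz SLI clock) is a hypothetical input, not taken from hardware.

/* Standalone restatement of the cn23xx_pf_get_oq_ticks() arithmetic. */
#include <stdio.h>

static unsigned int oq_ticks(unsigned int clk_per_us, unsigned int time_us)
{
	unsigned int t = clk_per_us * 1000;	/* clock cycles per ms */

	t /= 1024;	/* OQ ticks (1024 cycles each) per ms */
	t *= time_us;	/* scale by the interval ... */
	t /= 1000;	/* ... which was given in microseconds */
	return t;
}

int main(void)
{
	printf("%u\n", oq_ticks(32 * 50, 100));	/* prints 156 */
	return 0;
}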
+
+static void cn23xx_setup_global_mac_regs(struct octeon_device *oct)
+{
+ u64 reg_val;
+ u16 mac_no = oct->pcie_port;
+ u16 pf_num = oct->pf_num;
+
+ /* programming SRN and TRS for each MAC(0..3) */
+
+ dev_dbg(&oct->pci_dev->dev, "%s:Using pcie port %d\n",
+ __func__, mac_no);
+ /* By default, map all 64 IOQs to a single MAC */
+
+ reg_val =
+ octeon_read_csr64(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num));
+
+ if (oct->rev_id == OCTEON_CN23XX_REV_1_1) {
+ /* setting SRN <6:0> */
+ reg_val = pf_num * CN23XX_MAX_RINGS_PER_PF_PASS_1_1;
+ } else {
+ /* setting SRN <6:0> */
+ reg_val = pf_num * CN23XX_MAX_RINGS_PER_PF;
+ }
+
+ /* setting TRS <23:16> */
+ reg_val = reg_val |
+ (oct->sriov_info.trs << CN23XX_PKT_MAC_CTL_RINFO_TRS_BIT_POS);
+ /* write these settings to MAC register */
+ octeon_write_csr64(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num),
+ reg_val);
+
+ dev_dbg(&oct->pci_dev->dev, "SLI_PKT_MAC(%d)_PF(%d)_RINFO : 0x%016llx\n",
+ mac_no, pf_num, (u64)octeon_read_csr64
+ (oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num)));
+}
+
+static int cn23xx_reset_io_queues(struct octeon_device *oct)
+{
+ int ret_val = 0;
+ u64 d64;
+ u32 q_no, srn, ern;
+ u32 loop = 1000;
+
+ srn = oct->sriov_info.pf_srn;
+ ern = srn + oct->sriov_info.num_pf_rings;
+
+ /* As per the HRM register description, s/w can't write 0 to ENB;
+ * to turn a queue off, the RST bit must be set.
+ */
+
+ /* Reset the Enable bit for all the 64 IQs. */
+ for (q_no = srn; q_no < ern; q_no++) {
+ /* set RST bit to 1. This bit applies to both IQ and OQ */
+ d64 = octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ d64 = d64 | CN23XX_PKT_INPUT_CTL_RST;
+ octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), d64);
+ }
+
+ /* Wait until the RST bit is clear or the RST and QUIET bits are set */
+ for (q_no = srn; q_no < ern; q_no++) {
+ u64 reg_val = octeon_read_csr64(oct,
+ CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ while ((READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_RST) &&
+ !(READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_QUIET) &&
+ loop--) {
+ WRITE_ONCE(reg_val, octeon_read_csr64(
+ oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)));
+ }
+ if (!loop) {
+ dev_err(&oct->pci_dev->dev,
+ "clearing the reset reg failed or setting the quiet reg failed for qno: %u\n",
+ q_no);
+ return -1;
+ }
+ WRITE_ONCE(reg_val, READ_ONCE(reg_val) &
+ ~CN23XX_PKT_INPUT_CTL_RST);
+ octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+ READ_ONCE(reg_val));
+
+ WRITE_ONCE(reg_val, octeon_read_csr64(
+ oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)));
+ if (READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_RST) {
+ dev_err(&oct->pci_dev->dev,
+ "clearing the reset failed for qno: %u\n",
+ q_no);
+ ret_val = -1;
+ }
+ }
+
+ return ret_val;
+}
+
+static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct)
+{
+ u32 q_no, ern, srn;
+ u64 pf_num;
+ u64 intr_threshold, reg_val;
+ struct octeon_instr_queue *iq;
+ struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+
+ pf_num = oct->pf_num;
+
+ srn = oct->sriov_info.pf_srn;
+ ern = srn + oct->sriov_info.num_pf_rings;
+
+ if (cn23xx_reset_io_queues(oct))
+ return -1;
+
+ /** Set the MAC_NUM and PVF_NUM in the IQ_PKT_CONTROL reg
+ * for all queues. Only the PF can set these bits.
+ * bits 29:30 indicate the MAC num.
+ * bits 32:47 indicate the PVF num.
+ */
+ for (q_no = 0; q_no < ern; q_no++) {
+ reg_val = oct->pcie_port << CN23XX_PKT_INPUT_CTL_MAC_NUM_POS;
+ reg_val |= pf_num << CN23XX_PKT_INPUT_CTL_PF_NUM_POS;
+
+ octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+ reg_val);
+ }
+
+ /* Select ES, RO, NS, RDSIZE, DPTR Format#0 for
+ * PF queues
+ */
+ for (q_no = srn; q_no < ern; q_no++) {
+ void __iomem *inst_cnt_reg;
+
+ iq = oct->instr_queue[q_no];
+ if (iq)
+ inst_cnt_reg = iq->inst_cnt_reg;
+ else
+ inst_cnt_reg = (u8 *)oct->mmio[0].hw_addr +
+ CN23XX_SLI_IQ_INSTR_COUNT64(q_no);
+
+ reg_val =
+ octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+
+ reg_val |= CN23XX_PKT_INPUT_CTL_MASK;
+
+ octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+ reg_val);
+
+ /* Set WMARK level for triggering PI_INT */
+ /* intr_threshold = CN23XX_DEF_IQ_INTR_THRESHOLD & */
+ intr_threshold = CFG_GET_IQ_INTR_PKT(cn23xx->conf) &
+ CN23XX_PKT_IN_DONE_WMARK_MASK;
+
+ writeq((readq(inst_cnt_reg) &
+ ~(CN23XX_PKT_IN_DONE_WMARK_MASK <<
+ CN23XX_PKT_IN_DONE_WMARK_BIT_POS)) |
+ (intr_threshold << CN23XX_PKT_IN_DONE_WMARK_BIT_POS),
+ inst_cnt_reg);
+ }
+ return 0;
+}
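
For reference, a self-contained sketch of the field packing done in the first loop above, using the same bit positions as the CN23XX_PKT_INPUT_CTL_MAC_NUM_POS / CN23XX_PKT_INPUT_CTL_PF_NUM_POS macros defined in cn23xx_pf_regs.h later in this patch; the port and PF values are hypothetical.

/* Packing sketch for the MAC/PF loop in
 * cn23xx_pf_setup_global_input_regs() above.
 */
#include <stdint.h>
#include <stdio.h>

#define MAC_NUM_POS 29	/* MAC number, bits <30:29> */
#define PF_NUM_POS  45	/* PF number, bits <47:45> */

int main(void)
{
	uint64_t pcie_port = 1, pf_num = 1;	/* hypothetical values */
	uint64_t reg_val = (pcie_port << MAC_NUM_POS) |
			   (pf_num << PF_NUM_POS);

	printf("0x%016llx\n", (unsigned long long)reg_val);
	/* prints 0x0000200020000000 */
	return 0;
}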
+
+static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct)
+{
+ u32 reg_val;
+ u32 q_no, ern, srn;
+ u64 time_threshold;
+
+ struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+
+ srn = oct->sriov_info.pf_srn;
+ ern = srn + oct->sriov_info.num_pf_rings;
+
+ if (CFG_GET_IS_SLI_BP_ON(cn23xx->conf)) {
+ octeon_write_csr64(oct, CN23XX_SLI_OQ_WMARK, 32);
+ } else {
+ /** Set Output queue watermark to 0 to disable backpressure */
+ octeon_write_csr64(oct, CN23XX_SLI_OQ_WMARK, 0);
+ }
+
+ for (q_no = srn; q_no < ern; q_no++) {
+ reg_val = octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no));
+
+ /* set IPTR & DPTR */
+ reg_val |=
+ (CN23XX_PKT_OUTPUT_CTL_IPTR | CN23XX_PKT_OUTPUT_CTL_DPTR);
+
+ /* reset BMODE */
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_BMODE);
+
+ /* No Relaxed Ordering, No Snoop, 64-bit Byte swap
+ * for Output Queue ScatterList
+ * reset ROR_P, NSR_P
+ */
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR_P);
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR_P);
+
+#ifdef __LITTLE_ENDIAN_BITFIELD
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ES_P);
+#else
+ reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES_P);
+#endif
+ /* No Relaxed Ordering, No Snoop, 64-bit Byte swap
+ * for Output Queue Data
+ * reset ROR, NSR
+ */
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR);
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR);
+ /* set the ES bit */
+ reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES);
+
+ /* write all the selected settings */
+ octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no), reg_val);
+
+ /* These interrupts are enabled in the oct->fn_list.enable_interrupt()
+ * routine, which is called after IOQ init.
+ * Set up interrupt packet and time thresholds
+ * for all the OQs
+ */
+ time_threshold = cn23xx_pf_get_oq_ticks(
+ oct, (u32)CFG_GET_OQ_INTR_TIME(cn23xx->conf));
+
+ octeon_write_csr64(oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
+ (CFG_GET_OQ_INTR_PKT(cn23xx->conf) |
+ (time_threshold << 32)));
+ }
+
+ /** Set the watermark level for PKO backpressure **/
+ writeq(0x40, (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_WMARK);
+
+ /** Disable setting OQs in reset when a ring has no doorbells;
+ * enabling this would cause head-of-line blocking.
+ */
+ /* Do it only for pass 1.1 and pass 1.2 */
+ if ((oct->rev_id == OCTEON_CN23XX_REV_1_0) ||
+ (oct->rev_id == OCTEON_CN23XX_REV_1_1))
+ writeq(readq((u8 *)oct->mmio[0].hw_addr +
+ CN23XX_SLI_GBL_CONTROL) | 0x2,
+ (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_GBL_CONTROL);
+
+ /** Enable channel-level backpressure */
+ if (oct->pf_num)
+ writeq(0xffffffffffffffffULL,
+ (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OUT_BP_EN2_W1S);
+ else
+ writeq(0xffffffffffffffffULL,
+ (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OUT_BP_EN_W1S);
+}
+
+static int cn23xx_setup_pf_device_regs(struct octeon_device *oct)
+{
+ cn23xx_enable_error_reporting(oct);
+
+ /* program the MAC(0..3)_RINFO before setting up input/output regs */
+ cn23xx_setup_global_mac_regs(oct);
+
+ if (cn23xx_pf_setup_global_input_regs(oct))
+ return -1;
+
+ cn23xx_pf_setup_global_output_regs(oct);
+
+ /* Default error timeout value should be 0x200000 to avoid host hang
+ * when reading an invalid register
+ */
+ octeon_write_csr64(oct, CN23XX_SLI_WINDOW_CTL,
+ CN23XX_SLI_WINDOW_CTL_DEFAULT);
+
+ /* set SLI_PKT_IN_JABBER to handle large VXLAN packets */
+ octeon_write_csr64(oct, CN23XX_SLI_PKT_IN_JABBER, CN23XX_INPUT_JABBER);
+ return 0;
+}
+
+static void cn23xx_setup_iq_regs(struct octeon_device *oct, u32 iq_no)
+{
+ struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
+ u64 pkt_in_done;
+
+ iq_no += oct->sriov_info.pf_srn;
+
+ /* Write the start of the input queue's ring and its size */
+ octeon_write_csr64(oct, CN23XX_SLI_IQ_BASE_ADDR64(iq_no),
+ iq->base_addr_dma);
+ octeon_write_csr(oct, CN23XX_SLI_IQ_SIZE(iq_no), iq->max_count);
+
+ /* Remember the doorbell & instruction count register addr
+ * for this queue
+ */
+ iq->doorbell_reg =
+ (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_IQ_DOORBELL(iq_no);
+ iq->inst_cnt_reg =
+ (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_IQ_INSTR_COUNT64(iq_no);
+ dev_dbg(&oct->pci_dev->dev, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n",
+ iq_no, iq->doorbell_reg, iq->inst_cnt_reg);
+
+ /* Store the current instruction counter (used in flush_iq
+ * calculation)
+ */
+ pkt_in_done = readq(iq->inst_cnt_reg);
+
+ if (oct->msix_on) {
+ /* Set CINT_ENB to enable IQ interrupt */
+ writeq((pkt_in_done | CN23XX_INTR_CINT_ENB),
+ iq->inst_cnt_reg);
+ } else {
+ /* Clear the count by writing back what we read, but don't
+ * enable interrupts
+ */
+ writeq(pkt_in_done, iq->inst_cnt_reg);
+ }
+
+ iq->reset_instr_cnt = 0;
+}
+
+static void cn23xx_setup_oq_regs(struct octeon_device *oct, u32 oq_no)
+{
+ u32 reg_val;
+ struct octeon_droq *droq = oct->droq[oq_no];
+ struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+ u64 time_threshold;
+ u64 cnt_threshold;
+
+ oq_no += oct->sriov_info.pf_srn;
+
+ octeon_write_csr64(oct, CN23XX_SLI_OQ_BASE_ADDR64(oq_no),
+ droq->desc_ring_dma);
+ octeon_write_csr(oct, CN23XX_SLI_OQ_SIZE(oq_no), droq->max_count);
+
+ octeon_write_csr(oct, CN23XX_SLI_OQ_BUFF_INFO_SIZE(oq_no),
+ (droq->buffer_size | (OCT_RH_SIZE << 16)));
+
+ /* Get the mapped address of the pkt_sent and pkts_credit regs */
+ droq->pkts_sent_reg =
+ (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_PKTS_SENT(oq_no);
+ droq->pkts_credit_reg =
+ (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_PKTS_CREDIT(oq_no);
+
+ if (!oct->msix_on) {
+ /* Enable this output queue to generate Packet Timer Interrupt
+ */
+ reg_val =
+ octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no));
+ reg_val |= CN23XX_PKT_OUTPUT_CTL_TENB;
+ octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no),
+ reg_val);
+
+ /* Enable this output queue to generate Packet Count Interrupt
+ */
+ reg_val =
+ octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no));
+ reg_val |= CN23XX_PKT_OUTPUT_CTL_CENB;
+ octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no),
+ reg_val);
+ } else {
+ time_threshold = cn23xx_pf_get_oq_ticks(
+ oct, (u32)CFG_GET_OQ_INTR_TIME(cn23xx->conf));
+ cnt_threshold = (u32)CFG_GET_OQ_INTR_PKT(cn23xx->conf);
+
+ octeon_write_csr64(
+ oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(oq_no),
+ ((time_threshold << 32 | cnt_threshold)));
+ }
+}
+
+static int cn23xx_enable_io_queues(struct octeon_device *oct)
+{
+ u64 reg_val;
+ u32 srn, ern, q_no;
+ u32 loop = 1000;
+
+ srn = oct->sriov_info.pf_srn;
+ ern = srn + oct->num_iqs;
+
+ for (q_no = srn; q_no < ern; q_no++) {
+ /* set the corresponding IQ IS_64B bit */
+ if (oct->io_qmask.iq64B & BIT_ULL(q_no - srn)) {
+ reg_val = octeon_read_csr64(
+ oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ reg_val = reg_val | CN23XX_PKT_INPUT_CTL_IS_64B;
+ octeon_write_csr64(
+ oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), reg_val);
+ }
+
+ /* set the corresponding IQ ENB bit */
+ if (oct->io_qmask.iq & BIT_ULL(q_no - srn)) {
+ /* IOQs are in reset by default in PEM2 mode,
+ * so clear the reset bit
+ */
+ reg_val = octeon_read_csr64(
+ oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+
+ if (reg_val & CN23XX_PKT_INPUT_CTL_RST) {
+ while ((reg_val & CN23XX_PKT_INPUT_CTL_RST) &&
+ !(reg_val &
+ CN23XX_PKT_INPUT_CTL_QUIET) &&
+ loop--) {
+ reg_val = octeon_read_csr64(
+ oct,
+ CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ }
+ if (!loop) {
+ dev_err(&oct->pci_dev->dev,
+ "clearing the reset reg failed or setting the quiet reg failed for qno: %u\n",
+ q_no);
+ return -1;
+ }
+ reg_val = reg_val & ~CN23XX_PKT_INPUT_CTL_RST;
+ octeon_write_csr64(
+ oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+ reg_val);
+
+ reg_val = octeon_read_csr64(
+ oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ if (reg_val & CN23XX_PKT_INPUT_CTL_RST) {
+ dev_err(&oct->pci_dev->dev,
+ "clearing the reset failed for qno: %u\n",
+ q_no);
+ return -1;
+ }
+ }
+ reg_val = octeon_read_csr64(
+ oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ reg_val = reg_val | CN23XX_PKT_INPUT_CTL_RING_ENB;
+ octeon_write_csr64(
+ oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), reg_val);
+ }
+ }
+ for (q_no = srn; q_no < ern; q_no++) {
+ u32 reg_val;
+ /* set the corresponding OQ ENB bit */
+ if (oct->io_qmask.oq & BIT_ULL(q_no - srn)) {
+ reg_val = octeon_read_csr(
+ oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no));
+ reg_val = reg_val | CN23XX_PKT_OUTPUT_CTL_RING_ENB;
+ octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no),
+ reg_val);
+ }
+ }
+ return 0;
+}
+
+static void cn23xx_disable_io_queues(struct octeon_device *oct)
+{
+ int q_no, loop;
+ u64 d64;
+ u32 d32;
+ u32 srn, ern;
+
+ srn = oct->sriov_info.pf_srn;
+ ern = srn + oct->num_iqs;
+
+ /*** Disable Input Queues. ***/
+ for (q_no = srn; q_no < ern; q_no++) {
+ loop = HZ;
+
+ /* start the Reset for a particular ring */
+ WRITE_ONCE(d64, octeon_read_csr64(
+ oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)));
+ WRITE_ONCE(d64, READ_ONCE(d64) &
+ (~(CN23XX_PKT_INPUT_CTL_RING_ENB)));
+ WRITE_ONCE(d64, READ_ONCE(d64) | CN23XX_PKT_INPUT_CTL_RST);
+ octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+ READ_ONCE(d64));
+
+ /* Wait until hardware indicates that the particular IQ
+ * is out of reset.
+ */
+ WRITE_ONCE(d64, octeon_read_csr64(
+ oct, CN23XX_SLI_PKT_IOQ_RING_RST));
+ while (!(READ_ONCE(d64) & BIT_ULL(q_no)) && loop--) {
+ WRITE_ONCE(d64, octeon_read_csr64(
+ oct, CN23XX_SLI_PKT_IOQ_RING_RST));
+ schedule_timeout_uninterruptible(1);
+ }
+
+ /* Reset the doorbell register for this Input Queue. */
+ octeon_write_csr(oct, CN23XX_SLI_IQ_DOORBELL(q_no), 0xFFFFFFFF);
+ while (octeon_read_csr64(oct, CN23XX_SLI_IQ_DOORBELL(q_no)) &&
+ loop--) {
+ schedule_timeout_uninterruptible(1);
+ }
+ }
+
+ /*** Disable Output Queues. ***/
+ for (q_no = srn; q_no < ern; q_no++) {
+ loop = HZ;
+
+ /* Wait until hardware indicates that the particular IQ
+ * is out of reset. Note that SLI_PKT_RING_RST is
+ * common to both IQs and OQs.
+ */
+ WRITE_ONCE(d64, octeon_read_csr64(
+ oct, CN23XX_SLI_PKT_IOQ_RING_RST));
+ while (!(READ_ONCE(d64) & BIT_ULL(q_no)) && loop--) {
+ WRITE_ONCE(d64, octeon_read_csr64(
+ oct, CN23XX_SLI_PKT_IOQ_RING_RST));
+ schedule_timeout_uninterruptible(1);
+ }
+
+ /* Reset the doorbell register for this Output Queue. */
+ octeon_write_csr(oct, CN23XX_SLI_OQ_PKTS_CREDIT(q_no),
+ 0xFFFFFFFF);
+ while (octeon_read_csr64(oct,
+ CN23XX_SLI_OQ_PKTS_CREDIT(q_no)) &&
+ loop--) {
+ schedule_timeout_uninterruptible(1);
+ }
+
+ /* clear the SLI_PKT(0..63)_CNTS[CNT] reg value */
+ WRITE_ONCE(d32, octeon_read_csr(
+ oct, CN23XX_SLI_OQ_PKTS_SENT(q_no)));
+ octeon_write_csr(oct, CN23XX_SLI_OQ_PKTS_SENT(q_no),
+ READ_ONCE(d32));
+ }
+}
+
+static u64 cn23xx_pf_msix_interrupt_handler(void *dev)
+{
+ struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
+ struct octeon_device *oct = ioq_vector->oct_dev;
+ u64 pkts_sent;
+ u64 ret = 0;
+ struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
+
+ dev_dbg(&oct->pci_dev->dev, "In %s octeon_dev @ %p\n", __func__, oct);
+
+ if (!droq) {
+ dev_err(&oct->pci_dev->dev, "23XX bringup FIXME: oct pfnum:%d ioq_vector->ioq_num :%d droq is NULL\n",
+ oct->pf_num, ioq_vector->ioq_num);
+ return 0;
+ }
+
+ pkts_sent = readq(droq->pkts_sent_reg);
+
+ /* If our device has interrupted, then proceed. Also check
+ * for all f's, which indicates the interrupt was triggered on an
+ * error and the PCI read failed.
+ */
+ if (!pkts_sent || (pkts_sent == 0xFFFFFFFFFFFFFFFFULL))
+ return ret;
+
+ /* Write the count reg in sli_pkt_cnts to clear these interrupts. */
+ if ((pkts_sent & CN23XX_INTR_PO_INT) ||
+ (pkts_sent & CN23XX_INTR_PI_INT)) {
+ if (pkts_sent & CN23XX_INTR_PO_INT)
+ ret |= MSIX_PO_INT;
+ }
+
+ if (pkts_sent & CN23XX_INTR_PI_INT)
+ /* We will clear the count when we update the read_index. */
+ ret |= MSIX_PI_INT;
+
+ /* Never need to handle msix mbox intr for pf; they arrive on the last
+ * msix vector.
+ */
+ return ret;
+}
+
+static irqreturn_t cn23xx_interrupt_handler(void *dev)
+{
+ struct octeon_device *oct = (struct octeon_device *)dev;
+ struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+ u64 intr64;
+
+ dev_dbg(&oct->pci_dev->dev, "In %s octeon_dev @ %p\n", __func__, oct);
+ intr64 = readq(cn23xx->intr_sum_reg64);
+
+ oct->int_status = 0;
+
+ if (intr64 & CN23XX_INTR_ERR)
+ dev_err(&oct->pci_dev->dev, "OCTEON[%d]: Error Intr: 0x%016llx\n",
+ oct->octeon_id, CVM_CAST64(intr64));
+
+ if (oct->msix_on != LIO_FLAG_MSIX_ENABLED) {
+ if (intr64 & CN23XX_INTR_PKT_DATA)
+ oct->int_status |= OCT_DEV_INTR_PKT_DATA;
+ }
+
+ if (intr64 & (CN23XX_INTR_DMA0_FORCE))
+ oct->int_status |= OCT_DEV_INTR_DMA0_FORCE;
+ if (intr64 & (CN23XX_INTR_DMA1_FORCE))
+ oct->int_status |= OCT_DEV_INTR_DMA1_FORCE;
+
+ /* Clear the current interrupts */
+ writeq(intr64, cn23xx->intr_sum_reg64);
+
+ return IRQ_HANDLED;
+}
+
+static void cn23xx_bar1_idx_setup(struct octeon_device *oct, u64 core_addr,
+ u32 idx, int valid)
+{
+ u64 bar1;
+ u64 reg_adr;
+
+ if (!valid) {
+ reg_adr = lio_pci_readq(
+ oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
+ WRITE_ONCE(bar1, reg_adr);
+ lio_pci_writeq(oct, (READ_ONCE(bar1) & 0xFFFFFFFEULL),
+ CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
+ reg_adr = lio_pci_readq(
+ oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
+ WRITE_ONCE(bar1, reg_adr);
+ return;
+ }
+
+ /* The PEM(0..3)_BAR1_INDEX(0..15)[ADDR_IDX]<23:4> stores
+ * bits <41:22> of the Core Addr
+ */
+ lio_pci_writeq(oct, (((core_addr >> 22) << 4) | PCI_BAR1_MASK),
+ CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
+
+ WRITE_ONCE(bar1, lio_pci_readq(
+ oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx)));
+}
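
The index value written above can be checked in isolation. A hedged sketch follows; the PCI_BAR1_MASK value and the core address below are illustrative assumptions, not taken from the driver headers.

/* Sketch of the BAR1 index value written in cn23xx_bar1_idx_setup():
 * bits <41:22> of the core address land in ADDR_IDX<23:4>, OR'd with
 * the driver's PCI_BAR1_MASK enable bits.
 */
#include <stdint.h>
#include <stdio.h>

#define PCI_BAR1_MASK 0x3	/* assumed enable bits, for illustration */

int main(void)
{
	uint64_t core_addr = 0x1000400000ULL;	/* hypothetical */
	uint64_t idx_val = ((core_addr >> 22) << 4) | PCI_BAR1_MASK;

	printf("0x%llx\n", (unsigned long long)idx_val);	/* 0x40013 */
	return 0;
}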
+
+static void cn23xx_bar1_idx_write(struct octeon_device *oct, u32 idx, u32 mask)
+{
+ lio_pci_writeq(oct, mask,
+ CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
+}
+
+static u32 cn23xx_bar1_idx_read(struct octeon_device *oct, u32 idx)
+{
+ return (u32)lio_pci_readq(
+ oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
+}
+
+/* always call with lock held */
+static u32 cn23xx_update_read_index(struct octeon_instr_queue *iq)
+{
+ u32 new_idx;
+ u32 last_done;
+ u32 pkt_in_done = readl(iq->inst_cnt_reg);
+
+ last_done = pkt_in_done - iq->pkt_in_done;
+ iq->pkt_in_done = pkt_in_done;
+
+ /* Modulo of the new index with the IQ size will give us
+ * the new index. The iq->reset_instr_cnt is always zero for
+ * cn23xx, so no extra adjustments are needed.
+ */
+ new_idx = (iq->octeon_read_index +
+ (u32)(last_done & CN23XX_PKT_IN_DONE_CNT_MASK)) %
+ iq->max_count;
+
+ return new_idx;
+}
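
The unsigned arithmetic here is what makes the snapshot scheme safe across hardware counter wrap-around; a self-contained sketch of the same update (the names are illustrative):

/* Because both counter snapshots are u32, new_cnt - prev_cnt yields
 * the number of newly completed instructions even when the hardware
 * counter wraps.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t update_read_index(uint32_t old_idx, uint32_t max_count,
				  uint32_t prev_cnt, uint32_t new_cnt)
{
	uint32_t last_done = new_cnt - prev_cnt;	/* wraps correctly */

	return (old_idx + last_done) % max_count;
}

int main(void)
{
	/* counter wrapped: 0xFFFFFFF0 -> 0x10 still means 32 new entries */
	printf("%u\n", update_read_index(100, 512, 0xFFFFFFF0u, 0x10u));
	/* prints 132 */
	return 0;
}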
+
+static void cn23xx_enable_pf_interrupt(struct octeon_device *oct, u8 intr_flag)
+{
+ struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+ u64 intr_val = 0;
+
+ /* Divide the single write into multiple writes based on the flag. */
+ /* Enable Interrupt */
+ if (intr_flag == OCTEON_ALL_INTR) {
+ writeq(cn23xx->intr_mask64, cn23xx->intr_enb_reg64);
+ } else if (intr_flag & OCTEON_OUTPUT_INTR) {
+ intr_val = readq(cn23xx->intr_enb_reg64);
+ intr_val |= CN23XX_INTR_PKT_DATA;
+ writeq(intr_val, cn23xx->intr_enb_reg64);
+ }
+}
+
+static void cn23xx_disable_pf_interrupt(struct octeon_device *oct, u8 intr_flag)
+{
+ struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+ u64 intr_val = 0;
+
+ /* Disable Interrupts */
+ if (intr_flag == OCTEON_ALL_INTR) {
+ writeq(0, cn23xx->intr_enb_reg64);
+ } else if (intr_flag & OCTEON_OUTPUT_INTR) {
+ intr_val = readq(cn23xx->intr_enb_reg64);
+ intr_val &= ~CN23XX_INTR_PKT_DATA;
+ writeq(intr_val, cn23xx->intr_enb_reg64);
+ }
+}
+
+static void cn23xx_get_pcie_qlmport(struct octeon_device *oct)
+{
+ oct->pcie_port = (octeon_read_csr(oct, CN23XX_SLI_MAC_NUMBER)) & 0xff;
+
+ dev_dbg(&oct->pci_dev->dev, "OCTEON: CN23xx uses PCIE Port %d\n",
+ oct->pcie_port);
+}
+
+static void cn23xx_get_pf_num(struct octeon_device *oct)
+{
+ u32 fdl_bit = 0;
+
+ /** Read Function Dependency Link reg to get the function number */
+ pci_read_config_dword(oct->pci_dev, CN23XX_PCIE_SRIOV_FDL, &fdl_bit);
+ oct->pf_num = ((fdl_bit >> CN23XX_PCIE_SRIOV_FDL_BIT_POS) &
+ CN23XX_PCIE_SRIOV_FDL_MASK);
+}
+
+static void cn23xx_setup_reg_address(struct octeon_device *oct)
+{
+ u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr;
+ struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+
+ oct->reg_list.pci_win_wr_addr_hi =
+ (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_ADDR_HI);
+ oct->reg_list.pci_win_wr_addr_lo =
+ (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_ADDR_LO);
+ oct->reg_list.pci_win_wr_addr =
+ (u64 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_ADDR64);
+
+ oct->reg_list.pci_win_rd_addr_hi =
+ (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_ADDR_HI);
+ oct->reg_list.pci_win_rd_addr_lo =
+ (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_ADDR_LO);
+ oct->reg_list.pci_win_rd_addr =
+ (u64 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_ADDR64);
+
+ oct->reg_list.pci_win_wr_data_hi =
+ (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_DATA_HI);
+ oct->reg_list.pci_win_wr_data_lo =
+ (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_DATA_LO);
+ oct->reg_list.pci_win_wr_data =
+ (u64 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_DATA64);
+
+ oct->reg_list.pci_win_rd_data_hi =
+ (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_DATA_HI);
+ oct->reg_list.pci_win_rd_data_lo =
+ (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_DATA_LO);
+ oct->reg_list.pci_win_rd_data =
+ (u64 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_DATA64);
+
+ cn23xx_get_pcie_qlmport(oct);
+
+ cn23xx->intr_mask64 = CN23XX_INTR_MASK;
+ if (!oct->msix_on)
+ cn23xx->intr_mask64 |= CN23XX_INTR_PKT_TIME;
+ if (oct->rev_id >= OCTEON_CN23XX_REV_1_1)
+ cn23xx->intr_mask64 |= CN23XX_INTR_VF_MBOX;
+
+ cn23xx->intr_sum_reg64 =
+ bar0_pciaddr +
+ CN23XX_SLI_MAC_PF_INT_SUM64(oct->pcie_port, oct->pf_num);
+ cn23xx->intr_enb_reg64 =
+ bar0_pciaddr +
+ CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num);
+}
+
+static int cn23xx_sriov_config(struct octeon_device *oct)
+{
+ u32 total_rings;
+ struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+ /* num_vfs is already filled for us */
+ u32 pf_srn, num_pf_rings;
+
+ cn23xx->conf =
+ (struct octeon_config *)oct_get_config_info(oct, LIO_23XX);
+ switch (oct->rev_id) {
+ case OCTEON_CN23XX_REV_1_0:
+ total_rings = CN23XX_MAX_RINGS_PER_PF_PASS_1_0;
+ break;
+ case OCTEON_CN23XX_REV_1_1:
+ total_rings = CN23XX_MAX_RINGS_PER_PF_PASS_1_1;
+ break;
+ default:
+ total_rings = CN23XX_MAX_RINGS_PER_PF;
+ break;
+ }
+ if (!oct->sriov_info.num_pf_rings) {
+ if (total_rings > num_present_cpus())
+ num_pf_rings = num_present_cpus();
+ else
+ num_pf_rings = total_rings;
+ } else {
+ num_pf_rings = oct->sriov_info.num_pf_rings;
+
+ if (num_pf_rings > total_rings) {
+ dev_warn(&oct->pci_dev->dev,
+ "num_queues_per_pf requested %u is more than available rings. Reducing to %u\n",
+ num_pf_rings, total_rings);
+ num_pf_rings = total_rings;
+ }
+ }
+
+ total_rings = num_pf_rings;
+ /* the first ring of the pf */
+ pf_srn = total_rings - num_pf_rings;
+
+ oct->sriov_info.trs = total_rings;
+ oct->sriov_info.pf_srn = pf_srn;
+ oct->sriov_info.num_pf_rings = num_pf_rings;
+ dev_dbg(&oct->pci_dev->dev, "trs:%d pf_srn:%d num_pf_rings:%d\n",
+ oct->sriov_info.trs, oct->sriov_info.pf_srn,
+ oct->sriov_info.num_pf_rings);
+ return 0;
+}
+
+int setup_cn23xx_octeon_pf_device(struct octeon_device *oct)
+{
+ if (octeon_map_pci_barx(oct, 0, 0))
+ return 1;
+
+ if (octeon_map_pci_barx(oct, 1, MAX_BAR1_IOREMAP_SIZE)) {
+ dev_err(&oct->pci_dev->dev, "%s CN23XX BAR1 map failed\n",
+ __func__);
+ octeon_unmap_pci_barx(oct, 0);
+ return 1;
+ }
+
+ cn23xx_get_pf_num(oct);
+
+ if (cn23xx_sriov_config(oct)) {
+ octeon_unmap_pci_barx(oct, 0);
+ octeon_unmap_pci_barx(oct, 1);
+ return 1;
+ }
+
+ octeon_write_csr64(oct, CN23XX_SLI_MAC_CREDIT_CNT, 0x3F802080802080ULL);
+
+ oct->fn_list.setup_iq_regs = cn23xx_setup_iq_regs;
+ oct->fn_list.setup_oq_regs = cn23xx_setup_oq_regs;
+ oct->fn_list.process_interrupt_regs = cn23xx_interrupt_handler;
+ oct->fn_list.msix_interrupt_handler = cn23xx_pf_msix_interrupt_handler;
+
+ oct->fn_list.soft_reset = cn23xx_pf_soft_reset;
+ oct->fn_list.setup_device_regs = cn23xx_setup_pf_device_regs;
+ oct->fn_list.update_iq_read_idx = cn23xx_update_read_index;
+
+ oct->fn_list.bar1_idx_setup = cn23xx_bar1_idx_setup;
+ oct->fn_list.bar1_idx_write = cn23xx_bar1_idx_write;
+ oct->fn_list.bar1_idx_read = cn23xx_bar1_idx_read;
+
+ oct->fn_list.enable_interrupt = cn23xx_enable_pf_interrupt;
+ oct->fn_list.disable_interrupt = cn23xx_disable_pf_interrupt;
+
+ oct->fn_list.enable_io_queues = cn23xx_enable_io_queues;
+ oct->fn_list.disable_io_queues = cn23xx_disable_io_queues;
+
+ cn23xx_setup_reg_address(oct);
+
+ oct->coproc_clock_rate = 1000000ULL * cn23xx_coprocessor_clock(oct);
+
+ return 0;
+}
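
setup_cn23xx_octeon_pf_device() fills a function-pointer table so that chip-agnostic code can dispatch without referencing CN23XX symbols. A toy, self-contained sketch of that dispatch pattern; the struct and stubs below are illustrative, not the driver's real types.

#include <stdio.h>

struct fn_list {
	int (*soft_reset)(void *oct);
	int (*setup_device_regs)(void *oct);
};

static int fake_soft_reset(void *oct)
{
	(void)oct;
	puts("chip-specific soft reset");
	return 0;
}

static int fake_setup_device_regs(void *oct)
{
	(void)oct;
	puts("chip-specific register setup");
	return 0;
}

int main(void)
{
	struct fn_list fns = {
		.soft_reset = fake_soft_reset,
		.setup_device_regs = fake_setup_device_regs,
	};

	/* Generic code dispatches without knowing which chip it drives */
	if (fns.soft_reset(NULL) || fns.setup_device_regs(NULL))
		return 1;
	return 0;
}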
+
+int validate_cn23xx_pf_config_info(struct octeon_device *oct,
+ struct octeon_config *conf23xx)
+{
+ if (CFG_GET_IQ_MAX_Q(conf23xx) > CN23XX_MAX_INPUT_QUEUES) {
+ dev_err(&oct->pci_dev->dev, "%s: Num IQ (%d) exceeds Max (%d)\n",
+ __func__, CFG_GET_IQ_MAX_Q(conf23xx),
+ CN23XX_MAX_INPUT_QUEUES);
+ return 1;
+ }
+
+ if (CFG_GET_OQ_MAX_Q(conf23xx) > CN23XX_MAX_OUTPUT_QUEUES) {
+ dev_err(&oct->pci_dev->dev, "%s: Num OQ (%d) exceeds Max (%d)\n",
+ __func__, CFG_GET_OQ_MAX_Q(conf23xx),
+ CN23XX_MAX_OUTPUT_QUEUES);
+ return 1;
+ }
+
+ if (CFG_GET_IQ_INSTR_TYPE(conf23xx) != OCTEON_32BYTE_INSTR &&
+ CFG_GET_IQ_INSTR_TYPE(conf23xx) != OCTEON_64BYTE_INSTR) {
+ dev_err(&oct->pci_dev->dev, "%s: Invalid instr type for IQ\n",
+ __func__);
+ return 1;
+ }
+
+ if (!(CFG_GET_OQ_INFO_PTR(conf23xx)) ||
+ !(CFG_GET_OQ_REFILL_THRESHOLD(conf23xx))) {
+ dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
+ __func__);
+ return 1;
+ }
+
+ if (!(CFG_GET_OQ_INTR_TIME(conf23xx))) {
+ dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
+ __func__);
+ return 1;
+ }
+
+ return 0;
+}
+
+void cn23xx_dump_iq_regs(struct octeon_device *oct)
+{
+ u32 regval, q_no;
+
+ dev_dbg(&oct->pci_dev->dev, "SLI_IQ_DOORBELL_0 [0x%x]: 0x%016llx\n",
+ CN23XX_SLI_IQ_DOORBELL(0),
+ CVM_CAST64(octeon_read_csr64
+ (oct, CN23XX_SLI_IQ_DOORBELL(0))));
+
+ dev_dbg(&oct->pci_dev->dev, "SLI_IQ_BASEADDR_0 [0x%x]: 0x%016llx\n",
+ CN23XX_SLI_IQ_BASE_ADDR64(0),
+ CVM_CAST64(octeon_read_csr64
+ (oct, CN23XX_SLI_IQ_BASE_ADDR64(0))));
+
+ dev_dbg(&oct->pci_dev->dev, "SLI_IQ_FIFO_RSIZE_0 [0x%x]: 0x%016llx\n",
+ CN23XX_SLI_IQ_SIZE(0),
+ CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_IQ_SIZE(0))));
+
+ dev_dbg(&oct->pci_dev->dev, "SLI_CTL_STATUS [0x%x]: 0x%016llx\n",
+ CN23XX_SLI_CTL_STATUS,
+ CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_CTL_STATUS)));
+
+ for (q_no = 0; q_no < CN23XX_MAX_INPUT_QUEUES; q_no++) {
+ dev_dbg(&oct->pci_dev->dev, "SLI_PKT[%d]_INPUT_CTL [0x%x]: 0x%016llx\n",
+ q_no, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+ CVM_CAST64(octeon_read_csr64
+ (oct,
+ CN23XX_SLI_IQ_PKT_CONTROL64(q_no))));
+ }
+
+ pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, &regval);
+ dev_dbg(&oct->pci_dev->dev, "Config DevCtl [0x%x]: 0x%08x\n",
+ CN23XX_CONFIG_PCIE_DEVCTL, regval);
+
+ dev_dbg(&oct->pci_dev->dev, "SLI_PRT[%d]_CFG [0x%llx]: 0x%016llx\n",
+ oct->pcie_port, CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port),
+ CVM_CAST64(lio_pci_readq(
+ oct, CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port))));
+
+ dev_dbg(&oct->pci_dev->dev, "SLI_S2M_PORT[%d]_CTL [0x%x]: 0x%016llx\n",
+ oct->pcie_port, CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port),
+ CVM_CAST64(octeon_read_csr64(
+ oct, CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port))));
+}
+
+int cn23xx_fw_loaded(struct octeon_device *oct)
+{
+ u64 val;
+
+ val = octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1);
+ return (val >> 1) & 1ULL;
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
new file mode 100644
index 000000000000..21b5c9051967
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
@@ -0,0 +1,59 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+
+/*! \file cn23xx_pf_device.h
+ * \brief Host Driver: Routines that perform CN23XX specific operations.
+*/
+
+#ifndef __CN23XX_PF_DEVICE_H__
+#define __CN23XX_PF_DEVICE_H__
+
+#include "cn23xx_pf_regs.h"
+
+/* Register address and configuration for CN23XX devices.
+ * If device-specific changes need to be made then add a struct to include
+ * device-specific fields as shown in the commented section
+ */
+struct octeon_cn23xx_pf {
+ /** PCI interrupt summary register */
+ u8 __iomem *intr_sum_reg64;
+
+ /** PCI interrupt enable register */
+ u8 __iomem *intr_enb_reg64;
+
+ /** The PCI interrupt mask used by interrupt handler */
+ u64 intr_mask64;
+
+ struct octeon_config *conf;
+};
+
+int setup_cn23xx_octeon_pf_device(struct octeon_device *oct);
+
+int validate_cn23xx_pf_config_info(struct octeon_device *oct,
+ struct octeon_config *conf23xx);
+
+u32 cn23xx_pf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us);
+
+void cn23xx_dump_pf_initialized_regs(struct octeon_device *oct);
+
+int cn23xx_fw_loaded(struct octeon_device *oct);
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
new file mode 100644
index 000000000000..03d79d95ab75
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
@@ -0,0 +1,604 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+
+/*! \file cn23xx_pf_regs.h
+ * \brief Host Driver: Register Address and Register Mask values for
+ * Octeon CN23XX devices.
+*/
+
+#ifndef __CN23XX_PF_REGS_H__
+#define __CN23XX_PF_REGS_H__
+
+#define CN23XX_CONFIG_VENDOR_ID 0x00
+#define CN23XX_CONFIG_DEVICE_ID 0x02
+
+#define CN23XX_CONFIG_XPANSION_BAR 0x38
+
+#define CN23XX_CONFIG_MSIX_CAP 0x50
+#define CN23XX_CONFIG_MSIX_LMSI 0x54
+#define CN23XX_CONFIG_MSIX_UMSI 0x58
+#define CN23XX_CONFIG_MSIX_MSIMD 0x5C
+#define CN23XX_CONFIG_MSIX_MSIMM 0x60
+#define CN23XX_CONFIG_MSIX_MSIMP 0x64
+
+#define CN23XX_CONFIG_PCIE_CAP 0x70
+#define CN23XX_CONFIG_PCIE_DEVCAP 0x74
+#define CN23XX_CONFIG_PCIE_DEVCTL 0x78
+#define CN23XX_CONFIG_PCIE_LINKCAP 0x7C
+#define CN23XX_CONFIG_PCIE_LINKCTL 0x80
+#define CN23XX_CONFIG_PCIE_SLOTCAP 0x84
+#define CN23XX_CONFIG_PCIE_SLOTCTL 0x88
+#define CN23XX_CONFIG_PCIE_DEVCTL2 0x98
+#define CN23XX_CONFIG_PCIE_LINKCTL2 0xA0
+#define CN23XX_CONFIG_PCIE_UNCORRECT_ERR_MASK 0x108
+#define CN23XX_CONFIG_PCIE_CORRECT_ERR_STATUS 0x110
+#define CN23XX_CONFIG_PCIE_DEVCTL_MASK 0x00040000
+
+#define CN23XX_PCIE_SRIOV_FDL 0x188
+#define CN23XX_PCIE_SRIOV_FDL_BIT_POS 0x10
+#define CN23XX_PCIE_SRIOV_FDL_MASK 0xFF
+
+#define CN23XX_CONFIG_PCIE_FLTMSK 0x720
+
+#define CN23XX_CONFIG_SRIOV_VFDEVID 0x190
+
+#define CN23XX_CONFIG_SRIOV_BAR_START 0x19C
+#define CN23XX_CONFIG_SRIOV_BARX(i) \
+ (CN23XX_CONFIG_SRIOV_BAR_START + (i * 4))
+#define CN23XX_CONFIG_SRIOV_BAR_PF 0x08
+#define CN23XX_CONFIG_SRIOV_BAR_64BIT 0x04
+#define CN23XX_CONFIG_SRIOV_BAR_IO 0x01
+
+/* ############## BAR0 Registers ################ */
+
+#define CN23XX_SLI_CTL_PORT_START 0x286E0
+#define CN23XX_PORT_OFFSET 0x10
+
+#define CN23XX_SLI_CTL_PORT(p) \
+ (CN23XX_SLI_CTL_PORT_START + ((p) * CN23XX_PORT_OFFSET))
+
+/* 2 scratch registers (64-bit) */
+#define CN23XX_SLI_WINDOW_CTL 0x282E0
+#define CN23XX_SLI_SCRATCH1 0x283C0
+#define CN23XX_SLI_SCRATCH2 0x283D0
+#define CN23XX_SLI_WINDOW_CTL_DEFAULT 0x200000ULL
+
+/* 1 register (64-bit) - SLI_CTL_STATUS */
+#define CN23XX_SLI_CTL_STATUS 0x28570
+
+/* SLI Packet Input Jabber Register (64-bit register)
+ * <31:0> holds the byte count limiting the packet sizes
+ * allowed for SLI inbound packets.
+ * The default value is 0xFA00 (=64000).
+ */
+#define CN23XX_SLI_PKT_IN_JABBER 0x29170
+/* The input jabber is used to determine the TSO max size.
+ * Due to a H/W limitation, this needs to be reduced to 60000
+ * in order to do H/W TSO and avoid WQE malformation
+ * (PKO_BUG_24989_WQE_LEN)
+ */
+#define CN23XX_DEFAULT_INPUT_JABBER 0xEA60 /*60000*/
+
+#define CN23XX_WIN_WR_ADDR_LO 0x20000
+#define CN23XX_WIN_WR_ADDR_HI 0x20004
+#define CN23XX_WIN_WR_ADDR64 CN23XX_WIN_WR_ADDR_LO
+
+#define CN23XX_WIN_RD_ADDR_LO 0x20010
+#define CN23XX_WIN_RD_ADDR_HI 0x20014
+#define CN23XX_WIN_RD_ADDR64 CN23XX_WIN_RD_ADDR_LO
+
+#define CN23XX_WIN_WR_DATA_LO 0x20020
+#define CN23XX_WIN_WR_DATA_HI 0x20024
+#define CN23XX_WIN_WR_DATA64 CN23XX_WIN_WR_DATA_LO
+
+#define CN23XX_WIN_RD_DATA_LO 0x20040
+#define CN23XX_WIN_RD_DATA_HI 0x20044
+#define CN23XX_WIN_RD_DATA64 CN23XX_WIN_RD_DATA_LO
+
+#define CN23XX_WIN_WR_MASK_LO 0x20030
+#define CN23XX_WIN_WR_MASK_HI 0x20034
+#define CN23XX_WIN_WR_MASK_REG CN23XX_WIN_WR_MASK_LO
+#define CN23XX_SLI_MAC_CREDIT_CNT 0x23D70
+
+/* 4 registers (64-bit) for mapping IOQs to MACs(PEMs)-
+ * SLI_PKT_MAC(0..3)_PF(0..1)_RINFO
+ */
+#define CN23XX_SLI_PKT_MAC_RINFO_START64 0x29030
+
+/*1 register (64-bit) to determine whether IOQs are in reset. */
+#define CN23XX_SLI_PKT_IOQ_RING_RST 0x291E0
+
+/* Each Input Queue register is at a 16-byte Offset in BAR0 */
+#define CN23XX_IQ_OFFSET 0x20000
+
+#define CN23XX_MAC_RINFO_OFFSET 0x20
+#define CN23XX_PF_RINFO_OFFSET 0x10
+
+#define CN23XX_SLI_PKT_MAC_RINFO64(mac, pf) \
+ (CN23XX_SLI_PKT_MAC_RINFO_START64 + \
+ ((mac) * CN23XX_MAC_RINFO_OFFSET) + \
+ ((pf) * CN23XX_PF_RINFO_OFFSET))
+
+/** mask for total rings, setting TRS to base */
+#define CN23XX_PKT_MAC_CTL_RINFO_TRS BIT_ULL(16)
+/** mask for starting ring number: setting SRN <6:0> = 0x7F */
+#define CN23XX_PKT_MAC_CTL_RINFO_SRN (0x7F)
+
+/* Starting bit of the TRS field in CN23XX_SLI_PKT_MAC_RINFO64 register */
+#define CN23XX_PKT_MAC_CTL_RINFO_TRS_BIT_POS 16
+/* Starting bit of SRN field in CN23XX_SLI_PKT_MAC_RINFO64 register */
+#define CN23XX_PKT_MAC_CTL_RINFO_SRN_BIT_POS 0
+/* Starting bit of RPVF field in CN23XX_SLI_PKT_MAC_RINFO64 register */
+#define CN23XX_PKT_MAC_CTL_RINFO_RPVF_BIT_POS 32
+/* Starting bit of NVFS field in CN23XX_SLI_PKT_MAC_RINFO64 register */
+#define CN23XX_PKT_MAC_CTL_RINFO_NVFS_BIT_POS 48
+
+/*###################### REQUEST QUEUE #########################*/
+
+/* 64 registers for Input Queue Instr Count - SLI_PKT_IN_DONE0_CNTS */
+#define CN23XX_SLI_IQ_INSTR_COUNT_START64 0x10040
+
+/* 64 registers for Input Queues Start Addr - SLI_PKT0_INSTR_BADDR */
+#define CN23XX_SLI_IQ_BASE_ADDR_START64 0x10010
+
+/* 64 registers for Input Doorbell - SLI_PKT0_INSTR_BAOFF_DBELL */
+#define CN23XX_SLI_IQ_DOORBELL_START 0x10020
+
+/* 64 registers for Input Queue size - SLI_PKT0_INSTR_FIFO_RSIZE */
+#define CN23XX_SLI_IQ_SIZE_START 0x10030
+
+/* 64 registers (64-bit) - ES, RO, NS, Arbitration for Input Queue Data &
+ * gather list fetches. SLI_PKT(0..63)_INPUT_CONTROL.
+ */
+#define CN23XX_SLI_IQ_PKT_CONTROL_START64 0x10000
+
+/*------- Request Queue Macros ---------*/
+#define CN23XX_SLI_IQ_PKT_CONTROL64(iq) \
+ (CN23XX_SLI_IQ_PKT_CONTROL_START64 + ((iq) * CN23XX_IQ_OFFSET))
+
+#define CN23XX_SLI_IQ_BASE_ADDR64(iq) \
+ (CN23XX_SLI_IQ_BASE_ADDR_START64 + ((iq) * CN23XX_IQ_OFFSET))
+
+#define CN23XX_SLI_IQ_SIZE(iq) \
+ (CN23XX_SLI_IQ_SIZE_START + ((iq) * CN23XX_IQ_OFFSET))
+
+#define CN23XX_SLI_IQ_DOORBELL(iq) \
+ (CN23XX_SLI_IQ_DOORBELL_START + ((iq) * CN23XX_IQ_OFFSET))
+
+#define CN23XX_SLI_IQ_INSTR_COUNT64(iq) \
+ (CN23XX_SLI_IQ_INSTR_COUNT_START64 + ((iq) * CN23XX_IQ_OFFSET))
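
Each per-queue register repeats at a 0x20000 stride from its queue-0 base address, so the macros above are pure address arithmetic. A self-contained check, reusing the doorbell macro verbatim:

#include <assert.h>
#include <stdio.h>

#define CN23XX_IQ_OFFSET		0x20000
#define CN23XX_SLI_IQ_DOORBELL_START	0x10020
#define CN23XX_SLI_IQ_DOORBELL(iq) \
	(CN23XX_SLI_IQ_DOORBELL_START + ((iq) * CN23XX_IQ_OFFSET))

int main(void)
{
	assert(CN23XX_SLI_IQ_DOORBELL(0) == 0x10020);
	assert(CN23XX_SLI_IQ_DOORBELL(2) == 0x50020);
	printf("IQ3 doorbell at 0x%x\n", CN23XX_SLI_IQ_DOORBELL(3));
	/* prints 0x70020 */
	return 0;
}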
+
+/*------------------ Masks ----------------*/
+#define CN23XX_PKT_INPUT_CTL_VF_NUM BIT_ULL(32)
+#define CN23XX_PKT_INPUT_CTL_MAC_NUM BIT(29)
+/* Number of instructions to be read in one MAC read request.
+ * setting to Max value(4)
+ */
+#define CN23XX_PKT_INPUT_CTL_RDSIZE (3 << 25)
+#define CN23XX_PKT_INPUT_CTL_IS_64B BIT(24)
+#define CN23XX_PKT_INPUT_CTL_RST BIT(23)
+#define CN23XX_PKT_INPUT_CTL_QUIET BIT(28)
+#define CN23XX_PKT_INPUT_CTL_RING_ENB BIT(22)
+#define CN23XX_PKT_INPUT_CTL_DATA_NS BIT(8)
+#define CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP BIT(6)
+#define CN23XX_PKT_INPUT_CTL_DATA_RO BIT(5)
+#define CN23XX_PKT_INPUT_CTL_USE_CSR BIT(4)
+#define CN23XX_PKT_INPUT_CTL_GATHER_NS BIT(3)
+#define CN23XX_PKT_INPUT_CTL_GATHER_ES_64B_SWAP (2)
+#define CN23XX_PKT_INPUT_CTL_GATHER_RO (1)
+
+/** Rings per Virtual Function */
+#define CN23XX_PKT_INPUT_CTL_RPVF_MASK (0x3F)
+#define CN23XX_PKT_INPUT_CTL_RPVF_POS (48)
+/** These bits [47:45] select the Physical Function number within the MAC */
+#define CN23XX_PKT_INPUT_CTL_PF_NUM_MASK (0x7)
+#define CN23XX_PKT_INPUT_CTL_PF_NUM_POS (45)
+/** These bits [44:32] select the Virtual Function number within the PF */
+#define CN23XX_PKT_INPUT_CTL_VF_NUM_MASK (0x1FFF)
+#define CN23XX_PKT_INPUT_CTL_VF_NUM_POS (32)
+#define CN23XX_PKT_INPUT_CTL_MAC_NUM_MASK (0x3)
+#define CN23XX_PKT_INPUT_CTL_MAC_NUM_POS (29)
+#define CN23XX_PKT_IN_DONE_WMARK_MASK (0xFFFFULL)
+#define CN23XX_PKT_IN_DONE_WMARK_BIT_POS (32)
+#define CN23XX_PKT_IN_DONE_CNT_MASK (0x00000000FFFFFFFFULL)
+
+#ifdef __LITTLE_ENDIAN_BITFIELD
+#define CN23XX_PKT_INPUT_CTL_MASK \
+ (CN23XX_PKT_INPUT_CTL_RDSIZE | \
+ CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP | \
+ CN23XX_PKT_INPUT_CTL_USE_CSR)
+#else
+#define CN23XX_PKT_INPUT_CTL_MASK \
+ (CN23XX_PKT_INPUT_CTL_RDSIZE | \
+ CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP | \
+ CN23XX_PKT_INPUT_CTL_USE_CSR | \
+ CN23XX_PKT_INPUT_CTL_GATHER_ES_64B_SWAP)
+#endif
+
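The only difference between the two variants is the gather-list byte-swap bit, which is needed when host and NIC endianness differ; a sketch checking the mask values that result from the bit positions above:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t rdsize = 3ULL << 25;  /* 4 instructions per MAC read request */
	uint64_t data_es = 1ULL << 6;  /* DATA_ES_64B_SWAP */
	uint64_t use_csr = 1ULL << 4;
	uint64_t gather_es = 2;        /* GATHER_ES_64B_SWAP */

	/* little-endian hosts: no gather-list swap */
	assert((rdsize | data_es | use_csr) == 0x6000050ULL);
	/* big-endian hosts swap gather-list entries as well */
	assert((rdsize | data_es | use_csr | gather_es) == 0x6000052ULL);
	return 0;
}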
+/** Masks for SLI_PKT_IN_DONE(0..63)_CNTS Register */
+#define CN23XX_IN_DONE_CNTS_PI_INT BIT_ULL(62)
+#define CN23XX_IN_DONE_CNTS_CINT_ENB BIT_ULL(48)
+
+/*############################ OUTPUT QUEUE #########################*/
+
+/* 64 registers for Output queue control - SLI_PKT(0..63)_OUTPUT_CONTROL */
+#define CN23XX_SLI_OQ_PKT_CONTROL_START 0x10050
+
+/* 64 registers for Output queue buffer and info size - SLI_PKT0_OUT_SIZE */
+#define CN23XX_SLI_OQ0_BUFF_INFO_SIZE 0x10060
+
+/* 64 registers for Output Queue Start Addr - SLI_PKT0_SLIST_BADDR */
+#define CN23XX_SLI_OQ_BASE_ADDR_START64 0x10070
+
+/* 64 registers for Output Queue Packet Credits - SLI_PKT0_SLIST_BAOFF_DBELL */
+#define CN23XX_SLI_OQ_PKT_CREDITS_START 0x10080
+
+/* 64 registers for Output Queue size - SLI_PKT0_SLIST_FIFO_RSIZE */
+#define CN23XX_SLI_OQ_SIZE_START 0x10090
+
+/* 64 registers for Output Queue Packet Count - SLI_PKT0_CNTS */
+#define CN23XX_SLI_OQ_PKT_SENT_START 0x100B0
+
+/* 64 registers for Output Queue INT Levels - SLI_PKT0_INT_LEVELS */
+#define CN23XX_SLI_OQ_PKT_INT_LEVELS_START64 0x100A0
+
+/* Each Output Queue register is at a 0x20000-byte offset in BAR0 */
+#define CN23XX_OQ_OFFSET 0x20000
+
+/* 1 register (64-bit) for Output Queue backpressure across all rings. */
+#define CN23XX_SLI_OQ_WMARK 0x29180
+
+/* Global pkt control register */
+#define CN23XX_SLI_GBL_CONTROL 0x29210
+
+/* Backpressure enable register for PF0 */
+#define CN23XX_SLI_OUT_BP_EN_W1S 0x29260
+
+/* Backpressure enable register for PF1 */
+#define CN23XX_SLI_OUT_BP_EN2_W1S 0x29270
+
+/* Backpressure disable register for PF0 */
+#define CN23XX_SLI_OUT_BP_EN_W1C 0x29280
+
+/* Backpressure disable register for PF1 */
+#define CN23XX_SLI_OUT_BP_EN2_W1C 0x29290
+
+/*------- Output Queue Macros ---------*/
+
+#define CN23XX_SLI_OQ_PKT_CONTROL(oq) \
+ (CN23XX_SLI_OQ_PKT_CONTROL_START + ((oq) * CN23XX_OQ_OFFSET))
+
+#define CN23XX_SLI_OQ_BASE_ADDR64(oq) \
+ (CN23XX_SLI_OQ_BASE_ADDR_START64 + ((oq) * CN23XX_OQ_OFFSET))
+
+#define CN23XX_SLI_OQ_SIZE(oq) \
+ (CN23XX_SLI_OQ_SIZE_START + ((oq) * CN23XX_OQ_OFFSET))
+
+#define CN23XX_SLI_OQ_BUFF_INFO_SIZE(oq) \
+ (CN23XX_SLI_OQ0_BUFF_INFO_SIZE + ((oq) * CN23XX_OQ_OFFSET))
+
+#define CN23XX_SLI_OQ_PKTS_SENT(oq) \
+ (CN23XX_SLI_OQ_PKT_SENT_START + ((oq) * CN23XX_OQ_OFFSET))
+
+#define CN23XX_SLI_OQ_PKTS_CREDIT(oq) \
+ (CN23XX_SLI_OQ_PKT_CREDITS_START + ((oq) * CN23XX_OQ_OFFSET))
+
+#define CN23XX_SLI_OQ_PKT_INT_LEVELS(oq) \
+ (CN23XX_SLI_OQ_PKT_INT_LEVELS_START64 + \
+ ((oq) * CN23XX_OQ_OFFSET))
+
+/* Macros for accessing CNT and TIME separately in INT_LEVELS */
+#define CN23XX_SLI_OQ_PKT_INT_LEVELS_CNT(oq) \
+ (CN23XX_SLI_OQ_PKT_INT_LEVELS_START64 + \
+ ((oq) * CN23XX_OQ_OFFSET))
+
+#define CN23XX_SLI_OQ_PKT_INT_LEVELS_TIME(oq) \
+ (CN23XX_SLI_OQ_PKT_INT_LEVELS_START64 + \
+ ((oq) * CN23XX_OQ_OFFSET) + 4)
+
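Because TIME sits at byte offset +4 within the 64-bit INT_LEVELS register, CNT occupies bits 31:0 and TIME bits 63:32 on a little-endian BAR mapping (an assumption of this sketch); packing both halves into one 64-bit write is therefore equivalent:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t cnt = 64, time = 0x57;

	/* One 64-bit INT_LEVELS write equivalent to a 32-bit CNT write
	 * at +0 followed by a 32-bit TIME write at +4.
	 */
	uint64_t levels = ((uint64_t)time << 32) | cnt;

	assert((uint32_t)levels == cnt);          /* CNT half */
	assert((uint32_t)(levels >> 32) == time); /* TIME half */
	return 0;
}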
+/*------------------ Masks ----------------*/
+#define CN23XX_PKT_OUTPUT_CTL_TENB BIT(13)
+#define CN23XX_PKT_OUTPUT_CTL_CENB BIT(12)
+#define CN23XX_PKT_OUTPUT_CTL_IPTR BIT(11)
+#define CN23XX_PKT_OUTPUT_CTL_ES BIT(9)
+#define CN23XX_PKT_OUTPUT_CTL_NSR BIT(8)
+#define CN23XX_PKT_OUTPUT_CTL_ROR BIT(7)
+#define CN23XX_PKT_OUTPUT_CTL_DPTR BIT(6)
+#define CN23XX_PKT_OUTPUT_CTL_BMODE BIT(5)
+#define CN23XX_PKT_OUTPUT_CTL_ES_P BIT(3)
+#define CN23XX_PKT_OUTPUT_CTL_NSR_P BIT(2)
+#define CN23XX_PKT_OUTPUT_CTL_ROR_P BIT(1)
+#define CN23XX_PKT_OUTPUT_CTL_RING_ENB BIT(0)
+
+/*######################### Mailbox Reg Macros ########################*/
+#define CN23XX_SLI_PKT_MBOX_INT_START 0x10210
+#define CN23XX_SLI_PKT_PF_VF_MBOX_SIG_START 0x10200
+#define CN23XX_SLI_MAC_PF_MBOX_INT_START 0x27380
+
+#define CN23XX_SLI_MBOX_OFFSET 0x20000
+#define CN23XX_SLI_MBOX_SIG_IDX_OFFSET 0x8
+
+#define CN23XX_SLI_PKT_MBOX_INT(q) \
+ (CN23XX_SLI_PKT_MBOX_INT_START + ((q) * CN23XX_SLI_MBOX_OFFSET))
+
+#define CN23XX_SLI_PKT_PF_VF_MBOX_SIG(q, idx) \
+ (CN23XX_SLI_PKT_PF_VF_MBOX_SIG_START + \
+ ((q) * CN23XX_SLI_MBOX_OFFSET + \
+ (idx) * CN23XX_SLI_MBOX_SIG_IDX_OFFSET))
+
+#define CN23XX_SLI_MAC_PF_MBOX_INT(mac, pf) \
+ (CN23XX_SLI_MAC_PF_MBOX_INT_START + \
+ ((mac) * CN23XX_MAC_INT_OFFSET + \
+ (pf) * CN23XX_PF_INT_OFFSET))
+
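CN23XX_SLI_PKT_PF_VF_MBOX_SIG(q, idx) combines the 0x20000 per-queue stride with the 8-byte per-signature-register stride; a sketch of the arithmetic with the constants copied from the defines above:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t start = 0x10200;   /* CN23XX_SLI_PKT_PF_VF_MBOX_SIG_START */
	uint64_t qstride = 0x20000; /* CN23XX_SLI_MBOX_OFFSET */
	uint64_t sstride = 0x8;     /* CN23XX_SLI_MBOX_SIG_IDX_OFFSET */

	assert(start + 0 * qstride + 0 * sstride == 0x10200); /* q0, sig 0 */
	assert(start + 0 * qstride + 1 * sstride == 0x10208); /* q0, sig 1 */
	assert(start + 2 * qstride + 0 * sstride == 0x50200); /* q2, sig 0 */
	return 0;
}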
+/*######################### DMA Counters #########################*/
+
+/* 2 registers (64-bit) - DMA Count - 1 for each DMA counter 0/1. */
+#define CN23XX_DMA_CNT_START 0x28400
+
+/* 2 registers (64-bit) - DMA Timer 0/1, contains DMA timer values */
+/* SLI_DMA_0_TIM */
+#define CN23XX_DMA_TIM_START 0x28420
+
+/* 2 registers (64-bit) - DMA count & Time Interrupt threshold -
+ * SLI_DMA_0_INT_LEVEL
+ */
+#define CN23XX_DMA_INT_LEVEL_START 0x283E0
+
+/* Each DMA register is at a 16-byte Offset in BAR0 */
+#define CN23XX_DMA_OFFSET 0x10
+
+/*---------- DMA Counter Macros ---------*/
+#define CN23XX_DMA_CNT(dq) \
+ (CN23XX_DMA_CNT_START + ((dq) * CN23XX_DMA_OFFSET))
+
+#define CN23XX_DMA_INT_LEVEL(dq) \
+ (CN23XX_DMA_INT_LEVEL_START + ((dq) * CN23XX_DMA_OFFSET))
+
+#define CN23XX_DMA_PKT_INT_LEVEL(dq) \
+ (CN23XX_DMA_INT_LEVEL_START + ((dq) * CN23XX_DMA_OFFSET))
+
+#define CN23XX_DMA_TIME_INT_LEVEL(dq) \
+ (CN23XX_DMA_INT_LEVEL_START + 4 + ((dq) * CN23XX_DMA_OFFSET))
+
+#define CN23XX_DMA_TIM(dq) \
+ (CN23XX_DMA_TIM_START + ((dq) * CN23XX_DMA_OFFSET))
+
+/*######################## MSIX TABLE #########################*/
+
+#define CN23XX_MSIX_TABLE_ADDR_START 0x0
+#define CN23XX_MSIX_TABLE_DATA_START 0x8
+
+#define CN23XX_MSIX_TABLE_SIZE 0x10
+#define CN23XX_MSIX_TABLE_ENTRIES 0x41
+
+#define CN23XX_MSIX_ENTRY_VECTOR_CTL BIT_ULL(32)
+
+#define CN23XX_MSIX_TABLE_ADDR(idx) \
+ (CN23XX_MSIX_TABLE_ADDR_START + ((idx) * CN23XX_MSIX_TABLE_SIZE))
+
+#define CN23XX_MSIX_TABLE_DATA(idx) \
+ (CN23XX_MSIX_TABLE_DATA_START + ((idx) * CN23XX_MSIX_TABLE_SIZE))
+
+/*######################## INTERRUPTS #########################*/
+#define CN23XX_MAC_INT_OFFSET 0x20
+#define CN23XX_PF_INT_OFFSET 0x10
+
+/* 1 register (64-bit) for Interrupt Summary */
+#define CN23XX_SLI_INT_SUM64 0x27000
+
+/* 4 registers (64-bit) for Interrupt Enable for each Port */
+#define CN23XX_SLI_INT_ENB64 0x27080
+
+#define CN23XX_SLI_MAC_PF_INT_SUM64(mac, pf) \
+ (CN23XX_SLI_INT_SUM64 + \
+ ((mac) * CN23XX_MAC_INT_OFFSET) + \
+ ((pf) * CN23XX_PF_INT_OFFSET))
+
+#define CN23XX_SLI_MAC_PF_INT_ENB64(mac, pf) \
+ (CN23XX_SLI_INT_ENB64 + \
+ ((mac) * CN23XX_MAC_INT_OFFSET) + \
+ ((pf) * CN23XX_PF_INT_OFFSET))
+
+/* 1 register (64-bit) to indicate which Output Queue reached pkt threshold */
+#define CN23XX_SLI_PKT_CNT_INT 0x29130
+
+/* 1 register (64-bit) to indicate which Output Queue reached time threshold */
+#define CN23XX_SLI_PKT_TIME_INT 0x29140
+
+/*------------------ Interrupt Masks ----------------*/
+
+#define CN23XX_INTR_PO_INT BIT_ULL(63)
+#define CN23XX_INTR_PI_INT BIT_ULL(62)
+#define CN23XX_INTR_MBOX_INT BIT_ULL(61)
+#define CN23XX_INTR_RESEND BIT_ULL(60)
+
+#define CN23XX_INTR_CINT_ENB BIT_ULL(48)
+#define CN23XX_INTR_MBOX_ENB BIT(0)
+
+#define CN23XX_INTR_RML_TIMEOUT_ERR (1)
+
+#define CN23XX_INTR_MIO_INT BIT(1)
+
+#define CN23XX_INTR_RESERVED1 (3 << 2)
+
+#define CN23XX_INTR_PKT_COUNT BIT(4)
+#define CN23XX_INTR_PKT_TIME BIT(5)
+
+#define CN23XX_INTR_RESERVED2 (3 << 6)
+
+#define CN23XX_INTR_M0UPB0_ERR BIT(8)
+#define CN23XX_INTR_M0UPWI_ERR BIT(9)
+#define CN23XX_INTR_M0UNB0_ERR BIT(10)
+#define CN23XX_INTR_M0UNWI_ERR BIT(11)
+
+#define CN23XX_INTR_RESERVED3 (0xFFFFFULL << 12)
+
+#define CN23XX_INTR_DMA0_FORCE BIT_ULL(32)
+#define CN23XX_INTR_DMA1_FORCE BIT_ULL(33)
+
+#define CN23XX_INTR_DMA0_COUNT BIT_ULL(34)
+#define CN23XX_INTR_DMA1_COUNT BIT_ULL(35)
+
+#define CN23XX_INTR_DMA0_TIME BIT_ULL(36)
+#define CN23XX_INTR_DMA1_TIME BIT_ULL(37)
+
+#define CN23XX_INTR_RESERVED4 (0x7FFFFULL << 38)
+
+#define CN23XX_INTR_VF_MBOX BIT_ULL(57)
+#define CN23XX_INTR_DMAVF_ERR BIT_ULL(58)
+#define CN23XX_INTR_DMAPF_ERR BIT_ULL(59)
+
+#define CN23XX_INTR_PKTVF_ERR BIT_ULL(60)
+#define CN23XX_INTR_PKTPF_ERR BIT_ULL(61)
+#define CN23XX_INTR_PPVF_ERR BIT_ULL(62)
+#define CN23XX_INTR_PPPF_ERR BIT_ULL(63)
+
+#define CN23XX_INTR_DMA0_DATA (CN23XX_INTR_DMA0_TIME)
+#define CN23XX_INTR_DMA1_DATA (CN23XX_INTR_DMA1_TIME)
+
+#define CN23XX_INTR_DMA_DATA \
+ (CN23XX_INTR_DMA0_DATA | CN23XX_INTR_DMA1_DATA)
+
+/* By default only TIME based */
+#define CN23XX_INTR_PKT_DATA (CN23XX_INTR_PKT_TIME)
+/* For both COUNT and TIME based */
+/* #define CN23XX_INTR_PKT_DATA \
+ * (CN23XX_INTR_PKT_COUNT | CN23XX_INTR_PKT_TIME)
+ */
+
+/* Sum of interrupts for all PCI-Express Data Interrupts */
+#define CN23XX_INTR_PCIE_DATA \
+ (CN23XX_INTR_DMA_DATA | CN23XX_INTR_PKT_DATA)
+
+/* Sum of interrupts for error events */
+#define CN23XX_INTR_ERR \
+ (CN23XX_INTR_M0UPB0_ERR | \
+ CN23XX_INTR_M0UPWI_ERR | \
+ CN23XX_INTR_M0UNB0_ERR | \
+ CN23XX_INTR_M0UNWI_ERR | \
+ CN23XX_INTR_DMAVF_ERR | \
+ CN23XX_INTR_DMAPF_ERR | \
+ CN23XX_INTR_PKTPF_ERR | \
+ CN23XX_INTR_PPPF_ERR | \
+ CN23XX_INTR_PPVF_ERR)
+
+/* Programmed Mask for Interrupt Sum */
+#define CN23XX_INTR_MASK \
+ (CN23XX_INTR_DMA_DATA | \
+ CN23XX_INTR_DMA0_FORCE | \
+ CN23XX_INTR_DMA1_FORCE | \
+ CN23XX_INTR_MIO_INT | \
+ CN23XX_INTR_ERR)
+
+/* 4 registers (64-bit) */
+#define CN23XX_SLI_S2M_PORT_CTL_START 0x23D80
+#define CN23XX_SLI_S2M_PORTX_CTL(port) \
+ (CN23XX_SLI_S2M_PORT_CTL_START + ((port) * 0x10))
+
+#define CN23XX_SLI_MAC_NUMBER 0x20050
+
+/** The PEM(0..3)_BAR1_INDEX(0..15) address is defined as
+ * addr = (0x00011800C0000100 | port << 24 | idx << 3)
+ * where port is PEM(0..3) and idx is INDEX(0..15).
+ */
+#define CN23XX_PEM_BAR1_INDEX_START 0x00011800C0000100ULL
+#define CN23XX_PEM_OFFSET 24
+#define CN23XX_BAR1_INDEX_OFFSET 3
+
+#define CN23XX_PEM_BAR1_INDEX_REG(port, idx) \
+ (CN23XX_PEM_BAR1_INDEX_START + ((port) << CN23XX_PEM_OFFSET) + \
+ ((idx) << CN23XX_BAR1_INDEX_OFFSET))
+
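A sketch of this computation for one concrete case, assuming port and idx stay within their documented ranges so the shifted fields never reach the base's low bits:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t start = 0x00011800C0000100ULL;

	/* PEM1, BAR1 index 5: start + (1 << 24) + (5 << 3) */
	uint64_t addr = start + (1ULL << 24) + (5ULL << 3);

	assert(addr == 0x00011800C1000128ULL);
	return 0;
}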
+/*############################ DPI #########################*/
+
+/* 1 register (64-bit) - provides DMA Enable */
+#define CN23XX_DPI_CTL 0x0001df0000000040ULL
+
+/* 1 register (64-bit) - Controls the DMA IO Operation */
+#define CN23XX_DPI_DMA_CONTROL 0x0001df0000000048ULL
+
+/* 1 register (64-bit) - Provides DMA Instr'n Queue Enable */
+#define CN23XX_DPI_REQ_GBL_ENB 0x0001df0000000050ULL
+
+/* 1 register (64-bit) - DPI_REQ_ERR_RSP
+ * Indicates which Instr'n Queue received error response from the IO sub-system
+ */
+#define CN23XX_DPI_REQ_ERR_RSP 0x0001df0000000058ULL
+
+/* 1 register (64-bit) - DPI_REQ_ERR_RST
+ * Indicates which Instr'n Queue dropped an Instr'n
+ */
+#define CN23XX_DPI_REQ_ERR_RST 0x0001df0000000060ULL
+
+/* 6 register (64-bit) - DPI_DMA_ENG(0..5)_EN
+ * Provides DMA Engine Queue Enable
+ */
+#define CN23XX_DPI_DMA_ENG0_ENB 0x0001df0000000080ULL
+#define CN23XX_DPI_DMA_ENG_ENB(eng) (CN23XX_DPI_DMA_ENG0_ENB + ((eng) * 8))
+
+/* 8 register (64-bit) - DPI_DMA(0..7)_REQQ_CTL
+ * Provides control bits for transaction on 8 Queues
+ */
+#define CN23XX_DPI_DMA_REQQ0_CTL 0x0001df0000000180ULL
+#define CN23XX_DPI_DMA_REQQ_CTL(q_no) \
+ (CN23XX_DPI_DMA_REQQ0_CTL + ((q_no) * 8))
+
+/* 6 register (64-bit) - DPI_ENG(0..5)_BUF
+ * Provides DMA Engine FIFO (Queue) Size
+ */
+#define CN23XX_DPI_DMA_ENG0_BUF 0x0001df0000000880ULL
+#define CN23XX_DPI_DMA_ENG_BUF(eng) \
+ (CN23XX_DPI_DMA_ENG0_BUF + ((eng) * 8))
+
+/* 4 Registers (64-bit) */
+#define CN23XX_DPI_SLI_PRT_CFG_START 0x0001df0000000900ULL
+#define CN23XX_DPI_SLI_PRTX_CFG(port) \
+ (CN23XX_DPI_SLI_PRT_CFG_START + ((port) * 0x8))
+
+/* Masks for DPI_DMA_CONTROL Register */
+#define CN23XX_DPI_DMA_COMMIT_MODE BIT_ULL(58)
+#define CN23XX_DPI_DMA_PKT_EN BIT_ULL(56)
+#define CN23XX_DPI_DMA_ENB (0x0FULL << 48)
+/* Set DMA Control to update the packet count (rather than the byte count)
+ * sent by DMA when Interrupt Coalescing (CA mode) is used.
+ */
+#define CN23XX_DPI_DMA_O_ADD1 BIT(19)
+/* selects 64-bit Byte Swap Mode */
+#define CN23XX_DPI_DMA_O_ES BIT(15)
+#define CN23XX_DPI_DMA_O_MODE BIT(14)
+
+#define CN23XX_DPI_DMA_CTL_MASK \
+ (CN23XX_DPI_DMA_COMMIT_MODE | \
+ CN23XX_DPI_DMA_PKT_EN | \
+ CN23XX_DPI_DMA_O_ES | \
+ CN23XX_DPI_DMA_O_MODE)
+
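For reference, a sketch checking the value CN23XX_DPI_DMA_CTL_MASK resolves to, given the bit positions above:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t commit_mode = 1ULL << 58;
	uint64_t pkt_en = 1ULL << 56;
	uint64_t o_es = 1ULL << 15;  /* 64-bit byte-swap mode */
	uint64_t o_mode = 1ULL << 14;

	assert((commit_mode | pkt_en | o_es | o_mode) ==
	       0x050000000000C000ULL);
	return 0;
}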
+/*############################ RST #########################*/
+
+#define CN23XX_RST_BOOT 0x0001180006001600ULL
+#define CN23XX_RST_SOFT_RST 0x0001180006001680ULL
+
+#define CN23XX_LMC0_RESET_CTL 0x0001180088000180ULL
+#define CN23XX_LMC0_RESET_CTL_DDR3RST_MASK 0x0000000000000001ULL
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
index c03d37016a48..e779af88621b 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
@@ -338,7 +338,7 @@ void lio_cn6xxx_setup_oq_regs(struct octeon_device *oct, u32 oq_no)
octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB, intr);
}
-void lio_cn6xxx_enable_io_queues(struct octeon_device *oct)
+int lio_cn6xxx_enable_io_queues(struct octeon_device *oct)
{
u32 mask;
@@ -353,6 +353,8 @@ void lio_cn6xxx_enable_io_queues(struct octeon_device *oct)
mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
mask |= oct->io_qmask.oq;
octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, mask);
+
+ return 0;
}
void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
@@ -418,36 +420,6 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT, d32);
}
-void lio_cn6xxx_reinit_regs(struct octeon_device *oct)
-{
- int i;
-
- for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
- if (!(oct->io_qmask.iq & (1ULL << i)))
- continue;
- oct->fn_list.setup_iq_regs(oct, i);
- }
-
- for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
- if (!(oct->io_qmask.oq & (1ULL << i)))
- continue;
- oct->fn_list.setup_oq_regs(oct, i);
- }
-
- oct->fn_list.setup_device_regs(oct);
-
- oct->fn_list.enable_interrupt(oct->chip);
-
- oct->fn_list.enable_io_queues(oct);
-
- /* for (i = 0; i < oct->num_oqs; i++) { */
- for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
- if (!(oct->io_qmask.oq & (1ULL << i)))
- continue;
- writel(oct->droq[i]->max_count, oct->droq[i]->pkts_credit_reg);
- }
-}
-
void
lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct,
u64 core_addr,
@@ -507,18 +479,20 @@ lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq)
return new_idx;
}
-void lio_cn6xxx_enable_interrupt(void *chip)
+void lio_cn6xxx_enable_interrupt(struct octeon_device *oct,
+ u8 unused __attribute__((unused)))
{
- struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)chip;
+ struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
u64 mask = cn6xxx->intr_mask64 | CN6XXX_INTR_DMA0_FORCE;
/* Enable Interrupt */
writeq(mask, cn6xxx->intr_enb_reg64);
}
-void lio_cn6xxx_disable_interrupt(void *chip)
+void lio_cn6xxx_disable_interrupt(struct octeon_device *oct,
+ u8 unused __attribute__((unused)))
{
- struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)chip;
+ struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
/* Disable Interrupts */
writeq(0, cn6xxx->intr_enb_reg64);
@@ -714,7 +688,6 @@ int lio_setup_cn66xx_octeon_device(struct octeon_device *oct)
oct->fn_list.soft_reset = lio_cn6xxx_soft_reset;
oct->fn_list.setup_device_regs = lio_cn6xxx_setup_device_regs;
- oct->fn_list.reinit_regs = lio_cn6xxx_reinit_regs;
oct->fn_list.update_iq_read_idx = lio_cn6xxx_update_read_index;
oct->fn_list.bar1_idx_setup = lio_cn6xxx_bar1_idx_setup;
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
index 28c47224221a..a40a91394079 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
@@ -80,18 +80,17 @@ void lio_cn6xxx_setup_global_input_regs(struct octeon_device *oct);
void lio_cn6xxx_setup_global_output_regs(struct octeon_device *oct);
void lio_cn6xxx_setup_iq_regs(struct octeon_device *oct, u32 iq_no);
void lio_cn6xxx_setup_oq_regs(struct octeon_device *oct, u32 oq_no);
-void lio_cn6xxx_enable_io_queues(struct octeon_device *oct);
+int lio_cn6xxx_enable_io_queues(struct octeon_device *oct);
void lio_cn6xxx_disable_io_queues(struct octeon_device *oct);
irqreturn_t lio_cn6xxx_process_interrupt_regs(void *dev);
-void lio_cn6xxx_reinit_regs(struct octeon_device *oct);
void lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct, u64 core_addr,
u32 idx, int valid);
void lio_cn6xxx_bar1_idx_write(struct octeon_device *oct, u32 idx, u32 mask);
u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx);
u32
lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq);
-void lio_cn6xxx_enable_interrupt(void *chip);
-void lio_cn6xxx_disable_interrupt(void *chip);
+void lio_cn6xxx_enable_interrupt(struct octeon_device *oct, u8 unused);
+void lio_cn6xxx_disable_interrupt(struct octeon_device *oct, u8 unused);
void cn6xxx_get_pcie_qlmport(struct octeon_device *oct);
void lio_cn6xxx_setup_reg_address(struct octeon_device *oct, void *chip,
struct octeon_reg_list *reg_list);
diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
index 29755bc68f12..dbf3566ead53 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
@@ -148,7 +148,6 @@ int lio_setup_cn68xx_octeon_device(struct octeon_device *oct)
oct->fn_list.process_interrupt_regs = lio_cn6xxx_process_interrupt_regs;
oct->fn_list.soft_reset = lio_cn68xx_soft_reset;
oct->fn_list.setup_device_regs = lio_cn68xx_setup_device_regs;
- oct->fn_list.reinit_regs = lio_cn6xxx_reinit_regs;
oct->fn_list.update_iq_read_idx = lio_cn6xxx_update_read_index;
oct->fn_list.bar1_idx_setup = lio_cn6xxx_bar1_idx_setup;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
new file mode 100644
index 000000000000..201eddb3013a
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -0,0 +1,266 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+#include <linux/pci.h>
+#include <linux/if_vlan.h>
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+
+int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ int ret = 0;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = cmd;
+ nctrl.ncmd.s.param1 = param1;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.wait_time = 100;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n",
+ ret);
+ }
+ return ret;
+}
+
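A hedged sketch of how a caller might use liquidio_set_feature(); the real callers live elsewhere in the driver, and since the meaning of param1 is command-specific, the zero passed here is an assumption for illustration:

/* Illustrative only: toggle firmware LRO through the control-packet
 * path. OCTNET_CMD_LRO_ENABLE/DISABLE come from liquidio_common.h;
 * passing 0 for param1 is an assumption made for this sketch.
 */
static int example_toggle_lro(struct net_device *netdev, bool enable)
{
	int cmd = enable ? OCTNET_CMD_LRO_ENABLE : OCTNET_CMD_LRO_DISABLE;

	return liquidio_set_feature(netdev, cmd, 0);
}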
+void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
+ unsigned int bytes_compl)
+{
+ struct netdev_queue *netdev_queue = txq;
+
+ netdev_tx_completed_queue(netdev_queue, pkts_compl, bytes_compl);
+}
+
+void octeon_update_tx_completion_counters(void *buf, int reqtype,
+ unsigned int *pkts_compl,
+ unsigned int *bytes_compl)
+{
+ struct octnet_buf_free_info *finfo;
+ struct sk_buff *skb = NULL;
+ struct octeon_soft_command *sc;
+
+ switch (reqtype) {
+ case REQTYPE_NORESP_NET:
+ case REQTYPE_NORESP_NET_SG:
+ finfo = buf;
+ skb = finfo->skb;
+ break;
+
+ case REQTYPE_RESP_NET_SG:
+ case REQTYPE_RESP_NET:
+ sc = buf;
+ skb = sc->callback_arg;
+ break;
+
+ default:
+ return;
+ }
+
+ (*pkts_compl)++;
+/* TODO: use some other #define to convey that IQs are not tied to
+ * netdevs and can take traffic from different netdevs, which is why
+ * BQL reporting is done per packet rather than in bulk. Usage of
+ * NO_NAPI in txq completion is a little confusing.
+ */
+ *bytes_compl += skb->len;
+}
+
+void octeon_report_sent_bytes_to_bql(void *buf, int reqtype)
+{
+ struct octnet_buf_free_info *finfo;
+ struct sk_buff *skb;
+ struct octeon_soft_command *sc;
+ struct netdev_queue *txq;
+
+ switch (reqtype) {
+ case REQTYPE_NORESP_NET:
+ case REQTYPE_NORESP_NET_SG:
+ finfo = buf;
+ skb = finfo->skb;
+ break;
+
+ case REQTYPE_RESP_NET_SG:
+ case REQTYPE_RESP_NET:
+ sc = buf;
+ skb = sc->callback_arg;
+ break;
+
+ default:
+ return;
+ }
+
+ txq = netdev_get_tx_queue(skb->dev, skb_get_queue_mapping(skb));
+ netdev_tx_sent_queue(txq, skb->len);
+}
+
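These helpers exist to honor the BQL contract: bytes reported at transmit time must later be reported back on completion. A minimal sketch of the pairing, with an illustrative function name and using only the in-tree netdev BQL API:

/* Sketch of the BQL round trip: bytes reported at transmit time via
 * netdev_tx_sent_queue() must be reported back once the hardware has
 * fetched the packet. The function name here is illustrative.
 */
static void example_bql_round_trip(struct netdev_queue *txq,
				   unsigned int len)
{
	netdev_tx_sent_queue(txq, len);         /* on enqueue */
	/* ... hardware DMA-fetches the packet ... */
	netdev_tx_completed_queue(txq, 1, len); /* on completion */
}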
+void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
+{
+ struct octnic_ctrl_pkt *nctrl = (struct octnic_ctrl_pkt *)nctrl_ptr;
+ struct net_device *netdev = (struct net_device *)nctrl->netpndev;
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ u8 *mac;
+
+ switch (nctrl->ncmd.s.cmd) {
+ case OCTNET_CMD_CHANGE_DEVFLAGS:
+ case OCTNET_CMD_SET_MULTI_LIST:
+ break;
+
+ case OCTNET_CMD_CHANGE_MACADDR:
+ mac = ((u8 *)&nctrl->udd[0]) + 2;
+ netif_info(lio, probe, lio->netdev,
+ "MACAddr changed to %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
+ mac[0], mac[1],
+ mac[2], mac[3],
+ mac[4], mac[5]);
+ break;
+
+ case OCTNET_CMD_CHANGE_MTU:
+ /* If command is successful, change the MTU. */
+ netif_info(lio, probe, lio->netdev, "MTU Changed from %d to %d\n",
+ netdev->mtu, nctrl->ncmd.s.param1);
+ dev_info(&oct->pci_dev->dev, "%s MTU Changed from %d to %d\n",
+ netdev->name, netdev->mtu,
+ nctrl->ncmd.s.param1);
+ netdev->mtu = nctrl->ncmd.s.param1;
+ queue_delayed_work(lio->link_status_wq.wq,
+ &lio->link_status_wq.wk.work, 0);
+ break;
+
+ case OCTNET_CMD_GPIO_ACCESS:
+ netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");
+
+ break;
+
+ case OCTNET_CMD_ID_ACTIVE:
+ netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");
+
+ break;
+
+ case OCTNET_CMD_LRO_ENABLE:
+ dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name);
+ break;
+
+ case OCTNET_CMD_LRO_DISABLE:
+ dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n",
+ netdev->name);
+ break;
+
+ case OCTNET_CMD_VERBOSE_ENABLE:
+ dev_info(&oct->pci_dev->dev, "%s Firmware debug enabled\n",
+ netdev->name);
+ break;
+
+ case OCTNET_CMD_VERBOSE_DISABLE:
+ dev_info(&oct->pci_dev->dev, "%s Firmware debug disabled\n",
+ netdev->name);
+ break;
+
+ case OCTNET_CMD_ENABLE_VLAN_FILTER:
+ dev_info(&oct->pci_dev->dev, "%s VLAN filter enabled\n",
+ netdev->name);
+ break;
+
+ case OCTNET_CMD_ADD_VLAN_FILTER:
+ dev_info(&oct->pci_dev->dev, "%s VLAN filter %d added\n",
+ netdev->name, nctrl->ncmd.s.param1);
+ break;
+
+ case OCTNET_CMD_DEL_VLAN_FILTER:
+ dev_info(&oct->pci_dev->dev, "%s VLAN filter %d removed\n",
+ netdev->name, nctrl->ncmd.s.param1);
+ break;
+
+ case OCTNET_CMD_SET_SETTINGS:
+ dev_info(&oct->pci_dev->dev, "%s settings changed\n",
+ netdev->name);
+
+ break;
+
+ /* Case to handle "OCTNET_CMD_TNL_RX_CSUM_CTL"
+ * Command passed by NIC driver
+ */
+ case OCTNET_CMD_TNL_RX_CSUM_CTL:
+ if (nctrl->ncmd.s.param1 == OCTNET_CMD_RXCSUM_ENABLE) {
+ netif_info(lio, probe, lio->netdev,
+ "RX Checksum Offload Enabled\n");
+ } else if (nctrl->ncmd.s.param1 ==
+ OCTNET_CMD_RXCSUM_DISABLE) {
+ netif_info(lio, probe, lio->netdev,
+ "RX Checksum Offload Disabled\n");
+ }
+ break;
+
+ /* Case to handle "OCTNET_CMD_TNL_TX_CSUM_CTL"
+ * Command passed by NIC driver
+ */
+ case OCTNET_CMD_TNL_TX_CSUM_CTL:
+ if (nctrl->ncmd.s.param1 == OCTNET_CMD_TXCSUM_ENABLE) {
+ netif_info(lio, probe, lio->netdev,
+ "TX Checksum Offload Enabled\n");
+ } else if (nctrl->ncmd.s.param1 ==
+ OCTNET_CMD_TXCSUM_DISABLE) {
+ netif_info(lio, probe, lio->netdev,
+ "TX Checksum Offload Disabled\n");
+ }
+ break;
+
+ /* Case to handle "OCTNET_CMD_VXLAN_PORT_CONFIG"
+ * Command passed by NIC driver
+ */
+ case OCTNET_CMD_VXLAN_PORT_CONFIG:
+ if (nctrl->ncmd.s.more == OCTNET_CMD_VXLAN_PORT_ADD) {
+ netif_info(lio, probe, lio->netdev,
+ "VxLAN Destination UDP PORT:%d ADDED\n",
+ nctrl->ncmd.s.param1);
+ } else if (nctrl->ncmd.s.more ==
+ OCTNET_CMD_VXLAN_PORT_DEL) {
+ netif_info(lio, probe, lio->netdev,
+ "VxLAN Destination UDP PORT:%d DELETED\n",
+ nctrl->ncmd.s.param1);
+ }
+ break;
+
+ case OCTNET_CMD_SET_FLOW_CTL:
+ netif_info(lio, probe, lio->netdev, "Set RX/TX flow control parameters\n");
+ break;
+
+ default:
+ dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__,
+ nctrl->ncmd.s.cmd);
+ }
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index 289eb8907922..f163e0abbeb2 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -32,6 +32,7 @@
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
+#include "cn23xx_pf_device.h"
static int octnet_get_link_stats(struct net_device *netdev);
@@ -75,6 +76,7 @@ enum {
#define ARRAY_LENGTH(a) (sizeof(a) / sizeof((a)[0]))
#define OCT_ETHTOOL_REGDUMP_LEN 4096
+#define OCT_ETHTOOL_REGDUMP_LEN_23XX (4096 * 11)
#define OCT_ETHTOOL_REGSVER 1
/* statistics of PF */
@@ -188,6 +190,10 @@ static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = {
"buffer_alloc_failure",
};
+/* LiquidIO driver private flags */
+static const char oct_priv_flags_strings[][ETH_GSTRING_LEN] = {
+};
+
#define OCTNIC_NCMD_AUTONEG_ON 0x1
#define OCTNIC_NCMD_PHY_ON 0x2
@@ -259,6 +265,13 @@ lio_ethtool_get_channels(struct net_device *dev,
max_tx = CFG_GET_IQ_MAX_Q(conf6x);
rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx);
tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx);
+ } else if (OCTEON_CN23XX_PF(oct)) {
+ struct octeon_config *conf23 = CHIP_FIELD(oct, cn23xx_pf, conf);
+
+ max_rx = CFG_GET_OQ_MAX_Q(conf23);
+ max_tx = CFG_GET_IQ_MAX_Q(conf23);
+ rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf23, lio->ifidx);
+ tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf23, lio->ifidx);
}
channel->max_rx = max_rx;
@@ -290,18 +303,16 @@ lio_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct_dev = lio->oct_dev;
struct octeon_board_info *board_info;
- int len;
- if (eeprom->offset != 0)
+ if (eeprom->offset)
return -EINVAL;
eeprom->magic = oct_dev->pci_dev->vendor;
board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
- len =
- sprintf((char *)bytes,
- "boardname:%s serialnum:%s maj:%lld min:%lld\n",
- board_info->name, board_info->serial_number,
- board_info->major, board_info->minor);
+ sprintf((char *)bytes,
+ "boardname:%s serialnum:%s maj:%lld min:%lld\n",
+ board_info->name, board_info->serial_number,
+ board_info->major, board_info->minor);
return 0;
}
@@ -333,6 +344,32 @@ static int octnet_gpio_access(struct net_device *netdev, int addr, int val)
return 0;
}
+static int octnet_id_active(struct net_device *netdev, int val)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ int ret = 0;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_ID_ACTIVE;
+ nctrl.ncmd.s.param1 = val;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.wait_time = 100;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/* Callback for when mdio command response arrives
*/
static void octnet_mdio_resp_callback(struct octeon_device *oct,
@@ -406,7 +443,7 @@ octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
dev_err(&oct_dev->pci_dev->dev,
"octnet_mdio45_access instruction failed status: %x\n",
retval);
- retval = -EBUSY;
+ retval = -EBUSY;
} else {
/* Sleep on a wait queue till the cond flag indicates that the
* response arrived
@@ -476,6 +513,11 @@ static int lio_set_phys_id(struct net_device *netdev,
&value);
if (ret)
return ret;
+ } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
+ octnet_id_active(netdev, LED_IDENTIFICATION_ON);
+
+ /* returns 0 since updates are asynchronous */
+ return 0;
} else {
return -EINVAL;
}
@@ -521,7 +563,10 @@ static int lio_set_phys_id(struct net_device *netdev,
&lio->phy_beacon_val);
if (ret)
return ret;
+ } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
+ octnet_id_active(netdev, LED_IDENTIFICATION_OFF);
+ return 0;
} else {
return -EINVAL;
}
@@ -550,6 +595,13 @@ lio_ethtool_get_ringparam(struct net_device *netdev,
rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS;
rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx);
tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx);
+ } else if (OCTEON_CN23XX_PF(oct)) {
+ struct octeon_config *conf23 = CHIP_FIELD(oct, cn23xx_pf, conf);
+
+ tx_max_pending = CN23XX_MAX_IQ_DESCRIPTORS;
+ rx_max_pending = CN23XX_MAX_OQ_DESCRIPTORS;
+ rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf23, lio->ifidx);
+ tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf23, lio->ifidx);
}
if (lio->mtu > OCTNET_DEFAULT_FRM_SIZE - OCTNET_FRM_HEADER_SIZE) {
@@ -610,6 +662,69 @@ lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
pause->rx_pause = oct->rx_pause;
}
+static int
+lio_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
+{
+ /* Note: these drivers do not support any autonegotiation. */
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ struct oct_link_info *linfo = &lio->linfo;
+
+ int ret = 0;
+
+ if (oct->chip_id != OCTEON_CN23XX_PF_VID)
+ return -EINVAL;
+
+ if (linfo->link.s.duplex == 0) {
+ /*no flow control for half duplex*/
+ if (pause->rx_pause || pause->tx_pause)
+ return -EINVAL;
+ }
+
+ /*do not support autoneg of link flow control*/
+ if (pause->autoneg == AUTONEG_ENABLE)
+ return -EINVAL;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_SET_FLOW_CTL;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.wait_time = 100;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ if (pause->rx_pause) {
+ /*enable rx pause*/
+ nctrl.ncmd.s.param1 = 1;
+ } else {
+ /*disable rx pause*/
+ nctrl.ncmd.s.param1 = 0;
+ }
+
+ if (pause->tx_pause) {
+ /*enable tx pause*/
+ nctrl.ncmd.s.param2 = 1;
+ } else {
+ /*disable tx pause*/
+ nctrl.ncmd.s.param2 = 0;
+ }
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev, "Failed to set pause parameter\n");
+ return -EINVAL;
+ }
+
+ oct->rx_pause = pause->rx_pause;
+ oct->tx_pause = pause->tx_pause;
+
+ return 0;
+}
+
static void
lio_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats __attribute__((unused)),
@@ -877,6 +992,27 @@ lio_get_ethtool_stats(struct net_device *netdev,
}
}
+static void lio_get_priv_flags_strings(struct lio *lio, u8 *data)
+{
+ struct octeon_device *oct_dev = lio->oct_dev;
+ int i;
+
+ switch (oct_dev->chip_id) {
+ case OCTEON_CN23XX_PF_VID:
+ for (i = 0; i < ARRAY_SIZE(oct_priv_flags_strings); i++) {
+ sprintf(data, "%s", oct_priv_flags_strings[i]);
+ data += ETH_GSTRING_LEN;
+ }
+ break;
+ case OCTEON_CN68XX:
+ case OCTEON_CN66XX:
+ break;
+ default:
+ netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
+ break;
+ }
+}
+
static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
struct lio *lio = GET_LIO(netdev);
@@ -916,12 +1052,31 @@ static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
}
break;
+ case ETH_SS_PRIV_FLAGS:
+ lio_get_priv_flags_strings(lio, data);
+ break;
default:
netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
break;
}
}
+static int lio_get_priv_flags_ss_count(struct lio *lio)
+{
+ struct octeon_device *oct_dev = lio->oct_dev;
+
+ switch (oct_dev->chip_id) {
+ case OCTEON_CN23XX_PF_VID:
+ return ARRAY_SIZE(oct_priv_flags_strings);
+ case OCTEON_CN68XX:
+ case OCTEON_CN66XX:
+ return -EOPNOTSUPP;
+ default:
+ netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
+ return -EOPNOTSUPP;
+ }
+}
+
static int lio_get_sset_count(struct net_device *netdev, int sset)
{
struct lio *lio = GET_LIO(netdev);
@@ -932,6 +1087,8 @@ static int lio_get_sset_count(struct net_device *netdev, int sset)
return (ARRAY_SIZE(oct_stats_strings) +
ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
+ case ETH_SS_PRIV_FLAGS:
+ return lio_get_priv_flags_ss_count(lio);
default:
return -EOPNOTSUPP;
}
@@ -948,6 +1105,16 @@ static int lio_get_intr_coalesce(struct net_device *netdev,
intrmod_cfg = &oct->intrmod;
switch (oct->chip_id) {
+ case OCTEON_CN23XX_PF_VID:
+ if (!intrmod_cfg->rx_enable) {
+ intr_coal->rx_coalesce_usecs = intrmod_cfg->rx_usecs;
+ intr_coal->rx_max_coalesced_frames =
+ intrmod_cfg->rx_frames;
+ }
+ if (!intrmod_cfg->tx_enable)
+ intr_coal->tx_max_coalesced_frames =
+ intrmod_cfg->tx_frames;
+ break;
case OCTEON_CN68XX:
case OCTEON_CN66XX: {
struct octeon_cn6xxx *cn6xxx =
@@ -983,7 +1150,15 @@ static int lio_get_intr_coalesce(struct net_device *netdev,
intr_coal->rx_coalesce_usecs_low =
intrmod_cfg->rx_mintmr_trigger;
intr_coal->rx_max_coalesced_frames_low =
- intrmod_cfg->rx_mincnt_trigger;
+ intrmod_cfg->rx_mincnt_trigger;
+ }
+ if (OCTEON_CN23XX_PF(oct) &&
+ (intrmod_cfg->tx_enable)) {
+ intr_coal->use_adaptive_tx_coalesce = intrmod_cfg->tx_enable;
+ intr_coal->tx_max_coalesced_frames_high =
+ intrmod_cfg->tx_maxcnt_trigger;
+ intr_coal->tx_max_coalesced_frames_low =
+ intrmod_cfg->tx_mincnt_trigger;
}
return 0;
}
@@ -1060,11 +1235,11 @@ static void
octnet_nic_stats_callback(struct octeon_device *oct_dev,
u32 status, void *ptr)
{
- struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
- struct oct_nic_stats_resp *resp = (struct oct_nic_stats_resp *)
- sc->virtrptr;
- struct oct_nic_stats_ctrl *ctrl = (struct oct_nic_stats_ctrl *)
- sc->ctxptr;
+ struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
+ struct oct_nic_stats_resp *resp =
+ (struct oct_nic_stats_resp *)sc->virtrptr;
+ struct oct_nic_stats_ctrl *ctrl =
+ (struct oct_nic_stats_ctrl *)sc->ctxptr;
struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;
@@ -1314,14 +1489,35 @@ oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal)
CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames);
break;
}
+ case OCTEON_CN23XX_PF_VID: {
+ int q_no;
+
+ if (!intr_coal->rx_max_coalesced_frames)
+ rx_max_coalesced_frames = oct->intrmod.rx_frames;
+ else
+ rx_max_coalesced_frames =
+ intr_coal->rx_max_coalesced_frames;
+ for (q_no = 0; q_no < oct->num_oqs; q_no++) {
+ q_no += oct->sriov_info.pf_srn;
+ octeon_write_csr64(
+ oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
+ (octeon_read_csr64(
+ oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no)) &
+ (0x3fffff00000000UL)) |
+ rx_max_coalesced_frames);
+ /*consider setting resend bit*/
+ }
+ oct->intrmod.rx_frames = rx_max_coalesced_frames;
+ break;
+ }
default:
return -EINVAL;
}
return 0;
}
-static int oct_cfg_rx_intrtime(struct lio *lio, struct ethtool_coalesce
- *intr_coal)
+static int oct_cfg_rx_intrtime(struct lio *lio,
+ struct ethtool_coalesce *intr_coal)
{
struct octeon_device *oct = lio->oct_dev;
u32 time_threshold, rx_coalesce_usecs;
@@ -1346,6 +1542,27 @@ static int oct_cfg_rx_intrtime(struct lio *lio, struct ethtool_coalesce
CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs);
break;
}
+ case OCTEON_CN23XX_PF_VID: {
+ u64 time_threshold;
+ int q_no;
+
+ if (!intr_coal->rx_coalesce_usecs)
+ rx_coalesce_usecs = oct->intrmod.rx_usecs;
+ else
+ rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
+ time_threshold =
+ cn23xx_pf_get_oq_ticks(oct, (u32)rx_coalesce_usecs);
+ for (q_no = 0; q_no < oct->num_oqs; q_no++) {
+ q_no += oct->sriov_info.pf_srn;
+ octeon_write_csr64(oct,
+ CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
+ (oct->intrmod.rx_frames |
+ (time_threshold << 32)));
+ /*consider writing to resend bit here*/
+ }
+ oct->intrmod.rx_usecs = rx_coalesce_usecs;
+ break;
+ }
default:
return -EINVAL;
}
@@ -1358,12 +1575,37 @@ oct_cfg_tx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal
__attribute__((unused)))
{
struct octeon_device *oct = lio->oct_dev;
+ u32 iq_intr_pkt;
+ void __iomem *inst_cnt_reg;
+ u64 val;
/* Config Cnt based interrupt values */
switch (oct->chip_id) {
case OCTEON_CN68XX:
case OCTEON_CN66XX:
break;
+ case OCTEON_CN23XX_PF_VID: {
+ int q_no;
+
+ if (!intr_coal->tx_max_coalesced_frames)
+ iq_intr_pkt = CN23XX_DEF_IQ_INTR_THRESHOLD &
+ CN23XX_PKT_IN_DONE_WMARK_MASK;
+ else
+ iq_intr_pkt = intr_coal->tx_max_coalesced_frames &
+ CN23XX_PKT_IN_DONE_WMARK_MASK;
+ for (q_no = 0; q_no < oct->num_iqs; q_no++) {
+ inst_cnt_reg = (oct->instr_queue[q_no])->inst_cnt_reg;
+ val = readq(inst_cnt_reg);
+ /* clear wmark and count; don't want to write count back */
+ val = (val & 0xFFFF000000000000ULL) |
+ ((u64)iq_intr_pkt
+ << CN23XX_PKT_IN_DONE_WMARK_BIT_POS);
+ writeq(val, inst_cnt_reg);
+ /*consider setting resend bit*/
+ }
+ oct->intrmod.tx_frames = iq_intr_pkt;
+ break;
+ }
default:
return -EINVAL;
}
@@ -1399,6 +1641,8 @@ static int lio_set_intr_coalesce(struct net_device *netdev,
return -EINVAL;
}
break;
+ case OCTEON_CN23XX_PF_VID:
+ break;
default:
return -EINVAL;
}
@@ -1541,9 +1785,237 @@ static int lio_nway_reset(struct net_device *netdev)
}
/* Return register dump len. */
-static int lio_get_regs_len(struct net_device *dev __attribute__((unused)))
+static int lio_get_regs_len(struct net_device *dev)
{
- return OCT_ETHTOOL_REGDUMP_LEN;
+ struct lio *lio = GET_LIO(dev);
+ struct octeon_device *oct = lio->oct_dev;
+
+ switch (oct->chip_id) {
+ case OCTEON_CN23XX_PF_VID:
+ return OCT_ETHTOOL_REGDUMP_LEN_23XX;
+ default:
+ return OCT_ETHTOOL_REGDUMP_LEN;
+ }
+}
+
+static int cn23xx_read_csr_reg(char *s, struct octeon_device *oct)
+{
+ u32 reg;
+ u8 pf_num = oct->pf_num;
+ int len = 0;
+ int i;
+
+ /* PCI Window Registers */
+
+ len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
+
+ /*0x29030 or 0x29040*/
+ reg = CN23XX_SLI_PKT_MAC_RINFO64(oct->pcie_port, oct->pf_num);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT_MAC%d_PF%d_RINFO): %016llx\n",
+ reg, oct->pcie_port, oct->pf_num,
+ (u64)octeon_read_csr64(oct, reg));
+
+ /*0x27080 or 0x27090*/
+ reg = CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num);
+ len +=
+ sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_ENB): %016llx\n",
+ reg, oct->pcie_port, oct->pf_num,
+ (u64)octeon_read_csr64(oct, reg));
+
+ /*0x27000 or 0x27010*/
+ reg = CN23XX_SLI_MAC_PF_INT_SUM64(oct->pcie_port, oct->pf_num);
+ len +=
+ sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_SUM): %016llx\n",
+ reg, oct->pcie_port, oct->pf_num,
+ (u64)octeon_read_csr64(oct, reg));
+
+ /*0x29120*/
+ reg = 0x29120;
+ len += sprintf(s + len, "\n[%08x] (SLI_PKT_MEM_CTL): %016llx\n", reg,
+ (u64)octeon_read_csr64(oct, reg));
+
+ /*0x27300*/
+ reg = 0x27300 + oct->pcie_port * CN23XX_MAC_INT_OFFSET +
+ (oct->pf_num) * CN23XX_PF_INT_OFFSET;
+ len += sprintf(
+ s + len, "\n[%08x] (SLI_MAC%d_PF%d_PKT_VF_INT): %016llx\n", reg,
+ oct->pcie_port, oct->pf_num, (u64)octeon_read_csr64(oct, reg));
+
+ /*0x27200*/
+ reg = 0x27200 + oct->pcie_port * CN23XX_MAC_INT_OFFSET +
+ (oct->pf_num) * CN23XX_PF_INT_OFFSET;
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_MAC%d_PF%d_PP_VF_INT): %016llx\n",
+ reg, oct->pcie_port, oct->pf_num,
+ (u64)octeon_read_csr64(oct, reg));
+
+ /*0x29130*/
+ reg = CN23XX_SLI_PKT_CNT_INT;
+ len += sprintf(s + len, "\n[%08x] (SLI_PKT_CNT_INT): %016llx\n", reg,
+ (u64)octeon_read_csr64(oct, reg));
+
+ /*0x29140*/
+ reg = CN23XX_SLI_PKT_TIME_INT;
+ len += sprintf(s + len, "\n[%08x] (SLI_PKT_TIME_INT): %016llx\n", reg,
+ (u64)octeon_read_csr64(oct, reg));
+
+ /*0x29160*/
+ reg = 0x29160;
+ len += sprintf(s + len, "\n[%08x] (SLI_PKT_INT): %016llx\n", reg,
+ (u64)octeon_read_csr64(oct, reg));
+
+ /*0x29180*/
+ reg = CN23XX_SLI_OQ_WMARK;
+ len += sprintf(s + len, "\n[%08x] (SLI_PKT_OUTPUT_WMARK): %016llx\n",
+ reg, (u64)octeon_read_csr64(oct, reg));
+
+ /*0x291E0*/
+ reg = CN23XX_SLI_PKT_IOQ_RING_RST;
+ len += sprintf(s + len, "\n[%08x] (SLI_PKT_RING_RST): %016llx\n", reg,
+ (u64)octeon_read_csr64(oct, reg));
+
+ /*0x29210*/
+ reg = CN23XX_SLI_GBL_CONTROL;
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT_GBL_CONTROL): %016llx\n", reg,
+ (u64)octeon_read_csr64(oct, reg));
+
+ /*0x29220*/
+ reg = 0x29220;
+ len += sprintf(s + len, "\n[%08x] (SLI_PKT_BIST_STATUS): %016llx\n",
+ reg, (u64)octeon_read_csr64(oct, reg));
+
+ /*PF only*/
+ if (pf_num == 0) {
+ /*0x29260*/
+ reg = CN23XX_SLI_OUT_BP_EN_W1S;
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT_OUT_BP_EN_W1S): %016llx\n",
+ reg, (u64)octeon_read_csr64(oct, reg));
+ } else if (pf_num == 1) {
+ /*0x29270*/
+ reg = CN23XX_SLI_OUT_BP_EN2_W1S;
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT_OUT_BP_EN2_W1S): %016llx\n",
+ reg, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_OQ_BUFF_INFO_SIZE(i);
+ len +=
+ sprintf(s + len, "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x10040*/
+ for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_IQ_INSTR_COUNT64(i);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x10080*/
+ for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_OQ_PKTS_CREDIT(i);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x10090*/
+ for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_OQ_SIZE(i);
+ len += sprintf(
+ s + len, "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x10050*/
+ for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_OQ_PKT_CONTROL(i);
+ len += sprintf(
+ s + len,
+ "\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x10070*/
+ for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_OQ_BASE_ADDR64(i);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x100a0*/
+ for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_OQ_PKT_INT_LEVELS(i);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x100b0*/
+ for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_OQ_PKTS_SENT(i);
+ len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x100c0*/
+ for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
+ reg = 0x100c0 + i * CN23XX_OQ_OFFSET;
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x10000*/
+ for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_IQ_PKT_CONTROL64(i);
+ len += sprintf(
+ s + len,
+ "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x10010*/
+ for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_IQ_BASE_ADDR64(i);
+ len += sprintf(
+ s + len,
+ "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n", reg,
+ i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x10020*/
+ for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_IQ_DOORBELL(i);
+ len += sprintf(
+ s + len,
+ "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x10030*/
+ for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_IQ_SIZE(i);
+ len += sprintf(
+ s + len,
+ "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ return len;
}
static int cn6xxx_read_csr_reg(char *s, struct octeon_device *oct)
@@ -1688,6 +2160,10 @@ static void lio_get_regs(struct net_device *dev,
regs->version = OCT_ETHTOOL_REGSVER;
switch (oct->chip_id) {
+ case OCTEON_CN23XX_PF_VID:
+ memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX);
+ len += cn23xx_read_csr_reg(regbuf + len, oct);
+ break;
case OCTEON_CN68XX:
case OCTEON_CN66XX:
memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
@@ -1729,6 +2205,7 @@ static const struct ethtool_ops lio_ethtool_ops = {
.get_strings = lio_get_strings,
.get_ethtool_stats = lio_get_ethtool_stats,
.get_pauseparam = lio_get_pauseparam,
+ .set_pauseparam = lio_set_pauseparam,
.get_regs_len = lio_get_regs_len,
.get_regs = lio_get_regs,
.get_msglevel = lio_get_msglevel,
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 20d6942edf40..afc6f9dc8119 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -21,11 +21,10 @@
**********************************************************************/
#include <linux/version.h>
#include <linux/pci.h>
-#include <linux/net_tstamp.h>
-#include <linux/if_vlan.h>
#include <linux/firmware.h>
#include <linux/ptp_clock_kernel.h>
#include <net/vxlan.h>
+#include <linux/kthread.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
@@ -37,6 +36,7 @@
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_device.h"
+#include "cn23xx_pf_device.h"
#include "liquidio_image.h"
MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
@@ -52,11 +52,6 @@ module_param(ddr_timeout, int, 0644);
MODULE_PARM_DESC(ddr_timeout,
"Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check");
-static u32 console_bitmask;
-module_param(console_bitmask, int, 0644);
-MODULE_PARM_DESC(console_bitmask,
- "Bitmask indicating which consoles have debug output redirected to syslog.");
-
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count) \
@@ -102,6 +97,14 @@ struct liquidio_if_cfg_resp {
u64 status;
};
+struct liquidio_rx_ctl_context {
+ int octeon_id;
+
+ wait_queue_head_t wc;
+
+ int cond;
+};
+
struct oct_link_status_resp {
u64 rh;
struct oct_link_info link_info;
@@ -139,7 +142,8 @@ union tx_info {
#define OCTNIC_MAX_SG (MAX_SKB_FRAGS)
#define OCTNIC_GSO_MAX_HEADER_SIZE 128
-#define OCTNIC_GSO_MAX_SIZE (GSO_MAX_SIZE - OCTNIC_GSO_MAX_HEADER_SIZE)
+#define OCTNIC_GSO_MAX_SIZE \
+ (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)
/** Structure of a node in list of gather components maintained by
* NIC driver for each network device.
@@ -162,27 +166,6 @@ struct octnic_gather {
u64 sg_dma_ptr;
};
-/** This structure is used by NIC driver to store information required
- * to free the sk_buff when the packet has been fetched by Octeon.
- * Bytes offset below assume worst-case of a 64-bit system.
- */
-struct octnet_buf_free_info {
- /** Bytes 1-8. Pointer to network device private structure. */
- struct lio *lio;
-
- /** Bytes 9-16. Pointer to sk_buff. */
- struct sk_buff *skb;
-
- /** Bytes 17-24. Pointer to gather list. */
- struct octnic_gather *g;
-
- /** Bytes 25-32. Physical address of skb->data or gather list. */
- u64 dptr;
-
- /** Bytes 33-47. Piggybacked soft command, if any */
- struct octeon_soft_command *sc;
-};
-
struct handshake {
struct completion init;
struct completion started;
@@ -198,6 +181,7 @@ struct octeon_device_priv {
};
static int octeon_device_init(struct octeon_device *);
+static int liquidio_stop(struct net_device *netdev);
static void liquidio_remove(struct pci_dev *pdev);
static int liquidio_probe(struct pci_dev *pdev,
const struct pci_device_id *ent);
@@ -219,6 +203,20 @@ static void octeon_droq_bh(unsigned long pdev)
continue;
reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
MAX_PACKET_BUDGET);
+ lio_enable_irq(oct->droq[q_no], NULL);
+
+ if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
+ /* set time and cnt interrupt thresholds for this DROQ
+ * for NAPI
+ */
+ int adjusted_q_no = q_no + oct->sriov_info.pf_srn;
+
+ octeon_write_csr64(
+ oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no),
+ 0x5700000040ULL);
+ octeon_write_csr64(
+ oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0);
+ }
}
if (reschedule)
@@ -252,76 +250,6 @@ static int lio_wait_for_oq_pkts(struct octeon_device *oct)
return pkt_cnt;
}
-void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
- unsigned int bytes_compl)
-{
- struct netdev_queue *netdev_queue = txq;
-
- netdev_tx_completed_queue(netdev_queue, pkts_compl, bytes_compl);
-}
-
-void octeon_update_tx_completion_counters(void *buf, int reqtype,
- unsigned int *pkts_compl,
- unsigned int *bytes_compl)
-{
- struct octnet_buf_free_info *finfo;
- struct sk_buff *skb = NULL;
- struct octeon_soft_command *sc;
-
- switch (reqtype) {
- case REQTYPE_NORESP_NET:
- case REQTYPE_NORESP_NET_SG:
- finfo = buf;
- skb = finfo->skb;
- break;
-
- case REQTYPE_RESP_NET_SG:
- case REQTYPE_RESP_NET:
- sc = buf;
- skb = sc->callback_arg;
- break;
-
- default:
- return;
- }
-
- (*pkts_compl)++;
- *bytes_compl += skb->len;
-}
-
-void octeon_report_sent_bytes_to_bql(void *buf, int reqtype)
-{
- struct octnet_buf_free_info *finfo;
- struct sk_buff *skb;
- struct octeon_soft_command *sc;
- struct netdev_queue *txq;
-
- switch (reqtype) {
- case REQTYPE_NORESP_NET:
- case REQTYPE_NORESP_NET_SG:
- finfo = buf;
- skb = finfo->skb;
- break;
-
- case REQTYPE_RESP_NET_SG:
- case REQTYPE_RESP_NET:
- sc = buf;
- skb = sc->callback_arg;
- break;
-
- default:
- return;
- }
-
- txq = netdev_get_tx_queue(skb->dev, skb_get_queue_mapping(skb));
- netdev_tx_sent_queue(txq, skb->len);
-}
-
-int octeon_console_debug_enabled(u32 console)
-{
- return (console_bitmask >> (console)) & 0x1;
-}
-
/**
* \brief Forces all IO queues off on a given device
* @param oct Pointer to Octeon device
@@ -441,7 +369,7 @@ static void stop_pci_io(struct octeon_device *oct)
pci_disable_device(oct->pci_dev);
/* Disable interrupts */
- oct->fn_list.disable_interrupt(oct->chip);
+ oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
pcierror_quiesce_device(oct);
@@ -570,6 +498,9 @@ static const struct pci_device_id liquidio_pci_tbl[] = {
{ /* 66xx */
PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
},
+ { /* 23xx pf */
+ PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
+ },
{
0, 0, 0, 0, 0, 0, 0
}
@@ -587,7 +518,6 @@ static struct pci_driver liquidio_pci_driver = {
.suspend = liquidio_suspend,
.resume = liquidio_resume,
#endif
-
};
/**
@@ -936,6 +866,52 @@ static void print_link_info(struct net_device *netdev)
}
/**
+ * \brief Routine to notify MTU change
+ * @param work work_struct data structure
+ */
+static void octnet_link_status_change(struct work_struct *work)
+{
+ struct cavium_wk *wk = (struct cavium_wk *)work;
+ struct lio *lio = (struct lio *)wk->ctxptr;
+
+ rtnl_lock();
+ call_netdevice_notifiers(NETDEV_CHANGEMTU, lio->netdev);
+ rtnl_unlock();
+}
+
+/**
+ * \brief Sets up the mtu status change work
+ * @param netdev network device
+ */
+static inline int setup_link_status_change_wq(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+
+ lio->link_status_wq.wq = alloc_workqueue("link-status",
+ WQ_MEM_RECLAIM, 0);
+ if (!lio->link_status_wq.wq) {
+ dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
+ return -1;
+ }
+ INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
+ octnet_link_status_change);
+ lio->link_status_wq.wk.ctxptr = lio;
+
+ return 0;
+}
+
+static inline void cleanup_link_status_change_wq(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+
+ if (lio->link_status_wq.wq) {
+ cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
+ destroy_workqueue(lio->link_status_wq.wq);
+ }
+}
+
+/**
* \brief Update link status
* @param netdev network device
* @param ls link status structure
@@ -973,8 +949,6 @@ static void update_txq_status(struct octeon_device *oct, int iq_num)
struct lio *lio;
struct octeon_instr_queue *iq = oct->instr_queue[iq_num];
- /*octeon_update_iq_read_idx(oct, iq);*/
-
netdev = oct->props[iq->ifidx].netdev;
/* This is needed because the first IQ does not have
@@ -1002,12 +976,32 @@ static void update_txq_status(struct octeon_device *oct, int iq_num)
}
}
+static
+int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
+{
+ struct octeon_device *oct = droq->oct_dev;
+ struct octeon_device_priv *oct_priv =
+ (struct octeon_device_priv *)oct->priv;
+
+ if (droq->ops.poll_mode) {
+ droq->ops.napi_fn(droq);
+ } else {
+ if (ret & MSIX_PO_INT) {
+ tasklet_schedule(&oct_priv->droq_tasklet);
+ return 1;
+ }
+ /* this will be flushed periodically by check iq db */
+ if (ret & MSIX_PI_INT)
+ return 0;
+ }
+ return 0;
+}
+
/**
* \brief Droq packet processor scheduler
* @param oct octeon device
*/
-static
-void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
+static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
{
struct octeon_device_priv *oct_priv =
(struct octeon_device_priv *)oct->priv;
@@ -1032,19 +1026,36 @@ void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
}
}
+static irqreturn_t
+liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev)
+{
+ u64 ret;
+ struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
+ struct octeon_device *oct = ioq_vector->oct_dev;
+ struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
+
+ ret = oct->fn_list.msix_interrupt_handler(ioq_vector);
+
+ if ((ret & MSIX_PO_INT) || (ret & MSIX_PI_INT))
+ liquidio_schedule_msix_droq_pkt_handler(droq, ret);
+
+ return IRQ_HANDLED;
+}
+
/**
* \brief Interrupt handler for octeon
* @param irq unused
* @param dev octeon device
*/
static
-irqreturn_t liquidio_intr_handler(int irq __attribute__((unused)), void *dev)
+irqreturn_t liquidio_legacy_intr_handler(int irq __attribute__((unused)),
+ void *dev)
{
struct octeon_device *oct = (struct octeon_device *)dev;
irqreturn_t ret;
/* Disable our interrupts for the duration of ISR */
- oct->fn_list.disable_interrupt(oct->chip);
+ oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
ret = oct->fn_list.process_interrupt_regs(oct);
@@ -1053,7 +1064,7 @@ irqreturn_t liquidio_intr_handler(int irq __attribute__((unused)), void *dev)
/* Re-enable our interrupts */
if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET))
- oct->fn_list.enable_interrupt(oct->chip);
+ oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
return ret;
}
@@ -1067,22 +1078,204 @@ irqreturn_t liquidio_intr_handler(int irq __attribute__((unused)), void *dev)
static int octeon_setup_interrupt(struct octeon_device *oct)
{
int irqret, err;
+ struct msix_entry *msix_entries;
+ int i;
+ int num_ioq_vectors;
+ int num_alloc_ioq_vectors;
- err = pci_enable_msi(oct->pci_dev);
- if (err)
- dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
- err);
- else
- oct->flags |= LIO_FLAG_MSI_ENABLED;
-
- irqret = request_irq(oct->pci_dev->irq, liquidio_intr_handler,
- IRQF_SHARED, "octeon", oct);
- if (irqret) {
- if (oct->flags & LIO_FLAG_MSI_ENABLED)
- pci_disable_msi(oct->pci_dev);
- dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
- irqret);
- return 1;
+ if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
+ oct->num_msix_irqs = oct->sriov_info.num_pf_rings;
+ /* one non-ioq interrupt for handling sli_mac_pf_int_sum */
+ oct->num_msix_irqs += 1;
+
+ oct->msix_entries = kcalloc(
+ oct->num_msix_irqs, sizeof(struct msix_entry), GFP_KERNEL);
+ if (!oct->msix_entries)
+ return 1;
+
+ msix_entries = (struct msix_entry *)oct->msix_entries;
+ /* Assumption: the PF's MSI-X vector entries run from pf_srn
+ * through trs rather than starting at 0; change this code if
+ * that is not the case.
+ */
+ for (i = 0; i < oct->num_msix_irqs - 1; i++)
+ msix_entries[i].entry = oct->sriov_info.pf_srn + i;
+ msix_entries[oct->num_msix_irqs - 1].entry =
+ oct->sriov_info.trs;
+ num_alloc_ioq_vectors = pci_enable_msix_range(
+ oct->pci_dev, msix_entries,
+ oct->num_msix_irqs,
+ oct->num_msix_irqs);
+ if (num_alloc_ioq_vectors < 0) {
+ dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n");
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+ return 1;
+ }
+ dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");
+
+ num_ioq_vectors = oct->num_msix_irqs;
+
+ /* For PF, there is one non-ioq interrupt handler */
+ num_ioq_vectors -= 1;
+ irqret = request_irq(msix_entries[num_ioq_vectors].vector,
+ liquidio_legacy_intr_handler, 0, "octeon",
+ oct);
+ if (irqret) {
+ dev_err(&oct->pci_dev->dev,
+ "OCTEON: Request_irq failed for MSIX interrupt Error: %d\n",
+ irqret);
+ pci_disable_msix(oct->pci_dev);
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+ return 1;
+ }
+
+ for (i = 0; i < num_ioq_vectors; i++) {
+ irqret = request_irq(msix_entries[i].vector,
+ liquidio_msix_intr_handler, 0,
+ "octeon", &oct->ioq_vector[i]);
+ if (irqret) {
+ dev_err(&oct->pci_dev->dev,
+ "OCTEON: Request_irq failed for MSIX interrupt Error: %d\n",
+ irqret);
+ /* Free the non-ioq irq vector here. */
+ free_irq(msix_entries[num_ioq_vectors].vector,
+ oct);
+
+ while (i) {
+ i--;
+ /* clear the affinity mask */
+ irq_set_affinity_hint(
+ msix_entries[i].vector, NULL);
+ free_irq(msix_entries[i].vector,
+ &oct->ioq_vector[i]);
+ }
+ pci_disable_msix(oct->pci_dev);
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+ return 1;
+ }
+ oct->ioq_vector[i].vector = msix_entries[i].vector;
+ /* assign the CPU mask for this MSI-X interrupt vector */
+ irq_set_affinity_hint(
+ msix_entries[i].vector,
+ (&oct->ioq_vector[i].affinity_mask));
+ }
+ dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: MSI-X enabled\n",
+ oct->octeon_id);
+ } else {
+ err = pci_enable_msi(oct->pci_dev);
+ if (err)
+ dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
+ err);
+ else
+ oct->flags |= LIO_FLAG_MSI_ENABLED;
+
+ irqret = request_irq(oct->pci_dev->irq,
+ liquidio_legacy_intr_handler, IRQF_SHARED,
+ "octeon", oct);
+ if (irqret) {
+ if (oct->flags & LIO_FLAG_MSI_ENABLED)
+ pci_disable_msi(oct->pci_dev);
+ dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
+ irqret);
+ return 1;
+ }
+ }
+ return 0;
+}
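The PF branch above uses the exact-count form of MSI-X allocation. A condensed sketch of that pattern, detached from the driver specifics (helper name hypothetical, <linux/pci.h> assumed):

	/* With min == max, pci_enable_msix_range() either grants all n
	 * vectors or fails outright, so no partial-allocation handling
	 * is needed before falling back to MSI/INTx.
	 */
	static int lio_enable_msix_exact(struct pci_dev *pdev,
					 struct msix_entry *entries, int n)
	{
		int rc = pci_enable_msix_range(pdev, entries, n, n);

		return rc < 0 ? rc : 0;	/* rc == n on success */
	}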
+
+static int liquidio_watchdog(void *param)
+{
+ u64 wdog;
+ u16 mask_of_stuck_cores = 0;
+ u16 mask_of_crashed_cores = 0;
+ int core_num;
+ u8 core_is_stuck[LIO_MAX_CORES];
+ u8 core_crashed[LIO_MAX_CORES];
+ struct octeon_device *oct = param;
+
+ memset(core_is_stuck, 0, sizeof(core_is_stuck));
+ memset(core_crashed, 0, sizeof(core_crashed));
+
+ while (!kthread_should_stop()) {
+ mask_of_crashed_cores =
+ (u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);
+
+ for (core_num = 0; core_num < LIO_MAX_CORES; core_num++) {
+ if (!core_is_stuck[core_num]) {
+ wdog = lio_pci_readq(oct, CIU3_WDOG(core_num));
+
+ /* look at watchdog state field */
+ wdog &= CIU3_WDOG_MASK;
+ if (wdog) {
+ /* this watchdog timer has expired */
+ core_is_stuck[core_num] =
+ LIO_MONITOR_WDOG_EXPIRE;
+ mask_of_stuck_cores |= (1 << core_num);
+ }
+ }
+
+ if (!core_crashed[core_num])
+ core_crashed[core_num] =
+ (mask_of_crashed_cores >> core_num) & 1;
+ }
+
+ if (mask_of_stuck_cores) {
+ for (core_num = 0; core_num < LIO_MAX_CORES;
+ core_num++) {
+ if (core_is_stuck[core_num] == 1) {
+ dev_err(&oct->pci_dev->dev,
+ "ERROR: Octeon core %d is stuck!\n",
+ core_num);
+ /* 2 means we have printk'd an error
+ * so no need to repeat the same printk
+ */
+ core_is_stuck[core_num] =
+ LIO_MONITOR_CORE_STUCK_MSGD;
+ }
+ }
+ }
+
+ if (mask_of_crashed_cores) {
+ for (core_num = 0; core_num < LIO_MAX_CORES;
+ core_num++) {
+ if (core_crashed[core_num] == 1) {
+ dev_err(&oct->pci_dev->dev,
+ "ERROR: Octeon core %d crashed! See oct-fwdump for details.\n",
+ core_num);
+ /* 2 means we have printk'd an error
+ * so no need to repeat the same printk
+ */
+ core_crashed[core_num] =
+ LIO_MONITOR_CORE_STUCK_MSGD;
+ }
+ }
+ }
+#ifdef CONFIG_MODULE_UNLOAD
+ if (mask_of_stuck_cores || mask_of_crashed_cores) {
+ /* make module refcount=0 so that rmmod will work */
+ long refcount;
+
+ refcount = module_refcount(THIS_MODULE);
+
+ while (refcount > 0) {
+ module_put(THIS_MODULE);
+ refcount = module_refcount(THIS_MODULE);
+ }
+
+ /* compensate for and withstand an unlikely (but still
+ * possible) race condition
+ */
+ while (refcount < 0) {
+ try_module_get(THIS_MODULE);
+ refcount = module_refcount(THIS_MODULE);
+ }
+ }
+#endif
+ /* sleep for two seconds */
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(2 * HZ);
}
return 0;
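The two-line sleep that ends each loop pass is the open-coded form of a standard helper; either spelling returns early when kthread_stop() wakes the thread:

	/* equivalent to the set_current_state()/schedule_timeout() pair */
	schedule_timeout_interruptible(2 * HZ);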
@@ -1107,6 +1300,9 @@ liquidio_probe(struct pci_dev *pdev,
return -ENOMEM;
}
+ if (pdev->device == OCTEON_CN23XX_PF_VID)
+ oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;
+
dev_info(&pdev->dev, "Initializing device %x:%x.\n",
(u32)pdev->vendor, (u32)pdev->device);
@@ -1130,6 +1326,30 @@ liquidio_probe(struct pci_dev *pdev,
return -ENOMEM;
}
+ if (OCTEON_CN23XX_PF(oct_dev)) {
+ u64 scratch1;
+ u8 bus, device, function;
+
+ scratch1 = octeon_read_csr64(oct_dev, CN23XX_SLI_SCRATCH1);
+ if (!(scratch1 & 4ULL)) {
+ /* Bit 2 of SLI_SCRATCH_1 is a flag that indicates that
+ * the lio watchdog kernel thread is running for this
+ * NIC. Each NIC gets one watchdog kernel thread.
+ */
+ scratch1 |= 4ULL;
+ octeon_write_csr64(oct_dev, CN23XX_SLI_SCRATCH1,
+ scratch1);
+
+ bus = pdev->bus->number;
+ device = PCI_SLOT(pdev->devfn);
+ function = PCI_FUNC(pdev->devfn);
+ oct_dev->watchdog_task = kthread_create(
+ liquidio_watchdog, oct_dev,
+ "liowd/%02hhx:%02hhx.%hhx", bus, device, function);
+ wake_up_process(oct_dev->watchdog_task);
+ }
+ }
+
oct_dev->rx_pause = 1;
oct_dev->tx_pause = 1;
@@ -1146,6 +1366,7 @@ liquidio_probe(struct pci_dev *pdev,
static void octeon_destroy_resources(struct octeon_device *oct)
{
int i;
+ struct msix_entry *msix_entries;
struct octeon_device_priv *oct_priv =
(struct octeon_device_priv *)oct->priv;
@@ -1190,21 +1411,40 @@ static void octeon_destroy_resources(struct octeon_device *oct)
dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
/* Disable interrupts */
- oct->fn_list.disable_interrupt(oct->chip);
+ oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
+
+ if (oct->msix_on) {
+ msix_entries = (struct msix_entry *)oct->msix_entries;
+ for (i = 0; i < oct->num_msix_irqs - 1; i++) {
+ /* clear the affinity_cpumask */
+ irq_set_affinity_hint(msix_entries[i].vector,
+ NULL);
+ free_irq(msix_entries[i].vector,
+ &oct->ioq_vector[i]);
+ }
+ /* the non-ioq vector's argument is the oct struct */
+ free_irq(msix_entries[i].vector, oct);
- /* Release the interrupt line */
- free_irq(oct->pci_dev->irq, oct);
+ pci_disable_msix(oct->pci_dev);
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+ } else {
+ /* Release the interrupt line */
+ free_irq(oct->pci_dev->irq, oct);
- if (oct->flags & LIO_FLAG_MSI_ENABLED)
- pci_disable_msi(oct->pci_dev);
+ if (oct->flags & LIO_FLAG_MSI_ENABLED)
+ pci_disable_msi(oct->pci_dev);
+ }
- /* fallthrough */
+ if (OCTEON_CN23XX_PF(oct))
+ octeon_free_ioq_vector(oct);
+ /* fallthrough */
case OCT_DEV_IN_RESET:
case OCT_DEV_DROQ_INIT_DONE:
/*atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);*/
mdelay(100);
for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
- if (!(oct->io_qmask.oq & (1ULL << i)))
+ if (!(oct->io_qmask.oq & BIT_ULL(i)))
continue;
octeon_delete_droq(oct, i);
}
@@ -1226,16 +1466,15 @@ static void octeon_destroy_resources(struct octeon_device *oct)
octeon_delete_response_list(oct);
/* fallthrough */
- case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
- octeon_free_sc_buffer_pool(oct);
-
- /* fallthrough */
case OCT_DEV_INSTR_QUEUE_INIT_DONE:
for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
- if (!(oct->io_qmask.iq & (1ULL << i)))
+ if (!(oct->io_qmask.iq & BIT_ULL(i)))
continue;
octeon_delete_instr_queue(oct, i);
}
+ /* fallthrough */
+ case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
+ octeon_free_sc_buffer_pool(oct);
/* fallthrough */
case OCT_DEV_DISPATCH_INIT_DONE:
@@ -1244,9 +1483,9 @@ static void octeon_destroy_resources(struct octeon_device *oct)
/* fallthrough */
case OCT_DEV_PCI_MAP_DONE:
-
/* Soft reset the octeon device before exiting */
- oct->fn_list.soft_reset(oct);
+ if ((!OCTEON_CN23XX_PF(oct)) || !oct->octeon_id)
+ oct->fn_list.soft_reset(oct);
octeon_unmap_pci_barx(oct, 0);
octeon_unmap_pci_barx(oct, 1);
@@ -1264,23 +1503,89 @@ static void octeon_destroy_resources(struct octeon_device *oct)
}
/**
+ * \brief Callback for rx ctrl
+ * @param oct octeon device
+ * @param status status of request
+ * @param buf pointer to resp structure
+ */
+static void rx_ctl_callback(struct octeon_device *oct,
+ u32 status,
+ void *buf)
+{
+ struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
+ struct liquidio_rx_ctl_context *ctx;
+
+ ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr;
+
+ oct = lio_get_device(ctx->octeon_id);
+ if (status)
+ dev_err(&oct->pci_dev->dev, "rx ctl instruction failed. Status: %llx\n",
+ CVM_CAST64(status));
+ WRITE_ONCE(ctx->cond, 1);
+
+ /* This barrier is required to be sure that the response has been
+ * written fully before waking up the handler
+ */
+ wmb();
+
+ wake_up_interruptible(&ctx->wc);
+}
+
+/**
* \brief Send Rx control command
* @param lio per-network private data
* @param start_stop whether to start or stop
*/
static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
- struct octnic_ctrl_pkt nctrl;
+ struct octeon_soft_command *sc;
+ struct liquidio_rx_ctl_context *ctx;
+ union octnet_cmd *ncmd;
+ int ctx_size = sizeof(struct liquidio_rx_ctl_context);
+ struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
+ int retval;
- memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+ if (oct->props[lio->ifidx].rx_on == start_stop)
+ return;
- nctrl.ncmd.s.cmd = OCTNET_CMD_RX_CTL;
- nctrl.ncmd.s.param1 = start_stop;
- nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
- nctrl.netpndev = (u64)lio->netdev;
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
+ 16, ctx_size);
- if (octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl) < 0)
+ ncmd = (union octnet_cmd *)sc->virtdptr;
+ ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr;
+
+ WRITE_ONCE(ctx->cond, 0);
+ ctx->octeon_id = lio_get_device_id(oct);
+ init_waitqueue_head(&ctx->wc);
+
+ ncmd->u64 = 0;
+ ncmd->s.cmd = OCTNET_CMD_RX_CTL;
+ ncmd->s.param1 = start_stop;
+
+ octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
+
+ sc->iq_no = lio->linfo.txpciq[0].s.q_no;
+
+ octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
+ OPCODE_NIC_CMD, 0, 0, 0);
+
+ sc->callback = rx_ctl_callback;
+ sc->callback_arg = sc;
+ sc->wait_time = 5000;
+
+ retval = octeon_send_soft_command(oct, sc);
+ if (retval == IQ_SEND_FAILED) {
netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
+ } else {
+ /* Sleep on a wait queue till the cond flag indicates that the
+ * response arrived or timed-out.
+ */
+ if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR)
+ return;
+ oct->props[lio->ifidx].rx_on = start_stop;
+ }
+
+ octeon_free_soft_command(oct, sc);
}
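A sketch of the wait/wake pairing assumed between rx_ctl_callback() and send_rx_ctrl_cmd(): sleep_cond() is taken to behave roughly like the helper below (the real helper lives elsewhere in the driver and may add a timeout):

	static int sleep_cond_sketch(wait_queue_head_t *wq, int *condition)
	{
		/* the callback sets *condition, then wake_up_interruptible() */
		if (wait_event_interruptible(*wq, READ_ONCE(*condition)))
			return -EINTR;	/* a signal interrupted the wait */
		return 0;
	}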
/**
@@ -1307,21 +1612,24 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");
- send_rx_ctrl_cmd(lio, 0);
-
if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
- txqs_stop(netdev);
+ liquidio_stop(netdev);
if (oct->props[lio->ifidx].napi_enabled == 1) {
list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
napi_disable(napi);
oct->props[lio->ifidx].napi_enabled = 0;
+
+ if (OCTEON_CN23XX_PF(oct))
+ oct->droq[0]->ops.poll_mode = 0;
}
if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
unregister_netdev(netdev);
+ cleanup_link_status_change_wq(netdev);
+
delete_glists(lio);
free_netdev(netdev);
@@ -1374,6 +1682,9 @@ static void liquidio_remove(struct pci_dev *pdev)
dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");
+ if (oct_dev->watchdog_task)
+ kthread_stop(oct_dev->watchdog_task);
+
if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP))
liquidio_stop_nic_module(oct_dev);
@@ -1417,6 +1728,12 @@ static int octeon_chip_specific_setup(struct octeon_device *oct)
s = "CN66XX";
break;
+ case OCTEON_CN23XX_PCIID_PF:
+ oct->chip_id = OCTEON_CN23XX_PF_VID;
+ ret = setup_cn23xx_octeon_pf_device(oct);
+ s = "CN23XX";
+ break;
+
default:
s = "?";
dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
@@ -1867,7 +2184,7 @@ static void if_cfg_callback(struct octeon_device *oct,
struct liquidio_if_cfg_context *ctx;
resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
- ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
+ ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
oct = lio_get_device(ctx->octeon_id);
if (resp->status)
@@ -2060,11 +2377,14 @@ static void napi_schedule_wrapper(void *param)
*/
static void liquidio_napi_drv_callback(void *arg)
{
+ struct octeon_device *oct;
struct octeon_droq *droq = arg;
int this_cpu = smp_processor_id();
- if (droq->cpu_id == this_cpu) {
- napi_schedule(&droq->napi);
+ oct = droq->oct_dev;
+
+ if (OCTEON_CN23XX_PF(oct) || droq->cpu_id == this_cpu) {
+ napi_schedule_irqoff(&droq->napi);
} else {
struct call_single_data *csd = &droq->csd;
@@ -2173,17 +2493,15 @@ static inline int setup_io_queues(struct octeon_device *octeon_dev,
lio->ifidx), NULL);
if (retval) {
dev_err(&octeon_dev->pci_dev->dev,
- " %s : Runtime DROQ(RxQ) creation failed.\n",
+ "%s : Runtime DROQ(RxQ) creation failed.\n",
__func__);
return 1;
}
droq = octeon_dev->droq[q_no];
napi = &droq->napi;
- dev_dbg(&octeon_dev->pci_dev->dev,
- "netif_napi_add netdev:%llx oct:%llx\n",
- (u64)netdev,
- (u64)octeon_dev);
+ dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx pf_num:%d\n",
+ (u64)netdev, (u64)octeon_dev, octeon_dev->pf_num);
netif_napi_add(netdev, napi, liquidio_napi_poll, 64);
/* designate a CPU for this droq */
@@ -2195,6 +2513,14 @@ static inline int setup_io_queues(struct octeon_device *octeon_dev,
octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);
}
+ if (OCTEON_CN23XX_PF(octeon_dev)) {
+ /* 23XX PF can receive control messages (via the first PF-owned
+ * droq) from the firmware even if the ethX interface is down,
+ * so that's why poll_mode must be off for the first droq.
+ */
+ octeon_dev->droq[0]->ops.poll_mode = 0;
+ }
+
/* set up IQs. */
for (q = 0; q < lio->linfo.num_txpciq; q++) {
num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(octeon_get_conf
@@ -2235,7 +2561,7 @@ static void octnet_poll_check_txq_status(struct work_struct *work)
* \brief Sets up the txq poll check
* @param netdev network device
*/
-static inline void setup_tx_poll_fn(struct net_device *netdev)
+static inline int setup_tx_poll_fn(struct net_device *netdev)
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
@@ -2244,21 +2570,24 @@ static inline void setup_tx_poll_fn(struct net_device *netdev)
WQ_MEM_RECLAIM, 0);
if (!lio->txq_status_wq.wq) {
dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
- return;
+ return -1;
}
INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
octnet_poll_check_txq_status);
lio->txq_status_wq.wk.ctxptr = lio;
queue_delayed_work(lio->txq_status_wq.wq,
&lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
+ return 0;
}
static inline void cleanup_tx_poll_fn(struct net_device *netdev)
{
struct lio *lio = GET_LIO(netdev);
- cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
- destroy_workqueue(lio->txq_status_wq.wq);
+ if (lio->txq_status_wq.wq) {
+ cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
+ destroy_workqueue(lio->txq_status_wq.wq);
+ }
}
/**
@@ -2276,24 +2605,34 @@ static int liquidio_open(struct net_device *netdev)
napi_enable(napi);
oct->props[lio->ifidx].napi_enabled = 1;
+
+ if (OCTEON_CN23XX_PF(oct))
+ oct->droq[0]->ops.poll_mode = 1;
}
oct_ptp_open(netdev);
ifstate_set(lio, LIO_IFSTATE_RUNNING);
- setup_tx_poll_fn(netdev);
-
- start_txq(netdev);
+ /* Ready for link status updates */
+ lio->intf_open = 1;
netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
+ if (OCTEON_CN23XX_PF(oct)) {
+ if (!oct->msix_on)
+ if (setup_tx_poll_fn(netdev))
+ return -1;
+ } else {
+ if (setup_tx_poll_fn(netdev))
+ return -1;
+ }
+
+ start_txq(netdev);
+
/* tell Octeon to start forwarding packets to host */
send_rx_ctrl_cmd(lio, 1);
- /* Ready for link status updates */
- lio->intf_open = 1;
-
dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
netdev->name);
@@ -2328,7 +2667,12 @@ static int liquidio_stop(struct net_device *netdev)
/* Now it should be safe to tell Octeon that nic interface is down. */
send_rx_ctrl_cmd(lio, 0);
- cleanup_tx_poll_fn(netdev);
+ if (OCTEON_CN23XX_PF(oct)) {
+ if (!oct->msix_on)
+ cleanup_tx_poll_fn(netdev);
+ } else {
+ cleanup_tx_poll_fn(netdev);
+ }
if (lio->ptp_clock) {
ptp_clock_unregister(lio->ptp_clock);
@@ -2340,143 +2684,6 @@ static int liquidio_stop(struct net_device *netdev)
return 0;
}
-void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
-{
- struct octnic_ctrl_pkt *nctrl = (struct octnic_ctrl_pkt *)nctrl_ptr;
- struct net_device *netdev = (struct net_device *)nctrl->netpndev;
- struct lio *lio = GET_LIO(netdev);
- struct octeon_device *oct = lio->oct_dev;
- u8 *mac;
-
- switch (nctrl->ncmd.s.cmd) {
- case OCTNET_CMD_CHANGE_DEVFLAGS:
- case OCTNET_CMD_SET_MULTI_LIST:
- break;
-
- case OCTNET_CMD_CHANGE_MACADDR:
- mac = ((u8 *)&nctrl->udd[0]) + 2;
- netif_info(lio, probe, lio->netdev,
- "%s %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
- "MACAddr changed to", mac[0], mac[1],
- mac[2], mac[3], mac[4], mac[5]);
- break;
-
- case OCTNET_CMD_CHANGE_MTU:
- /* If command is successful, change the MTU. */
- netif_info(lio, probe, lio->netdev, " MTU Changed from %d to %d\n",
- netdev->mtu, nctrl->ncmd.s.param1);
- dev_info(&oct->pci_dev->dev, "%s MTU Changed from %d to %d\n",
- netdev->name, netdev->mtu,
- nctrl->ncmd.s.param1);
- rtnl_lock();
- netdev->mtu = nctrl->ncmd.s.param1;
- call_netdevice_notifiers(NETDEV_CHANGEMTU, netdev);
- rtnl_unlock();
- break;
-
- case OCTNET_CMD_GPIO_ACCESS:
- netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");
-
- break;
-
- case OCTNET_CMD_LRO_ENABLE:
- dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name);
- break;
-
- case OCTNET_CMD_LRO_DISABLE:
- dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n",
- netdev->name);
- break;
-
- case OCTNET_CMD_VERBOSE_ENABLE:
- dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name);
- break;
-
- case OCTNET_CMD_VERBOSE_DISABLE:
- dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n",
- netdev->name);
- break;
-
- case OCTNET_CMD_ENABLE_VLAN_FILTER:
- dev_info(&oct->pci_dev->dev, "%s VLAN filter enabled\n",
- netdev->name);
- break;
-
- case OCTNET_CMD_ADD_VLAN_FILTER:
- dev_info(&oct->pci_dev->dev, "%s VLAN filter %d added\n",
- netdev->name, nctrl->ncmd.s.param1);
- break;
-
- case OCTNET_CMD_DEL_VLAN_FILTER:
- dev_info(&oct->pci_dev->dev, "%s VLAN filter %d removed\n",
- netdev->name, nctrl->ncmd.s.param1);
- break;
-
- case OCTNET_CMD_SET_SETTINGS:
- dev_info(&oct->pci_dev->dev, "%s settings changed\n",
- netdev->name);
-
- break;
- /* Case to handle "OCTNET_CMD_TNL_RX_CSUM_CTL"
- * Command passed by NIC driver
- */
- case OCTNET_CMD_TNL_RX_CSUM_CTL:
- if (nctrl->ncmd.s.param1 == OCTNET_CMD_RXCSUM_ENABLE) {
- netif_info(lio, probe, lio->netdev,
- "%s RX Checksum Offload Enabled\n",
- netdev->name);
- } else if (nctrl->ncmd.s.param1 ==
- OCTNET_CMD_RXCSUM_DISABLE) {
- netif_info(lio, probe, lio->netdev,
- "%s RX Checksum Offload Disabled\n",
- netdev->name);
- }
- break;
-
- /* Case to handle "OCTNET_CMD_TNL_TX_CSUM_CTL"
- * Command passed by NIC driver
- */
- case OCTNET_CMD_TNL_TX_CSUM_CTL:
- if (nctrl->ncmd.s.param1 == OCTNET_CMD_TXCSUM_ENABLE) {
- netif_info(lio, probe, lio->netdev,
- "%s TX Checksum Offload Enabled\n",
- netdev->name);
- } else if (nctrl->ncmd.s.param1 ==
- OCTNET_CMD_TXCSUM_DISABLE) {
- netif_info(lio, probe, lio->netdev,
- "%s TX Checksum Offload Disabled\n",
- netdev->name);
- }
- break;
-
- /* Case to handle "OCTNET_CMD_VXLAN_PORT_CONFIG"
- * Command passed by NIC driver
- */
- case OCTNET_CMD_VXLAN_PORT_CONFIG:
- if (nctrl->ncmd.s.more == OCTNET_CMD_VXLAN_PORT_ADD) {
- netif_info(lio, probe, lio->netdev,
- "%s VxLAN Destination UDP PORT:%d ADDED\n",
- netdev->name,
- nctrl->ncmd.s.param1);
- } else if (nctrl->ncmd.s.more ==
- OCTNET_CMD_VXLAN_PORT_DEL) {
- netif_info(lio, probe, lio->netdev,
- "%s VxLAN Destination UDP PORT:%d DELETED\n",
- netdev->name,
- nctrl->ncmd.s.param1);
- }
- break;
-
- case OCTNET_CMD_SET_FLOW_CTL:
- netif_info(lio, probe, lio->netdev, "Set RX/TX flow control parameters\n");
- break;
-
- default:
- dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__,
- nctrl->ncmd.s.cmd);
- }
-}
-
/**
* \brief Converts a mask based on net device flags
* @param netdev network device
@@ -2817,8 +3024,7 @@ static void handle_timestamp(struct octeon_device *oct,
*/
static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
struct octnic_data_pkt *ndata,
- struct octnet_buf_free_info *finfo,
- int xmit_more)
+ struct octnet_buf_free_info *finfo)
{
int retval;
struct octeon_soft_command *sc;
@@ -2846,9 +3052,15 @@ static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
sc->callback_arg = finfo->skb;
sc->iq_no = ndata->q_no;
- len = (u32)((struct octeon_instr_ih2 *)(&sc->cmd.cmd2.ih2))->dlengsz;
+ if (OCTEON_CN23XX_PF(oct))
+ len = (u32)((struct octeon_instr_ih3 *)
+ (&sc->cmd.cmd3.ih3))->dlengsz;
+ else
+ len = (u32)((struct octeon_instr_ih2 *)
+ (&sc->cmd.cmd2.ih2))->dlengsz;
+
+ ring_doorbell = 1;
- ring_doorbell = !xmit_more;
retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
sc, len, ndata->reqtype);
@@ -2881,7 +3093,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
union tx_info *tx_info;
int status = 0;
int q_idx = 0, iq_no = 0;
- int xmit_more, j;
+ int j;
u64 dptr = 0;
u32 tag = 0;
@@ -2980,7 +3192,10 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_BUSY;
}
- ndata.cmd.cmd2.dptr = dptr;
+ if (OCTEON_CN23XX_PF(oct))
+ ndata.cmd.cmd3.dptr = dptr;
+ else
+ ndata.cmd.cmd2.dptr = dptr;
finfo->dptr = dptr;
ndata.reqtype = REQTYPE_NORESP_NET;
@@ -3055,15 +3270,23 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
g->sg_size, DMA_TO_DEVICE);
dptr = g->sg_dma_ptr;
- ndata.cmd.cmd2.dptr = dptr;
+ if (OCTEON_CN23XX_PF(oct))
+ ndata.cmd.cmd3.dptr = dptr;
+ else
+ ndata.cmd.cmd2.dptr = dptr;
finfo->dptr = dptr;
finfo->g = g;
ndata.reqtype = REQTYPE_NORESP_NET_SG;
}
- irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
- tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
+ if (OCTEON_CN23XX_PF(oct)) {
+ irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
+ tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
+ } else {
+ irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
+ tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
+ }
if (skb_shinfo(skb)->gso_size) {
tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
@@ -3077,12 +3300,10 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
}
- xmit_more = skb->xmit_more;
-
if (unlikely(cmdsetup.s.timestamp))
- status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
+ status = send_nic_timestamp_pkt(oct, &ndata, finfo);
else
- status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
+ status = octnet_send_nic_data_pkt(oct, &ndata);
if (status == IQ_SEND_FAILED)
goto lio_xmit_failed;
@@ -3190,8 +3411,8 @@ static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
* OCTNET_CMD_RXCSUM_DISABLE
* @returns SUCCESS or FAILURE
*/
-int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
- u8 rx_cmd)
+static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
+ u8 rx_cmd)
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
@@ -3249,31 +3470,6 @@ static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
return ret;
}
-int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
-{
- struct lio *lio = GET_LIO(netdev);
- struct octeon_device *oct = lio->oct_dev;
- struct octnic_ctrl_pkt nctrl;
- int ret = 0;
-
- memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
-
- nctrl.ncmd.u64 = 0;
- nctrl.ncmd.s.cmd = cmd;
- nctrl.ncmd.s.param1 = param1;
- nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
- nctrl.wait_time = 100;
- nctrl.netpndev = (u64)netdev;
- nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
-
- ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
- if (ret < 0) {
- dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n",
- ret);
- }
- return ret;
-}
-
/** \brief Net device fix features
* @param netdev pointer to network device
* @param request features requested
@@ -3492,8 +3688,9 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
union oct_nic_if_cfg if_cfg;
unsigned int base_queue;
unsigned int gmx_port_id;
- u32 resp_size, ctx_size;
+ u32 resp_size, ctx_size, data_size;
u32 ifidx_or_pfnum;
+ struct lio_version *vdata;
/* This is to handle link status changes */
octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
@@ -3515,21 +3712,37 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
for (i = 0; i < octeon_dev->ifcount; i++) {
resp_size = sizeof(struct liquidio_if_cfg_resp);
ctx_size = sizeof(struct liquidio_if_cfg_context);
+ data_size = sizeof(struct lio_version);
sc = (struct octeon_soft_command *)
- octeon_alloc_soft_command(octeon_dev, 0,
+ octeon_alloc_soft_command(octeon_dev, data_size,
resp_size, ctx_size);
resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
+ vdata = (struct lio_version *)sc->virtdptr;
+
+ *((u64 *)vdata) = 0;
+ vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
+ vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
+ vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
- num_iqueues =
- CFG_GET_NUM_TXQS_NIC_IF(octeon_get_conf(octeon_dev), i);
- num_oqueues =
- CFG_GET_NUM_RXQS_NIC_IF(octeon_get_conf(octeon_dev), i);
- base_queue =
- CFG_GET_BASE_QUE_NIC_IF(octeon_get_conf(octeon_dev), i);
- gmx_port_id =
- CFG_GET_GMXID_NIC_IF(octeon_get_conf(octeon_dev), i);
- ifidx_or_pfnum = i;
+ if (OCTEON_CN23XX_PF(octeon_dev)) {
+ num_iqueues = octeon_dev->sriov_info.num_pf_rings;
+ num_oqueues = octeon_dev->sriov_info.num_pf_rings;
+ base_queue = octeon_dev->sriov_info.pf_srn;
+
+ gmx_port_id = octeon_dev->pf_num;
+ ifidx_or_pfnum = octeon_dev->pf_num;
+ } else {
+ num_iqueues = CFG_GET_NUM_TXQS_NIC_IF(
+ octeon_get_conf(octeon_dev), i);
+ num_oqueues = CFG_GET_NUM_RXQS_NIC_IF(
+ octeon_get_conf(octeon_dev), i);
+ base_queue = CFG_GET_BASE_QUE_NIC_IF(
+ octeon_get_conf(octeon_dev), i);
+ gmx_port_id = CFG_GET_GMXID_NIC_IF(
+ octeon_get_conf(octeon_dev), i);
+ ifidx_or_pfnum = i;
+ }
dev_dbg(&octeon_dev->pci_dev->dev,
"requesting config for interface %d, iqs %d, oqs %d\n",
@@ -3566,7 +3779,11 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
/* Sleep on a wait queue till the cond flag indicates that the
* response arrived or timed-out.
*/
- sleep_cond(&ctx->wc, &ctx->cond);
+ if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
+ dev_err(&octeon_dev->pci_dev->dev, "Wait interrupted\n");
+ goto setup_nic_wait_intr;
+ }
+
retval = resp->status;
if (retval) {
dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
@@ -3633,12 +3850,16 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
- lio->dev_capability = NETIF_F_HIGHDMA
- | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
- | NETIF_F_SG | NETIF_F_RXCSUM
- | NETIF_F_GRO
- | NETIF_F_TSO | NETIF_F_TSO6
- | NETIF_F_LRO;
+ if (OCTEON_CN23XX_PF(octeon_dev) ||
+ OCTEON_CN6XXX(octeon_dev)) {
+ lio->dev_capability = NETIF_F_HIGHDMA
+ | NETIF_F_IP_CSUM
+ | NETIF_F_IPV6_CSUM
+ | NETIF_F_SG | NETIF_F_RXCSUM
+ | NETIF_F_GRO
+ | NETIF_F_TSO | NETIF_F_TSO6
+ | NETIF_F_LRO;
+ }
netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
/* Copy of transmit encapsulation capabilities:
@@ -3713,7 +3934,10 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
/* Register ethtool support */
liquidio_set_ethtool_ops(netdev);
- octeon_dev->priv_flags = 0x0;
+ if (lio->oct_dev->chip_id == OCTEON_CN23XX_PF_VID)
+ octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
+ else
+ octeon_dev->priv_flags = 0x0;
if (netdev->features & NETIF_F_LRO)
liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
@@ -3725,6 +3949,9 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
liquidio_set_feature(netdev,
OCTNET_CMD_VERBOSE_ENABLE, 0);
+ if (setup_link_status_change_wq(netdev))
+ goto setup_nic_dev_fail;
+
/* Register the network device with the OS */
if (register_netdev(netdev)) {
dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
@@ -3760,6 +3987,8 @@ setup_nic_dev_fail:
octeon_free_soft_command(octeon_dev, sc);
+setup_nic_wait_intr:
+
while (i--) {
dev_err(&octeon_dev->pci_dev->dev,
"NIC ifidx:%d Setup failed\n", i);
@@ -3789,8 +4018,7 @@ static int liquidio_init_nic_module(struct octeon_device *oct)
/* run port_config command for each port */
oct->ifcount = num_nic_ports;
- memset(oct->props, 0,
- sizeof(struct octdev_props) * num_nic_ports);
+ memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports);
for (i = 0; i < MAX_OCTEON_LINKS; i++)
oct->props[i].gmxport = -1;
@@ -3806,7 +4034,7 @@ static int liquidio_init_nic_module(struct octeon_device *oct)
/* Initialize interrupt moderation params */
intrmod_cfg = &((struct octeon_device *)oct)->intrmod;
intrmod_cfg->rx_enable = 1;
- intrmod_cfg->check_intrvl = LIO_INTRMOD_CHECK_INTERVAL;
+ intrmod_cfg->check_intrvl = LIO_INTRMOD_CHECK_INTERVAL;
intrmod_cfg->maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR;
intrmod_cfg->minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR;
intrmod_cfg->rx_maxcnt_trigger = LIO_INTRMOD_RXMAXCNT_TRIGGER;
@@ -3818,6 +4046,7 @@ static int liquidio_init_nic_module(struct octeon_device *oct)
intrmod_cfg->tx_mincnt_trigger = LIO_INTRMOD_TXMINCNT_TRIGGER;
intrmod_cfg->rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
intrmod_cfg->rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
+ intrmod_cfg->tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
return retval;
@@ -3880,6 +4109,7 @@ static void nic_starter(struct work_struct *work)
static int octeon_device_init(struct octeon_device *octeon_dev)
{
int j, ret;
+ int fw_loaded = 0;
char bootcmd[] = "\n";
struct octeon_device_priv *oct_priv =
(struct octeon_device_priv *)octeon_dev->priv;
@@ -3901,9 +4131,23 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
octeon_dev->app_mode = CVM_DRV_INVALID_APP;
- /* Do a soft reset of the Octeon device. */
- if (octeon_dev->fn_list.soft_reset(octeon_dev))
+ if (OCTEON_CN23XX_PF(octeon_dev)) {
+ if (!cn23xx_fw_loaded(octeon_dev)) {
+ fw_loaded = 0;
+ /* Do a soft reset of the Octeon device. */
+ if (octeon_dev->fn_list.soft_reset(octeon_dev))
+ return 1;
+ /* things might have changed */
+ if (!cn23xx_fw_loaded(octeon_dev))
+ fw_loaded = 0;
+ else
+ fw_loaded = 1;
+ } else {
+ fw_loaded = 1;
+ }
+ } else if (octeon_dev->fn_list.soft_reset(octeon_dev)) {
return 1;
+ }
/* Initialize the dispatch mechanism used to push packets arriving on
* Octeon Output queues.
@@ -3925,6 +4169,22 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
octeon_set_io_queues_off(octeon_dev);
+ if (OCTEON_CN23XX_PF(octeon_dev)) {
+ ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
+ if (ret) {
+ dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n");
+ return ret;
+ }
+ }
+
+ /* Initialize soft command buffer pool
+ */
+ if (octeon_setup_sc_buffer_pool(octeon_dev)) {
+ dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
+ return 1;
+ }
+ atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
+
/* Setup the data structures that manage this Octeon's Input queues. */
if (octeon_setup_instr_queues(octeon_dev)) {
dev_err(&octeon_dev->pci_dev->dev,
@@ -3936,14 +4196,6 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
}
atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
- /* Initialize soft command buffer pool
- */
- if (octeon_setup_sc_buffer_pool(octeon_dev)) {
- dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
- return 1;
- }
- atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
-
/* Initialize lists to manage the requests of different types that
* arrive from user & kernel applications for this octeon device.
*/
@@ -3963,15 +4215,23 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);
- /* The input and output queue registers were setup earlier (the queues
- * were not enabled). Any additional registers that need to be
- * programmed should be done now.
- */
- ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
- if (ret) {
- dev_err(&octeon_dev->pci_dev->dev,
- "Failed to configure device registers\n");
- return ret;
+ if (OCTEON_CN23XX_PF(octeon_dev)) {
+ if (octeon_allocate_ioq_vector(octeon_dev)) {
+ dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
+ return 1;
+ }
+
+ } else {
+ /* The input and output queue registers were setup earlier (the
+ * queues were not enabled). Any additional registers
+ * that need to be programmed should be done now.
+ */
+ ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
+ if (ret) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "Failed to configure device registers\n");
+ return ret;
+ }
}
/* Initialize the tasklet that handles output queue packet processing.*/
@@ -3985,63 +4245,76 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
return 1;
/* Enable Octeon device interrupts */
- octeon_dev->fn_list.enable_interrupt(octeon_dev->chip);
+ octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);
/* Enable the input and output queues for this Octeon device */
- octeon_dev->fn_list.enable_io_queues(octeon_dev);
+ ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
+ if (ret) {
+ dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues");
+ return ret;
+ }
atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);
- dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
-
- if (ddr_timeout == 0)
- dev_info(&octeon_dev->pci_dev->dev, "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
+ if ((!OCTEON_CN23XX_PF(octeon_dev)) || !fw_loaded) {
+ dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
+ if (!ddr_timeout) {
+ dev_info(&octeon_dev->pci_dev->dev,
+ "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
+ }
- schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);
+ schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);
- /* Wait for the octeon to initialize DDR after the soft-reset. */
- while (ddr_timeout == 0) {
- set_current_state(TASK_INTERRUPTIBLE);
- if (schedule_timeout(HZ / 10)) {
- /* user probably pressed Control-C */
+ /* Wait for the octeon to initialize DDR after the soft-reset.*/
+ while (!ddr_timeout) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (schedule_timeout(HZ / 10)) {
+ /* user probably pressed Control-C */
+ return 1;
+ }
+ }
+ ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
+ if (ret) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
+ ret);
return 1;
}
- }
- ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
- if (ret) {
- dev_err(&octeon_dev->pci_dev->dev,
- "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
- ret);
- return 1;
- }
- if (octeon_wait_for_bootloader(octeon_dev, 1000) != 0) {
- dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
- return 1;
- }
+ if (octeon_wait_for_bootloader(octeon_dev, 1000)) {
+ dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
+ return 1;
+ }
- /* Divert uboot to take commands from host instead. */
- ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);
+ /* Divert uboot to take commands from host instead. */
+ ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);
- dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
- ret = octeon_init_consoles(octeon_dev);
- if (ret) {
- dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
- return 1;
- }
- ret = octeon_add_console(octeon_dev, 0);
- if (ret) {
- dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
- return 1;
- }
+ dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
+ ret = octeon_init_consoles(octeon_dev);
+ if (ret) {
+ dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
+ return 1;
+ }
+ ret = octeon_add_console(octeon_dev, 0);
+ if (ret) {
+ dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
+ return 1;
+ }
- atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);
+ atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);
- dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
- ret = load_firmware(octeon_dev);
- if (ret) {
- dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
- return 1;
+ dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
+ ret = load_firmware(octeon_dev);
+ if (ret) {
+ dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
+ return 1;
+ }
+ /* set bit 1 of SLI_SCRATCH_1 to indicate that firmware is
+ * loaded
+ */
+ if (OCTEON_CN23XX_PF(octeon_dev))
+ octeon_write_csr64(octeon_dev, CN23XX_SLI_SCRATCH1,
+ 2ULL);
}
handshake[octeon_dev->octeon_id].init_ok = 1;
@@ -4057,7 +4330,6 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
octeon_dev->droq[j]->pkts_credit_reg);
/* Packets can start arriving on the output queues from this point. */
-
return 0;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
index 199a8b9c7dc5..0d990accb65e 100644
--- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
+++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
@@ -30,10 +30,24 @@
#include "octeon_config.h"
-#define LIQUIDIO_BASE_VERSION "1.4"
-#define LIQUIDIO_MICRO_VERSION ".1"
#define LIQUIDIO_PACKAGE ""
-#define LIQUIDIO_VERSION "1.4.1"
+#define LIQUIDIO_BASE_MAJOR_VERSION 1
+#define LIQUIDIO_BASE_MINOR_VERSION 4
+#define LIQUIDIO_BASE_MICRO_VERSION 1
+#define LIQUIDIO_BASE_VERSION __stringify(LIQUIDIO_BASE_MAJOR_VERSION) "." \
+ __stringify(LIQUIDIO_BASE_MINOR_VERSION)
+#define LIQUIDIO_MICRO_VERSION "." __stringify(LIQUIDIO_BASE_MICRO_VERSION)
+#define LIQUIDIO_VERSION LIQUIDIO_PACKAGE \
+ __stringify(LIQUIDIO_BASE_MAJOR_VERSION) "." \
+ __stringify(LIQUIDIO_BASE_MINOR_VERSION) \
+ "." __stringify(LIQUIDIO_BASE_MICRO_VERSION)
+
+struct lio_version {
+ u16 major;
+ u16 minor;
+ u16 micro;
+ u16 reserved;
+};
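Adjacent C string literals concatenate, so with the values above the macros expand at compile time as follows (the identifier below is illustrative only):

	/* LIQUIDIO_BASE_VERSION -> "1" "." "4"            == "1.4"   */
	/* LIQUIDIO_VERSION      -> "" "1" "." "4" "." "1" == "1.4.1" */
	static const char lio_version_example[] __maybe_unused = LIQUIDIO_VERSION;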
#define CONTROL_IQ 0
/** Tag types used by Octeon cores in its work. */
@@ -218,6 +232,9 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry,
#define OCTNET_CMD_ADD_VLAN_FILTER 0x17
#define OCTNET_CMD_DEL_VLAN_FILTER 0x18
#define OCTNET_CMD_VXLAN_PORT_CONFIG 0x19
+
+#define OCTNET_CMD_ID_ACTIVE 0x1a
+
#define OCTNET_CMD_VXLAN_PORT_ADD 0x0
#define OCTNET_CMD_VXLAN_PORT_DEL 0x1
#define OCTNET_CMD_RXCSUM_ENABLE 0x0
@@ -296,6 +313,13 @@ union octnet_cmd {
#define OCTNET_CMD_SIZE (sizeof(union octnet_cmd))
+/* pki_ih3 + irh + ossp[0] + ossp[1] + rdp + rptr = 40 bytes */
+#define LIO_SOFTCMDRESP_IH2 40
+#define LIO_SOFTCMDRESP_IH3 (40 + 8)
+
+#define LIO_PCICMD_O2 24
+#define LIO_PCICMD_O3 (24 + 8)
+
/* Instruction Header(DPI) - for OCTEON-III models */
struct octeon_instr_ih3 {
#ifdef __BIG_ENDIAN_BITFIELD
@@ -814,6 +838,8 @@ struct oct_link_stats {
#define VITESSE_PHY_GPIO_DRIVEOFF 0x4
#define VITESSE_PHY_GPIO_HIGH 0x2
#define VITESSE_PHY_GPIO_LOW 0x3
+#define LED_IDENTIFICATION_ON 0x1
+#define LED_IDENTIFICATION_OFF 0x0
struct oct_mdio_cmd {
u64 op;
@@ -832,7 +858,7 @@ struct oct_mdio_cmd {
/* intrmod: max. packets to trigger interrupt */
#define LIO_INTRMOD_RXMAXCNT_TRIGGER 384
/* intrmod: min. packets to trigger interrupt */
-#define LIO_INTRMOD_RXMINCNT_TRIGGER 1
+#define LIO_INTRMOD_RXMINCNT_TRIGGER 0
/* intrmod: max. time to trigger interrupt */
#define LIO_INTRMOD_RXMAXTMR_TRIGGER 128
/* 66xx:intrmod: min. time to trigger interrupt
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_config.h b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
index b3396e3a8bab..c76556809ed1 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_config.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
@@ -64,6 +64,34 @@
#define DEFAULT_NUM_NIC_PORTS_68XX 4
#define DEFAULT_NUM_NIC_PORTS_68XX_210NV 2
+/* CN23xx IQ configuration macros */
+#define CN23XX_MAX_RINGS_PER_PF_PASS_1_0 12
+#define CN23XX_MAX_RINGS_PER_PF_PASS_1_1 32
+#define CN23XX_MAX_RINGS_PER_PF 64
+
+#define CN23XX_MAX_INPUT_QUEUES CN23XX_MAX_RINGS_PER_PF
+#define CN23XX_MAX_IQ_DESCRIPTORS 2048
+#define CN23XX_DB_MIN 1
+#define CN23XX_DB_MAX 8
+#define CN23XX_DB_TIMEOUT 1
+
+#define CN23XX_MAX_OUTPUT_QUEUES CN23XX_MAX_RINGS_PER_PF
+#define CN23XX_MAX_OQ_DESCRIPTORS 2048
+#define CN23XX_OQ_BUF_SIZE 1536
+#define CN23XX_OQ_PKTSPER_INTR 128
+/*#define CAVIUM_ONLY_CN23XX_RX_PERF*/
+#define CN23XX_OQ_REFIL_THRESHOLD 128
+
+#define CN23XX_OQ_INTR_PKT 64
+#define CN23XX_OQ_INTR_TIME 100
+#define DEFAULT_NUM_NIC_PORTS_23XX 1
+
+#define CN23XX_CFG_IO_QUEUES CN23XX_MAX_RINGS_PER_PF
+/* PEMs count */
+#define CN23XX_MAX_MACS 4
+
+#define CN23XX_DEF_IQ_INTR_THRESHOLD 32
+#define CN23XX_DEF_IQ_INTR_BYTE_THRESHOLD (64 * 1024)
/* common OCTEON configuration macros */
#define CN6XXX_CFG_IO_QUEUES 32
#define OCTEON_32BYTE_INSTR 32
@@ -92,6 +120,9 @@
#define CFG_GET_IQ_DB_MIN(cfg) ((cfg)->iq.db_min)
#define CFG_GET_IQ_DB_TIMEOUT(cfg) ((cfg)->iq.db_timeout)
+#define CFG_GET_IQ_INTR_PKT(cfg) ((cfg)->iq.iq_intr_pkt)
+#define CFG_SET_IQ_INTR_PKT(cfg, val) (cfg)->iq.iq_intr_pkt = val
+
#define CFG_GET_OQ_MAX_Q(cfg) ((cfg)->oq.max_oqs)
#define CFG_GET_OQ_INFO_PTR(cfg) ((cfg)->oq.info_ptr)
#define CFG_GET_OQ_PKTS_PER_INTR(cfg) ((cfg)->oq.pkts_per_intr)
@@ -140,19 +171,24 @@
enum lio_card_type {
LIO_210SV = 0, /* Two port, 66xx */
LIO_210NV, /* Two port, 68xx */
- LIO_410NV /* Four port, 68xx */
+ LIO_410NV, /* Four port, 68xx */
+ LIO_23XX /* 23xx */
};
#define LIO_210SV_NAME "210sv"
#define LIO_210NV_NAME "210nv"
#define LIO_410NV_NAME "410nv"
+#define LIO_23XX_NAME "23xx"
/** Structure to define the configuration attributes for each Input queue.
* Applicable to all Octeon processors
**/
struct octeon_iq_config {
#ifdef __BIG_ENDIAN_BITFIELD
- u64 reserved:32;
+ u64 reserved:16;
+
+ /** Tx interrupt packets. Applicable to 23xx only */
+ u64 iq_intr_pkt:16;
/** Minimum ticks to wait before checking for pending instructions. */
u64 db_timeout:16;
@@ -192,7 +228,10 @@ struct octeon_iq_config {
/** Minimum ticks to wait before checking for pending instructions. */
u64 db_timeout:16;
- u64 reserved:32;
+ /** Tx interrupt packets. Applicable to 23xx only */
+ u64 iq_intr_pkt:16;
+
+ u64 reserved:16;
#endif
};
@@ -416,11 +455,15 @@ struct octeon_config {
#define DISPATCH_LIST_SIZE BIT(OPCODE_MASK_BITS)
/* Maximum number of Octeon Instruction (command) queues */
-#define MAX_OCTEON_INSTR_QUEUES(oct) CN6XXX_MAX_INPUT_QUEUES
-/* Maximum number of Octeon Output queues */
-#define MAX_OCTEON_OUTPUT_QUEUES(oct) CN6XXX_MAX_OUTPUT_QUEUES
+#define MAX_OCTEON_INSTR_QUEUES(oct) \
+ (OCTEON_CN23XX_PF(oct) ? CN23XX_MAX_INPUT_QUEUES : \
+ CN6XXX_MAX_INPUT_QUEUES)
-#define MAX_POSSIBLE_OCTEON_INSTR_QUEUES CN6XXX_MAX_INPUT_QUEUES
-#define MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES CN6XXX_MAX_OUTPUT_QUEUES
+/* Maximum number of Octeon Instruction (command) queues */
+#define MAX_OCTEON_OUTPUT_QUEUES(oct) \
+ (OCTEON_CN23XX_PF(oct) ? CN23XX_MAX_OUTPUT_QUEUES : \
+ CN6XXX_MAX_OUTPUT_QUEUES)
+#define MAX_POSSIBLE_OCTEON_INSTR_QUEUES CN23XX_MAX_INPUT_QUEUES
+#define MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES CN23XX_MAX_OUTPUT_QUEUES
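Worked values: for a CN23XX PF, MAX_OCTEON_INSTR_QUEUES(oct) now evaluates to CN23XX_MAX_INPUT_QUEUES (64), while 6xxx parts stay at CN6XXX_MAX_INPUT_QUEUES (32); the MAX_POSSIBLE_* constants must therefore track the larger chip. A guard along these lines would assert that, assuming the CN6XXX macro is visible at this point:

	#if MAX_POSSIBLE_OCTEON_INSTR_QUEUES < CN6XXX_MAX_INPUT_QUEUES
	#error "MAX_POSSIBLE_* must cover the largest supported chip"
	#endif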
#endif /* __OCTEON_CONFIG_H__ */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
index bbb50ea66f16..01a50f3b0c8e 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_console.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
@@ -25,12 +25,13 @@
*/
#include <linux/pci.h>
#include <linux/netdevice.h>
+#include <linux/crc32.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
-#include "octeon_main.h"
+#include "liquidio_image.h"
#include "octeon_mem_ops.h"
static void octeon_remote_lock(void);
@@ -40,6 +41,10 @@ static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct,
u32 flags);
static int octeon_console_read(struct octeon_device *oct, u32 console_num,
char *buffer, u32 buf_size);
+static u32 console_bitmask;
+module_param(console_bitmask, int, 0644);
+MODULE_PARM_DESC(console_bitmask,
+ "Bitmask indicating which consoles have debug output redirected to syslog.");
#define MIN(a, b) min((a), (b))
#define CAST_ULL(v) ((u64)(v))
@@ -177,6 +182,15 @@ struct octeon_pci_console_desc {
__cvmx_bootmem_desc_get(oct, addr, \
offsetof(struct cvmx_bootmem_named_block_desc, field), \
SIZEOF_FIELD(struct cvmx_bootmem_named_block_desc, field))
+/**
+ * \brief determines if a given console has debug enabled.
+ * @param console console to check
+ * @returns 1 = enabled. 0 otherwise
+ */
+static int octeon_console_debug_enabled(u32 console)
+{
+ return (console_bitmask >> (console)) & 0x1;
+}
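Usage sketch, with a hypothetical wrapper: loading the module with console_bitmask=0x3 redirects consoles 0 and 1, so the predicate returns 1 for those and 0 for the rest:

	static void lio_console_log(struct octeon_device *oct, u32 num,
				    const char *line)
	{
		/* console_bitmask=0x3: true for num 0 and 1, false otherwise */
		if (octeon_console_debug_enabled(num))
			dev_info(&oct->pci_dev->dev, "console %u: %s\n",
				 num, line);
	}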
/**
* This function is the implementation of the get macros defined
@@ -709,3 +723,104 @@ static int octeon_console_read(struct octeon_device *oct, u32 console_num,
return bytes_to_read;
}
+
+#define FBUF_SIZE (4 * 1024 * 1024)
+u8 fbuf[FBUF_SIZE];
+
+int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
+ size_t size)
+{
+ int ret = 0;
+ u8 *p = fbuf;
+ u32 crc32_result;
+ u64 load_addr;
+ u32 image_len;
+ struct octeon_firmware_file_header *h;
+ u32 i, rem;
+
+ if (size < sizeof(struct octeon_firmware_file_header)) {
+ dev_err(&oct->pci_dev->dev, "Firmware file too small (%d < %d).\n",
+ (u32)size,
+ (u32)sizeof(struct octeon_firmware_file_header));
+ return -EINVAL;
+ }
+
+ h = (struct octeon_firmware_file_header *)data;
+
+ if (be32_to_cpu(h->magic) != LIO_NIC_MAGIC) {
+ dev_err(&oct->pci_dev->dev, "Unrecognized firmware file.\n");
+ return -EINVAL;
+ }
+
+ crc32_result = crc32((unsigned int)~0, data,
+ sizeof(struct octeon_firmware_file_header) -
+ sizeof(u32)) ^ ~0U;
+ if (crc32_result != be32_to_cpu(h->crc32)) {
+ dev_err(&oct->pci_dev->dev, "Firmware CRC mismatch (0x%08x != 0x%08x).\n",
+ crc32_result, be32_to_cpu(h->crc32));
+ return -EINVAL;
+ }
+
+ if (strncmp(LIQUIDIO_PACKAGE, h->version, strlen(LIQUIDIO_PACKAGE))) {
+ dev_err(&oct->pci_dev->dev, "Unmatched firmware package type. Expected %s, got %s.\n",
+ LIQUIDIO_PACKAGE, h->version);
+ return -EINVAL;
+ }
+
+ if (memcmp(LIQUIDIO_BASE_VERSION, h->version + strlen(LIQUIDIO_PACKAGE),
+ strlen(LIQUIDIO_BASE_VERSION))) {
+ dev_err(&oct->pci_dev->dev, "Unmatched firmware version. Expected %s.x, got %s.\n",
+ LIQUIDIO_BASE_VERSION,
+ h->version + strlen(LIQUIDIO_PACKAGE));
+ return -EINVAL;
+ }
+
+ if (be32_to_cpu(h->num_images) > LIO_MAX_IMAGES) {
+ dev_err(&oct->pci_dev->dev, "Too many images in firmware file (%d).\n",
+ be32_to_cpu(h->num_images));
+ return -EINVAL;
+ }
+
+ dev_info(&oct->pci_dev->dev, "Firmware version: %s\n", h->version);
+ snprintf(oct->fw_info.liquidio_firmware_version, 32, "LIQUIDIO: %s",
+ h->version);
+
+ data += sizeof(struct octeon_firmware_file_header);
+
+ dev_info(&oct->pci_dev->dev, "%s: Loading %d images\n", __func__,
+ be32_to_cpu(h->num_images));
+ /* load all images */
+ for (i = 0; i < be32_to_cpu(h->num_images); i++) {
+ load_addr = be64_to_cpu(h->desc[i].addr);
+ image_len = be32_to_cpu(h->desc[i].len);
+
+ dev_info(&oct->pci_dev->dev, "Loading firmware %d at %llx\n",
+ image_len, load_addr);
+
+ /* Write in 4 MB chunks */
+ rem = image_len;
+
+ while (rem) {
+ if (rem < FBUF_SIZE)
+ size = rem;
+ else
+ size = FBUF_SIZE;
+
+ memcpy(p, data, size);
+
+ /* download the image */
+ octeon_pci_write_core_mem(oct, load_addr, p, (u32)size);
+
+ data += size;
+ rem -= (u32)size;
+ load_addr += size;
+ }
+ }
+ dev_info(&oct->pci_dev->dev, "Writing boot command: %s\n",
+ h->bootcmd);
+
+ /* Invoke the bootcmd */
+ ret = octeon_console_send_cmd(oct, h->bootcmd, 50);
+
+ return 0;
+}
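A worked example of the chunking above, as a hypothetical helper: a 9 MB image is written in three octeon_pci_write_core_mem() calls of 4 MB, 4 MB and 1 MB, with data and load_addr advancing by size each pass:

	static inline u32 lio_num_fw_chunks(u32 image_len)
	{
		/* e.g. image_len = 9 MB -> 3 chunks with FBUF_SIZE = 4 MB */
		return DIV_ROUND_UP(image_len, FBUF_SIZE);
	}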
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index 0eb504a4379a..586b68899b06 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -20,7 +20,6 @@
* Contact Cavium, Inc. for more information
**********************************************************************/
#include <linux/pci.h>
-#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "liquidio_common.h"
@@ -32,8 +31,7 @@
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
-#include "liquidio_image.h"
-#include "octeon_mem_ops.h"
+#include "cn23xx_pf_device.h"
/** Default configuration
* for CN66XX OCTEON Models.
@@ -420,6 +418,108 @@ static struct octeon_config default_cn68xx_210nv_conf = {
,
};
+static struct octeon_config default_cn23xx_conf = {
+ .card_type = LIO_23XX,
+ .card_name = LIO_23XX_NAME,
+ /** IQ attributes */
+ .iq = {
+ .max_iqs = CN23XX_CFG_IO_QUEUES,
+ .pending_list_size = (CN23XX_MAX_IQ_DESCRIPTORS *
+ CN23XX_CFG_IO_QUEUES),
+ .instr_type = OCTEON_64BYTE_INSTR,
+ .db_min = CN23XX_DB_MIN,
+ .db_timeout = CN23XX_DB_TIMEOUT,
+ .iq_intr_pkt = CN23XX_DEF_IQ_INTR_THRESHOLD,
+ },
+
+ /** OQ attributes */
+ .oq = {
+ .max_oqs = CN23XX_CFG_IO_QUEUES,
+ .info_ptr = OCTEON_OQ_INFOPTR_MODE,
+ .pkts_per_intr = CN23XX_OQ_PKTSPER_INTR,
+ .refill_threshold = CN23XX_OQ_REFIL_THRESHOLD,
+ .oq_intr_pkt = CN23XX_OQ_INTR_PKT,
+ .oq_intr_time = CN23XX_OQ_INTR_TIME,
+ },
+
+ .num_nic_ports = DEFAULT_NUM_NIC_PORTS_23XX,
+ .num_def_rx_descs = CN23XX_MAX_OQ_DESCRIPTORS,
+ .num_def_tx_descs = CN23XX_MAX_IQ_DESCRIPTORS,
+ .def_rx_buf_size = CN23XX_OQ_BUF_SIZE,
+
+ /* For ethernet interface 0: Port cfg Attributes */
+ .nic_if_cfg[0] = {
+ /* Max Txqs: half for each of the two ports: max_iq/2 */
+ .max_txqs = MAX_TXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ .num_txqs = DEF_TXQS_PER_INTF,
+
+ /* Max Rxqs: half for each of the two ports: max_oq/2 */
+ .max_rxqs = MAX_RXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ .num_rxqs = DEF_RXQS_PER_INTF,
+
+ /* Num of desc for rx rings */
+ .num_rx_descs = CN23XX_MAX_OQ_DESCRIPTORS,
+
+ /* Num of desc for tx rings */
+ .num_tx_descs = CN23XX_MAX_IQ_DESCRIPTORS,
+
+ /* SKB size; we need not change the buf size even for jumbo
+ * frames. Octeon can send jumbo frames in 4 consecutive
+ * descriptors.
+ */
+ .rx_buf_size = CN23XX_OQ_BUF_SIZE,
+
+ .base_queue = BASE_QUEUE_NOT_REQUESTED,
+
+ .gmx_port_id = 0,
+ },
+
+ .nic_if_cfg[1] = {
+ /* Max Txqs: half for each of the two ports: max_iq/2 */
+ .max_txqs = MAX_TXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ .num_txqs = DEF_TXQS_PER_INTF,
+
+ /* Max Rxqs: half for each of the two ports: max_oq/2 */
+ .max_rxqs = MAX_RXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ .num_rxqs = DEF_RXQS_PER_INTF,
+
+ /* Num of desc for rx rings */
+ .num_rx_descs = CN23XX_MAX_OQ_DESCRIPTORS,
+
+ /* Num of desc for tx rings */
+ .num_tx_descs = CN23XX_MAX_IQ_DESCRIPTORS,
+
+ /* SKB size; we need not change the buf size even for jumbo
+ * frames. Octeon can send jumbo frames in 4 consecutive
+ * descriptors.
+ */
+ .rx_buf_size = CN23XX_OQ_BUF_SIZE,
+
+ .base_queue = BASE_QUEUE_NOT_REQUESTED,
+
+ .gmx_port_id = 1,
+ },
+
+ .misc = {
+ /* Host driver link query interval */
+ .oct_link_query_interval = 100,
+
+ /* Octeon link query interval */
+ .host_link_query_interval = 500,
+
+ .enable_sli_oq_bp = 0,
+
+ /* Control queue group */
+ .ctrlq_grp = 1,
+ }
+};
+
enum {
OCTEON_CONFIG_TYPE_DEFAULT = 0,
NUM_OCTEON_CONFS,
@@ -487,6 +587,8 @@ static void *__retrieve_octeon_config_info(struct octeon_device *oct,
} else if ((oct->chip_id == OCTEON_CN68XX) &&
(card_type == LIO_410NV)) {
ret = (void *)&default_cn68xx_conf;
+ } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
+ ret = (void *)&default_cn23xx_conf;
}
break;
default:
@@ -501,7 +603,8 @@ static int __verify_octeon_config_info(struct octeon_device *oct, void *conf)
case OCTEON_CN66XX:
case OCTEON_CN68XX:
return lio_validate_cn6xxx_config_info(oct, conf);
-
+ case OCTEON_CN23XX_PF_VID:
+ return 0;
default:
break;
}
@@ -541,107 +644,6 @@ static char *get_oct_app_string(u32 app_mode)
return oct_dev_app_str[CVM_DRV_INVALID_APP - CVM_DRV_APP_START];
}
-u8 fbuf[4 * 1024 * 1024];
-
-int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
- size_t size)
-{
- int ret = 0;
- u8 *p = fbuf;
- u32 crc32_result;
- u64 load_addr;
- u32 image_len;
- struct octeon_firmware_file_header *h;
- u32 i, rem, base_len = strlen(LIQUIDIO_BASE_VERSION);
- char *base;
-
- if (size < sizeof(struct octeon_firmware_file_header)) {
- dev_err(&oct->pci_dev->dev, "Firmware file too small (%d < %d).\n",
- (u32)size,
- (u32)sizeof(struct octeon_firmware_file_header));
- return -EINVAL;
- }
-
- h = (struct octeon_firmware_file_header *)data;
-
- if (be32_to_cpu(h->magic) != LIO_NIC_MAGIC) {
- dev_err(&oct->pci_dev->dev, "Unrecognized firmware file.\n");
- return -EINVAL;
- }
-
- crc32_result = crc32((unsigned int)~0, data,
- sizeof(struct octeon_firmware_file_header) -
- sizeof(u32)) ^ ~0U;
- if (crc32_result != be32_to_cpu(h->crc32)) {
- dev_err(&oct->pci_dev->dev, "Firmware CRC mismatch (0x%08x != 0x%08x).\n",
- crc32_result, be32_to_cpu(h->crc32));
- return -EINVAL;
- }
-
- if (strncmp(LIQUIDIO_PACKAGE, h->version, strlen(LIQUIDIO_PACKAGE))) {
- dev_err(&oct->pci_dev->dev, "Unmatched firmware package type. Expected %s, got %s.\n",
- LIQUIDIO_PACKAGE, h->version);
- return -EINVAL;
- }
-
- base = h->version + strlen(LIQUIDIO_PACKAGE);
- ret = memcmp(LIQUIDIO_BASE_VERSION, base, base_len);
- if (ret) {
- dev_err(&oct->pci_dev->dev, "Unmatched firmware version. Expected %s.x, got %s.\n",
- LIQUIDIO_BASE_VERSION, base);
- return -EINVAL;
- }
-
- if (be32_to_cpu(h->num_images) > LIO_MAX_IMAGES) {
- dev_err(&oct->pci_dev->dev, "Too many images in firmware file (%d).\n",
- be32_to_cpu(h->num_images));
- return -EINVAL;
- }
-
- dev_info(&oct->pci_dev->dev, "Firmware version: %s\n", h->version);
- snprintf(oct->fw_info.liquidio_firmware_version, 32, "LIQUIDIO: %s",
- h->version);
-
- data += sizeof(struct octeon_firmware_file_header);
-
- dev_info(&oct->pci_dev->dev, "%s: Loading %d images\n", __func__,
- be32_to_cpu(h->num_images));
- /* load all images */
- for (i = 0; i < be32_to_cpu(h->num_images); i++) {
- load_addr = be64_to_cpu(h->desc[i].addr);
- image_len = be32_to_cpu(h->desc[i].len);
-
- dev_info(&oct->pci_dev->dev, "Loading firmware %d at %llx\n",
- image_len, load_addr);
-
- /* Write in 4MB chunks*/
- rem = image_len;
-
- while (rem) {
- if (rem < (4 * 1024 * 1024))
- size = rem;
- else
- size = 4 * 1024 * 1024;
-
- memcpy(p, data, size);
-
- /* download the image */
- octeon_pci_write_core_mem(oct, load_addr, p, (u32)size);
-
- data += size;
- rem -= (u32)size;
- load_addr += size;
- }
- }
- dev_info(&oct->pci_dev->dev, "Writing boot command: %s\n",
- h->bootcmd);
-
- /* Invoke the bootcmd */
- ret = octeon_console_send_cmd(oct, h->bootcmd, 50);
-
- return 0;
-}
-
void octeon_free_device_mem(struct octeon_device *oct)
{
int i;
@@ -676,6 +678,9 @@ static struct octeon_device *octeon_allocate_device_mem(u32 pci_id,
configsize = sizeof(struct octeon_cn6xxx);
break;
+ case OCTEON_CN23XX_PF_VID:
+ configsize = sizeof(struct octeon_cn23xx_pf);
+ break;
default:
pr_err("%s: Unknown PCI Device: 0x%x\n",
__func__,
@@ -741,6 +746,45 @@ struct octeon_device *octeon_allocate_device(u32 pci_id,
return oct;
}
+int
+octeon_allocate_ioq_vector(struct octeon_device *oct)
+{
+ int i, num_ioqs = 0;
+ struct octeon_ioq_vector *ioq_vector;
+ int cpu_num;
+ int size;
+
+ if (OCTEON_CN23XX_PF(oct))
+ num_ioqs = oct->sriov_info.num_pf_rings;
+ size = sizeof(struct octeon_ioq_vector) * num_ioqs;
+
+ oct->ioq_vector = vmalloc(size);
+ if (!oct->ioq_vector)
+ return 1;
+ memset(oct->ioq_vector, 0, size);
+ for (i = 0; i < num_ioqs; i++) {
+ ioq_vector = &oct->ioq_vector[i];
+ ioq_vector->oct_dev = oct;
+ ioq_vector->iq_index = i;
+ ioq_vector->droq_index = i;
+
+ cpu_num = i % num_online_cpus();
+ cpumask_set_cpu(cpu_num, &ioq_vector->affinity_mask);
+
+ if (oct->chip_id == OCTEON_CN23XX_PF_VID)
+ ioq_vector->ioq_num = i + oct->sriov_info.pf_srn;
+ else
+ ioq_vector->ioq_num = i;
+ }
+ return 0;
+}
+
+void
+octeon_free_ioq_vector(struct octeon_device *oct)
+{
+ vfree(oct->ioq_vector);
+}
+
/* this function is only for setting up the first queue */
int octeon_setup_instr_queues(struct octeon_device *oct)
{
@@ -749,10 +793,12 @@ int octeon_setup_instr_queues(struct octeon_device *oct)
union oct_txpciq txpciq;
int numa_node = cpu_to_node(iq_no % num_online_cpus());
- /* this causes queue 0 to be default queue */
if (OCTEON_CN6XXX(oct))
num_descs =
CFG_GET_NUM_DEF_TX_DESCS(CHIP_FIELD(oct, cn6xxx, conf));
+ else if (OCTEON_CN23XX_PF(oct))
+ num_descs = CFG_GET_NUM_DEF_TX_DESCS(CHIP_FIELD(oct, cn23xx_pf,
+ conf));
oct->num_iqs = 0;
@@ -769,6 +815,7 @@ int octeon_setup_instr_queues(struct octeon_device *oct)
oct->instr_queue[0]->ifidx = 0;
txpciq.u64 = 0;
txpciq.s.q_no = iq_no;
+ txpciq.s.pkind = oct->pfvf_hsword.pkind;
txpciq.s.use_qpg = 0;
txpciq.s.qpg = 0;
if (octeon_init_instr_queue(oct, txpciq, num_descs)) {
@@ -788,14 +835,17 @@ int octeon_setup_output_queues(struct octeon_device *oct)
u32 oq_no = 0;
int numa_node = cpu_to_node(oq_no % num_online_cpus());
- /* this causes queue 0 to be default queue */
if (OCTEON_CN6XXX(oct)) {
num_descs =
CFG_GET_NUM_DEF_RX_DESCS(CHIP_FIELD(oct, cn6xxx, conf));
desc_size =
CFG_GET_DEF_RX_BUF_SIZE(CHIP_FIELD(oct, cn6xxx, conf));
+ } else if (OCTEON_CN23XX_PF(oct)) {
+ num_descs = CFG_GET_NUM_DEF_RX_DESCS(CHIP_FIELD(oct, cn23xx_pf,
+ conf));
+ desc_size = CFG_GET_DEF_RX_BUF_SIZE(CHIP_FIELD(oct, cn23xx_pf,
+ conf));
}
-
oct->num_oqs = 0;
oct->droq[0] = vmalloc_node(sizeof(*oct->droq[0]), numa_node);
if (!oct->droq[0])
@@ -812,10 +862,10 @@ int octeon_setup_output_queues(struct octeon_device *oct)
void octeon_set_io_queues_off(struct octeon_device *oct)
{
- /* Disable the i/p and o/p queues for this Octeon. */
-
- octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);
- octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
+ if (OCTEON_CN6XXX(oct)) {
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
+ }
}
void octeon_set_droq_pkt_op(struct octeon_device *oct,
@@ -825,14 +875,16 @@ void octeon_set_droq_pkt_op(struct octeon_device *oct,
u32 reg_val = 0;
/* Disable the i/p and o/p queues for this Octeon. */
- reg_val = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
+ if (OCTEON_CN6XXX(oct)) {
+ reg_val = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
- if (enable)
- reg_val = reg_val | (1 << q_no);
- else
- reg_val = reg_val & (~(1 << q_no));
+ if (enable)
+ reg_val = reg_val | (1 << q_no);
+ else
+ reg_val = reg_val & (~(1 << q_no));
- octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, reg_val);
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, reg_val);
+ }
}
int octeon_init_dispatch_list(struct octeon_device *oct)
@@ -1019,6 +1071,9 @@ int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf)
if (OCTEON_CN6XXX(oct))
num_nic_ports =
CFG_GET_NUM_NIC_PORTS(CHIP_FIELD(oct, cn6xxx, conf));
+ else if (OCTEON_CN23XX_PF(oct))
+ num_nic_ports =
+ CFG_GET_NUM_NIC_PORTS(CHIP_FIELD(oct, cn23xx_pf, conf));
if (atomic_read(&oct->status) >= OCT_DEV_RUNNING) {
dev_err(&oct->pci_dev->dev, "Received CORE OK when device state is 0x%x\n",
@@ -1046,6 +1101,12 @@ int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf)
}
oct->fw_info.app_cap_flags = recv_pkt->rh.r_core_drv_init.app_cap_flags;
oct->fw_info.app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode;
+ oct->pfvf_hsword.app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode;
+
+ oct->pfvf_hsword.pkind = recv_pkt->rh.r_core_drv_init.pkind;
+
+ for (i = 0; i < oct->num_iqs; i++)
+ oct->instr_queue[i]->txpciq.s.pkind = oct->pfvf_hsword.pkind;
atomic_set(&oct->status, OCT_DEV_CORE_OK);
@@ -1108,8 +1169,10 @@ struct octeon_config *octeon_get_conf(struct octeon_device *oct)
if (OCTEON_CN6XXX(oct)) {
default_oct_conf =
(struct octeon_config *)(CHIP_FIELD(oct, cn6xxx, conf));
+ } else if (OCTEON_CN23XX_PF(oct)) {
+ default_oct_conf = (struct octeon_config *)
+ (CHIP_FIELD(oct, cn23xx_pf, conf));
}
-
return default_oct_conf;
}
@@ -1141,7 +1204,9 @@ u64 lio_pci_readq(struct octeon_device *oct, u64 addr)
* So write MSB first
*/
addrhi = (addr >> 32);
- if ((oct->chip_id == OCTEON_CN66XX) || (oct->chip_id == OCTEON_CN68XX))
+ if ((oct->chip_id == OCTEON_CN66XX) ||
+ (oct->chip_id == OCTEON_CN68XX) ||
+ (oct->chip_id == OCTEON_CN23XX_PF_VID))
addrhi |= 0x00060000;
writel(addrhi, oct->reg_list.pci_win_rd_addr_hi);
@@ -1185,8 +1250,15 @@ int octeon_mem_access_ok(struct octeon_device *oct)
u64 lmc0_reset_ctl;
/* Check to make sure a DDR interface is enabled */
- lmc0_reset_ctl = lio_pci_readq(oct, CN6XXX_LMC0_RESET_CTL);
- access_okay = (lmc0_reset_ctl & CN6XXX_LMC0_RESET_CTL_DDR3RST_MASK);
+ if (OCTEON_CN23XX_PF(oct)) {
+ lmc0_reset_ctl = lio_pci_readq(oct, CN23XX_LMC0_RESET_CTL);
+ access_okay =
+ (lmc0_reset_ctl & CN23XX_LMC0_RESET_CTL_DDR3RST_MASK);
+ } else {
+ lmc0_reset_ctl = lio_pci_readq(oct, CN6XXX_LMC0_RESET_CTL);
+ access_okay =
+ (lmc0_reset_ctl & CN6XXX_LMC0_RESET_CTL_DDR3RST_MASK);
+ }
return access_okay ? 0 : 1;
}
@@ -1226,3 +1298,39 @@ int lio_get_device_id(void *dev)
return octeon_dev->octeon_id;
return -1;
}
+
+void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq)
+{
+ u64 instr_cnt;
+ struct octeon_device *oct = NULL;
+
+ /* the whole thing needs to be atomic, ideally */
+ if (droq) {
+ spin_lock_bh(&droq->lock);
+ writel(droq->pkt_count, droq->pkts_sent_reg);
+ droq->pkt_count = 0;
+ spin_unlock_bh(&droq->lock);
+ oct = droq->oct_dev;
+ }
+ if (iq) {
+ spin_lock_bh(&iq->lock);
+ writel(iq->pkt_in_done, iq->inst_cnt_reg);
+ iq->pkt_in_done = 0;
+ spin_unlock_bh(&iq->lock);
+ oct = iq->oct_dev;
+ }
+	/* Writing RESEND in SLI_PKTX_CNTS should be enough to trigger
+	 * tx interrupts as well, if they are pending.
+	 */
+ if (oct && OCTEON_CN23XX_PF(oct)) {
+ if (droq)
+ writeq(CN23XX_INTR_RESEND, droq->pkts_sent_reg);
+		/* We race with firmware here; read and write the IN_DONE_CNTS */
+ else if (iq) {
+ instr_cnt = readq(iq->inst_cnt_reg);
+ writeq(((instr_cnt & 0xFFFFFFFF00000000ULL) |
+ CN23XX_INTR_RESEND),
+ iq->inst_cnt_reg);
+ }
+ }
+}
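lio_enable_irq() above is the common re-arm path for both queue directions. A
minimal caller sketch, assuming the droq embeds its NAPI context as elsewhere
in this driver; the function name and the direct call in place of
octeon_process_droq_poll_cmd() are illustrative, not part of this patch:

	static int example_napi_poll(struct napi_struct *napi, int budget)
	{
		struct octeon_droq *droq =
			container_of(napi, struct octeon_droq, napi);
		struct octeon_device *oct = droq->oct_dev;
		int work_done;

		work_done = octeon_droq_process_poll_pkts(oct, droq, budget);

		if (work_done < budget) {
			napi_complete(napi);
			/* Write back pkt_count/pkt_in_done and set RESEND
			 * so packets that raced in still raise an interrupt.
			 */
			lio_enable_irq(droq, oct->instr_queue[droq->q_no]);
		}
		return work_done;
	}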
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
index 01edfb404346..da15c2ae9330 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
@@ -30,13 +30,19 @@
/** PCI VendorId Device Id */
#define OCTEON_CN68XX_PCIID 0x91177d
#define OCTEON_CN66XX_PCIID 0x92177d
-
+#define OCTEON_CN23XX_PCIID_PF 0x9702177d
/** Driver identifies chips by these Ids, created by clubbing together
* DeviceId+RevisionId; Where Revision Id is not used to distinguish
* between chips, a value of 0 is used for revision id.
*/
#define OCTEON_CN68XX 0x0091
#define OCTEON_CN66XX 0x0092
+#define OCTEON_CN23XX_PF_VID 0x9702
+
+/** Revision IDs for the chips */
+#define OCTEON_CN23XX_REV_1_0 0x00
+#define OCTEON_CN23XX_REV_1_1 0x01
+#define OCTEON_CN23XX_REV_2_0 0x80
/** Endian-swap modes supported by Octeon. */
enum octeon_pci_swap_mode {
@@ -46,6 +52,9 @@ enum octeon_pci_swap_mode {
OCTEON_PCI_32BIT_LW_SWAP = 3
};
+#define OCTEON_OUTPUT_INTR (2)
+#define OCTEON_ALL_INTR 0xff
+
/*--------------- PCI BAR1 index registers -------------*/
/* BAR1 Mask */
@@ -198,9 +207,9 @@ struct octeon_fn_list {
void (*setup_oq_regs)(struct octeon_device *, u32);
irqreturn_t (*process_interrupt_regs)(void *);
+ u64 (*msix_interrupt_handler)(void *);
int (*soft_reset)(struct octeon_device *);
int (*setup_device_regs)(struct octeon_device *);
- void (*reinit_regs)(struct octeon_device *);
void (*bar1_idx_setup)(struct octeon_device *, u64, u32, int);
void (*bar1_idx_write)(struct octeon_device *, u32, u32);
u32 (*bar1_idx_read)(struct octeon_device *, u32);
@@ -209,10 +218,10 @@ struct octeon_fn_list {
void (*enable_oq_pkt_time_intr)(struct octeon_device *, u32);
void (*disable_oq_pkt_time_intr)(struct octeon_device *, u32);
- void (*enable_interrupt)(void *);
- void (*disable_interrupt)(void *);
+ void (*enable_interrupt)(struct octeon_device *, u8);
+ void (*disable_interrupt)(struct octeon_device *, u8);
- void (*enable_io_queues)(struct octeon_device *);
+ int (*enable_io_queues)(struct octeon_device *);
void (*disable_io_queues)(struct octeon_device *);
};
@@ -266,11 +275,72 @@ struct octdev_props {
/* Each interface in the Octeon device has a network
* device pointer (used for OS specific calls).
*/
+ int rx_on;
int napi_enabled;
int gmxport;
struct net_device *netdev;
};
+#define LIO_FLAG_MSIX_ENABLED 0x1
+#define MSIX_PO_INT 0x1
+#define MSIX_PI_INT 0x2
+
+struct octeon_pf_vf_hs_word {
+#ifdef __LITTLE_ENDIAN_BITFIELD
+ /** PKIND value assigned for the DPI interface */
+ u64 pkind : 8;
+
+ /** OCTEON core clock multiplier */
+ u64 core_tics_per_us : 16;
+
+ /** OCTEON coprocessor clock multiplier */
+ u64 coproc_tics_per_us : 16;
+
+	/** App currently running on OCTEON */
+ u64 app_mode : 8;
+
+ /** RESERVED */
+ u64 reserved : 16;
+
+#else
+
+ /** RESERVED */
+ u64 reserved : 16;
+
+	/** App currently running on OCTEON */
+ u64 app_mode : 8;
+
+ /** OCTEON coprocessor clock multiplier */
+ u64 coproc_tics_per_us : 16;
+
+ /** OCTEON core clock multiplier */
+ u64 core_tics_per_us : 16;
+
+ /** PKIND value assigned for the DPI interface */
+ u64 pkind : 8;
+#endif
+};
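The handshake word travels between PF and VF as a single u64; a short sketch
of unpacking it through the bitfields declared above (the union and function
names here are hypothetical, for illustration only):

	union example_hs_word {
		u64 u64val;
		struct octeon_pf_vf_hs_word s;
	};

	static u8 example_get_pkind(u64 raw)
	{
		union example_hs_word w;

		w.u64val = raw;		/* raw word received from the peer */
		return w.s.pkind;	/* low 8 bits on little-endian */
	}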
+
+struct octeon_sriov_info {
+ /* Actual rings left for PF device */
+ u32 num_pf_rings;
+
+	/* Starting ring number (SRN) of the PF's usable IO queues */
+	u32 pf_srn;
+	/* Total PF rings */
+	u32 trs;
+
+};
+
+struct octeon_ioq_vector {
+ struct octeon_device *oct_dev;
+ int iq_index;
+ int droq_index;
+ int vector;
+ struct cpumask affinity_mask;
+ u32 ioq_num;
+};
+
/** The Octeon device.
* Each Octeon device has this structure to represent all its
* components.
@@ -296,7 +366,7 @@ struct octeon_device {
/** Octeon Chip type. */
u16 chip_id;
u16 rev_id;
-
+ u16 pf_num;
/** This device's id - set by the driver. */
u32 octeon_id;
@@ -305,7 +375,6 @@ struct octeon_device {
u16 flags;
#define LIO_FLAG_MSI_ENABLED (u32)(1 << 1)
-#define LIO_FLAG_MSIX_ENABLED (u32)(1 << 2)
/** The state of this device */
atomic_t status;
@@ -395,6 +464,19 @@ struct octeon_device {
void *priv;
+ int num_msix_irqs;
+
+ void *msix_entries;
+
+ struct octeon_sriov_info sriov_info;
+
+ struct octeon_pf_vf_hs_word pfvf_hsword;
+
+ int msix_on;
+
+	/** IOq information for its corresponding MSI-X interrupt. */
+ struct octeon_ioq_vector *ioq_vector;
+
int rx_pause;
int tx_pause;
@@ -402,12 +484,15 @@ struct octeon_device {
/* private flags to control driver-specific features through ethtool */
u32 priv_flags;
+
+ void *watchdog_task;
};
#define OCT_DRV_ONLINE 1
#define OCT_DRV_OFFLINE 2
#define OCTEON_CN6XXX(oct) ((oct->chip_id == OCTEON_CN66XX) || \
(oct->chip_id == OCTEON_CN68XX))
+#define OCTEON_CN23XX_PF(oct) (oct->chip_id == OCTEON_CN23XX_PF_VID)
#define CHIP_FIELD(oct, TYPE, field) \
(((struct octeon_ ## TYPE *)(oct->chip))->field)
@@ -661,13 +746,24 @@ void *oct_get_config_info(struct octeon_device *oct, u16 card_type);
*/
struct octeon_config *octeon_get_conf(struct octeon_device *oct);
+void octeon_free_ioq_vector(struct octeon_device *oct);
+int octeon_allocate_ioq_vector(struct octeon_device *oct);
+void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq);
+
/* LiquidIO driver private flags */
enum {
OCT_PRIV_FLAG_TX_BYTES = 0, /* Tx interrupts by pending byte count */
};
-static inline void lio_set_priv_flag(struct octeon_device *octdev, u32 flag,
- u32 val)
+#define OCT_PRIV_FLAG_DEFAULT 0x0
+
+static inline u32 lio_get_priv_flag(struct octeon_device *octdev, u32 flag)
+{
+ return !!(octdev->priv_flags & (0x1 << flag));
+}
+
+static inline void lio_set_priv_flag(struct octeon_device *octdev,
+ u32 flag, u32 val)
{
if (val)
octdev->priv_flags |= (0x1 << flag);
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
index e0afe4c1fd01..f60e5320daf4 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -31,6 +31,7 @@
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
+#include "cn23xx_pf_device.h"
#define CVM_MIN(d1, d2) (((d1) < (d2)) ? (d1) : (d2))
#define CVM_MAX(d1, d2) (((d1) > (d2)) ? (d1) : (d2))
@@ -92,22 +93,25 @@ static inline void *octeon_get_dispatch_arg(struct octeon_device *octeon_dev,
return fn_arg;
}
-/** Check for packets on Droq. This function should be called with
- * lock held.
+/** Check for packets on Droq. This function should be called with lock held.
* @param droq - Droq on which count is checked.
* @return Returns packet count.
*/
u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq)
{
u32 pkt_count = 0;
+ u32 last_count;
pkt_count = readl(droq->pkts_sent_reg);
- if (pkt_count) {
- atomic_add(pkt_count, &droq->pkts_pending);
- writel(pkt_count, droq->pkts_sent_reg);
- }
- return pkt_count;
+ last_count = pkt_count - droq->pkt_count;
+ droq->pkt_count = pkt_count;
+
+	/* Counts are written back at napi irq enable or end of droq tasklet */
+ if (last_count)
+ atomic_add(last_count, &droq->pkts_pending);
+
+ return last_count;
}
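The snapshot subtraction above also behaves correctly across a 32-bit wrap of
the hardware count: in unsigned arithmetic, new minus old yields the number of
packets that arrived in between, provided fewer than 2^32 arrive between
reads. A standalone illustration (not driver code):

	static u32 example_delta(u32 new_cnt, u32 old_cnt)
	{
		/* e.g. new_cnt = 0x00000005, old_cnt = 0xFFFFFFFE -> 7 */
		return new_cnt - old_cnt;
	}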
static void octeon_droq_compute_max_packet_bufs(struct octeon_droq *droq)
@@ -259,6 +263,11 @@ int octeon_init_droq(struct octeon_device *oct,
c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf6x);
c_refill_threshold =
(u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x);
+ } else if (OCTEON_CN23XX_PF(oct)) {
+ struct octeon_config *conf23 = CHIP_FIELD(oct, cn23xx_pf, conf);
+
+ c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf23);
+ c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf23);
} else {
return 1;
}
@@ -564,7 +573,7 @@ octeon_droq_dispatch_pkt(struct octeon_device *oct,
(unsigned int)rh->r.opcode,
(unsigned int)rh->r.subcode);
droq->stats.dropped_nodispatch++;
- } /* else (dispatch_fn ... */
+ }
return cnt;
}
@@ -735,16 +744,20 @@ octeon_droq_process_packets(struct octeon_device *oct,
u32 pkt_count = 0, pkts_processed = 0;
struct list_head *tmp, *tmp2;
+ /* Grab the droq lock */
+ spin_lock(&droq->lock);
+
+ octeon_droq_check_hw_for_pkts(droq);
pkt_count = atomic_read(&droq->pkts_pending);
- if (!pkt_count)
+
+ if (!pkt_count) {
+ spin_unlock(&droq->lock);
return 0;
+ }
if (pkt_count > budget)
pkt_count = budget;
- /* Grab the droq lock */
- spin_lock(&droq->lock);
-
pkts_processed = octeon_droq_fast_process_packets(oct, droq, pkt_count);
atomic_sub(pkts_processed, &droq->pkts_pending);
@@ -789,6 +802,8 @@ octeon_droq_process_poll_pkts(struct octeon_device *oct,
spin_lock(&droq->lock);
while (total_pkts_processed < budget) {
+ octeon_droq_check_hw_for_pkts(droq);
+
pkts_available =
CVM_MIN((budget - total_pkts_processed),
(u32)(atomic_read(&droq->pkts_pending)));
@@ -803,8 +818,6 @@ octeon_droq_process_poll_pkts(struct octeon_device *oct,
atomic_sub(pkts_processed, &droq->pkts_pending);
total_pkts_processed += pkts_processed;
-
- octeon_droq_check_hw_for_pkts(droq);
}
spin_unlock(&droq->lock);
@@ -874,8 +887,11 @@ octeon_process_droq_poll_cmd(struct octeon_device *oct, u32 q_no, int cmd,
return 0;
}
break;
+ case OCTEON_CN23XX_PF_VID: {
+ lio_enable_irq(oct->droq[q_no], oct->instr_queue[q_no]);
+ }
+ break;
}
-
return 0;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
index 5a6fb9113bbd..5be002d5dba4 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
@@ -261,6 +261,8 @@ struct octeon_droq {
u32 q_no;
+ u32 pkt_count;
+
struct octeon_droq_ops ops;
struct octeon_device *oct_dev;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
index ff4b1d6f007b..e4d426ba18dc 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
@@ -88,6 +88,8 @@ struct octeon_instr_queue {
/** A spinlock to protect while posting on the ring. */
spinlock_t post_lock;
+ u32 pkt_in_done;
+
/** A spinlock to protect access to the input ring.*/
spinlock_t iq_flush_running_lock;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
index bc14e4c27332..366298f7bcb2 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_main.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
@@ -38,12 +38,26 @@
#define DRV_NAME "LiquidIO"
-/**
- * \brief determines if a given console has debug enabled.
- * @param console console to check
- * @returns 1 = enabled. 0 otherwise
+/** This structure is used by the NIC driver to store information required
+ * to free the sk_buff when the packet has been fetched by Octeon.
+ * Byte offsets below assume the worst case of a 64-bit system.
+ */
-int octeon_console_debug_enabled(u32 console);
+struct octnet_buf_free_info {
+ /** Bytes 1-8. Pointer to network device private structure. */
+ struct lio *lio;
+
+ /** Bytes 9-16. Pointer to sk_buff. */
+ struct sk_buff *skb;
+
+ /** Bytes 17-24. Pointer to gather list. */
+ struct octnic_gather *g;
+
+ /** Bytes 25-32. Physical address of skb->data or gather list. */
+ u64 dptr;
+
+ /** Bytes 33-47. Piggybacked soft command, if any */
+ struct octeon_soft_command *sc;
+};
/* BQL-related functions */
void octeon_report_sent_bytes_to_bql(void *buf, int reqtype);
@@ -167,22 +181,26 @@ cnnic_numa_alloc_aligned_dma(u32 size,
#define cnnic_free_aligned_dma(pci_dev, ptr, size, orig_ptr, dma_addr) \
free_pages(orig_ptr, get_order(size))
-static inline void
+static inline int
sleep_cond(wait_queue_head_t *wait_queue, int *condition)
{
+ int errno = 0;
wait_queue_t we;
init_waitqueue_entry(&we, current);
add_wait_queue(wait_queue, &we);
while (!(READ_ONCE(*condition))) {
set_current_state(TASK_INTERRUPTIBLE);
- if (signal_pending(current))
+ if (signal_pending(current)) {
+ errno = -EINTR;
goto out;
+ }
schedule();
}
out:
set_current_state(TASK_RUNNING);
remove_wait_queue(wait_queue, &we);
+ return errno;
}
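With sleep_cond() now returning an error, callers can abort on signal delivery
instead of proceeding with an unmet condition; a hedged caller sketch (the
function name is hypothetical):

	static int example_wait_for_completion(wait_queue_head_t *wq, int *done)
	{
		int err = sleep_cond(wq, done);

		if (err)		/* -EINTR: interrupted by a signal */
			return err;

		return 0;		/* *done became non-zero */
	}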
static inline void
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
index 95a4bbedf557..0dc081a99b30 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
@@ -19,7 +19,6 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
-#include <linux/pci.h>
#include <linux/netdevice.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
index fb820dc7fcb7..e5d1debd05ad 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
@@ -26,8 +26,6 @@
#ifndef __OCTEON_NETWORK_H__
#define __OCTEON_NETWORK_H__
-#include <linux/version.h>
-#include <linux/dma-mapping.h>
#include <linux/ptp_clock_kernel.h>
#define LIO_MAX_MTU_SIZE (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE)
@@ -124,11 +122,21 @@ struct lio {
/* work queue for txq status */
struct cavium_wq txq_status_wq;
+
+ /* work queue for link status */
+ struct cavium_wq link_status_wq;
+
};
#define LIO_SIZE (sizeof(struct lio))
#define GET_LIO(netdev) ((struct lio *)netdev_priv(netdev))
+#define CIU3_WDOG(c) (0x1010000020000ULL + (c << 3))
+#define CIU3_WDOG_MASK 12ULL
+#define LIO_MONITOR_WDOG_EXPIRE 1
+#define LIO_MONITOR_CORE_STUCK_MSGD 2
+#define LIO_MAX_CORES 12
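CIU3_WDOG(c) above indexes the per-core watchdog CSR with an 8-byte stride;
for example:

	/* Core 3's watchdog CSR: 0x1010000020000 + (3 << 3) */
	u64 wdog_core3 = CIU3_WDOG(3);	/* == 0x1010000020018ULL */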
+
/**
* \brief Enable or disable feature
* @param netdev pointer to network device
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
index 166727be928f..40ac1fe88956 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
@@ -19,7 +19,6 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
-#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include "liquidio_common.h"
@@ -36,6 +35,7 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct,
u32 rdatasize)
{
struct octeon_soft_command *sc;
+ struct octeon_instr_ih3 *ih3;
struct octeon_instr_ih2 *ih2;
struct octeon_instr_irh *irh;
struct octeon_instr_rdp *rdp;
@@ -52,10 +52,19 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct,
/* Add in the response related fields. Opcode and Param are already
* there.
*/
- ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
- rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
- irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
- ih2->fsz = 40; /* irh + ossp[0] + ossp[1] + rdp + rptr = 40 bytes */
+ if (OCTEON_CN23XX_PF(oct)) {
+ ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
+ irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
+		/* pki_ih3 + irh + ossp[0] + ossp[1] + rdp + rptr = 48 bytes */
+ ih3->fsz = LIO_SOFTCMDRESP_IH3;
+ } else {
+ ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
+ irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
+ /* irh + ossp[0] + ossp[1] + rdp + rptr = 40 bytes */
+ ih2->fsz = LIO_SOFTCMDRESP_IH2;
+ }
irh->rflag = 1; /* a response is required */
@@ -64,7 +73,10 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct,
*sc->status_word = COMPLETION_WORD_INIT;
- sc->cmd.cmd2.rptr = sc->dmarptr;
+ if (OCTEON_CN23XX_PF(oct))
+ sc->cmd.cmd3.rptr = sc->dmarptr;
+ else
+ sc->cmd.cmd2.rptr = sc->dmarptr;
sc->wait_time = 1000;
sc->timeout = jiffies + sc->wait_time;
@@ -73,12 +85,9 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct,
}
int octnet_send_nic_data_pkt(struct octeon_device *oct,
- struct octnic_data_pkt *ndata,
- u32 xmit_more)
+ struct octnic_data_pkt *ndata)
{
- int ring_doorbell;
-
- ring_doorbell = !xmit_more;
+ int ring_doorbell = 1;
return octeon_send_command(oct, ndata->q_no, ring_doorbell, &ndata->cmd,
ndata->buf, ndata->datasize,
@@ -183,8 +192,8 @@ octnet_send_nic_ctrl_pkt(struct octeon_device *oct,
retval = octeon_send_soft_command(oct, sc);
if (retval == IQ_SEND_FAILED) {
octeon_free_soft_command(oct, sc);
- dev_err(&oct->pci_dev->dev, "%s soft command:%d send failed status: %x\n",
- __func__, nctrl->ncmd.s.cmd, retval);
+ dev_err(&oct->pci_dev->dev, "%s pf_num:%d soft command:%d send failed status: %x\n",
+ __func__, oct->pf_num, nctrl->ncmd.s.cmd, retval);
spin_unlock_bh(&oct->cmd_resp_wqlock);
return -1;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
index b71a2bbe4bee..4b8da67b995f 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
@@ -138,7 +138,7 @@ octnet_prepare_pci_cmd_o2(struct octeon_device *oct,
	/* Assume that rflag is cleared, so the front data will only have
	 * irh and ossp[0], ossp[1] for a total of 32 bytes
*/
- ih2->fsz = 24;
+ ih2->fsz = LIO_PCICMD_O2;
ih2->tagtype = ORDERED_TAG;
ih2->grp = DEFAULT_POW_GRP;
@@ -196,7 +196,7 @@ octnet_prepare_pci_cmd_o3(struct octeon_device *oct,
*/
ih3->pkind = oct->instr_queue[setup->s.iq_no]->txpciq.s.pkind;
/*PKI IH*/
- ih3->fsz = 24 + 8;
+ ih3->fsz = LIO_PCICMD_O3;
if (!setup->s.gather) {
ih3->dlengsz = setup->s.u.datasize;
@@ -278,7 +278,7 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct,
* queue should be stopped, and IQ_SEND_OK if it sent okay.
*/
int octnet_send_nic_data_pkt(struct octeon_device *oct,
- struct octnic_data_pkt *ndata, u32 xmit_more);
+ struct octnic_data_pkt *ndata);
/** Send a NIC control packet to the device
* @param oct - octeon device pointer
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index d32492f185ff..90866bb50033 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -30,6 +30,7 @@
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_device.h"
+#include "cn23xx_pf_device.h"
#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count) \
(octeon_dev_ptr->instr_queue[iq_no]->stats.field += count)
@@ -71,7 +72,8 @@ int octeon_init_instr_queue(struct octeon_device *oct,
if (OCTEON_CN6XXX(oct))
conf = &(CFG_GET_IQ_CFG(CHIP_FIELD(oct, cn6xxx, conf)));
-
+ else if (OCTEON_CN23XX_PF(oct))
+ conf = &(CFG_GET_IQ_CFG(CHIP_FIELD(oct, cn23xx_pf, conf)));
if (!conf) {
dev_err(&oct->pci_dev->dev, "Unsupported Chip %x\n",
oct->chip_id);
@@ -88,6 +90,7 @@ int octeon_init_instr_queue(struct octeon_device *oct,
q_size = (u32)conf->instr_type * num_descs;
iq = oct->instr_queue[iq_no];
+
iq->oct_dev = oct;
set_dev_node(&oct->pci_dev->dev, numa_node);
@@ -181,6 +184,9 @@ int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no)
if (OCTEON_CN6XXX(oct))
desc_size =
CFG_GET_IQ_INSTR_TYPE(CHIP_FIELD(oct, cn6xxx, conf));
+ else if (OCTEON_CN23XX_PF(oct))
+ desc_size =
+ CFG_GET_IQ_INSTR_TYPE(CHIP_FIELD(oct, cn23xx_pf, conf));
vfree(iq->request_list);
@@ -383,7 +389,12 @@ lio_process_iq_request_list(struct octeon_device *oct,
case REQTYPE_SOFT_COMMAND:
sc = buf;
- irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
+ if (OCTEON_CN23XX_PF(oct))
+ irh = (struct octeon_instr_irh *)
+ &sc->cmd.cmd3.irh;
+ else
+ irh = (struct octeon_instr_irh *)
+ &sc->cmd.cmd2.irh;
if (irh->rflag) {
/* We're expecting a response from Octeon.
* It's up to lio_process_ordered_list() to
@@ -499,6 +510,7 @@ static void __check_db_timeout(struct octeon_device *oct, u64 iq_no)
if (!oct)
return;
+
iq = oct->instr_queue[iq_no];
if (!iq)
return;
@@ -514,6 +526,8 @@ static void __check_db_timeout(struct octeon_device *oct, u64 iq_no)
/* Flush the instruction queue */
octeon_flush_iq(oct, iq, 1, 0);
+
+ lio_enable_irq(NULL, iq);
}
/* Called by the Poll thread at regular intervals to check the instruction
@@ -580,6 +594,8 @@ octeon_prepare_soft_command(struct octeon_device *oct,
{
struct octeon_config *oct_cfg;
struct octeon_instr_ih2 *ih2;
+ struct octeon_instr_ih3 *ih3;
+ struct octeon_instr_pki_ih3 *pki_ih3;
struct octeon_instr_irh *irh;
struct octeon_instr_rdp *rdp;
@@ -588,36 +604,88 @@ octeon_prepare_soft_command(struct octeon_device *oct,
oct_cfg = octeon_get_conf(oct);
- ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
- ih2->tagtype = ATOMIC_TAG;
- ih2->tag = LIO_CONTROL;
- ih2->raw = 1;
- ih2->grp = CFG_GET_CTRL_Q_GRP(oct_cfg);
-
- if (sc->datasize) {
- ih2->dlengsz = sc->datasize;
- ih2->rs = 1;
- }
-
- irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
- irh->opcode = opcode;
- irh->subcode = subcode;
-
- /* opcode/subcode specific parameters (ossp) */
- irh->ossp = irh_ossp;
- sc->cmd.cmd2.ossp[0] = ossp0;
- sc->cmd.cmd2.ossp[1] = ossp1;
-
- if (sc->rdatasize) {
- rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
- rdp->pcie_port = oct->pcie_port;
- rdp->rlen = sc->rdatasize;
+ if (OCTEON_CN23XX_PF(oct)) {
+ ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
+
+ ih3->pkind = oct->instr_queue[sc->iq_no]->txpciq.s.pkind;
+
+ pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;
+
+ pki_ih3->w = 1;
+ pki_ih3->raw = 1;
+ pki_ih3->utag = 1;
+ pki_ih3->uqpg =
+ oct->instr_queue[sc->iq_no]->txpciq.s.use_qpg;
+ pki_ih3->utt = 1;
+ pki_ih3->tag = LIO_CONTROL;
+ pki_ih3->tagtype = ATOMIC_TAG;
+ pki_ih3->qpg =
+ oct->instr_queue[sc->iq_no]->txpciq.s.qpg;
+ pki_ih3->pm = 0x7;
+ pki_ih3->sl = 8;
+
+ if (sc->datasize)
+ ih3->dlengsz = sc->datasize;
+
+ irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
+ irh->opcode = opcode;
+ irh->subcode = subcode;
+
+ /* opcode/subcode specific parameters (ossp) */
+ irh->ossp = irh_ossp;
+ sc->cmd.cmd3.ossp[0] = ossp0;
+ sc->cmd.cmd3.ossp[1] = ossp1;
+
+ if (sc->rdatasize) {
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
+ rdp->pcie_port = oct->pcie_port;
+ rdp->rlen = sc->rdatasize;
+
+ irh->rflag = 1;
+			/* PKI IH3 */
+			/* pki_ih3 + irh + ossp[0] + ossp[1] + rdp + rptr = 48 bytes */
+ ih3->fsz = LIO_SOFTCMDRESP_IH3;
+ } else {
+ irh->rflag = 0;
+			/* PKI IH3 */
+			/* pki_ih3 + irh + ossp[0] + ossp[1] = 32 bytes */
+ ih3->fsz = LIO_PCICMD_O3;
+ }
- irh->rflag = 1;
- ih2->fsz = 40; /* irh+ossp[0]+ossp[1]+rdp+rptr = 40 bytes */
} else {
- irh->rflag = 0;
- ih2->fsz = 24; /* irh + ossp[0] + ossp[1] = 24 bytes */
+ ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
+ ih2->tagtype = ATOMIC_TAG;
+ ih2->tag = LIO_CONTROL;
+ ih2->raw = 1;
+ ih2->grp = CFG_GET_CTRL_Q_GRP(oct_cfg);
+
+ if (sc->datasize) {
+ ih2->dlengsz = sc->datasize;
+ ih2->rs = 1;
+ }
+
+ irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
+ irh->opcode = opcode;
+ irh->subcode = subcode;
+
+ /* opcode/subcode specific parameters (ossp) */
+ irh->ossp = irh_ossp;
+ sc->cmd.cmd2.ossp[0] = ossp0;
+ sc->cmd.cmd2.ossp[1] = ossp1;
+
+ if (sc->rdatasize) {
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
+ rdp->pcie_port = oct->pcie_port;
+ rdp->rlen = sc->rdatasize;
+
+ irh->rflag = 1;
+ /* irh+ossp[0]+ossp[1]+rdp+rptr = 40 bytes */
+ ih2->fsz = LIO_SOFTCMDRESP_IH2;
+ } else {
+ irh->rflag = 0;
+ /* irh + ossp[0] + ossp[1] = 24 bytes */
+ ih2->fsz = LIO_PCICMD_O2;
+ }
}
}
@@ -625,23 +693,39 @@ int octeon_send_soft_command(struct octeon_device *oct,
struct octeon_soft_command *sc)
{
struct octeon_instr_ih2 *ih2;
+ struct octeon_instr_ih3 *ih3;
struct octeon_instr_irh *irh;
u32 len;
- ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
- if (ih2->dlengsz) {
- WARN_ON(!sc->dmadptr);
- sc->cmd.cmd2.dptr = sc->dmadptr;
- }
- irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
- if (irh->rflag) {
- WARN_ON(!sc->dmarptr);
- WARN_ON(!sc->status_word);
- *sc->status_word = COMPLETION_WORD_INIT;
-
- sc->cmd.cmd2.rptr = sc->dmarptr;
+ if (OCTEON_CN23XX_PF(oct)) {
+ ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
+ if (ih3->dlengsz) {
+ WARN_ON(!sc->dmadptr);
+ sc->cmd.cmd3.dptr = sc->dmadptr;
+ }
+ irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
+ if (irh->rflag) {
+ WARN_ON(!sc->dmarptr);
+ WARN_ON(!sc->status_word);
+ *sc->status_word = COMPLETION_WORD_INIT;
+ sc->cmd.cmd3.rptr = sc->dmarptr;
+ }
+ len = (u32)ih3->dlengsz;
+ } else {
+ ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
+ if (ih2->dlengsz) {
+ WARN_ON(!sc->dmadptr);
+ sc->cmd.cmd2.dptr = sc->dmadptr;
+ }
+ irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
+ if (irh->rflag) {
+ WARN_ON(!sc->dmarptr);
+ WARN_ON(!sc->status_word);
+ *sc->status_word = COMPLETION_WORD_INIT;
+ sc->cmd.cmd2.rptr = sc->dmarptr;
+ }
+ len = (u32)ih2->dlengsz;
}
- len = (u32)ih2->dlengsz;
if (sc->wait_time)
sc->timeout = jiffies + sc->wait_time;
diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.c b/drivers/net/ethernet/cavium/liquidio/response_manager.c
index 709049e36627..be52178d8cb6 100644
--- a/drivers/net/ethernet/cavium/liquidio/response_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/response_manager.c
@@ -91,8 +91,13 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev,
sc = (struct octeon_soft_command *)ordered_sc_list->
head.next;
- rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
- rptr = sc->cmd.cmd2.rptr;
+ if (OCTEON_CN23XX_PF(octeon_dev)) {
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
+ rptr = sc->cmd.cmd3.rptr;
+ } else {
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
+ rptr = sc->cmd.cmd2.rptr;
+ }
status = OCTEON_REQUEST_PENDING;
diff --git a/drivers/net/ethernet/cavium/thunder/Makefile b/drivers/net/ethernet/cavium/thunder/Makefile
index 5c4615ccaa14..6b4d4add7353 100644
--- a/drivers/net/ethernet/cavium/thunder/Makefile
+++ b/drivers/net/ethernet/cavium/thunder/Makefile
@@ -2,6 +2,7 @@
# Makefile for Cavium's Thunder ethernet device
#
+obj-$(CONFIG_THUNDER_NIC_RGX) += thunder_xcv.o
obj-$(CONFIG_THUNDER_NIC_BGX) += thunder_bgx.o
obj-$(CONFIG_THUNDER_NIC_PF) += nicpf.o
obj-$(CONFIG_THUNDER_NIC_VF) += nicvf.o
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index e29815d9e6f4..30426109711c 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -20,6 +20,17 @@
#define PCI_DEVICE_ID_THUNDER_NIC_VF 0xA034
#define PCI_DEVICE_ID_THUNDER_BGX 0xA026
+/* Subsystem device IDs */
+#define PCI_SUBSYS_DEVID_88XX_NIC_PF 0xA11E
+#define PCI_SUBSYS_DEVID_81XX_NIC_PF 0xA21E
+#define PCI_SUBSYS_DEVID_83XX_NIC_PF 0xA31E
+
+#define PCI_SUBSYS_DEVID_88XX_PASS1_NIC_VF 0xA11E
+#define PCI_SUBSYS_DEVID_88XX_NIC_VF 0xA134
+#define PCI_SUBSYS_DEVID_81XX_NIC_VF 0xA234
+#define PCI_SUBSYS_DEVID_83XX_NIC_VF 0xA334
+
/* PCI BAR nos */
#define PCI_CFG_REG_BAR_NUM 0
#define PCI_MSIX_REG_BAR_NUM 4
@@ -41,40 +52,8 @@
/* Max pkinds */
#define NIC_MAX_PKIND 16
-/* Rx Channels */
-/* Receive channel configuration in TNS bypass mode
- * Below is configuration in TNS bypass mode
- * BGX0-LMAC0-CHAN0 - VNIC CHAN0
- * BGX0-LMAC1-CHAN0 - VNIC CHAN16
- * ...
- * BGX1-LMAC0-CHAN0 - VNIC CHAN128
- * ...
- * BGX1-LMAC3-CHAN0 - VNIC CHAN174
- */
-#define NIC_INTF_COUNT 2 /* Interfaces btw VNIC and TNS/BGX */
-#define NIC_CHANS_PER_INF 128
-#define NIC_MAX_CHANS (NIC_INTF_COUNT * NIC_CHANS_PER_INF)
-#define NIC_CPI_COUNT 2048 /* No of channel parse indices */
-
-/* TNS bypass mode: 1-1 mapping between VNIC and BGX:LMAC */
-#define NIC_MAX_BGX MAX_BGX_PER_CN88XX
-#define NIC_CPI_PER_BGX (NIC_CPI_COUNT / NIC_MAX_BGX)
-#define NIC_MAX_CPI_PER_LMAC 64 /* Max when CPI_ALG is IP diffserv */
-#define NIC_RSSI_PER_BGX (NIC_RSSI_COUNT / NIC_MAX_BGX)
-
-/* Tx scheduling */
-#define NIC_MAX_TL4 1024
-#define NIC_MAX_TL4_SHAPERS 256 /* 1 shaper for 4 TL4s */
-#define NIC_MAX_TL3 256
-#define NIC_MAX_TL3_SHAPERS 64 /* 1 shaper for 4 TL3s */
-#define NIC_MAX_TL2 64
-#define NIC_MAX_TL2_SHAPERS 2 /* 1 shaper for 32 TL2s */
-#define NIC_MAX_TL1 2
-
-/* TNS bypass mode */
-#define NIC_TL2_PER_BGX 32
-#define NIC_TL4_PER_BGX (NIC_MAX_TL4 / NIC_MAX_BGX)
-#define NIC_TL4_PER_LMAC (NIC_MAX_TL4 / NIC_CHANS_PER_INF)
+/* Max when CPI_ALG is IP diffserv */
+#define NIC_MAX_CPI_PER_LMAC 64
/* NIC VF Interrupts */
#define NICVF_INTR_CQ 0
@@ -148,7 +127,6 @@ struct nicvf_cq_poll {
struct napi_struct napi;
};
-#define NIC_RSSI_COUNT 4096 /* Total no of RSS indices */
#define NIC_MAX_RSS_HASH_BITS 8
#define NIC_MAX_RSS_IDR_TBL_SIZE (1 << NIC_MAX_RSS_HASH_BITS)
#define RSS_HASH_KEY_SIZE 5 /* 320 bit key */
@@ -273,6 +251,7 @@ struct nicvf {
struct net_device *netdev;
struct pci_dev *pdev;
void __iomem *reg_base;
+#define MAX_QUEUES_PER_QSET 8
struct queue_set *qs;
struct nicvf_cq_poll *napi[8];
u8 vf_id;
@@ -326,7 +305,7 @@ struct nicvf {
bool msix_enabled;
u8 num_vec;
struct msix_entry msix_entries[NIC_VF_MSIX_VECTORS];
- char irq_name[NIC_VF_MSIX_VECTORS][20];
+ char irq_name[NIC_VF_MSIX_VECTORS][IFNAMSIZ + 15];
bool irq_allocated[NIC_VF_MSIX_VECTORS];
cpumask_var_t affinity_mask[NIC_VF_MSIX_VECTORS];
@@ -369,6 +348,7 @@ struct nicvf {
#define NIC_MBOX_MSG_PNICVF_PTR 0x14 /* Get primary qset nicvf ptr */
#define NIC_MBOX_MSG_SNICVF_PTR 0x15 /* Send sqet nicvf ptr to PVF */
#define NIC_MBOX_MSG_LOOPBACK 0x16 /* Set interface in loopback */
+#define NIC_MBOX_MSG_RESET_STAT_COUNTER 0x17 /* Reset statistics counters */
#define NIC_MBOX_MSG_CFG_DONE 0xF0 /* VF configuration done */
#define NIC_MBOX_MSG_SHUTDOWN 0xF1 /* VF is being shutdown */
@@ -485,6 +465,31 @@ struct set_loopback {
bool enable;
};
+/* Reset statistics counters */
+struct reset_stat_cfg {
+ u8 msg;
+ /* Bitmap to select NIC_PF_VNIC(vf_id)_RX_STAT(0..13) */
+ u16 rx_stat_mask;
+ /* Bitmap to select NIC_PF_VNIC(vf_id)_TX_STAT(0..4) */
+ u8 tx_stat_mask;
+ /* Bitmap to select NIC_PF_QS(0..127)_RQ(0..7)_STAT(0..1)
+ * bit14, bit15 NIC_PF_QS(vf_id)_RQ7_STAT(0..1)
+ * bit12, bit13 NIC_PF_QS(vf_id)_RQ6_STAT(0..1)
+ * ..
+ * bit2, bit3 NIC_PF_QS(vf_id)_RQ1_STAT(0..1)
+ * bit0, bit1 NIC_PF_QS(vf_id)_RQ0_STAT(0..1)
+ */
+ u16 rq_stat_mask;
+ /* Bitmap to select NIC_PF_QS(0..127)_SQ(0..7)_STAT(0..1)
+ * bit14, bit15 NIC_PF_QS(vf_id)_SQ7_STAT(0..1)
+ * bit12, bit13 NIC_PF_QS(vf_id)_SQ6_STAT(0..1)
+ * ..
+ * bit2, bit3 NIC_PF_QS(vf_id)_SQ1_STAT(0..1)
+ * bit0, bit1 NIC_PF_QS(vf_id)_SQ0_STAT(0..1)
+ */
+ u16 sq_stat_mask;
+};
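A usage sketch, assuming a VF fills the message and sends it over the existing
PF/VF mailbox; the surrounding context (a struct nicvf *nic in scope and the
nicvf_send_msg_to_pf() call) is illustrative, not part of this hunk:

	union nic_mbx mbx = {};

	mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER;
	mbx.reset_stat.rx_stat_mask = 0x3FFF;	/* RX_STAT(0..13) */
	mbx.reset_stat.tx_stat_mask = 0x1F;	/* TX_STAT(0..4)  */
	mbx.reset_stat.rq_stat_mask = 0x3;	/* RQ0_STAT(0..1) */
	mbx.reset_stat.sq_stat_mask = 0x3;	/* SQ0_STAT(0..1) */
	nicvf_send_msg_to_pf(nic, &mbx);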
+
/* 128 bit shared memory between PF and each VF */
union nic_mbx {
struct { u8 msg; } msg;
@@ -502,6 +507,7 @@ union nic_mbx {
struct sqs_alloc sqs_alloc;
struct nicvf_ptr nicvf;
struct set_loopback lbk;
+ struct reset_stat_cfg reset_stat;
};
#define NIC_NODE_ID_MASK 0x03
@@ -515,7 +521,14 @@ static inline int nic_get_node_id(struct pci_dev *pdev)
static inline bool pass1_silicon(struct pci_dev *pdev)
{
- return pdev->revision < 8;
+ return (pdev->revision < 8) &&
+ (pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF);
+}
+
+static inline bool pass2_silicon(struct pci_dev *pdev)
+{
+ return (pdev->revision >= 8) &&
+ (pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF);
}
int nicvf_set_real_num_queues(struct net_device *netdev,
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 85cc782b9060..2bbf4cbf08b2 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -20,8 +20,25 @@
#define DRV_NAME "thunder-nic"
#define DRV_VERSION "1.0"
+struct hw_info {
+ u8 bgx_cnt;
+ u8 chans_per_lmac;
+ u8 chans_per_bgx; /* Rx/Tx chans */
+ u8 chans_per_rgx;
+ u8 chans_per_lbk;
+ u16 cpi_cnt;
+ u16 rssi_cnt;
+ u16 rss_ind_tbl_size;
+ u16 tl4_cnt;
+ u16 tl3_cnt;
+ u8 tl2_cnt;
+ u8 tl1_cnt;
+ bool tl1_per_bgx; /* TL1 per BGX or per LMAC */
+};
+
struct nicpf {
struct pci_dev *pdev;
+ struct hw_info *hw;
u8 node;
unsigned int flags;
u8 num_vf_en; /* No of VF enabled */
@@ -36,22 +53,22 @@ struct nicpf {
#define NIC_SET_VF_LMAC_MAP(bgx, lmac) (((bgx & 0xF) << 4) | (lmac & 0xF))
#define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF)
#define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) (map & 0xF)
- u8 vf_lmac_map[MAX_LMAC];
+ u8 *vf_lmac_map;
struct delayed_work dwork;
struct workqueue_struct *check_link;
- u8 link[MAX_LMAC];
- u8 duplex[MAX_LMAC];
- u32 speed[MAX_LMAC];
+ u8 *link;
+ u8 *duplex;
+ u32 *speed;
u16 cpi_base[MAX_NUM_VFS_SUPPORTED];
u16 rssi_base[MAX_NUM_VFS_SUPPORTED];
- u16 rss_ind_tbl_size;
bool mbx_lock[MAX_NUM_VFS_SUPPORTED];
/* MSI-X */
bool msix_enabled;
u8 num_vec;
- struct msix_entry msix_entries[NIC_PF_MSIX_VECTORS];
+ struct msix_entry *msix_entries;
bool irq_allocated[NIC_PF_MSIX_VECTORS];
+ char irq_name[NIC_PF_MSIX_VECTORS][20];
};
/* Supported devices */
@@ -89,9 +106,22 @@ static u64 nic_reg_read(struct nicpf *nic, u64 offset)
/* PF -> VF mailbox communication APIs */
static void nic_enable_mbx_intr(struct nicpf *nic)
{
- /* Enable mailbox interrupt for all 128 VFs */
- nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S, ~0ull);
- nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S + sizeof(u64), ~0ull);
+ int vf_cnt = pci_sriov_get_totalvfs(nic->pdev);
+
+#define INTR_MASK(vfs) ((vfs < 64) ? (BIT_ULL(vfs) - 1) : (~0ull))
+
+ /* Clear it, to avoid spurious interrupts (if any) */
+ nic_reg_write(nic, NIC_PF_MAILBOX_INT, INTR_MASK(vf_cnt));
+
+ /* Enable mailbox interrupt for all VFs */
+ nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S, INTR_MASK(vf_cnt));
+ /* One mailbox intr enable reg per 64 VFs */
+ if (vf_cnt > 64) {
+ nic_reg_write(nic, NIC_PF_MAILBOX_INT + sizeof(u64),
+ INTR_MASK(vf_cnt - 64));
+ nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S + sizeof(u64),
+ INTR_MASK(vf_cnt - 64));
+ }
}
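INTR_MASK() produces a mask with the low "vfs" bits set when vfs < 64, and a
full 64-bit mask otherwise, so with 128 total VFs both enable registers end up
as ~0ull. A standalone restatement for illustration:

	static inline u64 example_intr_mask(unsigned int vfs)
	{
		/* Mirrors INTR_MASK(): BIT_ULL(vfs) - 1 below 64 VFs */
		return (vfs < 64) ? ((1ULL << vfs) - 1) : ~0ULL;
	}
	/* example_intr_mask(8) == 0xFF; example_intr_mask(64) == ~0ULL */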
static void nic_clear_mbx_intr(struct nicpf *nic, int vf, int mbx_reg)
@@ -144,7 +174,7 @@ static void nic_mbx_send_ready(struct nicpf *nic, int vf)
mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE;
- if (vf < MAX_LMAC) {
+ if (vf < nic->num_vf_en) {
bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
@@ -155,7 +185,7 @@ static void nic_mbx_send_ready(struct nicpf *nic, int vf)
mbx.nic_cfg.sqs_mode = (vf >= nic->num_vf_en) ? true : false;
mbx.nic_cfg.node_id = nic->node;
- mbx.nic_cfg.loopback_supported = vf < MAX_LMAC;
+ mbx.nic_cfg.loopback_supported = vf < nic->num_vf_en;
nic_send_msg_to_vf(nic, vf, &mbx);
}
@@ -248,7 +278,8 @@ static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf)
/* Set minimum transmit packet size */
static void nic_set_tx_pkt_pad(struct nicpf *nic, int size)
{
- int lmac;
+ int lmac, max_lmac;
+ u16 sdevid;
u64 lmac_cfg;
	/* There is an issue in HW wherein, while sending GSO sized
@@ -260,7 +291,14 @@ static void nic_set_tx_pkt_pad(struct nicpf *nic, int size)
if (size > 52)
size = 52;
- for (lmac = 0; lmac < (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX); lmac++) {
+ pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
+ /* 81xx's RGX has only one LMAC */
+ if (sdevid == PCI_SUBSYS_DEVID_81XX_NIC_PF)
+ max_lmac = ((nic->hw->bgx_cnt - 1) * MAX_LMAC_PER_BGX) + 1;
+ else
+ max_lmac = nic->hw->bgx_cnt * MAX_LMAC_PER_BGX;
+
+ for (lmac = 0; lmac < max_lmac; lmac++) {
lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3));
lmac_cfg &= ~(0xF << 2);
lmac_cfg |= ((size / 4) << 2);
@@ -280,7 +318,7 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic)
nic->num_vf_en = 0;
- for (bgx = 0; bgx < NIC_MAX_BGX; bgx++) {
+ for (bgx = 0; bgx < nic->hw->bgx_cnt; bgx++) {
if (!(bgx_map & (1 << bgx)))
continue;
lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
@@ -300,28 +338,125 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic)
nic_reg_write(nic,
NIC_PF_LMAC_0_7_CREDIT + (lmac * 8),
lmac_credit);
+
+		/* On CN81XX there are only 8 VFs, but the max possible
+		 * number of interfaces is 9.
+		 */
+ if (nic->num_vf_en >= pci_sriov_get_totalvfs(nic->pdev)) {
+ nic->num_vf_en = pci_sriov_get_totalvfs(nic->pdev);
+ break;
+ }
}
}
+static void nic_free_lmacmem(struct nicpf *nic)
+{
+ kfree(nic->vf_lmac_map);
+ kfree(nic->link);
+ kfree(nic->duplex);
+ kfree(nic->speed);
+}
+
+static int nic_get_hw_info(struct nicpf *nic)
+{
+ u8 max_lmac;
+ u16 sdevid;
+ struct hw_info *hw = nic->hw;
+
+ pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
+
+ switch (sdevid) {
+ case PCI_SUBSYS_DEVID_88XX_NIC_PF:
+ hw->bgx_cnt = MAX_BGX_PER_CN88XX;
+ hw->chans_per_lmac = 16;
+ hw->chans_per_bgx = 128;
+ hw->cpi_cnt = 2048;
+ hw->rssi_cnt = 4096;
+ hw->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE;
+ hw->tl3_cnt = 256;
+ hw->tl2_cnt = 64;
+ hw->tl1_cnt = 2;
+ hw->tl1_per_bgx = true;
+ break;
+ case PCI_SUBSYS_DEVID_81XX_NIC_PF:
+ hw->bgx_cnt = MAX_BGX_PER_CN81XX;
+ hw->chans_per_lmac = 8;
+ hw->chans_per_bgx = 32;
+ hw->chans_per_rgx = 8;
+ hw->chans_per_lbk = 24;
+ hw->cpi_cnt = 512;
+ hw->rssi_cnt = 256;
+ hw->rss_ind_tbl_size = 32; /* Max RSSI / Max interfaces */
+ hw->tl3_cnt = 64;
+ hw->tl2_cnt = 16;
+ hw->tl1_cnt = 10;
+ hw->tl1_per_bgx = false;
+ break;
+ case PCI_SUBSYS_DEVID_83XX_NIC_PF:
+ hw->bgx_cnt = MAX_BGX_PER_CN83XX;
+ hw->chans_per_lmac = 8;
+ hw->chans_per_bgx = 32;
+ hw->chans_per_lbk = 64;
+ hw->cpi_cnt = 2048;
+ hw->rssi_cnt = 1024;
+ hw->rss_ind_tbl_size = 64; /* Max RSSI / Max interfaces */
+ hw->tl3_cnt = 256;
+ hw->tl2_cnt = 64;
+ hw->tl1_cnt = 18;
+ hw->tl1_per_bgx = false;
+ break;
+ }
+ hw->tl4_cnt = MAX_QUEUES_PER_QSET * pci_sriov_get_totalvfs(nic->pdev);
+
+ /* Allocate memory for LMAC tracking elements */
+ max_lmac = hw->bgx_cnt * MAX_LMAC_PER_BGX;
+ nic->vf_lmac_map = kmalloc_array(max_lmac, sizeof(u8), GFP_KERNEL);
+ if (!nic->vf_lmac_map)
+ goto error;
+ nic->link = kmalloc_array(max_lmac, sizeof(u8), GFP_KERNEL);
+ if (!nic->link)
+ goto error;
+ nic->duplex = kmalloc_array(max_lmac, sizeof(u8), GFP_KERNEL);
+ if (!nic->duplex)
+ goto error;
+ nic->speed = kmalloc_array(max_lmac, sizeof(u32), GFP_KERNEL);
+ if (!nic->speed)
+ goto error;
+ return 0;
+
+error:
+ nic_free_lmacmem(nic);
+ return -ENOMEM;
+}
+
#define BGX0_BLOCK 8
#define BGX1_BLOCK 9
-static void nic_init_hw(struct nicpf *nic)
+static int nic_init_hw(struct nicpf *nic)
{
- int i;
+ int i, err;
u64 cqm_cfg;
+ /* Get HW capability info */
+ err = nic_get_hw_info(nic);
+ if (err)
+ return err;
+
/* Enable NIC HW block */
nic_reg_write(nic, NIC_PF_CFG, 0x3);
/* Enable backpressure */
nic_reg_write(nic, NIC_PF_BP_CFG, (1ULL << 6) | 0x03);
- /* Disable TNS mode on both interfaces */
- nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG,
- (NIC_TNS_BYPASS_MODE << 7) | BGX0_BLOCK);
- nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8),
- (NIC_TNS_BYPASS_MODE << 7) | BGX1_BLOCK);
+ /* TNS and TNS bypass modes are present only on 88xx */
+ if (nic->pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF) {
+ /* Disable TNS mode on both interfaces */
+ nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG,
+ (NIC_TNS_BYPASS_MODE << 7) | BGX0_BLOCK);
+ nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8),
+ (NIC_TNS_BYPASS_MODE << 7) | BGX1_BLOCK);
+ }
+
nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG,
(1ULL << 63) | BGX0_BLOCK);
nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG + (1 << 8),
@@ -351,11 +486,14 @@ static void nic_init_hw(struct nicpf *nic)
cqm_cfg = nic_reg_read(nic, NIC_PF_CQM_CFG);
if (cqm_cfg < NICPF_CQM_MIN_DROP_LEVEL)
nic_reg_write(nic, NIC_PF_CQM_CFG, NICPF_CQM_MIN_DROP_LEVEL);
+
+ return 0;
}
/* Channel parse index configuration */
static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
{
+ struct hw_info *hw = nic->hw;
u32 vnic, bgx, lmac, chan;
u32 padd, cpi_count = 0;
u64 cpi_base, cpi, rssi_base, rssi;
@@ -365,9 +503,9 @@ static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
- chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF);
- cpi_base = (lmac * NIC_MAX_CPI_PER_LMAC) + (bgx * NIC_CPI_PER_BGX);
- rssi_base = (lmac * nic->rss_ind_tbl_size) + (bgx * NIC_RSSI_PER_BGX);
+ chan = (lmac * hw->chans_per_lmac) + (bgx * hw->chans_per_bgx);
+ cpi_base = vnic * NIC_MAX_CPI_PER_LMAC;
+ rssi_base = vnic * hw->rss_ind_tbl_size;
/* Rx channel configuration */
nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_BP_CFG | (chan << 3),
@@ -439,7 +577,7 @@ static void nic_send_rss_size(struct nicpf *nic, int vf)
msg = (u64 *)&mbx;
mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
- mbx.rss_size.ind_tbl_size = nic->rss_ind_tbl_size;
+ mbx.rss_size.ind_tbl_size = nic->hw->rss_ind_tbl_size;
nic_send_msg_to_vf(nic, vf, &mbx);
}
@@ -486,7 +624,7 @@ static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
/* 4-level transmit side scheduler configuration
* for TNS bypass mode
*
- * Sample configuration for SQ0
+ * Sample configuration for SQ0 on 88xx
* VNIC0-SQ0 -> TL4(0) -> TL3[0] -> TL2[0] -> TL1[0] -> BGX0
* VNIC1-SQ0 -> TL4(8) -> TL3[2] -> TL2[0] -> TL1[0] -> BGX0
* VNIC2-SQ0 -> TL4(16) -> TL3[4] -> TL2[1] -> TL1[0] -> BGX0
@@ -499,6 +637,7 @@ static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
struct sq_cfg_msg *sq)
{
+ struct hw_info *hw = nic->hw;
u32 bgx, lmac, chan;
u32 tl2, tl3, tl4;
u32 rr_quantum;
@@ -517,21 +656,28 @@ static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
/* 24 bytes for FCS, IPG and preamble */
rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);
- if (!sq->sqs_mode) {
- tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
- } else {
- for (svf = 0; svf < MAX_SQS_PER_VF; svf++) {
- if (nic->vf_sqs[pqs_vnic][svf] == vnic)
- break;
+	/* For 88xx, TL4s 0-511 transmit via BGX0 and
+	 * TL4s 512-1023 transmit via BGX1.
+	 */
+ if (hw->tl1_per_bgx) {
+ tl4 = bgx * (hw->tl4_cnt / hw->bgx_cnt);
+ if (!sq->sqs_mode) {
+ tl4 += (lmac * MAX_QUEUES_PER_QSET);
+ } else {
+ for (svf = 0; svf < MAX_SQS_PER_VF; svf++) {
+ if (nic->vf_sqs[pqs_vnic][svf] == vnic)
+ break;
+ }
+ tl4 += (MAX_LMAC_PER_BGX * MAX_QUEUES_PER_QSET);
+ tl4 += (lmac * MAX_QUEUES_PER_QSET * MAX_SQS_PER_VF);
+ tl4 += (svf * MAX_QUEUES_PER_QSET);
}
- tl4 = (MAX_LMAC_PER_BGX * NIC_TL4_PER_LMAC);
- tl4 += (lmac * NIC_TL4_PER_LMAC * MAX_SQS_PER_VF);
- tl4 += (svf * NIC_TL4_PER_LMAC);
- tl4 += (bgx * NIC_TL4_PER_BGX);
+ } else {
+ tl4 = (vnic * MAX_QUEUES_PER_QSET);
}
tl4 += sq_idx;
- tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3);
+ tl3 = tl4 / (hw->tl4_cnt / hw->tl3_cnt);
nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
((u64)vnic << NIC_QS_ID_SHIFT) |
((u32)sq_idx << NIC_Q_NUM_SHIFT), tl4);
@@ -539,8 +685,19 @@ static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
((u64)vnic << 27) | ((u32)sq_idx << 24) | rr_quantum);
nic_reg_write(nic, NIC_PF_TL3_0_255_CFG | (tl3 << 3), rr_quantum);
- chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF);
- nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), chan);
+
+	/* On 88xx, channels 0-127 are for BGX0 and
+	 * channels 128-255 for BGX1.
+	 *
+	 * On 81xx/83xx the TL3_CHAN reg should be configured with the
+	 * channel within the LMAC, i.e. 0-7, and not the actual channel
+	 * number as on 88xx.
+	 */
+ chan = (lmac * hw->chans_per_lmac) + (bgx * hw->chans_per_bgx);
+ if (hw->tl1_per_bgx)
+ nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), chan);
+ else
+ nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), 0);
+
/* Enable backpressure on the channel */
nic_reg_write(nic, NIC_PF_CHAN_0_255_TX_CFG | (chan << 3), 1);
@@ -549,6 +706,16 @@ static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
nic_reg_write(nic, NIC_PF_TL2_0_63_CFG | (tl2 << 3), rr_quantum);
/* No priorities as of now */
nic_reg_write(nic, NIC_PF_TL2_0_63_PRI | (tl2 << 3), 0x00);
+
+	/* Unlike 88xx, where TL2s 0-31 transmit to TL1 '0' and the rest to
+	 * TL1 '1', on 81xx/83xx a TL2 needs to be configured to transmit to
+	 * one of the possible LMACs.
+ *
+ * This register doesn't exist on 88xx.
+ */
+ if (!hw->tl1_per_bgx)
+ nic_reg_write(nic, NIC_PF_TL2_LMAC | (tl2 << 3),
+ lmac + (bgx * MAX_LMAC_PER_BGX));
}
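A worked instance of the index math above, under the 88xx parameters from
nic_get_hw_info() and assuming 128 total VFs so tl4_cnt = 8 * 128 = 1024; it
reproduces the VNIC2-SQ0 row of the sample mapping documented before
nic_tx_channel_cfg():

	/* BGX0, LMAC2, SQ0, non-SQS mode, tl1_per_bgx == true */
	u32 tl4 = 0 * (1024 / 2)	/* bgx * (tl4_cnt / bgx_cnt) */
		+ 2 * 8			/* lmac * MAX_QUEUES_PER_QSET */
		+ 0;			/* sq_idx            -> 16    */
	u32 tl3 = tl4 / (1024 / 256);	/* tl4_cnt / tl3_cnt -> 4     */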
/* Send primary nicvf pointer to secondary QS's VF */
@@ -620,7 +787,7 @@ static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk)
{
int bgx_idx, lmac_idx;
- if (lbk->vf_id > MAX_LMAC)
+ if (lbk->vf_id >= nic->num_vf_en)
return -1;
bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);
@@ -631,6 +798,67 @@ static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk)
return 0;
}
+/* Reset statistics counters */
+static int nic_reset_stat_counters(struct nicpf *nic,
+ int vf, struct reset_stat_cfg *cfg)
+{
+ int i, stat, qnum;
+ u64 reg_addr;
+
+ for (i = 0; i < RX_STATS_ENUM_LAST; i++) {
+ if (cfg->rx_stat_mask & BIT(i)) {
+ reg_addr = NIC_PF_VNIC_0_127_RX_STAT_0_13 |
+ (vf << NIC_QS_ID_SHIFT) |
+ (i << 3);
+ nic_reg_write(nic, reg_addr, 0);
+ }
+ }
+
+ for (i = 0; i < TX_STATS_ENUM_LAST; i++) {
+ if (cfg->tx_stat_mask & BIT(i)) {
+ reg_addr = NIC_PF_VNIC_0_127_TX_STAT_0_4 |
+ (vf << NIC_QS_ID_SHIFT) |
+ (i << 3);
+ nic_reg_write(nic, reg_addr, 0);
+ }
+ }
+
+ for (i = 0; i <= 15; i++) {
+ qnum = i >> 1;
+ stat = i & 1 ? 1 : 0;
+ reg_addr = (vf << NIC_QS_ID_SHIFT) |
+ (qnum << NIC_Q_NUM_SHIFT) | (stat << 3);
+ if (cfg->rq_stat_mask & BIT(i)) {
+ reg_addr |= NIC_PF_QSET_0_127_RQ_0_7_STAT_0_1;
+ nic_reg_write(nic, reg_addr, 0);
+ }
+ if (cfg->sq_stat_mask & BIT(i)) {
+ reg_addr |= NIC_PF_QSET_0_127_SQ_0_7_STAT_0_1;
+ nic_reg_write(nic, reg_addr, 0);
+ }
+ }
+ return 0;
+}
+
+static void nic_enable_tunnel_parsing(struct nicpf *nic, int vf)
+{
+ u64 prot_def = (IPV6_PROT << 32) | (IPV4_PROT << 16) | ET_PROT;
+ u64 vxlan_prot_def = (IPV6_PROT_DEF << 32) |
+ (IPV4_PROT_DEF) << 16 | ET_PROT_DEF;
+
+ /* Configure tunnel parsing parameters */
+ nic_reg_write(nic, NIC_PF_RX_GENEVE_DEF,
+ (1ULL << 63 | UDP_GENEVE_PORT_NUM));
+ nic_reg_write(nic, NIC_PF_RX_GENEVE_PROT_DEF,
+ ((7ULL << 61) | prot_def));
+ nic_reg_write(nic, NIC_PF_RX_NVGRE_PROT_DEF,
+ ((7ULL << 61) | prot_def));
+ nic_reg_write(nic, NIC_PF_RX_VXLAN_DEF_0_1,
+ ((1ULL << 63) | UDP_VXLAN_PORT_NUM));
+ nic_reg_write(nic, NIC_PF_RX_VXLAN_PROT_DEF,
+ ((0xfULL << 60) | vxlan_prot_def));
+}
+
static void nic_enable_vf(struct nicpf *nic, int vf, bool enable)
{
int bgx, lmac;
@@ -669,18 +897,17 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
mbx_addr += sizeof(u64);
}
- dev_dbg(&nic->pdev->dev, "%s: Mailbox msg %d from VF%d\n",
+ dev_dbg(&nic->pdev->dev, "%s: Mailbox msg 0x%02x from VF%d\n",
__func__, mbx.msg.msg, vf);
switch (mbx.msg.msg) {
case NIC_MBOX_MSG_READY:
nic_mbx_send_ready(nic, vf);
- if (vf < MAX_LMAC) {
+ if (vf < nic->num_vf_en) {
nic->link[vf] = 0;
nic->duplex[vf] = 0;
nic->speed[vf] = 0;
}
- ret = 1;
- break;
+ goto unlock;
case NIC_MBOX_MSG_QS_CFG:
reg_addr = NIC_PF_QSET_0_127_CFG |
(mbx.qs.num << NIC_QS_ID_SHIFT);
@@ -698,6 +925,15 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
(mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
(mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
nic_reg_write(nic, reg_addr, mbx.rq.cfg);
+		/* Enable CQE_RX2_S extension in the CQE_RX descriptor.
+		 * It is appended by default on 81xx/83xx chips; for
+		 * consistency, enable the same on 88xx pass2, where it
+		 * was introduced.
+		 */
+ if (pass2_silicon(nic->pdev))
+ nic_reg_write(nic, NIC_PF_RX_CFG, 0x01);
+ if (!pass1_silicon(nic->pdev))
+ nic_enable_tunnel_parsing(nic, vf);
break;
case NIC_MBOX_MSG_RQ_BP_CFG:
reg_addr = NIC_PF_QSET_0_127_RQ_0_7_BP_CFG |
@@ -722,8 +958,10 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
nic_tx_channel_cfg(nic, mbx.qs.num, &mbx.sq);
break;
case NIC_MBOX_MSG_SET_MAC:
- if (vf >= nic->num_vf_en)
+ if (vf >= nic->num_vf_en) {
+ ret = -1; /* NACK */
break;
+ }
lmac = mbx.mac.vf_id;
bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
@@ -772,25 +1010,38 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
case NIC_MBOX_MSG_LOOPBACK:
ret = nic_config_loopback(nic, &mbx.lbk);
break;
+ case NIC_MBOX_MSG_RESET_STAT_COUNTER:
+ ret = nic_reset_stat_counters(nic, vf, &mbx.reset_stat);
+ break;
default:
dev_err(&nic->pdev->dev,
"Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
break;
}
- if (!ret)
+ if (!ret) {
nic_mbx_send_ack(nic, vf);
- else if (mbx.msg.msg != NIC_MBOX_MSG_READY)
+ } else if (mbx.msg.msg != NIC_MBOX_MSG_READY) {
+ dev_err(&nic->pdev->dev, "NACK for MBOX 0x%02x from VF %d\n",
+ mbx.msg.msg, vf);
nic_mbx_send_nack(nic, vf);
+ }
unlock:
nic->mbx_lock[vf] = false;
}
-static void nic_mbx_intr_handler (struct nicpf *nic, int mbx)
+static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq)
{
+ struct nicpf *nic = (struct nicpf *)nic_irq;
+ int mbx;
u64 intr;
u8 vf, vf_per_mbx_reg = 64;
+ if (irq == nic->msix_entries[NIC_PF_INTR_ID_MBOX0].vector)
+ mbx = 0;
+ else
+ mbx = 1;
+
intr = nic_reg_read(nic, NIC_PF_MAILBOX_INT + (mbx << 3));
dev_dbg(&nic->pdev->dev, "PF interrupt Mbox%d 0x%llx\n", mbx, intr);
for (vf = 0; vf < vf_per_mbx_reg; vf++) {
@@ -802,23 +1053,6 @@ static void nic_mbx_intr_handler (struct nicpf *nic, int mbx)
nic_clear_mbx_intr(nic, vf, mbx);
}
}
-}
-
-static irqreturn_t nic_mbx0_intr_handler (int irq, void *nic_irq)
-{
- struct nicpf *nic = (struct nicpf *)nic_irq;
-
- nic_mbx_intr_handler(nic, 0);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t nic_mbx1_intr_handler (int irq, void *nic_irq)
-{
- struct nicpf *nic = (struct nicpf *)nic_irq;
-
- nic_mbx_intr_handler(nic, 1);
-
return IRQ_HANDLED;
}
@@ -826,7 +1060,13 @@ static int nic_enable_msix(struct nicpf *nic)
{
int i, ret;
- nic->num_vec = NIC_PF_MSIX_VECTORS;
+ nic->num_vec = pci_msix_vec_count(nic->pdev);
+
+ nic->msix_entries = kmalloc_array(nic->num_vec,
+ sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!nic->msix_entries)
+ return -ENOMEM;
for (i = 0; i < nic->num_vec; i++)
nic->msix_entries[i].entry = i;
@@ -834,8 +1074,9 @@ static int nic_enable_msix(struct nicpf *nic)
ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec);
if (ret) {
dev_err(&nic->pdev->dev,
- "Request for #%d msix vectors failed\n",
- nic->num_vec);
+ "Request for #%d msix vectors failed, returned %d\n",
+ nic->num_vec, ret);
+ kfree(nic->msix_entries);
return ret;
}
@@ -847,6 +1088,7 @@ static void nic_disable_msix(struct nicpf *nic)
{
if (nic->msix_enabled) {
pci_disable_msix(nic->pdev);
+ kfree(nic->msix_entries);
nic->msix_enabled = 0;
nic->num_vec = 0;
}
@@ -865,27 +1107,26 @@ static void nic_free_all_interrupts(struct nicpf *nic)
static int nic_register_interrupts(struct nicpf *nic)
{
- int ret;
+ int i, ret;
/* Enable MSI-X */
ret = nic_enable_msix(nic);
if (ret)
return ret;
- /* Register mailbox interrupt handlers */
- ret = request_irq(nic->msix_entries[NIC_PF_INTR_ID_MBOX0].vector,
- nic_mbx0_intr_handler, 0, "NIC Mbox0", nic);
- if (ret)
- goto fail;
-
- nic->irq_allocated[NIC_PF_INTR_ID_MBOX0] = true;
+ /* Register mailbox interrupt handler */
+ for (i = NIC_PF_INTR_ID_MBOX0; i < nic->num_vec; i++) {
+ sprintf(nic->irq_name[i],
+ "NICPF Mbox%d", (i - NIC_PF_INTR_ID_MBOX0));
- ret = request_irq(nic->msix_entries[NIC_PF_INTR_ID_MBOX1].vector,
- nic_mbx1_intr_handler, 0, "NIC Mbox1", nic);
- if (ret)
- goto fail;
+ ret = request_irq(nic->msix_entries[i].vector,
+ nic_mbx_intr_handler, 0,
+ nic->irq_name[i], nic);
+ if (ret)
+ goto fail;
- nic->irq_allocated[NIC_PF_INTR_ID_MBOX1] = true;
+ nic->irq_allocated[i] = true;
+ }
/* Enable mailbox interrupt */
nic_enable_mbx_intr(nic);
@@ -894,6 +1135,7 @@ static int nic_register_interrupts(struct nicpf *nic)
fail:
dev_err(&nic->pdev->dev, "Request irq failed\n");
nic_free_all_interrupts(nic);
+ nic_disable_msix(nic);
return ret;
}
@@ -908,6 +1150,12 @@ static int nic_num_sqs_en(struct nicpf *nic, int vf_en)
int pos, sqs_per_vf = MAX_SQS_PER_VF_SINGLE_NODE;
u16 total_vf;
+ /* Secondary Qsets are needed only if CPU count is
+ * more than MAX_QUEUES_PER_QSET.
+ */
+ if (num_online_cpus() <= MAX_QUEUES_PER_QSET)
+ return 0;
+
/* Check if it's a multi-node environment */
if (nr_node_ids > 1)
sqs_per_vf = MAX_SQS_PER_VF;
@@ -1013,6 +1261,12 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!nic)
return -ENOMEM;
+ nic->hw = devm_kzalloc(dev, sizeof(struct hw_info), GFP_KERNEL);
+ if (!nic->hw) {
+ devm_kfree(dev, nic);
+ return -ENOMEM;
+ }
+
pci_set_drvdata(pdev, nic);
nic->pdev = pdev;
@@ -1052,13 +1306,12 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
nic->node = nic_get_node_id(pdev);
- nic_set_lmac_vf_mapping(nic);
-
/* Initialize hardware */
- nic_init_hw(nic);
+ err = nic_init_hw(nic);
+ if (err)
+ goto err_release_regions;
- /* Set RSS TBL size for each VF */
- nic->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE;
+ nic_set_lmac_vf_mapping(nic);
/* Register interrupts */
err = nic_register_interrupts(nic);
@@ -1091,6 +1344,9 @@ err_unregister_interrupts:
err_release_regions:
pci_release_regions(pdev);
err_disable_device:
+ nic_free_lmacmem(nic);
+ devm_kfree(dev, nic->hw);
+ devm_kfree(dev, nic);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
return err;
@@ -1111,6 +1367,11 @@ static void nic_remove(struct pci_dev *pdev)
nic_unregister_interrupts(nic);
pci_release_regions(pdev);
+
+ nic_free_lmacmem(nic);
+ devm_kfree(&pdev->dev, nic->hw);
+ devm_kfree(&pdev->dev, nic);
+
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
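
The MSI-X rework above stops assuming a fixed vector count and instead sizes the table from the device's MSI-X capability. A minimal kernel-context sketch of the same pattern, assuming only a generic struct pci_dev (the function and variable names here are illustrative, not the driver's):

#include <linux/pci.h>
#include <linux/slab.h>

/* Sketch: allocate one msix_entry per vector the device advertises,
 * mirroring the nic_enable_msix() change above; the entries must be
 * freed again on pci_enable_msix() failure and on teardown.
 */
static int example_enable_msix(struct pci_dev *pdev,
			       struct msix_entry **entries)
{
	int i, ret;
	int nvec = pci_msix_vec_count(pdev);	/* from the MSI-X cap */

	if (nvec < 0)
		return nvec;

	*entries = kmalloc_array(nvec, sizeof(**entries), GFP_KERNEL);
	if (!*entries)
		return -ENOMEM;

	for (i = 0; i < nvec; i++)
		(*entries)[i].entry = i;

	ret = pci_enable_msix(pdev, *entries, nvec);
	if (ret) {
		kfree(*entries);	/* as the fail path above now does */
		return ret;
	}
	return nvec;
}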
diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h
index fab35a593898..edf779f5a227 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_reg.h
+++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h
@@ -36,6 +36,20 @@
#define NIC_PF_MAILBOX_ENA_W1C (0x0450)
#define NIC_PF_MAILBOX_ENA_W1S (0x0470)
#define NIC_PF_RX_ETYPE_0_7 (0x0500)
+#define NIC_PF_RX_GENEVE_DEF (0x0580)
+#define UDP_GENEVE_PORT_NUM 0x17C1ULL
+#define NIC_PF_RX_GENEVE_PROT_DEF (0x0588)
+#define IPV6_PROT 0x86DDULL
+#define IPV4_PROT 0x800ULL
+#define ET_PROT 0x6558ULL
+#define NIC_PF_RX_NVGRE_PROT_DEF (0x0598)
+#define NIC_PF_RX_VXLAN_DEF_0_1 (0x05A0)
+#define UDP_VXLAN_PORT_NUM 0x12B5
+#define NIC_PF_RX_VXLAN_PROT_DEF (0x05B0)
+#define IPV6_PROT_DEF 0x2ULL
+#define IPV4_PROT_DEF 0x1ULL
+#define ET_PROT_DEF 0x3ULL
+#define NIC_PF_RX_CFG (0x05D0)
#define NIC_PF_PKIND_0_15_CFG (0x0600)
#define NIC_PF_ECC0_FLIP0 (0x1000)
#define NIC_PF_ECC1_FLIP0 (0x1008)
@@ -103,6 +117,7 @@
#define NIC_PF_SW_SYNC_RX_DONE (0x490008)
#define NIC_PF_TL2_0_63_CFG (0x500000)
#define NIC_PF_TL2_0_63_PRI (0x520000)
+#define NIC_PF_TL2_LMAC (0x540000)
#define NIC_PF_TL2_0_63_SH_STATUS (0x580000)
#define NIC_PF_TL3A_0_63_CFG (0x5F0000)
#define NIC_PF_TL3_0_255_CFG (0x600000)
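
For reference, the tunnel-parsing defines added above are packed into a single 64-bit register word by nic_enable_tunnel_parsing(). A stand-alone user-space sketch of that arithmetic (illustrative only; field positions follow the code above):

#include <stdio.h>
#include <stdint.h>

#define ET_PROT   0x6558ULL	/* Transparent Ethernet Bridging */
#define IPV4_PROT 0x800ULL
#define IPV6_PROT 0x86DDULL

int main(void)
{
	/* Ethertype in bits 15:0, IPv4 in 31:16, IPv6 in 47:32 */
	uint64_t prot_def = (IPV6_PROT << 32) | (IPV4_PROT << 16) | ET_PROT;

	/* The top bits (7ULL << 61) enable all three inner protocols */
	printf("GENEVE/NVGRE PROT_DEF = 0x%016llx\n",
	       (unsigned long long)((7ULL << 61) | prot_def));
	return 0;
}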
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 3240349615bd..45a13f718863 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -29,10 +29,20 @@
static const struct pci_device_id nicvf_id_table[] = {
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
PCI_DEVICE_ID_THUNDER_NIC_VF,
- PCI_VENDOR_ID_CAVIUM, 0xA134) },
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_88XX_NIC_VF) },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF,
- PCI_VENDOR_ID_CAVIUM, 0xA11E) },
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_88XX_PASS1_NIC_VF) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_THUNDER_NIC_VF,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_81XX_NIC_VF) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_THUNDER_NIC_VF,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_83XX_NIC_VF) },
{ 0, } /* end of table */
};
@@ -134,15 +144,19 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
/* Wait for previous message to be acked, timeout 2sec */
while (!nic->pf_acked) {
- if (nic->pf_nacked)
+ if (nic->pf_nacked) {
+ netdev_err(nic->netdev,
+ "PF NACK to mbox msg 0x%02x from VF%d\n",
+ (mbx->msg.msg & 0xFF), nic->vf_id);
return -EINVAL;
+ }
msleep(sleep);
if (nic->pf_acked)
break;
timeout -= sleep;
if (!timeout) {
netdev_err(nic->netdev,
- "PF didn't ack to mbox msg %d from VF%d\n",
+ "PF didn't ACK to mbox msg 0x%02x from VF%d\n",
(mbx->msg.msg & 0xFF), nic->vf_id);
return -EBUSY;
}
@@ -352,13 +366,7 @@ static int nicvf_rss_init(struct nicvf *nic)
rss->enable = true;
- /* Using the HW reset value for now */
- rss->key[0] = 0xFEED0BADFEED0BADULL;
- rss->key[1] = 0xFEED0BADFEED0BADULL;
- rss->key[2] = 0xFEED0BADFEED0BADULL;
- rss->key[3] = 0xFEED0BADFEED0BADULL;
- rss->key[4] = 0xFEED0BADFEED0BADULL;
-
+ netdev_rss_key_fill(rss->key, RSS_HASH_KEY_SIZE * sizeof(u64));
nicvf_set_rss_key(nic);
rss->cfg = RSS_IP_HASH_ENA | RSS_TCP_HASH_ENA | RSS_UDP_HASH_ENA;
@@ -507,7 +515,9 @@ static int nicvf_init_resources(struct nicvf *nic)
static void nicvf_snd_pkt_handler(struct net_device *netdev,
struct cmp_queue *cq,
- struct cqe_send_t *cqe_tx, int cqe_type)
+ struct cqe_send_t *cqe_tx,
+ int cqe_type, int budget,
+ unsigned int *tx_pkts, unsigned int *tx_bytes)
{
struct sk_buff *skb = NULL;
struct nicvf *nic = netdev_priv(netdev);
@@ -538,7 +548,9 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
}
nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
prefetch(skb);
- dev_consume_skb_any(skb);
+ (*tx_pkts)++;
+ *tx_bytes += skb->len;
+ napi_consume_skb(skb, budget);
sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
} else {
/* In case of SW TSO on 88xx, only last segment will have
@@ -653,6 +665,7 @@ static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
struct cmp_queue *cq = &qs->cq[cq_idx];
struct cqe_rx_t *cq_desc;
struct netdev_queue *txq;
+ unsigned int tx_pkts = 0, tx_bytes = 0;
spin_lock_bh(&cq->lock);
loop:
@@ -691,7 +704,8 @@ loop:
break;
case CQE_TYPE_SEND:
nicvf_snd_pkt_handler(netdev, cq,
- (void *)cq_desc, CQE_TYPE_SEND);
+ (void *)cq_desc, CQE_TYPE_SEND,
+ budget, &tx_pkts, &tx_bytes);
tx_done++;
break;
case CQE_TYPE_INVALID:
@@ -720,6 +734,9 @@ done:
netdev = nic->pnicvf->netdev;
txq = netdev_get_tx_queue(netdev,
nicvf_netdev_qidx(nic, cq_idx));
+ if (tx_pkts)
+ netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
+
nic = nic->pnicvf;
if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) {
netif_tx_start_queue(txq);
@@ -933,16 +950,19 @@ static int nicvf_register_interrupts(struct nicvf *nic)
int vector;
for_each_cq_irq(irq)
- sprintf(nic->irq_name[irq], "NICVF%d CQ%d",
- nic->vf_id, irq);
+ sprintf(nic->irq_name[irq], "%s-rxtx-%d",
+ nic->pnicvf->netdev->name,
+ nicvf_netdev_qidx(nic, irq));
for_each_sq_irq(irq)
- sprintf(nic->irq_name[irq], "NICVF%d SQ%d",
- nic->vf_id, irq - NICVF_INTR_ID_SQ);
+ sprintf(nic->irq_name[irq], "%s-sq-%d",
+ nic->pnicvf->netdev->name,
+ nicvf_netdev_qidx(nic, irq - NICVF_INTR_ID_SQ));
for_each_rbdr_irq(irq)
- sprintf(nic->irq_name[irq], "NICVF%d RBDR%d",
- nic->vf_id, irq - NICVF_INTR_ID_RBDR);
+ sprintf(nic->irq_name[irq], "%s-rbdr-%d",
+ nic->pnicvf->netdev->name,
+ nic->sqs_mode ? (nic->sqs_id + 1) : 0);
/* Register CQ interrupts */
for (irq = 0; irq < nic->qs->cq_cnt; irq++) {
@@ -966,8 +986,9 @@ static int nicvf_register_interrupts(struct nicvf *nic)
}
/* Register QS error interrupt */
- sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR],
- "NICVF%d Qset error", nic->vf_id);
+ sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR], "%s-qset-err-%d",
+ nic->pnicvf->netdev->name,
+ nic->sqs_mode ? (nic->sqs_id + 1) : 0);
irq = NICVF_INTR_ID_QS_ERR;
ret = request_irq(nic->msix_entries[irq].vector,
nicvf_qs_err_intr_handler,
@@ -1146,6 +1167,9 @@ int nicvf_stop(struct net_device *netdev)
netif_tx_disable(netdev);
+ for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
+ netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));
+
/* Free resources */
nicvf_config_data_transfer(nic, false);
@@ -1196,7 +1220,7 @@ int nicvf_open(struct net_device *netdev)
}
/* Check if we got MAC address from PF, else generate a random MAC */
- if (is_zero_ether_addr(netdev->dev_addr)) {
+ if (!nic->sqs_mode && is_zero_ether_addr(netdev->dev_addr)) {
eth_hw_addr_random(netdev);
nicvf_hw_set_mac_addr(nic, netdev);
}
@@ -1533,14 +1557,13 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_release_regions;
}
- qcount = MAX_CMP_QUEUES_PER_QS;
+ qcount = netif_get_num_default_rss_queues();
/* Restrict multiqset support only for host bound VFs */
if (pdev->is_virtfn) {
/* Set max number of queues per VF */
- qcount = roundup(num_online_cpus(), MAX_CMP_QUEUES_PER_QS);
- qcount = min(qcount,
- (MAX_SQS_PER_VF + 1) * MAX_CMP_QUEUES_PER_QS);
+ qcount = min_t(int, num_online_cpus(),
+ (MAX_SQS_PER_VF + 1) * MAX_CMP_QUEUES_PER_QS);
}
netdev = alloc_etherdev_mqs(sizeof(struct nicvf), qcount, qcount);
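
The improved NACK reporting above sits on a simple poll-with-timeout handshake. A condensed kernel-context sketch of the wait loop in nicvf_send_msg_to_pf(); the flag pointers and timing constants are illustrative, not the driver's:

#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/errno.h>

/* Sketch of the ACK/NACK wait: poll two flags that the mailbox
 * interrupt handler sets, sleeping between checks until a 2 second
 * budget is spent.
 */
static int example_wait_for_pf(bool *acked, bool *nacked)
{
	int timeout = 2000, sleep = 10;		/* milliseconds */

	while (!READ_ONCE(*acked)) {
		if (READ_ONCE(*nacked))
			return -EINVAL;		/* PF sent a NACK */
		msleep(sleep);
		timeout -= sleep;
		if (!timeout)
			return -EBUSY;		/* PF never answered */
	}
	return 0;
}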
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index dda3ea3f3bb6..a4fc50155881 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -479,6 +479,16 @@ void nicvf_config_vlan_stripping(struct nicvf *nic, netdev_features_t features)
NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
}
+static void nicvf_reset_rcv_queue_stats(struct nicvf *nic)
+{
+ union nic_mbx mbx = {};
+
+ /* Reset all RXQ's stats */
+ mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER;
+ mbx.reset_stat.rq_stat_mask = 0xFFFF;
+ nicvf_send_msg_to_pf(nic, &mbx);
+}
+
/* Configures receive queue */
static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
int qidx, bool enable)
@@ -762,10 +772,10 @@ int nicvf_set_qset_resources(struct nicvf *nic)
nic->qs = qs;
/* Set count of each queue */
- qs->rbdr_cnt = RBDR_CNT;
- qs->rq_cnt = RCV_QUEUE_CNT;
- qs->sq_cnt = SND_QUEUE_CNT;
- qs->cq_cnt = CMP_QUEUE_CNT;
+ qs->rbdr_cnt = DEFAULT_RBDR_CNT;
+ qs->rq_cnt = min_t(u8, MAX_RCV_QUEUES_PER_QS, num_online_cpus());
+ qs->sq_cnt = min_t(u8, MAX_SND_QUEUES_PER_QS, num_online_cpus());
+ qs->cq_cnt = max_t(u8, qs->rq_cnt, qs->sq_cnt);
/* Set queue lengths */
qs->rbdr_len = RCV_BUF_COUNT;
@@ -812,6 +822,11 @@ int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
nicvf_free_resources(nic);
}
+ /* Reset RXQ's stats.
+ * SQ's stats will get reset automatically once SQ is reset.
+ */
+ nicvf_reset_rcv_queue_stats(nic);
+
return 0;
}
@@ -1067,6 +1082,24 @@ static inline void nicvf_sq_add_cqe_subdesc(struct snd_queue *sq, int qentry,
imm->len = 1;
}
+static inline void nicvf_sq_doorbell(struct nicvf *nic, struct sk_buff *skb,
+ int sq_num, int desc_cnt)
+{
+ struct netdev_queue *txq;
+
+ txq = netdev_get_tx_queue(nic->pnicvf->netdev,
+ skb_get_queue_mapping(skb));
+
+ netdev_tx_sent_queue(txq, skb->len);
+
+ /* make sure all memory stores are done before ringing doorbell */
+ smp_wmb();
+
+ /* Inform HW to xmit all TSO segments */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
+ sq_num, desc_cnt);
+}
+
/* Segment a TSO packet into 'gso_size' segments and append
* them to SQ for transfer
*/
@@ -1126,12 +1159,8 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
/* Save SKB in the last segment for freeing */
sq->skbuff[hdr_qentry] = (u64)skb;
- /* make sure all memory stores are done before ringing doorbell */
- smp_wmb();
+ nicvf_sq_doorbell(nic, skb, sq_num, desc_cnt);
- /* Inform HW to xmit all TSO segments */
- nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
- sq_num, desc_cnt);
nic->drv_stats.tx_tso++;
return 1;
}
@@ -1204,12 +1233,8 @@ doorbell:
nicvf_sq_add_cqe_subdesc(sq, qentry, tso_sqe, skb);
}
- /* make sure all memory stores are done before ringing doorbell */
- smp_wmb();
+ nicvf_sq_doorbell(nic, skb, sq_num, subdesc_cnt);
- /* Inform HW to xmit new packet */
- nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
- sq_num, subdesc_cnt);
return 1;
append_fail:
@@ -1234,13 +1259,23 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
int frag;
int payload_len = 0;
struct sk_buff *skb = NULL;
- struct sk_buff *skb_frag = NULL;
- struct sk_buff *prev_frag = NULL;
+ struct page *page;
+ int offset;
u16 *rb_lens = NULL;
u64 *rb_ptrs = NULL;
rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
- rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));
+ /* On all chips except 88xx pass1, CQE_RX2_S is added to
+ * CQE_RX at word6, hence the buffer pointers move by a word.
+ *
+ * Use the existing 'hw_tso' flag, which is set on all chips
+ * except 88xx pass1, instead of taking an additional cache
+ * line access (or miss) to read the PCI device's revision.
+ */
+ if (!nic->hw_tso)
+ rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));
+ else
+ rb_ptrs = (void *)cqe_rx + (7 * sizeof(u64));
netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n",
__func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);
@@ -1258,22 +1293,10 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
skb_put(skb, payload_len);
} else {
/* Add fragments */
- skb_frag = nicvf_rb_ptr_to_skb(nic, *rb_ptrs,
- payload_len);
- if (!skb_frag) {
- dev_kfree_skb(skb);
- return NULL;
- }
-
- if (!skb_shinfo(skb)->frag_list)
- skb_shinfo(skb)->frag_list = skb_frag;
- else
- prev_frag->next = skb_frag;
-
- prev_frag = skb_frag;
- skb->len += payload_len;
- skb->data_len += payload_len;
- skb_frag->len = payload_len;
+ page = virt_to_page(phys_to_virt(*rb_ptrs));
+ offset = phys_to_virt(*rb_ptrs) - page_address(page);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ offset, payload_len, RCV_FRAG_LEN);
}
/* Next buffer pointer */
rb_ptrs++;
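
The netdev_tx_sent_queue()/netdev_tx_completed_queue() pairing introduced above is the standard Byte Queue Limits contract: account bytes when descriptors are posted, credit them back on completion, and reset the counters on queue teardown. A minimal kernel-context sketch, with the queue lookup omitted and names illustrative:

#include <linux/netdevice.h>

/* BQL sketch: account bytes as descriptors are queued ... */
static void example_tx_queued(struct netdev_queue *txq, unsigned int len)
{
	netdev_tx_sent_queue(txq, len);	/* before ringing the doorbell */
}

/* ... credit them back from completion context ... */
static void example_tx_done(struct netdev_queue *txq,
			    unsigned int pkts, unsigned int bytes)
{
	if (pkts)
		netdev_tx_completed_queue(txq, pkts, bytes);
}

/* ... and clear the accounting whenever the queue is torn down,
 * as nicvf_stop() now does for every TX queue.
 */
static void example_tx_reset(struct netdev_queue *txq)
{
	netdev_tx_reset_queue(txq);
}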
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 6673e1133523..869f3386028b 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -57,10 +57,7 @@
#define CMP_QUEUE_SIZE6 6ULL /* 64K entries */
/* Default queue count per QS, its lengths and threshold values */
-#define RBDR_CNT 1
-#define RCV_QUEUE_CNT 8
-#define SND_QUEUE_CNT 8
-#define CMP_QUEUE_CNT 8 /* Max of RCV and SND qcount */
+#define DEFAULT_RBDR_CNT 1
#define SND_QSIZE SND_QUEUE_SIZE2
#define SND_QUEUE_LEN (1ULL << (SND_QSIZE + 10))
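
With the fixed RCV/SND/CMP queue counts gone, the counts are derived at runtime instead. A short kernel-context sketch of the sizing rule now used by nicvf_set_qset_resources() (the max_* parameters stand in for the per-Qset hardware limits):

#include <linux/cpumask.h>
#include <linux/kernel.h>

/* Clamp RQ/SQ counts to the online CPU count and size the CQ set
 * to cover whichever of the two is larger.
 */
static void example_size_qset(u8 *rq, u8 *sq, u8 *cq, u8 max_rq, u8 max_sq)
{
	*rq = min_t(u8, max_rq, num_online_cpus());
	*sq = min_t(u8, max_sq, num_online_cpus());
	*cq = max_t(u8, *rq, *sq);	/* one CQ per RQ or SQ */
}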
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 63a39ac97d53..8bbaedbb7b94 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -28,6 +28,9 @@ struct lmac {
struct bgx *bgx;
int dmac;
u8 mac[ETH_ALEN];
+ u8 lmac_type;
+ u8 lane_to_sds;
+ bool use_training;
bool link_up;
int lmacid; /* ID within BGX */
int lmacid_bd; /* ID on board */
@@ -43,14 +46,13 @@ struct lmac {
struct bgx {
u8 bgx_id;
- u8 qlm_mode;
struct lmac lmac[MAX_LMAC_PER_BGX];
int lmac_count;
- int lmac_type;
- int lane_to_sds;
- int use_training;
+ u8 max_lmac;
void __iomem *reg_base;
struct pci_dev *pdev;
+ bool is_dlm;
+ bool is_rgx;
};
static struct bgx *bgx_vnic[MAX_BGX_THUNDER];
@@ -61,6 +63,7 @@ static int bgx_xaui_check_link(struct lmac *lmac);
/* Supported devices */
static const struct pci_device_id bgx_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_BGX) },
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_RGX) },
{ 0, } /* end of table */
};
@@ -124,8 +127,8 @@ unsigned bgx_get_map(int node)
int i;
unsigned map = 0;
- for (i = 0; i < MAX_BGX_PER_CN88XX; i++) {
- if (bgx_vnic[(node * MAX_BGX_PER_CN88XX) + i])
+ for (i = 0; i < MAX_BGX_PER_NODE; i++) {
+ if (bgx_vnic[(node * MAX_BGX_PER_NODE) + i])
map |= (1 << i);
}
@@ -138,7 +141,7 @@ int bgx_get_lmac_count(int node, int bgx_idx)
{
struct bgx *bgx;
- bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
if (bgx)
return bgx->lmac_count;
@@ -153,7 +156,7 @@ void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
struct bgx *bgx;
struct lmac *lmac;
- bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
if (!bgx)
return;
@@ -166,7 +169,7 @@ EXPORT_SYMBOL(bgx_get_lmac_link_state);
const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
{
- struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
if (bgx)
return bgx->lmac[lmacid].mac;
@@ -177,7 +180,7 @@ EXPORT_SYMBOL(bgx_get_lmac_mac);
void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
{
- struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
if (!bgx)
return;
@@ -188,11 +191,13 @@ EXPORT_SYMBOL(bgx_set_lmac_mac);
void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
{
- struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+ struct lmac *lmac;
u64 cfg;
if (!bgx)
return;
+ lmac = &bgx->lmac[lmacid];
cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
if (enable)
@@ -200,6 +205,9 @@ void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
else
cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);
bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+ if (bgx->is_rgx)
+ xcv_setup_link(enable ? lmac->link_up : 0, lmac->last_speed);
}
EXPORT_SYMBOL(bgx_lmac_rx_tx_enable);
@@ -266,9 +274,12 @@ static void bgx_sgmii_change_link_state(struct lmac *lmac)
port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
- /* renable lmac */
+ /* Re-enable lmac */
cmr_cfg |= CMR_EN;
bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
+
+ if (bgx->is_rgx && (cmr_cfg & (CMR_PKT_RX_EN | CMR_PKT_TX_EN)))
+ xcv_setup_link(lmac->link_up, lmac->last_speed);
}
static void bgx_lmac_handler(struct net_device *netdev)
@@ -314,7 +325,7 @@ u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
{
struct bgx *bgx;
- bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
if (!bgx)
return 0;
@@ -328,7 +339,7 @@ u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
{
struct bgx *bgx;
- bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
if (!bgx)
return 0;
@@ -356,7 +367,7 @@ void bgx_lmac_internal_loopback(int node, int bgx_idx,
struct lmac *lmac;
u64 cfg;
- bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
if (!bgx)
return;
@@ -379,8 +390,9 @@ void bgx_lmac_internal_loopback(int node, int bgx_idx,
}
EXPORT_SYMBOL(bgx_lmac_internal_loopback);
-static int bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid)
+static int bgx_lmac_sgmii_init(struct bgx *bgx, struct lmac *lmac)
{
+ int lmacid = lmac->lmacid;
u64 cfg;
bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30);
@@ -409,18 +421,29 @@ static int bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid)
cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN);
bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);
- if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
- PCS_MRX_STATUS_AN_CPT, false)) {
- dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n");
- return -1;
+ if (lmac->lmac_type == BGX_MODE_QSGMII) {
+ /* Disable disparity check for QSGMII */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL);
+ cfg &= ~PCS_MISC_CTL_DISP_EN;
+ bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL, cfg);
+ return 0;
+ }
+
+ if (lmac->lmac_type == BGX_MODE_SGMII) {
+ if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
+ PCS_MRX_STATUS_AN_CPT, false)) {
+ dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n");
+ return -1;
+ }
}
return 0;
}
-static int bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type)
+static int bgx_lmac_xaui_init(struct bgx *bgx, struct lmac *lmac)
{
u64 cfg;
+ int lmacid = lmac->lmacid;
/* Reset SPU */
bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET);
@@ -436,12 +459,14 @@ static int bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type)
bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
/* Set interleaved running disparity for RXAUI */
- if (bgx->lmac_type != BGX_MODE_RXAUI)
- bgx_reg_modify(bgx, lmacid,
- BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
- else
+ if (lmac->lmac_type == BGX_MODE_RXAUI)
bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL,
- SPU_MISC_CTL_RX_DIS | SPU_MISC_CTL_INTLV_RDISP);
+ SPU_MISC_CTL_INTLV_RDISP);
+
+ /* Clear receive packet disable */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
+ cfg &= ~SPU_MISC_CTL_RX_DIS;
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
/* clear all interrupts */
cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT);
@@ -451,7 +476,7 @@ static int bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type)
cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
- if (bgx->use_training) {
+ if (lmac->use_training) {
bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00);
bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00);
bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00);
@@ -474,9 +499,9 @@ static int bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type)
bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg);
cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV);
- if (bgx->lmac_type == BGX_MODE_10G_KR)
+ if (lmac->lmac_type == BGX_MODE_10G_KR)
cfg |= (1 << 23);
- else if (bgx->lmac_type == BGX_MODE_40G_KR)
+ else if (lmac->lmac_type == BGX_MODE_40G_KR)
cfg |= (1 << 24);
else
cfg &= ~((1 << 23) | (1 << 24));
@@ -511,11 +536,10 @@ static int bgx_xaui_check_link(struct lmac *lmac)
{
struct bgx *bgx = lmac->bgx;
int lmacid = lmac->lmacid;
- int lmac_type = bgx->lmac_type;
+ int lmac_type = lmac->lmac_type;
u64 cfg;
- bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
- if (bgx->use_training) {
+ if (lmac->use_training) {
cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
if (!(cfg & (1ull << 13))) {
cfg = (1ull << 13) | (1ull << 14);
@@ -556,7 +580,7 @@ static int bgx_xaui_check_link(struct lmac *lmac)
BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
dev_err(&bgx->pdev->dev, "Receive fault, retry training\n");
- if (bgx->use_training) {
+ if (lmac->use_training) {
cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
if (!(cfg & (1ull << 13))) {
cfg = (1ull << 13) | (1ull << 14);
@@ -584,11 +608,6 @@ static int bgx_xaui_check_link(struct lmac *lmac)
return -1;
}
- /* Clear receive packet disable */
- cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
- cfg &= ~SPU_MISC_CTL_RX_DIS;
- bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
-
/* Check for MAC RX faults */
cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_CTL);
/* 0 - Link is okay, 1 - Local fault, 2 - Remote fault */
@@ -599,7 +618,7 @@ static int bgx_xaui_check_link(struct lmac *lmac)
/* Rx local/remote fault seen.
* Do lmac reinit to see if condition recovers
*/
- bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type);
+ bgx_lmac_xaui_init(bgx, lmac);
return -1;
}
@@ -623,7 +642,7 @@ static void bgx_poll_for_link(struct work_struct *work)
if ((spu_link & SPU_STATUS1_RCV_LNK) &&
!(smu_link & SMU_RX_CTL_STATUS)) {
lmac->link_up = 1;
- if (lmac->bgx->lmac_type == BGX_MODE_XLAUI)
+ if (lmac->lmac_type == BGX_MODE_XLAUI)
lmac->last_speed = 40000;
else
lmac->last_speed = 10000;
@@ -649,6 +668,16 @@ static void bgx_poll_for_link(struct work_struct *work)
queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2);
}
+static int phy_interface_mode(u8 lmac_type)
+{
+ if (lmac_type == BGX_MODE_QSGMII)
+ return PHY_INTERFACE_MODE_QSGMII;
+ if (lmac_type == BGX_MODE_RGMII)
+ return PHY_INTERFACE_MODE_RGMII;
+
+ return PHY_INTERFACE_MODE_SGMII;
+}
+
static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
{
struct lmac *lmac;
@@ -657,13 +686,15 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
lmac = &bgx->lmac[lmacid];
lmac->bgx = bgx;
- if (bgx->lmac_type == BGX_MODE_SGMII) {
+ if ((lmac->lmac_type == BGX_MODE_SGMII) ||
+ (lmac->lmac_type == BGX_MODE_QSGMII) ||
+ (lmac->lmac_type == BGX_MODE_RGMII)) {
lmac->is_sgmii = 1;
- if (bgx_lmac_sgmii_init(bgx, lmacid))
+ if (bgx_lmac_sgmii_init(bgx, lmac))
return -1;
} else {
lmac->is_sgmii = 0;
- if (bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type))
+ if (bgx_lmac_xaui_init(bgx, lmac))
return -1;
}
@@ -685,10 +716,10 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
/* Restore default cfg, in case low level firmware changed it */
bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03);
- if ((bgx->lmac_type != BGX_MODE_XFI) &&
- (bgx->lmac_type != BGX_MODE_XLAUI) &&
- (bgx->lmac_type != BGX_MODE_40G_KR) &&
- (bgx->lmac_type != BGX_MODE_10G_KR)) {
+ if ((lmac->lmac_type != BGX_MODE_XFI) &&
+ (lmac->lmac_type != BGX_MODE_XLAUI) &&
+ (lmac->lmac_type != BGX_MODE_40G_KR) &&
+ (lmac->lmac_type != BGX_MODE_10G_KR)) {
if (!lmac->phydev)
return -ENODEV;
@@ -696,7 +727,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
if (phy_connect_direct(&lmac->netdev, lmac->phydev,
bgx_lmac_handler,
- PHY_INTERFACE_MODE_SGMII))
+ phy_interface_mode(lmac->lmac_type)))
return -ENODEV;
phy_start_aneg(lmac->phydev);
@@ -753,76 +784,19 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
bgx_flush_dmac_addrs(bgx, lmacid);
- if ((bgx->lmac_type != BGX_MODE_XFI) &&
- (bgx->lmac_type != BGX_MODE_XLAUI) &&
- (bgx->lmac_type != BGX_MODE_40G_KR) &&
- (bgx->lmac_type != BGX_MODE_10G_KR) && lmac->phydev)
+ if ((lmac->lmac_type != BGX_MODE_XFI) &&
+ (lmac->lmac_type != BGX_MODE_XLAUI) &&
+ (lmac->lmac_type != BGX_MODE_40G_KR) &&
+ (lmac->lmac_type != BGX_MODE_10G_KR) && lmac->phydev)
phy_disconnect(lmac->phydev);
lmac->phydev = NULL;
}
-static void bgx_set_num_ports(struct bgx *bgx)
-{
- u64 lmac_count;
-
- switch (bgx->qlm_mode) {
- case QLM_MODE_SGMII:
- bgx->lmac_count = 4;
- bgx->lmac_type = BGX_MODE_SGMII;
- bgx->lane_to_sds = 0;
- break;
- case QLM_MODE_XAUI_1X4:
- bgx->lmac_count = 1;
- bgx->lmac_type = BGX_MODE_XAUI;
- bgx->lane_to_sds = 0xE4;
- break;
- case QLM_MODE_RXAUI_2X2:
- bgx->lmac_count = 2;
- bgx->lmac_type = BGX_MODE_RXAUI;
- bgx->lane_to_sds = 0xE4;
- break;
- case QLM_MODE_XFI_4X1:
- bgx->lmac_count = 4;
- bgx->lmac_type = BGX_MODE_XFI;
- bgx->lane_to_sds = 0;
- break;
- case QLM_MODE_XLAUI_1X4:
- bgx->lmac_count = 1;
- bgx->lmac_type = BGX_MODE_XLAUI;
- bgx->lane_to_sds = 0xE4;
- break;
- case QLM_MODE_10G_KR_4X1:
- bgx->lmac_count = 4;
- bgx->lmac_type = BGX_MODE_10G_KR;
- bgx->lane_to_sds = 0;
- bgx->use_training = 1;
- break;
- case QLM_MODE_40G_KR4_1X4:
- bgx->lmac_count = 1;
- bgx->lmac_type = BGX_MODE_40G_KR;
- bgx->lane_to_sds = 0xE4;
- bgx->use_training = 1;
- break;
- default:
- bgx->lmac_count = 0;
- break;
- }
-
- /* Check if low level firmware has programmed LMAC count
- * based on board type, if yes consider that otherwise
- * the default static values
- */
- lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7;
- if (lmac_count != 4)
- bgx->lmac_count = lmac_count;
-}
-
static void bgx_init_hw(struct bgx *bgx)
{
int i;
-
- bgx_set_num_ports(bgx);
+ struct lmac *lmac;
bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP);
if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS))
@@ -830,17 +804,9 @@ static void bgx_init_hw(struct bgx *bgx)
/* Set lmac type and lane2serdes mapping */
for (i = 0; i < bgx->lmac_count; i++) {
- if (bgx->lmac_type == BGX_MODE_RXAUI) {
- if (i)
- bgx->lane_to_sds = 0x0e;
- else
- bgx->lane_to_sds = 0x04;
- bgx_reg_write(bgx, i, BGX_CMRX_CFG,
- (bgx->lmac_type << 8) | bgx->lane_to_sds);
- continue;
- }
+ lmac = &bgx->lmac[i];
bgx_reg_write(bgx, i, BGX_CMRX_CFG,
- (bgx->lmac_type << 8) | (bgx->lane_to_sds + i));
+ (lmac->lmac_type << 8) | lmac->lane_to_sds);
bgx->lmac[i].lmacid_bd = lmac_count;
lmac_count++;
}
@@ -863,55 +829,212 @@ static void bgx_init_hw(struct bgx *bgx)
bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00);
}
-static void bgx_get_qlm_mode(struct bgx *bgx)
+static u8 bgx_get_lane2sds_cfg(struct bgx *bgx, struct lmac *lmac)
+{
+ return (u8)(bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG) & 0xFF);
+}
+
+static void bgx_print_qlm_mode(struct bgx *bgx, u8 lmacid)
{
struct device *dev = &bgx->pdev->dev;
- int lmac_type;
- int train_en;
+ struct lmac *lmac;
+ char str[20];
+ u8 dlm;
- /* Read LMAC0 type to figure out QLM mode
- * This is configured by low level firmware
- */
- lmac_type = bgx_reg_read(bgx, 0, BGX_CMRX_CFG);
- lmac_type = (lmac_type >> 8) & 0x07;
+ if (lmacid > bgx->max_lmac)
+ return;
- train_en = bgx_reg_read(bgx, 0, BGX_SPUX_BR_PMD_CRTL) &
- SPU_PMD_CRTL_TRAIN_EN;
+ lmac = &bgx->lmac[lmacid];
+ dlm = (lmacid / 2) + (bgx->bgx_id * 2);
+ if (!bgx->is_dlm)
+ sprintf(str, "BGX%d QLM mode", bgx->bgx_id);
+ else
+ sprintf(str, "BGX%d DLM%d mode", bgx->bgx_id, dlm);
- switch (lmac_type) {
+ switch (lmac->lmac_type) {
case BGX_MODE_SGMII:
- bgx->qlm_mode = QLM_MODE_SGMII;
- dev_info(dev, "BGX%d QLM mode: SGMII\n", bgx->bgx_id);
+ dev_info(dev, "%s: SGMII\n", (char *)str);
break;
case BGX_MODE_XAUI:
- bgx->qlm_mode = QLM_MODE_XAUI_1X4;
- dev_info(dev, "BGX%d QLM mode: XAUI\n", bgx->bgx_id);
+ dev_info(dev, "%s: XAUI\n", (char *)str);
break;
case BGX_MODE_RXAUI:
- bgx->qlm_mode = QLM_MODE_RXAUI_2X2;
- dev_info(dev, "BGX%d QLM mode: RXAUI\n", bgx->bgx_id);
+ dev_info(dev, "%s: RXAUI\n", (char *)str);
break;
case BGX_MODE_XFI:
- if (!train_en) {
- bgx->qlm_mode = QLM_MODE_XFI_4X1;
- dev_info(dev, "BGX%d QLM mode: XFI\n", bgx->bgx_id);
- } else {
- bgx->qlm_mode = QLM_MODE_10G_KR_4X1;
- dev_info(dev, "BGX%d QLM mode: 10G_KR\n", bgx->bgx_id);
- }
+ if (!lmac->use_training)
+ dev_info(dev, "%s: XFI\n", (char *)str);
+ else
+ dev_info(dev, "%s: 10G_KR\n", (char *)str);
break;
case BGX_MODE_XLAUI:
- if (!train_en) {
- bgx->qlm_mode = QLM_MODE_XLAUI_1X4;
- dev_info(dev, "BGX%d QLM mode: XLAUI\n", bgx->bgx_id);
- } else {
- bgx->qlm_mode = QLM_MODE_40G_KR4_1X4;
- dev_info(dev, "BGX%d QLM mode: 40G_KR4\n", bgx->bgx_id);
- }
+ if (!lmac->use_training)
+ dev_info(dev, "%s: XLAUI\n", (char *)str);
+ else
+ dev_info(dev, "%s: 40G_KR4\n", (char *)str);
+ break;
+ case BGX_MODE_QSGMII:
+ if ((lmacid == 0) &&
+ (bgx_get_lane2sds_cfg(bgx, lmac) != lmacid))
+ return;
+ if ((lmacid == 2) &&
+ (bgx_get_lane2sds_cfg(bgx, lmac) == lmacid))
+ return;
+ dev_info(dev, "%s: QSGMII\n", (char *)str);
+ break;
+ case BGX_MODE_RGMII:
+ dev_info(dev, "%s: RGMII\n", (char *)str);
+ break;
+ case BGX_MODE_INVALID:
+ /* Nothing to do */
+ break;
+ }
+}
+
+static void lmac_set_lane2sds(struct bgx *bgx, struct lmac *lmac)
+{
+ switch (lmac->lmac_type) {
+ case BGX_MODE_SGMII:
+ case BGX_MODE_XFI:
+ lmac->lane_to_sds = lmac->lmacid;
+ break;
+ case BGX_MODE_XAUI:
+ case BGX_MODE_XLAUI:
+ case BGX_MODE_RGMII:
+ lmac->lane_to_sds = 0xE4;
+ break;
+ case BGX_MODE_RXAUI:
+ lmac->lane_to_sds = (lmac->lmacid) ? 0xE : 0x4;
+ break;
+ case BGX_MODE_QSGMII:
+ /* There is no way to determine whether DLM0/2 or
+ * DLM1/3 is configured for QSGMII, as the bootloader
+ * configures all LMACs; take whatever low level
+ * firmware has configured.
+ */
+ lmac->lane_to_sds = bgx_get_lane2sds_cfg(bgx, lmac);
break;
default:
- bgx->qlm_mode = QLM_MODE_SGMII;
- dev_info(dev, "BGX%d QLM default mode: SGMII\n", bgx->bgx_id);
+ lmac->lane_to_sds = 0;
+ break;
+ }
+}
+
+static void lmac_set_training(struct bgx *bgx, struct lmac *lmac, int lmacid)
+{
+ if ((lmac->lmac_type != BGX_MODE_10G_KR) &&
+ (lmac->lmac_type != BGX_MODE_40G_KR)) {
+ lmac->use_training = 0;
+ return;
+ }
+
+ lmac->use_training = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL) &
+ SPU_PMD_CRTL_TRAIN_EN;
+}
+
+static void bgx_set_lmac_config(struct bgx *bgx, u8 idx)
+{
+ struct lmac *lmac;
+ struct lmac *olmac;
+ u64 cmr_cfg;
+ u8 lmac_type;
+ u8 lane_to_sds;
+
+ lmac = &bgx->lmac[idx];
+
+ if (!bgx->is_dlm || bgx->is_rgx) {
+ /* Read LMAC0 type to figure out QLM mode
+ * This is configured by low level firmware
+ */
+ cmr_cfg = bgx_reg_read(bgx, 0, BGX_CMRX_CFG);
+ lmac->lmac_type = (cmr_cfg >> 8) & 0x07;
+ if (bgx->is_rgx)
+ lmac->lmac_type = BGX_MODE_RGMII;
+ lmac_set_training(bgx, lmac, 0);
+ lmac_set_lane2sds(bgx, lmac);
+ return;
+ }
+
+ /* On 81xx a BGX can be split across 2 DLMs;
+ * firmware programs the lmac_type of LMAC0 and LMAC2.
+ */
+ if ((idx == 0) || (idx == 2)) {
+ cmr_cfg = bgx_reg_read(bgx, idx, BGX_CMRX_CFG);
+ lmac_type = (u8)((cmr_cfg >> 8) & 0x07);
+ lane_to_sds = (u8)(cmr_cfg & 0xFF);
+ /* Check if config is not reset value */
+ if ((lmac_type == 0) && (lane_to_sds == 0xE4))
+ lmac->lmac_type = BGX_MODE_INVALID;
+ else
+ lmac->lmac_type = lmac_type;
+ lmac_set_training(bgx, lmac, lmac->lmacid);
+ lmac_set_lane2sds(bgx, lmac);
+
+ /* Set LMAC type of the other LMAC on the same DLM, i.e. LMAC 1/3 */
+ olmac = &bgx->lmac[idx + 1];
+ olmac->lmac_type = lmac->lmac_type;
+ lmac_set_training(bgx, olmac, olmac->lmacid);
+ lmac_set_lane2sds(bgx, olmac);
+ }
+}
+
+static bool is_dlm0_in_bgx_mode(struct bgx *bgx)
+{
+ struct lmac *lmac;
+
+ if (!bgx->is_dlm)
+ return true;
+
+ lmac = &bgx->lmac[0];
+ if (lmac->lmac_type == BGX_MODE_INVALID)
+ return false;
+
+ return true;
+}
+
+static void bgx_get_qlm_mode(struct bgx *bgx)
+{
+ struct lmac *lmac;
+ struct lmac *lmac01;
+ struct lmac *lmac23;
+ u8 idx;
+
+ /* Init all LMAC's type to invalid */
+ for (idx = 0; idx < bgx->max_lmac; idx++) {
+ lmac = &bgx->lmac[idx];
+ lmac->lmacid = idx;
+ lmac->lmac_type = BGX_MODE_INVALID;
+ lmac->use_training = false;
+ }
+
+ /* It is assumed that low level firmware sets this value */
+ bgx->lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7;
+ if (bgx->lmac_count > bgx->max_lmac)
+ bgx->lmac_count = bgx->max_lmac;
+
+ for (idx = 0; idx < bgx->max_lmac; idx++)
+ bgx_set_lmac_config(bgx, idx);
+
+ if (!bgx->is_dlm || bgx->is_rgx) {
+ bgx_print_qlm_mode(bgx, 0);
+ return;
+ }
+
+ if (bgx->lmac_count) {
+ bgx_print_qlm_mode(bgx, 0);
+ bgx_print_qlm_mode(bgx, 2);
+ }
+
+ /* If DLM0 is not in BGX mode then LMAC0/1 have
+ * to be configured with serdes lanes of DLM1
+ */
+ if (is_dlm0_in_bgx_mode(bgx) || (bgx->lmac_count > 2))
+ return;
+ for (idx = 0; idx < bgx->lmac_count; idx++) {
+ lmac01 = &bgx->lmac[idx];
+ lmac23 = &bgx->lmac[idx + 2];
+ lmac01->lmac_type = lmac23->lmac_type;
+ lmac01->lane_to_sds = lmac23->lane_to_sds;
}
}
@@ -1042,7 +1165,7 @@ static int bgx_init_of_phy(struct bgx *bgx)
}
lmac++;
- if (lmac == MAX_LMAC_PER_BGX) {
+ if (lmac == bgx->max_lmac) {
of_node_put(node);
break;
}
@@ -1087,6 +1210,7 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct device *dev = &pdev->dev;
struct bgx *bgx = NULL;
u8 lmac;
+ u16 sdevid;
bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL);
if (!bgx)
@@ -1115,10 +1239,30 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = -ENOMEM;
goto err_release_regions;
}
- bgx->bgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) & 1;
- bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_CN88XX;
- bgx_vnic[bgx->bgx_id] = bgx;
+ pci_read_config_word(pdev, PCI_DEVICE_ID, &sdevid);
+ if (sdevid != PCI_DEVICE_ID_THUNDER_RGX) {
+ bgx->bgx_id =
+ (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) & 1;
+ bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_NODE;
+ bgx->max_lmac = MAX_LMAC_PER_BGX;
+ bgx_vnic[bgx->bgx_id] = bgx;
+ } else {
+ bgx->is_rgx = true;
+ bgx->max_lmac = 1;
+ bgx->bgx_id = MAX_BGX_PER_CN81XX - 1;
+ bgx_vnic[bgx->bgx_id] = bgx;
+ xcv_init_hw();
+ }
+
+ /* On 81xx all are DLMs, and on 83xx there are 3 BGX QLMs and one
+ * BGX, i.e. BGX2, which can be split across 2 DLMs.
+ */
+ pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid);
+ if ((sdevid == PCI_SUBSYS_DEVID_81XX_BGX) ||
+ ((sdevid == PCI_SUBSYS_DEVID_83XX_BGX) && (bgx->bgx_id == 2)))
+ bgx->is_dlm = true;
+
bgx_get_qlm_mode(bgx);
err = bgx_init_phy(bgx);
@@ -1133,6 +1277,8 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err) {
dev_err(dev, "BGX%d failed to enable lmac%d\n",
bgx->bgx_id, lmac);
+ while (lmac)
+ bgx_lmac_disable(bgx, --lmac);
goto err_enable;
}
}
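
The per-LMAC lane_to_sds values set in lmac_set_lane2sds() follow a fixed pattern per interface mode. A stand-alone user-space sketch of that mapping (illustrative only; QSGMII is omitted since it is read back from firmware):

#include <stdio.h>
#include <stdint.h>

enum mode { SGMII, XFI, XAUI, XLAUI, RXAUI };

/* Reproduce the lane-to-serdes byte: single-lane modes map the LMAC
 * to its own lane, four-lane modes use 0xE4 (the identity map, 2-bit
 * fields 00,01,10,11 from the LSB), and RXAUI pairs lanes {0,1} or
 * {2,3} depending on the LMAC.
 */
static uint8_t lane_to_sds(enum mode m, int lmacid)
{
	switch (m) {
	case SGMII:
	case XFI:
		return lmacid;
	case XAUI:
	case XLAUI:
		return 0xE4;
	case RXAUI:
		return lmacid ? 0xE : 0x4;
	}
	return 0;
}

int main(void)
{
	printf("RXAUI lmac1 -> 0x%X\n", lane_to_sds(RXAUI, 1));
	return 0;
}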
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index 42010d2e5ddf..d59c71e4a000 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -9,8 +9,20 @@
#ifndef THUNDER_BGX_H
#define THUNDER_BGX_H
-#define MAX_BGX_THUNDER 8 /* Max 4 nodes, 2 per node */
+/* PCI device ID */
+#define PCI_DEVICE_ID_THUNDER_BGX 0xA026
+#define PCI_DEVICE_ID_THUNDER_RGX 0xA054
+
+/* Subsystem device IDs */
+#define PCI_SUBSYS_DEVID_88XX_BGX 0xA126
+#define PCI_SUBSYS_DEVID_81XX_BGX 0xA226
+#define PCI_SUBSYS_DEVID_83XX_BGX 0xA326
+
+#define MAX_BGX_THUNDER 8 /* Max 2 nodes, 4 per node */
#define MAX_BGX_PER_CN88XX 2
+#define MAX_BGX_PER_CN81XX 3 /* 2 BGXs + 1 RGX */
+#define MAX_BGX_PER_CN83XX 4
+#define MAX_BGX_PER_NODE 4
#define MAX_LMAC_PER_BGX 4
#define MAX_BGX_CHANS_PER_LMAC 16
#define MAX_DMAC_PER_LMAC 8
@@ -18,8 +30,6 @@
#define MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE 2
-#define MAX_LMAC (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX)
-
/* Registers */
#define BGX_CMRX_CFG 0x00
#define CMR_PKT_TX_EN BIT_ULL(13)
@@ -136,6 +146,7 @@
#define BGX_GMP_PCS_ANX_AN_RESULTS 0x30020
#define BGX_GMP_PCS_SGM_AN_ADV 0x30068
#define BGX_GMP_PCS_MISCX_CTL 0x30078
+#define PCS_MISC_CTL_DISP_EN BIT_ULL(13)
#define PCS_MISC_CTL_GMX_ENO BIT_ULL(11)
#define PCS_MISC_CTL_SAMP_PT_MASK 0x7Full
#define BGX_GMP_GMI_PRTX_CFG 0x38020
@@ -194,6 +205,9 @@ void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac);
void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status);
void bgx_lmac_internal_loopback(int node, int bgx_idx,
int lmac_idx, bool enable);
+void xcv_init_hw(void);
+void xcv_setup_link(bool link_up, int link_speed);
+
u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx);
u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx);
#define BGX_RX_STATS_COUNT 11
@@ -213,16 +227,9 @@ enum LMAC_TYPE {
BGX_MODE_XLAUI = 4, /* 4 lanes, 10.3125 Gbaud */
BGX_MODE_10G_KR = 3,/* 1 lane, 10.3125 Gbaud */
BGX_MODE_40G_KR = 4,/* 4 lanes, 10.3125 Gbaud */
-};
-
-enum qlm_mode {
- QLM_MODE_SGMII, /* SGMII, each lane independent */
- QLM_MODE_XAUI_1X4, /* 1 XAUI or DXAUI, 4 lanes */
- QLM_MODE_RXAUI_2X2, /* 2 RXAUI, 2 lanes each */
- QLM_MODE_XFI_4X1, /* 4 XFI, 1 lane each */
- QLM_MODE_XLAUI_1X4, /* 1 XLAUI, 4 lanes each */
- QLM_MODE_10G_KR_4X1, /* 4 10GBASE-KR, 1 lane each */
- QLM_MODE_40G_KR4_1X4, /* 1 40GBASE-KR4, 4 lanes each */
+ BGX_MODE_RGMII = 5,
+ BGX_MODE_QSGMII = 6,
+ BGX_MODE_INVALID = 7,
};
#endif /* THUNDER_BGX_H */
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
new file mode 100644
index 000000000000..67befedef709
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
@@ -0,0 +1,235 @@
+/*
+ * Copyright (C) 2016 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+
+#include "nic.h"
+#include "thunder_bgx.h"
+
+#define DRV_NAME "thunder-xcv"
+#define DRV_VERSION "1.0"
+
+/* Register offsets */
+#define XCV_RESET 0x00
+#define PORT_EN BIT_ULL(63)
+#define CLK_RESET BIT_ULL(15)
+#define DLL_RESET BIT_ULL(11)
+#define COMP_EN BIT_ULL(7)
+#define TX_PKT_RESET BIT_ULL(3)
+#define TX_DATA_RESET BIT_ULL(2)
+#define RX_PKT_RESET BIT_ULL(1)
+#define RX_DATA_RESET BIT_ULL(0)
+#define XCV_DLL_CTL 0x10
+#define CLKRX_BYP BIT_ULL(23)
+#define CLKTX_BYP BIT_ULL(15)
+#define XCV_COMP_CTL 0x20
+#define DRV_BYP BIT_ULL(63)
+#define XCV_CTL 0x30
+#define XCV_INT 0x40
+#define XCV_INT_W1S 0x48
+#define XCV_INT_ENA_W1C 0x50
+#define XCV_INT_ENA_W1S 0x58
+#define XCV_INBND_STATUS 0x80
+#define XCV_BATCH_CRD_RET 0x100
+
+struct xcv {
+ void __iomem *reg_base;
+ struct pci_dev *pdev;
+};
+
+static struct xcv *xcv;
+
+/* Supported devices */
+static const struct pci_device_id xcv_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xA056) },
+ { 0, } /* end of table */
+};
+
+MODULE_AUTHOR("Cavium Inc");
+MODULE_DESCRIPTION("Cavium Thunder RGX/XCV Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, xcv_id_table);
+
+void xcv_init_hw(void)
+{
+ u64 cfg;
+
+ /* Take DLL out of reset */
+ cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
+ cfg &= ~DLL_RESET;
+ writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);
+
+ /* Take clock tree out of reset */
+ cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
+ cfg &= ~CLK_RESET;
+ writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);
+ /* Wait for DLL to lock */
+ msleep(1);
+
+ /* Configure DLL - enable or bypass
+ * TX no bypass, RX bypass
+ */
+ cfg = readq_relaxed(xcv->reg_base + XCV_DLL_CTL);
+ cfg &= ~0xFF03;
+ cfg |= CLKRX_BYP;
+ writeq_relaxed(cfg, xcv->reg_base + XCV_DLL_CTL);
+
+ /* Enable compensation controller and force the
+ * write to be visible to HW by reading it back.
+ */
+ cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
+ cfg |= COMP_EN;
+ writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);
+ readq_relaxed(xcv->reg_base + XCV_RESET);
+ /* Wait for compensation state machine to lock */
+ msleep(10);
+
+ /* enable the XCV block */
+ cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
+ cfg |= PORT_EN;
+ writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);
+
+ cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
+ cfg |= CLK_RESET;
+ writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);
+}
+EXPORT_SYMBOL(xcv_init_hw);
+
+void xcv_setup_link(bool link_up, int link_speed)
+{
+ u64 cfg;
+ int speed = 2;
+
+ if (!xcv) {
+ /* 'xcv' is NULL here, so it must not be dereferenced */
+ pr_err("XCV init not done, probe may have failed\n");
+ return;
+ }
+
+ if (link_speed == 100)
+ speed = 1;
+ else if (link_speed == 10)
+ speed = 0;
+
+ if (link_up) {
+ /* set operating speed */
+ cfg = readq_relaxed(xcv->reg_base + XCV_CTL);
+ cfg &= ~0x03;
+ cfg |= speed;
+ writeq_relaxed(cfg, xcv->reg_base + XCV_CTL);
+
+ /* Reset datapaths */
+ cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
+ cfg |= TX_DATA_RESET | RX_DATA_RESET;
+ writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);
+
+ /* Enable the packet flow */
+ cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
+ cfg |= TX_PKT_RESET | RX_PKT_RESET;
+ writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);
+
+ /* Return credits to RGX */
+ writeq_relaxed(0x01, xcv->reg_base + XCV_BATCH_CRD_RET);
+ } else {
+ /* Disable packet flow */
+ cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
+ cfg &= ~(TX_PKT_RESET | RX_PKT_RESET);
+ writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);
+ readq_relaxed(xcv->reg_base + XCV_RESET);
+ }
+}
+EXPORT_SYMBOL(xcv_setup_link);
+
+static int xcv_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int err;
+ struct device *dev = &pdev->dev;
+
+ xcv = devm_kzalloc(dev, sizeof(struct xcv), GFP_KERNEL);
+ if (!xcv)
+ return -ENOMEM;
+ xcv->pdev = pdev;
+
+ pci_set_drvdata(pdev, xcv);
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ goto err_kfree;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto err_disable_device;
+ }
+
+ /* MAP configuration registers */
+ xcv->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+ if (!xcv->reg_base) {
+ dev_err(dev, "XCV: Cannot map CSR memory space, aborting\n");
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ return 0;
+
+err_release_regions:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+err_kfree:
+ devm_kfree(dev, xcv);
+ xcv = NULL;
+ return err;
+}
+
+static void xcv_remove(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ if (xcv) {
+ devm_kfree(dev, xcv);
+ xcv = NULL;
+ }
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver xcv_driver = {
+ .name = DRV_NAME,
+ .id_table = xcv_id_table,
+ .probe = xcv_probe,
+ .remove = xcv_remove,
+};
+
+static int __init xcv_init_module(void)
+{
+ pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
+
+ return pci_register_driver(&xcv_driver);
+}
+
+static void __exit xcv_cleanup_module(void)
+{
+ pci_unregister_driver(&xcv_driver);
+}
+
+module_init(xcv_init_module);
+module_exit(xcv_cleanup_module);
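
xcv_setup_link() keeps the RGMII operating speed in the low two bits of XCV_CTL. A stand-alone user-space sketch of that encoding (illustrative only):

#include <stdio.h>
#include <stdint.h>

/* Map link speed in Mbps to the 2-bit XCV_CTL speed field:
 * 0 = 10M, 1 = 100M, 2 = 1000M (the default).
 */
static uint64_t xcv_ctl_speed(uint64_t ctl, int link_speed)
{
	int speed = 2;

	if (link_speed == 100)
		speed = 1;
	else if (link_speed == 10)
		speed = 0;

	return (ctl & ~0x03ULL) | speed;	/* replace bits 1:0 only */
}

int main(void)
{
	printf("XCV_CTL for 100M: 0x%llx\n",
	       (unsigned long long)xcv_ctl_speed(0, 100));
	return 0;
}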