Diffstat (limited to 'drivers/net/ethernet/cavium')
49 files changed, 7160 insertions, 1235 deletions
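One hunk below, in cn23xx_setup_global_mac_regs(), extends the per-MAC SLI_PKT_MAC()_PF()_RINFO programming with the new RPVF (rings per VF) and NVFS (number of VFs) fields alongside the existing SRN/TRS setup. The standalone C sketch below illustrates just that bit packing. It is a minimal sketch, not driver code: the bit positions (TRS <23:16>, RPVF <39:32>, NVFS <55:48>) are taken from the hunk's own comments, the numeric macro values are inferred from those comments rather than copied from the real cn23xx_pf_regs.h, pack_rinfo() is a hypothetical helper, the example ring/VF counts are made up, and the SRN field and the actual CSR write are omitted.

#include <stdint.h>
#include <stdio.h>

/* Bit positions inferred from the patch comments for the RINFO register:
 * TRS <23:16>, RPVF <39:32>, NVFS <55:48>. The macro names mirror the ones
 * used in cn23xx_pf_device.c; the values are not copied from the header.
 */
#define CN23XX_PKT_MAC_CTL_RINFO_TRS_BIT_POS	16
#define CN23XX_PKT_MAC_CTL_RINFO_RPVF_BIT_POS	32
#define CN23XX_PKT_MAC_CTL_RINFO_NVFS_BIT_POS	48

/* Pack total rings (TRS), rings per VF (RPVF) and VF count (NVFS) the way
 * cn23xx_setup_global_mac_regs() composes the RINFO register value.
 */
static uint64_t pack_rinfo(uint64_t trs, uint64_t rings_per_vf,
			   uint64_t max_vfs)
{
	uint64_t reg_val = 0;

	reg_val |= (trs & 0xff) << CN23XX_PKT_MAC_CTL_RINFO_TRS_BIT_POS;
	reg_val |= (rings_per_vf & 0xff) << CN23XX_PKT_MAC_CTL_RINFO_RPVF_BIT_POS;
	reg_val |= (max_vfs & 0xff) << CN23XX_PKT_MAC_CTL_RINFO_NVFS_BIT_POS;

	return reg_val;
}

int main(void)
{
	/* Hypothetical example: 64 total rings, 1 ring per VF, 56 VFs,
	 * matching the rings_per_vf = 1 policy that cn23xx_sriov_config()
	 * applies in this patch. Prints: RINFO = 0x0038000100400000
	 */
	printf("RINFO = 0x%016llx\n",
	       (unsigned long long)pack_rinfo(64, 1, 56));
	return 0;
}

Each field is masked to its low 8 bits before shifting, matching the "& 0xff" masking the patch applies to rings_per_vf and max_vfs before OR-ing them into reg_val.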
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig index 92f411c9f0df..bbc8bd16cb97 100644 --- a/drivers/net/ethernet/cavium/Kconfig +++ b/drivers/net/ethernet/cavium/Kconfig @@ -53,7 +53,7 @@ config THUNDER_NIC_RGX config LIQUIDIO tristate "Cavium LiquidIO support" depends on 64BIT - select PTP_1588_CLOCK + imply PTP_1588_CLOCK select FW_LOADER select LIBCRC32C ---help--- @@ -74,4 +74,16 @@ config OCTEON_MGMT_ETHERNET port on Cavium Networks' Octeon CN57XX, CN56XX, CN55XX, CN54XX, CN52XX, and CN6XXX chips. +config LIQUIDIO_VF + tristate "Cavium LiquidIO VF support" + depends on 64BIT && PCI_MSI + select PTP_1588_CLOCK + ---help--- + This driver supports Cavium LiquidIO Intelligent Server Adapter + based on CN23XX chips. + + To compile this driver as a module, choose M here: The module + will be called liquidio_vf. MSI-X interrupt support is required + for this driver to work correctly + endif # NET_VENDOR_CAVIUM diff --git a/drivers/net/ethernet/cavium/liquidio/Makefile b/drivers/net/ethernet/cavium/liquidio/Makefile index 5a27b2a44039..c4d411d1aa28 100644 --- a/drivers/net/ethernet/cavium/liquidio/Makefile +++ b/drivers/net/ethernet/cavium/liquidio/Makefile @@ -11,8 +11,32 @@ liquidio-$(CONFIG_LIQUIDIO) += lio_ethtool.o \ cn66xx_device.o \ cn68xx_device.o \ cn23xx_pf_device.o \ + cn23xx_vf_device.o \ + octeon_mailbox.o \ octeon_mem_ops.o \ octeon_droq.o \ octeon_nic.o liquidio-objs := lio_main.o octeon_console.o $(liquidio-y) + +obj-$(CONFIG_LIQUIDIO_VF) += liquidio_vf.o + +ifeq ($(CONFIG_LIQUIDIO)$(CONFIG_LIQUIDIO_VF), yy) + liquidio_vf-objs := lio_vf_main.o +else +liquidio_vf-$(CONFIG_LIQUIDIO_VF) += lio_ethtool.o \ + lio_core.o \ + request_manager.o \ + response_manager.o \ + octeon_device.o \ + cn66xx_device.o \ + cn68xx_device.o \ + cn23xx_pf_device.o \ + cn23xx_vf_device.o \ + octeon_mailbox.o \ + octeon_mem_ops.o \ + octeon_droq.o \ + octeon_nic.o + +liquidio_vf-objs := lio_vf_main.o $(liquidio_vf-y) +endif diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c index 380a64115a98..962dcbcef8b5 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c @@ -1,28 +1,23 @@ /********************************************************************** -* Author: Cavium, Inc. -* -* Contact: support@cavium.com -* Please include "LiquidIO" in the subject. -* -* Copyright (c) 2003-2015 Cavium, Inc. -* -* This file is free software; you can redistribute it and/or modify -* it under the terms of the GNU General Public License, Version 2, as -* published by the Free Software Foundation. -* -* This file is distributed in the hope that it will be useful, but -* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty -* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or -* NONINFRINGEMENT. See the GNU General Public License for more -* details. -* -* This file may also be available under a different license from Cavium. -* Contact Cavium, Inc. for more information -**********************************************************************/ - + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. 
+ * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. + ***********************************************************************/ #include <linux/pci.h> -#include <linux/netdevice.h> #include <linux/vmalloc.h> +#include <linux/etherdevice.h> #include "liquidio_common.h" #include "octeon_droq.h" #include "octeon_iq.h" @@ -30,6 +25,7 @@ #include "octeon_device.h" #include "cn23xx_pf_device.h" #include "octeon_main.h" +#include "octeon_mailbox.h" #define RESET_NOTDONE 0 #define RESET_DONE 1 @@ -40,11 +36,6 @@ */ #define CN23XX_INPUT_JABBER 64600 -#define LIOLUT_RING_DISTRIBUTION 9 -const int liolut_num_vfs_to_rings_per_vf[LIOLUT_RING_DISTRIBUTION] = { - 0, 8, 4, 2, 2, 2, 1, 1, 1 -}; - void cn23xx_dump_pf_initialized_regs(struct octeon_device *oct) { int i = 0; @@ -309,9 +300,10 @@ u32 cn23xx_pf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us) static void cn23xx_setup_global_mac_regs(struct octeon_device *oct) { - u64 reg_val; u16 mac_no = oct->pcie_port; u16 pf_num = oct->pf_num; + u64 reg_val; + u64 temp; /* programming SRN and TRS for each MAC(0..3) */ @@ -333,6 +325,14 @@ static void cn23xx_setup_global_mac_regs(struct octeon_device *oct) /* setting TRS <23:16> */ reg_val = reg_val | (oct->sriov_info.trs << CN23XX_PKT_MAC_CTL_RINFO_TRS_BIT_POS); + /* setting RPVF <39:32> */ + temp = oct->sriov_info.rings_per_vf & 0xff; + reg_val |= (temp << CN23XX_PKT_MAC_CTL_RINFO_RPVF_BIT_POS); + + /* setting NVFS <55:48> */ + temp = oct->sriov_info.max_vfs & 0xff; + reg_val |= (temp << CN23XX_PKT_MAC_CTL_RINFO_NVFS_BIT_POS); + /* write these settings to MAC register */ octeon_write_csr64(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num), reg_val); @@ -399,11 +399,12 @@ static int cn23xx_reset_io_queues(struct octeon_device *oct) static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct) { + struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip; + struct octeon_instr_queue *iq; + u64 intr_threshold, reg_val; u32 q_no, ern, srn; u64 pf_num; - u64 intr_threshold, reg_val; - struct octeon_instr_queue *iq; - struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip; + u64 vf_num; pf_num = oct->pf_num; @@ -414,12 +415,22 @@ static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct) return -1; /** Set the MAC_NUM and PVF_NUM in IQ_PKT_CONTROL reg - * for all queues.Only PF can set these bits. - * bits 29:30 indicate the MAC num. - * bits 32:47 indicate the PVF num. - */ + * for all queues.Only PF can set these bits. + * bits 29:30 indicate the MAC num. + * bits 32:47 indicate the PVF num. + */ for (q_no = 0; q_no < ern; q_no++) { reg_val = oct->pcie_port << CN23XX_PKT_INPUT_CTL_MAC_NUM_POS; + + /* for VF assigned queues. */ + if (q_no < oct->sriov_info.pf_srn) { + vf_num = q_no / oct->sriov_info.rings_per_vf; + vf_num += 1; /* VF1, VF2,........ 
*/ + } else { + vf_num = 0; + } + + reg_val |= vf_num << CN23XX_PKT_INPUT_CTL_VF_NUM_POS; reg_val |= pf_num << CN23XX_PKT_INPUT_CTL_PF_NUM_POS; octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), @@ -530,8 +541,8 @@ static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct) writeq(0x40, (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_WMARK); /** Disabling setting OQs in reset when ring has no dorebells - * enabling this will cause of head of line blocking - */ + * enabling this will cause of head of line blocking + */ /* Do it only for pass1.1. and pass1.2 */ if ((oct->rev_id == OCTEON_CN23XX_REV_1_0) || (oct->rev_id == OCTEON_CN23XX_REV_1_1)) @@ -662,6 +673,118 @@ static void cn23xx_setup_oq_regs(struct octeon_device *oct, u32 oq_no) } } +static void cn23xx_pf_mbox_thread(struct work_struct *work) +{ + struct cavium_wk *wk = (struct cavium_wk *)work; + struct octeon_mbox *mbox = (struct octeon_mbox *)wk->ctxptr; + struct octeon_device *oct = mbox->oct_dev; + u64 mbox_int_val, val64; + u32 q_no, i; + + if (oct->rev_id < OCTEON_CN23XX_REV_1_1) { + /*read and clear by writing 1*/ + mbox_int_val = readq(mbox->mbox_int_reg); + writeq(mbox_int_val, mbox->mbox_int_reg); + + for (i = 0; i < oct->sriov_info.num_vfs_alloced; i++) { + q_no = i * oct->sriov_info.rings_per_vf; + + val64 = readq(oct->mbox[q_no]->mbox_write_reg); + + if (val64 && (val64 != OCTEON_PFVFACK)) { + if (octeon_mbox_read(oct->mbox[q_no])) + octeon_mbox_process_message( + oct->mbox[q_no]); + } + } + + schedule_delayed_work(&wk->work, msecs_to_jiffies(10)); + } else { + octeon_mbox_process_message(mbox); + } +} + +static int cn23xx_setup_pf_mbox(struct octeon_device *oct) +{ + struct octeon_mbox *mbox = NULL; + u16 mac_no = oct->pcie_port; + u16 pf_num = oct->pf_num; + u32 q_no, i; + + if (!oct->sriov_info.max_vfs) + return 0; + + for (i = 0; i < oct->sriov_info.max_vfs; i++) { + q_no = i * oct->sriov_info.rings_per_vf; + + mbox = vmalloc(sizeof(*mbox)); + if (!mbox) + goto free_mbox; + + memset(mbox, 0, sizeof(struct octeon_mbox)); + + spin_lock_init(&mbox->lock); + + mbox->oct_dev = oct; + + mbox->q_no = q_no; + + mbox->state = OCTEON_MBOX_STATE_IDLE; + + /* PF mbox interrupt reg */ + mbox->mbox_int_reg = (u8 *)oct->mmio[0].hw_addr + + CN23XX_SLI_MAC_PF_MBOX_INT(mac_no, pf_num); + + /* PF writes into SIG0 reg */ + mbox->mbox_write_reg = (u8 *)oct->mmio[0].hw_addr + + CN23XX_SLI_PKT_PF_VF_MBOX_SIG(q_no, 0); + + /* PF reads from SIG1 reg */ + mbox->mbox_read_reg = (u8 *)oct->mmio[0].hw_addr + + CN23XX_SLI_PKT_PF_VF_MBOX_SIG(q_no, 1); + + /*Mail Box Thread creation*/ + INIT_DELAYED_WORK(&mbox->mbox_poll_wk.work, + cn23xx_pf_mbox_thread); + mbox->mbox_poll_wk.ctxptr = (void *)mbox; + + oct->mbox[q_no] = mbox; + + writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg); + } + + if (oct->rev_id < OCTEON_CN23XX_REV_1_1) + schedule_delayed_work(&oct->mbox[0]->mbox_poll_wk.work, + msecs_to_jiffies(0)); + + return 0; + +free_mbox: + while (i) { + i--; + vfree(oct->mbox[i]); + } + + return 1; +} + +static int cn23xx_free_pf_mbox(struct octeon_device *oct) +{ + u32 q_no, i; + + if (!oct->sriov_info.max_vfs) + return 0; + + for (i = 0; i < oct->sriov_info.max_vfs; i++) { + q_no = i * oct->sriov_info.rings_per_vf; + cancel_delayed_work_sync( + &oct->mbox[q_no]->mbox_poll_wk.work); + vfree(oct->mbox[q_no]); + } + + return 0; +} + static int cn23xx_enable_io_queues(struct octeon_device *oct) { u64 reg_val; @@ -856,6 +979,29 @@ static u64 cn23xx_pf_msix_interrupt_handler(void *dev) return ret; } +static void 
cn23xx_handle_pf_mbox_intr(struct octeon_device *oct) +{ + struct delayed_work *work; + u64 mbox_int_val; + u32 i, q_no; + + mbox_int_val = readq(oct->mbox[0]->mbox_int_reg); + + for (i = 0; i < oct->sriov_info.num_vfs_alloced; i++) { + q_no = i * oct->sriov_info.rings_per_vf; + + if (mbox_int_val & BIT_ULL(q_no)) { + writeq(BIT_ULL(q_no), + oct->mbox[0]->mbox_int_reg); + if (octeon_mbox_read(oct->mbox[q_no])) { + work = &oct->mbox[q_no]->mbox_poll_wk.work; + schedule_delayed_work(work, + msecs_to_jiffies(0)); + } + } + } +} + static irqreturn_t cn23xx_interrupt_handler(void *dev) { struct octeon_device *oct = (struct octeon_device *)dev; @@ -871,6 +1017,10 @@ static irqreturn_t cn23xx_interrupt_handler(void *dev) dev_err(&oct->pci_dev->dev, "OCTEON[%d]: Error Intr: 0x%016llx\n", oct->octeon_id, CVM_CAST64(intr64)); + /* When VFs write into MBOX_SIG2 reg,these intr is set in PF */ + if (intr64 & CN23XX_INTR_VF_MBOX) + cn23xx_handle_pf_mbox_intr(oct); + if (oct->msix_on != LIO_FLAG_MSIX_ENABLED) { if (intr64 & CN23XX_INTR_PKT_DATA) oct->int_status |= OCT_DEV_INTR_PKT_DATA; @@ -961,6 +1111,13 @@ static void cn23xx_enable_pf_interrupt(struct octeon_device *oct, u8 intr_flag) intr_val = readq(cn23xx->intr_enb_reg64); intr_val |= CN23XX_INTR_PKT_DATA; writeq(intr_val, cn23xx->intr_enb_reg64); + } else if ((intr_flag & OCTEON_MBOX_INTR) && + (oct->sriov_info.max_vfs > 0)) { + if (oct->rev_id >= OCTEON_CN23XX_REV_1_1) { + intr_val = readq(cn23xx->intr_enb_reg64); + intr_val |= CN23XX_INTR_VF_MBOX; + writeq(intr_val, cn23xx->intr_enb_reg64); + } } } @@ -976,6 +1133,13 @@ static void cn23xx_disable_pf_interrupt(struct octeon_device *oct, u8 intr_flag) intr_val = readq(cn23xx->intr_enb_reg64); intr_val &= ~CN23XX_INTR_PKT_DATA; writeq(intr_val, cn23xx->intr_enb_reg64); + } else if ((intr_flag & OCTEON_MBOX_INTR) && + (oct->sriov_info.max_vfs > 0)) { + if (oct->rev_id >= OCTEON_CN23XX_REV_1_1) { + intr_val = readq(cn23xx->intr_enb_reg64); + intr_val &= ~CN23XX_INTR_VF_MBOX; + writeq(intr_val, cn23xx->intr_enb_reg64); + } } } @@ -1048,50 +1212,59 @@ static void cn23xx_setup_reg_address(struct octeon_device *oct) static int cn23xx_sriov_config(struct octeon_device *oct) { - u32 total_rings; struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip; - /* num_vfs is already filled for us */ + u32 max_rings, total_rings, max_vfs, rings_per_vf; u32 pf_srn, num_pf_rings; + u32 max_possible_vfs; cn23xx->conf = - (struct octeon_config *)oct_get_config_info(oct, LIO_23XX); + (struct octeon_config *)oct_get_config_info(oct, LIO_23XX); switch (oct->rev_id) { case OCTEON_CN23XX_REV_1_0: - total_rings = CN23XX_MAX_RINGS_PER_PF_PASS_1_0; + max_rings = CN23XX_MAX_RINGS_PER_PF_PASS_1_0; + max_possible_vfs = CN23XX_MAX_VFS_PER_PF_PASS_1_0; break; case OCTEON_CN23XX_REV_1_1: - total_rings = CN23XX_MAX_RINGS_PER_PF_PASS_1_1; + max_rings = CN23XX_MAX_RINGS_PER_PF_PASS_1_1; + max_possible_vfs = CN23XX_MAX_VFS_PER_PF_PASS_1_1; break; default: - total_rings = CN23XX_MAX_RINGS_PER_PF; + max_rings = CN23XX_MAX_RINGS_PER_PF; + max_possible_vfs = CN23XX_MAX_VFS_PER_PF; break; } - if (!oct->sriov_info.num_pf_rings) { - if (total_rings > num_present_cpus()) - num_pf_rings = num_present_cpus(); - else - num_pf_rings = total_rings; - } else { - num_pf_rings = oct->sriov_info.num_pf_rings; - if (num_pf_rings > total_rings) { - dev_warn(&oct->pci_dev->dev, - "num_queues_per_pf requested %u is more than available rings. 
Reducing to %u\n", - num_pf_rings, total_rings); - num_pf_rings = total_rings; - } - } + if (max_rings <= num_present_cpus()) + num_pf_rings = 1; + else + num_pf_rings = num_present_cpus(); + +#ifdef CONFIG_PCI_IOV + max_vfs = min_t(u32, + (max_rings - num_pf_rings), max_possible_vfs); + rings_per_vf = 1; +#else + max_vfs = 0; + rings_per_vf = 0; +#endif + + total_rings = num_pf_rings + max_vfs; - total_rings = num_pf_rings; /* the first ring of the pf */ pf_srn = total_rings - num_pf_rings; oct->sriov_info.trs = total_rings; + oct->sriov_info.max_vfs = max_vfs; + oct->sriov_info.rings_per_vf = rings_per_vf; oct->sriov_info.pf_srn = pf_srn; oct->sriov_info.num_pf_rings = num_pf_rings; - dev_dbg(&oct->pci_dev->dev, "trs:%d pf_srn:%d num_pf_rings:%d\n", - oct->sriov_info.trs, oct->sriov_info.pf_srn, - oct->sriov_info.num_pf_rings); + dev_notice(&oct->pci_dev->dev, "trs:%d max_vfs:%d rings_per_vf:%d pf_srn:%d num_pf_rings:%d\n", + oct->sriov_info.trs, oct->sriov_info.max_vfs, + oct->sriov_info.rings_per_vf, oct->sriov_info.pf_srn, + oct->sriov_info.num_pf_rings); + + oct->sriov_info.sriov_enabled = 0; + return 0; } @@ -1119,6 +1292,9 @@ int setup_cn23xx_octeon_pf_device(struct octeon_device *oct) oct->fn_list.setup_iq_regs = cn23xx_setup_iq_regs; oct->fn_list.setup_oq_regs = cn23xx_setup_oq_regs; + oct->fn_list.setup_mbox = cn23xx_setup_pf_mbox; + oct->fn_list.free_mbox = cn23xx_free_pf_mbox; + oct->fn_list.process_interrupt_regs = cn23xx_interrupt_handler; oct->fn_list.msix_interrupt_handler = cn23xx_pf_msix_interrupt_handler; @@ -1209,8 +1385,7 @@ void cn23xx_dump_iq_regs(struct octeon_device *oct) dev_dbg(&oct->pci_dev->dev, "SLI_PKT[%d]_INPUT_CTL [0x%x]: 0x%016llx\n", q_no, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), CVM_CAST64(octeon_read_csr64 - (oct, - CN23XX_SLI_IQ_PKT_CONTROL64(q_no)))); + (oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)))); } pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, ®val); @@ -1235,3 +1410,24 @@ int cn23xx_fw_loaded(struct octeon_device *oct) val = octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1); return (val >> 1) & 1ULL; } + +void cn23xx_tell_vf_its_macaddr_changed(struct octeon_device *oct, int vfidx, + u8 *mac) +{ + if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vfidx)) { + struct octeon_mbox_cmd mbox_cmd; + + mbox_cmd.msg.u64 = 0; + mbox_cmd.msg.s.type = OCTEON_MBOX_REQUEST; + mbox_cmd.msg.s.resp_needed = 0; + mbox_cmd.msg.s.cmd = OCTEON_PF_CHANGED_VF_MACADDR; + mbox_cmd.msg.s.len = 1; + mbox_cmd.recv_len = 0; + mbox_cmd.recv_status = 0; + mbox_cmd.fn = NULL; + mbox_cmd.fn_arg = 0; + ether_addr_copy(mbox_cmd.msg.s.params, mac); + mbox_cmd.q_no = vfidx * oct->sriov_info.rings_per_vf; + octeon_mbox_write(oct, &mbox_cmd); + } +} diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h index 21b5c9051967..2fedd91f3df8 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h @@ -1,34 +1,31 @@ /********************************************************************** -* Author: Cavium, Inc. -* -* Contact: support@cavium.com -* Please include "LiquidIO" in the subject. -* -* Copyright (c) 2003-2015 Cavium, Inc. -* -* This file is free software; you can redistribute it and/or modify -* it under the terms of the GNU General Public License, Version 2, as -* published by the Free Software Foundation. 
-* -* This file is distributed in the hope that it will be useful, but -* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty -* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or -* NONINFRINGEMENT. See the GNU General Public License for more -* details. -* -* This file may also be available under a different license from Cavium. -* Contact Cavium, Inc. for more information -**********************************************************************/ - + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. + ***********************************************************************/ /*! \file cn23xx_device.h * \brief Host Driver: Routines that perform CN23XX specific operations. -*/ + */ #ifndef __CN23XX_PF_DEVICE_H__ #define __CN23XX_PF_DEVICE_H__ #include "cn23xx_pf_regs.h" +#define LIO_CMD_WAIT_TM 100 + /* Register address and configuration for a CN23XX devices. * If device specific changes need to be made then add a struct to include * device specific fields as shown in the commented section @@ -56,4 +53,7 @@ u32 cn23xx_pf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us); void cn23xx_dump_pf_initialized_regs(struct octeon_device *oct); int cn23xx_fw_loaded(struct octeon_device *oct); + +void cn23xx_tell_vf_its_macaddr_changed(struct octeon_device *oct, int vfidx, + u8 *mac); #endif diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h index 03d79d95ab75..e6d4ad99cc38 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h @@ -1,29 +1,24 @@ /********************************************************************** -* Author: Cavium, Inc. -* -* Contact: support@cavium.com -* Please include "LiquidIO" in the subject. -* -* Copyright (c) 2003-2015 Cavium, Inc. -* -* This file is free software; you can redistribute it and/or modify -* it under the terms of the GNU General Public License, Version 2, as -* published by the Free Software Foundation. -* -* This file is distributed in the hope that it will be useful, but -* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty -* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or -* NONINFRINGEMENT. See the GNU General Public License for more -* details. -* -* This file may also be available under a different license from Cavium. -* Contact Cavium, Inc. for more information -**********************************************************************/ - + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. 
+ * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. + ***********************************************************************/ /*! \file cn23xx_regs.h * \brief Host Driver: Register Address and Register Mask values for * Octeon CN23XX devices. -*/ + */ #ifndef __CN23XX_PF_REGS_H__ #define __CN23XX_PF_REGS_H__ @@ -63,7 +58,7 @@ #define CN23XX_CONFIG_SRIOV_BAR_START 0x19C #define CN23XX_CONFIG_SRIOV_BARX(i) \ - (CN23XX_CONFIG_SRIOV_BAR_START + (i * 4)) + (CN23XX_CONFIG_SRIOV_BAR_START + ((i) * 4)) #define CN23XX_CONFIG_SRIOV_BAR_PF 0x08 #define CN23XX_CONFIG_SRIOV_BAR_64BIT 0x04 #define CN23XX_CONFIG_SRIOV_BAR_IO 0x01 @@ -513,7 +508,7 @@ /* 4 Registers (64 - bit) */ #define CN23XX_SLI_S2M_PORT_CTL_START 0x23D80 #define CN23XX_SLI_S2M_PORTX_CTL(port) \ - (CN23XX_SLI_S2M_PORT_CTL_START + (port * 0x10)) + (CN23XX_SLI_S2M_PORT_CTL_START + ((port) * 0x10)) #define CN23XX_SLI_MAC_NUMBER 0x20050 @@ -554,26 +549,26 @@ * Provides DMA Engine Queue Enable */ #define CN23XX_DPI_DMA_ENG0_ENB 0x0001df0000000080ULL -#define CN23XX_DPI_DMA_ENG_ENB(eng) (CN23XX_DPI_DMA_ENG0_ENB + (eng * 8)) +#define CN23XX_DPI_DMA_ENG_ENB(eng) (CN23XX_DPI_DMA_ENG0_ENB + ((eng) * 8)) /* 8 register (64-bit) - DPI_DMA(0..7)_REQQ_CTL * Provides control bits for transaction on 8 Queues */ #define CN23XX_DPI_DMA_REQQ0_CTL 0x0001df0000000180ULL #define CN23XX_DPI_DMA_REQQ_CTL(q_no) \ - (CN23XX_DPI_DMA_REQQ0_CTL + (q_no * 8)) + (CN23XX_DPI_DMA_REQQ0_CTL + ((q_no) * 8)) /* 6 register (64-bit) - DPI_ENG(0..5)_BUF * Provides DMA Engine FIFO (Queue) Size */ #define CN23XX_DPI_DMA_ENG0_BUF 0x0001df0000000880ULL #define CN23XX_DPI_DMA_ENG_BUF(eng) \ - (CN23XX_DPI_DMA_ENG0_BUF + (eng * 8)) + (CN23XX_DPI_DMA_ENG0_BUF + ((eng) * 8)) /* 4 Registers (64-bit) */ #define CN23XX_DPI_SLI_PRT_CFG_START 0x0001df0000000900ULL #define CN23XX_DPI_SLI_PRTX_CFG(port) \ - (CN23XX_DPI_SLI_PRT_CFG_START + (port * 0x8)) + (CN23XX_DPI_SLI_PRT_CFG_START + ((port) * 0x8)) /* Masks for DPI_DMA_CONTROL Register */ #define CN23XX_DPI_DMA_COMMIT_MODE BIT_ULL(58) diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c new file mode 100644 index 000000000000..b6117b6a1de2 --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c @@ -0,0 +1,722 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. 
+ ***********************************************************************/ +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/vmalloc.h> +#include "liquidio_common.h" +#include "octeon_droq.h" +#include "octeon_iq.h" +#include "response_manager.h" +#include "octeon_device.h" +#include "cn23xx_vf_device.h" +#include "octeon_main.h" +#include "octeon_mailbox.h" + +u32 cn23xx_vf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us) +{ + /* This gives the SLI clock per microsec */ + u32 oqticks_per_us = (u32)oct->pfvf_hsword.coproc_tics_per_us; + + /* This gives the clock cycles per millisecond */ + oqticks_per_us *= 1000; + + /* This gives the oq ticks (1024 core clock cycles) per millisecond */ + oqticks_per_us /= 1024; + + /* time_intr is in microseconds. The next 2 steps gives the oq ticks + * corressponding to time_intr. + */ + oqticks_per_us *= time_intr_in_us; + oqticks_per_us /= 1000; + + return oqticks_per_us; +} + +static int cn23xx_vf_reset_io_queues(struct octeon_device *oct, u32 num_queues) +{ + u32 loop = BUSY_READING_REG_VF_LOOP_COUNT; + int ret_val = 0; + u32 q_no; + u64 d64; + + for (q_no = 0; q_no < num_queues; q_no++) { + /* set RST bit to 1. This bit applies to both IQ and OQ */ + d64 = octeon_read_csr64(oct, + CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no)); + d64 |= CN23XX_PKT_INPUT_CTL_RST; + octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no), + d64); + } + + /* wait until the RST bit is clear or the RST and QUIET bits are set */ + for (q_no = 0; q_no < num_queues; q_no++) { + u64 reg_val = octeon_read_csr64(oct, + CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no)); + while ((READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_RST) && + !(READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_QUIET) && + loop) { + WRITE_ONCE(reg_val, octeon_read_csr64( + oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no))); + loop--; + } + if (!loop) { + dev_err(&oct->pci_dev->dev, + "clearing the reset reg failed or setting the quiet reg failed for qno: %u\n", + q_no); + return -1; + } + WRITE_ONCE(reg_val, READ_ONCE(reg_val) & + ~CN23XX_PKT_INPUT_CTL_RST); + octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no), + READ_ONCE(reg_val)); + + WRITE_ONCE(reg_val, octeon_read_csr64( + oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no))); + if (READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_RST) { + dev_err(&oct->pci_dev->dev, + "clearing the reset failed for qno: %u\n", + q_no); + ret_val = -1; + } + } + + return ret_val; +} + +static int cn23xx_vf_setup_global_input_regs(struct octeon_device *oct) +{ + struct octeon_cn23xx_vf *cn23xx = (struct octeon_cn23xx_vf *)oct->chip; + struct octeon_instr_queue *iq; + u64 q_no, intr_threshold; + u64 d64; + + if (cn23xx_vf_reset_io_queues(oct, oct->sriov_info.rings_per_vf)) + return -1; + + for (q_no = 0; q_no < (oct->sriov_info.rings_per_vf); q_no++) { + void __iomem *inst_cnt_reg; + + octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_DOORBELL(q_no), + 0xFFFFFFFF); + iq = oct->instr_queue[q_no]; + + if (iq) + inst_cnt_reg = iq->inst_cnt_reg; + else + inst_cnt_reg = (u8 *)oct->mmio[0].hw_addr + + CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no); + + d64 = octeon_read_csr64(oct, + CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no)); + + d64 &= 0xEFFFFFFFFFFFFFFFL; + + octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no), + d64); + + /* Select ES, RO, NS, RDSIZE,DPTR Fomat#0 for + * the Input Queues + */ + octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no), + CN23XX_PKT_INPUT_CTL_MASK); + + /* set the wmark level to trigger PI_INT */ + intr_threshold = CFG_GET_IQ_INTR_PKT(cn23xx->conf) & + 
CN23XX_PKT_IN_DONE_WMARK_MASK; + + writeq((readq(inst_cnt_reg) & + ~(CN23XX_PKT_IN_DONE_WMARK_MASK << + CN23XX_PKT_IN_DONE_WMARK_BIT_POS)) | + (intr_threshold << CN23XX_PKT_IN_DONE_WMARK_BIT_POS), + inst_cnt_reg); + } + return 0; +} + +static void cn23xx_vf_setup_global_output_regs(struct octeon_device *oct) +{ + u32 reg_val; + u32 q_no; + + for (q_no = 0; q_no < (oct->sriov_info.rings_per_vf); q_no++) { + octeon_write_csr(oct, CN23XX_VF_SLI_OQ_PKTS_CREDIT(q_no), + 0xFFFFFFFF); + + reg_val = + octeon_read_csr(oct, CN23XX_VF_SLI_OQ_PKTS_SENT(q_no)); + + reg_val &= 0xEFFFFFFFFFFFFFFFL; + + reg_val = + octeon_read_csr(oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no)); + + /* set IPTR & DPTR */ + reg_val |= + (CN23XX_PKT_OUTPUT_CTL_IPTR | CN23XX_PKT_OUTPUT_CTL_DPTR); + + /* reset BMODE */ + reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_BMODE); + + /* No Relaxed Ordering, No Snoop, 64-bit Byte swap + * for Output Queue ScatterList reset ROR_P, NSR_P + */ + reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR_P); + reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR_P); + +#ifdef __LITTLE_ENDIAN_BITFIELD + reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ES_P); +#else + reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES_P); +#endif + /* No Relaxed Ordering, No Snoop, 64-bit Byte swap + * for Output Queue Data reset ROR, NSR + */ + reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR); + reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR); + /* set the ES bit */ + reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES); + + /* write all the selected settings */ + octeon_write_csr(oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no), + reg_val); + } +} + +static int cn23xx_setup_vf_device_regs(struct octeon_device *oct) +{ + if (cn23xx_vf_setup_global_input_regs(oct)) + return -1; + + cn23xx_vf_setup_global_output_regs(oct); + + return 0; +} + +static void cn23xx_setup_vf_iq_regs(struct octeon_device *oct, u32 iq_no) +{ + struct octeon_instr_queue *iq = oct->instr_queue[iq_no]; + u64 pkt_in_done; + + /* Write the start of the input queue's ring and its size */ + octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_BASE_ADDR64(iq_no), + iq->base_addr_dma); + octeon_write_csr(oct, CN23XX_VF_SLI_IQ_SIZE(iq_no), iq->max_count); + + /* Remember the doorbell & instruction count register addr + * for this queue + */ + iq->doorbell_reg = + (u8 *)oct->mmio[0].hw_addr + CN23XX_VF_SLI_IQ_DOORBELL(iq_no); + iq->inst_cnt_reg = + (u8 *)oct->mmio[0].hw_addr + CN23XX_VF_SLI_IQ_INSTR_COUNT64(iq_no); + dev_dbg(&oct->pci_dev->dev, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n", + iq_no, iq->doorbell_reg, iq->inst_cnt_reg); + + /* Store the current instruction counter (used in flush_iq + * calculation) + */ + pkt_in_done = readq(iq->inst_cnt_reg); + + if (oct->msix_on) { + /* Set CINT_ENB to enable IQ interrupt */ + writeq((pkt_in_done | CN23XX_INTR_CINT_ENB), + iq->inst_cnt_reg); + } + iq->reset_instr_cnt = 0; +} + +static void cn23xx_setup_vf_oq_regs(struct octeon_device *oct, u32 oq_no) +{ + struct octeon_droq *droq = oct->droq[oq_no]; + + octeon_write_csr64(oct, CN23XX_VF_SLI_OQ_BASE_ADDR64(oq_no), + droq->desc_ring_dma); + octeon_write_csr(oct, CN23XX_VF_SLI_OQ_SIZE(oq_no), droq->max_count); + + octeon_write_csr(oct, CN23XX_VF_SLI_OQ_BUFF_INFO_SIZE(oq_no), + (droq->buffer_size | (OCT_RH_SIZE << 16))); + + /* Get the mapped address of the pkt_sent and pkts_credit regs */ + droq->pkts_sent_reg = + (u8 *)oct->mmio[0].hw_addr + CN23XX_VF_SLI_OQ_PKTS_SENT(oq_no); + droq->pkts_credit_reg = + (u8 *)oct->mmio[0].hw_addr + CN23XX_VF_SLI_OQ_PKTS_CREDIT(oq_no); +} + +static void cn23xx_vf_mbox_thread(struct work_struct *work) +{ + struct cavium_wk *wk = (struct 
cavium_wk *)work; + struct octeon_mbox *mbox = (struct octeon_mbox *)wk->ctxptr; + + octeon_mbox_process_message(mbox); +} + +static int cn23xx_free_vf_mbox(struct octeon_device *oct) +{ + cancel_delayed_work_sync(&oct->mbox[0]->mbox_poll_wk.work); + vfree(oct->mbox[0]); + return 0; +} + +static int cn23xx_setup_vf_mbox(struct octeon_device *oct) +{ + struct octeon_mbox *mbox = NULL; + + mbox = vmalloc(sizeof(*mbox)); + if (!mbox) + return 1; + + memset(mbox, 0, sizeof(struct octeon_mbox)); + + spin_lock_init(&mbox->lock); + + mbox->oct_dev = oct; + + mbox->q_no = 0; + + mbox->state = OCTEON_MBOX_STATE_IDLE; + + /* VF mbox interrupt reg */ + mbox->mbox_int_reg = + (u8 *)oct->mmio[0].hw_addr + CN23XX_VF_SLI_PKT_MBOX_INT(0); + /* VF reads from SIG0 reg */ + mbox->mbox_read_reg = + (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_PKT_PF_VF_MBOX_SIG(0, 0); + /* VF writes into SIG1 reg */ + mbox->mbox_write_reg = + (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_PKT_PF_VF_MBOX_SIG(0, 1); + + INIT_DELAYED_WORK(&mbox->mbox_poll_wk.work, + cn23xx_vf_mbox_thread); + + mbox->mbox_poll_wk.ctxptr = mbox; + + oct->mbox[0] = mbox; + + writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg); + + return 0; +} + +static int cn23xx_enable_vf_io_queues(struct octeon_device *oct) +{ + u32 q_no; + + for (q_no = 0; q_no < oct->num_iqs; q_no++) { + u64 reg_val; + + /* set the corresponding IQ IS_64B bit */ + if (oct->io_qmask.iq64B & BIT_ULL(q_no)) { + reg_val = octeon_read_csr64( + oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no)); + reg_val |= CN23XX_PKT_INPUT_CTL_IS_64B; + octeon_write_csr64( + oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no), reg_val); + } + + /* set the corresponding IQ ENB bit */ + if (oct->io_qmask.iq & BIT_ULL(q_no)) { + reg_val = octeon_read_csr64( + oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no)); + reg_val |= CN23XX_PKT_INPUT_CTL_RING_ENB; + octeon_write_csr64( + oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no), reg_val); + } + } + for (q_no = 0; q_no < oct->num_oqs; q_no++) { + u32 reg_val; + + /* set the corresponding OQ ENB bit */ + if (oct->io_qmask.oq & BIT_ULL(q_no)) { + reg_val = octeon_read_csr( + oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no)); + reg_val |= CN23XX_PKT_OUTPUT_CTL_RING_ENB; + octeon_write_csr( + oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no), reg_val); + } + } + + return 0; +} + +static void cn23xx_disable_vf_io_queues(struct octeon_device *oct) +{ + u32 num_queues = oct->num_iqs; + + /* per HRM, rings can only be disabled via reset operation, + * NOT via SLI_PKT()_INPUT/OUTPUT_CONTROL[ENB] + */ + if (num_queues < oct->num_oqs) + num_queues = oct->num_oqs; + + cn23xx_vf_reset_io_queues(oct, num_queues); +} + +void cn23xx_vf_ask_pf_to_do_flr(struct octeon_device *oct) +{ + struct octeon_mbox_cmd mbox_cmd; + + mbox_cmd.msg.u64 = 0; + mbox_cmd.msg.s.type = OCTEON_MBOX_REQUEST; + mbox_cmd.msg.s.resp_needed = 0; + mbox_cmd.msg.s.cmd = OCTEON_VF_FLR_REQUEST; + mbox_cmd.msg.s.len = 1; + mbox_cmd.q_no = 0; + mbox_cmd.recv_len = 0; + mbox_cmd.recv_status = 0; + mbox_cmd.fn = NULL; + mbox_cmd.fn_arg = 0; + + octeon_mbox_write(oct, &mbox_cmd); +} + +static void octeon_pfvf_hs_callback(struct octeon_device *oct, + struct octeon_mbox_cmd *cmd, + void *arg) +{ + u32 major = 0; + + memcpy((uint8_t *)&oct->pfvf_hsword, cmd->msg.s.params, + CN23XX_MAILBOX_MSGPARAM_SIZE); + if (cmd->recv_len > 1) { + major = ((struct lio_version *)(cmd->data))->major; + major = major << 16; + } + + atomic_set((atomic_t *)arg, major | 1); +} + +int cn23xx_octeon_pfvf_handshake(struct octeon_device *oct) +{ + struct octeon_mbox_cmd mbox_cmd; + u32 q_no, count = 0; 
+ atomic_t status; + u32 pfmajor; + u32 vfmajor; + u32 ret; + + /* Sending VF_ACTIVE indication to the PF driver */ + dev_dbg(&oct->pci_dev->dev, "requesting info from pf\n"); + + mbox_cmd.msg.u64 = 0; + mbox_cmd.msg.s.type = OCTEON_MBOX_REQUEST; + mbox_cmd.msg.s.resp_needed = 1; + mbox_cmd.msg.s.cmd = OCTEON_VF_ACTIVE; + mbox_cmd.msg.s.len = 2; + mbox_cmd.data[0] = 0; + ((struct lio_version *)&mbox_cmd.data[0])->major = + LIQUIDIO_BASE_MAJOR_VERSION; + ((struct lio_version *)&mbox_cmd.data[0])->minor = + LIQUIDIO_BASE_MINOR_VERSION; + ((struct lio_version *)&mbox_cmd.data[0])->micro = + LIQUIDIO_BASE_MICRO_VERSION; + mbox_cmd.q_no = 0; + mbox_cmd.recv_len = 0; + mbox_cmd.recv_status = 0; + mbox_cmd.fn = (octeon_mbox_callback_t)octeon_pfvf_hs_callback; + mbox_cmd.fn_arg = &status; + + /* Interrupts are not enabled at this point. + * Enable them with default oq ticks + */ + oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR); + + octeon_mbox_write(oct, &mbox_cmd); + + atomic_set(&status, 0); + + do { + schedule_timeout_uninterruptible(1); + } while ((!atomic_read(&status)) && (count++ < 100000)); + + /* Disable the interrupt so that the interrupsts will be reenabled + * with the oq ticks received from the PF + */ + oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); + + ret = atomic_read(&status); + if (!ret) { + dev_err(&oct->pci_dev->dev, "octeon_pfvf_handshake timeout\n"); + return 1; + } + + for (q_no = 0 ; q_no < oct->num_iqs ; q_no++) + oct->instr_queue[q_no]->txpciq.s.pkind = oct->pfvf_hsword.pkind; + + vfmajor = LIQUIDIO_BASE_MAJOR_VERSION; + pfmajor = ret >> 16; + if (pfmajor != vfmajor) { + dev_err(&oct->pci_dev->dev, + "VF Liquidio driver (major version %d) is not compatible with Liquidio PF driver (major version %d)\n", + vfmajor, pfmajor); + return 1; + } + + dev_dbg(&oct->pci_dev->dev, + "VF Liquidio driver (major version %d), Liquidio PF driver (major version %d)\n", + vfmajor, pfmajor); + + dev_dbg(&oct->pci_dev->dev, "got data from pf pkind is %d\n", + oct->pfvf_hsword.pkind); + + return 0; +} + +static void cn23xx_handle_vf_mbox_intr(struct octeon_ioq_vector *ioq_vector) +{ + struct octeon_device *oct = ioq_vector->oct_dev; + u64 mbox_int_val; + + if (!ioq_vector->droq_index) { + /* read and clear by writing 1 */ + mbox_int_val = readq(oct->mbox[0]->mbox_int_reg); + writeq(mbox_int_val, oct->mbox[0]->mbox_int_reg); + if (octeon_mbox_read(oct->mbox[0])) + schedule_delayed_work(&oct->mbox[0]->mbox_poll_wk.work, + msecs_to_jiffies(0)); + } +} + +static u64 cn23xx_vf_msix_interrupt_handler(void *dev) +{ + struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev; + struct octeon_device *oct = ioq_vector->oct_dev; + struct octeon_droq *droq = oct->droq[ioq_vector->droq_index]; + u64 pkts_sent; + u64 ret = 0; + + dev_dbg(&oct->pci_dev->dev, "In %s octeon_dev @ %p\n", __func__, oct); + pkts_sent = readq(droq->pkts_sent_reg); + + /* If our device has interrupted, then proceed. Also check + * for all f's if interrupt was triggered on an error + * and the PCI read fails. + */ + if (!pkts_sent || (pkts_sent == 0xFFFFFFFFFFFFFFFFULL)) + return ret; + + /* Write count reg in sli_pkt_cnts to clear these int. */ + if ((pkts_sent & CN23XX_INTR_PO_INT) || + (pkts_sent & CN23XX_INTR_PI_INT)) { + if (pkts_sent & CN23XX_INTR_PO_INT) + ret |= MSIX_PO_INT; + } + + if (pkts_sent & CN23XX_INTR_PI_INT) + /* We will clear the count when we update the read_index. 
*/ + ret |= MSIX_PI_INT; + + if (pkts_sent & CN23XX_INTR_MBOX_INT) { + cn23xx_handle_vf_mbox_intr(ioq_vector); + ret |= MSIX_MBOX_INT; + } + + return ret; +} + +static u32 cn23xx_update_read_index(struct octeon_instr_queue *iq) +{ + u32 pkt_in_done = readl(iq->inst_cnt_reg); + u32 last_done; + u32 new_idx; + + last_done = pkt_in_done - iq->pkt_in_done; + iq->pkt_in_done = pkt_in_done; + + /* Modulo of the new index with the IQ size will give us + * the new index. The iq->reset_instr_cnt is always zero for + * cn23xx, so no extra adjustments are needed. + */ + new_idx = (iq->octeon_read_index + + (u32)(last_done & CN23XX_PKT_IN_DONE_CNT_MASK)) % + iq->max_count; + + return new_idx; +} + +static void cn23xx_enable_vf_interrupt(struct octeon_device *oct, u8 intr_flag) +{ + struct octeon_cn23xx_vf *cn23xx = (struct octeon_cn23xx_vf *)oct->chip; + u32 q_no, time_threshold; + + if (intr_flag & OCTEON_OUTPUT_INTR) { + for (q_no = 0; q_no < oct->num_oqs; q_no++) { + /* Set up interrupt packet and time thresholds + * for all the OQs + */ + time_threshold = cn23xx_vf_get_oq_ticks( + oct, (u32)CFG_GET_OQ_INTR_TIME(cn23xx->conf)); + + octeon_write_csr64( + oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no), + (CFG_GET_OQ_INTR_PKT(cn23xx->conf) | + ((u64)time_threshold << 32))); + } + } + + if (intr_flag & OCTEON_INPUT_INTR) { + for (q_no = 0; q_no < oct->num_oqs; q_no++) { + /* Set CINT_ENB to enable IQ interrupt */ + octeon_write_csr64( + oct, CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no), + ((octeon_read_csr64( + oct, CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no)) & + ~CN23XX_PKT_IN_DONE_CNT_MASK) | + CN23XX_INTR_CINT_ENB)); + } + } + + /* Set queue-0 MBOX_ENB to enable VF mailbox interrupt */ + if (intr_flag & OCTEON_MBOX_INTR) { + octeon_write_csr64( + oct, CN23XX_VF_SLI_PKT_MBOX_INT(0), + (octeon_read_csr64(oct, CN23XX_VF_SLI_PKT_MBOX_INT(0)) | + CN23XX_INTR_MBOX_ENB)); + } +} + +static void cn23xx_disable_vf_interrupt(struct octeon_device *oct, u8 intr_flag) +{ + u32 q_no; + + if (intr_flag & OCTEON_OUTPUT_INTR) { + for (q_no = 0; q_no < oct->num_oqs; q_no++) { + /* Write all 1's in INT_LEVEL reg to disable PO_INT */ + octeon_write_csr64( + oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no), + 0x3fffffffffffff); + } + } + if (intr_flag & OCTEON_INPUT_INTR) { + for (q_no = 0; q_no < oct->num_oqs; q_no++) { + octeon_write_csr64( + oct, CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no), + (octeon_read_csr64( + oct, CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no)) & + ~(CN23XX_INTR_CINT_ENB | + CN23XX_PKT_IN_DONE_CNT_MASK))); + } + } + + if (intr_flag & OCTEON_MBOX_INTR) { + octeon_write_csr64( + oct, CN23XX_VF_SLI_PKT_MBOX_INT(0), + (octeon_read_csr64(oct, CN23XX_VF_SLI_PKT_MBOX_INT(0)) & + ~CN23XX_INTR_MBOX_ENB)); + } +} + +int cn23xx_setup_octeon_vf_device(struct octeon_device *oct) +{ + struct octeon_cn23xx_vf *cn23xx = (struct octeon_cn23xx_vf *)oct->chip; + u32 rings_per_vf, ring_flag; + u64 reg_val; + + if (octeon_map_pci_barx(oct, 0, 0)) + return 1; + + /* INPUT_CONTROL[RPVF] gives the VF IOq count */ + reg_val = octeon_read_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(0)); + + oct->pf_num = (reg_val >> CN23XX_PKT_INPUT_CTL_PF_NUM_POS) & + CN23XX_PKT_INPUT_CTL_PF_NUM_MASK; + oct->vf_num = (reg_val >> CN23XX_PKT_INPUT_CTL_VF_NUM_POS) & + CN23XX_PKT_INPUT_CTL_VF_NUM_MASK; + + reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS; + + rings_per_vf = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK; + + ring_flag = 0; + + cn23xx->conf = oct_get_config_info(oct, LIO_23XX); + if (!cn23xx->conf) { + dev_err(&oct->pci_dev->dev, "%s No Config found for CN23XX\n", + 
__func__); + octeon_unmap_pci_barx(oct, 0); + return 1; + } + + if (oct->sriov_info.rings_per_vf > rings_per_vf) { + dev_warn(&oct->pci_dev->dev, + "num_queues:%d greater than PF configured rings_per_vf:%d. Reducing to %d.\n", + oct->sriov_info.rings_per_vf, rings_per_vf, + rings_per_vf); + oct->sriov_info.rings_per_vf = rings_per_vf; + } else { + if (rings_per_vf > num_present_cpus()) { + dev_warn(&oct->pci_dev->dev, + "PF configured rings_per_vf:%d greater than num_cpu:%d. Using rings_per_vf:%d equal to num cpus\n", + rings_per_vf, + num_present_cpus(), + num_present_cpus()); + oct->sriov_info.rings_per_vf = + num_present_cpus(); + } else { + oct->sriov_info.rings_per_vf = rings_per_vf; + } + } + + oct->fn_list.setup_iq_regs = cn23xx_setup_vf_iq_regs; + oct->fn_list.setup_oq_regs = cn23xx_setup_vf_oq_regs; + oct->fn_list.setup_mbox = cn23xx_setup_vf_mbox; + oct->fn_list.free_mbox = cn23xx_free_vf_mbox; + + oct->fn_list.msix_interrupt_handler = cn23xx_vf_msix_interrupt_handler; + + oct->fn_list.setup_device_regs = cn23xx_setup_vf_device_regs; + oct->fn_list.update_iq_read_idx = cn23xx_update_read_index; + + oct->fn_list.enable_interrupt = cn23xx_enable_vf_interrupt; + oct->fn_list.disable_interrupt = cn23xx_disable_vf_interrupt; + + oct->fn_list.enable_io_queues = cn23xx_enable_vf_io_queues; + oct->fn_list.disable_io_queues = cn23xx_disable_vf_io_queues; + + return 0; +} + +void cn23xx_dump_vf_iq_regs(struct octeon_device *oct) +{ + u32 regval, q_no; + + dev_dbg(&oct->pci_dev->dev, "SLI_IQ_DOORBELL_0 [0x%x]: 0x%016llx\n", + CN23XX_VF_SLI_IQ_DOORBELL(0), + CVM_CAST64(octeon_read_csr64( + oct, CN23XX_VF_SLI_IQ_DOORBELL(0)))); + + dev_dbg(&oct->pci_dev->dev, "SLI_IQ_BASEADDR_0 [0x%x]: 0x%016llx\n", + CN23XX_VF_SLI_IQ_BASE_ADDR64(0), + CVM_CAST64(octeon_read_csr64( + oct, CN23XX_VF_SLI_IQ_BASE_ADDR64(0)))); + + dev_dbg(&oct->pci_dev->dev, "SLI_IQ_FIFO_RSIZE_0 [0x%x]: 0x%016llx\n", + CN23XX_VF_SLI_IQ_SIZE(0), + CVM_CAST64(octeon_read_csr64(oct, CN23XX_VF_SLI_IQ_SIZE(0)))); + + for (q_no = 0; q_no < oct->sriov_info.rings_per_vf; q_no++) { + dev_dbg(&oct->pci_dev->dev, "SLI_PKT[%d]_INPUT_CTL [0x%x]: 0x%016llx\n", + q_no, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no), + CVM_CAST64(octeon_read_csr64( + oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no)))); + } + + pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, ®val); + dev_dbg(&oct->pci_dev->dev, "Config DevCtl [0x%x]: 0x%08x\n", + CN23XX_CONFIG_PCIE_DEVCTL, regval); +} diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h new file mode 100644 index 000000000000..3f98c7334957 --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h @@ -0,0 +1,50 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. + ***********************************************************************/ +/*! 
\file cn23xx_device.h + * \brief Host Driver: Routines that perform CN23XX specific operations. + */ + +#ifndef __CN23XX_VF_DEVICE_H__ +#define __CN23XX_VF_DEVICE_H__ + +#include "cn23xx_vf_regs.h" + +/* Register address and configuration for a CN23XX devices. + * If device specific changes need to be made then add a struct to include + * device specific fields as shown in the commented section + */ +struct octeon_cn23xx_vf { + struct octeon_config *conf; +}; + +#define BUSY_READING_REG_VF_LOOP_COUNT 10000 + +#define CN23XX_MAILBOX_MSGPARAM_SIZE 6 + +#define MAX_VF_IP_OP_PENDING_PKT_COUNT 100 + +void cn23xx_vf_ask_pf_to_do_flr(struct octeon_device *oct); + +int cn23xx_octeon_pfvf_handshake(struct octeon_device *oct); + +int cn23xx_setup_octeon_vf_device(struct octeon_device *oct); + +u32 cn23xx_vf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us); + +void cn23xx_dump_vf_initialized_regs(struct octeon_device *oct); +#endif diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h new file mode 100644 index 000000000000..d33dd8f4226f --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h @@ -0,0 +1,274 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. + ***********************************************************************/ +/*! \file cn23xx_vf_regs.h + * \brief Host Driver: Register Address and Register Mask values for + * Octeon CN23XX vf functions. + */ + +#ifndef __CN23XX_VF_REGS_H__ +#define __CN23XX_VF_REGS_H__ + +#define CN23XX_CONFIG_XPANSION_BAR 0x38 + +#define CN23XX_CONFIG_PCIE_CAP 0x70 +#define CN23XX_CONFIG_PCIE_DEVCAP 0x74 +#define CN23XX_CONFIG_PCIE_DEVCTL 0x78 +#define CN23XX_CONFIG_PCIE_LINKCAP 0x7C +#define CN23XX_CONFIG_PCIE_LINKCTL 0x80 +#define CN23XX_CONFIG_PCIE_SLOTCAP 0x84 +#define CN23XX_CONFIG_PCIE_SLOTCTL 0x88 + +#define CN23XX_CONFIG_PCIE_FLTMSK 0x720 + +/* The input jabber is used to determine the TSO max size. 
+ * Due to H/W limitation, this need to be reduced to 60000 + * in order to to H/W TSO and avoid the WQE malfarmation + * PKO_BUG_24989_WQE_LEN + */ +#define CN23XX_DEFAULT_INPUT_JABBER 0xEA60 /*60000*/ + +/* ############## BAR0 Registers ################ */ + +/* Each Input Queue register is at a 16-byte Offset in BAR0 */ +#define CN23XX_VF_IQ_OFFSET 0x20000 + +/*###################### REQUEST QUEUE #########################*/ + +/* 64 registers for Input Queue Instr Count - SLI_PKT_IN_DONE0_CNTS */ +#define CN23XX_VF_SLI_IQ_INSTR_COUNT_START64 0x10040 + +/* 64 registers for Input Queues Start Addr - SLI_PKT0_INSTR_BADDR */ +#define CN23XX_VF_SLI_IQ_BASE_ADDR_START64 0x10010 + +/* 64 registers for Input Doorbell - SLI_PKT0_INSTR_BAOFF_DBELL */ +#define CN23XX_VF_SLI_IQ_DOORBELL_START 0x10020 + +/* 64 registers for Input Queue size - SLI_PKT0_INSTR_FIFO_RSIZE */ +#define CN23XX_VF_SLI_IQ_SIZE_START 0x10030 + +/* 64 registers (64-bit) - ES, RO, NS, Arbitration for Input Queue Data & + * gather list fetches. SLI_PKT(0..63)_INPUT_CONTROL. + */ +#define CN23XX_VF_SLI_IQ_PKT_CONTROL_START64 0x10000 + +/*------- Request Queue Macros ---------*/ +#define CN23XX_VF_SLI_IQ_PKT_CONTROL64(iq) \ + (CN23XX_VF_SLI_IQ_PKT_CONTROL_START64 + ((iq) * CN23XX_VF_IQ_OFFSET)) + +#define CN23XX_VF_SLI_IQ_BASE_ADDR64(iq) \ + (CN23XX_VF_SLI_IQ_BASE_ADDR_START64 + ((iq) * CN23XX_VF_IQ_OFFSET)) + +#define CN23XX_VF_SLI_IQ_SIZE(iq) \ + (CN23XX_VF_SLI_IQ_SIZE_START + ((iq) * CN23XX_VF_IQ_OFFSET)) + +#define CN23XX_VF_SLI_IQ_DOORBELL(iq) \ + (CN23XX_VF_SLI_IQ_DOORBELL_START + ((iq) * CN23XX_VF_IQ_OFFSET)) + +#define CN23XX_VF_SLI_IQ_INSTR_COUNT64(iq) \ + (CN23XX_VF_SLI_IQ_INSTR_COUNT_START64 + ((iq) * CN23XX_VF_IQ_OFFSET)) + +/*------------------ Masks ----------------*/ +#define CN23XX_PKT_INPUT_CTL_VF_NUM BIT_ULL(32) +#define CN23XX_PKT_INPUT_CTL_MAC_NUM BIT(29) +/* Number of instructions to be read in one MAC read request. 
+ * setting to Max value(4) + */ +#define CN23XX_PKT_INPUT_CTL_RDSIZE (3 << 25) +#define CN23XX_PKT_INPUT_CTL_IS_64B BIT(24) +#define CN23XX_PKT_INPUT_CTL_RST BIT(23) +#define CN23XX_PKT_INPUT_CTL_QUIET BIT(28) +#define CN23XX_PKT_INPUT_CTL_RING_ENB BIT(22) +#define CN23XX_PKT_INPUT_CTL_DATA_NS BIT(8) +#define CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP BIT(6) +#define CN23XX_PKT_INPUT_CTL_DATA_RO BIT(5) +#define CN23XX_PKT_INPUT_CTL_USE_CSR BIT(4) +#define CN23XX_PKT_INPUT_CTL_GATHER_NS BIT(3) +#define CN23XX_PKT_INPUT_CTL_GATHER_ES_64B_SWAP (2) +#define CN23XX_PKT_INPUT_CTL_GATHER_RO (1) + +/** Rings per Virtual Function [RO] **/ +#define CN23XX_PKT_INPUT_CTL_RPVF_MASK (0x3F) +#define CN23XX_PKT_INPUT_CTL_RPVF_POS (48) +/* These bits[47:44][RO] give the Physical function number info within the MAC*/ +#define CN23XX_PKT_INPUT_CTL_PF_NUM_MASK (0x7) +#define CN23XX_PKT_INPUT_CTL_PF_NUM_POS (45) +/** These bits[43:32][RO] give the virtual function number info within the PF*/ +#define CN23XX_PKT_INPUT_CTL_VF_NUM_MASK (0x1FFF) +#define CN23XX_PKT_INPUT_CTL_VF_NUM_POS (32) +#define CN23XX_PKT_INPUT_CTL_MAC_NUM_MASK (0x3) +#define CN23XX_PKT_INPUT_CTL_MAC_NUM_POS (29) +#define CN23XX_PKT_IN_DONE_WMARK_MASK (0xFFFFULL) +#define CN23XX_PKT_IN_DONE_WMARK_BIT_POS (32) +#define CN23XX_PKT_IN_DONE_CNT_MASK (0x00000000FFFFFFFFULL) + +#ifdef __LITTLE_ENDIAN_BITFIELD +#define CN23XX_PKT_INPUT_CTL_MASK \ + (CN23XX_PKT_INPUT_CTL_RDSIZE \ + | CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP \ + | CN23XX_PKT_INPUT_CTL_USE_CSR) +#else +#define CN23XX_PKT_INPUT_CTL_MASK \ + (CN23XX_PKT_INPUT_CTL_RDSIZE \ + | CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP \ + | CN23XX_PKT_INPUT_CTL_USE_CSR \ + | CN23XX_PKT_INPUT_CTL_GATHER_ES_64B_SWAP) +#endif + +/** Masks for SLI_PKT_IN_DONE(0..63)_CNTS Register */ +#define CN23XX_IN_DONE_CNTS_PI_INT BIT_ULL(62) +#define CN23XX_IN_DONE_CNTS_CINT_ENB BIT_ULL(48) + +/*############################ OUTPUT QUEUE #########################*/ + +/* 64 registers for Output queue control - SLI_PKT(0..63)_OUTPUT_CONTROL */ +#define CN23XX_VF_SLI_OQ_PKT_CONTROL_START 0x10050 + +/* 64 registers for Output queue buffer and info size - SLI_PKT0_OUT_SIZE */ +#define CN23XX_VF_SLI_OQ0_BUFF_INFO_SIZE 0x10060 + +/* 64 registers for Output Queue Start Addr - SLI_PKT0_SLIST_BADDR */ +#define CN23XX_VF_SLI_OQ_BASE_ADDR_START64 0x10070 + +/* 64 registers for Output Queue Packet Credits - SLI_PKT0_SLIST_BAOFF_DBELL */ +#define CN23XX_VF_SLI_OQ_PKT_CREDITS_START 0x10080 + +/* 64 registers for Output Queue size - SLI_PKT0_SLIST_FIFO_RSIZE */ +#define CN23XX_VF_SLI_OQ_SIZE_START 0x10090 + +/* 64 registers for Output Queue Packet Count - SLI_PKT0_CNTS */ +#define CN23XX_VF_SLI_OQ_PKT_SENT_START 0x100B0 + +/* 64 registers for Output Queue INT Levels - SLI_PKT0_INT_LEVELS */ +#define CN23XX_VF_SLI_OQ_PKT_INT_LEVELS_START64 0x100A0 + +/* Each Output Queue register is at a 16-byte Offset in BAR0 */ +#define CN23XX_VF_OQ_OFFSET 0x20000 + +/*------- Output Queue Macros ---------*/ + +#define CN23XX_VF_SLI_OQ_PKT_CONTROL(oq) \ + (CN23XX_VF_SLI_OQ_PKT_CONTROL_START + ((oq) * CN23XX_VF_OQ_OFFSET)) + +#define CN23XX_VF_SLI_OQ_BASE_ADDR64(oq) \ + (CN23XX_VF_SLI_OQ_BASE_ADDR_START64 + ((oq) * CN23XX_VF_OQ_OFFSET)) + +#define CN23XX_VF_SLI_OQ_SIZE(oq) \ + (CN23XX_VF_SLI_OQ_SIZE_START + ((oq) * CN23XX_VF_OQ_OFFSET)) + +#define CN23XX_VF_SLI_OQ_BUFF_INFO_SIZE(oq) \ + (CN23XX_VF_SLI_OQ0_BUFF_INFO_SIZE + ((oq) * CN23XX_VF_OQ_OFFSET)) + +#define CN23XX_VF_SLI_OQ_PKTS_SENT(oq) \ + (CN23XX_VF_SLI_OQ_PKT_SENT_START + ((oq) * CN23XX_VF_OQ_OFFSET)) + 
+#define CN23XX_VF_SLI_OQ_PKTS_CREDIT(oq) \ + (CN23XX_VF_SLI_OQ_PKT_CREDITS_START + ((oq) * CN23XX_VF_OQ_OFFSET)) + +#define CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(oq) \ + (CN23XX_VF_SLI_OQ_PKT_INT_LEVELS_START64 + ((oq) * CN23XX_VF_OQ_OFFSET)) + +/* Macro's for accessing CNT and TIME separately from INT_LEVELS */ +#define CN23XX_VF_SLI_OQ_PKT_INT_LEVELS_CNT(oq) \ + (CN23XX_VF_SLI_OQ_PKT_INT_LEVELS_START64 + ((oq) * CN23XX_VF_OQ_OFFSET)) + +#define CN23XX_VF_SLI_OQ_PKT_INT_LEVELS_TIME(oq) \ + (CN23XX_VF_SLI_OQ_PKT_INT_LEVELS_START64 + \ + ((oq) * CN23XX_VF_OQ_OFFSET) + 4) + +/*------------------ Masks ----------------*/ +#define CN23XX_PKT_OUTPUT_CTL_TENB BIT(13) +#define CN23XX_PKT_OUTPUT_CTL_CENB BIT(12) +#define CN23XX_PKT_OUTPUT_CTL_IPTR BIT(11) +#define CN23XX_PKT_OUTPUT_CTL_ES BIT(9) +#define CN23XX_PKT_OUTPUT_CTL_NSR BIT(8) +#define CN23XX_PKT_OUTPUT_CTL_ROR BIT(7) +#define CN23XX_PKT_OUTPUT_CTL_DPTR BIT(6) +#define CN23XX_PKT_OUTPUT_CTL_BMODE BIT(5) +#define CN23XX_PKT_OUTPUT_CTL_ES_P BIT(3) +#define CN23XX_PKT_OUTPUT_CTL_NSR_P BIT(2) +#define CN23XX_PKT_OUTPUT_CTL_ROR_P BIT(1) +#define CN23XX_PKT_OUTPUT_CTL_RING_ENB BIT(0) + +/*######################### Mailbox Reg Macros ########################*/ +#define CN23XX_VF_SLI_PKT_MBOX_INT_START 0x10210 +#define CN23XX_SLI_PKT_PF_VF_MBOX_SIG_START 0x10200 + +#define CN23XX_SLI_MBOX_OFFSET 0x20000 +#define CN23XX_SLI_MBOX_SIG_IDX_OFFSET 0x8 + +#define CN23XX_VF_SLI_PKT_MBOX_INT(q) \ + (CN23XX_VF_SLI_PKT_MBOX_INT_START + ((q) * CN23XX_SLI_MBOX_OFFSET)) + +#define CN23XX_SLI_PKT_PF_VF_MBOX_SIG(q, idx) \ + (CN23XX_SLI_PKT_PF_VF_MBOX_SIG_START + \ + ((q) * CN23XX_SLI_MBOX_OFFSET + \ + (idx) * CN23XX_SLI_MBOX_SIG_IDX_OFFSET)) + +/*######################## INTERRUPTS #########################*/ + +#define CN23XX_VF_SLI_INT_SUM_START 0x100D0 + +#define CN23XX_VF_SLI_INT_SUM(q) \ + (CN23XX_VF_SLI_INT_SUM_START + ((q) * CN23XX_VF_IQ_OFFSET)) + +/*------------------ Interrupt Masks ----------------*/ + +#define CN23XX_INTR_PO_INT BIT_ULL(63) +#define CN23XX_INTR_PI_INT BIT_ULL(62) +#define CN23XX_INTR_MBOX_INT BIT_ULL(61) +#define CN23XX_INTR_RESEND BIT_ULL(60) + +#define CN23XX_INTR_CINT_ENB BIT_ULL(48) +#define CN23XX_INTR_MBOX_ENB BIT(0) + +/*############################ MIO #########################*/ +#define CN23XX_MIO_PTP_CLOCK_CFG 0x0001070000000f00ULL +#define CN23XX_MIO_PTP_CLOCK_LO 0x0001070000000f08ULL +#define CN23XX_MIO_PTP_CLOCK_HI 0x0001070000000f10ULL +#define CN23XX_MIO_PTP_CLOCK_COMP 0x0001070000000f18ULL +#define CN23XX_MIO_PTP_TIMESTAMP 0x0001070000000f20ULL +#define CN23XX_MIO_PTP_EVT_CNT 0x0001070000000f28ULL +#define CN23XX_MIO_PTP_CKOUT_THRESH_LO 0x0001070000000f30ULL +#define CN23XX_MIO_PTP_CKOUT_THRESH_HI 0x0001070000000f38ULL +#define CN23XX_MIO_PTP_CKOUT_HI_INCR 0x0001070000000f40ULL +#define CN23XX_MIO_PTP_CKOUT_LO_INCR 0x0001070000000f48ULL +#define CN23XX_MIO_PTP_PPS_THRESH_LO 0x0001070000000f50ULL +#define CN23XX_MIO_PTP_PPS_THRESH_HI 0x0001070000000f58ULL +#define CN23XX_MIO_PTP_PPS_HI_INCR 0x0001070000000f60ULL +#define CN23XX_MIO_PTP_PPS_LO_INCR 0x0001070000000f68ULL + +/*############################ RST #########################*/ +#define CN23XX_RST_BOOT 0x0001180006001600ULL + +/*######################## MSIX TABLE #########################*/ + +#define CN23XX_MSIX_TABLE_ADDR_START 0x0 +#define CN23XX_MSIX_TABLE_DATA_START 0x8 + +#define CN23XX_MSIX_TABLE_SIZE 0x10 +#define CN23XX_MSIX_TABLE_ENTRIES 0x41 + +#define CN23XX_MSIX_ENTRY_VECTOR_CTL BIT_ULL(32) + +#define CN23XX_MSIX_TABLE_ADDR(idx) \ + 
(CN23XX_MSIX_TABLE_ADDR_START + ((idx) * CN23XX_MSIX_TABLE_SIZE)) + +#define CN23XX_MSIX_TABLE_DATA(idx) \ + (CN23XX_MSIX_TABLE_DATA_START + ((idx) * CN23XX_MSIX_TABLE_SIZE)) + +#endif diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c index e779af88621b..bdec051107a6 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c @@ -1,24 +1,20 @@ /********************************************************************** -* Author: Cavium, Inc. -* -* Contact: support@cavium.com -* Please include "LiquidIO" in the subject. -* -* Copyright (c) 2003-2015 Cavium, Inc. -* -* This file is free software; you can redistribute it and/or modify -* it under the terms of the GNU General Public License, Version 2, as -* published by the Free Software Foundation. -* -* This file is distributed in the hope that it will be useful, but -* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty -* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or -* NONINFRINGEMENT. See the GNU General Public License for more -* details. -* -* This file may also be available under a different license from Cavium. -* Contact Cavium, Inc. for more information -**********************************************************************/ + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. + ***********************************************************************/ #include <linux/pci.h> #include <linux/netdevice.h> #include "liquidio_common.h" @@ -275,7 +271,6 @@ void lio_cn6xxx_setup_iq_regs(struct octeon_device *oct, u32 iq_no) { struct octeon_instr_queue *iq = oct->instr_queue[iq_no]; - /* Disable Packet-by-Packet mode; No Parse Mode or Skip length */ octeon_write_csr64(oct, CN6XXX_SLI_IQ_PKT_INSTR_HDR64(iq_no), 0); /* Write the start of the input queue's ring and its size */ @@ -378,7 +373,7 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct) /* Reset the doorbell register for each Input queue. */ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { - if (!(oct->io_qmask.iq & (1ULL << i))) + if (!(oct->io_qmask.iq & BIT_ULL(i))) continue; octeon_write_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i), 0xFFFFFFFF); d32 = octeon_read_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i)); @@ -400,9 +395,8 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct) ; /* Reset the doorbell register for each Output queue. 
*/ - /* for (i = 0; i < oct->num_oqs; i++) { */ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { - if (!(oct->io_qmask.oq & (1ULL << i))) + if (!(oct->io_qmask.oq & BIT_ULL(i))) continue; octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i), 0xFFFFFFFF); d32 = octeon_read_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i)); @@ -537,15 +531,14 @@ static int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct) oct->droq_intr = 0; - /* for (oq_no = 0; oq_no < oct->num_oqs; oq_no++) { */ for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct); oq_no++) { - if (!(droq_mask & (1ULL << oq_no))) + if (!(droq_mask & BIT_ULL(oq_no))) continue; droq = oct->droq[oq_no]; pkt_count = octeon_droq_check_hw_for_pkts(droq); if (pkt_count) { - oct->droq_intr |= (1ULL << oq_no); + oct->droq_intr |= BIT_ULL(oq_no); if (droq->ops.poll_mode) { u32 value; u32 reg; @@ -721,8 +714,6 @@ int lio_setup_cn66xx_octeon_device(struct octeon_device *oct) int lio_validate_cn6xxx_config_info(struct octeon_device *oct, struct octeon_config *conf6xxx) { - /* int total_instrs = 0; */ - if (CFG_GET_IQ_MAX_Q(conf6xxx) > CN6XXX_MAX_INPUT_QUEUES) { dev_err(&oct->pci_dev->dev, "%s: Num IQ (%d) exceeds Max (%d)\n", __func__, CFG_GET_IQ_MAX_Q(conf6xxx), diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h index a40a91394079..8ed57134ee0c 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h +++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h @@ -1,25 +1,20 @@ /********************************************************************** -* Author: Cavium, Inc. -* -* Contact: support@cavium.com -* Please include "LiquidIO" in the subject. -* -* Copyright (c) 2003-2015 Cavium, Inc. -* -* This file is free software; you can redistribute it and/or modify -* it under the terms of the GNU General Public License, Version 2, as -* published by the Free Software Foundation. -* -* This file is distributed in the hope that it will be useful, but -* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty -* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or -* NONINFRINGEMENT. See the GNU General Public License for more -* details. -* -* This file may also be available under a different license from Cavium. -* Contact Cavium, Inc. for more information -**********************************************************************/ - + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. + ***********************************************************************/ /*! \file cn66xx_device.h * \brief Host Driver: Routines that perform CN66XX specific operations. 
*/ @@ -96,8 +91,8 @@ void lio_cn6xxx_setup_reg_address(struct octeon_device *oct, void *chip, struct octeon_reg_list *reg_list); u32 lio_cn6xxx_coprocessor_clock(struct octeon_device *oct); u32 lio_cn6xxx_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us); -int lio_setup_cn66xx_octeon_device(struct octeon_device *); +int lio_setup_cn66xx_octeon_device(struct octeon_device *oct); int lio_validate_cn6xxx_config_info(struct octeon_device *oct, - struct octeon_config *); + struct octeon_config *conf6xxx); #endif diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h index 5e3aff242ad3..b248966837b4 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h +++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h @@ -1,25 +1,20 @@ /********************************************************************** -* Author: Cavium, Inc. -* -* Contact: support@cavium.com -* Please include "LiquidIO" in the subject. -* -* Copyright (c) 2003-2015 Cavium, Inc. -* -* This file is free software; you can redistribute it and/or modify -* it under the terms of the GNU General Public License, Version 2, as -* published by the Free Software Foundation. -* -* This file is distributed in the hope that it will be useful, but -* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty -* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or -* NONINFRINGEMENT. See the GNU General Public License for more -* details. -* -* This file may also be available under a different license from Cavium. -* Contact Cavium, Inc. for more information -**********************************************************************/ - + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. + ***********************************************************************/ /*! \file cn66xx_regs.h * \brief Host Driver: Register Address and Register Mask values for * Octeon CN66XX devices. 
@@ -443,10 +438,10 @@ #define CN6XXX_SLI_S2M_PORT0_CTL 0x3D80 #define CN6XXX_SLI_S2M_PORT1_CTL 0x3D90 #define CN6XXX_SLI_S2M_PORTX_CTL(port) \ - (CN6XXX_SLI_S2M_PORT0_CTL + (port * 0x10)) + (CN6XXX_SLI_S2M_PORT0_CTL + ((port) * 0x10)) #define CN6XXX_SLI_INT_ENB64(port) \ - (CN6XXX_SLI_INT_ENB64_PORT0 + (port * 0x10)) + (CN6XXX_SLI_INT_ENB64_PORT0 + ((port) * 0x10)) #define CN6XXX_SLI_MAC_NUMBER 0x3E00 @@ -458,7 +453,7 @@ #define CN6XXX_PCI_BAR1_OFFSET 0x8 #define CN6XXX_BAR1_REG(idx, port) \ - (CN6XXX_BAR1_INDEX_START + (port * CN6XXX_PEM_OFFSET) + \ + (CN6XXX_BAR1_INDEX_START + ((port) * CN6XXX_PEM_OFFSET) + \ (CN6XXX_PCI_BAR1_OFFSET * (idx))) /*############################ DPI #########################*/ @@ -476,17 +471,17 @@ #define CN6XXX_DPI_DMA_ENG0_ENB 0x0001df0000000080ULL #define CN6XXX_DPI_DMA_ENG_ENB(q_no) \ - (CN6XXX_DPI_DMA_ENG0_ENB + (q_no * 8)) + (CN6XXX_DPI_DMA_ENG0_ENB + ((q_no) * 8)) #define CN6XXX_DPI_DMA_ENG0_BUF 0x0001df0000000880ULL #define CN6XXX_DPI_DMA_ENG_BUF(q_no) \ - (CN6XXX_DPI_DMA_ENG0_BUF + (q_no * 8)) + (CN6XXX_DPI_DMA_ENG0_BUF + ((q_no) * 8)) #define CN6XXX_DPI_SLI_PRT0_CFG 0x0001df0000000900ULL #define CN6XXX_DPI_SLI_PRT1_CFG 0x0001df0000000908ULL #define CN6XXX_DPI_SLI_PRTX_CFG(port) \ - (CN6XXX_DPI_SLI_PRT0_CFG + (port * 0x10)) + (CN6XXX_DPI_SLI_PRT0_CFG + ((port) * 0x10)) #define CN6XXX_DPI_DMA_COMMIT_MODE BIT_ULL(58) #define CN6XXX_DPI_DMA_PKT_HP BIT_ULL(57) diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c index dbf3566ead53..50b533ff58e6 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c @@ -1,24 +1,20 @@ /********************************************************************** -* Author: Cavium, Inc. -* -* Contact: support@cavium.com -* Please include "LiquidIO" in the subject. -* -* Copyright (c) 2003-2015 Cavium, Inc. -* -* This file is free software; you can redistribute it and/or modify -* it under the terms of the GNU General Public License, Version 2, as -* published by the Free Software Foundation. -* -* This file is distributed in the hope that it will be useful, but -* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty -* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or -* NONINFRINGEMENT. See the GNU General Public License for more -* details. -* -* This file may also be available under a different license from Cavium. -* Contact Cavium, Inc. for more information -**********************************************************************/ + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. 
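
The parenthesization fixes in the register macros above (port becoming (port), q_no becoming (q_no)) guard against operator-precedence surprises when a caller passes an expression rather than a plain variable. A self-contained illustration, using hypothetical REG_BAD/REG_GOOD macros that are not part of the driver:

/* Without parentheses, an argument like p + 1 expands with the wrong
 * precedence: multiplication binds tighter than the caller's addition.
 */
#define REG_BAD(port)	(0x3D80 + port * 0x10)
#define REG_GOOD(port)	(0x3D80 + ((port) * 0x10))

/* REG_BAD(p + 1)  expands to (0x3D80 + p + 1 * 0x10)     -- off target
 * REG_GOOD(p + 1) expands to (0x3D80 + ((p + 1) * 0x10)) -- intended
 */
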
+ ***********************************************************************/ #include <linux/pci.h> #include <linux/netdevice.h> #include "liquidio_common.h" @@ -76,7 +72,7 @@ static void lio_cn68xx_setup_pkt_ctl_regs(struct octeon_device *oct) pktctl = octeon_read_csr64(oct, CN6XXX_SLI_PKT_CTL); /* 68XX specific */ - max_oqs = CFG_GET_OQ_MAX_Q(CHIP_FIELD(oct, cn6xxx, conf)); + max_oqs = CFG_GET_OQ_MAX_Q(CHIP_CONF(oct, cn6xxx)); tx_pipe = octeon_read_csr64(oct, CN68XX_SLI_TX_PIPE); tx_pipe &= 0xffffffffff00ffffULL; /* clear out NUMP field */ tx_pipe |= max_oqs << 16; /* put max_oqs in NUMP field */ diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h index ea7bdcce6044..66b8d6bf5ec4 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h +++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h @@ -1,25 +1,20 @@ /********************************************************************** -* Author: Cavium, Inc. -* -* Contact: support@cavium.com -* Please include "LiquidIO" in the subject. -* -* Copyright (c) 2003-2015 Cavium, Inc. -* -* This file is free software; you can redistribute it and/or modify -* it under the terms of the GNU General Public License, Version 2, as -* published by the Free Software Foundation. -* -* This file is distributed in the hope that it will be useful, but -* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty -* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or -* NONINFRINGEMENT. See the GNU General Public License for more -* details. -* -* This file may also be available under a different license from Cavium. -* Contact Cavium, Inc. for more information -**********************************************************************/ - + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. + ***********************************************************************/ /*! \file cn68xx_device.h * \brief Host Driver: Routines that perform CN68XX specific operations. */ diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h b/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h index d45a0f4aaf1f..0b742f09e49d 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h +++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h @@ -1,25 +1,20 @@ /********************************************************************** -* Author: Cavium, Inc. -* -* Contact: support@cavium.com -* Please include "LiquidIO" in the subject. -* -* Copyright (c) 2003-2015 Cavium, Inc. -* -* This file is free software; you can redistribute it and/or modify -* it under the terms of the GNU General Public License, Version 2, as -* published by the Free Software Foundation. -* -* This file is distributed in the hope that it will be useful, but -* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty -* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or -* NONINFRINGEMENT. 
See the GNU General Public License for more -* details. -* -* This file may also be available under a different license from Cavium. -* Contact Cavium, Inc. for more information -**********************************************************************/ - + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. + ***********************************************************************/ /*! \file cn68xx_regs.h * \brief Host Driver: Register Address and Register Mask values for * Octeon CN68XX devices. The register map for CN66XX is the same diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c index 201eddb3013a..f629c2fe04a4 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_core.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c @@ -1,24 +1,20 @@ /********************************************************************** -* Author: Cavium, Inc. -* -* Contact: support@cavium.com -* Please include "LiquidIO" in the subject. -* -* Copyright (c) 2003-2015 Cavium, Inc. -* -* This file is free software; you can redistribute it and/or modify -* it under the terms of the GNU General Public License, Version 2, as -* published by the Free Software Foundation. -* -* This file is distributed in the hope that it will be useful, but -* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty -* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or -* NONINFRINGEMENT. See the GNU General Public License for more -* details. -* -* This file may also be available under a different license from Cavium. -* Contact Cavium, Inc. for more information -**********************************************************************/ + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. + ***********************************************************************/ #include <linux/pci.h> #include <linux/if_vlan.h> #include "liquidio_common.h" @@ -89,13 +85,6 @@ void octeon_update_tx_completion_counters(void *buf, int reqtype, } (*pkts_compl)++; -/*TODO, Use some other pound define to suggest - * the fact that iqs are not tied to netdevs - * and can take traffic from different netdevs - * hence bql reporting is done per packet - * than in bulk. 
Usage of NO_NAPI in txq completion is - * a little confusing - */ *bytes_compl += skb->len; } @@ -264,3 +253,34 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr) nctrl->ncmd.s.cmd); } } + +void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac) +{ + bool macaddr_changed = false; + struct net_device *netdev; + struct lio *lio; + + rtnl_lock(); + + netdev = oct->props[0].netdev; + lio = GET_LIO(netdev); + + lio->linfo.macaddr_is_admin_asgnd = true; + + if (!ether_addr_equal(netdev->dev_addr, mac)) { + macaddr_changed = true; + ether_addr_copy(netdev->dev_addr, mac); + ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, mac); + call_netdevice_notifiers(NETDEV_CHANGEADDR, netdev); + } + + rtnl_unlock(); + + if (macaddr_changed) + dev_info(&oct->pci_dev->dev, + "PF changed VF's MAC address to %pM\n", mac); + + /* no need to notify the firmware of the macaddr change because + * the PF did that already + */ +} diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c index f163e0abbeb2..b00c3002360e 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c @@ -1,24 +1,20 @@ /********************************************************************** -* Author: Cavium, Inc. -* -* Contact: support@cavium.com -* Please include "LiquidIO" in the subject. -* -* Copyright (c) 2003-2015 Cavium, Inc. -* -* This file is free software; you can redistribute it and/or modify -* it under the terms of the GNU General Public License, Version 2, as -* published by the Free Software Foundation. -* -* This file is distributed in the hope that it will be useful, but -* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty -* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or -* NONINFRINGEMENT. See the GNU General Public License for more -* details. -* -* This file may also be available under a different license from Cavium. -* Contact Cavium, Inc. for more information -**********************************************************************/ + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. 
+ ***********************************************************************/ #include <linux/netdevice.h> #include <linux/net_tstamp.h> #include <linux/pci.h> @@ -33,6 +29,7 @@ #include "cn66xx_regs.h" #include "cn66xx_device.h" #include "cn23xx_pf_device.h" +#include "cn23xx_vf_device.h" static int octnet_get_link_stats(struct net_device *netdev); @@ -74,9 +71,9 @@ enum { INTERFACE_MODE_MIXED, }; -#define ARRAY_LENGTH(a) (sizeof(a) / sizeof((a)[0])) #define OCT_ETHTOOL_REGDUMP_LEN 4096 #define OCT_ETHTOOL_REGDUMP_LEN_23XX (4096 * 11) +#define OCT_ETHTOOL_REGDUMP_LEN_23XX_VF (4096 * 2) #define OCT_ETHTOOL_REGSVER 1 /* statistics of PF */ @@ -87,9 +84,9 @@ static const char oct_stats_strings[][ETH_GSTRING_LEN] = { "tx_bytes", "rx_errors", /*jabber_err+l2_err+frame_err */ "tx_errors", /*fw_err_pko+fw_err_link+fw_err_drop */ - "rx_dropped", /*st->fromwire.total_rcvd - st->fromwire.fw_total_rcvd - *+st->fromwire.dmac_drop + st->fromwire.fw_err_drop - */ + "rx_dropped", /*st->fromwire.total_rcvd - st->fromwire.fw_total_rcvd + + *st->fromwire.dmac_drop + st->fromwire.fw_err_drop + */ "tx_dropped", "tx_total_sent", @@ -152,6 +149,19 @@ static const char oct_stats_strings[][ETH_GSTRING_LEN] = { "link_state_changes", }; +/* statistics of VF */ +static const char oct_vf_stats_strings[][ETH_GSTRING_LEN] = { + "rx_packets", + "tx_packets", + "rx_bytes", + "tx_bytes", + "rx_errors", /* jabber_err + l2_err+frame_err */ + "tx_errors", /* fw_err_pko + fw_err_link+fw_err_drop */ + "rx_dropped", /* total_rcvd - fw_total_rcvd + dmac_drop + fw_err_drop */ + "tx_dropped", + "link_state_changes", +}; + /* statistics of host tx queue */ static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = { "packets", /*oct->instr_queue[iq_no]->stats.tx_done*/ @@ -197,25 +207,28 @@ static const char oct_priv_flags_strings[][ETH_GSTRING_LEN] = { #define OCTNIC_NCMD_AUTONEG_ON 0x1 #define OCTNIC_NCMD_PHY_ON 0x2 -static int lio_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) +static int lio_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *ecmd) { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; struct oct_link_info *linfo; + u32 supported, advertising; linfo = &lio->linfo; if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI || linfo->link.s.if_mode == INTERFACE_MODE_RXAUI || linfo->link.s.if_mode == INTERFACE_MODE_XFI) { - ecmd->port = PORT_FIBRE; - ecmd->supported = - (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE | - SUPPORTED_Pause); - ecmd->advertising = - (ADVERTISED_10000baseT_Full | ADVERTISED_Pause); - ecmd->transceiver = XCVR_EXTERNAL; - ecmd->autoneg = AUTONEG_DISABLE; + ecmd->base.port = PORT_FIBRE; + supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE | + SUPPORTED_Pause); + advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_Pause); + ethtool_convert_legacy_u32_to_link_mode( + ecmd->link_modes.supported, supported); + ethtool_convert_legacy_u32_to_link_mode( + ecmd->link_modes.advertising, advertising); + ecmd->base.autoneg = AUTONEG_DISABLE; } else { dev_err(&oct->pci_dev->dev, "Unknown link interface reported %d\n", @@ -223,11 +236,11 @@ static int lio_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) } if (linfo->link.s.link_up) { - ethtool_cmd_speed_set(ecmd, linfo->link.s.speed); - ecmd->duplex = linfo->link.s.duplex; + ecmd->base.speed = linfo->link.s.speed; + ecmd->base.duplex = linfo->link.s.duplex; } else { - ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); - ecmd->duplex = DUPLEX_UNKNOWN; + ecmd->base.speed = 
SPEED_UNKNOWN; + ecmd->base.duplex = DUPLEX_UNKNOWN; } return 0; @@ -251,6 +264,23 @@ lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) } static void +lio_get_vf_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) +{ + struct octeon_device *oct; + struct lio *lio; + + lio = GET_LIO(netdev); + oct = lio->oct_dev; + + memset(drvinfo, 0, sizeof(struct ethtool_drvinfo)); + strcpy(drvinfo->driver, "liquidio_vf"); + strcpy(drvinfo->version, LIQUIDIO_VERSION); + strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version, + ETHTOOL_FWVERS_LEN); + strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32); +} + +static void lio_ethtool_get_channels(struct net_device *dev, struct ethtool_channels *channel) { @@ -259,14 +289,14 @@ lio_ethtool_get_channels(struct net_device *dev, u32 max_rx = 0, max_tx = 0, tx_count = 0, rx_count = 0; if (OCTEON_CN6XXX(oct)) { - struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf); + struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx); max_rx = CFG_GET_OQ_MAX_Q(conf6x); max_tx = CFG_GET_IQ_MAX_Q(conf6x); rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx); tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx); } else if (OCTEON_CN23XX_PF(oct)) { - struct octeon_config *conf23 = CHIP_FIELD(oct, cn23xx_pf, conf); + struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_pf); max_rx = CFG_GET_OQ_MAX_Q(conf23); max_tx = CFG_GET_IQ_MAX_Q(conf23); @@ -589,14 +619,14 @@ lio_ethtool_get_ringparam(struct net_device *netdev, rx_pending = 0; if (OCTEON_CN6XXX(oct)) { - struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf); + struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx); tx_max_pending = CN6XXX_MAX_IQ_DESCRIPTORS; rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS; rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx); tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx); } else if (OCTEON_CN23XX_PF(oct)) { - struct octeon_config *conf23 = CHIP_FIELD(oct, cn23xx_pf, conf); + struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_pf); tx_max_pending = CN23XX_MAX_IQ_DESCRIPTORS; rx_max_pending = CN23XX_MAX_OQ_DESCRIPTORS; @@ -757,9 +787,6 @@ lio_get_ethtool_stats(struct net_device *netdev, /*sum of oct->instr_queue[iq_no]->stats.tx_dropped */ data[i++] = CVM_CAST64(netstats->tx_dropped); - /*data[i++] = CVM_CAST64(stats->multicast); */ - /*data[i++] = CVM_CAST64(stats->collisions); */ - /* firmware tx stats */ /*per_core_stats[cvmx_get_core_num()].link_stats[mdata->from_ifidx]. 
*fromhost.fw_total_sent @@ -910,9 +937,8 @@ lio_get_ethtool_stats(struct net_device *netdev, /*lio->link_changes*/ data[i++] = CVM_CAST64(lio->link_changes); - /* TX -- lio_update_stats(lio); */ for (j = 0; j < MAX_OCTEON_INSTR_QUEUES(oct_dev); j++) { - if (!(oct_dev->io_qmask.iq & (1ULL << j))) + if (!(oct_dev->io_qmask.iq & BIT_ULL(j))) continue; /*packets to network port*/ /*# of packets tx to network */ @@ -954,9 +980,8 @@ lio_get_ethtool_stats(struct net_device *netdev, } /* RX */ - /* for (j = 0; j < oct_dev->num_oqs; j++) { */ for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); j++) { - if (!(oct_dev->io_qmask.oq & (1ULL << j))) + if (!(oct_dev->io_qmask.oq & BIT_ULL(j))) continue; /*packets send to TCP/IP network stack */ @@ -992,6 +1017,109 @@ lio_get_ethtool_stats(struct net_device *netdev, } } +static void lio_vf_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats + __attribute__((unused)), + u64 *data) +{ + struct net_device_stats *netstats = &netdev->stats; + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct_dev = lio->oct_dev; + int i = 0, j, vj; + + netdev->netdev_ops->ndo_get_stats(netdev); + /* sum of oct->droq[oq_no]->stats->rx_pkts_received */ + data[i++] = CVM_CAST64(netstats->rx_packets); + /* sum of oct->instr_queue[iq_no]->stats.tx_done */ + data[i++] = CVM_CAST64(netstats->tx_packets); + /* sum of oct->droq[oq_no]->stats->rx_bytes_received */ + data[i++] = CVM_CAST64(netstats->rx_bytes); + /* sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */ + data[i++] = CVM_CAST64(netstats->tx_bytes); + data[i++] = CVM_CAST64(netstats->rx_errors); + data[i++] = CVM_CAST64(netstats->tx_errors); + /* sum of oct->droq[oq_no]->stats->rx_dropped + + * oct->droq[oq_no]->stats->dropped_nodispatch + + * oct->droq[oq_no]->stats->dropped_toomany + + * oct->droq[oq_no]->stats->dropped_nomem + */ + data[i++] = CVM_CAST64(netstats->rx_dropped); + /* sum of oct->instr_queue[iq_no]->stats.tx_dropped */ + data[i++] = CVM_CAST64(netstats->tx_dropped); + /* lio->link_changes */ + data[i++] = CVM_CAST64(lio->link_changes); + + for (vj = 0; vj < lio->linfo.num_txpciq; vj++) { + j = lio->linfo.txpciq[vj].s.q_no; + + /* packets to network port */ + /* # of packets tx to network */ + data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done); + /* # of bytes tx to network */ + data[i++] = CVM_CAST64( + oct_dev->instr_queue[j]->stats.tx_tot_bytes); + /* # of packets dropped */ + data[i++] = CVM_CAST64( + oct_dev->instr_queue[j]->stats.tx_dropped); + /* # of tx fails due to queue full */ + data[i++] = CVM_CAST64( + oct_dev->instr_queue[j]->stats.tx_iq_busy); + /* XXX gather entries sent */ + data[i++] = CVM_CAST64( + oct_dev->instr_queue[j]->stats.sgentry_sent); + + /* instruction to firmware: data and control */ + /* # of instructions to the queue */ + data[i++] = CVM_CAST64( + oct_dev->instr_queue[j]->stats.instr_posted); + /* # of instructions processed */ + data[i++] = + CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_processed); + /* # of instructions could not be processed */ + data[i++] = + CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_dropped); + /* bytes sent through the queue */ + data[i++] = CVM_CAST64( + oct_dev->instr_queue[j]->stats.bytes_sent); + /* tso request */ + data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso); + /* vxlan request */ + data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan); + /* txq restart */ + data[i++] = CVM_CAST64( + oct_dev->instr_queue[j]->stats.tx_restart); + } + + /* RX */ + for (vj = 0; vj < 
lio->linfo.num_rxpciq; vj++) { + j = lio->linfo.rxpciq[vj].s.q_no; + + /* packets send to TCP/IP network stack */ + /* # of packets to network stack */ + data[i++] = CVM_CAST64( + oct_dev->droq[j]->stats.rx_pkts_received); + /* # of bytes to network stack */ + data[i++] = CVM_CAST64( + oct_dev->droq[j]->stats.rx_bytes_received); + data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem + + oct_dev->droq[j]->stats.dropped_toomany + + oct_dev->droq[j]->stats.rx_dropped); + data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem); + data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany); + data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped); + + /* control and data path */ + data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.pkts_received); + data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.bytes_received); + data[i++] = + CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch); + + data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan); + data[i++] = + CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure); + } +} + static void lio_get_priv_flags_strings(struct lio *lio, u8 *data) { struct octeon_device *oct_dev = lio->oct_dev; @@ -999,6 +1127,7 @@ static void lio_get_priv_flags_strings(struct lio *lio, u8 *data) switch (oct_dev->chip_id) { case OCTEON_CN23XX_PF_VID: + case OCTEON_CN23XX_VF_VID: for (i = 0; i < ARRAY_SIZE(oct_priv_flags_strings); i++) { sprintf(data, "%s", oct_priv_flags_strings[i]); data += ETH_GSTRING_LEN; @@ -1030,7 +1159,55 @@ static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data) num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings); for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) { - if (!(oct_dev->io_qmask.iq & (1ULL << i))) + if (!(oct_dev->io_qmask.iq & BIT_ULL(i))) + continue; + for (j = 0; j < num_iq_stats; j++) { + sprintf(data, "tx-%d-%s", i, + oct_iq_stats_strings[j]); + data += ETH_GSTRING_LEN; + } + } + + num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings); + for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) { + if (!(oct_dev->io_qmask.oq & BIT_ULL(i))) + continue; + for (j = 0; j < num_oq_stats; j++) { + sprintf(data, "rx-%d-%s", i, + oct_droq_stats_strings[j]); + data += ETH_GSTRING_LEN; + } + } + break; + + case ETH_SS_PRIV_FLAGS: + lio_get_priv_flags_strings(lio, data); + break; + default: + netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n"); + break; + } +} + +static void lio_vf_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + int num_iq_stats, num_oq_stats, i, j; + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct_dev = lio->oct_dev; + int num_stats; + + switch (stringset) { + case ETH_SS_STATS: + num_stats = ARRAY_SIZE(oct_vf_stats_strings); + for (j = 0; j < num_stats; j++) { + sprintf(data, "%s", oct_vf_stats_strings[j]); + data += ETH_GSTRING_LEN; + } + + num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings); + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) { + if (!(oct_dev->io_qmask.iq & BIT_ULL(i))) continue; for (j = 0; j < num_iq_stats; j++) { sprintf(data, "tx-%d-%s", i, @@ -1040,9 +1217,8 @@ static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data) } num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings); - /* for (i = 0; i < oct_dev->num_oqs; i++) { */ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) { - if (!(oct_dev->io_qmask.oq & (1ULL << i))) + if (!(oct_dev->io_qmask.oq & BIT_ULL(i))) continue; for (j = 0; j < num_oq_stats; j++) { sprintf(data, "rx-%d-%s", i, @@ -1067,6 +1243,7 @@ static int 
lio_get_priv_flags_ss_count(struct lio *lio) switch (oct_dev->chip_id) { case OCTEON_CN23XX_PF_VID: + case OCTEON_CN23XX_VF_VID: return ARRAY_SIZE(oct_priv_flags_strings); case OCTEON_CN68XX: case OCTEON_CN66XX: @@ -1094,6 +1271,23 @@ static int lio_get_sset_count(struct net_device *netdev, int sset) } } +static int lio_vf_get_sset_count(struct net_device *netdev, int sset) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct_dev = lio->oct_dev; + + switch (sset) { + case ETH_SS_STATS: + return (ARRAY_SIZE(oct_vf_stats_strings) + + ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs + + ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs); + case ETH_SS_PRIV_FLAGS: + return lio_get_priv_flags_ss_count(lio); + default: + return -EOPNOTSUPP; + } +} + static int lio_get_intr_coalesce(struct net_device *netdev, struct ethtool_coalesce *intr_coal) { @@ -1106,6 +1300,7 @@ static int lio_get_intr_coalesce(struct net_device *netdev, switch (oct->chip_id) { case OCTEON_CN23XX_PF_VID: + case OCTEON_CN23XX_VF_VID: if (!intrmod_cfg->rx_enable) { intr_coal->rx_coalesce_usecs = intrmod_cfg->rx_usecs; intr_coal->rx_max_coalesced_frames = @@ -1152,7 +1347,7 @@ static int lio_get_intr_coalesce(struct net_device *netdev, intr_coal->rx_max_coalesced_frames_low = intrmod_cfg->rx_mincnt_trigger; } - if (OCTEON_CN23XX_PF(oct) && + if ((OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) && (intrmod_cfg->tx_enable)) { intr_coal->use_adaptive_tx_coalesce = intrmod_cfg->tx_enable; intr_coal->tx_max_coalesced_frames_high = @@ -1510,6 +1705,26 @@ oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal) oct->intrmod.rx_frames = rx_max_coalesced_frames; break; } + case OCTEON_CN23XX_VF_VID: { + int q_no; + + if (!intr_coal->rx_max_coalesced_frames) + rx_max_coalesced_frames = oct->intrmod.rx_frames; + else + rx_max_coalesced_frames = + intr_coal->rx_max_coalesced_frames; + for (q_no = 0; q_no < oct->num_oqs; q_no++) { + octeon_write_csr64( + oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no), + (octeon_read_csr64( + oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no)) & + (0x3fffff00000000UL)) | + rx_max_coalesced_frames); + /* consider writing to resend bit here */ + } + oct->intrmod.rx_frames = rx_max_coalesced_frames; + break; + } default: return -EINVAL; } @@ -1563,6 +1778,27 @@ static int oct_cfg_rx_intrtime(struct lio *lio, oct->intrmod.rx_usecs = rx_coalesce_usecs; break; } + case OCTEON_CN23XX_VF_VID: { + u64 time_threshold; + int q_no; + + if (!intr_coal->rx_coalesce_usecs) + rx_coalesce_usecs = oct->intrmod.rx_usecs; + else + rx_coalesce_usecs = intr_coal->rx_coalesce_usecs; + + time_threshold = + cn23xx_vf_get_oq_ticks(oct, (u32)rx_coalesce_usecs); + for (q_no = 0; q_no < oct->num_oqs; q_no++) { + octeon_write_csr64( + oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no), + (oct->intrmod.rx_frames | + (time_threshold << 32))); + /* consider setting resend bit */ + } + oct->intrmod.rx_usecs = rx_coalesce_usecs; + break; + } default: return -EINVAL; } @@ -1584,6 +1820,7 @@ oct_cfg_tx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal case OCTEON_CN68XX: case OCTEON_CN66XX: break; + case OCTEON_CN23XX_VF_VID: case OCTEON_CN23XX_PF_VID: { int q_no; @@ -1642,6 +1879,7 @@ static int lio_set_intr_coalesce(struct net_device *netdev, } break; case OCTEON_CN23XX_PF_VID: + case OCTEON_CN23XX_VF_VID: break; default: return -EINVAL; @@ -1704,86 +1942,6 @@ static int lio_get_ts_info(struct net_device *netdev, return 0; } -static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) -{ - 
struct lio *lio = GET_LIO(netdev); - struct octeon_device *oct = lio->oct_dev; - struct oct_link_info *linfo; - struct octnic_ctrl_pkt nctrl; - int ret = 0; - - /* get the link info */ - linfo = &lio->linfo; - - if (ecmd->autoneg != AUTONEG_ENABLE && ecmd->autoneg != AUTONEG_DISABLE) - return -EINVAL; - - if (ecmd->autoneg == AUTONEG_DISABLE && ((ecmd->speed != SPEED_100 && - ecmd->speed != SPEED_10) || - (ecmd->duplex != DUPLEX_HALF && - ecmd->duplex != DUPLEX_FULL))) - return -EINVAL; - - /* Ethtool Support is not provided for XAUI, RXAUI, and XFI Interfaces - * as they operate at fixed Speed and Duplex settings - */ - if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI || - linfo->link.s.if_mode == INTERFACE_MODE_RXAUI || - linfo->link.s.if_mode == INTERFACE_MODE_XFI) { - dev_info(&oct->pci_dev->dev, - "Autonegotiation, duplex and speed settings cannot be modified.\n"); - return -EINVAL; - } - - memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); - - nctrl.ncmd.u64 = 0; - nctrl.ncmd.s.cmd = OCTNET_CMD_SET_SETTINGS; - nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.wait_time = 1000; - nctrl.netpndev = (u64)netdev; - nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; - - /* Passing the parameters sent by ethtool like Speed, Autoneg & Duplex - * to SE core application using ncmd.s.more & ncmd.s.param - */ - if (ecmd->autoneg == AUTONEG_ENABLE) { - /* Autoneg ON */ - nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON | - OCTNIC_NCMD_AUTONEG_ON; - nctrl.ncmd.s.param1 = ecmd->advertising; - } else { - /* Autoneg OFF */ - nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON; - - nctrl.ncmd.s.param2 = ecmd->duplex; - - nctrl.ncmd.s.param1 = ecmd->speed; - } - - ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { - dev_err(&oct->pci_dev->dev, "Failed to set settings\n"); - return -1; - } - - return 0; -} - -static int lio_nway_reset(struct net_device *netdev) -{ - if (netif_running(netdev)) { - struct ethtool_cmd ecmd; - - memset(&ecmd, 0, sizeof(struct ethtool_cmd)); - ecmd.autoneg = 0; - ecmd.speed = 0; - ecmd.duplex = 0; - lio_set_settings(netdev, &ecmd); - } - return 0; -} - /* Return register dump len. 
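
The deleted lio_set_settings()/lio_nway_reset() pair above was the legacy ethtool_cmd path; after the ksettings conversion the driver only reports link state, which matches the fixed-speed XAUI/RXAUI/XFI modes that the removed code refused to modify anyway. A sketch of the conversion pattern now used in lio_get_link_ksettings(), assuming fixed 10G fiber capabilities (the helper name is hypothetical):

#include <linux/ethtool.h>

/* Translate the legacy SUPPORTED_* and ADVERTISED_* u32 masks into the
 * link-mode bitmaps used by struct ethtool_link_ksettings.
 */
static void fill_10g_fiber_modes(struct ethtool_link_ksettings *ks)
{
	u32 supported = SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE |
			SUPPORTED_Pause;
	u32 advertising = ADVERTISED_10000baseT_Full | ADVERTISED_Pause;

	ethtool_convert_legacy_u32_to_link_mode(ks->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(ks->link_modes.advertising,
						advertising);
	ks->base.port = PORT_FIBRE;
	ks->base.autoneg = AUTONEG_DISABLE;
}
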
*/ static int lio_get_regs_len(struct net_device *dev) { @@ -1793,6 +1951,8 @@ static int lio_get_regs_len(struct net_device *dev) switch (oct->chip_id) { case OCTEON_CN23XX_PF_VID: return OCT_ETHTOOL_REGDUMP_LEN_23XX; + case OCTEON_CN23XX_VF_VID: + return OCT_ETHTOOL_REGDUMP_LEN_23XX_VF; default: return OCT_ETHTOOL_REGDUMP_LEN; } @@ -2018,6 +2178,123 @@ static int cn23xx_read_csr_reg(char *s, struct octeon_device *oct) return len; } +static int cn23xx_vf_read_csr_reg(char *s, struct octeon_device *oct) +{ + int len = 0; + u32 reg; + int i; + + /* PCI Window Registers */ + + len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n"); + + for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { + reg = CN23XX_VF_SLI_OQ_BUFF_INFO_SIZE(i); + len += sprintf(s + len, + "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n", + reg, i, (u64)octeon_read_csr64(oct, reg)); + } + + for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { + reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i); + len += sprintf(s + len, + "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n", + reg, i, (u64)octeon_read_csr64(oct, reg)); + } + + for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { + reg = CN23XX_VF_SLI_OQ_PKTS_CREDIT(i); + len += sprintf(s + len, + "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n", + reg, i, (u64)octeon_read_csr64(oct, reg)); + } + + for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { + reg = CN23XX_VF_SLI_OQ_SIZE(i); + len += sprintf(s + len, + "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n", + reg, i, (u64)octeon_read_csr64(oct, reg)); + } + + for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { + reg = CN23XX_VF_SLI_OQ_PKT_CONTROL(i); + len += sprintf(s + len, + "\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n", + reg, i, (u64)octeon_read_csr64(oct, reg)); + } + + for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { + reg = CN23XX_VF_SLI_OQ_BASE_ADDR64(i); + len += sprintf(s + len, + "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n", + reg, i, (u64)octeon_read_csr64(oct, reg)); + } + + for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { + reg = CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(i); + len += sprintf(s + len, + "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n", + reg, i, (u64)octeon_read_csr64(oct, reg)); + } + + for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { + reg = CN23XX_VF_SLI_OQ_PKTS_SENT(i); + len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n", + reg, i, (u64)octeon_read_csr64(oct, reg)); + } + + for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { + reg = 0x100c0 + i * CN23XX_VF_OQ_OFFSET; + len += sprintf(s + len, + "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n", + reg, i, (u64)octeon_read_csr64(oct, reg)); + } + + for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { + reg = 0x100d0 + i * CN23XX_VF_IQ_OFFSET; + len += sprintf(s + len, + "\n[%08x] (SLI_PKT%d_VF_INT_SUM): %016llx\n", + reg, i, (u64)octeon_read_csr64(oct, reg)); + } + + for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { + reg = CN23XX_VF_SLI_IQ_PKT_CONTROL64(i); + len += sprintf(s + len, + "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n", + reg, i, (u64)octeon_read_csr64(oct, reg)); + } + + for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { + reg = CN23XX_VF_SLI_IQ_BASE_ADDR64(i); + len += sprintf(s + len, + "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n", + reg, i, (u64)octeon_read_csr64(oct, reg)); + } + + for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { + reg = CN23XX_VF_SLI_IQ_DOORBELL(i); + len += sprintf(s + len, + "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n", + reg, i, (u64)octeon_read_csr64(oct, 
reg)); + } + + for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { + reg = CN23XX_VF_SLI_IQ_SIZE(i); + len += sprintf(s + len, + "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n", + reg, i, (u64)octeon_read_csr64(oct, reg)); + } + + for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { + reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i); + len += sprintf(s + len, + "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n", + reg, i, (u64)octeon_read_csr64(oct, reg)); + } + + return len; +} + static int cn6xxx_read_csr_reg(char *s, struct octeon_device *oct) { u32 reg; @@ -2164,6 +2441,10 @@ static void lio_get_regs(struct net_device *dev, memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX); len += cn23xx_read_csr_reg(regbuf + len, oct); break; + case OCTEON_CN23XX_VF_VID: + memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX_VF); + len += cn23xx_vf_read_csr_reg(regbuf + len, oct); + break; case OCTEON_CN68XX: case OCTEON_CN66XX: memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN); @@ -2194,7 +2475,7 @@ static int lio_set_priv_flags(struct net_device *netdev, u32 flags) } static const struct ethtool_ops lio_ethtool_ops = { - .get_settings = lio_get_settings, + .get_link_ksettings = lio_get_link_ksettings, .get_link = ethtool_op_get_link, .get_drvinfo = lio_get_drvinfo, .get_ringparam = lio_ethtool_get_ringparam, @@ -2211,8 +2492,26 @@ static const struct ethtool_ops lio_ethtool_ops = { .get_msglevel = lio_get_msglevel, .set_msglevel = lio_set_msglevel, .get_sset_count = lio_get_sset_count, - .nway_reset = lio_nway_reset, - .set_settings = lio_set_settings, + .get_coalesce = lio_get_intr_coalesce, + .set_coalesce = lio_set_intr_coalesce, + .get_priv_flags = lio_get_priv_flags, + .set_priv_flags = lio_set_priv_flags, + .get_ts_info = lio_get_ts_info, +}; + +static const struct ethtool_ops lio_vf_ethtool_ops = { + .get_link_ksettings = lio_get_link_ksettings, + .get_link = ethtool_op_get_link, + .get_drvinfo = lio_get_vf_drvinfo, + .get_ringparam = lio_ethtool_get_ringparam, + .get_channels = lio_ethtool_get_channels, + .get_strings = lio_vf_get_strings, + .get_ethtool_stats = lio_vf_get_ethtool_stats, + .get_regs_len = lio_get_regs_len, + .get_regs = lio_get_regs, + .get_msglevel = lio_get_msglevel, + .set_msglevel = lio_set_msglevel, + .get_sset_count = lio_vf_get_sset_count, .get_coalesce = lio_get_intr_coalesce, .set_coalesce = lio_set_intr_coalesce, .get_priv_flags = lio_get_priv_flags, @@ -2222,5 +2521,11 @@ static const struct ethtool_ops lio_ethtool_ops = { void liquidio_set_ethtool_ops(struct net_device *netdev) { - netdev->ethtool_ops = &lio_ethtool_ops; + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + + if (OCTEON_CN23XX_VF(oct)) + netdev->ethtool_ops = &lio_vf_ethtool_ops; + else + netdev->ethtool_ops = &lio_ethtool_ops; } diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index afc6f9dc8119..39a9665c9d00 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -1,28 +1,22 @@ /********************************************************************** -* Author: Cavium, Inc. -* -* Contact: support@cavium.com -* Please include "LiquidIO" in the subject. -* -* Copyright (c) 2003-2015 Cavium, Inc. -* -* This file is free software; you can redistribute it and/or modify -* it under the terms of the GNU General Public License, Version 2, as -* published by the Free Software Foundation. 
-* -* This file is distributed in the hope that it will be useful, but -* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty -* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or -* NONINFRINGEMENT. See the GNU General Public License for more -* details. -* -* This file may also be available under a different license from Cavium. -* Contact Cavium, Inc. for more information -**********************************************************************/ -#include <linux/version.h> + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. + ***********************************************************************/ #include <linux/pci.h> #include <linux/firmware.h> -#include <linux/ptp_clock_kernel.h> #include <net/vxlan.h> #include <linux/kthread.h> #include "liquidio_common.h" @@ -46,6 +40,7 @@ MODULE_VERSION(LIQUIDIO_VERSION); MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME LIO_FW_NAME_SUFFIX); MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME LIO_FW_NAME_SUFFIX); MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME LIO_FW_NAME_SUFFIX); +MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME LIO_FW_NAME_SUFFIX); static int ddr_timeout = 10000; module_param(ddr_timeout, int, 0644); @@ -54,9 +49,6 @@ MODULE_PARM_DESC(ddr_timeout, #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) -#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count) \ - (octeon_dev_ptr->instr_queue[iq_no]->stats.field += count) - static int debug = -1; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "NETIF_MSG debug bits"); @@ -65,10 +57,6 @@ static char fw_type[LIO_MAX_FW_TYPE_LEN]; module_param_string(fw_type, fw_type, sizeof(fw_type), 0000); MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded. 
Default \"nic\""); -static int conf_type; -module_param(conf_type, int, 0); -MODULE_PARM_DESC(conf_type, "select octeon configuration 0 default 1 ovs"); - static int ptp_enable = 1; /* Bit mask values for lio->ifstate */ @@ -180,6 +168,10 @@ struct octeon_device_priv { unsigned long napi_mask; }; +#ifdef CONFIG_PCI_IOV +static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs); +#endif + static int octeon_device_init(struct octeon_device *); static int liquidio_stop(struct net_device *netdev); static void liquidio_remove(struct pci_dev *pdev); @@ -197,9 +189,8 @@ static void octeon_droq_bh(unsigned long pdev) struct octeon_device_priv *oct_priv = (struct octeon_device_priv *)oct->priv; - /* for (q_no = 0; q_no < oct->num_oqs; q_no++) { */ for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) { - if (!(oct->io_qmask.oq & (1ULL << q_no))) + if (!(oct->io_qmask.oq & BIT_ULL(q_no))) continue; reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no], MAX_PACKET_BUDGET); @@ -234,7 +225,7 @@ static int lio_wait_for_oq_pkts(struct octeon_device *oct) pending_pkts = 0; for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { - if (!(oct->io_qmask.oq & (1ULL << i))) + if (!(oct->io_qmask.oq & BIT_ULL(i))) continue; pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]); } @@ -316,7 +307,7 @@ static inline void pcierror_quiesce_device(struct octeon_device *oct) for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { struct octeon_instr_queue *iq; - if (!(oct->io_qmask.iq & (1ULL << i))) + if (!(oct->io_qmask.iq & BIT_ULL(i))) continue; iq = oct->instr_queue[i]; @@ -382,7 +373,6 @@ static void stop_pci_io(struct octeon_device *oct) dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n", lio_get_state_string(&oct->status)); - /* cn63xx_cleanup_aer_uncorrect_error_status(oct->pci_dev); */ /* making it a common function for all OCTEON models */ cleanup_aer_uncorrect_error_status(oct->pci_dev); } @@ -518,6 +508,9 @@ static struct pci_driver liquidio_pci_driver = { .suspend = liquidio_suspend, .resume = liquidio_resume, #endif +#ifdef CONFIG_PCI_IOV + .sriov_configure = liquidio_enable_sriov, +#endif }; /** @@ -763,6 +756,7 @@ static void delete_glists(struct lio *lio) } kfree((void *)lio->glist); + kfree((void *)lio->glist_lock); } /** @@ -933,7 +927,6 @@ static inline void update_link_status(struct net_device *netdev, if (lio->linfo.link.s.link_up) { netif_carrier_on(netdev); - /* start_txq(netdev); */ txqs_wake(netdev); } else { netif_carrier_off(netdev); @@ -1011,7 +1004,7 @@ static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct) if (oct->int_status & OCT_DEV_INTR_PKT_DATA) { for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct); oq_no++) { - if (!(oct->droq_intr & (1ULL << oq_no))) + if (!(oct->droq_intr & BIT_ULL(oq_no))) continue; droq = oct->droq[oq_no]; @@ -1322,6 +1315,7 @@ liquidio_probe(struct pci_dev *pdev, complete(&first_stage); if (octeon_device_init(oct_dev)) { + complete(&hs->init); liquidio_remove(pdev); return -ENOMEM; } @@ -1346,7 +1340,15 @@ liquidio_probe(struct pci_dev *pdev, oct_dev->watchdog_task = kthread_create( liquidio_watchdog, oct_dev, "liowd/%02hhx:%02hhx.%hhx", bus, device, function); - wake_up_process(oct_dev->watchdog_task); + if (!IS_ERR(oct_dev->watchdog_task)) { + wake_up_process(oct_dev->watchdog_task); + } else { + oct_dev->watchdog_task = NULL; + dev_err(&oct_dev->pci_dev->dev, + "failed to create kernel_thread\n"); + liquidio_remove(pdev); + return -1; + } } } @@ -1410,6 +1412,8 @@ static void octeon_destroy_resources(struct 
octeon_device *oct) if (lio_wait_for_oq_pkts(oct)) dev_err(&oct->pci_dev->dev, "OQ had pending packets\n"); + /* fallthrough */ + case OCT_DEV_INTR_SET_DONE: /* Disable interrupts */ oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); @@ -1436,12 +1440,20 @@ static void octeon_destroy_resources(struct octeon_device *oct) pci_disable_msi(oct->pci_dev); } + /* fallthrough */ + case OCT_DEV_MSIX_ALLOC_VECTOR_DONE: if (OCTEON_CN23XX_PF(oct)) octeon_free_ioq_vector(oct); + + /* fallthrough */ + case OCT_DEV_MBOX_SETUP_DONE: + if (OCTEON_CN23XX_PF(oct)) + oct->fn_list.free_mbox(oct); + /* fallthrough */ case OCT_DEV_IN_RESET: case OCT_DEV_DROQ_INIT_DONE: - /*atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);*/ + /* Wait for any pending operations */ mdelay(100); for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { if (!(oct->io_qmask.oq & BIT_ULL(i))) @@ -1472,6 +1484,10 @@ static void octeon_destroy_resources(struct octeon_device *oct) continue; octeon_delete_instr_queue(oct, i); } +#ifdef CONFIG_PCI_IOV + if (oct->sriov_info.sriov_enabled) + pci_disable_sriov(oct->pci_dev); +#endif /* fallthrough */ case OCT_DEV_SC_BUFF_POOL_INIT_DONE: octeon_free_sc_buffer_pool(oct); @@ -1491,10 +1507,13 @@ static void octeon_destroy_resources(struct octeon_device *oct) octeon_unmap_pci_barx(oct, 1); /* fallthrough */ - case OCT_DEV_BEGIN_STATE: + case OCT_DEV_PCI_ENABLE_DONE: + pci_clear_master(oct->pci_dev); /* Disable the device, releasing the PCI INT */ pci_disable_device(oct->pci_dev); + /* fallthrough */ + case OCT_DEV_BEGIN_STATE: /* Nothing to be done here either */ break; } /* end switch (oct->status) */ @@ -1764,6 +1783,7 @@ static int octeon_pci_os_setup(struct octeon_device *oct) if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) { dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n"); + pci_disable_device(oct->pci_dev); return 1; } @@ -2426,7 +2446,6 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget) * Return back if tx_done is false. */ update_txq_status(oct, iq_no); - /*tx_done = (iq->flush_index == iq->octeon_read_index);*/ } else { dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n", __func__, iq_no); @@ -2868,17 +2887,6 @@ static int liquidio_change_mtu(struct net_device *netdev, int new_mtu) struct octnic_ctrl_pkt nctrl; int ret = 0; - /* Limit the MTU to make sure the ethernet packets are between 68 bytes - * and 16000 bytes - */ - if ((new_mtu < LIO_MIN_MTU_SIZE) || - (new_mtu > LIO_MAX_MTU_SIZE)) { - dev_err(&oct->pci_dev->dev, "Invalid MTU: %d\n", new_mtu); - dev_err(&oct->pci_dev->dev, "Valid range %d and %d\n", - LIO_MIN_MTU_SIZE, LIO_MAX_MTU_SIZE); - return -EINVAL; - } - memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); nctrl.ncmd.u64 = 0; @@ -3567,7 +3575,152 @@ static void liquidio_del_vxlan_port(struct net_device *netdev, OCTNET_CMD_VXLAN_PORT_DEL); } -static struct net_device_ops lionetdevops = { +static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx, + u8 *mac, bool is_admin_assigned) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + + if (!is_valid_ether_addr(mac)) + return -EINVAL; + + if (vfidx < 0 || vfidx >= oct->sriov_info.max_vfs) + return -EINVAL; + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + + nctrl.ncmd.u64 = 0; + nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR; + /* vfidx is 0 based, but vf_num (param1) is 1 based */ + nctrl.ncmd.s.param1 = vfidx + 1; + nctrl.ncmd.s.param2 = (is_admin_assigned ? 
1 : 0); + nctrl.ncmd.s.more = 1; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + nctrl.cb_fn = 0; + nctrl.wait_time = LIO_CMD_WAIT_TM; + + nctrl.udd[0] = 0; + /* The MAC Address is presented in network byte order. */ + ether_addr_copy((u8 *)&nctrl.udd[0] + 2, mac); + + oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0]; + + octnet_send_nic_ctrl_pkt(oct, &nctrl); + + return 0; +} + +static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + int retval; + + retval = __liquidio_set_vf_mac(netdev, vfidx, mac, true); + if (!retval) + cn23xx_tell_vf_its_macaddr_changed(oct, vfidx, mac); + + return retval; +} + +static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx, + u16 vlan, u8 qos, __be16 vlan_proto) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + u16 vlantci; + + if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) + return -EINVAL; + + if (vlan_proto != htons(ETH_P_8021Q)) + return -EPROTONOSUPPORT; + + if (vlan >= VLAN_N_VID || qos > 7) + return -EINVAL; + + if (vlan) + vlantci = vlan | (u16)qos << VLAN_PRIO_SHIFT; + else + vlantci = 0; + + if (oct->sriov_info.vf_vlantci[vfidx] == vlantci) + return 0; + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + + if (vlan) + nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER; + else + nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER; + + nctrl.ncmd.s.param1 = vlantci; + nctrl.ncmd.s.param2 = + vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */ + nctrl.ncmd.s.more = 0; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + nctrl.cb_fn = 0; + nctrl.wait_time = LIO_CMD_WAIT_TM; + + octnet_send_nic_ctrl_pkt(oct, &nctrl); + + oct->sriov_info.vf_vlantci[vfidx] = vlantci; + + return 0; +} + +static int liquidio_get_vf_config(struct net_device *netdev, int vfidx, + struct ifla_vf_info *ivi) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + u8 *macaddr; + + if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) + return -EINVAL; + + ivi->vf = vfidx; + macaddr = 2 + (u8 *)&oct->sriov_info.vf_macaddr[vfidx]; + ether_addr_copy(&ivi->mac[0], macaddr); + ivi->vlan = oct->sriov_info.vf_vlantci[vfidx] & VLAN_VID_MASK; + ivi->qos = oct->sriov_info.vf_vlantci[vfidx] >> VLAN_PRIO_SHIFT; + ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx]; + return 0; +} + +static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx, + int linkstate) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + + if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) + return -EINVAL; + + if (oct->sriov_info.vf_linkstate[vfidx] == linkstate) + return 0; + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_LINKSTATE; + nctrl.ncmd.s.param1 = + vfidx + 1; /* vfidx is 0 based, but vf_num (param1) is 1 based */ + nctrl.ncmd.s.param2 = linkstate; + nctrl.ncmd.s.more = 0; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + nctrl.cb_fn = 0; + nctrl.wait_time = LIO_CMD_WAIT_TM; + + octnet_send_nic_ctrl_pkt(oct, &nctrl); + + oct->sriov_info.vf_linkstate[vfidx] = linkstate; + + return 0; +} + +static const struct net_device_ops lionetdevops = { .ndo_open = liquidio_open, .ndo_stop = liquidio_stop, .ndo_start_xmit = liquidio_xmit, @@ -3584,6 +3737,11 @@ static struct net_device_ops lionetdevops = { .ndo_set_features = liquidio_set_features, 
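
The liquidio_set_vf_*() and liquidio_get_vf_config() handlers defined above are reached through rtnetlink when an administrator runs, for example, ip link set <pf> vf 0 mac <addr>. A simplified sketch, not driver code, of the dispatch the rtnetlink core performs for IFLA_VF_MAC:

#include <linux/netdevice.h>
#include <linux/errno.h>

/* Roughly what do_setlink()/do_setvfinfo() do: look up the PF's ops
 * table and call the driver's VF-MAC hook if it provides one.
 */
static int set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_set_vf_mac)
		return -EOPNOTSUPP;
	return ops->ndo_set_vf_mac(dev, vf, mac);
}

Note that the driver converts the zero-based vfidx it receives here into the one-based vf_num the firmware expects (param1 = vfidx + 1), as the comments in the handlers above point out.
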
.ndo_udp_tunnel_add = liquidio_add_vxlan_port, .ndo_udp_tunnel_del = liquidio_del_vxlan_port, + .ndo_set_vf_mac = liquidio_set_vf_mac, + .ndo_set_vf_vlan = liquidio_set_vf_vlan, + .ndo_get_vf_config = liquidio_get_vf_config, + .ndo_set_vf_link_state = liquidio_set_vf_link_state, + .ndo_select_queue = select_q }; /** \brief Entry point for the liquidio module @@ -3595,7 +3753,7 @@ static int __init liquidio_init(void) init_completion(&first_stage); - octeon_init_device_list(conf_type); + octeon_init_device_list(OCTEON_CONFIG_TYPE_DEFAULT); if (liquidio_init_pci()) return -EINVAL; @@ -3816,9 +3974,6 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev); - if (num_iqueues > 1) - lionetdevops.ndo_select_queue = select_q; - /* Associate the routines that will handle different * netdev tasks. */ @@ -3891,6 +4046,10 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) netdev->hw_features = netdev->hw_features & ~NETIF_F_HW_VLAN_CTAG_RX; + /* MTU range: 68 - 16000 */ + netdev->min_mtu = LIO_MIN_MTU_SIZE; + netdev->max_mtu = LIO_MAX_MTU_SIZE; + /* Point to the properties for octeon device to which this * interface belongs. */ @@ -3902,6 +4061,19 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) "if%d gmx: %d hw_addr: 0x%llx\n", i, lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr)); + for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) { + u8 vfmac[ETH_ALEN]; + + random_ether_addr(&vfmac[0]); + if (__liquidio_set_vf_mac(netdev, j, + &vfmac[0], false)) { + dev_err(&octeon_dev->pci_dev->dev, + "Error setting VF%d MAC address\n", + j); + goto setup_nic_dev_fail; + } + } + /* 64-bit swap required on LE machines */ octeon_swap_8B_data(&lio->linfo.hw_addr, 1); for (j = 0; j < 6; j++) @@ -3997,6 +4169,101 @@ setup_nic_wait_intr: return -ENODEV; } +#ifdef CONFIG_PCI_IOV +static int octeon_enable_sriov(struct octeon_device *oct) +{ + unsigned int num_vfs_alloced = oct->sriov_info.num_vfs_alloced; + struct pci_dev *vfdev; + int err; + u32 u; + + if (OCTEON_CN23XX_PF(oct) && num_vfs_alloced) { + err = pci_enable_sriov(oct->pci_dev, + oct->sriov_info.num_vfs_alloced); + if (err) { + dev_err(&oct->pci_dev->dev, + "OCTEON: Failed to enable PCI sriov: %d\n", + err); + oct->sriov_info.num_vfs_alloced = 0; + return err; + } + oct->sriov_info.sriov_enabled = 1; + + /* init lookup table that maps DPI ring number to VF pci_dev + * struct pointer + */ + u = 0; + vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, + OCTEON_CN23XX_VF_VID, NULL); + while (vfdev) { + if (vfdev->is_virtfn && + (vfdev->physfn == oct->pci_dev)) { + oct->sriov_info.dpiring_to_vfpcidev_lut[u] = + vfdev; + u += oct->sriov_info.rings_per_vf; + } + vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, + OCTEON_CN23XX_VF_VID, vfdev); + } + } + + return num_vfs_alloced; +} + +static int lio_pci_sriov_disable(struct octeon_device *oct) +{ + int u; + + if (pci_vfs_assigned(oct->pci_dev)) { + dev_err(&oct->pci_dev->dev, "VFs are still assigned to VMs.\n"); + return -EPERM; + } + + pci_disable_sriov(oct->pci_dev); + + u = 0; + while (u < MAX_POSSIBLE_VFS) { + oct->sriov_info.dpiring_to_vfpcidev_lut[u] = NULL; + u += oct->sriov_info.rings_per_vf; + } + + oct->sriov_info.num_vfs_alloced = 0; + dev_info(&oct->pci_dev->dev, "oct->pf_num:%d disabled VFs\n", + oct->pf_num); + + return 0; +} + +static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs) +{ + struct octeon_device *oct = pci_get_drvdata(dev); + int ret = 0; + + if ((num_vfs == oct->sriov_info.num_vfs_alloced) && + 
(oct->sriov_info.sriov_enabled)) { + dev_info(&oct->pci_dev->dev, "oct->pf_num:%d already enabled num_vfs:%d\n", + oct->pf_num, num_vfs); + return 0; + } + + if (!num_vfs) { + ret = lio_pci_sriov_disable(oct); + } else if (num_vfs > oct->sriov_info.max_vfs) { + dev_err(&oct->pci_dev->dev, + "OCTEON: Max allowed VFs:%d user requested:%d", + oct->sriov_info.max_vfs, num_vfs); + ret = -EPERM; + } else { + oct->sriov_info.num_vfs_alloced = num_vfs; + ret = octeon_enable_sriov(oct); + dev_info(&oct->pci_dev->dev, "oct->pf_num:%d num_vfs:%d\n", + oct->pf_num, num_vfs); + } + + return ret; +} +#endif + /** * \brief initialize the NIC * @param oct octeon device @@ -4102,6 +4369,52 @@ static void nic_starter(struct work_struct *work) complete(&handshake[oct->octeon_id].started); } +static int +octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf) +{ + struct octeon_device *oct = (struct octeon_device *)buf; + struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt; + int i, notice, vf_idx; + u64 *data, vf_num; + + notice = recv_pkt->rh.r.ossp; + data = (u64 *)get_rbd(recv_pkt->buffer_ptr[0]); + + /* the first 64-bit word of data is the vf_num */ + vf_num = data[0]; + octeon_swap_8B_data(&vf_num, 1); + vf_idx = (int)vf_num - 1; + + if (notice == VF_DRV_LOADED) { + if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx))) { + oct->sriov_info.vf_drv_loaded_mask |= BIT_ULL(vf_idx); + dev_info(&oct->pci_dev->dev, + "driver for VF%d was loaded\n", vf_idx); + try_module_get(THIS_MODULE); + } + } else if (notice == VF_DRV_REMOVED) { + if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx)) { + oct->sriov_info.vf_drv_loaded_mask &= ~BIT_ULL(vf_idx); + dev_info(&oct->pci_dev->dev, + "driver for VF%d was removed\n", vf_idx); + module_put(THIS_MODULE); + } + } else if (notice == VF_DRV_MACADDR_CHANGED) { + u8 *b = (u8 *)&data[1]; + + oct->sriov_info.vf_macaddr[vf_idx] = data[1]; + dev_info(&oct->pci_dev->dev, + "VF driver changed VF%d's MAC address to %pM\n", + vf_idx, b + 2); + } + + for (i = 0; i < recv_pkt->buffer_count; i++) + recv_buffer_free(recv_pkt->buffer_ptr[i]); + octeon_free_recv_info(recv_info); + + return 0; +} + /** * \brief Device initialization for each Octeon device that is probed * @param octeon_dev octeon device @@ -4121,6 +4434,8 @@ static int octeon_device_init(struct octeon_device *octeon_dev) if (octeon_pci_os_setup(octeon_dev)) return 1; + atomic_set(&octeon_dev->status, OCT_DEV_PCI_ENABLE_DONE); + /* Identify the Octeon type and map the BAR address space. 
*/ if (octeon_chip_specific_setup(octeon_dev)) { dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n"); @@ -4160,6 +4475,9 @@ static int octeon_device_init(struct octeon_device *octeon_dev) octeon_core_drv_init, octeon_dev); + octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, + OPCODE_NIC_VF_DRV_NOTICE, + octeon_recv_vf_drv_notice, octeon_dev); INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter); octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev; schedule_delayed_work(&octeon_dev->nic_poll_work.work, @@ -4167,7 +4485,10 @@ static int octeon_device_init(struct octeon_device *octeon_dev) atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE); - octeon_set_io_queues_off(octeon_dev); + if (octeon_set_io_queues_off(octeon_dev)) { + dev_err(&octeon_dev->pci_dev->dev, "setting io queues off failed\n"); + return 1; + } if (OCTEON_CN23XX_PF(octeon_dev)) { ret = octeon_dev->fn_list.setup_device_regs(octeon_dev); @@ -4189,9 +4510,6 @@ static int octeon_device_init(struct octeon_device *octeon_dev) if (octeon_setup_instr_queues(octeon_dev)) { dev_err(&octeon_dev->pci_dev->dev, "instruction queue initialization failed\n"); - /* On error, release any previously allocated queues */ - for (j = 0; j < octeon_dev->num_iqs; j++) - octeon_delete_instr_queue(octeon_dev, j); return 1; } atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE); @@ -4207,19 +4525,23 @@ static int octeon_device_init(struct octeon_device *octeon_dev) if (octeon_setup_output_queues(octeon_dev)) { dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n"); - /* Release any previously allocated queues */ - for (j = 0; j < octeon_dev->num_oqs; j++) - octeon_delete_droq(octeon_dev, j); return 1; } atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE); if (OCTEON_CN23XX_PF(octeon_dev)) { + if (octeon_dev->fn_list.setup_mbox(octeon_dev)) { + dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Mailbox setup failed\n"); + return 1; + } + atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE); + if (octeon_allocate_ioq_vector(octeon_dev)) { dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n"); return 1; } + atomic_set(&octeon_dev->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE); } else { /* The input and output queue registers were setup earlier (the @@ -4247,6 +4569,8 @@ static int octeon_device_init(struct octeon_device *octeon_dev) /* Enable Octeon device interrupts */ octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR); + atomic_set(&octeon_dev->status, OCT_DEV_INTR_SET_DONE); + /* Enable the input and output queues for this Octeon device */ ret = octeon_dev->fn_list.enable_io_queues(octeon_dev); if (ret) { diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c new file mode 100644 index 000000000000..70d96c10c673 --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -0,0 +1,3251 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. 
+ * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. + ***********************************************************************/ +#include <linux/pci.h> +#include <net/vxlan.h> +#include "liquidio_common.h" +#include "octeon_droq.h" +#include "octeon_iq.h" +#include "response_manager.h" +#include "octeon_device.h" +#include "octeon_nic.h" +#include "octeon_main.h" +#include "octeon_network.h" +#include "cn23xx_vf_device.h" + +MODULE_AUTHOR("Cavium Networks, <support@cavium.com>"); +MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Virtual Function Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(LIQUIDIO_VERSION); + +static int debug = -1; +module_param(debug, int, 0644); +MODULE_PARM_DESC(debug, "NETIF_MSG debug bits"); + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) + +/* Bit mask values for lio->ifstate */ +#define LIO_IFSTATE_DROQ_OPS 0x01 +#define LIO_IFSTATE_REGISTERED 0x02 +#define LIO_IFSTATE_RUNNING 0x04 +#define LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08 + +struct liquidio_if_cfg_context { + int octeon_id; + + wait_queue_head_t wc; + + int cond; +}; + +struct liquidio_if_cfg_resp { + u64 rh; + struct liquidio_if_cfg_info cfg_info; + u64 status; +}; + +struct liquidio_rx_ctl_context { + int octeon_id; + + wait_queue_head_t wc; + + int cond; +}; + +struct oct_timestamp_resp { + u64 rh; + u64 timestamp; + u64 status; +}; + +union tx_info { + u64 u64; + struct { +#ifdef __BIG_ENDIAN_BITFIELD + u16 gso_size; + u16 gso_segs; + u32 reserved; +#else + u32 reserved; + u16 gso_segs; + u16 gso_size; +#endif + } s; +}; + +#define OCTNIC_MAX_SG (MAX_SKB_FRAGS) + +#define OCTNIC_GSO_MAX_HEADER_SIZE 128 +#define OCTNIC_GSO_MAX_SIZE \ + (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE) + +struct octnic_gather { + /* List manipulation. Next and prev pointers. */ + struct list_head list; + + /* Size of the gather component at sg in bytes. */ + int sg_size; + + /* Number of bytes that sg was adjusted to make it 8B-aligned. */ + int adjust; + + /* Gather component that can accommodate max sized fragment list + * received from the IP layer. + */ + struct octeon_sg_entry *sg; +}; + +struct octeon_device_priv { + /* Tasklet structures for this device. 
*/ + struct tasklet_struct droq_tasklet; + unsigned long napi_mask; +}; + +static int +liquidio_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static void liquidio_vf_remove(struct pci_dev *pdev); +static int octeon_device_init(struct octeon_device *oct); +static int liquidio_stop(struct net_device *netdev); + +static int lio_wait_for_oq_pkts(struct octeon_device *oct) +{ + struct octeon_device_priv *oct_priv = + (struct octeon_device_priv *)oct->priv; + int retry = MAX_VF_IP_OP_PENDING_PKT_COUNT; + int pkt_cnt = 0, pending_pkts; + int i; + + do { + pending_pkts = 0; + + for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { + if (!(oct->io_qmask.oq & BIT_ULL(i))) + continue; + pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]); + } + if (pkt_cnt > 0) { + pending_pkts += pkt_cnt; + tasklet_schedule(&oct_priv->droq_tasklet); + } + pkt_cnt = 0; + schedule_timeout_uninterruptible(1); + + } while (retry-- && pending_pkts); + + return pkt_cnt; +} + +/** + * \brief wait for all pending requests to complete + * @param oct Pointer to Octeon device + * + * Called during shutdown sequence + */ +static int wait_for_pending_requests(struct octeon_device *oct) +{ + int i, pcount = 0; + + for (i = 0; i < MAX_VF_IP_OP_PENDING_PKT_COUNT; i++) { + pcount = atomic_read( + &oct->response_list[OCTEON_ORDERED_SC_LIST] + .pending_req_count); + if (pcount) + schedule_timeout_uninterruptible(HZ / 10); + else + break; + } + + if (pcount) + return 1; + + return 0; +} + +/** + * \brief Cause device to go quiet so it can be safely removed/reset/etc + * @param oct Pointer to Octeon device + */ +static void pcierror_quiesce_device(struct octeon_device *oct) +{ + int i; + + /* Disable the input and output queues now. No more packets will + * arrive from Octeon, but we should wait for all packet processing + * to finish. + */ + + /* To allow for in-flight requests */ + schedule_timeout_uninterruptible(100); + + if (wait_for_pending_requests(oct)) + dev_err(&oct->pci_dev->dev, "There were pending requests\n"); + + /* Force all requests waiting to be fetched by OCTEON to complete. */ + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { + struct octeon_instr_queue *iq; + + if (!(oct->io_qmask.iq & BIT_ULL(i))) + continue; + iq = oct->instr_queue[i]; + + if (atomic_read(&iq->instr_pending)) { + spin_lock_bh(&iq->lock); + iq->fill_cnt = 0; + iq->octeon_read_index = iq->host_write_index; + iq->stats.instr_processed += + atomic_read(&iq->instr_pending); + lio_process_iq_request_list(oct, iq, 0); + spin_unlock_bh(&iq->lock); + } + } + + /* Force all pending ordered list requests to time out. */ + lio_process_ordered_list(oct, 1); + + /* We do not need to wait for output queue packets to be processed. 
*/ +} + +/** + * \brief Cleanup PCI AER uncorrectable error status + * @param dev Pointer to PCI device + */ +static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev) +{ + u32 status, mask; + int pos = 0x100; + + pr_info("%s :\n", __func__); + + pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); + pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask); + if (dev->error_state == pci_channel_io_normal) + status &= ~mask; /* Clear corresponding nonfatal bits */ + else + status &= mask; /* Clear corresponding fatal bits */ + pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status); +} + +/** + * \brief Stop all PCI IO to a given device + * @param dev Pointer to Octeon device + */ +static void stop_pci_io(struct octeon_device *oct) +{ + struct msix_entry *msix_entries; + int i; + + /* No more instructions will be forwarded. */ + atomic_set(&oct->status, OCT_DEV_IN_RESET); + + for (i = 0; i < oct->ifcount; i++) + netif_device_detach(oct->props[i].netdev); + + /* Disable interrupts */ + oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); + + pcierror_quiesce_device(oct); + if (oct->msix_on) { + msix_entries = (struct msix_entry *)oct->msix_entries; + for (i = 0; i < oct->num_msix_irqs; i++) { + /* clear the affinity_cpumask */ + irq_set_affinity_hint(msix_entries[i].vector, + NULL); + free_irq(msix_entries[i].vector, + &oct->ioq_vector[i]); + } + pci_disable_msix(oct->pci_dev); + kfree(oct->msix_entries); + oct->msix_entries = NULL; + octeon_free_ioq_vector(oct); + } + dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n", + lio_get_state_string(&oct->status)); + + /* making it a common function for all OCTEON models */ + cleanup_aer_uncorrect_error_status(oct->pci_dev); + + pci_disable_device(oct->pci_dev); +} + +/** + * \brief called when PCI error is detected + * @param pdev Pointer to PCI device + * @param state The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. 
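+ *
+ * A minimal sketch of the recovery contract implemented below; the
+ * AER core chooses its next step from this callback's return value:
+ *
+ *   pci_channel_io_normal -> PCI_ERS_RESULT_CAN_RECOVER
+ *                            (non-fatal: just clear the error status)
+ *   any other state       -> PCI_ERS_RESULT_DISCONNECT
+ *                            (fatal: stop_pci_io() quiesces the VF)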
+ */ +static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct octeon_device *oct = pci_get_drvdata(pdev); + + /* Non-correctable Non-fatal errors */ + if (state == pci_channel_io_normal) { + dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n"); + cleanup_aer_uncorrect_error_status(oct->pci_dev); + return PCI_ERS_RESULT_CAN_RECOVER; + } + + /* Non-correctable Fatal errors */ + dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n"); + stop_pci_io(oct); + + return PCI_ERS_RESULT_DISCONNECT; +} + +/* For PCI-E Advanced Error Recovery (AER) Interface */ +static const struct pci_error_handlers liquidio_vf_err_handler = { + .error_detected = liquidio_pcie_error_detected, +}; + +static const struct pci_device_id liquidio_vf_pci_tbl[] = { + { + PCI_VENDOR_ID_CAVIUM, OCTEON_CN23XX_VF_VID, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 + }, + { + 0, 0, 0, 0, 0, 0, 0 + } +}; +MODULE_DEVICE_TABLE(pci, liquidio_vf_pci_tbl); + +static struct pci_driver liquidio_vf_pci_driver = { + .name = "LiquidIO_VF", + .id_table = liquidio_vf_pci_tbl, + .probe = liquidio_vf_probe, + .remove = liquidio_vf_remove, + .err_handler = &liquidio_vf_err_handler, /* For AER */ +}; + +/** + * \brief check interface state + * @param lio per-network private data + * @param state_flag flag state to check + */ +static int ifstate_check(struct lio *lio, int state_flag) +{ + return atomic_read(&lio->ifstate) & state_flag; +} + +/** + * \brief set interface state + * @param lio per-network private data + * @param state_flag flag state to set + */ +static void ifstate_set(struct lio *lio, int state_flag) +{ + atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag)); +} + +/** + * \brief clear interface state + * @param lio per-network private data + * @param state_flag flag state to clear + */ +static void ifstate_reset(struct lio *lio, int state_flag) +{ + atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag))); +} + +/** + * \brief Stop Tx queues + * @param netdev network device + */ +static void txqs_stop(struct net_device *netdev) +{ + if (netif_is_multiqueue(netdev)) { + int i; + + for (i = 0; i < netdev->num_tx_queues; i++) + netif_stop_subqueue(netdev, i); + } else { + netif_stop_queue(netdev); + } +} + +/** + * \brief Start Tx queues + * @param netdev network device + */ +static void txqs_start(struct net_device *netdev) +{ + if (netif_is_multiqueue(netdev)) { + int i; + + for (i = 0; i < netdev->num_tx_queues; i++) + netif_start_subqueue(netdev, i); + } else { + netif_start_queue(netdev); + } +} + +/** + * \brief Wake Tx queues + * @param netdev network device + */ +static void txqs_wake(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + + if (netif_is_multiqueue(netdev)) { + int i; + + for (i = 0; i < netdev->num_tx_queues; i++) { + int qno = lio->linfo.txpciq[i % (lio->linfo.num_txpciq)] + .s.q_no; + if (__netif_subqueue_stopped(netdev, i)) { + INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno, + tx_restart, 1); + netif_wake_subqueue(netdev, i); + } + } + } else { + INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq, + tx_restart, 1); + netif_wake_queue(netdev); + } +} + +/** + * \brief Start Tx queue + * @param netdev network device + */ +static void start_txq(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + + if (lio->linfo.link.s.link_up) { + txqs_start(netdev); + return; + } +} + +/** + * \brief Wake a queue + * @param netdev network device + * @param q which queue to 
wake + */ +static void wake_q(struct net_device *netdev, int q) +{ + if (netif_is_multiqueue(netdev)) + netif_wake_subqueue(netdev, q); + else + netif_wake_queue(netdev); +} + +/** + * \brief Stop a queue + * @param netdev network device + * @param q which queue to stop + */ +static void stop_q(struct net_device *netdev, int q) +{ + if (netif_is_multiqueue(netdev)) + netif_stop_subqueue(netdev, q); + else + netif_stop_queue(netdev); +} + +/** + * Remove the node at the head of the list. The list would be empty at + * the end of this call if there are no more nodes in the list. + */ +static struct list_head *list_delete_head(struct list_head *root) +{ + struct list_head *node; + + if ((root->prev == root) && (root->next == root)) + node = NULL; + else + node = root->next; + + if (node) + list_del(node); + + return node; +} + +/** + * \brief Delete gather lists + * @param lio per-network private data + */ +static void delete_glists(struct lio *lio) +{ + struct octnic_gather *g; + int i; + + if (!lio->glist) + return; + + for (i = 0; i < lio->linfo.num_txpciq; i++) { + do { + g = (struct octnic_gather *) + list_delete_head(&lio->glist[i]); + if (g) { + if (g->sg) + kfree((void *)((unsigned long)g->sg - + g->adjust)); + kfree(g); + } + } while (g); + } + + kfree(lio->glist); + kfree(lio->glist_lock); +} + +/** + * \brief Setup gather lists + * @param lio per-network private data + */ +static int setup_glists(struct lio *lio, int num_iqs) +{ + struct octnic_gather *g; + int i, j; + + lio->glist_lock = + kzalloc(sizeof(*lio->glist_lock) * num_iqs, GFP_KERNEL); + if (!lio->glist_lock) + return 1; + + lio->glist = + kzalloc(sizeof(*lio->glist) * num_iqs, GFP_KERNEL); + if (!lio->glist) { + kfree(lio->glist_lock); + return 1; + } + + for (i = 0; i < num_iqs; i++) { + spin_lock_init(&lio->glist_lock[i]); + + INIT_LIST_HEAD(&lio->glist[i]); + + for (j = 0; j < lio->tx_qsize; j++) { + g = kzalloc(sizeof(*g), GFP_KERNEL); + if (!g) + break; + + g->sg_size = ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * + OCT_SG_ENTRY_SIZE); + + g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL); + if (!g->sg) { + kfree(g); + break; + } + + /* The gather component should be aligned on 64-bit + * boundary + */ + if (((unsigned long)g->sg) & 7) { + g->adjust = 8 - (((unsigned long)g->sg) & 7); + g->sg = (struct octeon_sg_entry *) + ((unsigned long)g->sg + g->adjust); + } + list_add_tail(&g->list, &lio->glist[i]); + } + + if (j != lio->tx_qsize) { + delete_glists(lio); + return 1; + } + } + + return 0; +} + +/** + * \brief Print link information + * @param netdev network device + */ +static void print_link_info(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + + if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) { + struct oct_link_info *linfo = &lio->linfo; + + if (linfo->link.s.link_up) { + netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n", + linfo->link.s.speed, + (linfo->link.s.duplex) ? 
"Full" : "Half"); + } else { + netif_info(lio, link, lio->netdev, "Link Down\n"); + } + } +} + +/** + * \brief Routine to notify MTU change + * @param work work_struct data structure + */ +static void octnet_link_status_change(struct work_struct *work) +{ + struct cavium_wk *wk = (struct cavium_wk *)work; + struct lio *lio = (struct lio *)wk->ctxptr; + + rtnl_lock(); + call_netdevice_notifiers(NETDEV_CHANGEMTU, lio->netdev); + rtnl_unlock(); +} + +/** + * \brief Sets up the mtu status change work + * @param netdev network device + */ +static int setup_link_status_change_wq(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + + lio->link_status_wq.wq = alloc_workqueue("link-status", + WQ_MEM_RECLAIM, 0); + if (!lio->link_status_wq.wq) { + dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n"); + return -1; + } + INIT_DELAYED_WORK(&lio->link_status_wq.wk.work, + octnet_link_status_change); + lio->link_status_wq.wk.ctxptr = lio; + + return 0; +} + +static void cleanup_link_status_change_wq(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + + if (lio->link_status_wq.wq) { + cancel_delayed_work_sync(&lio->link_status_wq.wk.work); + destroy_workqueue(lio->link_status_wq.wq); + } +} + +/** + * \brief Update link status + * @param netdev network device + * @param ls link status structure + * + * Called on receipt of a link status response from the core application to + * update each interface's link status. + */ +static void update_link_status(struct net_device *netdev, + union oct_link_status *ls) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + + if ((lio->intf_open) && (lio->linfo.link.u64 != ls->u64)) { + lio->linfo.link.u64 = ls->u64; + + print_link_info(netdev); + lio->link_changes++; + + if (lio->linfo.link.s.link_up) { + netif_carrier_on(netdev); + txqs_wake(netdev); + } else { + netif_carrier_off(netdev); + txqs_stop(netdev); + } + + if (lio->linfo.link.s.mtu < netdev->mtu) { + dev_warn(&oct->pci_dev->dev, + "PF has changed the MTU for gmx port. 
Reducing the mtu from %d to %d\n",
+ netdev->mtu, lio->linfo.link.s.mtu);
+ lio->mtu = lio->linfo.link.s.mtu;
+ netdev->mtu = lio->linfo.link.s.mtu;
+ queue_delayed_work(lio->link_status_wq.wq,
+ &lio->link_status_wq.wk.work, 0);
+ }
+ }
+}
+
+static void update_txq_status(struct octeon_device *oct, int iq_num)
+{
+ struct octeon_instr_queue *iq = oct->instr_queue[iq_num];
+ struct net_device *netdev;
+ struct lio *lio;
+
+ netdev = oct->props[iq->ifidx].netdev;
+ lio = GET_LIO(netdev);
+ if (netif_is_multiqueue(netdev)) {
+ if (__netif_subqueue_stopped(netdev, iq->q_index) &&
+ lio->linfo.link.s.link_up &&
+ (!octnet_iq_is_full(oct, iq_num))) {
+ netif_wake_subqueue(netdev, iq->q_index);
+ INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
+ tx_restart, 1);
+ } else {
+ if (!octnet_iq_is_full(oct, lio->txq)) {
+ INCR_INSTRQUEUE_PKT_COUNT(
+ lio->oct_dev, lio->txq, tx_restart, 1);
+ wake_q(netdev, lio->txq);
+ }
+ }
+ }
+}
+
+static
+int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
+{
+ struct octeon_device *oct = droq->oct_dev;
+ struct octeon_device_priv *oct_priv =
+ (struct octeon_device_priv *)oct->priv;
+
+ if (droq->ops.poll_mode) {
+ droq->ops.napi_fn(droq);
+ } else {
+ if (ret & MSIX_PO_INT) {
+ dev_err(&oct->pci_dev->dev,
+ "unexpected RX interrupt when poll mode is 0 for VF\n");
+ tasklet_schedule(&oct_priv->droq_tasklet);
+ return 1;
+ }
+ /* this will be flushed periodically by the IQ doorbell check */
+ if (ret & MSIX_PI_INT)
+ return 0;
+ }
+ return 0;
+}
+
+static irqreturn_t
+liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev)
+{
+ struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
+ struct octeon_device *oct = ioq_vector->oct_dev;
+ struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
+ u64 ret;
+
+ ret = oct->fn_list.msix_interrupt_handler(ioq_vector);
+
+ if ((ret & MSIX_PO_INT) || (ret & MSIX_PI_INT))
+ liquidio_schedule_msix_droq_pkt_handler(droq, ret);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * \brief Setup interrupt for octeon device
+ * @param oct octeon device
+ *
+ * Enable interrupt in Octeon device as given in the PCI interrupt mask.
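+ *
+ * A condensed sketch of the MSI-X bring-up sequence used below, with
+ * one vector per VF ring (oct->sriov_info.rings_per_vf):
+ *
+ *   msix_entries[i].entry = i;                    mark desired vectors
+ *   pci_enable_msix_range(dev, entries, n, n);    all-or-nothing alloc
+ *   request_irq(entries[i].vector, handler, ...); hook each ring
+ *   irq_set_affinity_hint(vector, mask);          spread across CPUs
+ *
+ * On a request_irq() failure the vectors already hooked are unwound in
+ * reverse and MSI-X is disabled again.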
+ */
+static int octeon_setup_interrupt(struct octeon_device *oct)
+{
+ struct msix_entry *msix_entries;
+ int num_alloc_ioq_vectors;
+ int num_ioq_vectors;
+ int irqret;
+ int i;
+
+ if (oct->msix_on) {
+ oct->num_msix_irqs = oct->sriov_info.rings_per_vf;
+
+ oct->msix_entries = kcalloc(
+ oct->num_msix_irqs, sizeof(struct msix_entry), GFP_KERNEL);
+ if (!oct->msix_entries)
+ return 1;
+
+ msix_entries = (struct msix_entry *)oct->msix_entries;
+
+ for (i = 0; i < oct->num_msix_irqs; i++)
+ msix_entries[i].entry = i;
+ num_alloc_ioq_vectors = pci_enable_msix_range(
+ oct->pci_dev, msix_entries,
+ oct->num_msix_irqs,
+ oct->num_msix_irqs);
+ if (num_alloc_ioq_vectors < 0) {
+ dev_err(&oct->pci_dev->dev, "unable to allocate MSI-X interrupts\n");
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+ return 1;
+ }
+ dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");
+
+ num_ioq_vectors = oct->num_msix_irqs;
+
+ for (i = 0; i < num_ioq_vectors; i++) {
+ irqret = request_irq(msix_entries[i].vector,
+ liquidio_msix_intr_handler, 0,
+ "octeon", &oct->ioq_vector[i]);
+ if (irqret) {
+ dev_err(&oct->pci_dev->dev,
+ "OCTEON: request_irq failed for MSI-X interrupt, error: %d\n",
+ irqret);
+
+ while (i) {
+ i--;
+ irq_set_affinity_hint(
+ msix_entries[i].vector, NULL);
+ free_irq(msix_entries[i].vector,
+ &oct->ioq_vector[i]);
+ }
+ pci_disable_msix(oct->pci_dev);
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+ return 1;
+ }
+ oct->ioq_vector[i].vector = msix_entries[i].vector;
+ /* assign the cpu mask for this msix interrupt vector */
+ irq_set_affinity_hint(
+ msix_entries[i].vector,
+ (&oct->ioq_vector[i].affinity_mask));
+ }
+ dev_dbg(&oct->pci_dev->dev,
+ "OCTEON[%d]: MSI-X enabled\n", oct->octeon_id);
+ }
+ return 0;
+}
+
+/**
+ * \brief PCI probe handler
+ * @param pdev PCI device structure
+ * @param ent unused
+ */
+static int
+liquidio_vf_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent __attribute__((unused)))
+{
+ struct octeon_device *oct_dev = NULL;
+
+ oct_dev = octeon_allocate_device(pdev->device,
+ sizeof(struct octeon_device_priv));
+
+ if (!oct_dev) {
+ dev_err(&pdev->dev, "Unable to allocate device\n");
+ return -ENOMEM;
+ }
+ oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;
+
+ dev_info(&pdev->dev, "Initializing device %x:%x.\n",
+ (u32)pdev->vendor, (u32)pdev->device);
+
+ /* Assign octeon_device for this device to the private data area. */
+ pci_set_drvdata(pdev, oct_dev);
+
+ /* set linux specific device pointer */
+ oct_dev->pci_dev = pdev;
+
+ if (octeon_device_init(oct_dev)) {
+ liquidio_vf_remove(pdev);
+ return -ENOMEM;
+ }
+
+ dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");
+
+ return 0;
+}
+
+/**
+ * \brief PCI FLR for each Octeon device.
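+ *
+ * Outline of the sequence below: disable INTx and lock config space
+ * so the function quiesces, poll PCI_EXP_DEVSTA_TRPND until pending
+ * transactions drain (waiting up to ~5s), then issue the reset via
+ * PCI_EXP_DEVCTL_BCR_FLR and allow 100ms before restoring state.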
+ * @param oct octeon device
+ */
+static void octeon_pci_flr(struct octeon_device *oct)
+{
+ u16 status;
+
+ pci_save_state(oct->pci_dev);
+
+ pci_cfg_access_lock(oct->pci_dev);
+
+ /* Quiesce the device completely */
+ pci_write_config_word(oct->pci_dev, PCI_COMMAND,
+ PCI_COMMAND_INTX_DISABLE);
+
+ /* Wait for the Transaction Pending bit to clear */
+ msleep(100);
+ pcie_capability_read_word(oct->pci_dev, PCI_EXP_DEVSTA, &status);
+ if (status & PCI_EXP_DEVSTA_TRPND) {
+ dev_info(&oct->pci_dev->dev, "Function reset incomplete after 100ms, sleeping for 5 seconds\n");
+ ssleep(5);
+ pcie_capability_read_word(oct->pci_dev, PCI_EXP_DEVSTA,
+ &status);
+ if (status & PCI_EXP_DEVSTA_TRPND)
+ dev_info(&oct->pci_dev->dev, "Function reset still incomplete after 5s, resetting anyway\n");
+ }
+ pcie_capability_set_word(oct->pci_dev, PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_BCR_FLR);
+ mdelay(100);
+
+ pci_cfg_access_unlock(oct->pci_dev);
+
+ pci_restore_state(oct->pci_dev);
+}
+
+/**
+ * \brief Destroy resources associated with octeon device
+ * @param oct octeon device
+ */
+static void octeon_destroy_resources(struct octeon_device *oct)
+{
+ struct msix_entry *msix_entries;
+ int i;
+
+ switch (atomic_read(&oct->status)) {
+ case OCT_DEV_RUNNING:
+ case OCT_DEV_CORE_OK:
+ /* No more instructions will be forwarded. */
+ atomic_set(&oct->status, OCT_DEV_IN_RESET);
+
+ oct->app_mode = CVM_DRV_INVALID_APP;
+ dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
+ lio_get_state_string(&oct->status));
+
+ schedule_timeout_uninterruptible(HZ / 10);
+
+ /* fallthrough */
+ case OCT_DEV_HOST_OK:
+ /* fallthrough */
+ case OCT_DEV_IO_QUEUES_DONE:
+ if (wait_for_pending_requests(oct))
+ dev_err(&oct->pci_dev->dev, "There were pending requests\n");
+
+ if (lio_wait_for_instr_fetch(oct))
+ dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
+
+ /* Disable the input and output queues now. No more packets will
+ * arrive from Octeon, but we should wait for all packet
+ * processing to finish.
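+ *
+ * Note that the cases in this switch intentionally fall through:
+ * each one undoes a single init stage, so teardown can begin at
+ * whatever state atomic_read(&oct->status) reports and unwind every
+ * stage beneath it.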
+ */ + oct->fn_list.disable_io_queues(oct); + + if (lio_wait_for_oq_pkts(oct)) + dev_err(&oct->pci_dev->dev, "OQ had pending packets\n"); + + case OCT_DEV_INTR_SET_DONE: + /* Disable interrupts */ + oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); + + if (oct->msix_on) { + msix_entries = (struct msix_entry *)oct->msix_entries; + for (i = 0; i < oct->num_msix_irqs; i++) { + irq_set_affinity_hint(msix_entries[i].vector, + NULL); + free_irq(msix_entries[i].vector, + &oct->ioq_vector[i]); + } + pci_disable_msix(oct->pci_dev); + kfree(oct->msix_entries); + oct->msix_entries = NULL; + } + /* Soft reset the octeon device before exiting */ + if (oct->pci_dev->reset_fn) + octeon_pci_flr(oct); + else + cn23xx_vf_ask_pf_to_do_flr(oct); + + /* fallthrough */ + case OCT_DEV_MSIX_ALLOC_VECTOR_DONE: + octeon_free_ioq_vector(oct); + + /* fallthrough */ + case OCT_DEV_MBOX_SETUP_DONE: + oct->fn_list.free_mbox(oct); + + /* fallthrough */ + case OCT_DEV_IN_RESET: + case OCT_DEV_DROQ_INIT_DONE: + mdelay(100); + for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { + if (!(oct->io_qmask.oq & BIT_ULL(i))) + continue; + octeon_delete_droq(oct, i); + } + + /* fallthrough */ + case OCT_DEV_RESP_LIST_INIT_DONE: + octeon_delete_response_list(oct); + + /* fallthrough */ + case OCT_DEV_INSTR_QUEUE_INIT_DONE: + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { + if (!(oct->io_qmask.iq & BIT_ULL(i))) + continue; + octeon_delete_instr_queue(oct, i); + } + + /* fallthrough */ + case OCT_DEV_SC_BUFF_POOL_INIT_DONE: + octeon_free_sc_buffer_pool(oct); + + /* fallthrough */ + case OCT_DEV_DISPATCH_INIT_DONE: + octeon_delete_dispatch_list(oct); + cancel_delayed_work_sync(&oct->nic_poll_work.work); + + /* fallthrough */ + case OCT_DEV_PCI_MAP_DONE: + octeon_unmap_pci_barx(oct, 0); + octeon_unmap_pci_barx(oct, 1); + + /* fallthrough */ + case OCT_DEV_PCI_ENABLE_DONE: + pci_clear_master(oct->pci_dev); + /* Disable the device, releasing the PCI INT */ + pci_disable_device(oct->pci_dev); + + /* fallthrough */ + case OCT_DEV_BEGIN_STATE: + /* Nothing to be done here either */ + break; + } +} + +/** + * \brief Callback for rx ctrl + * @param status status of request + * @param buf pointer to resp structure + */ +static void rx_ctl_callback(struct octeon_device *oct, + u32 status, void *buf) +{ + struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; + struct liquidio_rx_ctl_context *ctx; + + ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr; + + oct = lio_get_device(ctx->octeon_id); + if (status) + dev_err(&oct->pci_dev->dev, "rx ctl instruction failed. 
Status: %llx\n", + CVM_CAST64(status)); + WRITE_ONCE(ctx->cond, 1); + + /* This barrier is required to be sure that the response has been + * written fully before waking up the handler + */ + wmb(); + + wake_up_interruptible(&ctx->wc); +} + +/** + * \brief Send Rx control command + * @param lio per-network private data + * @param start_stop whether to start or stop + */ +static void send_rx_ctrl_cmd(struct lio *lio, int start_stop) +{ + struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; + int ctx_size = sizeof(struct liquidio_rx_ctl_context); + struct liquidio_rx_ctl_context *ctx; + struct octeon_soft_command *sc; + union octnet_cmd *ncmd; + int retval; + + if (oct->props[lio->ifidx].rx_on == start_stop) + return; + + sc = (struct octeon_soft_command *) + octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, + 16, ctx_size); + + ncmd = (union octnet_cmd *)sc->virtdptr; + ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr; + + WRITE_ONCE(ctx->cond, 0); + ctx->octeon_id = lio_get_device_id(oct); + init_waitqueue_head(&ctx->wc); + + ncmd->u64 = 0; + ncmd->s.cmd = OCTNET_CMD_RX_CTL; + ncmd->s.param1 = start_stop; + + octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3)); + + sc->iq_no = lio->linfo.txpciq[0].s.q_no; + + octeon_prepare_soft_command(oct, sc, OPCODE_NIC, + OPCODE_NIC_CMD, 0, 0, 0); + + sc->callback = rx_ctl_callback; + sc->callback_arg = sc; + sc->wait_time = 5000; + + retval = octeon_send_soft_command(oct, sc); + if (retval == IQ_SEND_FAILED) { + netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n"); + } else { + /* Sleep on a wait queue till the cond flag indicates that the + * response arrived or timed-out. + */ + if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) + return; + oct->props[lio->ifidx].rx_on = start_stop; + } + + octeon_free_soft_command(oct, sc); +} + +/** + * \brief Destroy NIC device interface + * @param oct octeon device + * @param ifidx which interface to destroy + * + * Cleanup associated with each interface for an Octeon device when NIC + * module is being unloaded or if initialization fails during load. 
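+ *
+ * Cleanup runs in reverse order of setup: stop the interface, disable
+ * NAPI, unregister the netdev, flush the link-status workqueue, free
+ * the gather lists, and finally free_netdev().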
+ */ +static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) +{ + struct net_device *netdev = oct->props[ifidx].netdev; + struct napi_struct *napi, *n; + struct lio *lio; + + if (!netdev) { + dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n", + __func__, ifidx); + return; + } + + lio = GET_LIO(netdev); + + dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n"); + + if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) + liquidio_stop(netdev); + + if (oct->props[lio->ifidx].napi_enabled == 1) { + list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) + napi_disable(napi); + + oct->props[lio->ifidx].napi_enabled = 0; + + oct->droq[0]->ops.poll_mode = 0; + } + + if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) + unregister_netdev(netdev); + + cleanup_link_status_change_wq(netdev); + + delete_glists(lio); + + free_netdev(netdev); + + oct->props[ifidx].gmxport = -1; + + oct->props[ifidx].netdev = NULL; +} + +/** + * \brief Stop complete NIC functionality + * @param oct octeon device + */ +static int liquidio_stop_nic_module(struct octeon_device *oct) +{ + struct lio *lio; + int i, j; + + dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n"); + if (!oct->ifcount) { + dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n"); + return 1; + } + + spin_lock_bh(&oct->cmd_resp_wqlock); + oct->cmd_resp_state = OCT_DRV_OFFLINE; + spin_unlock_bh(&oct->cmd_resp_wqlock); + + for (i = 0; i < oct->ifcount; i++) { + lio = GET_LIO(oct->props[i].netdev); + for (j = 0; j < lio->linfo.num_rxpciq; j++) + octeon_unregister_droq_ops(oct, + lio->linfo.rxpciq[j].s.q_no); + } + + for (i = 0; i < oct->ifcount; i++) + liquidio_destroy_nic_device(oct, i); + + dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n"); + return 0; +} + +/** + * \brief Cleans up resources at unload time + * @param pdev PCI device structure + */ +static void liquidio_vf_remove(struct pci_dev *pdev) +{ + struct octeon_device *oct_dev = pci_get_drvdata(pdev); + + dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n"); + + if (oct_dev->app_mode == CVM_DRV_NIC_APP) + liquidio_stop_nic_module(oct_dev); + + /* Reset the octeon device and cleanup all memory allocated for + * the octeon device by driver. + */ + octeon_destroy_resources(oct_dev); + + dev_info(&oct_dev->pci_dev->dev, "Device removed\n"); + + /* This octeon device has been removed. Update the global + * data structure to reflect this. Free the device structure. + */ + octeon_free_device_mem(oct_dev); +} + +/** + * \brief PCI initialization for each Octeon device. + * @param oct octeon device + */ +static int octeon_pci_os_setup(struct octeon_device *oct) +{ +#ifdef CONFIG_PCI_IOV + /* setup PCI stuff first */ + if (!oct->pci_dev->physfn) + octeon_pci_flr(oct); +#endif + + if (pci_enable_device(oct->pci_dev)) { + dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n"); + return 1; + } + + if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) { + dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n"); + pci_disable_device(oct->pci_dev); + return 1; + } + + /* Enable PCI DMA Master. 
*/ + pci_set_master(oct->pci_dev); + + return 0; +} + +static int skb_iq(struct lio *lio, struct sk_buff *skb) +{ + int q = 0; + + if (netif_is_multiqueue(lio->netdev)) + q = skb->queue_mapping % lio->linfo.num_txpciq; + + return q; +} + +/** + * \brief Check Tx queue state for a given network buffer + * @param lio per-network private data + * @param skb network buffer + */ +static int check_txq_state(struct lio *lio, struct sk_buff *skb) +{ + int q = 0, iq = 0; + + if (netif_is_multiqueue(lio->netdev)) { + q = skb->queue_mapping; + iq = lio->linfo.txpciq[(q % (lio->linfo.num_txpciq))].s.q_no; + } else { + iq = lio->txq; + q = iq; + } + + if (octnet_iq_is_full(lio->oct_dev, iq)) + return 0; + + if (__netif_subqueue_stopped(lio->netdev, q)) { + INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1); + wake_q(lio->netdev, q); + } + + return 1; +} + +/** + * \brief Unmap and free network buffer + * @param buf buffer + */ +static void free_netbuf(void *buf) +{ + struct octnet_buf_free_info *finfo; + struct sk_buff *skb; + struct lio *lio; + + finfo = (struct octnet_buf_free_info *)buf; + skb = finfo->skb; + lio = finfo->lio; + + dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len, + DMA_TO_DEVICE); + + check_txq_state(lio, skb); + + tx_buffer_free(skb); +} + +/** + * \brief Unmap and free gather buffer + * @param buf buffer + */ +static void free_netsgbuf(void *buf) +{ + struct octnet_buf_free_info *finfo; + struct octnic_gather *g; + struct sk_buff *skb; + int i, frags, iq; + struct lio *lio; + + finfo = (struct octnet_buf_free_info *)buf; + skb = finfo->skb; + lio = finfo->lio; + g = finfo->g; + frags = skb_shinfo(skb)->nr_frags; + + dma_unmap_single(&lio->oct_dev->pci_dev->dev, + g->sg[0].ptr[0], (skb->len - skb->data_len), + DMA_TO_DEVICE); + + i = 1; + while (frags--) { + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; + + pci_unmap_page((lio->oct_dev)->pci_dev, + g->sg[(i >> 2)].ptr[(i & 3)], + frag->size, DMA_TO_DEVICE); + i++; + } + + dma_unmap_single(&lio->oct_dev->pci_dev->dev, + finfo->dptr, g->sg_size, + DMA_TO_DEVICE); + + iq = skb_iq(lio, skb); + + spin_lock(&lio->glist_lock[iq]); + list_add_tail(&g->list, &lio->glist[iq]); + spin_unlock(&lio->glist_lock[iq]); + + check_txq_state(lio, skb); /* mq support: sub-queue state check */ + + tx_buffer_free(skb); +} + +/** + * \brief Unmap and free gather buffer with response + * @param buf buffer + */ +static void free_netsgbuf_with_resp(void *buf) +{ + struct octnet_buf_free_info *finfo; + struct octeon_soft_command *sc; + struct octnic_gather *g; + struct sk_buff *skb; + int i, frags, iq; + struct lio *lio; + + sc = (struct octeon_soft_command *)buf; + skb = (struct sk_buff *)sc->callback_arg; + finfo = (struct octnet_buf_free_info *)&skb->cb; + + lio = finfo->lio; + g = finfo->g; + frags = skb_shinfo(skb)->nr_frags; + + dma_unmap_single(&lio->oct_dev->pci_dev->dev, + g->sg[0].ptr[0], (skb->len - skb->data_len), + DMA_TO_DEVICE); + + i = 1; + while (frags--) { + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; + + pci_unmap_page((lio->oct_dev)->pci_dev, + g->sg[(i >> 2)].ptr[(i & 3)], + frag->size, DMA_TO_DEVICE); + i++; + } + + dma_unmap_single(&lio->oct_dev->pci_dev->dev, + finfo->dptr, g->sg_size, + DMA_TO_DEVICE); + + iq = skb_iq(lio, skb); + + spin_lock(&lio->glist_lock[iq]); + list_add_tail(&g->list, &lio->glist[iq]); + spin_unlock(&lio->glist_lock[iq]); + + /* Don't free the skb yet */ + + check_txq_state(lio, skb); +} + +/** + * \brief Setup output queue + * @param oct octeon 
device + * @param q_no which queue + * @param num_descs how many descriptors + * @param desc_size size of each descriptor + * @param app_ctx application context + */ +static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs, + int desc_size, void *app_ctx) +{ + int ret_val; + + dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no); + /* droq creation and local register settings. */ + ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx); + if (ret_val < 0) + return ret_val; + + if (ret_val == 1) { + dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no); + return 0; + } + + /* Enable the droq queues */ + octeon_set_droq_pkt_op(oct, q_no, 1); + + /* Send Credit for Octeon Output queues. Credits are always + * sent after the output queue is enabled. + */ + writel(oct->droq[q_no]->max_count, oct->droq[q_no]->pkts_credit_reg); + + return ret_val; +} + +/** + * \brief Callback for getting interface configuration + * @param status status of request + * @param buf pointer to resp structure + */ +static void if_cfg_callback(struct octeon_device *oct, + u32 status __attribute__((unused)), void *buf) +{ + struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; + struct liquidio_if_cfg_context *ctx; + struct liquidio_if_cfg_resp *resp; + + resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; + ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; + + oct = lio_get_device(ctx->octeon_id); + if (resp->status) + dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n", + CVM_CAST64(resp->status)); + WRITE_ONCE(ctx->cond, 1); + + snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s", + resp->cfg_info.liquidio_firmware_version); + + /* This barrier is required to be sure that the response has been + * written fully before waking up the handler + */ + wmb(); + + wake_up_interruptible(&ctx->wc); +} + +/** + * \brief Select queue based on hash + * @param dev Net device + * @param skb sk_buff structure + * @returns selected queue number + */ +static u16 select_q(struct net_device *dev, struct sk_buff *skb, + void *accel_priv __attribute__((unused)), + select_queue_fallback_t fallback __attribute__((unused))) +{ + struct lio *lio; + u32 qindex; + + lio = GET_LIO(dev); + + qindex = skb_tx_hash(dev, skb); + + return (u16)(qindex % (lio->linfo.num_txpciq)); +} + +/** Routine to push packets arriving on Octeon interface upto network layer. + * @param oct_id - octeon device id. + * @param skbuff - skbuff struct to be passed to network layer. + * @param len - size of total data received. + * @param rh - Control header associated with the packet + * @param param - additional control data with the packet + * @param arg - farg registered in droq_ops + */ +static void +liquidio_push_packet(u32 octeon_id __attribute__((unused)), + void *skbuff, + u32 len, + union octeon_rh *rh, + void *param, + void *arg) +{ + struct napi_struct *napi = param; + struct octeon_droq *droq = + container_of(param, struct octeon_droq, napi); + struct net_device *netdev = (struct net_device *)arg; + struct sk_buff *skb = (struct sk_buff *)skbuff; + u16 vtag = 0; + + if (netdev) { + struct lio *lio = GET_LIO(netdev); + int packet_was_received; + + /* Do not proceed if the interface is not in RUNNING state. 
*/ + if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) { + recv_buffer_free(skb); + droq->stats.rx_dropped++; + return; + } + + skb->dev = netdev; + + skb_record_rx_queue(skb, droq->q_no); + if (likely(len > MIN_SKB_SIZE)) { + struct octeon_skb_page_info *pg_info; + unsigned char *va; + + pg_info = ((struct octeon_skb_page_info *)(skb->cb)); + if (pg_info->page) { + /* For Paged allocation use the frags */ + va = page_address(pg_info->page) + + pg_info->page_offset; + memcpy(skb->data, va, MIN_SKB_SIZE); + skb_put(skb, MIN_SKB_SIZE); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + pg_info->page, + pg_info->page_offset + + MIN_SKB_SIZE, + len - MIN_SKB_SIZE, + LIO_RXBUFFER_SZ); + } + } else { + struct octeon_skb_page_info *pg_info = + ((struct octeon_skb_page_info *)(skb->cb)); + skb_copy_to_linear_data(skb, + page_address(pg_info->page) + + pg_info->page_offset, len); + skb_put(skb, len); + put_page(pg_info->page); + } + + skb_pull(skb, rh->r_dh.len * 8); + skb->protocol = eth_type_trans(skb, skb->dev); + + if ((netdev->features & NETIF_F_RXCSUM) && + (((rh->r_dh.encap_on) && + (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) || + (!(rh->r_dh.encap_on) && + (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED)))) + /* checksum has already been verified */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb->ip_summed = CHECKSUM_NONE; + + /* Setting Encapsulation field on basis of status received + * from the firmware + */ + if (rh->r_dh.encap_on) { + skb->encapsulation = 1; + skb->csum_level = 1; + droq->stats.rx_vxlan++; + } + + /* inbound VLAN tag */ + if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && + rh->r_dh.vlan) { + u16 priority = rh->r_dh.priority; + u16 vid = rh->r_dh.vlan; + + vtag = (priority << VLAN_PRIO_SHIFT) | vid; + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag); + } + + packet_was_received = (napi_gro_receive(napi, skb) != GRO_DROP); + + if (packet_was_received) { + droq->stats.rx_bytes_received += len; + droq->stats.rx_pkts_received++; + netdev->last_rx = jiffies; + } else { + droq->stats.rx_dropped++; + netif_info(lio, rx_err, lio->netdev, + "droq:%d error rx_dropped:%llu\n", + droq->q_no, droq->stats.rx_dropped); + } + + } else { + recv_buffer_free(skb); + } +} + +/** + * \brief callback when receive interrupt occurs and we are in NAPI mode + * @param arg pointer to octeon output queue + */ +static void liquidio_vf_napi_drv_callback(void *arg) +{ + struct octeon_droq *droq = arg; + + napi_schedule_irqoff(&droq->napi); +} + +/** + * \brief Entry point for NAPI polling + * @param napi NAPI structure + * @param budget maximum number of items to process + */ +static int liquidio_napi_poll(struct napi_struct *napi, int budget) +{ + struct octeon_instr_queue *iq; + struct octeon_device *oct; + struct octeon_droq *droq; + int tx_done = 0, iq_no; + int work_done; + + droq = container_of(napi, struct octeon_droq, napi); + oct = droq->oct_dev; + iq_no = droq->q_no; + + /* Handle Droq descriptors */ + work_done = octeon_process_droq_poll_cmd(oct, droq->q_no, + POLL_EVENT_PROCESS_PKTS, + budget); + + /* Flush the instruction queue */ + iq = oct->instr_queue[iq_no]; + if (iq) { + /* Process iq buffers with in the budget limits */ + tx_done = octeon_flush_iq(oct, iq, 1, budget); + /* Update iq read-index rather than waiting for next interrupt. + * Return back if tx_done is false. 
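+ *
+ * NAPI contract: napi_complete() is called, and the queue interrupt
+ * re-armed via POLL_EVENT_ENABLE_INTR, only when work_done < budget
+ * and tx_done is true; any nonzero return keeps this poll routine
+ * scheduled.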
+ */ + update_txq_status(oct, iq_no); + } else { + dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n", + __func__, iq_no); + } + + if ((work_done < budget) && (tx_done)) { + napi_complete(napi); + octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no, + POLL_EVENT_ENABLE_INTR, 0); + return 0; + } + + return (!tx_done) ? (budget) : (work_done); +} + +/** + * \brief Setup input and output queues + * @param octeon_dev octeon device + * @param ifidx Interface index + * + * Note: Queues are with respect to the octeon device. Thus + * an input queue is for egress packets, and output queues + * are for ingress packets. + */ +static int setup_io_queues(struct octeon_device *octeon_dev, int ifidx) +{ + struct octeon_droq_ops droq_ops; + struct net_device *netdev; + static int cpu_id_modulus; + struct octeon_droq *droq; + struct napi_struct *napi; + static int cpu_id; + int num_tx_descs; + struct lio *lio; + int retval = 0; + int q, q_no; + + netdev = octeon_dev->props[ifidx].netdev; + + lio = GET_LIO(netdev); + + memset(&droq_ops, 0, sizeof(struct octeon_droq_ops)); + + droq_ops.fptr = liquidio_push_packet; + droq_ops.farg = netdev; + + droq_ops.poll_mode = 1; + droq_ops.napi_fn = liquidio_vf_napi_drv_callback; + cpu_id = 0; + cpu_id_modulus = num_present_cpus(); + + /* set up DROQs. */ + for (q = 0; q < lio->linfo.num_rxpciq; q++) { + q_no = lio->linfo.rxpciq[q].s.q_no; + + retval = octeon_setup_droq( + octeon_dev, q_no, + CFG_GET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(octeon_dev), + lio->ifidx), + CFG_GET_NUM_RX_BUF_SIZE_NIC_IF(octeon_get_conf(octeon_dev), + lio->ifidx), + NULL); + if (retval) { + dev_err(&octeon_dev->pci_dev->dev, + "%s : Runtime DROQ(RxQ) creation failed.\n", + __func__); + return 1; + } + + droq = octeon_dev->droq[q_no]; + napi = &droq->napi; + netif_napi_add(netdev, napi, liquidio_napi_poll, 64); + + /* designate a CPU for this droq */ + droq->cpu_id = cpu_id; + cpu_id++; + if (cpu_id >= cpu_id_modulus) + cpu_id = 0; + + octeon_register_droq_ops(octeon_dev, q_no, &droq_ops); + } + + /* 23XX VF can send/recv control messages (via the first VF-owned + * droq) from the firmware even if the ethX interface is down, + * so that's why poll_mode must be off for the first droq. + */ + octeon_dev->droq[0]->ops.poll_mode = 0; + + /* set up IQs. 
*/ + for (q = 0; q < lio->linfo.num_txpciq; q++) { + num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF( + octeon_get_conf(octeon_dev), lio->ifidx); + retval = octeon_setup_iq(octeon_dev, ifidx, q, + lio->linfo.txpciq[q], num_tx_descs, + netdev_get_tx_queue(netdev, q)); + if (retval) { + dev_err(&octeon_dev->pci_dev->dev, + " %s : Runtime IQ(TxQ) creation failed.\n", + __func__); + return 1; + } + } + + return 0; +} + +/** + * \brief Net device open for LiquidIO + * @param netdev network device + */ +static int liquidio_open(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct napi_struct *napi, *n; + + if (!oct->props[lio->ifidx].napi_enabled) { + list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) + napi_enable(napi); + + oct->props[lio->ifidx].napi_enabled = 1; + + oct->droq[0]->ops.poll_mode = 1; + } + + ifstate_set(lio, LIO_IFSTATE_RUNNING); + + /* Ready for link status updates */ + lio->intf_open = 1; + + netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n"); + start_txq(netdev); + + /* tell Octeon to start forwarding packets to host */ + send_rx_ctrl_cmd(lio, 1); + + dev_info(&oct->pci_dev->dev, "%s interface is opened\n", netdev->name); + + return 0; +} + +/** + * \brief Net device stop for LiquidIO + * @param netdev network device + */ +static int liquidio_stop(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + + netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n"); + /* Inform that netif carrier is down */ + lio->intf_open = 0; + lio->linfo.link.s.link_up = 0; + + netif_carrier_off(netdev); + lio->link_changes++; + + /* tell Octeon to stop forwarding packets to host */ + send_rx_ctrl_cmd(lio, 0); + + ifstate_reset(lio, LIO_IFSTATE_RUNNING); + + txqs_stop(netdev); + + dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name); + + return 0; +} + +/** + * \brief Converts a mask based on net device flags + * @param netdev network device + * + * This routine generates a octnet_ifflags mask from the net device flags + * received from the OS. 
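+ *
+ * The mapping, as implemented below (OCTNET_IFFLAG_UNICAST is always
+ * set):
+ *   IFF_PROMISC   -> OCTNET_IFFLAG_PROMISC
+ *   IFF_ALLMULTI  -> OCTNET_IFFLAG_ALLMULTI
+ *   IFF_MULTICAST -> OCTNET_IFFLAG_MULTICAST, plus ALLMULTI when the
+ *                    mc list exceeds MAX_OCTEON_MULTICAST_ADDR
+ *   IFF_BROADCAST -> OCTNET_IFFLAG_BROADCAST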
+ */ +static enum octnet_ifflags get_new_flags(struct net_device *netdev) +{ + enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST; + + if (netdev->flags & IFF_PROMISC) + f |= OCTNET_IFFLAG_PROMISC; + + if (netdev->flags & IFF_ALLMULTI) + f |= OCTNET_IFFLAG_ALLMULTI; + + if (netdev->flags & IFF_MULTICAST) { + f |= OCTNET_IFFLAG_MULTICAST; + + /* Accept all multicast addresses if there are more than we + * can handle + */ + if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR) + f |= OCTNET_IFFLAG_ALLMULTI; + } + + if (netdev->flags & IFF_BROADCAST) + f |= OCTNET_IFFLAG_BROADCAST; + + return f; +} + +static void liquidio_set_uc_list(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + struct netdev_hw_addr *ha; + u64 *mac; + + if (lio->netdev_uc_count == netdev_uc_count(netdev)) + return; + + if (netdev_uc_count(netdev) > MAX_NCTRL_UDD) { + dev_err(&oct->pci_dev->dev, "too many MAC addresses in netdev uc list\n"); + return; + } + + lio->netdev_uc_count = netdev_uc_count(netdev); + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + nctrl.ncmd.s.cmd = OCTNET_CMD_SET_UC_LIST; + nctrl.ncmd.s.more = lio->netdev_uc_count; + nctrl.ncmd.s.param1 = oct->vf_num; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + nctrl.netpndev = (u64)netdev; + nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; + + /* copy all the addresses into the udd */ + mac = &nctrl.udd[0]; + netdev_for_each_uc_addr(ha, netdev) { + ether_addr_copy(((u8 *)mac) + 2, ha->addr); + mac++; + } + + octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); +} + +/** + * \brief Net device set_multicast_list + * @param netdev network device + */ +static void liquidio_set_mcast_list(struct net_device *netdev) +{ + int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR); + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + struct netdev_hw_addr *ha; + u64 *mc; + int ret; + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + + /* Create a ctrl pkt command to be sent to core app. */ + nctrl.ncmd.u64 = 0; + nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST; + nctrl.ncmd.s.param1 = get_new_flags(netdev); + nctrl.ncmd.s.param2 = mc_count; + nctrl.ncmd.s.more = mc_count; + nctrl.netpndev = (u64)netdev; + nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; + + /* copy all the addresses into the udd */ + mc = &nctrl.udd[0]; + netdev_for_each_mc_addr(ha, netdev) { + *mc = 0; + ether_addr_copy(((u8 *)mc) + 2, ha->addr); + /* no need to swap bytes */ + if (++mc > &nctrl.udd[mc_count]) + break; + } + + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + + /* Apparently, any activity in this call from the kernel has to + * be atomic. So we won't wait for response. 
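+ *
+ * Hence wait_time is 0 below: the control packet is sent
+ * fire-and-forget instead of sleeping for a response, since this
+ * callback can be invoked in atomic context.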
+ */ + nctrl.wait_time = 0; + + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); + if (ret < 0) { + dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n", + ret); + } + + liquidio_set_uc_list(netdev); +} + +/** + * \brief Net device set_mac_address + * @param netdev network device + */ +static int liquidio_set_mac(struct net_device *netdev, void *p) +{ + struct sockaddr *addr = (struct sockaddr *)p; + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + int ret = 0; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + if (ether_addr_equal(addr->sa_data, netdev->dev_addr)) + return 0; + + if (lio->linfo.macaddr_is_admin_asgnd) + return -EPERM; + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + + nctrl.ncmd.u64 = 0; + nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR; + nctrl.ncmd.s.param1 = 0; + nctrl.ncmd.s.more = 1; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + nctrl.netpndev = (u64)netdev; + nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; + nctrl.wait_time = 100; + + nctrl.udd[0] = 0; + /* The MAC Address is presented in network byte order. */ + ether_addr_copy((u8 *)&nctrl.udd[0] + 2, addr->sa_data); + + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); + if (ret < 0) { + dev_err(&oct->pci_dev->dev, "MAC Address change failed\n"); + return -ENOMEM; + } + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data); + + return 0; +} + +/** + * \brief Net device get_stats + * @param netdev network device + */ +static struct net_device_stats *liquidio_get_stats(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + struct net_device_stats *stats = &netdev->stats; + u64 pkts = 0, drop = 0, bytes = 0; + struct oct_droq_stats *oq_stats; + struct oct_iq_stats *iq_stats; + struct octeon_device *oct; + int i, iq_no, oq_no; + + oct = lio->oct_dev; + + for (i = 0; i < lio->linfo.num_txpciq; i++) { + iq_no = lio->linfo.txpciq[i].s.q_no; + iq_stats = &oct->instr_queue[iq_no]->stats; + pkts += iq_stats->tx_done; + drop += iq_stats->tx_dropped; + bytes += iq_stats->tx_tot_bytes; + } + + stats->tx_packets = pkts; + stats->tx_bytes = bytes; + stats->tx_dropped = drop; + + pkts = 0; + drop = 0; + bytes = 0; + + for (i = 0; i < lio->linfo.num_rxpciq; i++) { + oq_no = lio->linfo.rxpciq[i].s.q_no; + oq_stats = &oct->droq[oq_no]->stats; + pkts += oq_stats->rx_pkts_received; + drop += (oq_stats->rx_dropped + + oq_stats->dropped_nodispatch + + oq_stats->dropped_toomany + + oq_stats->dropped_nomem); + bytes += oq_stats->rx_bytes_received; + } + + stats->rx_bytes = bytes; + stats->rx_packets = pkts; + stats->rx_dropped = drop; + + return stats; +} + +/** + * \brief Net device change_mtu + * @param netdev network device + */ +static int liquidio_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + + lio->mtu = new_mtu; + + netif_info(lio, probe, lio->netdev, "MTU Changed from %d to %d\n", + netdev->mtu, new_mtu); + dev_info(&oct->pci_dev->dev, "%s MTU Changed from %d to %d\n", + netdev->name, netdev->mtu, new_mtu); + + netdev->mtu = new_mtu; + + return 0; +} + +/** + * \brief Handler for SIOCSHWTSTAMP ioctl + * @param netdev network device + * @param ifr interface request + * @param cmd command + */ +static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) +{ + struct lio *lio = GET_LIO(netdev); + struct hwtstamp_config conf; + 
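+	/* A minimal userspace sketch of what typically lands here, assuming
+	 * only the standard SIOCSHWTSTAMP API from <linux/net_tstamp.h>;
+	 * the socket fd and interface name are placeholders:
+	 *
+	 *	struct hwtstamp_config cfg = {
+	 *		.tx_type   = HWTSTAMP_TX_ON,
+	 *		.rx_filter = HWTSTAMP_FILTER_ALL,
+	 *	};
+	 *	struct ifreq ifr;
+	 *
+	 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
+	 *	ifr.ifr_data = (void *)&cfg;
+	 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
+	 *
+	 * Leaving cfg.flags zeroed matches the -EINVAL check below.
+	 */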
+ if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf))) + return -EFAULT; + + if (conf.flags) + return -EINVAL; + + switch (conf.tx_type) { + case HWTSTAMP_TX_ON: + case HWTSTAMP_TX_OFF: + break; + default: + return -ERANGE; + } + + switch (conf.rx_filter) { + case HWTSTAMP_FILTER_NONE: + break; + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_SOME: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + conf.rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + return -ERANGE; + } + + if (conf.rx_filter == HWTSTAMP_FILTER_ALL) + ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); + + else + ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); + + return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0; +} + +/** + * \brief ioctl handler + * @param netdev network device + * @param ifr interface request + * @param cmd command + */ +static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCSHWTSTAMP: + return hwtstamp_ioctl(netdev, ifr); + default: + return -EOPNOTSUPP; + } +} + +static void handle_timestamp(struct octeon_device *oct, u32 status, void *buf) +{ + struct sk_buff *skb = (struct sk_buff *)buf; + struct octnet_buf_free_info *finfo; + struct oct_timestamp_resp *resp; + struct octeon_soft_command *sc; + struct lio *lio; + + finfo = (struct octnet_buf_free_info *)skb->cb; + lio = finfo->lio; + sc = finfo->sc; + oct = lio->oct_dev; + resp = (struct oct_timestamp_resp *)sc->virtrptr; + + if (status != OCTEON_REQUEST_DONE) { + dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. 
Status: %llx\n", + CVM_CAST64(status)); + resp->timestamp = 0; + } + + octeon_swap_8B_data(&resp->timestamp, 1); + + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { + struct skb_shared_hwtstamps ts; + u64 ns = resp->timestamp; + + netif_info(lio, tx_done, lio->netdev, + "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n", + skb, (unsigned long long)ns); + ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust); + skb_tstamp_tx(skb, &ts); + } + + octeon_free_soft_command(oct, sc); + tx_buffer_free(skb); +} + +/* \brief Send a data packet that will be timestamped + * @param oct octeon device + * @param ndata pointer to network data + * @param finfo pointer to private network data + */ +static int send_nic_timestamp_pkt(struct octeon_device *oct, + struct octnic_data_pkt *ndata, + struct octnet_buf_free_info *finfo) +{ + struct octeon_soft_command *sc; + int ring_doorbell; + struct lio *lio; + int retval; + u32 len; + + lio = finfo->lio; + + sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd, + sizeof(struct oct_timestamp_resp)); + finfo->sc = sc; + + if (!sc) { + dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n"); + return IQ_SEND_FAILED; + } + + if (ndata->reqtype == REQTYPE_NORESP_NET) + ndata->reqtype = REQTYPE_RESP_NET; + else if (ndata->reqtype == REQTYPE_NORESP_NET_SG) + ndata->reqtype = REQTYPE_RESP_NET_SG; + + sc->callback = handle_timestamp; + sc->callback_arg = finfo->skb; + sc->iq_no = ndata->q_no; + + len = (u32)((struct octeon_instr_ih3 *)(&sc->cmd.cmd3.ih3))->dlengsz; + + ring_doorbell = 1; + + retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd, + sc, len, ndata->reqtype); + + if (retval == IQ_SEND_FAILED) { + dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n", + retval); + octeon_free_soft_command(oct, sc); + } else { + netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n"); + } + + return retval; +} + +/** \brief Transmit networks packets to the Octeon interface + * @param skbuff skbuff struct to be passed to network layer. + * @param netdev pointer to network device + * @returns whether the packet was transmitted to the device okay or not + * (NETDEV_TX_OK or NETDEV_TX_BUSY) + */ +static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct octnet_buf_free_info *finfo; + union octnic_cmd_setup cmdsetup; + struct octnic_data_pkt ndata; + struct octeon_instr_irh *irh; + struct oct_iq_stats *stats; + struct octeon_device *oct; + int q_idx = 0, iq_no = 0; + union tx_info *tx_info; + struct lio *lio; + int status = 0; + u64 dptr = 0; + u32 tag = 0; + int j; + + lio = GET_LIO(netdev); + oct = lio->oct_dev; + + if (netif_is_multiqueue(netdev)) { + q_idx = skb->queue_mapping; + q_idx = (q_idx % (lio->linfo.num_txpciq)); + tag = q_idx; + iq_no = lio->linfo.txpciq[q_idx].s.q_no; + } else { + iq_no = lio->txq; + } + + stats = &oct->instr_queue[iq_no]->stats; + + /* Check for all conditions in which the current packet cannot be + * transmitted. + */ + if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) || + (!lio->linfo.link.s.link_up) || (skb->len <= 0)) { + netif_info(lio, tx_err, lio->netdev, "Transmit failed link_status : %d\n", + lio->linfo.link.s.link_up); + goto lio_xmit_failed; + } + + /* Use space in skb->cb to store info used to unmap and + * free the buffers. + */ + finfo = (struct octnet_buf_free_info *)skb->cb; + finfo->lio = lio; + finfo->skb = skb; + finfo->sc = NULL; + + /* Prepare the attributes for the data to be passed to OSI. 
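+	 * (ndata carries the target queue number, the payload size and the
+	 * command that octnet_prepare_pci_cmd() fills in below.)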
*/ + memset(&ndata, 0, sizeof(struct octnic_data_pkt)); + + ndata.buf = finfo; + + ndata.q_no = iq_no; + + if (netif_is_multiqueue(netdev)) { + if (octnet_iq_is_full(oct, ndata.q_no)) { + /* defer sending if queue is full */ + netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n", + ndata.q_no); + stats->tx_iq_busy++; + return NETDEV_TX_BUSY; + } + } else { + if (octnet_iq_is_full(oct, lio->txq)) { + /* defer sending if queue is full */ + stats->tx_iq_busy++; + netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n", + ndata.q_no); + return NETDEV_TX_BUSY; + } + } + + ndata.datasize = skb->len; + + cmdsetup.u64 = 0; + cmdsetup.s.iq_no = iq_no; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + if (skb->encapsulation) { + cmdsetup.s.tnl_csum = 1; + stats->tx_vxlan++; + } else { + cmdsetup.s.transport_csum = 1; + } + } + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + cmdsetup.s.timestamp = 1; + } + + if (!skb_shinfo(skb)->nr_frags) { + cmdsetup.s.u.datasize = skb->len; + octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); + /* Offload checksum calculation for TCP/UDP packets */ + dptr = dma_map_single(&oct->pci_dev->dev, + skb->data, + skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(&oct->pci_dev->dev, dptr)) { + dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n", + __func__); + return NETDEV_TX_BUSY; + } + + ndata.cmd.cmd3.dptr = dptr; + finfo->dptr = dptr; + ndata.reqtype = REQTYPE_NORESP_NET; + + } else { + struct skb_frag_struct *frag; + struct octnic_gather *g; + int i, frags; + + spin_lock(&lio->glist_lock[q_idx]); + g = (struct octnic_gather *)list_delete_head( + &lio->glist[q_idx]); + spin_unlock(&lio->glist_lock[q_idx]); + + if (!g) { + netif_info(lio, tx_err, lio->netdev, + "Transmit scatter gather: glist null!\n"); + goto lio_xmit_failed; + } + + cmdsetup.s.gather = 1; + cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1); + octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); + + memset(g->sg, 0, g->sg_size); + + g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev, + skb->data, + (skb->len - skb->data_len), + DMA_TO_DEVICE); + if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) { + dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n", + __func__); + return NETDEV_TX_BUSY; + } + add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0); + + frags = skb_shinfo(skb)->nr_frags; + i = 1; + while (frags--) { + frag = &skb_shinfo(skb)->frags[i - 1]; + + g->sg[(i >> 2)].ptr[(i & 3)] = + dma_map_page(&oct->pci_dev->dev, + frag->page.p, + frag->page_offset, + frag->size, + DMA_TO_DEVICE); + if (dma_mapping_error(&oct->pci_dev->dev, + g->sg[i >> 2].ptr[i & 3])) { + dma_unmap_single(&oct->pci_dev->dev, + g->sg[0].ptr[0], + skb->len - skb->data_len, + DMA_TO_DEVICE); + for (j = 1; j < i; j++) { + frag = &skb_shinfo(skb)->frags[j - 1]; + dma_unmap_page(&oct->pci_dev->dev, + g->sg[j >> 2].ptr[j & 3], + frag->size, + DMA_TO_DEVICE); + } + dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n", + __func__); + return NETDEV_TX_BUSY; + } + + add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3)); + i++; + } + + dptr = dma_map_single(&oct->pci_dev->dev, + g->sg, g->sg_size, + DMA_TO_DEVICE); + if (dma_mapping_error(&oct->pci_dev->dev, dptr)) { + dev_err(&oct->pci_dev->dev, "%s DMA mapping error 4\n", + __func__); + dma_unmap_single(&oct->pci_dev->dev, g->sg[0].ptr[0], + skb->len - skb->data_len, + DMA_TO_DEVICE); + for (j = 1; j <= frags; j++) { + frag = &skb_shinfo(skb)->frags[j - 1]; + 
dma_unmap_page(&oct->pci_dev->dev,
+					       g->sg[j >> 2].ptr[j & 3],
+					       frag->size, DMA_TO_DEVICE);
+			}
+			return NETDEV_TX_BUSY;
+		}
+
+		ndata.cmd.cmd3.dptr = dptr;
+		finfo->dptr = dptr;
+		finfo->g = g;
+
+		ndata.reqtype = REQTYPE_NORESP_NET_SG;
+	}
+
+	irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
+	tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
+
+	if (skb_shinfo(skb)->gso_size) {
+		tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
+		tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
+	}
+
+	/* HW insert VLAN tag */
+	if (skb_vlan_tag_present(skb)) {
+		irh->priority = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT;
+		irh->vlan = skb_vlan_tag_get(skb) & VLAN_VID_MASK;
+	}
+
+	if (unlikely(cmdsetup.s.timestamp))
+		status = send_nic_timestamp_pkt(oct, &ndata, finfo);
+	else
+		status = octnet_send_nic_data_pkt(oct, &ndata);
+	if (status == IQ_SEND_FAILED)
+		goto lio_xmit_failed;
+
+	netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
+
+	if (status == IQ_SEND_STOP) {
+		dev_err(&oct->pci_dev->dev, "Rcvd IQ_SEND_STOP signal; stopping IQ-%d\n",
+			iq_no);
+		stop_q(lio->netdev, q_idx);
+	}
+
+	netif_trans_update(netdev);
+
+	if (skb_shinfo(skb)->gso_size)
+		stats->tx_done += skb_shinfo(skb)->gso_segs;
+	else
+		stats->tx_done++;
+	stats->tx_tot_bytes += skb->len;
+
+	return NETDEV_TX_OK;
+
+lio_xmit_failed:
+	stats->tx_dropped++;
+	netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
+		   iq_no, stats->tx_dropped);
+	if (dptr)
+		dma_unmap_single(&oct->pci_dev->dev, dptr,
+				 ndata.datasize, DMA_TO_DEVICE);
+	tx_buffer_free(skb);
+	return NETDEV_TX_OK;
+}
+
+/** \brief Network device Tx timeout
+ * @param netdev    pointer to network device
+ */
+static void liquidio_tx_timeout(struct net_device *netdev)
+{
+	struct lio *lio;
+
+	lio = GET_LIO(netdev);
+
+	netif_info(lio, tx_err, lio->netdev,
+		   "Transmit timeout tx_dropped:%lu, waking up queues now!!\n",
+		   netdev->stats.tx_dropped);
+	netif_trans_update(netdev);
+	txqs_wake(netdev);
+}
+
+static int
+liquidio_vlan_rx_add_vid(struct net_device *netdev,
+			 __be16 proto __attribute__((unused)), u16 vid)
+{
+	struct lio *lio = GET_LIO(netdev);
+	struct octeon_device *oct = lio->oct_dev;
+	struct octnic_ctrl_pkt nctrl;
+	int ret = 0;
+
+	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+	nctrl.ncmd.u64 = 0;
+	nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
+	nctrl.ncmd.s.param1 = vid;
+	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+	nctrl.wait_time = 100;
+	nctrl.netpndev = (u64)netdev;
+	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+	if (ret < 0) {
+		dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
+			ret);
+	}
+
+	return ret;
+}
+
+static int
+liquidio_vlan_rx_kill_vid(struct net_device *netdev,
+			  __be16 proto __attribute__((unused)), u16 vid)
+{
+	struct lio *lio = GET_LIO(netdev);
+	struct octeon_device *oct = lio->oct_dev;
+	struct octnic_ctrl_pkt nctrl;
+	int ret = 0;
+
+	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+	nctrl.ncmd.u64 = 0;
+	nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
+	nctrl.ncmd.s.param1 = vid;
+	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+	nctrl.wait_time = 100;
+	nctrl.netpndev = (u64)netdev;
+	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+	if (ret < 0) {
+		dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
+			ret);
+	}
+	return ret;
+}
+
+/** Send a command to enable/disable RX checksum offload
+ * @param netdev
pointer to network device + * @param command OCTNET_CMD_TNL_RX_CSUM_CTL + * @param rx_cmd_bit OCTNET_CMD_RXCSUM_ENABLE/ + * OCTNET_CMD_RXCSUM_DISABLE + * @returns SUCCESS or FAILURE + */ +static int liquidio_set_rxcsum_command(struct net_device *netdev, int command, + u8 rx_cmd) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + int ret = 0; + + nctrl.ncmd.u64 = 0; + nctrl.ncmd.s.cmd = command; + nctrl.ncmd.s.param1 = rx_cmd; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + nctrl.wait_time = 100; + nctrl.netpndev = (u64)netdev; + nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; + + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); + if (ret < 0) { + dev_err(&oct->pci_dev->dev, "DEVFLAGS RXCSUM change failed in core (ret:0x%x)\n", + ret); + } + return ret; +} + +/** Sending command to add/delete VxLAN UDP port to firmware + * @param netdev pointer to network device + * @param command OCTNET_CMD_VXLAN_PORT_CONFIG + * @param vxlan_port VxLAN port to be added or deleted + * @param vxlan_cmd_bit OCTNET_CMD_VXLAN_PORT_ADD, + * OCTNET_CMD_VXLAN_PORT_DEL + * @returns SUCCESS or FAILURE + */ +static int liquidio_vxlan_port_command(struct net_device *netdev, int command, + u16 vxlan_port, u8 vxlan_cmd_bit) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + int ret = 0; + + nctrl.ncmd.u64 = 0; + nctrl.ncmd.s.cmd = command; + nctrl.ncmd.s.more = vxlan_cmd_bit; + nctrl.ncmd.s.param1 = vxlan_port; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + nctrl.wait_time = 100; + nctrl.netpndev = (u64)netdev; + nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; + + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); + if (ret < 0) { + dev_err(&oct->pci_dev->dev, + "DEVFLAGS VxLAN port add/delete failed in core (ret : 0x%x)\n", + ret); + } + return ret; +} + +/** \brief Net device fix features + * @param netdev pointer to network device + * @param request features requested + * @returns updated features list + */ +static netdev_features_t liquidio_fix_features(struct net_device *netdev, + netdev_features_t request) +{ + struct lio *lio = netdev_priv(netdev); + + if ((request & NETIF_F_RXCSUM) && + !(lio->dev_capability & NETIF_F_RXCSUM)) + request &= ~NETIF_F_RXCSUM; + + if ((request & NETIF_F_HW_CSUM) && + !(lio->dev_capability & NETIF_F_HW_CSUM)) + request &= ~NETIF_F_HW_CSUM; + + if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO)) + request &= ~NETIF_F_TSO; + + if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6)) + request &= ~NETIF_F_TSO6; + + if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO)) + request &= ~NETIF_F_LRO; + + /* Disable LRO if RXCSUM is off */ + if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) && + (lio->dev_capability & NETIF_F_LRO)) + request &= ~NETIF_F_LRO; + + return request; +} + +/** \brief Net device set features + * @param netdev pointer to network device + * @param features features to enable/disable + */ +static int liquidio_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct lio *lio = netdev_priv(netdev); + + if (!((netdev->features ^ features) & NETIF_F_LRO)) + return 0; + + if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO)) + liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE, + OCTNIC_LROIPV4 | OCTNIC_LROIPV6); + else if (!(features & NETIF_F_LRO) && + (lio->dev_capability & NETIF_F_LRO)) + liquidio_set_feature(netdev, 
OCTNET_CMD_LRO_DISABLE, + OCTNIC_LROIPV4 | OCTNIC_LROIPV6); + if (!(netdev->features & NETIF_F_RXCSUM) && + (lio->enc_dev_capability & NETIF_F_RXCSUM) && + (features & NETIF_F_RXCSUM)) + liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, + OCTNET_CMD_RXCSUM_ENABLE); + else if ((netdev->features & NETIF_F_RXCSUM) && + (lio->enc_dev_capability & NETIF_F_RXCSUM) && + !(features & NETIF_F_RXCSUM)) + liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, + OCTNET_CMD_RXCSUM_DISABLE); + + return 0; +} + +static void liquidio_add_vxlan_port(struct net_device *netdev, + struct udp_tunnel_info *ti) +{ + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + return; + + liquidio_vxlan_port_command(netdev, + OCTNET_CMD_VXLAN_PORT_CONFIG, + htons(ti->port), + OCTNET_CMD_VXLAN_PORT_ADD); +} + +static void liquidio_del_vxlan_port(struct net_device *netdev, + struct udp_tunnel_info *ti) +{ + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + return; + + liquidio_vxlan_port_command(netdev, + OCTNET_CMD_VXLAN_PORT_CONFIG, + htons(ti->port), + OCTNET_CMD_VXLAN_PORT_DEL); +} + +static const struct net_device_ops lionetdevops = { + .ndo_open = liquidio_open, + .ndo_stop = liquidio_stop, + .ndo_start_xmit = liquidio_xmit, + .ndo_get_stats = liquidio_get_stats, + .ndo_set_mac_address = liquidio_set_mac, + .ndo_set_rx_mode = liquidio_set_mcast_list, + .ndo_tx_timeout = liquidio_tx_timeout, + .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid, + .ndo_change_mtu = liquidio_change_mtu, + .ndo_do_ioctl = liquidio_ioctl, + .ndo_fix_features = liquidio_fix_features, + .ndo_set_features = liquidio_set_features, + .ndo_udp_tunnel_add = liquidio_add_vxlan_port, + .ndo_udp_tunnel_del = liquidio_del_vxlan_port, + .ndo_select_queue = select_q, +}; + +static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf) +{ + struct octeon_device *oct = (struct octeon_device *)buf; + struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt; + union oct_link_status *ls; + int gmxport = 0; + int i; + + if (recv_pkt->buffer_size[0] != sizeof(*ls)) { + dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n", + recv_pkt->buffer_size[0], + recv_pkt->rh.r_nic_info.gmxport); + goto nic_info_err; + } + + gmxport = recv_pkt->rh.r_nic_info.gmxport; + ls = (union oct_link_status *)get_rbd(recv_pkt->buffer_ptr[0]); + + octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3); + + for (i = 0; i < oct->ifcount; i++) { + if (oct->props[i].gmxport == gmxport) { + update_link_status(oct->props[i].netdev, ls); + break; + } + } + +nic_info_err: + for (i = 0; i < recv_pkt->buffer_count; i++) + recv_buffer_free(recv_pkt->buffer_ptr[i]); + octeon_free_recv_info(recv_info); + return 0; +} + +/** + * \brief Setup network interfaces + * @param octeon_dev octeon device + * + * Called during init time for each device. It assumes the NIC + * is already up and running. The link information for each + * interface is passed in link_info. 
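+ * For each interface it sends an OPCODE_NIC_IF_CFG soft command, waits for
+ * the firmware response, derives the queue counts from the returned iq/oq
+ * masks, and then allocates and registers the net device.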
+ */ +static int setup_nic_devices(struct octeon_device *octeon_dev) +{ + int retval, num_iqueues, num_oqueues; + struct liquidio_if_cfg_context *ctx; + u32 resp_size, ctx_size, data_size; + struct liquidio_if_cfg_resp *resp; + struct octeon_soft_command *sc; + union oct_nic_if_cfg if_cfg; + struct octdev_props *props; + struct net_device *netdev; + struct lio_version *vdata; + struct lio *lio = NULL; + u8 mac[ETH_ALEN], i, j; + u32 ifidx_or_pfnum; + + ifidx_or_pfnum = octeon_dev->pf_num; + + /* This is to handle link status changes */ + octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, OPCODE_NIC_INFO, + lio_nic_info, octeon_dev); + + /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions. + * They are handled directly. + */ + octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET, + free_netbuf); + + octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG, + free_netsgbuf); + + octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG, + free_netsgbuf_with_resp); + + for (i = 0; i < octeon_dev->ifcount; i++) { + resp_size = sizeof(struct liquidio_if_cfg_resp); + ctx_size = sizeof(struct liquidio_if_cfg_context); + data_size = sizeof(struct lio_version); + sc = (struct octeon_soft_command *) + octeon_alloc_soft_command(octeon_dev, data_size, + resp_size, ctx_size); + resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; + ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; + vdata = (struct lio_version *)sc->virtdptr; + + *((u64 *)vdata) = 0; + vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION); + vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION); + vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION); + + WRITE_ONCE(ctx->cond, 0); + ctx->octeon_id = lio_get_device_id(octeon_dev); + init_waitqueue_head(&ctx->wc); + + if_cfg.u64 = 0; + + if_cfg.s.num_iqueues = octeon_dev->sriov_info.rings_per_vf; + if_cfg.s.num_oqueues = octeon_dev->sriov_info.rings_per_vf; + if_cfg.s.base_queue = 0; + + sc->iq_no = 0; + + octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC, + OPCODE_NIC_IF_CFG, 0, if_cfg.u64, + 0); + + sc->callback = if_cfg_callback; + sc->callback_arg = sc; + sc->wait_time = 5000; + + retval = octeon_send_soft_command(octeon_dev, sc); + if (retval == IQ_SEND_FAILED) { + dev_err(&octeon_dev->pci_dev->dev, + "iq/oq config failed status: %x\n", retval); + /* Soft instr is freed by driver in case of failure. */ + goto setup_nic_dev_fail; + } + + /* Sleep on a wait queue till the cond flag indicates that the + * response arrived or timed-out. 
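+		 * (if_cfg_callback is expected to set ctx->cond and wake up
+		 * ctx->wc once the response for this soft command arrives.)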
+ */ + if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) { + dev_err(&octeon_dev->pci_dev->dev, "Wait interrupted\n"); + goto setup_nic_wait_intr; + } + + retval = resp->status; + if (retval) { + dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n"); + goto setup_nic_dev_fail; + } + + octeon_swap_8B_data((u64 *)(&resp->cfg_info), + (sizeof(struct liquidio_if_cfg_info)) >> 3); + + num_iqueues = hweight64(resp->cfg_info.iqmask); + num_oqueues = hweight64(resp->cfg_info.oqmask); + + if (!(num_iqueues) || !(num_oqueues)) { + dev_err(&octeon_dev->pci_dev->dev, + "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n", + resp->cfg_info.iqmask, resp->cfg_info.oqmask); + goto setup_nic_dev_fail; + } + dev_dbg(&octeon_dev->pci_dev->dev, + "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n", + i, resp->cfg_info.iqmask, resp->cfg_info.oqmask, + num_iqueues, num_oqueues); + + netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues); + + if (!netdev) { + dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n"); + goto setup_nic_dev_fail; + } + + SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev); + + /* Associate the routines that will handle different + * netdev tasks. + */ + netdev->netdev_ops = &lionetdevops; + + lio = GET_LIO(netdev); + + memset(lio, 0, sizeof(struct lio)); + + lio->ifidx = ifidx_or_pfnum; + + props = &octeon_dev->props[i]; + props->gmxport = resp->cfg_info.linfo.gmxport; + props->netdev = netdev; + + lio->linfo.num_rxpciq = num_oqueues; + lio->linfo.num_txpciq = num_iqueues; + + for (j = 0; j < num_oqueues; j++) { + lio->linfo.rxpciq[j].u64 = + resp->cfg_info.linfo.rxpciq[j].u64; + } + for (j = 0; j < num_iqueues; j++) { + lio->linfo.txpciq[j].u64 = + resp->cfg_info.linfo.txpciq[j].u64; + } + + lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr; + lio->linfo.gmxport = resp->cfg_info.linfo.gmxport; + lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64; + lio->linfo.macaddr_is_admin_asgnd = + resp->cfg_info.linfo.macaddr_is_admin_asgnd; + + lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + + lio->dev_capability = NETIF_F_HIGHDMA + | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM + | NETIF_F_SG | NETIF_F_RXCSUM + | NETIF_F_TSO | NETIF_F_TSO6 + | NETIF_F_GRO + | NETIF_F_LRO; + netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE); + + /* Copy of transmit encapsulation capabilities: + * TSO, TSO6, Checksums for this device + */ + lio->enc_dev_capability = NETIF_F_IP_CSUM + | NETIF_F_IPV6_CSUM + | NETIF_F_GSO_UDP_TUNNEL + | NETIF_F_HW_CSUM | NETIF_F_SG + | NETIF_F_RXCSUM + | NETIF_F_TSO | NETIF_F_TSO6 + | NETIF_F_LRO; + + netdev->hw_enc_features = + (lio->enc_dev_capability & ~NETIF_F_LRO); + netdev->vlan_features = lio->dev_capability; + /* Add any unchangeable hw features */ + lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX; + + netdev->features = (lio->dev_capability & ~NETIF_F_LRO); + + netdev->hw_features = lio->dev_capability; + + /* MTU range: 68 - 16000 */ + netdev->min_mtu = LIO_MIN_MTU_SIZE; + netdev->max_mtu = LIO_MAX_MTU_SIZE; + + /* Point to the properties for octeon device to which this + * interface belongs. 
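+		 * (Dispatch handlers such as lio_nic_info() use these props
+		 * entries to map a gmxport in a firmware message back to the
+		 * right netdev.)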
+		 */
+		lio->oct_dev = octeon_dev;
+		lio->octprops = props;
+		lio->netdev = netdev;
+
+		dev_dbg(&octeon_dev->pci_dev->dev,
+			"if%d gmx: %d hw_addr: 0x%llx\n", i,
+			lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
+
+		/* 64-bit swap required on LE machines */
+		octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
+		for (j = 0; j < ETH_ALEN; j++)
+			mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
+
+		/* Copy MAC Address to OS network device structure */
+		ether_addr_copy(netdev->dev_addr, mac);
+
+		if (setup_io_queues(octeon_dev, i)) {
+			dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
+			goto setup_nic_dev_fail;
+		}
+
+		ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
+
+		/* For VFs, enable Octeon device interrupts here,
+		 * as this is contingent upon IO queue setup
+		 */
+		octeon_dev->fn_list.enable_interrupt(octeon_dev,
+						     OCTEON_ALL_INTR);
+
+		/* By default all interfaces on a single Octeon use the same
+		 * tx and rx queues
+		 */
+		lio->txq = lio->linfo.txpciq[0].s.q_no;
+		lio->rxq = lio->linfo.rxpciq[0].s.q_no;
+
+		lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
+		lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
+
+		if (setup_glists(lio, num_iqueues)) {
+			dev_err(&octeon_dev->pci_dev->dev,
+				"Gather list allocation failed\n");
+			goto setup_nic_dev_fail;
+		}
+
+		/* Register ethtool support */
+		liquidio_set_ethtool_ops(netdev);
+		if (lio->oct_dev->chip_id == OCTEON_CN23XX_VF_VID)
+			octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
+		else
+			octeon_dev->priv_flags = 0x0;
+
+		if (netdev->features & NETIF_F_LRO)
+			liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
+					     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
+
+		if ((debug != -1) && (debug & NETIF_MSG_HW))
+			liquidio_set_feature(netdev, OCTNET_CMD_VERBOSE_ENABLE,
+					     0);
+
+		if (setup_link_status_change_wq(netdev))
+			goto setup_nic_dev_fail;
+
+		/* Register the network device with the OS */
+		if (register_netdev(netdev)) {
+			dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
+			goto setup_nic_dev_fail;
+		}
+
+		dev_dbg(&octeon_dev->pci_dev->dev,
+			"Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
+			i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
+		netif_carrier_off(netdev);
+		lio->link_changes++;
+
+		ifstate_set(lio, LIO_IFSTATE_REGISTERED);
+
+		/* Tell the firmware to enable Rx checksum offload by default
+		 * while setting up this device.
+		 */
+		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
+					    OCTNET_CMD_RXCSUM_ENABLE);
+		liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
+				     OCTNET_CMD_TXCSUM_ENABLE);
+
+		dev_dbg(&octeon_dev->pci_dev->dev,
+			"NIC ifidx:%d Setup successful\n", i);
+
+		octeon_free_soft_command(octeon_dev, sc);
+	}
+
+	return 0;
+
+setup_nic_dev_fail:
+
+	octeon_free_soft_command(octeon_dev, sc);
+
+setup_nic_wait_intr:
+
+	while (i--) {
+		dev_err(&octeon_dev->pci_dev->dev,
+			"NIC ifidx:%d Setup failed\n", i);
+		liquidio_destroy_nic_device(octeon_dev, i);
+	}
+	return -ENODEV;
+}
+
+/**
+ * \brief Initialize the NIC
+ * @param oct octeon device
+ *
+ * This initialization routine is called once the Octeon device application is
+ * up and running.
+ */
+static int liquidio_init_nic_module(struct octeon_device *oct)
+{
+	struct oct_intrmod_cfg *intrmod_cfg;
+	int num_nic_ports = 1;
+	int i, retval = 0;
+
+	dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
+
+	/* Only the default iq and oq were initialized; initialize the
+	 * remaining queues as well, and run the port_config command for
+	 * each port.
+	 */
+	oct->ifcount = num_nic_ports;
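+	/* A CN23XX VF exposes a single NIC port (num_nic_ports is 1 here),
+	 * so a single octdev_props slot is all that setup_nic_devices()
+	 * will populate.
+	 */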
memset(oct->props, 0, + sizeof(struct octdev_props) * num_nic_ports); + + for (i = 0; i < MAX_OCTEON_LINKS; i++) + oct->props[i].gmxport = -1; + + retval = setup_nic_devices(oct); + if (retval) { + dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n"); + goto octnet_init_failure; + } + + /* Initialize interrupt moderation params */ + intrmod_cfg = &((struct octeon_device *)oct)->intrmod; + intrmod_cfg->rx_enable = 1; + intrmod_cfg->check_intrvl = LIO_INTRMOD_CHECK_INTERVAL; + intrmod_cfg->maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR; + intrmod_cfg->minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR; + intrmod_cfg->rx_maxcnt_trigger = LIO_INTRMOD_RXMAXCNT_TRIGGER; + intrmod_cfg->rx_maxtmr_trigger = LIO_INTRMOD_RXMAXTMR_TRIGGER; + intrmod_cfg->rx_mintmr_trigger = LIO_INTRMOD_RXMINTMR_TRIGGER; + intrmod_cfg->rx_mincnt_trigger = LIO_INTRMOD_RXMINCNT_TRIGGER; + intrmod_cfg->tx_enable = 1; + intrmod_cfg->tx_maxcnt_trigger = LIO_INTRMOD_TXMAXCNT_TRIGGER; + intrmod_cfg->tx_mincnt_trigger = LIO_INTRMOD_TXMINCNT_TRIGGER; + intrmod_cfg->rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct)); + intrmod_cfg->rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct)); + intrmod_cfg->tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct)); + dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n"); + + return retval; + +octnet_init_failure: + + oct->ifcount = 0; + + return retval; +} + +/** + * \brief Device initialization for each Octeon device that is probed + * @param octeon_dev octeon device + */ +static int octeon_device_init(struct octeon_device *oct) +{ + u32 rev_id; + int j; + + atomic_set(&oct->status, OCT_DEV_BEGIN_STATE); + + /* Enable access to the octeon device and make its DMA capability + * known to the OS. + */ + if (octeon_pci_os_setup(oct)) + return 1; + atomic_set(&oct->status, OCT_DEV_PCI_ENABLE_DONE); + + oct->chip_id = OCTEON_CN23XX_VF_VID; + pci_read_config_dword(oct->pci_dev, 8, &rev_id); + oct->rev_id = rev_id & 0xff; + + if (cn23xx_setup_octeon_vf_device(oct)) + return 1; + + atomic_set(&oct->status, OCT_DEV_PCI_MAP_DONE); + + oct->app_mode = CVM_DRV_NIC_APP; + + /* Initialize the dispatch mechanism used to push packets arriving on + * Octeon Output queues. + */ + if (octeon_init_dispatch_list(oct)) + return 1; + + atomic_set(&oct->status, OCT_DEV_DISPATCH_INIT_DONE); + + if (octeon_set_io_queues_off(oct)) { + dev_err(&oct->pci_dev->dev, "setting io queues off failed\n"); + return 1; + } + + if (oct->fn_list.setup_device_regs(oct)) { + dev_err(&oct->pci_dev->dev, "device registers configuration failed\n"); + return 1; + } + + /* Initialize soft command buffer pool */ + if (octeon_setup_sc_buffer_pool(oct)) { + dev_err(&oct->pci_dev->dev, "sc buffer pool allocation failed\n"); + return 1; + } + atomic_set(&oct->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE); + + /* Setup the data structures that manage this Octeon's Input queues. */ + if (octeon_setup_instr_queues(oct)) { + dev_err(&oct->pci_dev->dev, "instruction queue initialization failed\n"); + return 1; + } + atomic_set(&oct->status, OCT_DEV_INSTR_QUEUE_INIT_DONE); + + /* Initialize lists to manage the requests of different types that + * arrive from user & kernel applications for this octeon device. 
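+	 * (These response lists are what soft-command completions such as
+	 * handle_timestamp() and if_cfg_callback() are driven from.)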
+ */ + if (octeon_setup_response_list(oct)) { + dev_err(&oct->pci_dev->dev, "Response list allocation failed\n"); + return 1; + } + atomic_set(&oct->status, OCT_DEV_RESP_LIST_INIT_DONE); + + if (octeon_setup_output_queues(oct)) { + dev_err(&oct->pci_dev->dev, "Output queue initialization failed\n"); + return 1; + } + atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE); + + if (oct->fn_list.setup_mbox(oct)) { + dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n"); + return 1; + } + atomic_set(&oct->status, OCT_DEV_MBOX_SETUP_DONE); + + if (octeon_allocate_ioq_vector(oct)) { + dev_err(&oct->pci_dev->dev, "ioq vector allocation failed\n"); + return 1; + } + atomic_set(&oct->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE); + + dev_info(&oct->pci_dev->dev, "OCTEON_CN23XX VF Version: %s, %d ioqs\n", + LIQUIDIO_VERSION, oct->sriov_info.rings_per_vf); + + /* Setup the interrupt handler and record the INT SUM register address*/ + if (octeon_setup_interrupt(oct)) + return 1; + + if (cn23xx_octeon_pfvf_handshake(oct)) + return 1; + + /* Enable Octeon device interrupts */ + oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR); + + atomic_set(&oct->status, OCT_DEV_INTR_SET_DONE); + + /* Enable the input and output queues for this Octeon device */ + if (oct->fn_list.enable_io_queues(oct)) { + dev_err(&oct->pci_dev->dev, "enabling io queues failed\n"); + return 1; + } + + atomic_set(&oct->status, OCT_DEV_IO_QUEUES_DONE); + + atomic_set(&oct->status, OCT_DEV_HOST_OK); + + /* Send Credit for Octeon Output queues. Credits are always sent after + * the output queue is enabled. + */ + for (j = 0; j < oct->num_oqs; j++) + writel(oct->droq[j]->max_count, oct->droq[j]->pkts_credit_reg); + + /* Packets can start arriving on the output queues from this point. */ + + atomic_set(&oct->status, OCT_DEV_CORE_OK); + + atomic_set(&oct->status, OCT_DEV_RUNNING); + + if (liquidio_init_nic_module(oct)) + return 1; + + return 0; +} + +static int __init liquidio_vf_init(void) +{ + octeon_init_device_list(0); + return pci_register_driver(&liquidio_vf_pci_driver); +} + +static void __exit liquidio_vf_exit(void) +{ + pci_unregister_driver(&liquidio_vf_pci_driver); + + pr_info("LiquidIO_VF network module is now unloaded\n"); +} + +module_init(liquidio_vf_init); +module_exit(liquidio_vf_exit); diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h index 0d990accb65e..ba329f6ca779 100644 --- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h +++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h @@ -1,25 +1,20 @@ /********************************************************************** -* Author: Cavium, Inc. -* -* Contact: support@cavium.com -* Please include "LiquidIO" in the subject. -* -* Copyright (c) 2003-2015 Cavium, Inc. -* -* This file is free software; you can redistribute it and/or modify -* it under the terms of the GNU General Public License, Version 2, as -* published by the Free Software Foundation. -* -* This file is distributed in the hope that it will be useful, but -* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty -* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or -* NONINFRINGEMENT. See the GNU General Public License for more -* details. -* -* This file may also be available under a different license from Cavium. -* Contact Cavium, Inc. for more information -**********************************************************************/ - + * Author: Cavium, Inc. 
+ * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. + ***********************************************************************/ /*! \file liquidio_common.h * \brief Common: Structures and macros used in PCI-NIC package by core and * host driver. @@ -68,12 +63,10 @@ enum octeon_tag_type { */ #define OPCODE_CORE 0 /* used for generic core operations */ #define OPCODE_NIC 1 /* used for NIC operations */ -#define OPCODE_LAST OPCODE_NIC - /* Subcodes are used by host driver/apps to identify the sub-operation * for the core. They only need to by unique for a given subsystem. */ -#define OPCODE_SUBCODE(op, sub) (((op & 0x0f) << 8) | ((sub) & 0x7f)) +#define OPCODE_SUBCODE(op, sub) ((((op) & 0x0f) << 8) | ((sub) & 0x7f)) /** OPCODE_CORE subcodes. For future use. */ @@ -89,13 +82,13 @@ enum octeon_tag_type { #define OPCODE_NIC_TIMESTAMP 0x07 #define OPCODE_NIC_INTRMOD_CFG 0x08 #define OPCODE_NIC_IF_CFG 0x09 +#define OPCODE_NIC_VF_DRV_NOTICE 0x0A +#define VF_DRV_LOADED 1 +#define VF_DRV_REMOVED -1 +#define VF_DRV_MACADDR_CHANGED 2 #define CORE_DRV_TEST_SCATTER_OP 0xFFF5 -#define OPCODE_SLOW_PATH(rh) \ - (OPCODE_SUBCODE(rh->r.opcode, rh->r.subcode) != \ - OPCODE_SUBCODE(OPCODE_NIC, OPCODE_NIC_NW_DATA)) - /* Application codes advertised by the core driver initialization packet. */ #define CVM_DRV_APP_START 0x0 #define CVM_DRV_NO_APP 0 @@ -105,31 +98,15 @@ enum octeon_tag_type { #define CVM_DRV_INVALID_APP (CVM_DRV_APP_START + 0x2) #define CVM_DRV_APP_END (CVM_DRV_INVALID_APP - 1) -/* Macro to increment index. - * Index is incremented by count; if the sum exceeds - * max, index is wrapped-around to the start. 
- */ -#define INCR_INDEX(index, count, max) \ -do { \ - if (((index) + (count)) >= (max)) \ - index = ((index) + (count)) - (max); \ - else \ - index += (count); \ -} while (0) - -#define INCR_INDEX_BY1(index, max) \ -do { \ - if ((++(index)) == (max)) \ - index = 0; \ -} while (0) - -#define DECR_INDEX(index, count, max) \ -do { \ - if ((count) > (index)) \ - index = ((max) - ((count - index))); \ - else \ - index -= count; \ -} while (0) +static inline u32 incr_index(u32 index, u32 count, u32 max) +{ + if ((index + count) >= max) + index = index + count - max; + else + index += count; + + return index; +} #define OCT_BOARD_NAME 32 #define OCT_SERIAL_LEN 64 @@ -235,6 +212,8 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry, #define OCTNET_CMD_ID_ACTIVE 0x1a +#define OCTNET_CMD_SET_UC_LIST 0x1b +#define OCTNET_CMD_SET_VF_LINKSTATE 0x1c #define OCTNET_CMD_VXLAN_PORT_ADD 0x0 #define OCTNET_CMD_VXLAN_PORT_DEL 0x1 #define OCTNET_CMD_RXCSUM_ENABLE 0x0 @@ -731,13 +710,15 @@ struct oct_link_info { #ifdef __BIG_ENDIAN_BITFIELD u64 gmxport:16; - u64 rsvd:32; + u64 macaddr_is_admin_asgnd:1; + u64 rsvd:31; u64 num_txpciq:8; u64 num_rxpciq:8; #else u64 num_rxpciq:8; u64 num_txpciq:8; - u64 rsvd:32; + u64 rsvd:31; + u64 macaddr_is_admin_asgnd:1; u64 gmxport:16; #endif @@ -827,6 +808,16 @@ struct oct_link_stats { }; +static inline int opcode_slow_path(union octeon_rh *rh) +{ + u16 subcode1, subcode2; + + subcode1 = OPCODE_SUBCODE((rh)->r.opcode, (rh)->r.subcode); + subcode2 = OPCODE_SUBCODE(OPCODE_NIC, OPCODE_NIC_NW_DATA); + + return (subcode2 != subcode1); +} + #define LIO68XX_LED_CTRL_ADDR 0x3501 #define LIO68XX_LED_CTRL_CFGON 0x1f #define LIO68XX_LED_CTRL_CFGOFF 0x100 diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_image.h b/drivers/net/ethernet/cavium/liquidio/liquidio_image.h index 93819bd8602b..78a3685f6fe0 100644 --- a/drivers/net/ethernet/cavium/liquidio/liquidio_image.h +++ b/drivers/net/ethernet/cavium/liquidio/liquidio_image.h @@ -1,24 +1,20 @@ /********************************************************************** -* Author: Cavium, Inc. -* -* Contact: support@cavium.com -* Please include "LiquidIO" in the subject. -* -* Copyright (c) 2003-2015 Cavium, Inc. -* -* This file is free software; you can redistribute it and/or modify -* it under the terms of the GNU General Public License, Version 2, as -* published by the Free Software Foundation. -* -* This file is distributed in the hope that it will be useful, but -* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty -* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or -* NONINFRINGEMENT. See the GNU General Public License for more -* details. -* -* This file may also be available under a different license from Cavium. -* Contact Cavium, Inc. for more information -**********************************************************************/ + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. 
+ ***********************************************************************/ #ifndef _LIQUIDIO_IMAGE_H_ #define _LIQUIDIO_IMAGE_H_ diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_config.h b/drivers/net/ethernet/cavium/liquidio/octeon_config.h index c76556809ed1..1cb3514fc949 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_config.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_config.h @@ -1,25 +1,20 @@ /********************************************************************** -* Author: Cavium, Inc. -* -* Contact: support@cavium.com -* Please include "LiquidIO" in the subject. -* -* Copyright (c) 2003-2015 Cavium, Inc. -* -* This file is free software; you can redistribute it and/or modify -* it under the terms of the GNU General Public License, Version 2, as -* published by the Free Software Foundation. -* -* This file is distributed in the hope that it will be useful, but -* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty -* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or -* NONINFRINGEMENT. See the GNU General Public License for more -* details. -* -* This file may also be available under a different license from Cavium. -* Contact Cavium, Inc. for more information -**********************************************************************/ - + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. + ***********************************************************************/ /*! \file octeon_config.h * \brief Host Driver: Configuration data structures for the host driver. */ @@ -65,9 +60,15 @@ #define DEFAULT_NUM_NIC_PORTS_68XX_210NV 2 /* CN23xx IQ configuration macros */ +#define CN23XX_MAX_VFS_PER_PF_PASS_1_0 8 +#define CN23XX_MAX_VFS_PER_PF_PASS_1_1 31 +#define CN23XX_MAX_VFS_PER_PF 63 +#define CN23XX_MAX_RINGS_PER_VF 8 + #define CN23XX_MAX_RINGS_PER_PF_PASS_1_0 12 #define CN23XX_MAX_RINGS_PER_PF_PASS_1_1 32 #define CN23XX_MAX_RINGS_PER_PF 64 +#define CN23XX_MAX_RINGS_PER_VF 8 #define CN23XX_MAX_INPUT_QUEUES CN23XX_MAX_RINGS_PER_PF #define CN23XX_MAX_IQ_DESCRIPTORS 2048 @@ -466,4 +467,7 @@ struct octeon_config { #define MAX_POSSIBLE_OCTEON_INSTR_QUEUES CN23XX_MAX_INPUT_QUEUES #define MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES CN23XX_MAX_OUTPUT_QUEUES + +#define MAX_POSSIBLE_VFS 64 + #endif /* __OCTEON_CONFIG_H__ */ diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c index 01a50f3b0c8e..3265e0b7923e 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_console.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c @@ -1,25 +1,20 @@ /********************************************************************** -* Author: Cavium, Inc. -* -* Contact: support@cavium.com -* Please include "LiquidIO" in the subject. -* -* Copyright (c) 2003-2015 Cavium, Inc. 
-* -* This file is free software; you can redistribute it and/or modify -* it under the terms of the GNU General Public License, Version 2, as -* published by the Free Software Foundation. -* -* This file is distributed in the hope that it will be useful, but -* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty -* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or -* NONINFRINGEMENT. See the GNU General Public License for more -* details. -* -* This file may also be available under a different license from Cavium. -* Contact Cavium, Inc. for more information -**********************************************************************/ - + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. + ***********************************************************************/ /** * @file octeon_console.c */ @@ -76,9 +71,9 @@ MODULE_PARM_DESC(console_bitmask, #define OCTEON_CONSOLE_POLL_INTERVAL_MS 100 /* 10 times per second */ /* First three members of cvmx_bootmem_desc are left in original -** positions for backwards compatibility. -** Assumes big endian target -*/ + * positions for backwards compatibility. + * Assumes big endian target + */ struct cvmx_bootmem_desc { /** spinlock to control access to list */ u32 lock; @@ -143,46 +138,6 @@ struct octeon_pci_console_desc { }; /** - * This macro returns the size of a member of a structure. - * Logically it is the same as "sizeof(s::field)" in C++, but - * C lacks the "::" operator. - */ -#define SIZEOF_FIELD(s, field) sizeof(((s *)NULL)->field) - -/** - * This macro returns a member of the cvmx_bootmem_desc - * structure. These members can't be directly addressed as - * they might be in memory not directly reachable. In the case - * where bootmem is compiled with LINUX_HOST, the structure - * itself might be located on a remote Octeon. The argument - * "field" is the member name of the cvmx_bootmem_desc to read. - * Regardless of the type of the field, the return type is always - * a u64. - */ -#define CVMX_BOOTMEM_DESC_GET_FIELD(oct, field) \ - __cvmx_bootmem_desc_get(oct, oct->bootmem_desc_addr, \ - offsetof(struct cvmx_bootmem_desc, field), \ - SIZEOF_FIELD(struct cvmx_bootmem_desc, field)) - -#define __cvmx_bootmem_lock(flags) (flags = flags) -#define __cvmx_bootmem_unlock(flags) (flags = flags) - -/** - * This macro returns a member of the - * cvmx_bootmem_named_block_desc structure. These members can't - * be directly addressed as they might be in memory not directly - * reachable. In the case where bootmem is compiled with - * LINUX_HOST, the structure itself might be located on a remote - * Octeon. The argument "field" is the member name of the - * cvmx_bootmem_named_block_desc to read. Regardless of the type - * of the field, the return type is always a u64. The "addr" - * parameter is the physical address of the structure. 
- */ -#define CVMX_BOOTMEM_NAMED_GET_FIELD(oct, addr, field) \ - __cvmx_bootmem_desc_get(oct, addr, \ - offsetof(struct cvmx_bootmem_named_block_desc, field), \ - SIZEOF_FIELD(struct cvmx_bootmem_named_block_desc, field)) -/** * \brief determines if a given console has debug enabled. * @param console console to check * @returns 1 = enabled. 0 otherwise @@ -263,10 +218,15 @@ static int __cvmx_bootmem_check_version(struct octeon_device *oct, oct->bootmem_desc_addr = octeon_read_device_mem64(oct, BOOTLOADER_PCI_READ_DESC_ADDR); - major_version = - (u32)CVMX_BOOTMEM_DESC_GET_FIELD(oct, major_version); - minor_version = - (u32)CVMX_BOOTMEM_DESC_GET_FIELD(oct, minor_version); + major_version = (u32)__cvmx_bootmem_desc_get( + oct, oct->bootmem_desc_addr, + offsetof(struct cvmx_bootmem_desc, major_version), + FIELD_SIZEOF(struct cvmx_bootmem_desc, major_version)); + minor_version = (u32)__cvmx_bootmem_desc_get( + oct, oct->bootmem_desc_addr, + offsetof(struct cvmx_bootmem_desc, minor_version), + FIELD_SIZEOF(struct cvmx_bootmem_desc, minor_version)); + dev_dbg(&oct->pci_dev->dev, "%s: major_version=%d\n", __func__, major_version); if ((major_version > 3) || @@ -289,10 +249,20 @@ static const struct cvmx_bootmem_named_block_desc u64 named_addr = cvmx_bootmem_phy_named_block_find(oct, name, flags); if (named_addr) { - desc->base_addr = CVMX_BOOTMEM_NAMED_GET_FIELD(oct, named_addr, - base_addr); - desc->size = - CVMX_BOOTMEM_NAMED_GET_FIELD(oct, named_addr, size); + desc->base_addr = __cvmx_bootmem_desc_get( + oct, named_addr, + offsetof(struct cvmx_bootmem_named_block_desc, + base_addr), + FIELD_SIZEOF( + struct cvmx_bootmem_named_block_desc, + base_addr)); + desc->size = __cvmx_bootmem_desc_get(oct, named_addr, + offsetof(struct cvmx_bootmem_named_block_desc, + size), + FIELD_SIZEOF( + struct cvmx_bootmem_named_block_desc, + size)); + strncpy(desc->name, name, sizeof(desc->name)); desc->name[sizeof(desc->name) - 1] = 0; return &oct->bootmem_named_block_desc; @@ -307,22 +277,41 @@ static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct, { u64 result = 0; - __cvmx_bootmem_lock(flags); if (!__cvmx_bootmem_check_version(oct, 3)) { u32 i; - u64 named_block_array_addr = - CVMX_BOOTMEM_DESC_GET_FIELD(oct, - named_block_array_addr); - u32 num_blocks = (u32) - CVMX_BOOTMEM_DESC_GET_FIELD(oct, nb_num_blocks); - u32 name_length = (u32) - CVMX_BOOTMEM_DESC_GET_FIELD(oct, named_block_name_len); + + u64 named_block_array_addr = __cvmx_bootmem_desc_get( + oct, oct->bootmem_desc_addr, + offsetof(struct cvmx_bootmem_desc, + named_block_array_addr), + FIELD_SIZEOF(struct cvmx_bootmem_desc, + named_block_array_addr)); + u32 num_blocks = (u32)__cvmx_bootmem_desc_get( + oct, oct->bootmem_desc_addr, + offsetof(struct cvmx_bootmem_desc, + nb_num_blocks), + FIELD_SIZEOF(struct cvmx_bootmem_desc, + nb_num_blocks)); + + u32 name_length = (u32)__cvmx_bootmem_desc_get( + oct, oct->bootmem_desc_addr, + offsetof(struct cvmx_bootmem_desc, + named_block_name_len), + FIELD_SIZEOF(struct cvmx_bootmem_desc, + named_block_name_len)); + u64 named_addr = named_block_array_addr; for (i = 0; i < num_blocks; i++) { - u64 named_size = - CVMX_BOOTMEM_NAMED_GET_FIELD(oct, named_addr, - size); + u64 named_size = __cvmx_bootmem_desc_get( + oct, named_addr, + offsetof( + struct cvmx_bootmem_named_block_desc, + size), + FIELD_SIZEOF( + struct cvmx_bootmem_named_block_desc, + size)); + if (name && named_size) { char *name_tmp = kmalloc(name_length + 1, GFP_KERNEL); @@ -347,7 +336,6 @@ static u64 
cvmx_bootmem_phy_named_block_find(struct octeon_device *oct, sizeof(struct cvmx_bootmem_named_block_desc); } } - __cvmx_bootmem_unlock(flags); return result; } diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c index 586b68899b06..a8df493a5012 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c @@ -1,24 +1,20 @@ /********************************************************************** -* Author: Cavium, Inc. -* -* Contact: support@cavium.com -* Please include "LiquidIO" in the subject. -* -* Copyright (c) 2003-2015 Cavium, Inc. -* -* This file is free software; you can redistribute it and/or modify -* it under the terms of the GNU General Public License, Version 2, as -* published by the Free Software Foundation. -* -* This file is distributed in the hope that it will be useful, but -* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty -* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or -* NONINFRINGEMENT. See the GNU General Public License for more -* details. -* -* This file may also be available under a different license from Cavium. -* Contact Cavium, Inc. for more information -**********************************************************************/ + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. + ***********************************************************************/ #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/vmalloc.h> @@ -32,6 +28,7 @@ #include "cn66xx_regs.h" #include "cn66xx_device.h" #include "cn23xx_pf_device.h" +#include "cn23xx_vf_device.h" /** Default configuration * for CN66XX OCTEON Models. 
@@ -520,11 +517,6 @@ static struct octeon_config default_cn23xx_conf = { } }; -enum { - OCTEON_CONFIG_TYPE_DEFAULT = 0, - NUM_OCTEON_CONFS, -}; - static struct octeon_config_ptr { u32 conf_type; } oct_conf_info[MAX_OCTEON_DEVICES] = { @@ -580,15 +572,17 @@ static void *__retrieve_octeon_config_info(struct octeon_device *oct, switch (oct_conf_info[oct_id].conf_type) { case OCTEON_CONFIG_TYPE_DEFAULT: if (oct->chip_id == OCTEON_CN66XX) { - ret = (void *)&default_cn66xx_conf; + ret = &default_cn66xx_conf; } else if ((oct->chip_id == OCTEON_CN68XX) && (card_type == LIO_210NV)) { - ret = (void *)&default_cn68xx_210nv_conf; + ret = &default_cn68xx_210nv_conf; } else if ((oct->chip_id == OCTEON_CN68XX) && (card_type == LIO_410NV)) { - ret = (void *)&default_cn68xx_conf; + ret = &default_cn68xx_conf; } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) { - ret = (void *)&default_cn23xx_conf; + ret = &default_cn23xx_conf; + } else if (oct->chip_id == OCTEON_CN23XX_VF_VID) { + ret = &default_cn23xx_conf; } break; default: @@ -604,6 +598,7 @@ static int __verify_octeon_config_info(struct octeon_device *oct, void *conf) case OCTEON_CN68XX: return lio_validate_cn6xxx_config_info(oct, conf); case OCTEON_CN23XX_PF_VID: + case OCTEON_CN23XX_VF_VID: return 0; default: break; @@ -649,12 +644,12 @@ void octeon_free_device_mem(struct octeon_device *oct) int i; for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { - if (oct->io_qmask.oq & (1ULL << i)) + if (oct->io_qmask.oq & BIT_ULL(i)) vfree(oct->droq[i]); } for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { - if (oct->io_qmask.iq & (1ULL << i)) + if (oct->io_qmask.iq & BIT_ULL(i)) vfree(oct->instr_queue[i]); } @@ -681,6 +676,9 @@ static struct octeon_device *octeon_allocate_device_mem(u32 pci_id, case OCTEON_CN23XX_PF_VID: configsize = sizeof(struct octeon_cn23xx_pf); break; + case OCTEON_CN23XX_VF_VID: + configsize = sizeof(struct octeon_cn23xx_vf); + break; default: pr_err("%s: Unknown PCI Device: 0x%x\n", __func__, @@ -756,6 +754,9 @@ octeon_allocate_ioq_vector(struct octeon_device *oct) if (OCTEON_CN23XX_PF(oct)) num_ioqs = oct->sriov_info.num_pf_rings; + else if (OCTEON_CN23XX_VF(oct)) + num_ioqs = oct->sriov_info.rings_per_vf; + size = sizeof(struct octeon_ioq_vector) * num_ioqs; oct->ioq_vector = vmalloc(size); @@ -767,6 +768,7 @@ octeon_allocate_ioq_vector(struct octeon_device *oct) ioq_vector->oct_dev = oct; ioq_vector->iq_index = i; ioq_vector->droq_index = i; + ioq_vector->mbox = oct->mbox[i]; cpu_num = i % num_online_cpus(); cpumask_set_cpu(cpu_num, &ioq_vector->affinity_mask); @@ -795,10 +797,11 @@ int octeon_setup_instr_queues(struct octeon_device *oct) if (OCTEON_CN6XXX(oct)) num_descs = - CFG_GET_NUM_DEF_TX_DESCS(CHIP_FIELD(oct, cn6xxx, conf)); + CFG_GET_NUM_DEF_TX_DESCS(CHIP_CONF(oct, cn6xxx)); else if (OCTEON_CN23XX_PF(oct)) - num_descs = CFG_GET_NUM_DEF_TX_DESCS(CHIP_FIELD(oct, cn23xx_pf, - conf)); + num_descs = CFG_GET_NUM_DEF_TX_DESCS(CHIP_CONF(oct, cn23xx_pf)); + else if (OCTEON_CN23XX_VF(oct)) + num_descs = CFG_GET_NUM_DEF_TX_DESCS(CHIP_CONF(oct, cn23xx_vf)); oct->num_iqs = 0; @@ -821,6 +824,7 @@ int octeon_setup_instr_queues(struct octeon_device *oct) if (octeon_init_instr_queue(oct, txpciq, num_descs)) { /* prevent memory leak */ vfree(oct->instr_queue[0]); + oct->instr_queue[0] = NULL; return 1; } @@ -837,14 +841,15 @@ int octeon_setup_output_queues(struct octeon_device *oct) if (OCTEON_CN6XXX(oct)) { num_descs = - CFG_GET_NUM_DEF_RX_DESCS(CHIP_FIELD(oct, cn6xxx, conf)); + CFG_GET_NUM_DEF_RX_DESCS(CHIP_CONF(oct, cn6xxx)); desc_size = 
- CFG_GET_DEF_RX_BUF_SIZE(CHIP_FIELD(oct, cn6xxx, conf)); + CFG_GET_DEF_RX_BUF_SIZE(CHIP_CONF(oct, cn6xxx)); } else if (OCTEON_CN23XX_PF(oct)) { - num_descs = CFG_GET_NUM_DEF_RX_DESCS(CHIP_FIELD(oct, cn23xx_pf, - conf)); - desc_size = CFG_GET_DEF_RX_BUF_SIZE(CHIP_FIELD(oct, cn23xx_pf, - conf)); + num_descs = CFG_GET_NUM_DEF_RX_DESCS(CHIP_CONF(oct, cn23xx_pf)); + desc_size = CFG_GET_DEF_RX_BUF_SIZE(CHIP_CONF(oct, cn23xx_pf)); + } else if (OCTEON_CN23XX_VF(oct)) { + num_descs = CFG_GET_NUM_DEF_RX_DESCS(CHIP_CONF(oct, cn23xx_vf)); + desc_size = CFG_GET_DEF_RX_BUF_SIZE(CHIP_CONF(oct, cn23xx_vf)); } oct->num_oqs = 0; oct->droq[0] = vmalloc_node(sizeof(*oct->droq[0]), numa_node); @@ -853,19 +858,63 @@ int octeon_setup_output_queues(struct octeon_device *oct) if (!oct->droq[0]) return 1; - if (octeon_init_droq(oct, oq_no, num_descs, desc_size, NULL)) + if (octeon_init_droq(oct, oq_no, num_descs, desc_size, NULL)) { + vfree(oct->droq[oq_no]); + oct->droq[oq_no] = NULL; return 1; + } oct->num_oqs++; return 0; } -void octeon_set_io_queues_off(struct octeon_device *oct) +int octeon_set_io_queues_off(struct octeon_device *oct) { + int loop = BUSY_READING_REG_VF_LOOP_COUNT; + if (OCTEON_CN6XXX(oct)) { octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0); octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0); + } else if (oct->chip_id == OCTEON_CN23XX_VF_VID) { + u32 q_no; + + /* IOQs will already be in reset. + * If RST bit is set, wait for quiet bit to be set. + * Once quiet bit is set, clear the RST bit. + */ + for (q_no = 0; q_no < oct->sriov_info.rings_per_vf; q_no++) { + u64 reg_val = octeon_read_csr64( + oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no)); + + while ((reg_val & CN23XX_PKT_INPUT_CTL_RST) && + !(reg_val & CN23XX_PKT_INPUT_CTL_QUIET) && + loop) { + reg_val = octeon_read_csr64( + oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)); + loop--; + } + if (!loop) { + dev_err(&oct->pci_dev->dev, + "clearing the reset reg failed or setting the quiet reg failed for qno: %u\n", + q_no); + return -1; + } + + reg_val = reg_val & ~CN23XX_PKT_INPUT_CTL_RST; + octeon_write_csr64(oct, + CN23XX_SLI_IQ_PKT_CONTROL64(q_no), + reg_val); + + reg_val = octeon_read_csr64( + oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)); + if (reg_val & CN23XX_PKT_INPUT_CTL_RST) { + dev_err(&oct->pci_dev->dev, + "unable to reset qno %u\n", q_no); + return -1; + } + } } + return 0; } void octeon_set_droq_pkt_op(struct octeon_device *oct, @@ -1070,10 +1119,10 @@ int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf) if (OCTEON_CN6XXX(oct)) num_nic_ports = - CFG_GET_NUM_NIC_PORTS(CHIP_FIELD(oct, cn6xxx, conf)); + CFG_GET_NUM_NIC_PORTS(CHIP_CONF(oct, cn6xxx)); else if (OCTEON_CN23XX_PF(oct)) num_nic_ports = - CFG_GET_NUM_NIC_PORTS(CHIP_FIELD(oct, cn23xx_pf, conf)); + CFG_GET_NUM_NIC_PORTS(CHIP_CONF(oct, cn23xx_pf)); if (atomic_read(&oct->status) >= OCT_DEV_RUNNING) { dev_err(&oct->pci_dev->dev, "Received CORE OK when device state is 0x%x\n", @@ -1143,7 +1192,7 @@ int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no) { if (oct && (q_no < MAX_OCTEON_INSTR_QUEUES(oct)) && - (oct->io_qmask.iq & (1ULL << q_no))) + (oct->io_qmask.iq & BIT_ULL(q_no))) return oct->instr_queue[q_no]->max_count; return -1; @@ -1152,7 +1201,7 @@ int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no) int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no) { if (oct && (q_no < MAX_OCTEON_OUTPUT_QUEUES(oct)) && - (oct->io_qmask.oq & (1ULL << q_no))) + (oct->io_qmask.oq & BIT_ULL(q_no))) return oct->droq[q_no]->max_count; return -1; } @@ -1168,10 +1217,13 
@@ struct octeon_config *octeon_get_conf(struct octeon_device *oct) if (OCTEON_CN6XXX(oct)) { default_oct_conf = - (struct octeon_config *)(CHIP_FIELD(oct, cn6xxx, conf)); + (struct octeon_config *)(CHIP_CONF(oct, cn6xxx)); } else if (OCTEON_CN23XX_PF(oct)) { default_oct_conf = (struct octeon_config *) - (CHIP_FIELD(oct, cn23xx_pf, conf)); + (CHIP_CONF(oct, cn23xx_pf)); + } else if (OCTEON_CN23XX_VF(oct)) { + default_oct_conf = (struct octeon_config *) + (CHIP_CONF(oct, cn23xx_vf)); } return default_oct_conf; } @@ -1322,7 +1374,7 @@ void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq) /*write resend. Writing RESEND in SLI_PKTX_CNTS should be enough *to trigger tx interrupts as well, if they are pending. */ - if (oct && OCTEON_CN23XX_PF(oct)) { + if (oct && (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct))) { if (droq) writeq(CN23XX_INTR_RESEND, droq->pkts_sent_reg); /*we race with firmrware here. read and write the IN_DONE_CNTS*/ diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h index da15c2ae9330..18f6836250a6 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h @@ -1,25 +1,20 @@ /********************************************************************** -* Author: Cavium, Inc. -* -* Contact: support@cavium.com -* Please include "LiquidIO" in the subject. -* -* Copyright (c) 2003-2015 Cavium, Inc. -* -* This file is free software; you can redistribute it and/or modify -* it under the terms of the GNU General Public License, Version 2, as -* published by the Free Software Foundation. -* -* This file is distributed in the hope that it will be useful, but -* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty -* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or -* NONINFRINGEMENT. See the GNU General Public License for more -* details. -* -* This file may also be available under a different license from Cavium. -* Contact Cavium, Inc. for more information -**********************************************************************/ - + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. + ***********************************************************************/ /*! \file octeon_device.h * \brief Host Driver: This file defines the octeon device structure. 
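A note on the VF branch added to octeon_set_io_queues_off() further up: the initial read polls CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no), while every re-read and both writes in the same loop use CN23XX_SLI_IQ_PKT_CONTROL64(q_no); unless the two macros resolve to the same offset in the VF's BAR, one of them looks like a typo. Also, `loop` is initialized once before the queue loop, so one slow queue can exhaust the budget for all remaining queues, and the `!loop` test reports failure even if the final read did observe the quiet bit. A per-queue variant, assuming the VF register macro is the intended one, could look like:

/* Sketch, not the patch: per-queue poll budget, and the register is
 * re-tested on exit instead of the loop counter.
 */
for (q_no = 0; q_no < oct->sriov_info.rings_per_vf; q_no++) {
	int loop = BUSY_READING_REG_VF_LOOP_COUNT;
	u64 reg_val = octeon_read_csr64(
		oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));

	while ((reg_val & CN23XX_PKT_INPUT_CTL_RST) &&
	       !(reg_val & CN23XX_PKT_INPUT_CTL_QUIET) && loop--)
		reg_val = octeon_read_csr64(
			oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));

	if ((reg_val & CN23XX_PKT_INPUT_CTL_RST) &&
	    !(reg_val & CN23XX_PKT_INPUT_CTL_QUIET)) {
		dev_err(&oct->pci_dev->dev,
			"qno %u did not go quiet in reset\n", q_no);
		return -1;
	}

	/* clear RST and verify it sticks, as in the patch */
}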
 */
@@ -38,6 +33,7 @@
 #define  OCTEON_CN68XX                0x0091
 #define  OCTEON_CN66XX                0x0092
 #define  OCTEON_CN23XX_PF_VID         0x9702
+#define  OCTEON_CN23XX_VF_VID         0x9712
 
 /** RevisionId for the chips */
 #define  OCTEON_CN23XX_REV_1_0        0x00
@@ -52,7 +48,14 @@ enum octeon_pci_swap_mode {
 	OCTEON_PCI_32BIT_LW_SWAP = 3
 };
 
+enum {
+	OCTEON_CONFIG_TYPE_DEFAULT = 0,
+	NUM_OCTEON_CONFS,
+};
+
+#define  OCTEON_INPUT_INTR    (1)
 #define  OCTEON_OUTPUT_INTR   (2)
+#define  OCTEON_MBOX_INTR     (4)
 #define  OCTEON_ALL_INTR      0xff
 
 /*---------------   PCI BAR1 index registers -------------*/
@@ -70,26 +73,30 @@ enum octeon_pci_swap_mode {
  * as it is initialized.
  */
 #define OCT_DEV_BEGIN_STATE            0x0
-#define OCT_DEV_PCI_MAP_DONE           0x1
-#define OCT_DEV_DISPATCH_INIT_DONE     0x2
-#define OCT_DEV_INSTR_QUEUE_INIT_DONE  0x3
-#define OCT_DEV_SC_BUFF_POOL_INIT_DONE 0x4
-#define OCT_DEV_RESP_LIST_INIT_DONE    0x5
-#define OCT_DEV_DROQ_INIT_DONE         0x6
-#define OCT_DEV_IO_QUEUES_DONE         0x7
-#define OCT_DEV_CONSOLE_INIT_DONE      0x8
-#define OCT_DEV_HOST_OK                0x9
-#define OCT_DEV_CORE_OK                0xa
-#define OCT_DEV_RUNNING                0xb
-#define OCT_DEV_IN_RESET               0xc
-#define OCT_DEV_STATE_INVALID          0xd
+#define OCT_DEV_PCI_ENABLE_DONE        0x1
+#define OCT_DEV_PCI_MAP_DONE           0x2
+#define OCT_DEV_DISPATCH_INIT_DONE     0x3
+#define OCT_DEV_INSTR_QUEUE_INIT_DONE  0x4
+#define OCT_DEV_SC_BUFF_POOL_INIT_DONE 0x5
+#define OCT_DEV_RESP_LIST_INIT_DONE    0x6
+#define OCT_DEV_DROQ_INIT_DONE         0x7
+#define OCT_DEV_MBOX_SETUP_DONE        0x8
+#define OCT_DEV_MSIX_ALLOC_VECTOR_DONE 0x9
+#define OCT_DEV_INTR_SET_DONE          0xa
+#define OCT_DEV_IO_QUEUES_DONE         0xb
+#define OCT_DEV_CONSOLE_INIT_DONE      0xc
+#define OCT_DEV_HOST_OK                0xd
+#define OCT_DEV_CORE_OK                0xe
+#define OCT_DEV_RUNNING                0xf
+#define OCT_DEV_IN_RESET               0x10
+#define OCT_DEV_STATE_INVALID          0x11
 
 #define OCT_DEV_STATES OCT_DEV_STATE_INVALID
 
 /** Octeon Device interrupts
- * These interrupt bits are set in int_status filed of
- * octeon_device structure
- */
+ * These interrupt bits are set in the int_status field of the
+ * octeon_device structure.
+ */
 #define	OCT_DEV_INTR_DMA0_FORCE	       0x01
 #define	OCT_DEV_INTR_DMA1_FORCE	       0x02
 #define OCT_DEV_INTR_PKT_DATA	       0x04
@@ -208,6 +215,10 @@ struct octeon_fn_list {
 	irqreturn_t (*process_interrupt_regs)(void *);
 	u64 (*msix_interrupt_handler)(void *);
+
+	int (*setup_mbox)(struct octeon_device *);
+	int (*free_mbox)(struct octeon_device *);
+
 	int (*soft_reset)(struct octeon_device *);
 	int (*setup_device_regs)(struct octeon_device *);
 	void (*bar1_idx_setup)(struct octeon_device *, u64, u32, int);
@@ -284,6 +295,7 @@ struct octdev_props {
 #define LIO_FLAG_MSIX_ENABLED	0x1
 #define MSIX_PO_INT		0x1
 #define MSIX_PI_INT		0x2
+#define MSIX_MBOX_INT		0x4
 
 struct octeon_pf_vf_hs_word {
 #ifdef __LITTLE_ENDIAN_BITFIELD
@@ -322,14 +334,39 @@ struct octeon_pf_vf_hs_word {
 };
 
 struct octeon_sriov_info {
+	/* Number of rings assigned to VF */
+	u32 rings_per_vf;
+
+	/** Max number of VF devices that can be enabled. This variable can
+	 * be specified at load time or it will be derived after allocating
+	 * PF queues. When max_vfs is derived, each VF will get one queue.
+	 **/
+	u32 max_vfs;
+
+	/** Number of VF devices enabled using sysfs.
*/ + u32 num_vfs_alloced; + /* Actual rings left for PF device */ u32 num_pf_rings; - /* SRN of PF usable IO queues */ + /* SRN of PF usable IO queues */ u32 pf_srn; + /* total pf rings */ u32 trs; + u32 sriov_enabled; + + /*lookup table that maps DPI ring number to VF pci_dev struct pointer*/ + struct pci_dev *dpiring_to_vfpcidev_lut[MAX_POSSIBLE_VFS]; + + u64 vf_macaddr[MAX_POSSIBLE_VFS]; + + u16 vf_vlantci[MAX_POSSIBLE_VFS]; + + int vf_linkstate[MAX_POSSIBLE_VFS]; + + u64 vf_drv_loaded_mask; }; struct octeon_ioq_vector { @@ -337,6 +374,7 @@ struct octeon_ioq_vector { int iq_index; int droq_index; int vector; + struct octeon_mbox *mbox; struct cpumask affinity_mask; u32 ioq_num; }; @@ -365,8 +403,13 @@ struct octeon_device { /** Octeon Chip type. */ u16 chip_id; + u16 rev_id; + u16 pf_num; + + u16 vf_num; + /** This device's id - set by the driver. */ u32 octeon_id; @@ -474,6 +517,9 @@ struct octeon_device { int msix_on; + /** Mail Box details of each octeon queue. */ + struct octeon_mbox *mbox[MAX_POSSIBLE_VFS]; + /** IOq information of it's corresponding MSI-X interrupt. */ struct octeon_ioq_vector *ioq_vector; @@ -490,11 +536,14 @@ struct octeon_device { #define OCT_DRV_ONLINE 1 #define OCT_DRV_OFFLINE 2 -#define OCTEON_CN6XXX(oct) ((oct->chip_id == OCTEON_CN66XX) || \ - (oct->chip_id == OCTEON_CN68XX)) -#define OCTEON_CN23XX_PF(oct) (oct->chip_id == OCTEON_CN23XX_PF_VID) -#define CHIP_FIELD(oct, TYPE, field) \ - (((struct octeon_ ## TYPE *)(oct->chip))->field) +#define OCTEON_CN6XXX(oct) ({ \ + typeof(oct) _oct = (oct); \ + ((_oct->chip_id == OCTEON_CN66XX) || \ + (_oct->chip_id == OCTEON_CN68XX)); }) +#define OCTEON_CN23XX_PF(oct) ((oct)->chip_id == OCTEON_CN23XX_PF_VID) +#define OCTEON_CN23XX_VF(oct) ((oct)->chip_id == OCTEON_CN23XX_VF_VID) +#define CHIP_CONF(oct, TYPE) \ + (((struct octeon_ ## TYPE *)((oct)->chip))->conf) struct oct_intrmod_cmd { struct octeon_device *oct_dev; @@ -508,7 +557,7 @@ struct oct_intrmod_cmd { void octeon_init_device_list(int conf_type); /** Free memory for Input and Output queue structures for a octeon device */ -void octeon_free_device_mem(struct octeon_device *); +void octeon_free_device_mem(struct octeon_device *oct); /* Look up a free entry in the octeon_device table and allocate resources * for the octeon_device structure for an octeon device. 
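With the three states inserted above (OCT_DEV_MBOX_SETUP_DONE, OCT_DEV_MSIX_ALLOC_VECTOR_DONE, OCT_DEV_INTR_SET_DONE), everything after OCT_DEV_DROQ_INIT_DONE is renumbered, so teardown code that switches on the numeric status needs matching undo steps. A hedged sketch of the fall-through unwind this implies; the real teardown lives in lio_main.c/lio_vf_main.c, and the exact callbacks used at each step here are assumptions:

/* Illustrative unwind skeleton: each new state gets an undo step and
 * falls through to the older ones (callback names are assumptions).
 */
switch (atomic_read(&oct->status)) {
case OCT_DEV_RUNNING:
case OCT_DEV_CORE_OK:
case OCT_DEV_HOST_OK:
case OCT_DEV_IO_QUEUES_DONE:
	/* stop the IO queues, then fall through */
case OCT_DEV_INTR_SET_DONE:
	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
	/* fall through */
case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
	pci_disable_msix(oct->pci_dev);
	/* fall through */
case OCT_DEV_MBOX_SETUP_DONE:
	oct->fn_list.free_mbox(oct);
	/* fall through */
case OCT_DEV_DROQ_INIT_DONE:
	/* earlier teardown steps continue as before */
	break;
}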
Called at init @@ -606,16 +655,16 @@ void lio_pci_writeq(struct octeon_device *oct, u64 val, u64 addr); /* Routines for reading and writing CSRs */ #define octeon_write_csr(oct_dev, reg_off, value) \ - writel(value, oct_dev->mmio[0].hw_addr + reg_off) + writel(value, (oct_dev)->mmio[0].hw_addr + (reg_off)) #define octeon_write_csr64(oct_dev, reg_off, val64) \ - writeq(val64, oct_dev->mmio[0].hw_addr + reg_off) + writeq(val64, (oct_dev)->mmio[0].hw_addr + (reg_off)) #define octeon_read_csr(oct_dev, reg_off) \ - readl(oct_dev->mmio[0].hw_addr + reg_off) + readl((oct_dev)->mmio[0].hw_addr + (reg_off)) #define octeon_read_csr64(oct_dev, reg_off) \ - readq(oct_dev->mmio[0].hw_addr + reg_off) + readq((oct_dev)->mmio[0].hw_addr + (reg_off)) /** * Checks if memory access is okay @@ -724,7 +773,7 @@ int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no); /** Turns off the input and output queues for the device * @param oct which octeon to disable */ -void octeon_set_io_queues_off(struct octeon_device *oct); +int octeon_set_io_queues_off(struct octeon_device *oct); /** Turns on or off the given output queue for the device * @param oct which octeon to change diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c index f60e5320daf4..0be87d119a97 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c @@ -1,24 +1,20 @@ /********************************************************************** -* Author: Cavium, Inc. -* -* Contact: support@cavium.com -* Please include "LiquidIO" in the subject. -* -* Copyright (c) 2003-2015 Cavium, Inc. -* -* This file is free software; you can redistribute it and/or modify -* it under the terms of the GNU General Public License, Version 2, as -* published by the Free Software Foundation. -* -* This file is distributed in the hope that it will be useful, but -* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty -* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or -* NONINFRINGEMENT. See the GNU General Public License for more -* details. -* -* This file may also be available under a different license from Cavium. -* Contact Cavium, Inc. for more information -**********************************************************************/ + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. + ***********************************************************************/ #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/vmalloc.h> @@ -32,9 +28,7 @@ #include "cn66xx_regs.h" #include "cn66xx_device.h" #include "cn23xx_pf_device.h" - -#define CVM_MIN(d1, d2) (((d1) < (d2)) ? (d1) : (d2)) -#define CVM_MAX(d1, d2) (((d1) > (d2)) ? 
(d1) : (d2)) +#include "cn23xx_vf_device.h" struct niclist { struct list_head list; @@ -258,13 +252,18 @@ int octeon_init_droq(struct octeon_device *oct, c_num_descs = num_descs; c_buf_size = desc_size; if (OCTEON_CN6XXX(oct)) { - struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf); + struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx); c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf6x); c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x); } else if (OCTEON_CN23XX_PF(oct)) { - struct octeon_config *conf23 = CHIP_FIELD(oct, cn23xx_pf, conf); + struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_pf); + + c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf23); + c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf23); + } else if (OCTEON_CN23XX_VF(oct)) { + struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_vf); c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf23); c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf23); @@ -337,7 +336,7 @@ int octeon_init_droq(struct octeon_device *oct, /* For 56xx Pass1, this function won't be called, so no checks. */ oct->fn_list.setup_oq_regs(oct, q_no); - oct->io_qmask.oq |= (1ULL << q_no); + oct->io_qmask.oq |= BIT_ULL(q_no); return 0; @@ -409,7 +408,7 @@ static inline struct octeon_recv_info *octeon_create_recv_info( recv_pkt->buffer_ptr[i] = droq->recv_buf_list[idx].buffer; droq->recv_buf_list[idx].buffer = NULL; - INCR_INDEX_BY1(idx, droq->max_count); + idx = incr_index(idx, 1, droq->max_count); bytes_left -= droq->buffer_size; i++; buf_cnt--; @@ -440,14 +439,15 @@ octeon_droq_refill_pullup_descs(struct octeon_droq *droq, droq->recv_buf_list[refill_index].buffer = NULL; desc_ring[refill_index].buffer_ptr = 0; do { - INCR_INDEX_BY1(droq->refill_idx, - droq->max_count); + droq->refill_idx = incr_index(droq->refill_idx, + 1, + droq->max_count); desc_refilled++; droq->refill_count--; } while (droq->recv_buf_list[droq->refill_idx]. buffer); } - INCR_INDEX_BY1(refill_index, droq->max_count); + refill_index = incr_index(refill_index, 1, droq->max_count); } /* while */ return desc_refilled; } @@ -514,7 +514,8 @@ octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq) /* Reset any previous values in the length field. 
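The INCR_INDEX()/INCR_INDEX_BY1() macros used throughout this file are replaced by assignments through incr_index(). The helper itself is not shown in these hunks; presumably it is a small inline in octeon_droq.h along these lines, which avoids the repeated macro-argument expansion of the old form and can be used in expressions:

/* Presumable shape of the helper (defined in octeon_droq.h, not shown
 * in this hunk): wrap index forward by count within a ring of size max.
 */
static inline u32 incr_index(u32 index, u32 count, u32 max)
{
	if ((index + count) >= max)
		index = index + count - max;
	else
		index += count;

	return index;
}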
*/ droq->info_list[droq->refill_idx].length = 0; - INCR_INDEX_BY1(droq->refill_idx, droq->max_count); + droq->refill_idx = incr_index(droq->refill_idx, 1, + droq->max_count); desc_refilled++; droq->refill_count--; } @@ -599,7 +600,8 @@ static inline void octeon_droq_drop_packets(struct octeon_device *oct, buf_cnt = 1; } - INCR_INDEX(droq->read_idx, buf_cnt, droq->max_count); + droq->read_idx = incr_index(droq->read_idx, buf_cnt, + droq->max_count); droq->refill_count += buf_cnt; } } @@ -639,11 +641,12 @@ octeon_droq_fast_process_packets(struct octeon_device *oct, rh = &info->rh; total_len += (u32)info->length; - if (OPCODE_SLOW_PATH(rh)) { + if (opcode_slow_path(rh)) { u32 buf_cnt; buf_cnt = octeon_droq_dispatch_pkt(oct, droq, rh, info); - INCR_INDEX(droq->read_idx, buf_cnt, droq->max_count); + droq->read_idx = incr_index(droq->read_idx, + buf_cnt, droq->max_count); droq->refill_count += buf_cnt; } else { if (info->length <= droq->buffer_size) { @@ -657,7 +660,8 @@ octeon_droq_fast_process_packets(struct octeon_device *oct, droq->recv_buf_list[droq->read_idx].buffer = NULL; - INCR_INDEX_BY1(droq->read_idx, droq->max_count); + droq->read_idx = incr_index(droq->read_idx, 1, + droq->max_count); droq->refill_count++; } else { nicbuf = octeon_fast_packet_alloc((u32) @@ -689,8 +693,9 @@ octeon_droq_fast_process_packets(struct octeon_device *oct, } pkt_len += cpy_len; - INCR_INDEX_BY1(droq->read_idx, - droq->max_count); + droq->read_idx = + incr_index(droq->read_idx, 1, + droq->max_count); droq->refill_count++; } } @@ -804,9 +809,8 @@ octeon_droq_process_poll_pkts(struct octeon_device *oct, while (total_pkts_processed < budget) { octeon_droq_check_hw_for_pkts(droq); - pkts_available = - CVM_MIN((budget - total_pkts_processed), - (u32)(atomic_read(&droq->pkts_pending))); + pkts_available = min((budget - total_pkts_processed), + (u32)(atomic_read(&droq->pkts_pending))); if (pkts_available == 0) break; @@ -891,6 +895,10 @@ octeon_process_droq_poll_cmd(struct octeon_device *oct, u32 q_no, int cmd, lio_enable_irq(oct->droq[q_no], oct->instr_queue[q_no]); } break; + + case OCTEON_CN23XX_VF_VID: + lio_enable_irq(oct->droq[q_no], oct->instr_queue[q_no]); + break; } return 0; } @@ -988,7 +996,8 @@ int octeon_create_droq(struct octeon_device *oct, if (!droq) droq = vmalloc(sizeof(*droq)); if (!droq) - goto create_droq_fail; + return -1; + memset(droq, 0, sizeof(struct octeon_droq)); /*Disable the pkt o/p for this Q */ @@ -996,7 +1005,11 @@ int octeon_create_droq(struct octeon_device *oct, oct->droq[q_no] = droq; /* Initialize the Droq */ - octeon_init_droq(oct, q_no, num_descs, desc_size, app_ctx); + if (octeon_init_droq(oct, q_no, num_descs, desc_size, app_ctx)) { + vfree(oct->droq[q_no]); + oct->droq[q_no] = NULL; + return -1; + } oct->num_oqs++; @@ -1009,8 +1022,4 @@ int octeon_create_droq(struct octeon_device *oct, * the same time. */ return 0; - -create_droq_fail: - octeon_delete_droq(oct, q_no); - return -ENOMEM; } diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h index 5be002d5dba4..e62074090681 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h @@ -4,7 +4,7 @@ * Contact: support@cavium.com * Please include "LiquidIO" in the subject. * - * Copyright (c) 2003-2015 Cavium, Inc. + * Copyright (c) 2003-2016 Cavium, Inc. 
* * This file is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, Version 2, as @@ -13,13 +13,8 @@ * This file is distributed in the hope that it will be useful, but * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or - * NONINFRINGEMENT. See the GNU General Public License for more - * details. - * - * This file may also be available under a different license from Cavium. - * Contact Cavium, Inc. for more information - **********************************************************************/ - + * NONINFRINGEMENT. See the GNU General Public License for more details. + ***********************************************************************/ /*! \file octeon_droq.h * \brief Implementation of Octeon Output queues. "Output" is with * respect to the Octeon device on the NIC. From this driver's point of @@ -81,7 +76,7 @@ struct octeon_skb_page_info { * the Octeon device. Since the descriptor ring keeps physical (bus) * addresses, this field is required for the driver to keep track of * the virtual address pointers. -*/ + */ struct octeon_recv_buffer { /** Packet buffer, including metadata. */ void *buffer; @@ -121,7 +116,6 @@ struct oct_droq_stats { /** Num of Packets dropped due to receive path failures. */ u64 rx_dropped; - /** Num of vxlan packets received; */ u64 rx_vxlan; /** Num of failures of recv_buffer_alloc() */ @@ -359,7 +353,7 @@ struct octeon_droq { * @param q_no - droq no. ranges from 0 - 3. * @param app_ctx - pointer to application context * @return Success: 0 Failure: 1 -*/ + */ int octeon_init_droq(struct octeon_device *oct_dev, u32 q_no, u32 num_descs, @@ -372,7 +366,7 @@ int octeon_init_droq(struct octeon_device *oct_dev, * @param oct_dev - pointer to the octeon device structure * @param q_no - droq no. ranges from 0 - 3. * @return: Success: 0 Failure: 1 -*/ + */ int octeon_delete_droq(struct octeon_device *oct_dev, u32 q_no); /** Register a change in droq operations. The ops field has a pointer to a diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h index e4d426ba18dc..e04ca8f0b4a7 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h @@ -4,7 +4,7 @@ * Contact: support@cavium.com * Please include "LiquidIO" in the subject. * - * Copyright (c) 2003-2015 Cavium, Inc. + * Copyright (c) 2003-2016 Cavium, Inc. * * This file is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, Version 2, as @@ -13,13 +13,8 @@ * This file is distributed in the hope that it will be useful, but * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or - * NONINFRINGEMENT. See the GNU General Public License for more - * details. - * - * This file may also be available under a different license from Cavium. - * Contact Cavium, Inc. for more information - **********************************************************************/ - + * NONINFRINGEMENT. See the GNU General Public License for more details. + ***********************************************************************/ /*! \file octeon_iq.h * \brief Host Driver: Implementation of Octeon input queues. "Input" is * with respect to the Octeon device on the NIC. 
From this driver's @@ -69,7 +64,6 @@ struct oct_iq_stats { u64 tx_vxlan; /* tunnel */ u64 tx_dmamap_fail; u64 tx_restart; - /*u64 tx_timeout_count;*/ }; #define OCT_IQ_STATS_SIZE (sizeof(struct oct_iq_stats)) @@ -78,7 +72,7 @@ struct oct_iq_stats { * The input queue is used to post raw (instruction) mode data or packet * data to Octeon device from the host. Each input queue (upto 4) for * a Octeon device has one such structure to represent it. -*/ + */ struct octeon_instr_queue { struct octeon_device *oct_dev; @@ -118,8 +112,8 @@ struct octeon_instr_queue { u32 octeon_read_index; /** This index aids in finding the window in the queue where Octeon - * has read the commands. - */ + * has read the commands. + */ u32 flush_index; /** This field keeps track of the instructions pending in this queue. */ @@ -150,8 +144,8 @@ struct octeon_instr_queue { u64 last_db_time; /** The doorbell timeout. If the doorbell was not rung for this time and - * fill_cnt is non-zero, ring the doorbell again. - */ + * fill_cnt is non-zero, ring the doorbell again. + */ u32 db_timeout; /** Statistics for this input queue. */ @@ -309,6 +303,9 @@ struct octeon_sc_buffer_pool { atomic_t alloc_buf_count; }; +#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count) \ + (((octeon_dev_ptr)->instr_queue[iq_no]->stats.field) += count) + int octeon_setup_sc_buffer_pool(struct octeon_device *oct); int octeon_free_sc_buffer_pool(struct octeon_device *oct); struct octeon_soft_command * diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c new file mode 100644 index 000000000000..73696b427f06 --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c @@ -0,0 +1,318 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. 
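INCR_INSTRQUEUE_PKT_COUNT() moves into this header (it was previously private to request_manager.c, see that file below) and gains parentheses around octeon_dev_ptr on the way. Call sites keep the same shape; for example (illustrative, with tx_dmamap_fail being one of the oct_iq_stats fields above):

/* Typical call site (illustrative): */
INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, tx_dmamap_fail, 1);

/* which now expands with the device pointer safely parenthesized: */
(((oct)->instr_queue[iq_no]->stats.tx_dmamap_fail) += 1);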
+ ***********************************************************************/
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_main.h"
+#include "octeon_mailbox.h"
+
+/**
+ * octeon_mbox_read:
+ * @mbox: Pointer to the mailbox to read
+ *
+ * Reads 8 bytes of data from the mbox register.
+ * Writes back the acknowledgement indicating completion of the read.
+ */
+int octeon_mbox_read(struct octeon_mbox *mbox)
+{
+	union octeon_mbox_message msg;
+	int ret = 0;
+
+	spin_lock(&mbox->lock);
+
+	msg.u64 = readq(mbox->mbox_read_reg);
+
+	if ((msg.u64 == OCTEON_PFVFACK) || (msg.u64 == OCTEON_PFVFSIG)) {
+		spin_unlock(&mbox->lock);
+		return 0;
+	}
+
+	if (mbox->state & OCTEON_MBOX_STATE_REQUEST_RECEIVING) {
+		mbox->mbox_req.data[mbox->mbox_req.recv_len - 1] = msg.u64;
+		mbox->mbox_req.recv_len++;
+	} else if (mbox->state & OCTEON_MBOX_STATE_RESPONSE_RECEIVING) {
+		mbox->mbox_resp.data[mbox->mbox_resp.recv_len - 1] = msg.u64;
+		mbox->mbox_resp.recv_len++;
+	} else if ((mbox->state & OCTEON_MBOX_STATE_IDLE) &&
+		   (msg.s.type == OCTEON_MBOX_REQUEST)) {
+		mbox->state &= ~OCTEON_MBOX_STATE_IDLE;
+		mbox->state |= OCTEON_MBOX_STATE_REQUEST_RECEIVING;
+		mbox->mbox_req.msg.u64 = msg.u64;
+		mbox->mbox_req.q_no = mbox->q_no;
+		mbox->mbox_req.recv_len = 1;
+	} else if ((mbox->state & OCTEON_MBOX_STATE_RESPONSE_PENDING) &&
+		   (msg.s.type == OCTEON_MBOX_RESPONSE)) {
+		mbox->state &= ~OCTEON_MBOX_STATE_RESPONSE_PENDING;
+		mbox->state |= OCTEON_MBOX_STATE_RESPONSE_RECEIVING;
+		mbox->mbox_resp.msg.u64 = msg.u64;
+		mbox->mbox_resp.q_no = mbox->q_no;
+		mbox->mbox_resp.recv_len = 1;
+	} else {
+		writeq(OCTEON_PFVFERR, mbox->mbox_read_reg);
+		mbox->state |= OCTEON_MBOX_STATE_ERROR;
+		spin_unlock(&mbox->lock);
+		return 1;
+	}
+
+	if (mbox->state & OCTEON_MBOX_STATE_REQUEST_RECEIVING) {
+		if (mbox->mbox_req.recv_len < msg.s.len) {
+			ret = 0;
+		} else {
+			mbox->state &= ~OCTEON_MBOX_STATE_REQUEST_RECEIVING;
+			mbox->state |= OCTEON_MBOX_STATE_REQUEST_RECEIVED;
+			ret = 1;
+		}
+	} else if (mbox->state & OCTEON_MBOX_STATE_RESPONSE_RECEIVING) {
+		if (mbox->mbox_resp.recv_len < msg.s.len) {
+			ret = 0;
+		} else {
+			mbox->state &= ~OCTEON_MBOX_STATE_RESPONSE_RECEIVING;
+			mbox->state |= OCTEON_MBOX_STATE_RESPONSE_RECEIVED;
+			ret = 1;
+		}
+	} else {
+		WARN_ON(1);
+	}
+
+	writeq(OCTEON_PFVFACK, mbox->mbox_read_reg);
+
+	spin_unlock(&mbox->lock);
+
+	return ret;
+}
+
+/**
+ * octeon_mbox_write:
+ * @oct: Pointer to the Octeon device
+ * @mbox_cmd: Command to send to the mailbox
+ *
+ * Populates the queue-specific mbox structure
+ * with the command information.
+ * Writes the command to the mbox register.
+ */
+int octeon_mbox_write(struct octeon_device *oct,
+		      struct octeon_mbox_cmd *mbox_cmd)
+{
+	struct octeon_mbox *mbox = oct->mbox[mbox_cmd->q_no];
+	u32 count, i, ret = OCTEON_MBOX_STATUS_SUCCESS;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mbox->lock, flags);
+
+	if ((mbox_cmd->msg.s.type == OCTEON_MBOX_RESPONSE) &&
+	    !(mbox->state & OCTEON_MBOX_STATE_REQUEST_RECEIVED)) {
+		spin_unlock_irqrestore(&mbox->lock, flags);
+		return OCTEON_MBOX_STATUS_FAILED;
+	}
+
+	if ((mbox_cmd->msg.s.type == OCTEON_MBOX_REQUEST) &&
+	    !(mbox->state & OCTEON_MBOX_STATE_IDLE)) {
+		spin_unlock_irqrestore(&mbox->lock, flags);
+		return OCTEON_MBOX_STATUS_BUSY;
+	}
+
+	if (mbox_cmd->msg.s.type == OCTEON_MBOX_REQUEST) {
+		/* Stash the request (including its callback) in mbox_resp
+		 * so the response path can find fn/fn_arg later.
+		 */
+		memcpy(&mbox->mbox_resp, mbox_cmd,
+		       sizeof(struct octeon_mbox_cmd));
+		mbox->state = OCTEON_MBOX_STATE_RESPONSE_PENDING;
+	}
+
+	spin_unlock_irqrestore(&mbox->lock, flags);
+
+	count = 0;
+
+	while (readq(mbox->mbox_write_reg) != OCTEON_PFVFSIG) {
+		schedule_timeout_uninterruptible(LIO_MBOX_WRITE_WAIT_TIME);
+		if (count++ == LIO_MBOX_WRITE_WAIT_CNT) {
+			ret = OCTEON_MBOX_STATUS_FAILED;
+			break;
+		}
+	}
+
+	if (ret == OCTEON_MBOX_STATUS_SUCCESS) {
+		writeq(mbox_cmd->msg.u64, mbox->mbox_write_reg);
+		for (i = 0; i < (u32)(mbox_cmd->msg.s.len - 1); i++) {
+			count = 0;
+			while (readq(mbox->mbox_write_reg) !=
+			       OCTEON_PFVFACK) {
+				schedule_timeout_uninterruptible(10);
+				if (count++ == LIO_MBOX_WRITE_WAIT_CNT) {
+					ret = OCTEON_MBOX_STATUS_FAILED;
+					break;
+				}
+			}
+			writeq(mbox_cmd->data[i], mbox->mbox_write_reg);
+		}
+	}
+
+	spin_lock_irqsave(&mbox->lock, flags);
+	if (mbox_cmd->msg.s.type == OCTEON_MBOX_RESPONSE) {
+		mbox->state = OCTEON_MBOX_STATE_IDLE;
+		writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg);
+	} else {
+		if ((!mbox_cmd->msg.s.resp_needed) ||
+		    (ret == OCTEON_MBOX_STATUS_FAILED)) {
+			mbox->state &= ~OCTEON_MBOX_STATE_RESPONSE_PENDING;
+			if (!(mbox->state &
+			      (OCTEON_MBOX_STATE_REQUEST_RECEIVING |
+			       OCTEON_MBOX_STATE_REQUEST_RECEIVED)))
+				mbox->state = OCTEON_MBOX_STATE_IDLE;
+		}
+	}
+	spin_unlock_irqrestore(&mbox->lock, flags);
+
+	return ret;
+}
+
+/**
+ * octeon_mbox_process_cmd:
+ * @mbox: Pointer to the mailbox
+ * @mbox_cmd: Pointer to the command received
+ *
+ * Process the command received in the mbox.
+ */
+static int octeon_mbox_process_cmd(struct octeon_mbox *mbox,
+				   struct octeon_mbox_cmd *mbox_cmd)
+{
+	struct octeon_device *oct = mbox->oct_dev;
+
+	switch (mbox_cmd->msg.s.cmd) {
+	case OCTEON_VF_ACTIVE:
+		dev_dbg(&oct->pci_dev->dev, "got vfactive sending data back\n");
+		mbox_cmd->msg.s.type = OCTEON_MBOX_RESPONSE;
+		mbox_cmd->msg.s.resp_needed = 1;
+		mbox_cmd->msg.s.len = 2;
+		mbox_cmd->data[0] = 0; /* VF version is in mbox_cmd->data[0] */
+		((struct lio_version *)&mbox_cmd->data[0])->major =
+			LIQUIDIO_BASE_MAJOR_VERSION;
+		((struct lio_version *)&mbox_cmd->data[0])->minor =
+			LIQUIDIO_BASE_MINOR_VERSION;
+		((struct lio_version *)&mbox_cmd->data[0])->micro =
+			LIQUIDIO_BASE_MICRO_VERSION;
+		memcpy(mbox_cmd->msg.s.params, (uint8_t *)&oct->pfvf_hsword, 6);
+		/* Sending core config info to the corresponding active VF. */
+		octeon_mbox_write(oct, mbox_cmd);
+		break;
+
+	case OCTEON_VF_FLR_REQUEST:
+		dev_info(&oct->pci_dev->dev,
+			 "got a request for FLR from VF that owns DPI ring %u\n",
+			 mbox->q_no);
+		pcie_capability_set_word(
+			oct->sriov_info.dpiring_to_vfpcidev_lut[mbox->q_no],
+			PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
+		break;
+
+	case OCTEON_PF_CHANGED_VF_MACADDR:
+		if (OCTEON_CN23XX_VF(oct))
+			octeon_pf_changed_vf_macaddr(oct,
+						     mbox_cmd->msg.s.params);
+
break; + + default: + break; + } + return 0; +} + +/** + *octeon_mbox_process_message: + * + * Process the received mbox message. + */ +int octeon_mbox_process_message(struct octeon_mbox *mbox) +{ + struct octeon_mbox_cmd mbox_cmd; + unsigned long flags; + + spin_lock_irqsave(&mbox->lock, flags); + + if (mbox->state & OCTEON_MBOX_STATE_ERROR) { + if (mbox->state & (OCTEON_MBOX_STATE_RESPONSE_PENDING | + OCTEON_MBOX_STATE_RESPONSE_RECEIVING)) { + memcpy(&mbox_cmd, &mbox->mbox_resp, + sizeof(struct octeon_mbox_cmd)); + mbox->state = OCTEON_MBOX_STATE_IDLE; + writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg); + spin_unlock_irqrestore(&mbox->lock, flags); + mbox_cmd.recv_status = 1; + if (mbox_cmd.fn) + mbox_cmd.fn(mbox->oct_dev, &mbox_cmd, + mbox_cmd.fn_arg); + return 0; + } + + mbox->state = OCTEON_MBOX_STATE_IDLE; + writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg); + spin_unlock_irqrestore(&mbox->lock, flags); + return 0; + } + + if (mbox->state & OCTEON_MBOX_STATE_RESPONSE_RECEIVED) { + memcpy(&mbox_cmd, &mbox->mbox_resp, + sizeof(struct octeon_mbox_cmd)); + mbox->state = OCTEON_MBOX_STATE_IDLE; + writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg); + spin_unlock_irqrestore(&mbox->lock, flags); + mbox_cmd.recv_status = 0; + if (mbox_cmd.fn) + mbox_cmd.fn(mbox->oct_dev, &mbox_cmd, mbox_cmd.fn_arg); + return 0; + } + + if (mbox->state & OCTEON_MBOX_STATE_REQUEST_RECEIVED) { + memcpy(&mbox_cmd, &mbox->mbox_req, + sizeof(struct octeon_mbox_cmd)); + if (!mbox_cmd.msg.s.resp_needed) { + mbox->state &= ~OCTEON_MBOX_STATE_REQUEST_RECEIVED; + if (!(mbox->state & + OCTEON_MBOX_STATE_RESPONSE_PENDING)) + mbox->state = OCTEON_MBOX_STATE_IDLE; + writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg); + } + + spin_unlock_irqrestore(&mbox->lock, flags); + octeon_mbox_process_cmd(mbox, &mbox_cmd); + return 0; + } + + WARN_ON(1); + + return 0; +} diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h new file mode 100644 index 000000000000..fe60a3e6247b --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h @@ -0,0 +1,115 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. 
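octeon_mbox_read() and octeon_mbox_write() above implement a word-at-a-time handshake: the first 64-bit word is the header (type, resp_needed, cmd, len and inline params), followed by len - 1 data words, with the peer acknowledging each word by writing OCTEON_PFVFACK back through the register. A sketch of how a sender queues a request under this protocol; my_vf_active_cb is a hypothetical callback, invoked from octeon_mbox_process_message() once the response is complete:

/* Sketch: compose and send a two-word OCTEON_VF_ACTIVE request.
 * my_vf_active_cb is hypothetical, not part of the patch.
 */
struct octeon_mbox_cmd cmd;

memset(&cmd, 0, sizeof(cmd));
cmd.msg.s.type = OCTEON_MBOX_REQUEST;
cmd.msg.s.resp_needed = 1;
cmd.msg.s.cmd = OCTEON_VF_ACTIVE;
cmd.msg.s.len = 2;		/* header word + one data word */
cmd.data[0] = 0;		/* payload, e.g. the driver version */
cmd.q_no = 0;
cmd.fn = my_vf_active_cb;
cmd.fn_arg = oct;

if (octeon_mbox_write(oct, &cmd) != OCTEON_MBOX_STATUS_SUCCESS)
	dev_err(&oct->pci_dev->dev, "mbox request failed\n");

One detail worth noting in the header that follows: OCTEON_MBOX_STATE_RESPONSE_RECEIVING and OCTEON_MBOX_STATE_RESPONSE_RECEIVED are both defined as 16, so the bit tests above cannot tell the two states apart; if that aliasing is unintentional, RECEIVED presumably wants 32 and ERROR 64.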
+ ***********************************************************************/ +#ifndef __MAILBOX_H__ +#define __MAILBOX_H__ + +/* Macros for Mail Box Communication */ + +#define OCTEON_MBOX_DATA_MAX 32 + +#define OCTEON_VF_ACTIVE 0x1 +#define OCTEON_VF_FLR_REQUEST 0x2 +#define OCTEON_PF_CHANGED_VF_MACADDR 0x4 + +/*Macro for Read acknowldgement*/ +#define OCTEON_PFVFACK 0xffffffffffffffff +#define OCTEON_PFVFSIG 0x1122334455667788 +#define OCTEON_PFVFERR 0xDEADDEADDEADDEAD + +#define LIO_MBOX_WRITE_WAIT_CNT 1000 +#define LIO_MBOX_WRITE_WAIT_TIME 10 + +enum octeon_mbox_cmd_status { + OCTEON_MBOX_STATUS_SUCCESS = 0, + OCTEON_MBOX_STATUS_FAILED = 1, + OCTEON_MBOX_STATUS_BUSY = 2 +}; + +enum octeon_mbox_message_type { + OCTEON_MBOX_REQUEST = 0, + OCTEON_MBOX_RESPONSE = 1 +}; + +union octeon_mbox_message { + u64 u64; + struct { + u16 type : 1; + u16 resp_needed : 1; + u16 cmd : 6; + u16 len : 8; + u8 params[6]; + } s; +}; + +typedef void (*octeon_mbox_callback_t)(void *, void *, void *); + +struct octeon_mbox_cmd { + union octeon_mbox_message msg; + u64 data[OCTEON_MBOX_DATA_MAX]; + u32 q_no; + u32 recv_len; + u32 recv_status; + octeon_mbox_callback_t fn; + void *fn_arg; +}; + +enum octeon_mbox_state { + OCTEON_MBOX_STATE_IDLE = 1, + OCTEON_MBOX_STATE_REQUEST_RECEIVING = 2, + OCTEON_MBOX_STATE_REQUEST_RECEIVED = 4, + OCTEON_MBOX_STATE_RESPONSE_PENDING = 8, + OCTEON_MBOX_STATE_RESPONSE_RECEIVING = 16, + OCTEON_MBOX_STATE_RESPONSE_RECEIVED = 16, + OCTEON_MBOX_STATE_ERROR = 32 +}; + +struct octeon_mbox { + /** A spinlock to protect access to this q_mbox. */ + spinlock_t lock; + + struct octeon_device *oct_dev; + + u32 q_no; + + enum octeon_mbox_state state; + + struct cavium_wk mbox_poll_wk; + + /** SLI_MAC_PF_MBOX_INT for PF, SLI_PKT_MBOX_INT for VF. */ + void *mbox_int_reg; + + /** SLI_PKT_PF_VF_MBOX_SIG(0) for PF, SLI_PKT_PF_VF_MBOX_SIG(1) for VF. + */ + void *mbox_write_reg; + + /** SLI_PKT_PF_VF_MBOX_SIG(1) for PF, SLI_PKT_PF_VF_MBOX_SIG(0) for VF. + */ + void *mbox_read_reg; + + struct octeon_mbox_cmd mbox_req; + + struct octeon_mbox_cmd mbox_resp; + +}; + +int octeon_mbox_read(struct octeon_mbox *mbox); +int octeon_mbox_write(struct octeon_device *oct, + struct octeon_mbox_cmd *mbox_cmd); +int octeon_mbox_process_message(struct octeon_mbox *mbox); + +#endif diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h index 366298f7bcb2..8cd389148166 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_main.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h @@ -4,7 +4,7 @@ * Contact: support@cavium.com * Please include "LiquidIO" in the subject. * - * Copyright (c) 2003-2015 Cavium, Inc. + * Copyright (c) 2003-2016 Cavium, Inc. * * This file is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, Version 2, as @@ -13,13 +13,8 @@ * This file is distributed in the hope that it will be useful, but * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or - * NONINFRINGEMENT. See the GNU General Public License for more - * details. - * - * This file may also be available under a different license from Cavium. - * Contact Cavium, Inc. for more information - **********************************************************************/ - + * NONINFRINGEMENT. See the GNU General Public License for more details. + ***********************************************************************/ /*! 
\file octeon_main.h * \brief Host Driver: This file is included by all host driver source files * to include common definitions. @@ -66,7 +61,7 @@ void octeon_update_tx_completion_counters(void *buf, int reqtype, unsigned int *bytes_compl); void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl, unsigned int bytes_compl); - +void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac); /** Swap 8B blocks */ static inline void octeon_swap_8B_data(u64 *data, u32 blocks) { @@ -78,10 +73,10 @@ static inline void octeon_swap_8B_data(u64 *data, u32 blocks) } /** - * \brief unmaps a PCI BAR - * @param oct Pointer to Octeon device - * @param baridx bar index - */ + * \brief unmaps a PCI BAR + * @param oct Pointer to Octeon device + * @param baridx bar index + */ static inline void octeon_unmap_pci_barx(struct octeon_device *oct, int baridx) { dev_dbg(&oct->pci_dev->dev, "Freeing PCI mapped regions for Bar%d\n", @@ -116,7 +111,7 @@ static inline int octeon_map_pci_barx(struct octeon_device *oct, mapped_len = oct->mmio[baridx].len; if (!mapped_len) - return 1; + goto err_release_region; if (max_map_len && (mapped_len > max_map_len)) mapped_len = max_map_len; @@ -132,11 +127,15 @@ static inline int octeon_map_pci_barx(struct octeon_device *oct, if (!oct->mmio[baridx].hw_addr) { dev_err(&oct->pci_dev->dev, "error ioremap for bar %d\n", baridx); - return 1; + goto err_release_region; } oct->mmio[baridx].done = 1; return 0; + +err_release_region: + pci_release_region(oct->pci_dev, baridx * 2); + return 1; } static inline void * @@ -203,24 +202,6 @@ out: return errno; } -static inline void -sleep_atomic_cond(wait_queue_head_t *waitq, atomic_t *pcond) -{ - wait_queue_t we; - - init_waitqueue_entry(&we, current); - add_wait_queue(waitq, &we); - while (!atomic_read(pcond)) { - set_current_state(TASK_INTERRUPTIBLE); - if (signal_pending(current)) - goto out; - schedule(); - } -out: - set_current_state(TASK_RUNNING); - remove_wait_queue(waitq, &we); -} - /* Gives up the CPU for a timeout period. * Check that the condition is not true before we go to sleep for a * timeout period. diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c index 0dc081a99b30..13a18c9a7a51 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c @@ -4,7 +4,7 @@ * Contact: support@cavium.com * Please include "LiquidIO" in the subject. * - * Copyright (c) 2003-2015 Cavium, Inc. + * Copyright (c) 2003-2016 Cavium, Inc. * * This file is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, Version 2, as @@ -15,9 +15,6 @@ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or * NONINFRINGEMENT. See the GNU General Public License for more * details. - * - * This file may also be available under a different license from Cavium. - * Contact Cavium, Inc. 
for more information **********************************************************************/ #include <linux/netdevice.h> #include "liquidio_common.h" @@ -39,7 +36,7 @@ octeon_toggle_bar1_swapmode(struct octeon_device *oct, u32 idx) oct->fn_list.bar1_idx_write(oct, idx, mask); } #else -#define octeon_toggle_bar1_swapmode(oct, idx) (oct = oct) +#define octeon_toggle_bar1_swapmode(oct, idx) #endif static void diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.h b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.h index 11b183377b44..bae2fdd89503 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.h @@ -4,7 +4,7 @@ * Contact: support@cavium.com * Please include "LiquidIO" in the subject. * - * Copyright (c) 2003-2015 Cavium, Inc. + * Copyright (c) 2003-2016 Cavium, Inc. * * This file is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, Version 2, as @@ -15,9 +15,6 @@ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or * NONINFRINGEMENT. See the GNU General Public License for more * details. - * - * This file may also be available under a different license from Cavium. - * Contact Cavium, Inc. for more information **********************************************************************/ /*! \file octeon_mem_ops.h diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h index e5d1debd05ad..6bb89419006e 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h @@ -4,7 +4,7 @@ * Contact: support@cavium.com * Please include "LiquidIO" in the subject. * - * Copyright (c) 2003-2015 Cavium, Inc. + * Copyright (c) 2003-2016 Cavium, Inc. * * This file is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, Version 2, as @@ -15,9 +15,6 @@ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or * NONINFRINGEMENT. See the GNU General Public License for more * details. - * - * This file may also be available under a different license from Cavium. - * Contact Cavium, Inc. for more information **********************************************************************/ /*! 
\file octeon_network.h @@ -29,7 +26,7 @@ #include <linux/ptp_clock_kernel.h> #define LIO_MAX_MTU_SIZE (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE) -#define LIO_MIN_MTU_SIZE 68 +#define LIO_MIN_MTU_SIZE ETH_MIN_MTU struct oct_nic_stats_resp { u64 rh; @@ -126,12 +123,13 @@ struct lio { /* work queue for link status */ struct cavium_wq link_status_wq; + int netdev_uc_count; }; #define LIO_SIZE (sizeof(struct lio)) #define GET_LIO(netdev) ((struct lio *)netdev_priv(netdev)) -#define CIU3_WDOG(c) (0x1010000020000ULL + (c << 3)) +#define CIU3_WDOG(c) (0x1010000020000ULL + ((c) << 3)) #define CIU3_WDOG_MASK 12ULL #define LIO_MONITOR_WDOG_EXPIRE 1 #define LIO_MONITOR_CORE_STUCK_MSGD 2 @@ -342,9 +340,9 @@ static inline void tx_buffer_free(void *buffer) } #define lio_dma_alloc(oct, size, dma_addr) \ - dma_alloc_coherent(&oct->pci_dev->dev, size, dma_addr, GFP_KERNEL) + dma_alloc_coherent(&(oct)->pci_dev->dev, size, dma_addr, GFP_KERNEL) #define lio_dma_free(oct, size, virt_addr, dma_addr) \ - dma_free_coherent(&oct->pci_dev->dev, size, virt_addr, dma_addr) + dma_free_coherent(&(oct)->pci_dev->dev, size, virt_addr, dma_addr) static inline void *get_rbd(struct sk_buff *skb) diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c index 40ac1fe88956..c3d6a8228362 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c @@ -4,7 +4,7 @@ * Contact: support@cavium.com * Please include "LiquidIO" in the subject. * - * Copyright (c) 2003-2015 Cavium, Inc. + * Copyright (c) 2003-2016 Cavium, Inc. * * This file is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, Version 2, as @@ -15,9 +15,6 @@ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or * NONINFRINGEMENT. See the GNU General Public License for more * details. - * - * This file may also be available under a different license from Cavium. - * Contact Cavium, Inc. for more information **********************************************************************/ #include <linux/pci.h> #include <linux/netdevice.h> diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h index 4b8da67b995f..0c7a5c9b2932 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h @@ -4,7 +4,7 @@ * Contact: support@cavium.com * Please include "LiquidIO" in the subject. * - * Copyright (c) 2003-2015 Cavium, Inc. + * Copyright (c) 2003-2016 Cavium, Inc. * * This file is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, Version 2, as @@ -15,9 +15,6 @@ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or * NONINFRINGEMENT. See the GNU General Public License for more * details. - * - * This file may also be available under a different license from Cavium. - * Contact Cavium, Inc. for more information **********************************************************************/ /*! \file octeon_nic.h @@ -67,7 +64,7 @@ struct octnic_ctrl_pkt { octnic_ctrl_pkt_cb_fn_t cb_fn; }; -#define MAX_UDD_SIZE(nctrl) (sizeof(nctrl->udd)) +#define MAX_UDD_SIZE(nctrl) (sizeof((nctrl)->udd)) /** Structure of data information passed by the NIC module to the OSI * layer when forwarding data to Octeon device software. 
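Several hunks in this patch only add parentheses around macro parameters (the octeon_read/write_csr* wrappers, CIU3_WDOG, lio_dma_alloc/free, MAX_UDD_SIZE). The failure mode they guard against is worth spelling out once, using CIU3_WDOG as the example:

/* Why the added parentheses matter: << binds tighter than the bitwise
 * operators, so with the old body (c << 3) an expression argument can
 * associate the wrong way:
 *
 *   CIU3_WDOG(core & 3)
 *   old: 0x1010000020000ULL + (core & 3 << 3)
 *        == 0x1010000020000ULL + (core & 24)      (wrong offset)
 *   new: 0x1010000020000ULL + ((core & 3) << 3)   (intended)
 */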
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c index 90866bb50033..3ce66759e80a 100644 --- a/drivers/net/ethernet/cavium/liquidio/request_manager.c +++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c @@ -4,7 +4,7 @@ * Contact: support@cavium.com * Please include "LiquidIO" in the subject. * - * Copyright (c) 2003-2015 Cavium, Inc. + * Copyright (c) 2003-2016 Cavium, Inc. * * This file is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, Version 2, as @@ -15,9 +15,6 @@ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or * NONINFRINGEMENT. See the GNU General Public License for more * details. - * - * This file may also be available under a different license from Cavium. - * Contact Cavium, Inc. for more information **********************************************************************/ #include <linux/pci.h> #include <linux/netdevice.h> @@ -31,9 +28,7 @@ #include "octeon_network.h" #include "cn66xx_device.h" #include "cn23xx_pf_device.h" - -#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count) \ - (octeon_dev_ptr->instr_queue[iq_no]->stats.field += count) +#include "cn23xx_vf_device.h" struct iq_post_status { int status; @@ -71,9 +66,12 @@ int octeon_init_instr_queue(struct octeon_device *oct, int numa_node = cpu_to_node(iq_no % num_online_cpus()); if (OCTEON_CN6XXX(oct)) - conf = &(CFG_GET_IQ_CFG(CHIP_FIELD(oct, cn6xxx, conf))); + conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn6xxx))); else if (OCTEON_CN23XX_PF(oct)) - conf = &(CFG_GET_IQ_CFG(CHIP_FIELD(oct, cn23xx_pf, conf))); + conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn23xx_pf))); + else if (OCTEON_CN23XX_VF(oct)) + conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn23xx_vf))); + if (!conf) { dev_err(&oct->pci_dev->dev, "Unsupported Chip %x\n", oct->chip_id); @@ -145,7 +143,7 @@ int octeon_init_instr_queue(struct octeon_device *oct, spin_lock_init(&iq->iq_flush_running_lock); - oct->io_qmask.iq |= (1ULL << iq_no); + oct->io_qmask.iq |= BIT_ULL(iq_no); /* Set the 32B/64B mode for each input queue */ oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no); @@ -157,6 +155,8 @@ int octeon_init_instr_queue(struct octeon_device *oct, WQ_MEM_RECLAIM, 0); if (!oct->check_db_wq[iq_no].wq) { + vfree(iq->request_list); + iq->request_list = NULL; lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma); dev_err(&oct->pci_dev->dev, "check db wq create failed for iq %d\n", iq_no); @@ -183,10 +183,13 @@ int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no) if (OCTEON_CN6XXX(oct)) desc_size = - CFG_GET_IQ_INSTR_TYPE(CHIP_FIELD(oct, cn6xxx, conf)); + CFG_GET_IQ_INSTR_TYPE(CHIP_CONF(oct, cn6xxx)); else if (OCTEON_CN23XX_PF(oct)) desc_size = - CFG_GET_IQ_INSTR_TYPE(CHIP_FIELD(oct, cn23xx_pf, conf)); + CFG_GET_IQ_INSTR_TYPE(CHIP_CONF(oct, cn23xx_pf)); + else if (OCTEON_CN23XX_VF(oct)) + desc_size = + CFG_GET_IQ_INSTR_TYPE(CHIP_CONF(oct, cn23xx_vf)); vfree(iq->request_list); @@ -239,7 +242,9 @@ int octeon_setup_iq(struct octeon_device *oct, } oct->num_iqs++; - oct->fn_list.enable_io_queues(oct); + if (oct->fn_list.enable_io_queues(oct)) + return 1; + return 0; } @@ -250,9 +255,8 @@ int lio_wait_for_instr_fetch(struct octeon_device *oct) do { instr_cnt = 0; - /*for (i = 0; i < oct->num_iqs; i++) {*/ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { - if (!(oct->io_qmask.iq & (1ULL << i))) + if (!(oct->io_qmask.iq & BIT_ULL(i))) continue; pending = atomic_read(&oct-> @@ -319,7 
+323,8 @@ __post_command2(struct octeon_instr_queue *iq, u8 *cmd) /* "index" is returned, host_write_index is modified. */ st.index = iq->host_write_index; - INCR_INDEX_BY1(iq->host_write_index, iq->max_count); + iq->host_write_index = incr_index(iq->host_write_index, 1, + iq->max_count); iq->fill_cnt++; /* Flush the command into memory. We need to be sure the data is in @@ -389,7 +394,7 @@ lio_process_iq_request_list(struct octeon_device *oct, case REQTYPE_SOFT_COMMAND: sc = buf; - if (OCTEON_CN23XX_PF(oct)) + if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) irh = (struct octeon_instr_irh *) &sc->cmd.cmd3.irh; else @@ -434,7 +439,7 @@ lio_process_iq_request_list(struct octeon_device *oct, skip_this: inst_count++; - INCR_INDEX_BY1(old, iq->max_count); + old = incr_index(old, 1, iq->max_count); if ((napi_budget) && (inst_count >= napi_budget)) break; @@ -577,8 +582,6 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no, /* This is only done here to expedite packets being flushed * for cases where there are no IQ completion interrupts. */ - /*if (iq->do_auto_flush)*/ - /* octeon_flush_iq(oct, iq, 2, 0);*/ return st.status; } @@ -604,7 +607,7 @@ octeon_prepare_soft_command(struct octeon_device *oct, oct_cfg = octeon_get_conf(oct); - if (OCTEON_CN23XX_PF(oct)) { + if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) { ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3; ih3->pkind = oct->instr_queue[sc->iq_no]->txpciq.s.pkind; @@ -697,7 +700,7 @@ int octeon_send_soft_command(struct octeon_device *oct, struct octeon_instr_irh *irh; u32 len; - if (OCTEON_CN23XX_PF(oct)) { + if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) { ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3; if (ih3->dlengsz) { WARN_ON(!sc->dmadptr); @@ -749,8 +752,10 @@ int octeon_setup_sc_buffer_pool(struct octeon_device *oct) lio_dma_alloc(oct, SOFT_COMMAND_BUFFER_SIZE, (dma_addr_t *)&dma_addr); - if (!sc) + if (!sc) { + octeon_free_sc_buffer_pool(oct); return 1; + } sc->dma_addr = dma_addr; sc->size = SOFT_COMMAND_BUFFER_SIZE; diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.c b/drivers/net/ethernet/cavium/liquidio/response_manager.c index be52178d8cb6..2fbaae96b505 100644 --- a/drivers/net/ethernet/cavium/liquidio/response_manager.c +++ b/drivers/net/ethernet/cavium/liquidio/response_manager.c @@ -4,7 +4,7 @@ * Contact: support@cavium.com * Please include "LiquidIO" in the subject. * - * Copyright (c) 2003-2015 Cavium, Inc. + * Copyright (c) 2003-2016 Cavium, Inc. * * This file is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, Version 2, as @@ -15,9 +15,6 @@ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or * NONINFRINGEMENT. See the GNU General Public License for more * details. - * - * This file may also be available under a different license from Cavium. - * Contact Cavium, Inc. 
for more information **********************************************************************/ #include <linux/pci.h> #include <linux/netdevice.h> @@ -81,17 +78,14 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev, spin_lock_bh(&ordered_sc_list->lock); if (ordered_sc_list->head.next == &ordered_sc_list->head) { - /* ordered_sc_list is empty; there is - * nothing to process - */ - spin_unlock_bh - (&ordered_sc_list->lock); + spin_unlock_bh(&ordered_sc_list->lock); return 1; } sc = (struct octeon_soft_command *)ordered_sc_list-> head.next; - if (OCTEON_CN23XX_PF(octeon_dev)) { + if (OCTEON_CN23XX_PF(octeon_dev) || + OCTEON_CN23XX_VF(octeon_dev)) { rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp; rptr = sc->cmd.cmd3.rptr; } else { diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.h b/drivers/net/ethernet/cavium/liquidio/response_manager.h index 7a48752dcb10..cbb2d84e8932 100644 --- a/drivers/net/ethernet/cavium/liquidio/response_manager.h +++ b/drivers/net/ethernet/cavium/liquidio/response_manager.h @@ -4,7 +4,7 @@ * Contact: support@cavium.com * Please include "LiquidIO" in the subject. * - * Copyright (c) 2003-2015 Cavium, Inc. + * Copyright (c) 2003-2016 Cavium, Inc. * * This file is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, Version 2, as @@ -15,9 +15,6 @@ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or * NONINFRINGEMENT. See the GNU General Public License for more * details. - * - * This file may also be available under a different license from Cavium. - * Contact Cavium, Inc. for more information **********************************************************************/ /*! \file response_manager.h @@ -85,7 +82,6 @@ enum { /** A value of 0x00000000 indicates no error i.e. success */ #define DRIVER_ERROR_NONE 0x00000000 -/** (Major number: 0x0000; Minor Number: 0x0001) */ #define DRIVER_ERROR_REQ_PENDING 0x00000001 #define DRIVER_ERROR_REQ_TIMEOUT 0x00000003 #define DRIVER_ERROR_REQ_EINTR 0x00000004 diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c index 4ab404f45b21..21f80f5744ba 100644 --- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c @@ -645,16 +645,6 @@ static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu) struct octeon_mgmt *p = netdev_priv(netdev); int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM; - /* Limit the MTU to make sure the ethernet packets are between - * 64 bytes and 16383 bytes. 
- */ - if (size_without_fcs < 64 || size_without_fcs > 16383) { - dev_warn(p->dev, "MTU must be between %d and %d.\n", - 64 - OCTEON_MGMT_RX_HEADROOM, - 16383 - OCTEON_MGMT_RX_HEADROOM); - return -EINVAL; - } - netdev->mtu = new_mtu; cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, size_without_fcs); @@ -1479,6 +1469,12 @@ static int octeon_mgmt_probe(struct platform_device *pdev) p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size); p->agl_prt_ctl = (u64)devm_ioremap(&pdev->dev, p->agl_prt_ctl_phys, p->agl_prt_ctl_size); + if (!p->mix || !p->agl || !p->agl_prt_ctl) { + dev_err(&pdev->dev, "failed to map I/O memory\n"); + result = -ENOMEM; + goto err; + } + spin_lock_init(&p->lock); skb_queue_head_init(&p->tx_list); @@ -1491,6 +1487,9 @@ static int octeon_mgmt_probe(struct platform_device *pdev) netdev->netdev_ops = &octeon_mgmt_ops; netdev->ethtool_ops = &octeon_mgmt_ethtool_ops; + netdev->min_mtu = 64 - OCTEON_MGMT_RX_HEADROOM; + netdev->max_mtu = 16383 - OCTEON_MGMT_RX_HEADROOM; + mac = of_get_mac_address(pdev->dev.of_node); if (mac) diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h index 30426109711c..e739c7153562 100644 --- a/drivers/net/ethernet/cavium/thunder/nic.h +++ b/drivers/net/ethernet/cavium/thunder/nic.h @@ -47,7 +47,7 @@ /* Min/Max packet size */ #define NIC_HW_MIN_FRS 64 -#define NIC_HW_MAX_FRS 9200 /* 9216 max packet including FCS */ +#define NIC_HW_MAX_FRS 9190 /* Excluding L2 header and FCS */ /* Max pkinds */ #define NIC_MAX_PKIND 16 @@ -149,6 +149,12 @@ struct nicvf_rss_info { u64 key[RSS_HASH_KEY_SIZE]; } ____cacheline_aligned_in_smp; +struct nicvf_pfc { + u8 autoneg; + u8 fc_rx; + u8 fc_tx; +}; + enum rx_stats_reg_offset { RX_OCTS = 0x0, RX_UCAST = 0x1, @@ -178,11 +184,11 @@ enum tx_stats_reg_offset { struct nicvf_hw_stats { u64 rx_bytes; + u64 rx_frames; u64 rx_ucast_frames; u64 rx_bcast_frames; u64 rx_mcast_frames; - u64 rx_fcs_errors; - u64 rx_l2_errors; + u64 rx_drops; u64 rx_drop_red; u64 rx_drop_red_bytes; u64 rx_drop_overrun; @@ -191,6 +197,19 @@ struct nicvf_hw_stats { u64 rx_drop_mcast; u64 rx_drop_l3_bcast; u64 rx_drop_l3_mcast; + u64 rx_fcs_errors; + u64 rx_l2_errors; + + u64 tx_bytes; + u64 tx_frames; + u64 tx_ucast_frames; + u64 tx_bcast_frames; + u64 tx_mcast_frames; + u64 tx_drops; +}; + +struct nicvf_drv_stats { + /* CQE Rx errs */ u64 rx_bgx_truncated_pkts; u64 rx_jabber_errs; u64 rx_fcs_errs; @@ -216,34 +235,30 @@ struct nicvf_hw_stats { u64 rx_l4_pclp; u64 rx_truncated_pkts; - u64 tx_bytes_ok; - u64 tx_ucast_frames_ok; - u64 tx_bcast_frames_ok; - u64 tx_mcast_frames_ok; - u64 tx_drops; -}; - -struct nicvf_drv_stats { - /* Rx */ - u64 rx_frames_ok; - u64 rx_frames_64; - u64 rx_frames_127; - u64 rx_frames_255; - u64 rx_frames_511; - u64 rx_frames_1023; - u64 rx_frames_1518; - u64 rx_frames_jumbo; - u64 rx_drops; - + /* CQE Tx errs */ + u64 tx_desc_fault; + u64 tx_hdr_cons_err; + u64 tx_subdesc_err; + u64 tx_max_size_exceeded; + u64 tx_imm_size_oflow; + u64 tx_data_seq_err; + u64 tx_mem_seq_err; + u64 tx_lock_viol; + u64 tx_data_fault; + u64 tx_tstmp_conflict; + u64 tx_tstmp_timeout; + u64 tx_mem_fault; + u64 tx_csum_overlap; + u64 tx_csum_overflow; + + /* driver debug stats */ u64 rcv_buffer_alloc_failures; - - /* Tx */ - u64 tx_frames_ok; - u64 tx_drops; u64 tx_tso; u64 tx_timeout; u64 txq_stop; u64 txq_wake; + + struct u64_stats_sync syncp; }; struct nicvf { @@ -282,13 +297,14 @@ struct nicvf { u8 node; u8 cpi_alg; - u16 mtu; bool link_up; + u8 mac_type; u8 duplex; u32 speed; bool tns_mode; 
bool loopback_supported; struct nicvf_rss_info rss_info; + struct nicvf_pfc pfc; struct tasklet_struct qs_err_task; struct work_struct reset_task; @@ -298,7 +314,7 @@ struct nicvf { /* Stats */ struct nicvf_hw_stats hw_stats; - struct nicvf_drv_stats drv_stats; + struct nicvf_drv_stats __percpu *drv_stats; struct bgx_stats bgx_stats; /* MSI-X */ @@ -349,6 +365,7 @@ struct nicvf { #define NIC_MBOX_MSG_SNICVF_PTR 0x15 /* Send sqet nicvf ptr to PVF */ #define NIC_MBOX_MSG_LOOPBACK 0x16 /* Set interface in loopback */ #define NIC_MBOX_MSG_RESET_STAT_COUNTER 0x17 /* Reset statistics counters */ +#define NIC_MBOX_MSG_PFC 0x18 /* Pause frame control */ #define NIC_MBOX_MSG_CFG_DONE 0xF0 /* VF configuration done */ #define NIC_MBOX_MSG_SHUTDOWN 0xF1 /* VF is being shutdown */ @@ -438,6 +455,7 @@ struct bgx_stats_msg { /* Physical interface link status */ struct bgx_link_status { u8 msg; + u8 mac_type; u8 link_up; u8 duplex; u32 speed; @@ -490,6 +508,14 @@ struct reset_stat_cfg { u16 sq_stat_mask; }; +struct pfc { + u8 msg; + u8 get; /* Get or set PFC settings */ + u8 autoneg; + u8 fc_rx; + u8 fc_tx; +}; + /* 128 bit shared memory between PF and each VF */ union nic_mbx { struct { u8 msg; } msg; @@ -508,6 +534,7 @@ union nic_mbx { struct nicvf_ptr nicvf; struct set_loopback lbk; struct reset_stat_cfg reset_stat; + struct pfc pfc; }; #define NIC_NODE_ID_MASK 0x03 diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c index 2bbf4cbf08b2..767234e2e8f9 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_main.c +++ b/drivers/net/ethernet/cavium/thunder/nic_main.c @@ -11,6 +11,7 @@ #include <linux/pci.h> #include <linux/etherdevice.h> #include <linux/of.h> +#include <linux/if_vlan.h> #include "nic_reg.h" #include "nic.h" @@ -260,18 +261,31 @@ static void nic_get_bgx_stats(struct nicpf *nic, struct bgx_stats_msg *bgx) /* Update hardware min/max frame size */ static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf) { - if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS)) { - dev_err(&nic->pdev->dev, - "Invalid MTU setting from VF%d rejected, should be between %d and %d\n", - vf, NIC_HW_MIN_FRS, NIC_HW_MAX_FRS); + int bgx, lmac, lmac_cnt; + u64 lmac_credits; + + if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS)) return 1; - } - new_frs += ETH_HLEN; - if (new_frs <= nic->pkind.maxlen) - return 0; - nic->pkind.maxlen = new_frs; - nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG, *(u64 *)&nic->pkind); + bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); + lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); + lmac += bgx * MAX_LMAC_PER_BGX; + + new_frs += VLAN_ETH_HLEN + ETH_FCS_LEN + 4; + + /* Update corresponding LMAC credits */ + lmac_cnt = bgx_get_lmac_count(nic->node, bgx); + lmac_credits = nic_reg_read(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8)); + lmac_credits &= ~(0xFFFFFULL << 12); + lmac_credits |= (((((48 * 1024) / lmac_cnt) - new_frs) / 16) << 12); + nic_reg_write(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8), lmac_credits); + + /* Enforce MTU in HW + * This config is supported only from 88xx pass 2.0 onwards. 
+ */ + if (!pass1_silicon(nic->pdev)) + nic_reg_write(nic, + NIC_PF_LMAC_0_7_CFG2 + (lmac * 8), new_frs); return 0; } @@ -464,7 +478,7 @@ static int nic_init_hw(struct nicpf *nic) /* PKIND configuration */ nic->pkind.minlen = 0; - nic->pkind.maxlen = NIC_HW_MAX_FRS + ETH_HLEN; + nic->pkind.maxlen = NIC_HW_MAX_FRS + VLAN_ETH_HLEN + ETH_FCS_LEN + 4; nic->pkind.lenerr_en = 1; nic->pkind.rx_hdr = 0; nic->pkind.hdr_sl = 0; @@ -795,6 +809,15 @@ static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk) bgx_lmac_internal_loopback(nic->node, bgx_idx, lmac_idx, lbk->enable); + /* Enable moving average calculation. + * Keep the LVL/AVG delay to HW enforced minimum so that not too many + * packets sneak in between average calculations. + */ + nic_reg_write(nic, NIC_PF_CQ_AVG_CFG, + (BIT_ULL(20) | 0x2ull << 14 | 0x1)); + nic_reg_write(nic, NIC_PF_RRM_AVG_CFG, + (BIT_ULL(20) | 0x3ull << 14 | 0x1)); + return 0; } @@ -837,6 +860,7 @@ static int nic_reset_stat_counters(struct nicpf *nic, nic_reg_write(nic, reg_addr, 0); } } + return 0; } @@ -874,6 +898,30 @@ static void nic_enable_vf(struct nicpf *nic, int vf, bool enable) bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, enable); } +static void nic_pause_frame(struct nicpf *nic, int vf, struct pfc *cfg) +{ + int bgx, lmac; + struct pfc pfc; + union nic_mbx mbx = {}; + + if (vf >= nic->num_vf_en) + return; + bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); + lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); + + if (cfg->get) { + bgx_lmac_get_pfc(nic->node, bgx, lmac, &pfc); + mbx.pfc.msg = NIC_MBOX_MSG_PFC; + mbx.pfc.autoneg = pfc.autoneg; + mbx.pfc.fc_rx = pfc.fc_rx; + mbx.pfc.fc_tx = pfc.fc_tx; + nic_send_msg_to_vf(nic, vf, &mbx); + } else { + bgx_lmac_set_pfc(nic->node, bgx, lmac, cfg); + nic_mbx_send_ack(nic, vf); + } +} + /* Interrupt handler to handle mailbox messages from VFs */ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) { @@ -1013,6 +1061,9 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) case NIC_MBOX_MSG_RESET_STAT_COUNTER: ret = nic_reset_stat_counters(nic, vf, &mbx.reset_stat); break; + case NIC_MBOX_MSG_PFC: + nic_pause_frame(nic, vf, &mbx.pfc); + goto unlock; default: dev_err(&nic->pdev->dev, "Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg); @@ -1243,6 +1294,7 @@ static void nic_poll_for_link(struct work_struct *work) mbx.link_status.link_up = link.link_up; mbx.link_status.duplex = link.duplex; mbx.link_status.speed = link.speed; + mbx.link_status.mac_type = link.mac_type; nic_send_msg_to_vf(nic, vf, &mbx); } } diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h index edf779f5a227..80d46337cf29 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_reg.h +++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h @@ -106,6 +106,7 @@ #define NIC_PF_MPI_0_2047_CFG (0x210000) #define NIC_PF_RSSI_0_4097_RQ (0x220000) #define NIC_PF_LMAC_0_7_CFG (0x240000) +#define NIC_PF_LMAC_0_7_CFG2 (0x240100) #define NIC_PF_LMAC_0_7_SW_XOFF (0x242000) #define NIC_PF_LMAC_0_7_CREDIT (0x244000) #define NIC_PF_CHAN_0_255_TX_CFG (0x400000) diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c index ad4fddb55421..2e74bbaa38e1 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c @@ -36,11 +36,11 @@ struct nicvf_stat { static const struct nicvf_stat nicvf_hw_stats[] = { NICVF_HW_STAT(rx_bytes), + NICVF_HW_STAT(rx_frames),
NICVF_HW_STAT(rx_ucast_frames), NICVF_HW_STAT(rx_bcast_frames), NICVF_HW_STAT(rx_mcast_frames), - NICVF_HW_STAT(rx_fcs_errors), - NICVF_HW_STAT(rx_l2_errors), + NICVF_HW_STAT(rx_drops), NICVF_HW_STAT(rx_drop_red), NICVF_HW_STAT(rx_drop_red_bytes), NICVF_HW_STAT(rx_drop_overrun), @@ -49,50 +49,59 @@ static const struct nicvf_stat nicvf_hw_stats[] = { NICVF_HW_STAT(rx_drop_mcast), NICVF_HW_STAT(rx_drop_l3_bcast), NICVF_HW_STAT(rx_drop_l3_mcast), - NICVF_HW_STAT(rx_bgx_truncated_pkts), - NICVF_HW_STAT(rx_jabber_errs), - NICVF_HW_STAT(rx_fcs_errs), - NICVF_HW_STAT(rx_bgx_errs), - NICVF_HW_STAT(rx_prel2_errs), - NICVF_HW_STAT(rx_l2_hdr_malformed), - NICVF_HW_STAT(rx_oversize), - NICVF_HW_STAT(rx_undersize), - NICVF_HW_STAT(rx_l2_len_mismatch), - NICVF_HW_STAT(rx_l2_pclp), - NICVF_HW_STAT(rx_ip_ver_errs), - NICVF_HW_STAT(rx_ip_csum_errs), - NICVF_HW_STAT(rx_ip_hdr_malformed), - NICVF_HW_STAT(rx_ip_payload_malformed), - NICVF_HW_STAT(rx_ip_ttl_errs), - NICVF_HW_STAT(rx_l3_pclp), - NICVF_HW_STAT(rx_l4_malformed), - NICVF_HW_STAT(rx_l4_csum_errs), - NICVF_HW_STAT(rx_udp_len_errs), - NICVF_HW_STAT(rx_l4_port_errs), - NICVF_HW_STAT(rx_tcp_flag_errs), - NICVF_HW_STAT(rx_tcp_offset_errs), - NICVF_HW_STAT(rx_l4_pclp), - NICVF_HW_STAT(rx_truncated_pkts), - NICVF_HW_STAT(tx_bytes_ok), - NICVF_HW_STAT(tx_ucast_frames_ok), - NICVF_HW_STAT(tx_bcast_frames_ok), - NICVF_HW_STAT(tx_mcast_frames_ok), + NICVF_HW_STAT(rx_fcs_errors), + NICVF_HW_STAT(rx_l2_errors), + NICVF_HW_STAT(tx_bytes), + NICVF_HW_STAT(tx_frames), + NICVF_HW_STAT(tx_ucast_frames), + NICVF_HW_STAT(tx_bcast_frames), + NICVF_HW_STAT(tx_mcast_frames), + NICVF_HW_STAT(tx_drops), }; static const struct nicvf_stat nicvf_drv_stats[] = { - NICVF_DRV_STAT(rx_frames_ok), - NICVF_DRV_STAT(rx_frames_64), - NICVF_DRV_STAT(rx_frames_127), - NICVF_DRV_STAT(rx_frames_255), - NICVF_DRV_STAT(rx_frames_511), - NICVF_DRV_STAT(rx_frames_1023), - NICVF_DRV_STAT(rx_frames_1518), - NICVF_DRV_STAT(rx_frames_jumbo), - NICVF_DRV_STAT(rx_drops), + NICVF_DRV_STAT(rx_bgx_truncated_pkts), + NICVF_DRV_STAT(rx_jabber_errs), + NICVF_DRV_STAT(rx_fcs_errs), + NICVF_DRV_STAT(rx_bgx_errs), + NICVF_DRV_STAT(rx_prel2_errs), + NICVF_DRV_STAT(rx_l2_hdr_malformed), + NICVF_DRV_STAT(rx_oversize), + NICVF_DRV_STAT(rx_undersize), + NICVF_DRV_STAT(rx_l2_len_mismatch), + NICVF_DRV_STAT(rx_l2_pclp), + NICVF_DRV_STAT(rx_ip_ver_errs), + NICVF_DRV_STAT(rx_ip_csum_errs), + NICVF_DRV_STAT(rx_ip_hdr_malformed), + NICVF_DRV_STAT(rx_ip_payload_malformed), + NICVF_DRV_STAT(rx_ip_ttl_errs), + NICVF_DRV_STAT(rx_l3_pclp), + NICVF_DRV_STAT(rx_l4_malformed), + NICVF_DRV_STAT(rx_l4_csum_errs), + NICVF_DRV_STAT(rx_udp_len_errs), + NICVF_DRV_STAT(rx_l4_port_errs), + NICVF_DRV_STAT(rx_tcp_flag_errs), + NICVF_DRV_STAT(rx_tcp_offset_errs), + NICVF_DRV_STAT(rx_l4_pclp), + NICVF_DRV_STAT(rx_truncated_pkts), + + NICVF_DRV_STAT(tx_desc_fault), + NICVF_DRV_STAT(tx_hdr_cons_err), + NICVF_DRV_STAT(tx_subdesc_err), + NICVF_DRV_STAT(tx_max_size_exceeded), + NICVF_DRV_STAT(tx_imm_size_oflow), + NICVF_DRV_STAT(tx_data_seq_err), + NICVF_DRV_STAT(tx_mem_seq_err), + NICVF_DRV_STAT(tx_lock_viol), + NICVF_DRV_STAT(tx_data_fault), + NICVF_DRV_STAT(tx_tstmp_conflict), + NICVF_DRV_STAT(tx_tstmp_timeout), + NICVF_DRV_STAT(tx_mem_fault), + NICVF_DRV_STAT(tx_csum_overlap), + NICVF_DRV_STAT(tx_csum_overflow), + NICVF_DRV_STAT(rcv_buffer_alloc_failures), - NICVF_DRV_STAT(tx_frames_ok), NICVF_DRV_STAT(tx_tso), - NICVF_DRV_STAT(tx_drops), NICVF_DRV_STAT(tx_timeout), NICVF_DRV_STAT(txq_stop), NICVF_DRV_STAT(txq_wake), @@ -107,29 +116,65 
@@ static const unsigned int nicvf_n_hw_stats = ARRAY_SIZE(nicvf_hw_stats); static const unsigned int nicvf_n_drv_stats = ARRAY_SIZE(nicvf_drv_stats); static const unsigned int nicvf_n_queue_stats = ARRAY_SIZE(nicvf_queue_stats); -static int nicvf_get_settings(struct net_device *netdev, - struct ethtool_cmd *cmd) +static int nicvf_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) { struct nicvf *nic = netdev_priv(netdev); + u32 supported, advertising; - cmd->supported = 0; - cmd->transceiver = XCVR_EXTERNAL; + supported = 0; + advertising = 0; if (!nic->link_up) { - cmd->duplex = DUPLEX_UNKNOWN; - ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); + cmd->base.duplex = DUPLEX_UNKNOWN; + cmd->base.speed = SPEED_UNKNOWN; return 0; } - if (nic->speed <= 1000) { - cmd->port = PORT_MII; - cmd->autoneg = AUTONEG_ENABLE; - } else { - cmd->port = PORT_FIBRE; - cmd->autoneg = AUTONEG_DISABLE; + switch (nic->speed) { + case SPEED_1000: + cmd->base.port = PORT_MII | PORT_TP; + cmd->base.autoneg = AUTONEG_ENABLE; + supported |= SUPPORTED_MII | SUPPORTED_TP; + supported |= SUPPORTED_1000baseT_Full | + SUPPORTED_1000baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_10baseT_Half; + supported |= SUPPORTED_Autoneg; + advertising |= ADVERTISED_1000baseT_Full | + ADVERTISED_1000baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_10baseT_Half; + break; + case SPEED_10000: + if (nic->mac_type == BGX_MODE_RXAUI) { + cmd->base.port = PORT_TP; + supported |= SUPPORTED_TP; + } else { + cmd->base.port = PORT_FIBRE; + supported |= SUPPORTED_FIBRE; + } + cmd->base.autoneg = AUTONEG_DISABLE; + supported |= SUPPORTED_10000baseT_Full; + break; + case SPEED_40000: + cmd->base.port = PORT_FIBRE; + cmd->base.autoneg = AUTONEG_DISABLE; + supported |= SUPPORTED_FIBRE; + supported |= SUPPORTED_40000baseCR4_Full; + break; } - cmd->duplex = nic->duplex; - ethtool_cmd_speed_set(cmd, nic->speed); + cmd->base.duplex = nic->duplex; + cmd->base.speed = nic->speed; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); return 0; } @@ -278,8 +323,8 @@ static void nicvf_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { struct nicvf *nic = netdev_priv(netdev); - int stat; - int sqs; + int stat, tmp_stats; + int sqs, cpu; nicvf_update_stats(nic); @@ -289,9 +334,13 @@ static void nicvf_get_ethtool_stats(struct net_device *netdev, for (stat = 0; stat < nicvf_n_hw_stats; stat++) *(data++) = ((u64 *)&nic->hw_stats) [nicvf_hw_stats[stat].index]; - for (stat = 0; stat < nicvf_n_drv_stats; stat++) - *(data++) = ((u64 *)&nic->drv_stats) - [nicvf_drv_stats[stat].index]; + for (stat = 0; stat < nicvf_n_drv_stats; stat++) { + tmp_stats = 0; + for_each_possible_cpu(cpu) + tmp_stats += ((u64 *)per_cpu_ptr(nic->drv_stats, cpu)) + [nicvf_drv_stats[stat].index]; + *(data++) = tmp_stats; + } nicvf_get_qset_stats(nic, stats, &data); @@ -677,8 +726,56 @@ static int nicvf_set_channels(struct net_device *dev, return err; } +static void nicvf_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct nicvf *nic = netdev_priv(dev); + union nic_mbx mbx = {}; + + /* Supported only for 10G/40G interfaces */ + if ((nic->mac_type == BGX_MODE_SGMII) || + (nic->mac_type == BGX_MODE_QSGMII) || + (nic->mac_type == BGX_MODE_RGMII)) + return; + + 
mbx.pfc.msg = NIC_MBOX_MSG_PFC; + mbx.pfc.get = 1; + if (!nicvf_send_msg_to_pf(nic, &mbx)) { + pause->autoneg = nic->pfc.autoneg; + pause->rx_pause = nic->pfc.fc_rx; + pause->tx_pause = nic->pfc.fc_tx; + } +} + +static int nicvf_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct nicvf *nic = netdev_priv(dev); + union nic_mbx mbx = {}; + + /* Supported only for 10G/40G interfaces */ + if ((nic->mac_type == BGX_MODE_SGMII) || + (nic->mac_type == BGX_MODE_QSGMII) || + (nic->mac_type == BGX_MODE_RGMII)) + return -EOPNOTSUPP; + + if (pause->autoneg) + return -EOPNOTSUPP; + + mbx.pfc.msg = NIC_MBOX_MSG_PFC; + mbx.pfc.get = 0; + mbx.pfc.fc_rx = pause->rx_pause; + mbx.pfc.fc_tx = pause->tx_pause; + if (nicvf_send_msg_to_pf(nic, &mbx)) + return -EAGAIN; + + nic->pfc.fc_rx = pause->rx_pause; + nic->pfc.fc_tx = pause->tx_pause; + + return 0; +} + static const struct ethtool_ops nicvf_ethtool_ops = { - .get_settings = nicvf_get_settings, .get_link = nicvf_get_link, .get_drvinfo = nicvf_get_drvinfo, .get_msglevel = nicvf_get_msglevel, @@ -698,7 +795,10 @@ static const struct ethtool_ops nicvf_ethtool_ops = { .set_rxfh = nicvf_set_rxfh, .get_channels = nicvf_get_channels, .set_channels = nicvf_set_channels, + .get_pauseparam = nicvf_get_pauseparam, + .set_pauseparam = nicvf_set_pauseparam, .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = nicvf_get_link_ksettings, }; void nicvf_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 45a13f718863..2006f58b14b1 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -69,25 +69,6 @@ static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx) return qidx; } -static inline void nicvf_set_rx_frame_cnt(struct nicvf *nic, - struct sk_buff *skb) -{ - if (skb->len <= 64) - nic->drv_stats.rx_frames_64++; - else if (skb->len <= 127) - nic->drv_stats.rx_frames_127++; - else if (skb->len <= 255) - nic->drv_stats.rx_frames_255++; - else if (skb->len <= 511) - nic->drv_stats.rx_frames_511++; - else if (skb->len <= 1023) - nic->drv_stats.rx_frames_1023++; - else if (skb->len <= 1518) - nic->drv_stats.rx_frames_1518++; - else - nic->drv_stats.rx_frames_jumbo++; -} - /* The Cavium ThunderX network controller can *only* be found in SoCs * containing the ThunderX ARM64 CPU implementation. 
All accesses to the device * registers on this platform are implicitly strongly ordered with respect @@ -240,6 +221,7 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic) nic->link_up = mbx.link_status.link_up; nic->duplex = mbx.link_status.duplex; nic->speed = mbx.link_status.speed; + nic->mac_type = mbx.link_status.mac_type; if (nic->link_up) { netdev_info(nic->netdev, "%s: Link is Up %d Mbps %s\n", nic->netdev->name, nic->speed, @@ -274,6 +256,12 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic) nic->pnicvf = (struct nicvf *)mbx.nicvf.nicvf; nic->pf_acked = true; break; + case NIC_MBOX_MSG_PFC: + nic->pfc.autoneg = mbx.pfc.autoneg; + nic->pfc.fc_rx = mbx.pfc.fc_rx; + nic->pfc.fc_tx = mbx.pfc.fc_tx; + nic->pf_acked = true; + break; default: netdev_err(nic->netdev, "Invalid message from PF, msg 0x%x\n", mbx.msg.msg); @@ -492,9 +480,6 @@ int nicvf_set_real_num_queues(struct net_device *netdev, static int nicvf_init_resources(struct nicvf *nic) { int err; - union nic_mbx mbx = {}; - - mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE; /* Enable Qset */ nicvf_qset_config(nic, true); @@ -507,14 +492,10 @@ static int nicvf_init_resources(struct nicvf *nic) return err; } - /* Send VF config done msg to PF */ - nicvf_write_to_mbx(nic, &mbx); - return 0; } static void nicvf_snd_pkt_handler(struct net_device *netdev, - struct cmp_queue *cq, struct cqe_send_t *cqe_tx, int cqe_type, int budget, unsigned int *tx_pkts, unsigned int *tx_bytes) @@ -536,7 +517,7 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev, __func__, cqe_tx->sq_qs, cqe_tx->sq_idx, cqe_tx->sqe_ptr, hdr->subdesc_cnt); - nicvf_check_cqe_tx_errs(nic, cq, cqe_tx); + nicvf_check_cqe_tx_errs(nic, cqe_tx); skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr]; if (skb) { /* Check for dummy descriptor used for HW TSO offload on 88xx */ @@ -630,8 +611,6 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev, return; } - nicvf_set_rx_frame_cnt(nic, skb); - nicvf_set_rxhash(netdev, cqe_rx, skb); skb_record_rx_queue(skb, rq_idx); @@ -665,6 +644,7 @@ static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx, struct cmp_queue *cq = &qs->cq[cq_idx]; struct cqe_rx_t *cq_desc; struct netdev_queue *txq; + struct snd_queue *sq; unsigned int tx_pkts = 0, tx_bytes = 0; spin_lock_bh(&cq->lock); @@ -703,7 +683,7 @@ loop: work_done++; break; case CQE_TYPE_SEND: - nicvf_snd_pkt_handler(netdev, cq, + nicvf_snd_pkt_handler(netdev, (void *)cq_desc, CQE_TYPE_SEND, budget, &tx_pkts, &tx_bytes); tx_done++; @@ -730,17 +710,21 @@ loop: done: /* Wakeup TXQ if its stopped earlier due to SQ full */ - if (tx_done) { + sq = &nic->qs->sq[cq_idx]; + if (tx_done || + (atomic_read(&sq->free_cnt) >= MIN_SQ_DESC_PER_PKT_XMIT)) { netdev = nic->pnicvf->netdev; txq = netdev_get_tx_queue(netdev, nicvf_netdev_qidx(nic, cq_idx)); if (tx_pkts) netdev_tx_completed_queue(txq, tx_pkts, tx_bytes); - nic = nic->pnicvf; + /* To read updated queue and carrier status */ + smp_mb(); if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) { - netif_tx_start_queue(txq); - nic->drv_stats.txq_wake++; + netif_tx_wake_queue(txq); + nic = nic->pnicvf; + this_cpu_inc(nic->drv_stats->txq_wake); if (netif_msg_tx_err(nic)) netdev_warn(netdev, "%s: Transmit queue wakeup SQ%d\n", @@ -1075,6 +1059,9 @@ static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev) struct nicvf *nic = netdev_priv(netdev); int qid = skb_get_queue_mapping(skb); struct netdev_queue *txq = netdev_get_tx_queue(netdev, qid); + struct nicvf *snic; + struct snd_queue *sq; + int tmp; /* 
Check for minimum packet length */ if (skb->len <= ETH_HLEN) { @@ -1082,13 +1069,39 @@ static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev) return NETDEV_TX_OK; } - if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) { + snic = nic; + /* Get secondary Qset's SQ structure */ + if (qid >= MAX_SND_QUEUES_PER_QS) { + tmp = qid / MAX_SND_QUEUES_PER_QS; + snic = (struct nicvf *)nic->snicvf[tmp - 1]; + if (!snic) { + netdev_warn(nic->netdev, + "Secondary Qset#%d's ptr not initialized\n", + tmp - 1); + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + qid = qid % MAX_SND_QUEUES_PER_QS; + } + + sq = &snic->qs->sq[qid]; + if (!netif_tx_queue_stopped(txq) && + !nicvf_sq_append_skb(snic, sq, skb, qid)) { netif_tx_stop_queue(txq); - nic->drv_stats.txq_stop++; - if (netif_msg_tx_err(nic)) - netdev_warn(netdev, - "%s: Transmit ring full, stopping SQ%d\n", - netdev->name, qid); + + /* Barrier, so that stop_queue is visible to other cpus */ + smp_mb(); + + /* Check again, in case another cpu freed descriptors */ + if (atomic_read(&sq->free_cnt) > MIN_SQ_DESC_PER_PKT_XMIT) { + netif_tx_wake_queue(txq); + } else { + this_cpu_inc(nic->drv_stats->txq_stop); + if (netif_msg_tx_err(nic)) + netdev_warn(netdev, + "%s: Transmit ring full, stopping SQ%d\n", + netdev->name, qid); + } return NETDEV_TX_BUSY; } @@ -1189,14 +1202,24 @@ int nicvf_stop(struct net_device *netdev) return 0; } +static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu) +{ + union nic_mbx mbx = {}; + + mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS; + mbx.frs.max_frs = mtu; + mbx.frs.vf_id = nic->vf_id; + + return nicvf_send_msg_to_pf(nic, &mbx); +} + int nicvf_open(struct net_device *netdev) { - int err, qidx; + int cpu, err, qidx; struct nicvf *nic = netdev_priv(netdev); struct queue_set *qs = nic->qs; struct nicvf_cq_poll *cq_poll = NULL; - - nic->mtu = netdev->mtu; + union nic_mbx mbx = {}; netif_carrier_off(netdev); @@ -1248,9 +1271,17 @@ int nicvf_open(struct net_device *netdev) if (nic->sqs_mode) nicvf_get_primary_vf_struct(nic); - /* Configure receive side scaling */ - if (!nic->sqs_mode) + /* Configure receive side scaling and MTU */ + if (!nic->sqs_mode) { nicvf_rss_init(nic); + if (nicvf_update_hw_max_frs(nic, netdev->mtu)) + goto cleanup; + + /* Clear percpu stats */ + for_each_possible_cpu(cpu) + memset(per_cpu_ptr(nic->drv_stats, cpu), 0, + sizeof(struct nicvf_drv_stats)); + } err = nicvf_register_interrupts(nic); if (err) @@ -1276,8 +1307,9 @@ int nicvf_open(struct net_device *netdev) for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx); - nic->drv_stats.txq_stop = 0; - nic->drv_stats.txq_wake = 0; + /* Send VF config done msg to PF */ + mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE; + nicvf_write_to_mbx(nic, &mbx); return 0; cleanup: @@ -1297,31 +1329,20 @@ napi_del: return err; } -static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu) -{ - union nic_mbx mbx = {}; - - mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS; - mbx.frs.max_frs = mtu; - mbx.frs.vf_id = nic->vf_id; - - return nicvf_send_msg_to_pf(nic, &mbx); -} - static int nicvf_change_mtu(struct net_device *netdev, int new_mtu) { struct nicvf *nic = netdev_priv(netdev); + int orig_mtu = netdev->mtu; - if (new_mtu > NIC_HW_MAX_FRS) - return -EINVAL; + netdev->mtu = new_mtu; - if (new_mtu < NIC_HW_MIN_FRS) - return -EINVAL; + if (!netif_running(netdev)) + return 0; - if (nicvf_update_hw_max_frs(nic, new_mtu)) + if (nicvf_update_hw_max_frs(nic, new_mtu)) { + netdev->mtu = orig_mtu; return -EINVAL; - netdev->mtu = 
new_mtu; - nic->mtu = new_mtu; + } return 0; } @@ -1379,9 +1400,10 @@ void nicvf_update_lmac_stats(struct nicvf *nic) void nicvf_update_stats(struct nicvf *nic) { - int qidx; + int qidx, cpu; + u64 tmp_stats = 0; struct nicvf_hw_stats *stats = &nic->hw_stats; - struct nicvf_drv_stats *drv_stats = &nic->drv_stats; + struct nicvf_drv_stats *drv_stats; struct queue_set *qs = nic->qs; #define GET_RX_STATS(reg) \ @@ -1404,21 +1426,33 @@ void nicvf_update_stats(struct nicvf *nic) stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST); stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST); - stats->tx_bytes_ok = GET_TX_STATS(TX_OCTS); - stats->tx_ucast_frames_ok = GET_TX_STATS(TX_UCAST); - stats->tx_bcast_frames_ok = GET_TX_STATS(TX_BCAST); - stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST); + stats->tx_bytes = GET_TX_STATS(TX_OCTS); + stats->tx_ucast_frames = GET_TX_STATS(TX_UCAST); + stats->tx_bcast_frames = GET_TX_STATS(TX_BCAST); + stats->tx_mcast_frames = GET_TX_STATS(TX_MCAST); stats->tx_drops = GET_TX_STATS(TX_DROP); - drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok + - stats->tx_bcast_frames_ok + - stats->tx_mcast_frames_ok; - drv_stats->rx_frames_ok = stats->rx_ucast_frames + - stats->rx_bcast_frames + - stats->rx_mcast_frames; - drv_stats->rx_drops = stats->rx_drop_red + - stats->rx_drop_overrun; - drv_stats->tx_drops = stats->tx_drops; + /* On T88 pass 2.0, the dummy SQE added for TSO notification + * via CQE has 'dont_send' set. Hence HW drops the pkt pointed + * to by the dummy SQE and results in the tx_drops counter being + * incremented. Subtracting it from the tx_tso counter will give + * the exact tx_drops count. + */ + if (nic->t88 && nic->hw_tso) { + for_each_possible_cpu(cpu) { + drv_stats = per_cpu_ptr(nic->drv_stats, cpu); + tmp_stats += drv_stats->tx_tso; + } + stats->tx_drops = tmp_stats - stats->tx_drops; + } + stats->tx_frames = stats->tx_ucast_frames + + stats->tx_bcast_frames + + stats->tx_mcast_frames; + stats->rx_frames = stats->rx_ucast_frames + + stats->rx_bcast_frames + + stats->rx_mcast_frames; + stats->rx_drops = stats->rx_drop_red + + stats->rx_drop_overrun; /* Update RQ and SQ stats */ for (qidx = 0; qidx < qs->rq_cnt; qidx++) @@ -1432,18 +1466,17 @@ static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev, { struct nicvf *nic = netdev_priv(netdev); struct nicvf_hw_stats *hw_stats = &nic->hw_stats; - struct nicvf_drv_stats *drv_stats = &nic->drv_stats; nicvf_update_stats(nic); stats->rx_bytes = hw_stats->rx_bytes; - stats->rx_packets = drv_stats->rx_frames_ok; - stats->rx_dropped = drv_stats->rx_drops; + stats->rx_packets = hw_stats->rx_frames; + stats->rx_dropped = hw_stats->rx_drops; stats->multicast = hw_stats->rx_mcast_frames; - stats->tx_bytes = hw_stats->tx_bytes_ok; - stats->tx_packets = drv_stats->tx_frames_ok; - stats->tx_dropped = drv_stats->tx_drops; + stats->tx_bytes = hw_stats->tx_bytes; + stats->tx_packets = hw_stats->tx_frames; + stats->tx_dropped = hw_stats->tx_drops; return stats; } @@ -1456,7 +1489,7 @@ static void nicvf_tx_timeout(struct net_device *dev) netdev_warn(dev, "%s: Transmit timed out, resetting\n", dev->name); - nic->drv_stats.tx_timeout++; + this_cpu_inc(nic->drv_stats->tx_timeout); schedule_work(&nic->reset_task); } @@ -1590,6 +1623,12 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_free_netdev; } + nic->drv_stats = netdev_alloc_pcpu_stats(struct nicvf_drv_stats); + if (!nic->drv_stats) { + err = -ENOMEM; + goto err_free_netdev; + } + + err = 
nicvf_set_qset_resources(nic); if (err) goto err_free_netdev; @@ -1630,6 +1669,10 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->netdev_ops = &nicvf_netdev_ops; netdev->watchdog_timeo = NICVF_TX_TIMEOUT; + /* MTU range: 64 - 9200 */ + netdev->min_mtu = NIC_HW_MIN_FRS; + netdev->max_mtu = NIC_HW_MAX_FRS; + INIT_WORK(&nic->reset_task, nicvf_reset_task); err = register_netdev(netdev); @@ -1648,6 +1691,8 @@ err_unregister_interrupts: nicvf_unregister_interrupts(nic); err_free_netdev: pci_set_drvdata(pdev, NULL); + if (nic->drv_stats) + free_percpu(nic->drv_stats); free_netdev(netdev); err_release_regions: pci_release_regions(pdev); @@ -1675,6 +1720,8 @@ static void nicvf_remove(struct pci_dev *pdev) unregister_netdev(pnetdev); nicvf_unregister_interrupts(nic); pci_set_drvdata(pdev, NULL); + if (nic->drv_stats) + free_percpu(nic->drv_stats); free_netdev(netdev); pci_release_regions(pdev); pci_disable_device(pdev); diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index a4fc50155881..d2ac133e36f1 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -104,7 +104,8 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp, nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, order); if (!nic->rb_page) { - nic->drv_stats.rcv_buffer_alloc_failures++; + this_cpu_inc(nic->pnicvf->drv_stats-> + rcv_buffer_alloc_failures); return -ENOMEM; } nic->rb_page_offset = 0; @@ -270,7 +271,8 @@ refill: rbdr_idx, new_rb); next_rbdr: /* Re-enable RBDR interrupts only if buffer allocation is success */ - if (!nic->rb_alloc_fail && rbdr->enable) + if (!nic->rb_alloc_fail && rbdr->enable && + netif_running(nic->pnicvf->netdev)) nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx); if (rbdr_idx) @@ -361,6 +363,8 @@ static int nicvf_init_snd_queue(struct nicvf *nic, static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq) { + struct sk_buff *skb; + if (!sq) return; if (!sq->dmem.base) @@ -371,6 +375,15 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq) sq->dmem.q_len * TSO_HEADER_SIZE, sq->tso_hdrs, sq->tso_hdrs_phys); + /* Free pending skbs in the queue */ + smp_rmb(); + while (sq->head != sq->tail) { + skb = (struct sk_buff *)sq->skbuff[sq->head]; + if (skb) + dev_kfree_skb_any(skb); + sq->head++; + sq->head &= (sq->dmem.q_len - 1); + } kfree(sq->skbuff); nicvf_free_q_desc_mem(nic, &sq->dmem); } @@ -483,9 +496,12 @@ static void nicvf_reset_rcv_queue_stats(struct nicvf *nic) { union nic_mbx mbx = {}; - /* Reset all RXQ's stats */ + /* Reset all RQ/SQ and VF stats */ mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER; + mbx.reset_stat.rx_stat_mask = 0x3FFF; + mbx.reset_stat.tx_stat_mask = 0x1F; mbx.reset_stat.rq_stat_mask = 0xFFFF; + mbx.reset_stat.sq_stat_mask = 0xFFFF; nicvf_send_msg_to_pf(nic, &mbx); } @@ -528,19 +544,26 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs, nicvf_send_msg_to_pf(nic, &mbx); mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG; - mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (qs->vnic_id << 0); + mbx.rq.cfg = BIT_ULL(63) | BIT_ULL(62) | + (RQ_PASS_RBDR_LVL << 16) | (RQ_PASS_CQ_LVL << 8) | + (qs->vnic_id << 0); nicvf_send_msg_to_pf(nic, &mbx); /* RQ drop config * Enable CQ drop to reserve sufficient CQEs for all tx packets */ mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG; - mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8); + mbx.rq.cfg = BIT_ULL(63) | 
BIT_ULL(62) | + (RQ_PASS_RBDR_LVL << 40) | (RQ_DROP_RBDR_LVL << 32) | + (RQ_PASS_CQ_LVL << 16) | (RQ_DROP_CQ_LVL << 8); nicvf_send_msg_to_pf(nic, &mbx); - nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00); - if (!nic->sqs_mode) + if (!nic->sqs_mode && (qidx == 0)) { + /* Enable checking L3/L4 length and TCP/UDP checksums */ + nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, + (BIT(24) | BIT(23) | BIT(21))); nicvf_config_vlan_stripping(nic, nic->netdev->features); + } /* Enable Receive queue */ memset(&rq_cfg, 0, sizeof(struct rq_cfg)); @@ -631,6 +654,7 @@ static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs, sq_cfg.ldwb = 0; sq_cfg.qsize = SND_QSIZE; sq_cfg.tstmp_bgx_intf = 0; + sq_cfg.cq_limit = 0; nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg); /* Set threshold value for interrupt generation */ @@ -1029,7 +1053,7 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry, hdr->tso_max_paysize = skb_shinfo(skb)->gso_size; /* For non-tunneled pkts, point this to L2 ethertype */ hdr->inner_l3_offset = skb_network_offset(skb) - 2; - nic->drv_stats.tx_tso++; + this_cpu_inc(nic->pnicvf->drv_stats->tx_tso); } } @@ -1161,35 +1185,17 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq, nicvf_sq_doorbell(nic, skb, sq_num, desc_cnt); - nic->drv_stats.tx_tso++; + this_cpu_inc(nic->pnicvf->drv_stats->tx_tso); return 1; } /* Append an skb to a SQ for packet transfer. */ -int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb) +int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq, + struct sk_buff *skb, u8 sq_num) { int i, size; int subdesc_cnt, tso_sqe = 0; - int sq_num, qentry; - struct queue_set *qs; - struct snd_queue *sq; - - sq_num = skb_get_queue_mapping(skb); - if (sq_num >= MAX_SND_QUEUES_PER_QS) { - /* Get secondary Qset's SQ structure */ - i = sq_num / MAX_SND_QUEUES_PER_QS; - if (!nic->snicvf[i - 1]) { - netdev_warn(nic->netdev, - "Secondary Qset#%d's ptr not initialized\n", - i - 1); - return 1; - } - nic = (struct nicvf *)nic->snicvf[i - 1]; - sq_num = sq_num % MAX_SND_QUEUES_PER_QS; - } - - qs = nic->qs; - sq = &qs->sq[sq_num]; + int qentry; subdesc_cnt = nicvf_sq_subdesc_required(nic, skb); if (subdesc_cnt > atomic_read(&sq->free_cnt)) @@ -1422,8 +1428,6 @@ void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx) /* Check for errors in the receive cmp.queue entry */ int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx) { - struct nicvf_hw_stats *stats = &nic->hw_stats; - if (!cqe_rx->err_level && !cqe_rx->err_opcode) return 0; @@ -1435,76 +1439,76 @@ int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx) switch (cqe_rx->err_opcode) { case CQ_RX_ERROP_RE_PARTIAL: - stats->rx_bgx_truncated_pkts++; + this_cpu_inc(nic->drv_stats->rx_bgx_truncated_pkts); break; case CQ_RX_ERROP_RE_JABBER: - stats->rx_jabber_errs++; + this_cpu_inc(nic->drv_stats->rx_jabber_errs); break; case CQ_RX_ERROP_RE_FCS: - stats->rx_fcs_errs++; + this_cpu_inc(nic->drv_stats->rx_fcs_errs); break; case CQ_RX_ERROP_RE_RX_CTL: - stats->rx_bgx_errs++; + this_cpu_inc(nic->drv_stats->rx_bgx_errs); break; case CQ_RX_ERROP_PREL2_ERR: - stats->rx_prel2_errs++; + this_cpu_inc(nic->drv_stats->rx_prel2_errs); break; case CQ_RX_ERROP_L2_MAL: - stats->rx_l2_hdr_malformed++; + this_cpu_inc(nic->drv_stats->rx_l2_hdr_malformed); break; case CQ_RX_ERROP_L2_OVERSIZE: - stats->rx_oversize++; + this_cpu_inc(nic->drv_stats->rx_oversize); break; case CQ_RX_ERROP_L2_UNDERSIZE: - 
stats->rx_undersize++; + this_cpu_inc(nic->drv_stats->rx_undersize); break; case CQ_RX_ERROP_L2_LENMISM: - stats->rx_l2_len_mismatch++; + this_cpu_inc(nic->drv_stats->rx_l2_len_mismatch); break; case CQ_RX_ERROP_L2_PCLP: - stats->rx_l2_pclp++; + this_cpu_inc(nic->drv_stats->rx_l2_pclp); break; case CQ_RX_ERROP_IP_NOT: - stats->rx_ip_ver_errs++; + this_cpu_inc(nic->drv_stats->rx_ip_ver_errs); break; case CQ_RX_ERROP_IP_CSUM_ERR: - stats->rx_ip_csum_errs++; + this_cpu_inc(nic->drv_stats->rx_ip_csum_errs); break; case CQ_RX_ERROP_IP_MAL: - stats->rx_ip_hdr_malformed++; + this_cpu_inc(nic->drv_stats->rx_ip_hdr_malformed); break; case CQ_RX_ERROP_IP_MALD: - stats->rx_ip_payload_malformed++; + this_cpu_inc(nic->drv_stats->rx_ip_payload_malformed); break; case CQ_RX_ERROP_IP_HOP: - stats->rx_ip_ttl_errs++; + this_cpu_inc(nic->drv_stats->rx_ip_ttl_errs); break; case CQ_RX_ERROP_L3_PCLP: - stats->rx_l3_pclp++; + this_cpu_inc(nic->drv_stats->rx_l3_pclp); break; case CQ_RX_ERROP_L4_MAL: - stats->rx_l4_malformed++; + this_cpu_inc(nic->drv_stats->rx_l4_malformed); break; case CQ_RX_ERROP_L4_CHK: - stats->rx_l4_csum_errs++; + this_cpu_inc(nic->drv_stats->rx_l4_csum_errs); break; case CQ_RX_ERROP_UDP_LEN: - stats->rx_udp_len_errs++; + this_cpu_inc(nic->drv_stats->rx_udp_len_errs); break; case CQ_RX_ERROP_L4_PORT: - stats->rx_l4_port_errs++; + this_cpu_inc(nic->drv_stats->rx_l4_port_errs); break; case CQ_RX_ERROP_TCP_FLAG: - stats->rx_tcp_flag_errs++; + this_cpu_inc(nic->drv_stats->rx_tcp_flag_errs); break; case CQ_RX_ERROP_TCP_OFFSET: - stats->rx_tcp_offset_errs++; + this_cpu_inc(nic->drv_stats->rx_tcp_offset_errs); break; case CQ_RX_ERROP_L4_PCLP: - stats->rx_l4_pclp++; + this_cpu_inc(nic->drv_stats->rx_l4_pclp); break; case CQ_RX_ERROP_RBDR_TRUNC: - stats->rx_truncated_pkts++; + this_cpu_inc(nic->drv_stats->rx_truncated_pkts); break; } @@ -1512,53 +1516,52 @@ int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx) } /* Check for errors in the send cmp.queue entry */ -int nicvf_check_cqe_tx_errs(struct nicvf *nic, - struct cmp_queue *cq, struct cqe_send_t *cqe_tx) +int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx) { - struct cmp_queue_stats *stats = &cq->stats; - switch (cqe_tx->send_status) { case CQ_TX_ERROP_GOOD: - stats->tx.good++; return 0; case CQ_TX_ERROP_DESC_FAULT: - stats->tx.desc_fault++; + this_cpu_inc(nic->drv_stats->tx_desc_fault); break; case CQ_TX_ERROP_HDR_CONS_ERR: - stats->tx.hdr_cons_err++; + this_cpu_inc(nic->drv_stats->tx_hdr_cons_err); break; case CQ_TX_ERROP_SUBDC_ERR: - stats->tx.subdesc_err++; + this_cpu_inc(nic->drv_stats->tx_subdesc_err); + break; + case CQ_TX_ERROP_MAX_SIZE_VIOL: + this_cpu_inc(nic->drv_stats->tx_max_size_exceeded); break; case CQ_TX_ERROP_IMM_SIZE_OFLOW: - stats->tx.imm_size_oflow++; + this_cpu_inc(nic->drv_stats->tx_imm_size_oflow); break; case CQ_TX_ERROP_DATA_SEQUENCE_ERR: - stats->tx.data_seq_err++; + this_cpu_inc(nic->drv_stats->tx_data_seq_err); break; case CQ_TX_ERROP_MEM_SEQUENCE_ERR: - stats->tx.mem_seq_err++; + this_cpu_inc(nic->drv_stats->tx_mem_seq_err); break; case CQ_TX_ERROP_LOCK_VIOL: - stats->tx.lock_viol++; + this_cpu_inc(nic->drv_stats->tx_lock_viol); break; case CQ_TX_ERROP_DATA_FAULT: - stats->tx.data_fault++; + this_cpu_inc(nic->drv_stats->tx_data_fault); break; case CQ_TX_ERROP_TSTMP_CONFLICT: - stats->tx.tstmp_conflict++; + this_cpu_inc(nic->drv_stats->tx_tstmp_conflict); break; case CQ_TX_ERROP_TSTMP_TIMEOUT: - stats->tx.tstmp_timeout++; + this_cpu_inc(nic->drv_stats->tx_tstmp_timeout); break; 
case CQ_TX_ERROP_MEM_FAULT: - stats->tx.mem_fault++; + this_cpu_inc(nic->drv_stats->tx_mem_fault); break; case CQ_TX_ERROP_CK_OVERLAP: - stats->tx.csum_overlap++; + this_cpu_inc(nic->drv_stats->tx_csum_overlap); break; case CQ_TX_ERROP_CK_OFLOW: - stats->tx.csum_overflow++; + this_cpu_inc(nic->drv_stats->tx_csum_overflow); break; } diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h index 869f3386028b..9e2104675bc9 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h @@ -85,12 +85,26 @@ #define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \ MAX_CQE_PER_PKT_XMIT) -/* Calculate number of CQEs to reserve for all SQEs. - * Its 1/256th level of CQ size. - * '+ 1' to account for pipelining + +/* RED and Backpressure levels of CQ for pkt reception + * For CQ, level is a measure of emptiness i.e 0x0 means full + * eg: For CQ of size 4K, and for pass/drop levels of 160/144 + * HW accepts pkt if unused CQE >= 2560 + * RED accepts pkt if unused CQE < 2560 & >= 2304 + * DROPs pkts if unused CQE < 2304 + */ +#define RQ_PASS_CQ_LVL 160ULL +#define RQ_DROP_CQ_LVL 144ULL + +/* RED and Backpressure levels of RBDR for pkt reception + * For RBDR, level is a measure of fullness i.e 0x0 means empty + * eg: For RBDR of size 8K, and for pass/drop levels of 4/0 + * HW accepts pkt if unused RBs >= 256 + * RED accepts pkt if unused RBs < 256 & >= 0 + * DROPs pkts if unused RBs < 0 */ -#define RQ_CQ_DROP ((256 / (CMP_QUEUE_LEN / \ - (CMP_QUEUE_LEN - MAX_CQES_FOR_TX))) + 1) +#define RQ_PASS_RBDR_LVL 8ULL +#define RQ_DROP_RBDR_LVL 0ULL /* Descriptor size in bytes */ #define SND_QUEUE_DESC_SIZE 16 @@ -158,6 +172,7 @@ enum CQ_TX_ERROP_E { CQ_TX_ERROP_DESC_FAULT = 0x10, CQ_TX_ERROP_HDR_CONS_ERR = 0x11, CQ_TX_ERROP_SUBDC_ERR = 0x12, + CQ_TX_ERROP_MAX_SIZE_VIOL = 0x13, CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80, CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81, CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82, @@ -171,25 +186,6 @@ enum CQ_TX_ERROP_E { CQ_TX_ERROP_ENUM_LAST = 0x8a, }; -struct cmp_queue_stats { - struct tx_stats { - u64 good; - u64 desc_fault; - u64 hdr_cons_err; - u64 subdesc_err; - u64 imm_size_oflow; - u64 data_seq_err; - u64 mem_seq_err; - u64 lock_viol; - u64 data_fault; - u64 tstmp_conflict; - u64 tstmp_timeout; - u64 mem_fault; - u64 csum_overlap; - u64 csum_overflow; - } tx; -} ____cacheline_aligned_in_smp; - enum RQ_SQ_STATS { RQ_SQ_STATS_OCTS, RQ_SQ_STATS_PKTS, @@ -241,7 +237,6 @@ struct cmp_queue { spinlock_t lock; /* lock to serialize processing CQEs */ void *desc; struct q_desc_mem dmem; - struct cmp_queue_stats stats; int irq; } ____cacheline_aligned_in_smp; @@ -311,7 +306,8 @@ void nicvf_sq_disable(struct nicvf *nic, int qidx); void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt); void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq, int qidx); -int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb); +int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq, + struct sk_buff *skb, u8 sq_num); struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx); void nicvf_rbdr_task(unsigned long data); @@ -336,6 +332,5 @@ u64 nicvf_queue_reg_read(struct nicvf *nic, void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx); void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx); int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx); -int nicvf_check_cqe_tx_errs(struct nicvf *nic, - struct cmp_queue *cq, 
struct cqe_send_t *cqe_tx); +int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx); #endif /* NICVF_QUEUES_H */ diff --git a/drivers/net/ethernet/cavium/thunder/q_struct.h b/drivers/net/ethernet/cavium/thunder/q_struct.h index 9e6d9876bfd0..f36347237a54 100644 --- a/drivers/net/ethernet/cavium/thunder/q_struct.h +++ b/drivers/net/ethernet/cavium/thunder/q_struct.h @@ -624,7 +624,9 @@ struct cq_cfg { struct sq_cfg { #if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_20_63:44; + u64 reserved_32_63:32; + u64 cq_limit:8; + u64 reserved_20_23:4; u64 ena:1; u64 reserved_18_18:1; u64 reset:1; @@ -642,7 +644,9 @@ struct sq_cfg { u64 reset:1; u64 reserved_18_18:1; u64 ena:1; - u64 reserved_20_63:44; + u64 reserved_20_23:4; + u64 cq_limit:8; + u64 reserved_32_63:32; #endif }; diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index 8bbaedbb7b94..9211c750e064 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -161,6 +161,7 @@ void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status) return; lmac = &bgx->lmac[lmacid]; + link->mac_type = lmac->lmac_type; link->link_up = lmac->link_up; link->duplex = lmac->last_duplex; link->speed = lmac->last_speed; @@ -211,6 +212,47 @@ void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable) } EXPORT_SYMBOL(bgx_lmac_rx_tx_enable); +void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause) +{ + struct pfc *pfc = (struct pfc *)pause; + struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; + struct lmac *lmac; + u64 cfg; + + if (!bgx) + return; + lmac = &bgx->lmac[lmacid]; + if (lmac->is_sgmii) + return; + + cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_CBFC_CTL); + pfc->fc_rx = cfg & RX_EN; + pfc->fc_tx = cfg & TX_EN; + pfc->autoneg = 0; +} +EXPORT_SYMBOL(bgx_lmac_get_pfc); + +void bgx_lmac_set_pfc(int node, int bgx_idx, int lmacid, void *pause) +{ + struct pfc *pfc = (struct pfc *)pause; + struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; + struct lmac *lmac; + u64 cfg; + + if (!bgx) + return; + lmac = &bgx->lmac[lmacid]; + if (lmac->is_sgmii) + return; + + cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_CBFC_CTL); + cfg &= ~(RX_EN | TX_EN); + cfg |= (pfc->fc_rx ? RX_EN : 0x00); + cfg |= (pfc->fc_tx ? 
TX_EN : 0x00); + bgx_reg_write(bgx, lmacid, BGX_SMUX_CBFC_CTL, cfg); +} +EXPORT_SYMBOL(bgx_lmac_set_pfc); + static void bgx_sgmii_change_link_state(struct lmac *lmac) { struct bgx *bgx = lmac->bgx; @@ -524,6 +566,18 @@ static int bgx_lmac_xaui_init(struct bgx *bgx, struct lmac *lmac) cfg |= SMU_TX_CTL_DIC_EN; bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg); + /* Enable receive and transmission of pause frames */ + bgx_reg_write(bgx, lmacid, BGX_SMUX_CBFC_CTL, ((0xffffULL << 32) | + BCK_EN | DRP_EN | TX_EN | RX_EN)); + /* Configure pause time and interval */ + bgx_reg_write(bgx, lmacid, + BGX_SMUX_TX_PAUSE_PKT_TIME, DEFAULT_PAUSE_TIME); + cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_PAUSE_PKT_INTERVAL); + cfg &= ~0xFFFFull; + bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_PAUSE_PKT_INTERVAL, + cfg | (DEFAULT_PAUSE_TIME - 0x1000)); + bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_PAUSE_ZERO, 0x01); + /* take lmac_count into account */ bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1)); /* max packet size */ @@ -970,11 +1024,25 @@ static void bgx_set_lmac_config(struct bgx *bgx, u8 idx) lmac_set_training(bgx, lmac, lmac->lmacid); lmac_set_lane2sds(bgx, lmac); - /* Set LMAC type of other lmac on same DLM i.e LMAC 1/3 */ olmac = &bgx->lmac[idx + 1]; - olmac->lmac_type = lmac->lmac_type; + /* Check if the other LMAC on the same DLM is already configured + * by firmware; if so, use the same config, else set it the same + * as that of LMAC 0/2. + * This check is needed as on 80xx only one lane of each of the + * DLM of BGX0 is used, so have to rely on firmware for + * distinguishing 80xx from 81xx. + */ + cmr_cfg = bgx_reg_read(bgx, idx + 1, BGX_CMRX_CFG); + lmac_type = (u8)((cmr_cfg >> 8) & 0x07); + lane_to_sds = (u8)(cmr_cfg & 0xFF); + if ((lmac_type == 0) && (lane_to_sds == 0xE4)) { + olmac->lmac_type = lmac->lmac_type; + lmac_set_lane2sds(bgx, olmac); + } else { + olmac->lmac_type = lmac_type; + olmac->lane_to_sds = lane_to_sds; + } lmac_set_training(bgx, olmac, olmac->lmacid); - lmac_set_lane2sds(bgx, olmac); } } @@ -1242,8 +1310,8 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_read_config_word(pdev, PCI_DEVICE_ID, &sdevid); if (sdevid != PCI_DEVICE_ID_THUNDER_RGX) { - bgx->bgx_id = - (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) & 1; + bgx->bgx_id = (pci_resource_start(pdev, + PCI_CFG_REG_BAR_NUM) >> 24) & BGX_ID_MASK; bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_NODE; bgx->max_lmac = MAX_LMAC_PER_BGX; bgx_vnic[bgx->bgx_id] = bgx; diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h index d59c71e4a000..c18ebfeb2039 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h @@ -27,6 +27,9 @@ #define MAX_BGX_CHANS_PER_LMAC 16 #define MAX_DMAC_PER_LMAC 8 #define MAX_FRAME_SIZE 9216 +#define DEFAULT_PAUSE_TIME 0xFFFF + +#define BGX_ID_MASK 0x3 #define MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE 2 @@ -124,7 +127,10 @@ #define SMU_RX_CTL_STATUS (3ull << 0) #define BGX_SMUX_TX_APPEND 0x20100 #define SMU_TX_APPEND_FCS_D BIT_ULL(2) +#define BGX_SMUX_TX_PAUSE_PKT_TIME 0x20110 #define BGX_SMUX_TX_MIN_PKT 0x20118 +#define BGX_SMUX_TX_PAUSE_PKT_INTERVAL 0x20120 +#define BGX_SMUX_TX_PAUSE_ZERO 0x20138 #define BGX_SMUX_TX_INT 0x20140 #define BGX_SMUX_TX_CTL 0x20178 #define SMU_TX_CTL_DIC_EN BIT_ULL(0) @@ -134,6 +140,11 @@ #define BGX_SMUX_CTL 0x20200 #define SMU_CTL_RX_IDLE BIT_ULL(0) #define SMU_CTL_TX_IDLE BIT_ULL(1) +#define BGX_SMUX_CBFC_CTL 0x20218 
+#define RX_EN BIT_ULL(0) +#define TX_EN BIT_ULL(1) +#define BCK_EN BIT_ULL(2) +#define DRP_EN BIT_ULL(3) #define BGX_GMP_PCS_MRX_CTL 0x30000 #define PCS_MRX_CTL_RST_AN BIT_ULL(9) @@ -205,6 +216,9 @@ void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac); void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status); void bgx_lmac_internal_loopback(int node, int bgx_idx, int lmac_idx, bool enable); +void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause); +void bgx_lmac_set_pfc(int node, int bgx_idx, int lmacid, void *pause); + void xcv_init_hw(void); void xcv_setup_link(bool link_up, int link_speed);
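
A note on the request_manager.c change from the INCR_INDEX_BY1() macro to the incr_index() helper: the helper makes the ring wrap-around explicit and also handles multi-slot advances, which the flush loop uses via incr_index(old, 1, iq->max_count). A minimal sketch of such a helper, assuming the LiquidIO headers define it along these lines (an illustration, not a quote of the driver):

	static inline u32 incr_index(u32 index, u32 count, u32 max)
	{
		/* advance by 'count' slots and wrap at the end of the ring */
		if ((index + count) >= max)
			index = index + count - max;
		else
			index += count;

		return index;
	}

With count = 1 this matches the old single-step macro, e.g. iq->host_write_index = incr_index(iq->host_write_index, 1, iq->max_count).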
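
On the nic_update_hw_frs() rework in nic_main.c: instead of only growing pkind.maxlen, the PF now reprograms the per-LMAC credit field in NIC_PF_LMAC_0_7_CREDIT whenever a VF changes its frame size. The arithmetic packed into that one long expression, restated as a hypothetical helper for readability:

	static u64 lmac_credit_field(int lmac_cnt, int new_frs)
	{
		/* Split the 48 KB BGX TX FIFO across this BGX's LMACs,
		 * reserve room for one maximum-sized frame, and express
		 * the remainder in 16-byte credit units at bits <31:12>.
		 */
		u64 credits = (((48 * 1024) / lmac_cnt) - new_frs) / 16;

		return credits << 12;
	}

For example, with 4 LMACs and a new_frs of 9216 bytes (NIC_HW_MAX_FRS plus VLAN_ETH_HLEN, ETH_FCS_LEN and 4), this yields (12288 - 9216) / 16 = 192 credits.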
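
The nicvf_drv_stats rework replaces one shared counter struct (and the per-CQ cmp_queue_stats) with a per-CPU allocation, so hot paths no longer bounce a cache line between cores. The pattern, reduced to a sketch with placeholder names:

	struct my_drv_stats {
		u64 tx_tso;			/* one slot per counter */
	};

	static struct my_drv_stats __percpu *stats;	/* from netdev_alloc_pcpu_stats() */

	static void hot_path(void)
	{
		/* lock-free, preemption-safe bump of this CPU's copy */
		this_cpu_inc(stats->tx_tso);
	}

	static u64 read_tx_tso(void)
	{
		u64 sum = 0;
		int cpu;

		/* fold every CPU's copy only when userspace reads */
		for_each_possible_cpu(cpu)
			sum += per_cpu_ptr(stats, cpu)->tx_tso;

		return sum;
	}

This is the same shape as the this_cpu_inc() calls through the diff and the for_each_possible_cpu() summation in nicvf_get_ethtool_stats().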
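
The smp_mb() calls added in nicvf_xmit() and the CQ interrupt handler close the usual stop/wake race on the transmit queue: the sender must publish the stopped state before re-checking the free count, and the completion side must reclaim descriptors before testing that state, or a wakeup can be lost and the queue stalls. The skeleton of that handshake, with free_descs() and 'needed' standing in for the driver's free_cnt checks:

	static int free_descs(void);	/* placeholder for atomic_read(&sq->free_cnt) */

	static netdev_tx_t xmit_side(struct netdev_queue *txq, int needed)
	{
		if (free_descs() >= needed)
			return NETDEV_TX_OK;	/* post the packet as usual */

		netif_tx_stop_queue(txq);
		smp_mb();	/* make the stop visible before re-checking */

		if (free_descs() >= needed) {	/* a completion raced with us */
			netif_tx_wake_queue(txq);
			return NETDEV_TX_OK;
		}

		return NETDEV_TX_BUSY;
	}

	static void completion_side(struct net_device *netdev, struct netdev_queue *txq)
	{
		/* called after descriptors have been reclaimed */
		smp_mb();	/* pairs with the barrier on the xmit side */

		if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev))
			netif_tx_wake_queue(txq);
	}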
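
Finally, on the RQ_PASS_CQ_LVL/RQ_DROP_CQ_LVL constants in nicvf_queues.h: the pass/drop levels are programmed in 1/256th units of the queue size, which is how the thresholds in the comment are derived. A one-line restatement of that conversion (granularity assumed from the comment's own numbers):

	static u32 level_to_entries(u32 qsize, u32 level)
	{
		/* one level step corresponds to qsize / 256 entries */
		return level * (qsize / 256);
	}

For a 4K-entry CQ a step is 16 entries, so pass level 160 maps to 2560 unused CQEs and drop level 144 to 2304, matching the thresholds quoted in the comment.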