Diffstat (limited to 'drivers/net')
496 files changed, 34457 insertions, 9377 deletions
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 217b790d22ed..a764a83f99da 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -4102,7 +4102,8 @@ static inline int bond_slave_override(struct bonding *bond, static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { /* This helper function exists to help dev_pick_tx get the correct * destination queue. Using a helper function skips a call to diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 6096440e96ea..35847250da5a 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -160,14 +160,19 @@ static ssize_t bonding_sysfs_store_option(struct device *d, { struct bonding *bond = to_bond(d); const struct bond_option *opt; + char *buffer_clone; int ret; opt = bond_opt_get_by_name(attr->attr.name); if (WARN_ON(!opt)) return -ENOENT; - ret = bond_opt_tryset_rtnl(bond, opt->id, (char *)buffer); + buffer_clone = kstrndup(buffer, count, GFP_KERNEL); + if (!buffer_clone) + return -ENOMEM; + ret = bond_opt_tryset_rtnl(bond, opt->id, buffer_clone); if (!ret) ret = count; + kfree(buffer_clone); return ret; } diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c index d4dd4da23997..da636a22c542 100644 --- a/drivers/net/can/cc770/cc770.c +++ b/drivers/net/can/cc770/cc770.c @@ -73,7 +73,7 @@ MODULE_PARM_DESC(msgobj15_eff, "Extended 29-bit frames for message object 15 " static int i82527_compat; module_param(i82527_compat, int, 0444); -MODULE_PARM_DESC(i82527_compat, "Strict Intel 82527 comptibility mode " +MODULE_PARM_DESC(i82527_compat, "Strict Intel 82527 compatibility mode " "without using additional functions"); /* diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 3c71f1cb205f..49163570a63a 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -649,8 +649,7 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf) can_skb_prv(skb)->ifindex = dev->ifindex; can_skb_prv(skb)->skbcnt = 0; - *cf = skb_put(skb, sizeof(struct can_frame)); - memset(*cf, 0, sizeof(struct can_frame)); + *cf = skb_put_zero(skb, sizeof(struct can_frame)); return skb; } @@ -678,8 +677,7 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev, can_skb_prv(skb)->ifindex = dev->ifindex; can_skb_prv(skb)->skbcnt = 0; - *cfd = skb_put(skb, sizeof(struct canfd_frame)); - memset(*cfd, 0, sizeof(struct canfd_frame)); + *cfd = skb_put_zero(skb, sizeof(struct canfd_frame)); return skb; } @@ -703,7 +701,8 @@ EXPORT_SYMBOL_GPL(alloc_can_err_skb); /* * Allocate and setup space for the CAN network device */ -struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max) +struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max, + unsigned int txqs, unsigned int rxqs) { struct net_device *dev; struct can_priv *priv; @@ -715,7 +714,8 @@ struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max) else size = sizeof_priv; - dev = alloc_netdev(size, "can%d", NET_NAME_UNKNOWN, can_setup); + dev = alloc_netdev_mqs(size, "can%d", NET_NAME_UNKNOWN, can_setup, + txqs, rxqs); if (!dev) return NULL; @@ -734,7 +734,7 @@ struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max) return dev; } -EXPORT_SYMBOL_GPL(alloc_candev); +EXPORT_SYMBOL_GPL(alloc_candev_mqs); /* * Free space of the CAN network device 
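Note on the drivers/net/can/dev.c hunks above: they fold the open-coded skb_put() + memset() pair into a single skb_put_zero() call, so the freshly reserved CAN frame can never be observed partially initialized between the two steps. A minimal sketch of the equivalent pattern, assuming a valid skb with enough tailroom (my_skb_put_zero is a hypothetical stand-in mirroring what the kernel helper does):

    #include <linux/skbuff.h>
    #include <linux/string.h>

    /* Hypothetical stand-in for skb_put_zero(): reserve len bytes at the
     * skb tail and zero them, returning the start of the new area. */
    static inline void *my_skb_put_zero(struct sk_buff *skb, unsigned int len)
    {
            void *tmp = skb_put(skb, len);

            memset(tmp, 0, len);
            return tmp;
    }

With that helper, `*cf = skb_put_zero(skb, sizeof(struct can_frame));` replaces the two-call sequence in alloc_can_skb()/alloc_canfd_skb() with one expression, which is exactly the conversion the hunks perform.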
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index d53a45bf2a72..8e972ef08637 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -1,24 +1,13 @@ -/* - * flexcan.c - FLEXCAN CAN controller driver - * - * Copyright (c) 2005-2006 Varma Electronics Oy - * Copyright (c) 2009 Sascha Hauer, Pengutronix - * Copyright (c) 2010-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de> - * Copyright (c) 2014 David Jander, Protonic Holland - * - * Based on code originally by Andrey Volkov <avolkov@varma-el.com> - * - * LICENCE: - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ +// SPDX-License-Identifier: GPL-2.0 +// +// flexcan.c - FLEXCAN CAN controller driver +// +// Copyright (c) 2005-2006 Varma Electronics Oy +// Copyright (c) 2009 Sascha Hauer, Pengutronix +// Copyright (c) 2010-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de> +// Copyright (c) 2014 David Jander, Protonic Holland +// +// Based on code originally by Andrey Volkov <avolkov@varma-el.com> #include <linux/netdevice.h> #include <linux/can.h> @@ -523,7 +512,7 @@ static int flexcan_get_berr_counter(const struct net_device *dev, return err; } -static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev) { const struct flexcan_priv *priv = netdev_priv(dev); struct can_frame *cf = (struct can_frame *)skb->data; diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c index adfdb66a486e..02042cb09bd2 100644 --- a/drivers/net/can/janz-ican3.c +++ b/drivers/net/can/janz-ican3.c @@ -1684,7 +1684,7 @@ static int ican3_stop(struct net_device *ndev) return 0; } -static int ican3_xmit(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t ican3_xmit(struct sk_buff *skb, struct net_device *ndev) { struct ican3_dev *mod = netdev_priv(ndev); struct can_frame *cf = (struct can_frame *)skb->data; diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c index ed8561d4a90f..5696d7e80751 100644 --- a/drivers/net/can/peak_canfd/peak_canfd.c +++ b/drivers/net/can/peak_canfd/peak_canfd.c @@ -486,7 +486,7 @@ int peak_canfd_handle_msgs_list(struct peak_canfd_priv *priv, if (msg_size <= 0) break; - msg_ptr += msg_size; + msg_ptr += ALIGN(msg_size, 4); } if (msg_size < 0) diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c index 455a3797a200..c458d5fdc8d3 100644 --- a/drivers/net/can/peak_canfd/peak_pciefd_main.c +++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c @@ -174,9 +174,6 @@ struct pciefd_page { u32 size; }; -#define CANFD_IRQ_SET 0x00000001 -#define CANFD_TX_PATH_SET 0x00000002 - /* CAN-FD channel object */ struct pciefd_board; struct pciefd_can { @@ -418,7 +415,7 @@ static int pciefd_pre_cmd(struct peak_canfd_priv *ucan) break; /* going into operational mode: setup IRQ handler */ - err = request_irq(priv->board->pci_dev->irq, + err = request_irq(priv->ucan.ndev->irq, pciefd_irq_handler, IRQF_SHARED, PCIEFD_DRV_NAME, @@ -491,15 +488,18 @@ static int pciefd_post_cmd(struct peak_canfd_priv 
*ucan) /* controller now in reset mode: */ + /* disable IRQ for this CAN */ + pciefd_can_writereg(priv, CANFD_CTL_IEN_BIT, + PCIEFD_REG_CAN_RX_CTL_CLR); + /* stop and reset DMA addresses in Tx/Rx engines */ pciefd_can_clear_tx_dma(priv); pciefd_can_clear_rx_dma(priv); - /* disable IRQ for this CAN */ - pciefd_can_writereg(priv, CANFD_CTL_IEN_BIT, - PCIEFD_REG_CAN_RX_CTL_CLR); + /* wait for above commands to complete (read cycle) */ + (void)pciefd_sys_readreg(priv->board, PCIEFD_REG_SYS_VER1); - free_irq(priv->board->pci_dev->irq, priv); + free_irq(priv->ucan.ndev->irq, priv); ucan->can.state = CAN_STATE_STOPPED; @@ -638,7 +638,7 @@ static int pciefd_can_probe(struct pciefd_board *pciefd) GFP_KERNEL); if (!priv->tx_dma_vaddr) { dev_err(&pciefd->pci_dev->dev, - "Tx dmaim_alloc_coherent(%u) failure\n", + "Tx dmam_alloc_coherent(%u) failure\n", PCIEFD_TX_DMA_SIZE); goto err_free_candev; } @@ -691,7 +691,7 @@ static int pciefd_can_probe(struct pciefd_board *pciefd) pciefd->can[pciefd->can_count] = priv; dev_info(&pciefd->pci_dev->dev, "%s at reg_base=0x%p irq=%d\n", - ndev->name, priv->reg_base, pciefd->pci_dev->irq); + ndev->name, priv->reg_base, ndev->irq); return 0; diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c index 5adc95c922ee..a97b81d1d0da 100644 --- a/drivers/net/can/sja1000/peak_pci.c +++ b/drivers/net/can/sja1000/peak_pci.c @@ -608,7 +608,7 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) writeb(0x00, cfg_base + PITA_GPIOICR); /* Toggle reset */ writeb(0x05, cfg_base + PITA_MISC + 3); - mdelay(5); + usleep_range(5000, 6000); /* Leave parport mux mode */ writeb(0x04, cfg_base + PITA_MISC + 3); diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c index 485b19c9ae47..b8c39ede7cd5 100644 --- a/drivers/net/can/sja1000/peak_pcmcia.c +++ b/drivers/net/can/sja1000/peak_pcmcia.c @@ -530,7 +530,7 @@ static int pcan_add_channels(struct pcan_pccard *card) pcan_write_reg(card, PCC_CCR, ccr); /* wait 2ms before unresetting channels */ - mdelay(2); + usleep_range(2000, 3000); ccr &= ~PCC_CCR_RST_ALL; pcan_write_reg(card, PCC_CCR, ccr); diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c index 1ac2090a1721..093fc9a529f0 100644 --- a/drivers/net/can/sun4i_can.c +++ b/drivers/net/can/sun4i_can.c @@ -409,7 +409,7 @@ static int sun4ican_set_mode(struct net_device *dev, enum can_mode mode) * xx xx xx xx ff ll 00 11 22 33 44 55 66 77 * [ can_id ] [flags] [len] [can data (up to 8 bytes] */ -static int sun4ican_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t sun4ican_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct sun4ican_priv *priv = netdev_priv(dev); struct can_frame *cf = (struct can_frame *)skb->data; diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig index c36f4bdcbf4f..750d04d9e2ae 100644 --- a/drivers/net/can/usb/Kconfig +++ b/drivers/net/can/usb/Kconfig @@ -1,6 +1,12 @@ menu "CAN USB interfaces" depends on USB +config CAN_8DEV_USB + tristate "8 devices USB2CAN interface" + ---help--- + This driver supports the USB2CAN interface + from 8 devices (http://www.8devices.com). + config CAN_EMS_USB tristate "EMS CPC-USB/ARM7 CAN/USB interface" ---help--- @@ -26,7 +32,7 @@ config CAN_KVASER_USB tristate "Kvaser CAN/USB interface" ---help--- This driver adds support for Kvaser CAN/USB devices like Kvaser - Leaf Light and Kvaser USBcan II. + Leaf Light, Kvaser USBcan II and Kvaser Memorator Pro 5xHS. 
The driver provides support for the following devices: - Kvaser Leaf Light @@ -55,12 +61,30 @@ config CAN_KVASER_USB - Kvaser Memorator HS/HS - Kvaser Memorator HS/LS - Scania VCI2 (if you have the Kvaser logo on top) + - Kvaser BlackBird v2 + - Kvaser Leaf Pro HS v2 + - Kvaser Hybrid 2xCAN/LIN + - Kvaser Hybrid Pro 2xCAN/LIN + - Kvaser Memorator 2xHS v2 + - Kvaser Memorator Pro 2xHS v2 + - Kvaser Memorator Pro 5xHS + - Kvaser USBcan Light 4xHS + - Kvaser USBcan Pro 2xHS v2 + - Kvaser USBcan Pro 5xHS + - ATI Memorator Pro 2xHS v2 + - ATI USBcan Pro 2xHS v2 If unsure, say N. To compile this driver as a module, choose M here: the module will be called kvaser_usb. +config CAN_MCBA_USB + tristate "Microchip CAN BUS Analyzer interface" + ---help--- + This driver supports the CAN BUS Analyzer interface + from Microchip (http://www.microchip.com/development-tools/). + config CAN_PEAK_USB tristate "PEAK PCAN-USB/USB Pro interfaces for CAN 2.0b/CAN-FD" ---help--- @@ -77,16 +101,26 @@ config CAN_PEAK_USB (see also http://www.peak-system.com). -config CAN_8DEV_USB - tristate "8 devices USB2CAN interface" - ---help--- - This driver supports the USB2CAN interface - from 8 devices (http://www.8devices.com). - config CAN_MCBA_USB tristate "Microchip CAN BUS Analyzer interface" ---help--- This driver supports the CAN BUS Analyzer interface from Microchip (http://www.microchip.com/development-tools/). +config CAN_UCAN + tristate "Theobroma Systems UCAN interface" + ---help--- + This driver supports the Theobroma Systems + UCAN USB-CAN interface. + + The UCAN driver supports the microcontroller-based USB/CAN + adapters from Theobroma Systems. There are two form-factors + that run essentially the same firmware: + + * Seal: standalone USB stick + https://www.theobroma-systems.com/seal) + * Mule: integrated on the PCB of various System-on-Modules + from Theobroma Systems like the A31-µQ7 and the RK3399-Q7 + (https://www.theobroma-systems.com/rk3399-q7) + endmenu diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile index 49ac7b99ba32..aa0f17c0b2ed 100644 --- a/drivers/net/can/usb/Makefile +++ b/drivers/net/can/usb/Makefile @@ -3,10 +3,11 @@ # Makefile for the Linux Controller Area Network USB drivers. # +obj-$(CONFIG_CAN_8DEV_USB) += usb_8dev.o obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o obj-$(CONFIG_CAN_ESD_USB2) += esd_usb2.o obj-$(CONFIG_CAN_GS_USB) += gs_usb.o -obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o -obj-$(CONFIG_CAN_PEAK_USB) += peak_usb/ -obj-$(CONFIG_CAN_8DEV_USB) += usb_8dev.o +obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb/ obj-$(CONFIG_CAN_MCBA_USB) += mcba_usb.o +obj-$(CONFIG_CAN_PEAK_USB) += peak_usb/ +obj-$(CONFIG_CAN_UCAN) += ucan.o diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c deleted file mode 100644 index daed57d3d209..000000000000 --- a/drivers/net/can/usb/kvaser_usb.c +++ /dev/null @@ -1,2085 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * Parts of this driver are based on the following: - * - Kvaser linux leaf driver (version 4.78) - * - CAN driver for esd CAN-USB/2 - * - Kvaser linux usbcanII driver (version 5.3) - * - * Copyright (C) 2002-2006 KVASER AB, Sweden. All rights reserved. - * Copyright (C) 2010 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh - * Copyright (C) 2012 Olivier Sobrie <olivier@sobrie.be> - * Copyright (C) 2015 Valeo S.A. 
- */ - -#include <linux/spinlock.h> -#include <linux/kernel.h> -#include <linux/completion.h> -#include <linux/module.h> -#include <linux/netdevice.h> -#include <linux/usb.h> - -#include <linux/can.h> -#include <linux/can/dev.h> -#include <linux/can/error.h> - -#define MAX_RX_URBS 4 -#define START_TIMEOUT 1000 /* msecs */ -#define STOP_TIMEOUT 1000 /* msecs */ -#define USB_SEND_TIMEOUT 1000 /* msecs */ -#define USB_RECV_TIMEOUT 1000 /* msecs */ -#define RX_BUFFER_SIZE 3072 -#define CAN_USB_CLOCK 8000000 -#define MAX_NET_DEVICES 3 -#define MAX_USBCAN_NET_DEVICES 2 - -/* Kvaser Leaf USB devices */ -#define KVASER_VENDOR_ID 0x0bfd -#define USB_LEAF_DEVEL_PRODUCT_ID 10 -#define USB_LEAF_LITE_PRODUCT_ID 11 -#define USB_LEAF_PRO_PRODUCT_ID 12 -#define USB_LEAF_SPRO_PRODUCT_ID 14 -#define USB_LEAF_PRO_LS_PRODUCT_ID 15 -#define USB_LEAF_PRO_SWC_PRODUCT_ID 16 -#define USB_LEAF_PRO_LIN_PRODUCT_ID 17 -#define USB_LEAF_SPRO_LS_PRODUCT_ID 18 -#define USB_LEAF_SPRO_SWC_PRODUCT_ID 19 -#define USB_MEMO2_DEVEL_PRODUCT_ID 22 -#define USB_MEMO2_HSHS_PRODUCT_ID 23 -#define USB_UPRO_HSHS_PRODUCT_ID 24 -#define USB_LEAF_LITE_GI_PRODUCT_ID 25 -#define USB_LEAF_PRO_OBDII_PRODUCT_ID 26 -#define USB_MEMO2_HSLS_PRODUCT_ID 27 -#define USB_LEAF_LITE_CH_PRODUCT_ID 28 -#define USB_BLACKBIRD_SPRO_PRODUCT_ID 29 -#define USB_OEM_MERCURY_PRODUCT_ID 34 -#define USB_OEM_LEAF_PRODUCT_ID 35 -#define USB_CAN_R_PRODUCT_ID 39 -#define USB_LEAF_LITE_V2_PRODUCT_ID 288 -#define USB_MINI_PCIE_HS_PRODUCT_ID 289 -#define USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID 290 -#define USB_USBCAN_LIGHT_2HS_PRODUCT_ID 291 -#define USB_MINI_PCIE_2HS_PRODUCT_ID 292 - -static inline bool kvaser_is_leaf(const struct usb_device_id *id) -{ - return id->idProduct >= USB_LEAF_DEVEL_PRODUCT_ID && - id->idProduct <= USB_MINI_PCIE_2HS_PRODUCT_ID; -} - -/* Kvaser USBCan-II devices */ -#define USB_USBCAN_REVB_PRODUCT_ID 2 -#define USB_VCI2_PRODUCT_ID 3 -#define USB_USBCAN2_PRODUCT_ID 4 -#define USB_MEMORATOR_PRODUCT_ID 5 - -static inline bool kvaser_is_usbcan(const struct usb_device_id *id) -{ - return id->idProduct >= USB_USBCAN_REVB_PRODUCT_ID && - id->idProduct <= USB_MEMORATOR_PRODUCT_ID; -} - -/* USB devices features */ -#define KVASER_HAS_SILENT_MODE BIT(0) -#define KVASER_HAS_TXRX_ERRORS BIT(1) - -/* Message header size */ -#define MSG_HEADER_LEN 2 - -/* Can message flags */ -#define MSG_FLAG_ERROR_FRAME BIT(0) -#define MSG_FLAG_OVERRUN BIT(1) -#define MSG_FLAG_NERR BIT(2) -#define MSG_FLAG_WAKEUP BIT(3) -#define MSG_FLAG_REMOTE_FRAME BIT(4) -#define MSG_FLAG_RESERVED BIT(5) -#define MSG_FLAG_TX_ACK BIT(6) -#define MSG_FLAG_TX_REQUEST BIT(7) - -/* Can states (M16C CxSTRH register) */ -#define M16C_STATE_BUS_RESET BIT(0) -#define M16C_STATE_BUS_ERROR BIT(4) -#define M16C_STATE_BUS_PASSIVE BIT(5) -#define M16C_STATE_BUS_OFF BIT(6) - -/* Can msg ids */ -#define CMD_RX_STD_MESSAGE 12 -#define CMD_TX_STD_MESSAGE 13 -#define CMD_RX_EXT_MESSAGE 14 -#define CMD_TX_EXT_MESSAGE 15 -#define CMD_SET_BUS_PARAMS 16 -#define CMD_GET_BUS_PARAMS 17 -#define CMD_GET_BUS_PARAMS_REPLY 18 -#define CMD_GET_CHIP_STATE 19 -#define CMD_CHIP_STATE_EVENT 20 -#define CMD_SET_CTRL_MODE 21 -#define CMD_GET_CTRL_MODE 22 -#define CMD_GET_CTRL_MODE_REPLY 23 -#define CMD_RESET_CHIP 24 -#define CMD_RESET_CARD 25 -#define CMD_START_CHIP 26 -#define CMD_START_CHIP_REPLY 27 -#define CMD_STOP_CHIP 28 -#define CMD_STOP_CHIP_REPLY 29 - -#define CMD_LEAF_GET_CARD_INFO2 32 -#define CMD_USBCAN_RESET_CLOCK 32 -#define CMD_USBCAN_CLOCK_OVERFLOW_EVENT 33 - -#define CMD_GET_CARD_INFO 34 -#define 
CMD_GET_CARD_INFO_REPLY 35 -#define CMD_GET_SOFTWARE_INFO 38 -#define CMD_GET_SOFTWARE_INFO_REPLY 39 -#define CMD_ERROR_EVENT 45 -#define CMD_FLUSH_QUEUE 48 -#define CMD_RESET_ERROR_COUNTER 49 -#define CMD_TX_ACKNOWLEDGE 50 -#define CMD_CAN_ERROR_EVENT 51 -#define CMD_FLUSH_QUEUE_REPLY 68 - -#define CMD_LEAF_USB_THROTTLE 77 -#define CMD_LEAF_LOG_MESSAGE 106 - -/* error factors */ -#define M16C_EF_ACKE BIT(0) -#define M16C_EF_CRCE BIT(1) -#define M16C_EF_FORME BIT(2) -#define M16C_EF_STFE BIT(3) -#define M16C_EF_BITE0 BIT(4) -#define M16C_EF_BITE1 BIT(5) -#define M16C_EF_RCVE BIT(6) -#define M16C_EF_TRE BIT(7) - -/* Only Leaf-based devices can report M16C error factors, - * thus define our own error status flags for USBCANII - */ -#define USBCAN_ERROR_STATE_NONE 0 -#define USBCAN_ERROR_STATE_TX_ERROR BIT(0) -#define USBCAN_ERROR_STATE_RX_ERROR BIT(1) -#define USBCAN_ERROR_STATE_BUSERROR BIT(2) - -/* bittiming parameters */ -#define KVASER_USB_TSEG1_MIN 1 -#define KVASER_USB_TSEG1_MAX 16 -#define KVASER_USB_TSEG2_MIN 1 -#define KVASER_USB_TSEG2_MAX 8 -#define KVASER_USB_SJW_MAX 4 -#define KVASER_USB_BRP_MIN 1 -#define KVASER_USB_BRP_MAX 64 -#define KVASER_USB_BRP_INC 1 - -/* ctrl modes */ -#define KVASER_CTRL_MODE_NORMAL 1 -#define KVASER_CTRL_MODE_SILENT 2 -#define KVASER_CTRL_MODE_SELFRECEPTION 3 -#define KVASER_CTRL_MODE_OFF 4 - -/* Extended CAN identifier flag */ -#define KVASER_EXTENDED_FRAME BIT(31) - -/* Kvaser USB CAN dongles are divided into two major families: - * - Leaf: Based on Renesas M32C, running firmware labeled as 'filo' - * - UsbcanII: Based on Renesas M16C, running firmware labeled as 'helios' - */ -enum kvaser_usb_family { - KVASER_LEAF, - KVASER_USBCAN, -}; - -struct kvaser_msg_simple { - u8 tid; - u8 channel; -} __packed; - -struct kvaser_msg_cardinfo { - u8 tid; - u8 nchannels; - union { - struct { - __le32 serial_number; - __le32 padding; - } __packed leaf0; - struct { - __le32 serial_number_low; - __le32 serial_number_high; - } __packed usbcan0; - } __packed; - __le32 clock_resolution; - __le32 mfgdate; - u8 ean[8]; - u8 hw_revision; - union { - struct { - u8 usb_hs_mode; - } __packed leaf1; - struct { - u8 padding; - } __packed usbcan1; - } __packed; - __le16 padding; -} __packed; - -struct kvaser_msg_cardinfo2 { - u8 tid; - u8 reserved; - u8 pcb_id[24]; - __le32 oem_unlock_code; -} __packed; - -struct leaf_msg_softinfo { - u8 tid; - u8 padding0; - __le32 sw_options; - __le32 fw_version; - __le16 max_outstanding_tx; - __le16 padding1[9]; -} __packed; - -struct usbcan_msg_softinfo { - u8 tid; - u8 fw_name[5]; - __le16 max_outstanding_tx; - u8 padding[6]; - __le32 fw_version; - __le16 checksum; - __le16 sw_options; -} __packed; - -struct kvaser_msg_busparams { - u8 tid; - u8 channel; - __le32 bitrate; - u8 tseg1; - u8 tseg2; - u8 sjw; - u8 no_samp; -} __packed; - -struct kvaser_msg_tx_can { - u8 channel; - u8 tid; - u8 msg[14]; - union { - struct { - u8 padding; - u8 flags; - } __packed leaf; - struct { - u8 flags; - u8 padding; - } __packed usbcan; - } __packed; -} __packed; - -struct kvaser_msg_rx_can_header { - u8 channel; - u8 flag; -} __packed; - -struct leaf_msg_rx_can { - u8 channel; - u8 flag; - - __le16 time[3]; - u8 msg[14]; -} __packed; - -struct usbcan_msg_rx_can { - u8 channel; - u8 flag; - - u8 msg[14]; - __le16 time; -} __packed; - -struct leaf_msg_chip_state_event { - u8 tid; - u8 channel; - - __le16 time[3]; - u8 tx_errors_count; - u8 rx_errors_count; - - u8 status; - u8 padding[3]; -} __packed; - -struct usbcan_msg_chip_state_event { - u8 tid; - u8 
channel; - - u8 tx_errors_count; - u8 rx_errors_count; - __le16 time; - - u8 status; - u8 padding[3]; -} __packed; - -struct kvaser_msg_tx_acknowledge_header { - u8 channel; - u8 tid; -} __packed; - -struct leaf_msg_tx_acknowledge { - u8 channel; - u8 tid; - - __le16 time[3]; - u8 flags; - u8 time_offset; -} __packed; - -struct usbcan_msg_tx_acknowledge { - u8 channel; - u8 tid; - - __le16 time; - __le16 padding; -} __packed; - -struct leaf_msg_error_event { - u8 tid; - u8 flags; - __le16 time[3]; - u8 channel; - u8 padding; - u8 tx_errors_count; - u8 rx_errors_count; - u8 status; - u8 error_factor; -} __packed; - -struct usbcan_msg_error_event { - u8 tid; - u8 padding; - u8 tx_errors_count_ch0; - u8 rx_errors_count_ch0; - u8 tx_errors_count_ch1; - u8 rx_errors_count_ch1; - u8 status_ch0; - u8 status_ch1; - __le16 time; -} __packed; - -struct kvaser_msg_ctrl_mode { - u8 tid; - u8 channel; - u8 ctrl_mode; - u8 padding[3]; -} __packed; - -struct kvaser_msg_flush_queue { - u8 tid; - u8 channel; - u8 flags; - u8 padding[3]; -} __packed; - -struct leaf_msg_log_message { - u8 channel; - u8 flags; - __le16 time[3]; - u8 dlc; - u8 time_offset; - __le32 id; - u8 data[8]; -} __packed; - -struct kvaser_msg { - u8 len; - u8 id; - union { - struct kvaser_msg_simple simple; - struct kvaser_msg_cardinfo cardinfo; - struct kvaser_msg_cardinfo2 cardinfo2; - struct kvaser_msg_busparams busparams; - - struct kvaser_msg_rx_can_header rx_can_header; - struct kvaser_msg_tx_acknowledge_header tx_acknowledge_header; - - union { - struct leaf_msg_softinfo softinfo; - struct leaf_msg_rx_can rx_can; - struct leaf_msg_chip_state_event chip_state_event; - struct leaf_msg_tx_acknowledge tx_acknowledge; - struct leaf_msg_error_event error_event; - struct leaf_msg_log_message log_message; - } __packed leaf; - - union { - struct usbcan_msg_softinfo softinfo; - struct usbcan_msg_rx_can rx_can; - struct usbcan_msg_chip_state_event chip_state_event; - struct usbcan_msg_tx_acknowledge tx_acknowledge; - struct usbcan_msg_error_event error_event; - } __packed usbcan; - - struct kvaser_msg_tx_can tx_can; - struct kvaser_msg_ctrl_mode ctrl_mode; - struct kvaser_msg_flush_queue flush_queue; - } u; -} __packed; - -/* Summary of a kvaser error event, for a unified Leaf/Usbcan error - * handling. Some discrepancies between the two families exist: - * - * - USBCAN firmware does not report M16C "error factors" - * - USBCAN controllers has difficulties reporting if the raised error - * event is for ch0 or ch1. They leave such arbitration to the OS - * driver by letting it compare error counters with previous values - * and decide the error event's channel. Thus for USBCAN, the channel - * field is only advisory. - */ -struct kvaser_usb_error_summary { - u8 channel, status, txerr, rxerr; - union { - struct { - u8 error_factor; - } leaf; - struct { - u8 other_ch_status; - u8 error_state; - } usbcan; - }; -}; - -/* Context for an outstanding, not yet ACKed, transmission */ -struct kvaser_usb_tx_urb_context { - struct kvaser_usb_net_priv *priv; - u32 echo_index; - int dlc; -}; - -struct kvaser_usb { - struct usb_device *udev; - struct kvaser_usb_net_priv *nets[MAX_NET_DEVICES]; - - struct usb_endpoint_descriptor *bulk_in, *bulk_out; - struct usb_anchor rx_submitted; - - /* @max_tx_urbs: Firmware-reported maximum number of outstanding, - * not yet ACKed, transmissions on this device. This value is - * also used as a sentinel for marking free tx contexts. 
- */ - u32 fw_version; - unsigned int nchannels; - unsigned int max_tx_urbs; - enum kvaser_usb_family family; - - bool rxinitdone; - void *rxbuf[MAX_RX_URBS]; - dma_addr_t rxbuf_dma[MAX_RX_URBS]; -}; - -struct kvaser_usb_net_priv { - struct can_priv can; - struct can_berr_counter bec; - - struct kvaser_usb *dev; - struct net_device *netdev; - int channel; - - struct completion start_comp, stop_comp; - struct usb_anchor tx_submitted; - - spinlock_t tx_contexts_lock; - int active_tx_contexts; - struct kvaser_usb_tx_urb_context tx_contexts[]; -}; - -static const struct usb_device_id kvaser_usb_table[] = { - /* Leaf family IDs */ - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_DEVEL_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_PRODUCT_ID), - .driver_info = KVASER_HAS_TXRX_ERRORS | - KVASER_HAS_SILENT_MODE }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_PRODUCT_ID), - .driver_info = KVASER_HAS_TXRX_ERRORS | - KVASER_HAS_SILENT_MODE }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LS_PRODUCT_ID), - .driver_info = KVASER_HAS_TXRX_ERRORS | - KVASER_HAS_SILENT_MODE }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_SWC_PRODUCT_ID), - .driver_info = KVASER_HAS_TXRX_ERRORS | - KVASER_HAS_SILENT_MODE }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LIN_PRODUCT_ID), - .driver_info = KVASER_HAS_TXRX_ERRORS | - KVASER_HAS_SILENT_MODE }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_LS_PRODUCT_ID), - .driver_info = KVASER_HAS_TXRX_ERRORS | - KVASER_HAS_SILENT_MODE }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_SWC_PRODUCT_ID), - .driver_info = KVASER_HAS_TXRX_ERRORS | - KVASER_HAS_SILENT_MODE }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_DEVEL_PRODUCT_ID), - .driver_info = KVASER_HAS_TXRX_ERRORS | - KVASER_HAS_SILENT_MODE }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSHS_PRODUCT_ID), - .driver_info = KVASER_HAS_TXRX_ERRORS | - KVASER_HAS_SILENT_MODE }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_UPRO_HSHS_PRODUCT_ID), - .driver_info = KVASER_HAS_TXRX_ERRORS }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_GI_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_OBDII_PRODUCT_ID), - .driver_info = KVASER_HAS_TXRX_ERRORS | - KVASER_HAS_SILENT_MODE }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSLS_PRODUCT_ID), - .driver_info = KVASER_HAS_TXRX_ERRORS }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_CH_PRODUCT_ID), - .driver_info = KVASER_HAS_TXRX_ERRORS }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_SPRO_PRODUCT_ID), - .driver_info = KVASER_HAS_TXRX_ERRORS }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_MERCURY_PRODUCT_ID), - .driver_info = KVASER_HAS_TXRX_ERRORS }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_LEAF_PRODUCT_ID), - .driver_info = KVASER_HAS_TXRX_ERRORS }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_CAN_R_PRODUCT_ID), - .driver_info = KVASER_HAS_TXRX_ERRORS }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_2HS_PRODUCT_ID) }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_2HS_PRODUCT_ID) }, - - /* USBCANII family IDs */ - { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN2_PRODUCT_ID), - .driver_info = KVASER_HAS_TXRX_ERRORS }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_REVB_PRODUCT_ID), - .driver_info = KVASER_HAS_TXRX_ERRORS }, - { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMORATOR_PRODUCT_ID), - .driver_info = KVASER_HAS_TXRX_ERRORS }, - { 
USB_DEVICE(KVASER_VENDOR_ID, USB_VCI2_PRODUCT_ID), - .driver_info = KVASER_HAS_TXRX_ERRORS }, - - { } -}; -MODULE_DEVICE_TABLE(usb, kvaser_usb_table); - -static inline int kvaser_usb_send_msg(const struct kvaser_usb *dev, - struct kvaser_msg *msg) -{ - int actual_len; - - return usb_bulk_msg(dev->udev, - usb_sndbulkpipe(dev->udev, - dev->bulk_out->bEndpointAddress), - msg, msg->len, &actual_len, - USB_SEND_TIMEOUT); -} - -static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id, - struct kvaser_msg *msg) -{ - struct kvaser_msg *tmp; - void *buf; - int actual_len; - int err; - int pos; - unsigned long to = jiffies + msecs_to_jiffies(USB_RECV_TIMEOUT); - - buf = kzalloc(RX_BUFFER_SIZE, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - do { - err = usb_bulk_msg(dev->udev, - usb_rcvbulkpipe(dev->udev, - dev->bulk_in->bEndpointAddress), - buf, RX_BUFFER_SIZE, &actual_len, - USB_RECV_TIMEOUT); - if (err < 0) - goto end; - - pos = 0; - while (pos <= actual_len - MSG_HEADER_LEN) { - tmp = buf + pos; - - /* Handle messages crossing the USB endpoint max packet - * size boundary. Check kvaser_usb_read_bulk_callback() - * for further details. - */ - if (tmp->len == 0) { - pos = round_up(pos, le16_to_cpu(dev->bulk_in-> - wMaxPacketSize)); - continue; - } - - if (pos + tmp->len > actual_len) { - dev_err_ratelimited(dev->udev->dev.parent, - "Format error\n"); - break; - } - - if (tmp->id == id) { - memcpy(msg, tmp, tmp->len); - goto end; - } - - pos += tmp->len; - } - } while (time_before(jiffies, to)); - - err = -EINVAL; - -end: - kfree(buf); - - return err; -} - -static int kvaser_usb_send_simple_msg(const struct kvaser_usb *dev, - u8 msg_id, int channel) -{ - struct kvaser_msg *msg; - int rc; - - msg = kmalloc(sizeof(*msg), GFP_KERNEL); - if (!msg) - return -ENOMEM; - - msg->id = msg_id; - msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_simple); - msg->u.simple.channel = channel; - msg->u.simple.tid = 0xff; - - rc = kvaser_usb_send_msg(dev, msg); - - kfree(msg); - return rc; -} - -static int kvaser_usb_get_software_info(struct kvaser_usb *dev) -{ - struct kvaser_msg msg; - int err; - - err = kvaser_usb_send_simple_msg(dev, CMD_GET_SOFTWARE_INFO, 0); - if (err) - return err; - - err = kvaser_usb_wait_msg(dev, CMD_GET_SOFTWARE_INFO_REPLY, &msg); - if (err) - return err; - - switch (dev->family) { - case KVASER_LEAF: - dev->fw_version = le32_to_cpu(msg.u.leaf.softinfo.fw_version); - dev->max_tx_urbs = - le16_to_cpu(msg.u.leaf.softinfo.max_outstanding_tx); - break; - case KVASER_USBCAN: - dev->fw_version = le32_to_cpu(msg.u.usbcan.softinfo.fw_version); - dev->max_tx_urbs = - le16_to_cpu(msg.u.usbcan.softinfo.max_outstanding_tx); - break; - } - - return 0; -} - -static int kvaser_usb_get_card_info(struct kvaser_usb *dev) -{ - struct kvaser_msg msg; - int err; - - err = kvaser_usb_send_simple_msg(dev, CMD_GET_CARD_INFO, 0); - if (err) - return err; - - err = kvaser_usb_wait_msg(dev, CMD_GET_CARD_INFO_REPLY, &msg); - if (err) - return err; - - dev->nchannels = msg.u.cardinfo.nchannels; - if ((dev->nchannels > MAX_NET_DEVICES) || - (dev->family == KVASER_USBCAN && - dev->nchannels > MAX_USBCAN_NET_DEVICES)) - return -EINVAL; - - return 0; -} - -static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev, - const struct kvaser_msg *msg) -{ - struct net_device_stats *stats; - struct kvaser_usb_tx_urb_context *context; - struct kvaser_usb_net_priv *priv; - struct sk_buff *skb; - struct can_frame *cf; - unsigned long flags; - u8 channel, tid; - - channel = msg->u.tx_acknowledge_header.channel; 
- tid = msg->u.tx_acknowledge_header.tid; - - if (channel >= dev->nchannels) { - dev_err(dev->udev->dev.parent, - "Invalid channel number (%d)\n", channel); - return; - } - - priv = dev->nets[channel]; - - if (!netif_device_present(priv->netdev)) - return; - - stats = &priv->netdev->stats; - - context = &priv->tx_contexts[tid % dev->max_tx_urbs]; - - /* Sometimes the state change doesn't come after a bus-off event */ - if (priv->can.restart_ms && - (priv->can.state >= CAN_STATE_BUS_OFF)) { - skb = alloc_can_err_skb(priv->netdev, &cf); - if (skb) { - cf->can_id |= CAN_ERR_RESTARTED; - - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; - netif_rx(skb); - } else { - netdev_err(priv->netdev, - "No memory left for err_skb\n"); - } - - priv->can.can_stats.restarts++; - netif_carrier_on(priv->netdev); - - priv->can.state = CAN_STATE_ERROR_ACTIVE; - } - - stats->tx_packets++; - stats->tx_bytes += context->dlc; - - spin_lock_irqsave(&priv->tx_contexts_lock, flags); - - can_get_echo_skb(priv->netdev, context->echo_index); - context->echo_index = dev->max_tx_urbs; - --priv->active_tx_contexts; - netif_wake_queue(priv->netdev); - - spin_unlock_irqrestore(&priv->tx_contexts_lock, flags); -} - -static void kvaser_usb_simple_msg_callback(struct urb *urb) -{ - struct net_device *netdev = urb->context; - - kfree(urb->transfer_buffer); - - if (urb->status) - netdev_warn(netdev, "urb status received: %d\n", - urb->status); -} - -static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv, - u8 msg_id) -{ - struct kvaser_usb *dev = priv->dev; - struct net_device *netdev = priv->netdev; - struct kvaser_msg *msg; - struct urb *urb; - void *buf; - int err; - - urb = usb_alloc_urb(0, GFP_ATOMIC); - if (!urb) - return -ENOMEM; - - buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC); - if (!buf) { - usb_free_urb(urb); - return -ENOMEM; - } - - msg = (struct kvaser_msg *)buf; - msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_simple); - msg->id = msg_id; - msg->u.simple.channel = priv->channel; - - usb_fill_bulk_urb(urb, dev->udev, - usb_sndbulkpipe(dev->udev, - dev->bulk_out->bEndpointAddress), - buf, msg->len, - kvaser_usb_simple_msg_callback, netdev); - usb_anchor_urb(urb, &priv->tx_submitted); - - err = usb_submit_urb(urb, GFP_ATOMIC); - if (err) { - netdev_err(netdev, "Error transmitting URB\n"); - usb_unanchor_urb(urb); - kfree(buf); - usb_free_urb(urb); - return err; - } - - usb_free_urb(urb); - - return 0; -} - -static void kvaser_usb_rx_error_update_can_state(struct kvaser_usb_net_priv *priv, - const struct kvaser_usb_error_summary *es, - struct can_frame *cf) -{ - struct kvaser_usb *dev = priv->dev; - struct net_device_stats *stats = &priv->netdev->stats; - enum can_state cur_state, new_state, tx_state, rx_state; - - netdev_dbg(priv->netdev, "Error status: 0x%02x\n", es->status); - - new_state = cur_state = priv->can.state; - - if (es->status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) - new_state = CAN_STATE_BUS_OFF; - else if (es->status & M16C_STATE_BUS_PASSIVE) - new_state = CAN_STATE_ERROR_PASSIVE; - else if (es->status & M16C_STATE_BUS_ERROR) { - /* Guard against spurious error events after a busoff */ - if (cur_state < CAN_STATE_BUS_OFF) { - if ((es->txerr >= 128) || (es->rxerr >= 128)) - new_state = CAN_STATE_ERROR_PASSIVE; - else if ((es->txerr >= 96) || (es->rxerr >= 96)) - new_state = CAN_STATE_ERROR_WARNING; - else if (cur_state > CAN_STATE_ERROR_ACTIVE) - new_state = CAN_STATE_ERROR_ACTIVE; - } - } - - if (!es->status) - new_state = CAN_STATE_ERROR_ACTIVE; - - if 
(new_state != cur_state) { - tx_state = (es->txerr >= es->rxerr) ? new_state : 0; - rx_state = (es->txerr <= es->rxerr) ? new_state : 0; - - can_change_state(priv->netdev, cf, tx_state, rx_state); - } - - if (priv->can.restart_ms && - (cur_state >= CAN_STATE_BUS_OFF) && - (new_state < CAN_STATE_BUS_OFF)) { - priv->can.can_stats.restarts++; - } - - switch (dev->family) { - case KVASER_LEAF: - if (es->leaf.error_factor) { - priv->can.can_stats.bus_error++; - stats->rx_errors++; - } - break; - case KVASER_USBCAN: - if (es->usbcan.error_state & USBCAN_ERROR_STATE_TX_ERROR) - stats->tx_errors++; - if (es->usbcan.error_state & USBCAN_ERROR_STATE_RX_ERROR) - stats->rx_errors++; - if (es->usbcan.error_state & USBCAN_ERROR_STATE_BUSERROR) { - priv->can.can_stats.bus_error++; - } - break; - } - - priv->bec.txerr = es->txerr; - priv->bec.rxerr = es->rxerr; -} - -static void kvaser_usb_rx_error(const struct kvaser_usb *dev, - const struct kvaser_usb_error_summary *es) -{ - struct can_frame *cf, tmp_cf = { .can_id = CAN_ERR_FLAG, .can_dlc = CAN_ERR_DLC }; - struct sk_buff *skb; - struct net_device_stats *stats; - struct kvaser_usb_net_priv *priv; - enum can_state old_state, new_state; - - if (es->channel >= dev->nchannels) { - dev_err(dev->udev->dev.parent, - "Invalid channel number (%d)\n", es->channel); - return; - } - - priv = dev->nets[es->channel]; - stats = &priv->netdev->stats; - - /* Update all of the can interface's state and error counters before - * trying any memory allocation that can actually fail with -ENOMEM. - * - * We send a temporary stack-allocated error can frame to - * can_change_state() for the very same reason. - * - * TODO: Split can_change_state() responsibility between updating the - * can interface's state and counters, and the setting up of can error - * frame ID and data to userspace. Remove stack allocation afterwards. 
- */ - old_state = priv->can.state; - kvaser_usb_rx_error_update_can_state(priv, es, &tmp_cf); - new_state = priv->can.state; - - skb = alloc_can_err_skb(priv->netdev, &cf); - if (!skb) { - stats->rx_dropped++; - return; - } - memcpy(cf, &tmp_cf, sizeof(*cf)); - - if (new_state != old_state) { - if (es->status & - (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) { - if (!priv->can.restart_ms) - kvaser_usb_simple_msg_async(priv, CMD_STOP_CHIP); - netif_carrier_off(priv->netdev); - } - - if (priv->can.restart_ms && - (old_state >= CAN_STATE_BUS_OFF) && - (new_state < CAN_STATE_BUS_OFF)) { - cf->can_id |= CAN_ERR_RESTARTED; - netif_carrier_on(priv->netdev); - } - } - - switch (dev->family) { - case KVASER_LEAF: - if (es->leaf.error_factor) { - cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT; - - if (es->leaf.error_factor & M16C_EF_ACKE) - cf->data[3] = CAN_ERR_PROT_LOC_ACK; - if (es->leaf.error_factor & M16C_EF_CRCE) - cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; - if (es->leaf.error_factor & M16C_EF_FORME) - cf->data[2] |= CAN_ERR_PROT_FORM; - if (es->leaf.error_factor & M16C_EF_STFE) - cf->data[2] |= CAN_ERR_PROT_STUFF; - if (es->leaf.error_factor & M16C_EF_BITE0) - cf->data[2] |= CAN_ERR_PROT_BIT0; - if (es->leaf.error_factor & M16C_EF_BITE1) - cf->data[2] |= CAN_ERR_PROT_BIT1; - if (es->leaf.error_factor & M16C_EF_TRE) - cf->data[2] |= CAN_ERR_PROT_TX; - } - break; - case KVASER_USBCAN: - if (es->usbcan.error_state & USBCAN_ERROR_STATE_BUSERROR) { - cf->can_id |= CAN_ERR_BUSERROR; - } - break; - } - - cf->data[6] = es->txerr; - cf->data[7] = es->rxerr; - - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; - netif_rx(skb); -} - -/* For USBCAN, report error to userspace iff the channels's errors counter - * has changed, or we're the only channel seeing a bus error state. 
- */ -static void kvaser_usbcan_conditionally_rx_error(const struct kvaser_usb *dev, - struct kvaser_usb_error_summary *es) -{ - struct kvaser_usb_net_priv *priv; - int channel; - bool report_error; - - channel = es->channel; - if (channel >= dev->nchannels) { - dev_err(dev->udev->dev.parent, - "Invalid channel number (%d)\n", channel); - return; - } - - priv = dev->nets[channel]; - report_error = false; - - if (es->txerr != priv->bec.txerr) { - es->usbcan.error_state |= USBCAN_ERROR_STATE_TX_ERROR; - report_error = true; - } - if (es->rxerr != priv->bec.rxerr) { - es->usbcan.error_state |= USBCAN_ERROR_STATE_RX_ERROR; - report_error = true; - } - if ((es->status & M16C_STATE_BUS_ERROR) && - !(es->usbcan.other_ch_status & M16C_STATE_BUS_ERROR)) { - es->usbcan.error_state |= USBCAN_ERROR_STATE_BUSERROR; - report_error = true; - } - - if (report_error) - kvaser_usb_rx_error(dev, es); -} - -static void kvaser_usbcan_rx_error(const struct kvaser_usb *dev, - const struct kvaser_msg *msg) -{ - struct kvaser_usb_error_summary es = { }; - - switch (msg->id) { - /* Sometimes errors are sent as unsolicited chip state events */ - case CMD_CHIP_STATE_EVENT: - es.channel = msg->u.usbcan.chip_state_event.channel; - es.status = msg->u.usbcan.chip_state_event.status; - es.txerr = msg->u.usbcan.chip_state_event.tx_errors_count; - es.rxerr = msg->u.usbcan.chip_state_event.rx_errors_count; - kvaser_usbcan_conditionally_rx_error(dev, &es); - break; - - case CMD_CAN_ERROR_EVENT: - es.channel = 0; - es.status = msg->u.usbcan.error_event.status_ch0; - es.txerr = msg->u.usbcan.error_event.tx_errors_count_ch0; - es.rxerr = msg->u.usbcan.error_event.rx_errors_count_ch0; - es.usbcan.other_ch_status = - msg->u.usbcan.error_event.status_ch1; - kvaser_usbcan_conditionally_rx_error(dev, &es); - - /* The USBCAN firmware supports up to 2 channels. - * Now that ch0 was checked, check if ch1 has any errors. 
- */ - if (dev->nchannels == MAX_USBCAN_NET_DEVICES) { - es.channel = 1; - es.status = msg->u.usbcan.error_event.status_ch1; - es.txerr = msg->u.usbcan.error_event.tx_errors_count_ch1; - es.rxerr = msg->u.usbcan.error_event.rx_errors_count_ch1; - es.usbcan.other_ch_status = - msg->u.usbcan.error_event.status_ch0; - kvaser_usbcan_conditionally_rx_error(dev, &es); - } - break; - - default: - dev_err(dev->udev->dev.parent, "Invalid msg id (%d)\n", - msg->id); - } -} - -static void kvaser_leaf_rx_error(const struct kvaser_usb *dev, - const struct kvaser_msg *msg) -{ - struct kvaser_usb_error_summary es = { }; - - switch (msg->id) { - case CMD_CAN_ERROR_EVENT: - es.channel = msg->u.leaf.error_event.channel; - es.status = msg->u.leaf.error_event.status; - es.txerr = msg->u.leaf.error_event.tx_errors_count; - es.rxerr = msg->u.leaf.error_event.rx_errors_count; - es.leaf.error_factor = msg->u.leaf.error_event.error_factor; - break; - case CMD_LEAF_LOG_MESSAGE: - es.channel = msg->u.leaf.log_message.channel; - es.status = msg->u.leaf.log_message.data[0]; - es.txerr = msg->u.leaf.log_message.data[2]; - es.rxerr = msg->u.leaf.log_message.data[3]; - es.leaf.error_factor = msg->u.leaf.log_message.data[1]; - break; - case CMD_CHIP_STATE_EVENT: - es.channel = msg->u.leaf.chip_state_event.channel; - es.status = msg->u.leaf.chip_state_event.status; - es.txerr = msg->u.leaf.chip_state_event.tx_errors_count; - es.rxerr = msg->u.leaf.chip_state_event.rx_errors_count; - es.leaf.error_factor = 0; - break; - default: - dev_err(dev->udev->dev.parent, "Invalid msg id (%d)\n", - msg->id); - return; - } - - kvaser_usb_rx_error(dev, &es); -} - -static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv, - const struct kvaser_msg *msg) -{ - struct can_frame *cf; - struct sk_buff *skb; - struct net_device_stats *stats = &priv->netdev->stats; - - if (msg->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME | - MSG_FLAG_NERR)) { - netdev_err(priv->netdev, "Unknown error (flags: 0x%02x)\n", - msg->u.rx_can_header.flag); - - stats->rx_errors++; - return; - } - - if (msg->u.rx_can_header.flag & MSG_FLAG_OVERRUN) { - stats->rx_over_errors++; - stats->rx_errors++; - - skb = alloc_can_err_skb(priv->netdev, &cf); - if (!skb) { - stats->rx_dropped++; - return; - } - - cf->can_id |= CAN_ERR_CRTL; - cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; - - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; - netif_rx(skb); - } -} - -static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev, - const struct kvaser_msg *msg) -{ - struct kvaser_usb_net_priv *priv; - struct can_frame *cf; - struct sk_buff *skb; - struct net_device_stats *stats; - u8 channel = msg->u.rx_can_header.channel; - const u8 *rx_msg = NULL; /* GCC */ - - if (channel >= dev->nchannels) { - dev_err(dev->udev->dev.parent, - "Invalid channel number (%d)\n", channel); - return; - } - - priv = dev->nets[channel]; - stats = &priv->netdev->stats; - - if ((msg->u.rx_can_header.flag & MSG_FLAG_ERROR_FRAME) && - (dev->family == KVASER_LEAF && msg->id == CMD_LEAF_LOG_MESSAGE)) { - kvaser_leaf_rx_error(dev, msg); - return; - } else if (msg->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME | - MSG_FLAG_NERR | - MSG_FLAG_OVERRUN)) { - kvaser_usb_rx_can_err(priv, msg); - return; - } else if (msg->u.rx_can_header.flag & ~MSG_FLAG_REMOTE_FRAME) { - netdev_warn(priv->netdev, - "Unhandled frame (flags: 0x%02x)", - msg->u.rx_can_header.flag); - return; - } - - switch (dev->family) { - case KVASER_LEAF: - rx_msg = msg->u.leaf.rx_can.msg; - break; - case KVASER_USBCAN: - rx_msg = 
msg->u.usbcan.rx_can.msg; - break; - } - - skb = alloc_can_skb(priv->netdev, &cf); - if (!skb) { - stats->rx_dropped++; - return; - } - - if (dev->family == KVASER_LEAF && msg->id == CMD_LEAF_LOG_MESSAGE) { - cf->can_id = le32_to_cpu(msg->u.leaf.log_message.id); - if (cf->can_id & KVASER_EXTENDED_FRAME) - cf->can_id &= CAN_EFF_MASK | CAN_EFF_FLAG; - else - cf->can_id &= CAN_SFF_MASK; - - cf->can_dlc = get_can_dlc(msg->u.leaf.log_message.dlc); - - if (msg->u.leaf.log_message.flags & MSG_FLAG_REMOTE_FRAME) - cf->can_id |= CAN_RTR_FLAG; - else - memcpy(cf->data, &msg->u.leaf.log_message.data, - cf->can_dlc); - } else { - cf->can_id = ((rx_msg[0] & 0x1f) << 6) | (rx_msg[1] & 0x3f); - - if (msg->id == CMD_RX_EXT_MESSAGE) { - cf->can_id <<= 18; - cf->can_id |= ((rx_msg[2] & 0x0f) << 14) | - ((rx_msg[3] & 0xff) << 6) | - (rx_msg[4] & 0x3f); - cf->can_id |= CAN_EFF_FLAG; - } - - cf->can_dlc = get_can_dlc(rx_msg[5]); - - if (msg->u.rx_can_header.flag & MSG_FLAG_REMOTE_FRAME) - cf->can_id |= CAN_RTR_FLAG; - else - memcpy(cf->data, &rx_msg[6], - cf->can_dlc); - } - - stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; - netif_rx(skb); -} - -static void kvaser_usb_start_chip_reply(const struct kvaser_usb *dev, - const struct kvaser_msg *msg) -{ - struct kvaser_usb_net_priv *priv; - u8 channel = msg->u.simple.channel; - - if (channel >= dev->nchannels) { - dev_err(dev->udev->dev.parent, - "Invalid channel number (%d)\n", channel); - return; - } - - priv = dev->nets[channel]; - - if (completion_done(&priv->start_comp) && - netif_queue_stopped(priv->netdev)) { - netif_wake_queue(priv->netdev); - } else { - netif_start_queue(priv->netdev); - complete(&priv->start_comp); - } -} - -static void kvaser_usb_stop_chip_reply(const struct kvaser_usb *dev, - const struct kvaser_msg *msg) -{ - struct kvaser_usb_net_priv *priv; - u8 channel = msg->u.simple.channel; - - if (channel >= dev->nchannels) { - dev_err(dev->udev->dev.parent, - "Invalid channel number (%d)\n", channel); - return; - } - - priv = dev->nets[channel]; - - complete(&priv->stop_comp); -} - -static void kvaser_usb_handle_message(const struct kvaser_usb *dev, - const struct kvaser_msg *msg) -{ - switch (msg->id) { - case CMD_START_CHIP_REPLY: - kvaser_usb_start_chip_reply(dev, msg); - break; - - case CMD_STOP_CHIP_REPLY: - kvaser_usb_stop_chip_reply(dev, msg); - break; - - case CMD_RX_STD_MESSAGE: - case CMD_RX_EXT_MESSAGE: - kvaser_usb_rx_can_msg(dev, msg); - break; - - case CMD_LEAF_LOG_MESSAGE: - if (dev->family != KVASER_LEAF) - goto warn; - kvaser_usb_rx_can_msg(dev, msg); - break; - - case CMD_CHIP_STATE_EVENT: - case CMD_CAN_ERROR_EVENT: - if (dev->family == KVASER_LEAF) - kvaser_leaf_rx_error(dev, msg); - else - kvaser_usbcan_rx_error(dev, msg); - break; - - case CMD_TX_ACKNOWLEDGE: - kvaser_usb_tx_acknowledge(dev, msg); - break; - - /* Ignored messages */ - case CMD_USBCAN_CLOCK_OVERFLOW_EVENT: - if (dev->family != KVASER_USBCAN) - goto warn; - break; - - case CMD_FLUSH_QUEUE_REPLY: - if (dev->family != KVASER_LEAF) - goto warn; - break; - - default: -warn: dev_warn(dev->udev->dev.parent, - "Unhandled message (%d)\n", msg->id); - break; - } -} - -static void kvaser_usb_read_bulk_callback(struct urb *urb) -{ - struct kvaser_usb *dev = urb->context; - struct kvaser_msg *msg; - int pos = 0; - int err, i; - - switch (urb->status) { - case 0: - break; - case -ENOENT: - case -EPIPE: - case -EPROTO: - case -ESHUTDOWN: - return; - default: - dev_info(dev->udev->dev.parent, "Rx URB aborted (%d)\n", - urb->status); - goto resubmit_urb; - } - - 
while (pos <= (int)(urb->actual_length - MSG_HEADER_LEN)) { - msg = urb->transfer_buffer + pos; - - /* The Kvaser firmware can only read and write messages that - * does not cross the USB's endpoint wMaxPacketSize boundary. - * If a follow-up command crosses such boundary, firmware puts - * a placeholder zero-length command in its place then aligns - * the real command to the next max packet size. - * - * Handle such cases or we're going to miss a significant - * number of events in case of a heavy rx load on the bus. - */ - if (msg->len == 0) { - pos = round_up(pos, le16_to_cpu(dev->bulk_in-> - wMaxPacketSize)); - continue; - } - - if (pos + msg->len > urb->actual_length) { - dev_err_ratelimited(dev->udev->dev.parent, - "Format error\n"); - break; - } - - kvaser_usb_handle_message(dev, msg); - pos += msg->len; - } - -resubmit_urb: - usb_fill_bulk_urb(urb, dev->udev, - usb_rcvbulkpipe(dev->udev, - dev->bulk_in->bEndpointAddress), - urb->transfer_buffer, RX_BUFFER_SIZE, - kvaser_usb_read_bulk_callback, dev); - - err = usb_submit_urb(urb, GFP_ATOMIC); - if (err == -ENODEV) { - for (i = 0; i < dev->nchannels; i++) { - if (!dev->nets[i]) - continue; - - netif_device_detach(dev->nets[i]->netdev); - } - } else if (err) { - dev_err(dev->udev->dev.parent, - "Failed resubmitting read bulk urb: %d\n", err); - } - - return; -} - -static int kvaser_usb_setup_rx_urbs(struct kvaser_usb *dev) -{ - int i, err = 0; - - if (dev->rxinitdone) - return 0; - - for (i = 0; i < MAX_RX_URBS; i++) { - struct urb *urb = NULL; - u8 *buf = NULL; - dma_addr_t buf_dma; - - urb = usb_alloc_urb(0, GFP_KERNEL); - if (!urb) { - err = -ENOMEM; - break; - } - - buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, - GFP_KERNEL, &buf_dma); - if (!buf) { - dev_warn(dev->udev->dev.parent, - "No memory left for USB buffer\n"); - usb_free_urb(urb); - err = -ENOMEM; - break; - } - - usb_fill_bulk_urb(urb, dev->udev, - usb_rcvbulkpipe(dev->udev, - dev->bulk_in->bEndpointAddress), - buf, RX_BUFFER_SIZE, - kvaser_usb_read_bulk_callback, - dev); - urb->transfer_dma = buf_dma; - urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; - usb_anchor_urb(urb, &dev->rx_submitted); - - err = usb_submit_urb(urb, GFP_KERNEL); - if (err) { - usb_unanchor_urb(urb); - usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf, - buf_dma); - usb_free_urb(urb); - break; - } - - dev->rxbuf[i] = buf; - dev->rxbuf_dma[i] = buf_dma; - - usb_free_urb(urb); - } - - if (i == 0) { - dev_warn(dev->udev->dev.parent, - "Cannot setup read URBs, error %d\n", err); - return err; - } else if (i < MAX_RX_URBS) { - dev_warn(dev->udev->dev.parent, - "RX performances may be slow\n"); - } - - dev->rxinitdone = true; - - return 0; -} - -static int kvaser_usb_set_opt_mode(const struct kvaser_usb_net_priv *priv) -{ - struct kvaser_msg *msg; - int rc; - - msg = kmalloc(sizeof(*msg), GFP_KERNEL); - if (!msg) - return -ENOMEM; - - msg->id = CMD_SET_CTRL_MODE; - msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_ctrl_mode); - msg->u.ctrl_mode.tid = 0xff; - msg->u.ctrl_mode.channel = priv->channel; - - if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) - msg->u.ctrl_mode.ctrl_mode = KVASER_CTRL_MODE_SILENT; - else - msg->u.ctrl_mode.ctrl_mode = KVASER_CTRL_MODE_NORMAL; - - rc = kvaser_usb_send_msg(priv->dev, msg); - - kfree(msg); - return rc; -} - -static int kvaser_usb_start_chip(struct kvaser_usb_net_priv *priv) -{ - int err; - - init_completion(&priv->start_comp); - - err = kvaser_usb_send_simple_msg(priv->dev, CMD_START_CHIP, - priv->channel); - if (err) - return err; - - if 
(!wait_for_completion_timeout(&priv->start_comp, - msecs_to_jiffies(START_TIMEOUT))) - return -ETIMEDOUT; - - return 0; -} - -static int kvaser_usb_open(struct net_device *netdev) -{ - struct kvaser_usb_net_priv *priv = netdev_priv(netdev); - struct kvaser_usb *dev = priv->dev; - int err; - - err = open_candev(netdev); - if (err) - return err; - - err = kvaser_usb_setup_rx_urbs(dev); - if (err) - goto error; - - err = kvaser_usb_set_opt_mode(priv); - if (err) - goto error; - - err = kvaser_usb_start_chip(priv); - if (err) { - netdev_warn(netdev, "Cannot start device, error %d\n", err); - goto error; - } - - priv->can.state = CAN_STATE_ERROR_ACTIVE; - - return 0; - -error: - close_candev(netdev); - return err; -} - -static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv) -{ - int i, max_tx_urbs; - - max_tx_urbs = priv->dev->max_tx_urbs; - - priv->active_tx_contexts = 0; - for (i = 0; i < max_tx_urbs; i++) - priv->tx_contexts[i].echo_index = max_tx_urbs; -} - -/* This method might sleep. Do not call it in the atomic context - * of URB completions. - */ -static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv) -{ - usb_kill_anchored_urbs(&priv->tx_submitted); - kvaser_usb_reset_tx_urb_contexts(priv); -} - -static void kvaser_usb_unlink_all_urbs(struct kvaser_usb *dev) -{ - int i; - - usb_kill_anchored_urbs(&dev->rx_submitted); - - for (i = 0; i < MAX_RX_URBS; i++) - usb_free_coherent(dev->udev, RX_BUFFER_SIZE, - dev->rxbuf[i], - dev->rxbuf_dma[i]); - - for (i = 0; i < dev->nchannels; i++) { - struct kvaser_usb_net_priv *priv = dev->nets[i]; - - if (priv) - kvaser_usb_unlink_tx_urbs(priv); - } -} - -static int kvaser_usb_stop_chip(struct kvaser_usb_net_priv *priv) -{ - int err; - - init_completion(&priv->stop_comp); - - err = kvaser_usb_send_simple_msg(priv->dev, CMD_STOP_CHIP, - priv->channel); - if (err) - return err; - - if (!wait_for_completion_timeout(&priv->stop_comp, - msecs_to_jiffies(STOP_TIMEOUT))) - return -ETIMEDOUT; - - return 0; -} - -static int kvaser_usb_flush_queue(struct kvaser_usb_net_priv *priv) -{ - struct kvaser_msg *msg; - int rc; - - msg = kmalloc(sizeof(*msg), GFP_KERNEL); - if (!msg) - return -ENOMEM; - - msg->id = CMD_FLUSH_QUEUE; - msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_flush_queue); - msg->u.flush_queue.channel = priv->channel; - msg->u.flush_queue.flags = 0x00; - - rc = kvaser_usb_send_msg(priv->dev, msg); - - kfree(msg); - return rc; -} - -static int kvaser_usb_close(struct net_device *netdev) -{ - struct kvaser_usb_net_priv *priv = netdev_priv(netdev); - struct kvaser_usb *dev = priv->dev; - int err; - - netif_stop_queue(netdev); - - err = kvaser_usb_flush_queue(priv); - if (err) - netdev_warn(netdev, "Cannot flush queue, error %d\n", err); - - err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, priv->channel); - if (err) - netdev_warn(netdev, "Cannot reset card, error %d\n", err); - - err = kvaser_usb_stop_chip(priv); - if (err) - netdev_warn(netdev, "Cannot stop device, error %d\n", err); - - /* reset tx contexts */ - kvaser_usb_unlink_tx_urbs(priv); - - priv->can.state = CAN_STATE_STOPPED; - close_candev(priv->netdev); - - return 0; -} - -static void kvaser_usb_write_bulk_callback(struct urb *urb) -{ - struct kvaser_usb_tx_urb_context *context = urb->context; - struct kvaser_usb_net_priv *priv; - struct net_device *netdev; - - if (WARN_ON(!context)) - return; - - priv = context->priv; - netdev = priv->netdev; - - kfree(urb->transfer_buffer); - - if (!netif_device_present(netdev)) - return; - - if 
(urb->status) - netdev_info(netdev, "Tx URB aborted (%d)\n", urb->status); -} - -static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, - struct net_device *netdev) -{ - struct kvaser_usb_net_priv *priv = netdev_priv(netdev); - struct kvaser_usb *dev = priv->dev; - struct net_device_stats *stats = &netdev->stats; - struct can_frame *cf = (struct can_frame *)skb->data; - struct kvaser_usb_tx_urb_context *context = NULL; - struct urb *urb; - void *buf; - struct kvaser_msg *msg; - int i, err, ret = NETDEV_TX_OK; - u8 *msg_tx_can_flags = NULL; /* GCC */ - unsigned long flags; - - if (can_dropped_invalid_skb(netdev, skb)) - return NETDEV_TX_OK; - - urb = usb_alloc_urb(0, GFP_ATOMIC); - if (!urb) { - stats->tx_dropped++; - dev_kfree_skb(skb); - return NETDEV_TX_OK; - } - - buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC); - if (!buf) { - stats->tx_dropped++; - dev_kfree_skb(skb); - goto freeurb; - } - - msg = buf; - msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_tx_can); - msg->u.tx_can.channel = priv->channel; - - switch (dev->family) { - case KVASER_LEAF: - msg_tx_can_flags = &msg->u.tx_can.leaf.flags; - break; - case KVASER_USBCAN: - msg_tx_can_flags = &msg->u.tx_can.usbcan.flags; - break; - } - - *msg_tx_can_flags = 0; - - if (cf->can_id & CAN_EFF_FLAG) { - msg->id = CMD_TX_EXT_MESSAGE; - msg->u.tx_can.msg[0] = (cf->can_id >> 24) & 0x1f; - msg->u.tx_can.msg[1] = (cf->can_id >> 18) & 0x3f; - msg->u.tx_can.msg[2] = (cf->can_id >> 14) & 0x0f; - msg->u.tx_can.msg[3] = (cf->can_id >> 6) & 0xff; - msg->u.tx_can.msg[4] = cf->can_id & 0x3f; - } else { - msg->id = CMD_TX_STD_MESSAGE; - msg->u.tx_can.msg[0] = (cf->can_id >> 6) & 0x1f; - msg->u.tx_can.msg[1] = cf->can_id & 0x3f; - } - - msg->u.tx_can.msg[5] = cf->can_dlc; - memcpy(&msg->u.tx_can.msg[6], cf->data, cf->can_dlc); - - if (cf->can_id & CAN_RTR_FLAG) - *msg_tx_can_flags |= MSG_FLAG_REMOTE_FRAME; - - spin_lock_irqsave(&priv->tx_contexts_lock, flags); - for (i = 0; i < dev->max_tx_urbs; i++) { - if (priv->tx_contexts[i].echo_index == dev->max_tx_urbs) { - context = &priv->tx_contexts[i]; - - context->echo_index = i; - can_put_echo_skb(skb, netdev, context->echo_index); - ++priv->active_tx_contexts; - if (priv->active_tx_contexts >= dev->max_tx_urbs) - netif_stop_queue(netdev); - - break; - } - } - spin_unlock_irqrestore(&priv->tx_contexts_lock, flags); - - /* This should never happen; it implies a flow control bug */ - if (!context) { - netdev_warn(netdev, "cannot find free context\n"); - - kfree(buf); - ret = NETDEV_TX_BUSY; - goto freeurb; - } - - context->priv = priv; - context->dlc = cf->can_dlc; - - msg->u.tx_can.tid = context->echo_index; - - usb_fill_bulk_urb(urb, dev->udev, - usb_sndbulkpipe(dev->udev, - dev->bulk_out->bEndpointAddress), - buf, msg->len, - kvaser_usb_write_bulk_callback, context); - usb_anchor_urb(urb, &priv->tx_submitted); - - err = usb_submit_urb(urb, GFP_ATOMIC); - if (unlikely(err)) { - spin_lock_irqsave(&priv->tx_contexts_lock, flags); - - can_free_echo_skb(netdev, context->echo_index); - context->echo_index = dev->max_tx_urbs; - --priv->active_tx_contexts; - netif_wake_queue(netdev); - - spin_unlock_irqrestore(&priv->tx_contexts_lock, flags); - - usb_unanchor_urb(urb); - kfree(buf); - - stats->tx_dropped++; - - if (err == -ENODEV) - netif_device_detach(netdev); - else - netdev_warn(netdev, "Failed tx_urb %d\n", err); - - goto freeurb; - } - - ret = NETDEV_TX_OK; - -freeurb: - usb_free_urb(urb); - return ret; -} - -static const struct net_device_ops kvaser_usb_netdev_ops = { - .ndo_open = 
kvaser_usb_open, - .ndo_stop = kvaser_usb_close, - .ndo_start_xmit = kvaser_usb_start_xmit, - .ndo_change_mtu = can_change_mtu, -}; - -static const struct can_bittiming_const kvaser_usb_bittiming_const = { - .name = "kvaser_usb", - .tseg1_min = KVASER_USB_TSEG1_MIN, - .tseg1_max = KVASER_USB_TSEG1_MAX, - .tseg2_min = KVASER_USB_TSEG2_MIN, - .tseg2_max = KVASER_USB_TSEG2_MAX, - .sjw_max = KVASER_USB_SJW_MAX, - .brp_min = KVASER_USB_BRP_MIN, - .brp_max = KVASER_USB_BRP_MAX, - .brp_inc = KVASER_USB_BRP_INC, -}; - -static int kvaser_usb_set_bittiming(struct net_device *netdev) -{ - struct kvaser_usb_net_priv *priv = netdev_priv(netdev); - struct can_bittiming *bt = &priv->can.bittiming; - struct kvaser_usb *dev = priv->dev; - struct kvaser_msg *msg; - int rc; - - msg = kmalloc(sizeof(*msg), GFP_KERNEL); - if (!msg) - return -ENOMEM; - - msg->id = CMD_SET_BUS_PARAMS; - msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_busparams); - msg->u.busparams.channel = priv->channel; - msg->u.busparams.tid = 0xff; - msg->u.busparams.bitrate = cpu_to_le32(bt->bitrate); - msg->u.busparams.sjw = bt->sjw; - msg->u.busparams.tseg1 = bt->prop_seg + bt->phase_seg1; - msg->u.busparams.tseg2 = bt->phase_seg2; - - if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) - msg->u.busparams.no_samp = 3; - else - msg->u.busparams.no_samp = 1; - - rc = kvaser_usb_send_msg(dev, msg); - - kfree(msg); - return rc; -} - -static int kvaser_usb_set_mode(struct net_device *netdev, - enum can_mode mode) -{ - struct kvaser_usb_net_priv *priv = netdev_priv(netdev); - int err; - - switch (mode) { - case CAN_MODE_START: - err = kvaser_usb_simple_msg_async(priv, CMD_START_CHIP); - if (err) - return err; - break; - default: - return -EOPNOTSUPP; - } - - return 0; -} - -static int kvaser_usb_get_berr_counter(const struct net_device *netdev, - struct can_berr_counter *bec) -{ - struct kvaser_usb_net_priv *priv = netdev_priv(netdev); - - *bec = priv->bec; - - return 0; -} - -static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev) -{ - int i; - - for (i = 0; i < dev->nchannels; i++) { - if (!dev->nets[i]) - continue; - - unregister_candev(dev->nets[i]->netdev); - } - - kvaser_usb_unlink_all_urbs(dev); - - for (i = 0; i < dev->nchannels; i++) { - if (!dev->nets[i]) - continue; - - free_candev(dev->nets[i]->netdev); - } -} - -static int kvaser_usb_init_one(struct usb_interface *intf, - const struct usb_device_id *id, int channel) -{ - struct kvaser_usb *dev = usb_get_intfdata(intf); - struct net_device *netdev; - struct kvaser_usb_net_priv *priv; - int err; - - err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, channel); - if (err) - return err; - - netdev = alloc_candev(sizeof(*priv) + - dev->max_tx_urbs * sizeof(*priv->tx_contexts), - dev->max_tx_urbs); - if (!netdev) { - dev_err(&intf->dev, "Cannot alloc candev\n"); - return -ENOMEM; - } - - priv = netdev_priv(netdev); - - init_usb_anchor(&priv->tx_submitted); - init_completion(&priv->start_comp); - init_completion(&priv->stop_comp); - - priv->dev = dev; - priv->netdev = netdev; - priv->channel = channel; - - spin_lock_init(&priv->tx_contexts_lock); - kvaser_usb_reset_tx_urb_contexts(priv); - - priv->can.state = CAN_STATE_STOPPED; - priv->can.clock.freq = CAN_USB_CLOCK; - priv->can.bittiming_const = &kvaser_usb_bittiming_const; - priv->can.do_set_bittiming = kvaser_usb_set_bittiming; - priv->can.do_set_mode = kvaser_usb_set_mode; - if (id->driver_info & KVASER_HAS_TXRX_ERRORS) - priv->can.do_get_berr_counter = kvaser_usb_get_berr_counter; - priv->can.ctrlmode_supported = 
CAN_CTRLMODE_3_SAMPLES; - if (id->driver_info & KVASER_HAS_SILENT_MODE) - priv->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY; - - netdev->flags |= IFF_ECHO; - - netdev->netdev_ops = &kvaser_usb_netdev_ops; - - SET_NETDEV_DEV(netdev, &intf->dev); - netdev->dev_id = channel; - - dev->nets[channel] = priv; - - err = register_candev(netdev); - if (err) { - dev_err(&intf->dev, "Failed to register can device\n"); - free_candev(netdev); - dev->nets[channel] = NULL; - return err; - } - - netdev_dbg(netdev, "device registered\n"); - - return 0; -} - -static int kvaser_usb_get_endpoints(const struct usb_interface *intf, - struct usb_endpoint_descriptor **in, - struct usb_endpoint_descriptor **out) -{ - const struct usb_host_interface *iface_desc; - struct usb_endpoint_descriptor *endpoint; - int i; - - iface_desc = &intf->altsetting[0]; - - for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { - endpoint = &iface_desc->endpoint[i].desc; - - if (!*in && usb_endpoint_is_bulk_in(endpoint)) - *in = endpoint; - - if (!*out && usb_endpoint_is_bulk_out(endpoint)) - *out = endpoint; - - /* use first bulk endpoint for in and out */ - if (*in && *out) - return 0; - } - - return -ENODEV; -} - -static int kvaser_usb_probe(struct usb_interface *intf, - const struct usb_device_id *id) -{ - struct kvaser_usb *dev; - int err = -ENOMEM; - int i, retry = 3; - - dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL); - if (!dev) - return -ENOMEM; - - if (kvaser_is_leaf(id)) { - dev->family = KVASER_LEAF; - } else if (kvaser_is_usbcan(id)) { - dev->family = KVASER_USBCAN; - } else { - dev_err(&intf->dev, - "Product ID (%d) does not belong to any known Kvaser USB family", - id->idProduct); - return -ENODEV; - } - - err = kvaser_usb_get_endpoints(intf, &dev->bulk_in, &dev->bulk_out); - if (err) { - dev_err(&intf->dev, "Cannot get usb endpoint(s)"); - return err; - } - - dev->udev = interface_to_usbdev(intf); - - init_usb_anchor(&dev->rx_submitted); - - usb_set_intfdata(intf, dev); - - /* On some x86 laptops, plugging a Kvaser device again after - * an unplug makes the firmware always ignore the very first - * command. For such a case, provide some room for retries - * instead of completely exiting the driver. 
- */ - do { - err = kvaser_usb_get_software_info(dev); - } while (--retry && err == -ETIMEDOUT); - - if (err) { - dev_err(&intf->dev, - "Cannot get software infos, error %d\n", err); - return err; - } - - dev_dbg(&intf->dev, "Firmware version: %d.%d.%d\n", - ((dev->fw_version >> 24) & 0xff), - ((dev->fw_version >> 16) & 0xff), - (dev->fw_version & 0xffff)); - - dev_dbg(&intf->dev, "Max outstanding tx = %d URBs\n", dev->max_tx_urbs); - - err = kvaser_usb_get_card_info(dev); - if (err) { - dev_err(&intf->dev, - "Cannot get card infos, error %d\n", err); - return err; - } - - for (i = 0; i < dev->nchannels; i++) { - err = kvaser_usb_init_one(intf, id, i); - if (err) { - kvaser_usb_remove_interfaces(dev); - return err; - } - } - - return 0; -} - -static void kvaser_usb_disconnect(struct usb_interface *intf) -{ - struct kvaser_usb *dev = usb_get_intfdata(intf); - - usb_set_intfdata(intf, NULL); - - if (!dev) - return; - - kvaser_usb_remove_interfaces(dev); -} - -static struct usb_driver kvaser_usb_driver = { - .name = "kvaser_usb", - .probe = kvaser_usb_probe, - .disconnect = kvaser_usb_disconnect, - .id_table = kvaser_usb_table, -}; - -module_usb_driver(kvaser_usb_driver); - -MODULE_AUTHOR("Olivier Sobrie <olivier@sobrie.be>"); -MODULE_DESCRIPTION("CAN driver for Kvaser CAN/USB devices"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/can/usb/kvaser_usb/Makefile b/drivers/net/can/usb/kvaser_usb/Makefile new file mode 100644 index 000000000000..9f41ddab6a5a --- /dev/null +++ b/drivers/net/can/usb/kvaser_usb/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o +kvaser_usb-y = kvaser_usb_core.o kvaser_usb_leaf.o kvaser_usb_hydra.o diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h new file mode 100644 index 000000000000..390b6bde883c --- /dev/null +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h @@ -0,0 +1,188 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Parts of this driver are based on the following: + * - Kvaser linux leaf driver (version 4.78) + * - CAN driver for esd CAN-USB/2 + * - Kvaser linux usbcanII driver (version 5.3) + * - Kvaser linux mhydra driver (version 5.24) + * + * Copyright (C) 2002-2018 KVASER AB, Sweden. All rights reserved. + * Copyright (C) 2010 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh + * Copyright (C) 2012 Olivier Sobrie <olivier@sobrie.be> + * Copyright (C) 2015 Valeo S.A. 
+ */ + +#ifndef KVASER_USB_H +#define KVASER_USB_H + +/* Kvaser USB CAN dongles are divided into three major platforms: + * - Hydra: Running firmware labeled as 'mhydra' + * - Leaf: Based on Renesas M32C or Freescale i.MX28, running firmware labeled + * as 'filo' + * - UsbcanII: Based on Renesas M16C, running firmware labeled as 'helios' + */ + +#include <linux/completion.h> +#include <linux/spinlock.h> +#include <linux/types.h> +#include <linux/usb.h> + +#include <linux/can.h> +#include <linux/can/dev.h> + +#define KVASER_USB_MAX_RX_URBS 4 +#define KVASER_USB_MAX_TX_URBS 128 +#define KVASER_USB_TIMEOUT 1000 /* msecs */ +#define KVASER_USB_RX_BUFFER_SIZE 3072 +#define KVASER_USB_MAX_NET_DEVICES 5 + +/* USB devices features */ +#define KVASER_USB_HAS_SILENT_MODE BIT(0) +#define KVASER_USB_HAS_TXRX_ERRORS BIT(1) + +/* Device capabilities */ +#define KVASER_USB_CAP_BERR_CAP 0x01 +#define KVASER_USB_CAP_EXT_CAP 0x02 +#define KVASER_USB_HYDRA_CAP_EXT_CMD 0x04 + +struct kvaser_usb_dev_cfg; + +enum kvaser_usb_leaf_family { + KVASER_LEAF, + KVASER_USBCAN, +}; + +#define KVASER_USB_HYDRA_MAX_CMD_LEN 128 +struct kvaser_usb_dev_card_data_hydra { + u8 channel_to_he[KVASER_USB_MAX_NET_DEVICES]; + u8 sysdbg_he; + spinlock_t transid_lock; /* lock for transid */ + u16 transid; + /* lock for usb_rx_leftover and usb_rx_leftover_len */ + spinlock_t usb_rx_leftover_lock; + u8 usb_rx_leftover[KVASER_USB_HYDRA_MAX_CMD_LEN]; + u8 usb_rx_leftover_len; +}; +struct kvaser_usb_dev_card_data { + u32 ctrlmode_supported; + u32 capabilities; + union { + struct { + enum kvaser_usb_leaf_family family; + } leaf; + struct kvaser_usb_dev_card_data_hydra hydra; + }; +}; + +/* Context for an outstanding, not yet ACKed, transmission */ +struct kvaser_usb_tx_urb_context { + struct kvaser_usb_net_priv *priv; + u32 echo_index; + int dlc; +}; + +struct kvaser_usb { + struct usb_device *udev; + struct usb_interface *intf; + struct kvaser_usb_net_priv *nets[KVASER_USB_MAX_NET_DEVICES]; + const struct kvaser_usb_dev_ops *ops; + const struct kvaser_usb_dev_cfg *cfg; + + struct usb_endpoint_descriptor *bulk_in, *bulk_out; + struct usb_anchor rx_submitted; + + /* @max_tx_urbs: Firmware-reported maximum number of outstanding, + * not yet ACKed, transmissions on this device. This value is + * also used as a sentinel for marking free tx contexts. 
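+     * For example (illustrative numbers, not part of this commit): if the
+     * firmware reports max_tx_urbs = 16, then tx_contexts[i].echo_index == 16
+     * marks context i as free, while in-flight transmissions hold echo_index
+     * values 0..15.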
+     */
+    u32 fw_version;
+    unsigned int nchannels;
+    unsigned int max_tx_urbs;
+    struct kvaser_usb_dev_card_data card_data;
+
+    bool rxinitdone;
+    void *rxbuf[KVASER_USB_MAX_RX_URBS];
+    dma_addr_t rxbuf_dma[KVASER_USB_MAX_RX_URBS];
+};
+
+struct kvaser_usb_net_priv {
+    struct can_priv can;
+    struct can_berr_counter bec;
+
+    struct kvaser_usb *dev;
+    struct net_device *netdev;
+    int channel;
+
+    struct completion start_comp, stop_comp, flush_comp;
+    struct usb_anchor tx_submitted;
+
+    spinlock_t tx_contexts_lock; /* lock for active_tx_contexts */
+    int active_tx_contexts;
+    struct kvaser_usb_tx_urb_context tx_contexts[];
+};
+
+/**
+ * struct kvaser_usb_dev_ops - Device specific functions
+ * @dev_set_mode: used for can.do_set_mode
+ * @dev_set_bittiming: used for can.do_set_bittiming
+ * @dev_set_data_bittiming: used for can.do_set_data_bittiming
+ * @dev_get_berr_counter: used for can.do_get_berr_counter
+ *
+ * @dev_setup_endpoints: setup USB in and out endpoints
+ * @dev_init_card: initialize card
+ * @dev_get_software_info: get software info
+ * @dev_get_software_details: get software details
+ * @dev_get_card_info: get card info
+ * @dev_get_capabilities: discover device capabilities
+ *
+ * @dev_set_opt_mode: set ctrlmode
+ * @dev_start_chip: start the CAN controller
+ * @dev_stop_chip: stop the CAN controller
+ * @dev_reset_chip: reset the CAN controller
+ * @dev_flush_queue: flush outstanding CAN messages
+ * @dev_read_bulk_callback: handle incoming commands
+ * @dev_frame_to_cmd: translate struct can_frame into device command
+ */
+struct kvaser_usb_dev_ops {
+    int (*dev_set_mode)(struct net_device *netdev, enum can_mode mode);
+    int (*dev_set_bittiming)(struct net_device *netdev);
+    int (*dev_set_data_bittiming)(struct net_device *netdev);
+    int (*dev_get_berr_counter)(const struct net_device *netdev,
+                                struct can_berr_counter *bec);
+    int (*dev_setup_endpoints)(struct kvaser_usb *dev);
+    int (*dev_init_card)(struct kvaser_usb *dev);
+    int (*dev_get_software_info)(struct kvaser_usb *dev);
+    int (*dev_get_software_details)(struct kvaser_usb *dev);
+    int (*dev_get_card_info)(struct kvaser_usb *dev);
+    int (*dev_get_capabilities)(struct kvaser_usb *dev);
+    int (*dev_set_opt_mode)(const struct kvaser_usb_net_priv *priv);
+    int (*dev_start_chip)(struct kvaser_usb_net_priv *priv);
+    int (*dev_stop_chip)(struct kvaser_usb_net_priv *priv);
+    int (*dev_reset_chip)(struct kvaser_usb *dev, int channel);
+    int (*dev_flush_queue)(struct kvaser_usb_net_priv *priv);
+    void (*dev_read_bulk_callback)(struct kvaser_usb *dev, void *buf,
+                                   int len);
+    void *(*dev_frame_to_cmd)(const struct kvaser_usb_net_priv *priv,
+                              const struct sk_buff *skb, int *frame_len,
+                              int *cmd_len, u16 transid);
+};
+
+struct kvaser_usb_dev_cfg {
+    const struct can_clock clock;
+    const unsigned int timestamp_freq;
+    const struct can_bittiming_const * const bittiming_const;
+    const struct can_bittiming_const * const data_bittiming_const;
+};
+
+extern const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops;
+extern const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops;
+
+int kvaser_usb_recv_cmd(const struct kvaser_usb *dev, void *cmd, int len,
+                        int *actual_len);
+
+int kvaser_usb_send_cmd(const struct kvaser_usb *dev, void *cmd, int len);
+
+int kvaser_usb_send_cmd_async(struct kvaser_usb_net_priv *priv, void *cmd,
+                              int len);
+
+int kvaser_usb_can_rx_over_error(struct net_device *netdev);
+#endif /* KVASER_USB_H */
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c new file mode 100644 index 000000000000..b939a4c10b84 --- /dev/null +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c @@ -0,0 +1,835 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Parts of this driver are based on the following: + * - Kvaser linux leaf driver (version 4.78) + * - CAN driver for esd CAN-USB/2 + * - Kvaser linux usbcanII driver (version 5.3) + * - Kvaser linux mhydra driver (version 5.24) + * + * Copyright (C) 2002-2018 KVASER AB, Sweden. All rights reserved. + * Copyright (C) 2010 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh + * Copyright (C) 2012 Olivier Sobrie <olivier@sobrie.be> + * Copyright (C) 2015 Valeo S.A. + */ + +#include <linux/completion.h> +#include <linux/device.h> +#include <linux/gfp.h> +#include <linux/if.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/spinlock.h> +#include <linux/types.h> +#include <linux/usb.h> + +#include <linux/can.h> +#include <linux/can/dev.h> +#include <linux/can/error.h> +#include <linux/can/netlink.h> + +#include "kvaser_usb.h" + +/* Kvaser USB vendor id. */ +#define KVASER_VENDOR_ID 0x0bfd + +/* Kvaser Leaf USB devices product ids */ +#define USB_LEAF_DEVEL_PRODUCT_ID 10 +#define USB_LEAF_LITE_PRODUCT_ID 11 +#define USB_LEAF_PRO_PRODUCT_ID 12 +#define USB_LEAF_SPRO_PRODUCT_ID 14 +#define USB_LEAF_PRO_LS_PRODUCT_ID 15 +#define USB_LEAF_PRO_SWC_PRODUCT_ID 16 +#define USB_LEAF_PRO_LIN_PRODUCT_ID 17 +#define USB_LEAF_SPRO_LS_PRODUCT_ID 18 +#define USB_LEAF_SPRO_SWC_PRODUCT_ID 19 +#define USB_MEMO2_DEVEL_PRODUCT_ID 22 +#define USB_MEMO2_HSHS_PRODUCT_ID 23 +#define USB_UPRO_HSHS_PRODUCT_ID 24 +#define USB_LEAF_LITE_GI_PRODUCT_ID 25 +#define USB_LEAF_PRO_OBDII_PRODUCT_ID 26 +#define USB_MEMO2_HSLS_PRODUCT_ID 27 +#define USB_LEAF_LITE_CH_PRODUCT_ID 28 +#define USB_BLACKBIRD_SPRO_PRODUCT_ID 29 +#define USB_OEM_MERCURY_PRODUCT_ID 34 +#define USB_OEM_LEAF_PRODUCT_ID 35 +#define USB_CAN_R_PRODUCT_ID 39 +#define USB_LEAF_LITE_V2_PRODUCT_ID 288 +#define USB_MINI_PCIE_HS_PRODUCT_ID 289 +#define USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID 290 +#define USB_USBCAN_LIGHT_2HS_PRODUCT_ID 291 +#define USB_MINI_PCIE_2HS_PRODUCT_ID 292 + +/* Kvaser USBCan-II devices product ids */ +#define USB_USBCAN_REVB_PRODUCT_ID 2 +#define USB_VCI2_PRODUCT_ID 3 +#define USB_USBCAN2_PRODUCT_ID 4 +#define USB_MEMORATOR_PRODUCT_ID 5 + +/* Kvaser Minihydra USB devices product ids */ +#define USB_BLACKBIRD_V2_PRODUCT_ID 258 +#define USB_MEMO_PRO_5HS_PRODUCT_ID 260 +#define USB_USBCAN_PRO_5HS_PRODUCT_ID 261 +#define USB_USBCAN_LIGHT_4HS_PRODUCT_ID 262 +#define USB_LEAF_PRO_HS_V2_PRODUCT_ID 263 +#define USB_USBCAN_PRO_2HS_V2_PRODUCT_ID 264 +#define USB_MEMO_2HS_PRODUCT_ID 265 +#define USB_MEMO_PRO_2HS_V2_PRODUCT_ID 266 +#define USB_HYBRID_CANLIN_PRODUCT_ID 267 +#define USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID 268 +#define USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID 269 +#define USB_HYBRID_PRO_CANLIN_PRODUCT_ID 270 + +static inline bool kvaser_is_leaf(const struct usb_device_id *id) +{ + return (id->idProduct >= USB_LEAF_DEVEL_PRODUCT_ID && + id->idProduct <= USB_CAN_R_PRODUCT_ID) || + (id->idProduct >= USB_LEAF_LITE_V2_PRODUCT_ID && + id->idProduct <= USB_MINI_PCIE_2HS_PRODUCT_ID); +} + +static inline bool kvaser_is_usbcan(const struct usb_device_id *id) +{ + return id->idProduct >= USB_USBCAN_REVB_PRODUCT_ID && + id->idProduct <= USB_MEMORATOR_PRODUCT_ID; +} + +static inline bool kvaser_is_hydra(const struct usb_device_id *id) +{ + return id->idProduct >= USB_BLACKBIRD_V2_PRODUCT_ID 
&& + id->idProduct <= USB_HYBRID_PRO_CANLIN_PRODUCT_ID; +} + +static const struct usb_device_id kvaser_usb_table[] = { + /* Leaf USB product IDs */ + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_DEVEL_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_PRODUCT_ID), + .driver_info = KVASER_USB_HAS_TXRX_ERRORS | + KVASER_USB_HAS_SILENT_MODE }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_PRODUCT_ID), + .driver_info = KVASER_USB_HAS_TXRX_ERRORS | + KVASER_USB_HAS_SILENT_MODE }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LS_PRODUCT_ID), + .driver_info = KVASER_USB_HAS_TXRX_ERRORS | + KVASER_USB_HAS_SILENT_MODE }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_SWC_PRODUCT_ID), + .driver_info = KVASER_USB_HAS_TXRX_ERRORS | + KVASER_USB_HAS_SILENT_MODE }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LIN_PRODUCT_ID), + .driver_info = KVASER_USB_HAS_TXRX_ERRORS | + KVASER_USB_HAS_SILENT_MODE }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_LS_PRODUCT_ID), + .driver_info = KVASER_USB_HAS_TXRX_ERRORS | + KVASER_USB_HAS_SILENT_MODE }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_SWC_PRODUCT_ID), + .driver_info = KVASER_USB_HAS_TXRX_ERRORS | + KVASER_USB_HAS_SILENT_MODE }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_DEVEL_PRODUCT_ID), + .driver_info = KVASER_USB_HAS_TXRX_ERRORS | + KVASER_USB_HAS_SILENT_MODE }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSHS_PRODUCT_ID), + .driver_info = KVASER_USB_HAS_TXRX_ERRORS | + KVASER_USB_HAS_SILENT_MODE }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_UPRO_HSHS_PRODUCT_ID), + .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_GI_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_OBDII_PRODUCT_ID), + .driver_info = KVASER_USB_HAS_TXRX_ERRORS | + KVASER_USB_HAS_SILENT_MODE }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSLS_PRODUCT_ID), + .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_CH_PRODUCT_ID), + .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_SPRO_PRODUCT_ID), + .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_MERCURY_PRODUCT_ID), + .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_LEAF_PRODUCT_ID), + .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_CAN_R_PRODUCT_ID), + .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_2HS_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_2HS_PRODUCT_ID) }, + + /* USBCANII USB product IDs */ + { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN2_PRODUCT_ID), + .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_REVB_PRODUCT_ID), + .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMORATOR_PRODUCT_ID), + .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_VCI2_PRODUCT_ID), + .driver_info = KVASER_USB_HAS_TXRX_ERRORS }, + + /* Minihydra USB product IDs */ + { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_V2_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_5HS_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_5HS_PRODUCT_ID) }, + { 
USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_4HS_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_HS_V2_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_2HS_V2_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_2HS_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_2HS_V2_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_CANLIN_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_CANLIN_PRODUCT_ID) }, + { } +}; +MODULE_DEVICE_TABLE(usb, kvaser_usb_table); + +int kvaser_usb_send_cmd(const struct kvaser_usb *dev, void *cmd, int len) +{ + int actual_len; /* Not used */ + + return usb_bulk_msg(dev->udev, + usb_sndbulkpipe(dev->udev, + dev->bulk_out->bEndpointAddress), + cmd, len, &actual_len, KVASER_USB_TIMEOUT); +} + +int kvaser_usb_recv_cmd(const struct kvaser_usb *dev, void *cmd, int len, + int *actual_len) +{ + return usb_bulk_msg(dev->udev, + usb_rcvbulkpipe(dev->udev, + dev->bulk_in->bEndpointAddress), + cmd, len, actual_len, KVASER_USB_TIMEOUT); +} + +static void kvaser_usb_send_cmd_callback(struct urb *urb) +{ + struct net_device *netdev = urb->context; + + kfree(urb->transfer_buffer); + + if (urb->status) + netdev_warn(netdev, "urb status received: %d\n", urb->status); +} + +int kvaser_usb_send_cmd_async(struct kvaser_usb_net_priv *priv, void *cmd, + int len) +{ + struct kvaser_usb *dev = priv->dev; + struct net_device *netdev = priv->netdev; + struct urb *urb; + int err; + + urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!urb) + return -ENOMEM; + + usb_fill_bulk_urb(urb, dev->udev, + usb_sndbulkpipe(dev->udev, + dev->bulk_out->bEndpointAddress), + cmd, len, kvaser_usb_send_cmd_callback, netdev); + usb_anchor_urb(urb, &priv->tx_submitted); + + err = usb_submit_urb(urb, GFP_ATOMIC); + if (err) { + netdev_err(netdev, "Error transmitting URB\n"); + usb_unanchor_urb(urb); + } + usb_free_urb(urb); + + return 0; +} + +int kvaser_usb_can_rx_over_error(struct net_device *netdev) +{ + struct net_device_stats *stats = &netdev->stats; + struct can_frame *cf; + struct sk_buff *skb; + + stats->rx_over_errors++; + stats->rx_errors++; + + skb = alloc_can_err_skb(netdev, &cf); + if (!skb) { + stats->rx_dropped++; + netdev_warn(netdev, "No memory left for err_skb\n"); + return -ENOMEM; + } + + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; + + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + netif_rx(skb); + + return 0; +} + +static void kvaser_usb_read_bulk_callback(struct urb *urb) +{ + struct kvaser_usb *dev = urb->context; + int err; + unsigned int i; + + switch (urb->status) { + case 0: + break; + case -ENOENT: + case -EPIPE: + case -EPROTO: + case -ESHUTDOWN: + return; + default: + dev_info(&dev->intf->dev, "Rx URB aborted (%d)\n", urb->status); + goto resubmit_urb; + } + + dev->ops->dev_read_bulk_callback(dev, urb->transfer_buffer, + urb->actual_length); + +resubmit_urb: + usb_fill_bulk_urb(urb, dev->udev, + usb_rcvbulkpipe(dev->udev, + dev->bulk_in->bEndpointAddress), + urb->transfer_buffer, KVASER_USB_RX_BUFFER_SIZE, + kvaser_usb_read_bulk_callback, dev); + + err = usb_submit_urb(urb, GFP_ATOMIC); + if (err == -ENODEV) { + for (i = 0; i < dev->nchannels; i++) { + if (!dev->nets[i]) + continue; + + netif_device_detach(dev->nets[i]->netdev); + } + } else if (err) { + dev_err(&dev->intf->dev, + "Failed resubmitting read bulk urb: %d\n", err); + } +} + +static int 
kvaser_usb_setup_rx_urbs(struct kvaser_usb *dev)
+{
+    int i, err = 0;
+
+    if (dev->rxinitdone)
+        return 0;
+
+    for (i = 0; i < KVASER_USB_MAX_RX_URBS; i++) {
+        struct urb *urb = NULL;
+        u8 *buf = NULL;
+        dma_addr_t buf_dma;
+
+        urb = usb_alloc_urb(0, GFP_KERNEL);
+        if (!urb) {
+            err = -ENOMEM;
+            break;
+        }
+
+        buf = usb_alloc_coherent(dev->udev, KVASER_USB_RX_BUFFER_SIZE,
+                                 GFP_KERNEL, &buf_dma);
+        if (!buf) {
+            dev_warn(&dev->intf->dev,
+                     "No memory left for USB buffer\n");
+            usb_free_urb(urb);
+            err = -ENOMEM;
+            break;
+        }
+
+        usb_fill_bulk_urb(urb, dev->udev,
+                          usb_rcvbulkpipe
+                          (dev->udev,
+                           dev->bulk_in->bEndpointAddress),
+                          buf, KVASER_USB_RX_BUFFER_SIZE,
+                          kvaser_usb_read_bulk_callback, dev);
+        urb->transfer_dma = buf_dma;
+        urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+        usb_anchor_urb(urb, &dev->rx_submitted);
+
+        err = usb_submit_urb(urb, GFP_KERNEL);
+        if (err) {
+            usb_unanchor_urb(urb);
+            usb_free_coherent(dev->udev,
+                              KVASER_USB_RX_BUFFER_SIZE, buf,
+                              buf_dma);
+            usb_free_urb(urb);
+            break;
+        }
+
+        dev->rxbuf[i] = buf;
+        dev->rxbuf_dma[i] = buf_dma;
+
+        usb_free_urb(urb);
+    }
+
+    if (i == 0) {
+        dev_warn(&dev->intf->dev, "Cannot setup read URBs, error %d\n",
+                 err);
+        return err;
+    } else if (i < KVASER_USB_MAX_RX_URBS) {
+        dev_warn(&dev->intf->dev, "RX performance may be slow\n");
+    }
+
+    dev->rxinitdone = true;
+
+    return 0;
+}
+
+static int kvaser_usb_open(struct net_device *netdev)
+{
+    struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+    struct kvaser_usb *dev = priv->dev;
+    int err;
+
+    err = open_candev(netdev);
+    if (err)
+        return err;
+
+    err = kvaser_usb_setup_rx_urbs(dev);
+    if (err)
+        goto error;
+
+    err = dev->ops->dev_set_opt_mode(priv);
+    if (err)
+        goto error;
+
+    err = dev->ops->dev_start_chip(priv);
+    if (err) {
+        netdev_warn(netdev, "Cannot start device, error %d\n", err);
+        goto error;
+    }
+
+    priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+    return 0;
+
+error:
+    close_candev(netdev);
+    return err;
+}
+
+static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv)
+{
+    int i, max_tx_urbs;
+
+    max_tx_urbs = priv->dev->max_tx_urbs;
+
+    priv->active_tx_contexts = 0;
+    for (i = 0; i < max_tx_urbs; i++)
+        priv->tx_contexts[i].echo_index = max_tx_urbs;
+}
+
+/* This method might sleep. Do not call it in the atomic context
+ * of URB completions.
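+ * (usb_kill_anchored_urbs() blocks until each anchored URB has completed
+ * or been cancelled and its completion handler has finished running.)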
+ */ +static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv) +{ + usb_kill_anchored_urbs(&priv->tx_submitted); + kvaser_usb_reset_tx_urb_contexts(priv); +} + +static void kvaser_usb_unlink_all_urbs(struct kvaser_usb *dev) +{ + int i; + + usb_kill_anchored_urbs(&dev->rx_submitted); + + for (i = 0; i < KVASER_USB_MAX_RX_URBS; i++) + usb_free_coherent(dev->udev, KVASER_USB_RX_BUFFER_SIZE, + dev->rxbuf[i], dev->rxbuf_dma[i]); + + for (i = 0; i < dev->nchannels; i++) { + struct kvaser_usb_net_priv *priv = dev->nets[i]; + + if (priv) + kvaser_usb_unlink_tx_urbs(priv); + } +} + +static int kvaser_usb_close(struct net_device *netdev) +{ + struct kvaser_usb_net_priv *priv = netdev_priv(netdev); + struct kvaser_usb *dev = priv->dev; + int err; + + netif_stop_queue(netdev); + + err = dev->ops->dev_flush_queue(priv); + if (err) + netdev_warn(netdev, "Cannot flush queue, error %d\n", err); + + if (dev->ops->dev_reset_chip) { + err = dev->ops->dev_reset_chip(dev, priv->channel); + if (err) + netdev_warn(netdev, "Cannot reset card, error %d\n", + err); + } + + err = dev->ops->dev_stop_chip(priv); + if (err) + netdev_warn(netdev, "Cannot stop device, error %d\n", err); + + /* reset tx contexts */ + kvaser_usb_unlink_tx_urbs(priv); + + priv->can.state = CAN_STATE_STOPPED; + close_candev(priv->netdev); + + return 0; +} + +static void kvaser_usb_write_bulk_callback(struct urb *urb) +{ + struct kvaser_usb_tx_urb_context *context = urb->context; + struct kvaser_usb_net_priv *priv; + struct net_device *netdev; + + if (WARN_ON(!context)) + return; + + priv = context->priv; + netdev = priv->netdev; + + kfree(urb->transfer_buffer); + + if (!netif_device_present(netdev)) + return; + + if (urb->status) + netdev_info(netdev, "Tx URB aborted (%d)\n", urb->status); +} + +static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, + struct net_device *netdev) +{ + struct kvaser_usb_net_priv *priv = netdev_priv(netdev); + struct kvaser_usb *dev = priv->dev; + struct net_device_stats *stats = &netdev->stats; + struct kvaser_usb_tx_urb_context *context = NULL; + struct urb *urb; + void *buf; + int cmd_len = 0; + int err, ret = NETDEV_TX_OK; + unsigned int i; + unsigned long flags; + + if (can_dropped_invalid_skb(netdev, skb)) + return NETDEV_TX_OK; + + urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!urb) { + stats->tx_dropped++; + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + + spin_lock_irqsave(&priv->tx_contexts_lock, flags); + for (i = 0; i < dev->max_tx_urbs; i++) { + if (priv->tx_contexts[i].echo_index == dev->max_tx_urbs) { + context = &priv->tx_contexts[i]; + + context->echo_index = i; + can_put_echo_skb(skb, netdev, context->echo_index); + ++priv->active_tx_contexts; + if (priv->active_tx_contexts >= (int)dev->max_tx_urbs) + netif_stop_queue(netdev); + + break; + } + } + spin_unlock_irqrestore(&priv->tx_contexts_lock, flags); + + /* This should never happen; it implies a flow control bug */ + if (!context) { + netdev_warn(netdev, "cannot find free context\n"); + + ret = NETDEV_TX_BUSY; + goto freeurb; + } + + buf = dev->ops->dev_frame_to_cmd(priv, skb, &context->dlc, &cmd_len, + context->echo_index); + if (!buf) { + stats->tx_dropped++; + dev_kfree_skb(skb); + spin_lock_irqsave(&priv->tx_contexts_lock, flags); + + can_free_echo_skb(netdev, context->echo_index); + context->echo_index = dev->max_tx_urbs; + --priv->active_tx_contexts; + netif_wake_queue(netdev); + + spin_unlock_irqrestore(&priv->tx_contexts_lock, flags); + goto freeurb; + } + + context->priv = priv; + + usb_fill_bulk_urb(urb, 
dev->udev, + usb_sndbulkpipe(dev->udev, + dev->bulk_out->bEndpointAddress), + buf, cmd_len, kvaser_usb_write_bulk_callback, + context); + usb_anchor_urb(urb, &priv->tx_submitted); + + err = usb_submit_urb(urb, GFP_ATOMIC); + if (unlikely(err)) { + spin_lock_irqsave(&priv->tx_contexts_lock, flags); + + can_free_echo_skb(netdev, context->echo_index); + context->echo_index = dev->max_tx_urbs; + --priv->active_tx_contexts; + netif_wake_queue(netdev); + + spin_unlock_irqrestore(&priv->tx_contexts_lock, flags); + + usb_unanchor_urb(urb); + kfree(buf); + + stats->tx_dropped++; + + if (err == -ENODEV) + netif_device_detach(netdev); + else + netdev_warn(netdev, "Failed tx_urb %d\n", err); + + goto freeurb; + } + + ret = NETDEV_TX_OK; + +freeurb: + usb_free_urb(urb); + return ret; +} + +static const struct net_device_ops kvaser_usb_netdev_ops = { + .ndo_open = kvaser_usb_open, + .ndo_stop = kvaser_usb_close, + .ndo_start_xmit = kvaser_usb_start_xmit, + .ndo_change_mtu = can_change_mtu, +}; + +static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev) +{ + int i; + + for (i = 0; i < dev->nchannels; i++) { + if (!dev->nets[i]) + continue; + + unregister_candev(dev->nets[i]->netdev); + } + + kvaser_usb_unlink_all_urbs(dev); + + for (i = 0; i < dev->nchannels; i++) { + if (!dev->nets[i]) + continue; + + free_candev(dev->nets[i]->netdev); + } +} + +static int kvaser_usb_init_one(struct kvaser_usb *dev, + const struct usb_device_id *id, int channel) +{ + struct net_device *netdev; + struct kvaser_usb_net_priv *priv; + int err; + + if (dev->ops->dev_reset_chip) { + err = dev->ops->dev_reset_chip(dev, channel); + if (err) + return err; + } + + netdev = alloc_candev(sizeof(*priv) + + dev->max_tx_urbs * sizeof(*priv->tx_contexts), + dev->max_tx_urbs); + if (!netdev) { + dev_err(&dev->intf->dev, "Cannot alloc candev\n"); + return -ENOMEM; + } + + priv = netdev_priv(netdev); + + init_usb_anchor(&priv->tx_submitted); + init_completion(&priv->start_comp); + init_completion(&priv->stop_comp); + priv->can.ctrlmode_supported = 0; + + priv->dev = dev; + priv->netdev = netdev; + priv->channel = channel; + + spin_lock_init(&priv->tx_contexts_lock); + kvaser_usb_reset_tx_urb_contexts(priv); + + priv->can.state = CAN_STATE_STOPPED; + priv->can.clock.freq = dev->cfg->clock.freq; + priv->can.bittiming_const = dev->cfg->bittiming_const; + priv->can.do_set_bittiming = dev->ops->dev_set_bittiming; + priv->can.do_set_mode = dev->ops->dev_set_mode; + if ((id->driver_info & KVASER_USB_HAS_TXRX_ERRORS) || + (priv->dev->card_data.capabilities & KVASER_USB_CAP_BERR_CAP)) + priv->can.do_get_berr_counter = dev->ops->dev_get_berr_counter; + if (id->driver_info & KVASER_USB_HAS_SILENT_MODE) + priv->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY; + + priv->can.ctrlmode_supported |= dev->card_data.ctrlmode_supported; + + if (priv->can.ctrlmode_supported & CAN_CTRLMODE_FD) { + priv->can.data_bittiming_const = dev->cfg->data_bittiming_const; + priv->can.do_set_data_bittiming = + dev->ops->dev_set_data_bittiming; + } + + netdev->flags |= IFF_ECHO; + + netdev->netdev_ops = &kvaser_usb_netdev_ops; + + SET_NETDEV_DEV(netdev, &dev->intf->dev); + netdev->dev_id = channel; + + dev->nets[channel] = priv; + + err = register_candev(netdev); + if (err) { + dev_err(&dev->intf->dev, "Failed to register CAN device\n"); + free_candev(netdev); + dev->nets[channel] = NULL; + return err; + } + + netdev_dbg(netdev, "device registered\n"); + + return 0; +} + +static int kvaser_usb_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ 
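+    /* Leaf and USBCAN devices share kvaser_usb_leaf_dev_ops and differ only
+     * in the leaf.family setting below; Hydra devices get their own ops
+     * struct, kvaser_usb_hydra_dev_ops.
+     */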
+ struct kvaser_usb *dev; + int err; + int i; + + dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + + if (kvaser_is_leaf(id)) { + dev->card_data.leaf.family = KVASER_LEAF; + dev->ops = &kvaser_usb_leaf_dev_ops; + } else if (kvaser_is_usbcan(id)) { + dev->card_data.leaf.family = KVASER_USBCAN; + dev->ops = &kvaser_usb_leaf_dev_ops; + } else if (kvaser_is_hydra(id)) { + dev->ops = &kvaser_usb_hydra_dev_ops; + } else { + dev_err(&intf->dev, + "Product ID (%d) is not a supported Kvaser USB device\n", + id->idProduct); + return -ENODEV; + } + + dev->intf = intf; + + err = dev->ops->dev_setup_endpoints(dev); + if (err) { + dev_err(&intf->dev, "Cannot get usb endpoint(s)"); + return err; + } + + dev->udev = interface_to_usbdev(intf); + + init_usb_anchor(&dev->rx_submitted); + + usb_set_intfdata(intf, dev); + + dev->card_data.ctrlmode_supported = 0; + dev->card_data.capabilities = 0; + err = dev->ops->dev_init_card(dev); + if (err) { + dev_err(&intf->dev, + "Failed to initialize card, error %d\n", err); + return err; + } + + err = dev->ops->dev_get_software_info(dev); + if (err) { + dev_err(&intf->dev, + "Cannot get software info, error %d\n", err); + return err; + } + + if (dev->ops->dev_get_software_details) { + err = dev->ops->dev_get_software_details(dev); + if (err) { + dev_err(&intf->dev, + "Cannot get software details, error %d\n", err); + return err; + } + } + + if (WARN_ON(!dev->cfg)) + return -ENODEV; + + dev_dbg(&intf->dev, "Firmware version: %d.%d.%d\n", + ((dev->fw_version >> 24) & 0xff), + ((dev->fw_version >> 16) & 0xff), + (dev->fw_version & 0xffff)); + + dev_dbg(&intf->dev, "Max outstanding tx = %d URBs\n", dev->max_tx_urbs); + + err = dev->ops->dev_get_card_info(dev); + if (err) { + dev_err(&intf->dev, "Cannot get card info, error %d\n", err); + return err; + } + + if (dev->ops->dev_get_capabilities) { + err = dev->ops->dev_get_capabilities(dev); + if (err) { + dev_err(&intf->dev, + "Cannot get capabilities, error %d\n", err); + kvaser_usb_remove_interfaces(dev); + return err; + } + } + + for (i = 0; i < dev->nchannels; i++) { + err = kvaser_usb_init_one(dev, id, i); + if (err) { + kvaser_usb_remove_interfaces(dev); + return err; + } + } + + return 0; +} + +static void kvaser_usb_disconnect(struct usb_interface *intf) +{ + struct kvaser_usb *dev = usb_get_intfdata(intf); + + usb_set_intfdata(intf, NULL); + + if (!dev) + return; + + kvaser_usb_remove_interfaces(dev); +} + +static struct usb_driver kvaser_usb_driver = { + .name = "kvaser_usb", + .probe = kvaser_usb_probe, + .disconnect = kvaser_usb_disconnect, + .id_table = kvaser_usb_table, +}; + +module_usb_driver(kvaser_usb_driver); + +MODULE_AUTHOR("Olivier Sobrie <olivier@sobrie.be>"); +MODULE_AUTHOR("Kvaser AB <support@kvaser.com>"); +MODULE_DESCRIPTION("CAN driver for Kvaser CAN/USB devices"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c new file mode 100644 index 000000000000..c084bae5ec0a --- /dev/null +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c @@ -0,0 +1,2028 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Parts of this driver are based on the following: + * - Kvaser linux mhydra driver (version 5.24) + * - CAN driver for esd CAN-USB/2 + * + * Copyright (C) 2018 KVASER AB, Sweden. All rights reserved. 
+ * Copyright (C) 2010 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh
+ *
+ * Known issues:
+ * - Transition from CAN_STATE_ERROR_WARNING to CAN_STATE_ERROR_ACTIVE is only
+ *   reported after a call to do_get_berr_counter(), since firmware does not
+ *   distinguish between ERROR_WARNING and ERROR_ACTIVE.
+ * - Hardware timestamps are not set for CAN Tx frames.
+ */
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/usb.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+#include <linux/can/netlink.h>
+
+#include "kvaser_usb.h"
+
+/* Forward declarations */
+static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_kcan;
+static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_flexc;
+
+#define KVASER_USB_HYDRA_BULK_EP_IN_ADDR 0x82
+#define KVASER_USB_HYDRA_BULK_EP_OUT_ADDR 0x02
+
+#define KVASER_USB_HYDRA_MAX_TRANSID 0xff
+#define KVASER_USB_HYDRA_MIN_TRANSID 0x01
+
+/* Minihydra command IDs */
+#define CMD_SET_BUSPARAMS_REQ 16
+#define CMD_GET_CHIP_STATE_REQ 19
+#define CMD_CHIP_STATE_EVENT 20
+#define CMD_SET_DRIVERMODE_REQ 21
+#define CMD_START_CHIP_REQ 26
+#define CMD_START_CHIP_RESP 27
+#define CMD_STOP_CHIP_REQ 28
+#define CMD_STOP_CHIP_RESP 29
+#define CMD_TX_CAN_MESSAGE 33
+#define CMD_GET_CARD_INFO_REQ 34
+#define CMD_GET_CARD_INFO_RESP 35
+#define CMD_GET_SOFTWARE_INFO_REQ 38
+#define CMD_GET_SOFTWARE_INFO_RESP 39
+#define CMD_ERROR_EVENT 45
+#define CMD_FLUSH_QUEUE 48
+#define CMD_TX_ACKNOWLEDGE 50
+#define CMD_FLUSH_QUEUE_RESP 66
+#define CMD_SET_BUSPARAMS_FD_REQ 69
+#define CMD_SET_BUSPARAMS_FD_RESP 70
+#define CMD_SET_BUSPARAMS_RESP 85
+#define CMD_GET_CAPABILITIES_REQ 95
+#define CMD_GET_CAPABILITIES_RESP 96
+#define CMD_RX_MESSAGE 106
+#define CMD_MAP_CHANNEL_REQ 200
+#define CMD_MAP_CHANNEL_RESP 201
+#define CMD_GET_SOFTWARE_DETAILS_REQ 202
+#define CMD_GET_SOFTWARE_DETAILS_RESP 203
+#define CMD_EXTENDED 255
+
+/* Minihydra extended command IDs */
+#define CMD_TX_CAN_MESSAGE_FD 224
+#define CMD_TX_ACKNOWLEDGE_FD 225
+#define CMD_RX_MESSAGE_FD 226
+
+/* Hydra commands are handled by different threads in firmware.
+ * The threads are denoted hydra entity (HE). Each HE has a unique 6-bit
+ * address. The address is used in hydra commands to get/set source and
+ * destination HE. There are two predefined HE addresses; the remaining
+ * addresses differ between devices and firmware versions. Hence, we need
+ * to enumerate the addresses (see kvaser_usb_hydra_map_channel()).
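+ *
+ * The responses (CMD_MAP_CHANNEL_RESP) are cached in
+ * card_data.hydra.channel_to_he[] and sysdbg_he; later commands use the
+ * cached value as their destination HE address.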
+ */ + +/* Well-known HE addresses */ +#define KVASER_USB_HYDRA_HE_ADDRESS_ROUTER 0x00 +#define KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL 0x3e + +#define KVASER_USB_HYDRA_TRANSID_CANHE 0x40 +#define KVASER_USB_HYDRA_TRANSID_SYSDBG 0x61 + +struct kvaser_cmd_map_ch_req { + char name[16]; + u8 channel; + u8 reserved[11]; +} __packed; + +struct kvaser_cmd_map_ch_res { + u8 he_addr; + u8 channel; + u8 reserved[26]; +} __packed; + +struct kvaser_cmd_card_info { + __le32 serial_number; + __le32 clock_res; + __le32 mfg_date; + __le32 ean[2]; + u8 hw_version; + u8 usb_mode; + u8 hw_type; + u8 reserved0; + u8 nchannels; + u8 reserved1[3]; +} __packed; + +struct kvaser_cmd_sw_info { + u8 reserved0[8]; + __le16 max_outstanding_tx; + u8 reserved1[18]; +} __packed; + +struct kvaser_cmd_sw_detail_req { + u8 use_ext_cmd; + u8 reserved[27]; +} __packed; + +/* Software detail flags */ +#define KVASER_USB_HYDRA_SW_FLAG_FW_BETA BIT(2) +#define KVASER_USB_HYDRA_SW_FLAG_FW_BAD BIT(4) +#define KVASER_USB_HYDRA_SW_FLAG_FREQ_80M BIT(5) +#define KVASER_USB_HYDRA_SW_FLAG_EXT_CMD BIT(9) +#define KVASER_USB_HYDRA_SW_FLAG_CANFD BIT(10) +#define KVASER_USB_HYDRA_SW_FLAG_NONISO BIT(11) +#define KVASER_USB_HYDRA_SW_FLAG_EXT_CAP BIT(12) +struct kvaser_cmd_sw_detail_res { + __le32 sw_flags; + __le32 sw_version; + __le32 sw_name; + __le32 ean[2]; + __le32 max_bitrate; + u8 reserved[4]; +} __packed; + +/* Sub commands for cap_req and cap_res */ +#define KVASER_USB_HYDRA_CAP_CMD_LISTEN_MODE 0x02 +#define KVASER_USB_HYDRA_CAP_CMD_ERR_REPORT 0x05 +#define KVASER_USB_HYDRA_CAP_CMD_ONE_SHOT 0x06 +struct kvaser_cmd_cap_req { + __le16 cap_cmd; + u8 reserved[26]; +} __packed; + +/* Status codes for cap_res */ +#define KVASER_USB_HYDRA_CAP_STAT_OK 0x00 +#define KVASER_USB_HYDRA_CAP_STAT_NOT_IMPL 0x01 +#define KVASER_USB_HYDRA_CAP_STAT_UNAVAIL 0x02 +struct kvaser_cmd_cap_res { + __le16 cap_cmd; + __le16 status; + __le32 mask; + __le32 value; + u8 reserved[16]; +} __packed; + +/* CMD_ERROR_EVENT error codes */ +#define KVASER_USB_HYDRA_ERROR_EVENT_CAN 0x01 +#define KVASER_USB_HYDRA_ERROR_EVENT_PARAM 0x09 +struct kvaser_cmd_error_event { + __le16 timestamp[3]; + u8 reserved; + u8 error_code; + __le16 info1; + __le16 info2; +} __packed; + +/* Chip state status flags. Used for chip_state_event and err_frame_data. 
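+ * Error-active has no flag of its own: it is encoded as the absence of both
+ * ERR_PASS and BUS_OFF (the 0x00 value below), so state decoding must check
+ * BUS_OFF and ERR_PASS before falling back to the error counters.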
*/ +#define KVASER_USB_HYDRA_BUS_ERR_ACT 0x00 +#define KVASER_USB_HYDRA_BUS_ERR_PASS BIT(5) +#define KVASER_USB_HYDRA_BUS_BUS_OFF BIT(6) +struct kvaser_cmd_chip_state_event { + __le16 timestamp[3]; + u8 tx_err_counter; + u8 rx_err_counter; + u8 bus_status; + u8 reserved[19]; +} __packed; + +/* Busparam modes */ +#define KVASER_USB_HYDRA_BUS_MODE_CAN 0x00 +#define KVASER_USB_HYDRA_BUS_MODE_CANFD_ISO 0x01 +#define KVASER_USB_HYDRA_BUS_MODE_NONISO 0x02 +struct kvaser_cmd_set_busparams { + __le32 bitrate; + u8 tseg1; + u8 tseg2; + u8 sjw; + u8 nsamples; + u8 reserved0[4]; + __le32 bitrate_d; + u8 tseg1_d; + u8 tseg2_d; + u8 sjw_d; + u8 nsamples_d; + u8 canfd_mode; + u8 reserved1[7]; +} __packed; + +/* Ctrl modes */ +#define KVASER_USB_HYDRA_CTRLMODE_NORMAL 0x01 +#define KVASER_USB_HYDRA_CTRLMODE_LISTEN 0x02 +struct kvaser_cmd_set_ctrlmode { + u8 mode; + u8 reserved[27]; +} __packed; + +struct kvaser_err_frame_data { + u8 bus_status; + u8 reserved0; + u8 tx_err_counter; + u8 rx_err_counter; + u8 reserved1[4]; +} __packed; + +struct kvaser_cmd_rx_can { + u8 cmd_len; + u8 cmd_no; + u8 channel; + u8 flags; + __le16 timestamp[3]; + u8 dlc; + u8 padding; + __le32 id; + union { + u8 data[8]; + struct kvaser_err_frame_data err_frame_data; + }; +} __packed; + +/* Extended CAN ID flag. Used in rx_can and tx_can */ +#define KVASER_USB_HYDRA_EXTENDED_FRAME_ID BIT(31) +struct kvaser_cmd_tx_can { + __le32 id; + u8 data[8]; + u8 dlc; + u8 flags; + __le16 transid; + u8 channel; + u8 reserved[11]; +} __packed; + +struct kvaser_cmd_header { + u8 cmd_no; + /* The destination HE address is stored in 0..5 of he_addr. + * The upper part of source HE address is stored in 6..7 of he_addr, and + * the lower part is stored in 12..15 of transid. + */ + u8 he_addr; + __le16 transid; +} __packed; + +struct kvaser_cmd { + struct kvaser_cmd_header header; + union { + struct kvaser_cmd_map_ch_req map_ch_req; + struct kvaser_cmd_map_ch_res map_ch_res; + + struct kvaser_cmd_card_info card_info; + struct kvaser_cmd_sw_info sw_info; + struct kvaser_cmd_sw_detail_req sw_detail_req; + struct kvaser_cmd_sw_detail_res sw_detail_res; + + struct kvaser_cmd_cap_req cap_req; + struct kvaser_cmd_cap_res cap_res; + + struct kvaser_cmd_error_event error_event; + + struct kvaser_cmd_set_busparams set_busparams_req; + + struct kvaser_cmd_chip_state_event chip_state_event; + + struct kvaser_cmd_set_ctrlmode set_ctrlmode; + + struct kvaser_cmd_rx_can rx_can; + struct kvaser_cmd_tx_can tx_can; + } __packed; +} __packed; + +/* CAN frame flags. Used in rx_can, ext_rx_can, tx_can and ext_tx_can */ +#define KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME BIT(0) +#define KVASER_USB_HYDRA_CF_FLAG_OVERRUN BIT(1) +#define KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME BIT(4) +#define KVASER_USB_HYDRA_CF_FLAG_EXTENDED_ID BIT(5) +/* CAN frame flags. Used in ext_rx_can and ext_tx_can */ +#define KVASER_USB_HYDRA_CF_FLAG_OSM_NACK BIT(12) +#define KVASER_USB_HYDRA_CF_FLAG_ABL BIT(13) +#define KVASER_USB_HYDRA_CF_FLAG_FDF BIT(16) +#define KVASER_USB_HYDRA_CF_FLAG_BRS BIT(17) +#define KVASER_USB_HYDRA_CF_FLAG_ESI BIT(18) + +/* KCAN packet header macros. 
Used in ext_rx_can and ext_tx_can */ +#define KVASER_USB_KCAN_DATA_DLC_BITS 4 +#define KVASER_USB_KCAN_DATA_DLC_SHIFT 8 +#define KVASER_USB_KCAN_DATA_DLC_MASK \ + GENMASK(KVASER_USB_KCAN_DATA_DLC_BITS - 1 + \ + KVASER_USB_KCAN_DATA_DLC_SHIFT, \ + KVASER_USB_KCAN_DATA_DLC_SHIFT) + +#define KVASER_USB_KCAN_DATA_BRS BIT(14) +#define KVASER_USB_KCAN_DATA_FDF BIT(15) +#define KVASER_USB_KCAN_DATA_OSM BIT(16) +#define KVASER_USB_KCAN_DATA_AREQ BIT(31) +#define KVASER_USB_KCAN_DATA_SRR BIT(31) +#define KVASER_USB_KCAN_DATA_RTR BIT(29) +#define KVASER_USB_KCAN_DATA_IDE BIT(30) +struct kvaser_cmd_ext_rx_can { + __le32 flags; + __le32 id; + __le32 kcan_id; + __le32 kcan_header; + __le64 timestamp; + union { + u8 kcan_payload[64]; + struct kvaser_err_frame_data err_frame_data; + }; +} __packed; + +struct kvaser_cmd_ext_tx_can { + __le32 flags; + __le32 id; + __le32 kcan_id; + __le32 kcan_header; + u8 databytes; + u8 dlc; + u8 reserved[6]; + u8 kcan_payload[64]; +} __packed; + +struct kvaser_cmd_ext_tx_ack { + __le32 flags; + u8 reserved0[4]; + __le64 timestamp; + u8 reserved1[8]; +} __packed; + +/* struct for extended commands (CMD_EXTENDED) */ +struct kvaser_cmd_ext { + struct kvaser_cmd_header header; + __le16 len; + u8 cmd_no_ext; + u8 reserved; + + union { + struct kvaser_cmd_ext_rx_can rx_can; + struct kvaser_cmd_ext_tx_can tx_can; + struct kvaser_cmd_ext_tx_ack tx_ack; + } __packed; +} __packed; + +static const struct can_bittiming_const kvaser_usb_hydra_kcan_bittiming_c = { + .name = "kvaser_usb_kcan", + .tseg1_min = 1, + .tseg1_max = 255, + .tseg2_min = 1, + .tseg2_max = 32, + .sjw_max = 16, + .brp_min = 1, + .brp_max = 4096, + .brp_inc = 1, +}; + +static const struct can_bittiming_const kvaser_usb_hydra_flexc_bittiming_c = { + .name = "kvaser_usb_flex", + .tseg1_min = 4, + .tseg1_max = 16, + .tseg2_min = 2, + .tseg2_max = 8, + .sjw_max = 4, + .brp_min = 1, + .brp_max = 256, + .brp_inc = 1, +}; + +#define KVASER_USB_HYDRA_TRANSID_BITS 12 +#define KVASER_USB_HYDRA_TRANSID_MASK \ + GENMASK(KVASER_USB_HYDRA_TRANSID_BITS - 1, 0) +#define KVASER_USB_HYDRA_HE_ADDR_SRC_MASK GENMASK(7, 6) +#define KVASER_USB_HYDRA_HE_ADDR_DEST_MASK GENMASK(5, 0) +#define KVASER_USB_HYDRA_HE_ADDR_SRC_BITS 2 +static inline u16 kvaser_usb_hydra_get_cmd_transid(const struct kvaser_cmd *cmd) +{ + return le16_to_cpu(cmd->header.transid) & KVASER_USB_HYDRA_TRANSID_MASK; +} + +static inline void kvaser_usb_hydra_set_cmd_transid(struct kvaser_cmd *cmd, + u16 transid) +{ + cmd->header.transid = + cpu_to_le16(transid & KVASER_USB_HYDRA_TRANSID_MASK); +} + +static inline u8 kvaser_usb_hydra_get_cmd_src_he(const struct kvaser_cmd *cmd) +{ + return (cmd->header.he_addr & KVASER_USB_HYDRA_HE_ADDR_SRC_MASK) >> + KVASER_USB_HYDRA_HE_ADDR_SRC_BITS | + le16_to_cpu(cmd->header.transid) >> + KVASER_USB_HYDRA_TRANSID_BITS; +} + +static inline void kvaser_usb_hydra_set_cmd_dest_he(struct kvaser_cmd *cmd, + u8 dest_he) +{ + cmd->header.he_addr = + (cmd->header.he_addr & KVASER_USB_HYDRA_HE_ADDR_SRC_MASK) | + (dest_he & KVASER_USB_HYDRA_HE_ADDR_DEST_MASK); +} + +static u8 kvaser_usb_hydra_channel_from_cmd(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + int i; + u8 channel = 0xff; + u8 src_he = kvaser_usb_hydra_get_cmd_src_he(cmd); + + for (i = 0; i < KVASER_USB_MAX_NET_DEVICES; i++) { + if (dev->card_data.hydra.channel_to_he[i] == src_he) { + channel = i; + break; + } + } + + return channel; +} + +static u16 kvaser_usb_hydra_get_next_transid(struct kvaser_usb *dev) +{ + unsigned long flags; + u16 transid; + struct 
kvaser_usb_dev_card_data_hydra *card_data =
+        &dev->card_data.hydra;
+
+    spin_lock_irqsave(&card_data->transid_lock, flags);
+    transid = card_data->transid;
+    if (transid >= KVASER_USB_HYDRA_MAX_TRANSID)
+        transid = KVASER_USB_HYDRA_MIN_TRANSID;
+    else
+        transid++;
+    card_data->transid = transid;
+    spin_unlock_irqrestore(&card_data->transid_lock, flags);
+
+    return transid;
+}
+
+static size_t kvaser_usb_hydra_cmd_size(struct kvaser_cmd *cmd)
+{
+    size_t ret;
+
+    if (cmd->header.cmd_no == CMD_EXTENDED)
+        ret = le16_to_cpu(((struct kvaser_cmd_ext *)cmd)->len);
+    else
+        ret = sizeof(struct kvaser_cmd);
+
+    return ret;
+}
+
+static struct kvaser_usb_net_priv *
+kvaser_usb_hydra_net_priv_from_cmd(const struct kvaser_usb *dev,
+                                   const struct kvaser_cmd *cmd)
+{
+    struct kvaser_usb_net_priv *priv = NULL;
+    u8 channel = kvaser_usb_hydra_channel_from_cmd(dev, cmd);
+
+    if (channel >= dev->nchannels)
+        dev_err(&dev->intf->dev,
+                "Invalid channel number (%d)\n", channel);
+    else
+        priv = dev->nets[channel];
+
+    return priv;
+}
+
+static ktime_t
+kvaser_usb_hydra_ktime_from_rx_cmd(const struct kvaser_usb_dev_cfg *cfg,
+                                   const struct kvaser_cmd *cmd)
+{
+    u64 ticks;
+
+    if (cmd->header.cmd_no == CMD_EXTENDED) {
+        struct kvaser_cmd_ext *cmd_ext = (struct kvaser_cmd_ext *)cmd;
+
+        ticks = le64_to_cpu(cmd_ext->rx_can.timestamp);
+    } else {
+        ticks = le16_to_cpu(cmd->rx_can.timestamp[0]);
+        ticks += (u64)(le16_to_cpu(cmd->rx_can.timestamp[1])) << 16;
+        ticks += (u64)(le16_to_cpu(cmd->rx_can.timestamp[2])) << 32;
+    }
+
+    return ns_to_ktime(div_u64(ticks * 1000, cfg->timestamp_freq));
+}
+
+static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev,
+                                            u8 cmd_no, int channel)
+{
+    struct kvaser_cmd *cmd;
+    int err;
+
+    cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+    if (!cmd)
+        return -ENOMEM;
+
+    cmd->header.cmd_no = cmd_no;
+    if (channel < 0) {
+        kvaser_usb_hydra_set_cmd_dest_he
+            (cmd, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL);
+    } else {
+        if (channel >= KVASER_USB_MAX_NET_DEVICES) {
+            dev_err(&dev->intf->dev, "channel (%d) out of range.\n",
+                    channel);
+            err = -EINVAL;
+            goto end;
+        }
+        kvaser_usb_hydra_set_cmd_dest_he
+            (cmd, dev->card_data.hydra.channel_to_he[channel]);
+    }
+    kvaser_usb_hydra_set_cmd_transid
+        (cmd, kvaser_usb_hydra_get_next_transid(dev));
+
+    err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
+    if (err)
+        goto end;
+
+end:
+    kfree(cmd);
+
+    return err;
+}
+
+static int
+kvaser_usb_hydra_send_simple_cmd_async(struct kvaser_usb_net_priv *priv,
+                                       u8 cmd_no)
+{
+    struct kvaser_cmd *cmd;
+    struct kvaser_usb *dev = priv->dev;
+    int err;
+
+    cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_ATOMIC);
+    if (!cmd)
+        return -ENOMEM;
+
+    cmd->header.cmd_no = cmd_no;
+
+    kvaser_usb_hydra_set_cmd_dest_he
+        (cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
+    kvaser_usb_hydra_set_cmd_transid
+        (cmd, kvaser_usb_hydra_get_next_transid(dev));
+
+    err = kvaser_usb_send_cmd_async(priv, cmd,
+                                    kvaser_usb_hydra_cmd_size(cmd));
+    if (err)
+        kfree(cmd);
+
+    return err;
+}
+
+/* This function is used for synchronously waiting on hydra control commands.
+ * Note: Compared to kvaser_usb_hydra_read_bulk_callback(), we never need to
+ * handle partial hydra commands, since hydra control commands are always
+ * non-extended commands.
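+ * A single bulk transfer may also carry several complete commands back to
+ * back, so the receive buffer is walked in kvaser_usb_hydra_cmd_size()
+ * steps until the awaited cmd_no is found.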
+ */ +static int kvaser_usb_hydra_wait_cmd(const struct kvaser_usb *dev, u8 cmd_no, + struct kvaser_cmd *cmd) +{ + void *buf; + int err; + unsigned long timeout = jiffies + msecs_to_jiffies(KVASER_USB_TIMEOUT); + + if (cmd->header.cmd_no == CMD_EXTENDED) { + dev_err(&dev->intf->dev, "Wait for CMD_EXTENDED not allowed\n"); + return -EINVAL; + } + + buf = kzalloc(KVASER_USB_RX_BUFFER_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + do { + int actual_len = 0; + int pos = 0; + + err = kvaser_usb_recv_cmd(dev, buf, KVASER_USB_RX_BUFFER_SIZE, + &actual_len); + if (err < 0) + goto end; + + while (pos < actual_len) { + struct kvaser_cmd *tmp_cmd; + size_t cmd_len; + + tmp_cmd = buf + pos; + cmd_len = kvaser_usb_hydra_cmd_size(tmp_cmd); + if (pos + cmd_len > actual_len) { + dev_err_ratelimited(&dev->intf->dev, + "Format error\n"); + break; + } + + if (tmp_cmd->header.cmd_no == cmd_no) { + memcpy(cmd, tmp_cmd, cmd_len); + goto end; + } + pos += cmd_len; + } + } while (time_before(jiffies, timeout)); + + err = -EINVAL; + +end: + kfree(buf); + + return err; +} + +static int kvaser_usb_hydra_map_channel_resp(struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + u8 he, channel; + u16 transid = kvaser_usb_hydra_get_cmd_transid(cmd); + struct kvaser_usb_dev_card_data_hydra *card_data = + &dev->card_data.hydra; + + if (transid > 0x007f || transid < 0x0040) { + dev_err(&dev->intf->dev, + "CMD_MAP_CHANNEL_RESP, invalid transid: 0x%x\n", + transid); + return -EINVAL; + } + + switch (transid) { + case KVASER_USB_HYDRA_TRANSID_CANHE: + case KVASER_USB_HYDRA_TRANSID_CANHE + 1: + case KVASER_USB_HYDRA_TRANSID_CANHE + 2: + case KVASER_USB_HYDRA_TRANSID_CANHE + 3: + case KVASER_USB_HYDRA_TRANSID_CANHE + 4: + channel = transid & 0x000f; + he = cmd->map_ch_res.he_addr; + card_data->channel_to_he[channel] = he; + break; + case KVASER_USB_HYDRA_TRANSID_SYSDBG: + card_data->sysdbg_he = cmd->map_ch_res.he_addr; + break; + default: + dev_warn(&dev->intf->dev, + "Unknown CMD_MAP_CHANNEL_RESP transid=0x%x\n", + transid); + break; + } + + return 0; +} + +static int kvaser_usb_hydra_map_channel(struct kvaser_usb *dev, u16 transid, + u8 channel, const char *name) +{ + struct kvaser_cmd *cmd; + int err; + + cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + strcpy(cmd->map_ch_req.name, name); + cmd->header.cmd_no = CMD_MAP_CHANNEL_REQ; + kvaser_usb_hydra_set_cmd_dest_he + (cmd, KVASER_USB_HYDRA_HE_ADDRESS_ROUTER); + cmd->map_ch_req.channel = channel; + + kvaser_usb_hydra_set_cmd_transid(cmd, transid); + + err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); + if (err) + goto end; + + err = kvaser_usb_hydra_wait_cmd(dev, CMD_MAP_CHANNEL_RESP, cmd); + if (err) + goto end; + + err = kvaser_usb_hydra_map_channel_resp(dev, cmd); + if (err) + goto end; + +end: + kfree(cmd); + + return err; +} + +static int kvaser_usb_hydra_get_single_capability(struct kvaser_usb *dev, + u16 cap_cmd_req, u16 *status) +{ + struct kvaser_usb_dev_card_data *card_data = &dev->card_data; + struct kvaser_cmd *cmd; + u32 value = 0; + u32 mask = 0; + u16 cap_cmd_res; + int err; + int i; + + cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->header.cmd_no = CMD_GET_CAPABILITIES_REQ; + cmd->cap_req.cap_cmd = cpu_to_le16(cap_cmd_req); + + kvaser_usb_hydra_set_cmd_dest_he(cmd, card_data->hydra.sysdbg_he); + kvaser_usb_hydra_set_cmd_transid + (cmd, kvaser_usb_hydra_get_next_transid(dev)); + + err = kvaser_usb_send_cmd(dev, cmd, 
kvaser_usb_hydra_cmd_size(cmd));
+    if (err)
+        goto end;
+
+    err = kvaser_usb_hydra_wait_cmd(dev, CMD_GET_CAPABILITIES_RESP, cmd);
+    if (err)
+        goto end;
+
+    *status = le16_to_cpu(cmd->cap_res.status);
+
+    if (*status != KVASER_USB_HYDRA_CAP_STAT_OK)
+        goto end;
+
+    cap_cmd_res = le16_to_cpu(cmd->cap_res.cap_cmd);
+    switch (cap_cmd_res) {
+    case KVASER_USB_HYDRA_CAP_CMD_LISTEN_MODE:
+    case KVASER_USB_HYDRA_CAP_CMD_ERR_REPORT:
+    case KVASER_USB_HYDRA_CAP_CMD_ONE_SHOT:
+        value = le32_to_cpu(cmd->cap_res.value);
+        mask = le32_to_cpu(cmd->cap_res.mask);
+        break;
+    default:
+        dev_warn(&dev->intf->dev, "Unknown capability command %u\n",
+                 cap_cmd_res);
+        break;
+    }
+
+    for (i = 0; i < dev->nchannels; i++) {
+        if (BIT(i) & (value & mask)) {
+            switch (cap_cmd_res) {
+            case KVASER_USB_HYDRA_CAP_CMD_LISTEN_MODE:
+                card_data->ctrlmode_supported |=
+                    CAN_CTRLMODE_LISTENONLY;
+                break;
+            case KVASER_USB_HYDRA_CAP_CMD_ERR_REPORT:
+                card_data->capabilities |=
+                    KVASER_USB_CAP_BERR_CAP;
+                break;
+            case KVASER_USB_HYDRA_CAP_CMD_ONE_SHOT:
+                card_data->ctrlmode_supported |=
+                    CAN_CTRLMODE_ONE_SHOT;
+                break;
+            }
+        }
+    }
+
+end:
+    kfree(cmd);
+
+    return err;
+}
+
+static void kvaser_usb_hydra_start_chip_reply(const struct kvaser_usb *dev,
+                                              const struct kvaser_cmd *cmd)
+{
+    struct kvaser_usb_net_priv *priv;
+
+    priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd);
+    if (!priv)
+        return;
+
+    if (completion_done(&priv->start_comp) &&
+        netif_queue_stopped(priv->netdev)) {
+        netif_wake_queue(priv->netdev);
+    } else {
+        netif_start_queue(priv->netdev);
+        complete(&priv->start_comp);
+    }
+}
+
+static void kvaser_usb_hydra_stop_chip_reply(const struct kvaser_usb *dev,
+                                             const struct kvaser_cmd *cmd)
+{
+    struct kvaser_usb_net_priv *priv;
+
+    priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd);
+    if (!priv)
+        return;
+
+    complete(&priv->stop_comp);
+}
+
+static void kvaser_usb_hydra_flush_queue_reply(const struct kvaser_usb *dev,
+                                               const struct kvaser_cmd *cmd)
+{
+    struct kvaser_usb_net_priv *priv;
+
+    priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd);
+    if (!priv)
+        return;
+
+    complete(&priv->flush_comp);
+}
+
+static void
+kvaser_usb_hydra_bus_status_to_can_state(const struct kvaser_usb_net_priv *priv,
+                                         u8 bus_status,
+                                         const struct can_berr_counter *bec,
+                                         enum can_state *new_state)
+{
+    if (bus_status & KVASER_USB_HYDRA_BUS_BUS_OFF) {
+        *new_state = CAN_STATE_BUS_OFF;
+    } else if (bus_status & KVASER_USB_HYDRA_BUS_ERR_PASS) {
+        *new_state = CAN_STATE_ERROR_PASSIVE;
+    } else if (bus_status == KVASER_USB_HYDRA_BUS_ERR_ACT) {
+        if (bec->txerr >= 128 || bec->rxerr >= 128) {
+            netdev_warn(priv->netdev,
+                        "ERR_ACTIVE but err tx=%u or rx=%u >=128\n",
+                        bec->txerr, bec->rxerr);
+            *new_state = CAN_STATE_ERROR_PASSIVE;
+        } else if (bec->txerr >= 96 || bec->rxerr >= 96) {
+            *new_state = CAN_STATE_ERROR_WARNING;
+        } else {
+            *new_state = CAN_STATE_ERROR_ACTIVE;
+        }
+    }
+}
+
+static void kvaser_usb_hydra_update_state(struct kvaser_usb_net_priv *priv,
+                                          u8 bus_status,
+                                          const struct can_berr_counter *bec)
+{
+    struct net_device *netdev = priv->netdev;
+    struct can_frame *cf;
+    struct sk_buff *skb;
+    struct net_device_stats *stats;
+    enum can_state new_state, old_state;
+
+    old_state = priv->can.state;
+
+    kvaser_usb_hydra_bus_status_to_can_state(priv, bus_status, bec,
+                                             &new_state);
+
+    if (new_state == old_state)
+        return;
+
+    /* Ignore state change if previous state was STOPPED and the new state
+     * is BUS_OFF. Firmware always reports this as BUS_OFF, since firmware
+     * does not distinguish between BUS_OFF and STOPPED.
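+     * Propagating the change here would fabricate a bus-off event for an
+     * interface that was taken down deliberately.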
+ */ + if (old_state == CAN_STATE_STOPPED && new_state == CAN_STATE_BUS_OFF) + return; + + skb = alloc_can_err_skb(netdev, &cf); + if (skb) { + enum can_state tx_state, rx_state; + + tx_state = (bec->txerr >= bec->rxerr) ? + new_state : CAN_STATE_ERROR_ACTIVE; + rx_state = (bec->txerr <= bec->rxerr) ? + new_state : CAN_STATE_ERROR_ACTIVE; + can_change_state(netdev, cf, tx_state, rx_state); + } + + if (new_state == CAN_STATE_BUS_OFF && old_state < CAN_STATE_BUS_OFF) { + if (!priv->can.restart_ms) + kvaser_usb_hydra_send_simple_cmd_async + (priv, CMD_STOP_CHIP_REQ); + + can_bus_off(netdev); + } + + if (!skb) { + netdev_warn(netdev, "No memory left for err_skb\n"); + return; + } + + if (priv->can.restart_ms && + old_state >= CAN_STATE_BUS_OFF && + new_state < CAN_STATE_BUS_OFF) + priv->can.can_stats.restarts++; + + cf->data[6] = bec->txerr; + cf->data[7] = bec->rxerr; + + stats = &netdev->stats; + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + netif_rx(skb); +} + +static void kvaser_usb_hydra_state_event(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + struct kvaser_usb_net_priv *priv; + struct can_berr_counter bec; + u8 bus_status; + + priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd); + if (!priv) + return; + + bus_status = cmd->chip_state_event.bus_status; + bec.txerr = cmd->chip_state_event.tx_err_counter; + bec.rxerr = cmd->chip_state_event.rx_err_counter; + + kvaser_usb_hydra_update_state(priv, bus_status, &bec); + priv->bec.txerr = bec.txerr; + priv->bec.rxerr = bec.rxerr; +} + +static void kvaser_usb_hydra_error_event_parameter(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + /* info1 will contain the offending cmd_no */ + switch (le16_to_cpu(cmd->error_event.info1)) { + case CMD_START_CHIP_REQ: + dev_warn(&dev->intf->dev, + "CMD_START_CHIP_REQ error in parameter\n"); + break; + + case CMD_STOP_CHIP_REQ: + dev_warn(&dev->intf->dev, + "CMD_STOP_CHIP_REQ error in parameter\n"); + break; + + case CMD_FLUSH_QUEUE: + dev_warn(&dev->intf->dev, + "CMD_FLUSH_QUEUE error in parameter\n"); + break; + + case CMD_SET_BUSPARAMS_REQ: + dev_warn(&dev->intf->dev, + "Set bittiming failed. Error in parameter\n"); + break; + + case CMD_SET_BUSPARAMS_FD_REQ: + dev_warn(&dev->intf->dev, + "Set data bittiming failed. Error in parameter\n"); + break; + + default: + dev_warn(&dev->intf->dev, + "Unhandled parameter error event cmd_no (%u)\n", + le16_to_cpu(cmd->error_event.info1)); + break; + } +} + +static void kvaser_usb_hydra_error_event(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + switch (cmd->error_event.error_code) { + case KVASER_USB_HYDRA_ERROR_EVENT_PARAM: + kvaser_usb_hydra_error_event_parameter(dev, cmd); + break; + + case KVASER_USB_HYDRA_ERROR_EVENT_CAN: + /* Wrong channel mapping?! This should never happen! 
+ * info1 will contain the offending cmd_no + */ + dev_err(&dev->intf->dev, + "Received CAN error event for cmd_no (%u)\n", + le16_to_cpu(cmd->error_event.info1)); + break; + + default: + dev_warn(&dev->intf->dev, + "Unhandled error event (%d)\n", + cmd->error_event.error_code); + break; + } +} + +static void +kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv, + const struct kvaser_err_frame_data *err_frame_data, + ktime_t hwtstamp) +{ + struct net_device *netdev = priv->netdev; + struct net_device_stats *stats = &netdev->stats; + struct can_frame *cf; + struct sk_buff *skb; + struct skb_shared_hwtstamps *shhwtstamps; + struct can_berr_counter bec; + enum can_state new_state, old_state; + u8 bus_status; + + priv->can.can_stats.bus_error++; + stats->rx_errors++; + + bus_status = err_frame_data->bus_status; + bec.txerr = err_frame_data->tx_err_counter; + bec.rxerr = err_frame_data->rx_err_counter; + + old_state = priv->can.state; + kvaser_usb_hydra_bus_status_to_can_state(priv, bus_status, &bec, + &new_state); + + skb = alloc_can_err_skb(netdev, &cf); + + if (new_state != old_state) { + if (skb) { + enum can_state tx_state, rx_state; + + tx_state = (bec.txerr >= bec.rxerr) ? + new_state : CAN_STATE_ERROR_ACTIVE; + rx_state = (bec.txerr <= bec.rxerr) ? + new_state : CAN_STATE_ERROR_ACTIVE; + + can_change_state(netdev, cf, tx_state, rx_state); + } + + if (new_state == CAN_STATE_BUS_OFF) { + if (!priv->can.restart_ms) + kvaser_usb_hydra_send_simple_cmd_async + (priv, CMD_STOP_CHIP_REQ); + + can_bus_off(netdev); + } + + if (priv->can.restart_ms && + old_state >= CAN_STATE_BUS_OFF && + new_state < CAN_STATE_BUS_OFF) + cf->can_id |= CAN_ERR_RESTARTED; + } + + if (!skb) { + stats->rx_dropped++; + netdev_warn(netdev, "No memory left for err_skb\n"); + return; + } + + shhwtstamps = skb_hwtstamps(skb); + shhwtstamps->hwtstamp = hwtstamp; + + cf->can_id |= CAN_ERR_BUSERROR; + cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; + + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + netif_rx(skb); + + priv->bec.txerr = bec.txerr; + priv->bec.rxerr = bec.rxerr; +} + +static void kvaser_usb_hydra_one_shot_fail(struct kvaser_usb_net_priv *priv, + const struct kvaser_cmd_ext *cmd) +{ + struct net_device *netdev = priv->netdev; + struct net_device_stats *stats = &netdev->stats; + struct can_frame *cf; + struct sk_buff *skb; + u32 flags; + + skb = alloc_can_err_skb(netdev, &cf); + if (!skb) { + stats->rx_dropped++; + netdev_warn(netdev, "No memory left for err_skb\n"); + return; + } + + cf->can_id |= CAN_ERR_BUSERROR; + flags = le32_to_cpu(cmd->tx_ack.flags); + + if (flags & KVASER_USB_HYDRA_CF_FLAG_OSM_NACK) + cf->can_id |= CAN_ERR_ACK; + if (flags & KVASER_USB_HYDRA_CF_FLAG_ABL) { + cf->can_id |= CAN_ERR_LOSTARB; + priv->can.can_stats.arbitration_lost++; + } + + stats->tx_errors++; + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + netif_rx(skb); +} + +static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + struct kvaser_usb_tx_urb_context *context; + struct kvaser_usb_net_priv *priv; + unsigned long irq_flags; + bool one_shot_fail = false; + u16 transid = kvaser_usb_hydra_get_cmd_transid(cmd); + + priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd); + if (!priv) + return; + + if (!netif_device_present(priv->netdev)) + return; + + if (cmd->header.cmd_no == CMD_EXTENDED) { + struct kvaser_cmd_ext *cmd_ext = (struct kvaser_cmd_ext *)cmd; + u32 flags = le32_to_cpu(cmd_ext->tx_ack.flags); + + if (flags & 
(KVASER_USB_HYDRA_CF_FLAG_OSM_NACK | + KVASER_USB_HYDRA_CF_FLAG_ABL)) { + kvaser_usb_hydra_one_shot_fail(priv, cmd_ext); + one_shot_fail = true; + } + } + + context = &priv->tx_contexts[transid % dev->max_tx_urbs]; + if (!one_shot_fail) { + struct net_device_stats *stats = &priv->netdev->stats; + + stats->tx_packets++; + stats->tx_bytes += can_dlc2len(context->dlc); + } + + spin_lock_irqsave(&priv->tx_contexts_lock, irq_flags); + + can_get_echo_skb(priv->netdev, context->echo_index); + context->echo_index = dev->max_tx_urbs; + --priv->active_tx_contexts; + netif_wake_queue(priv->netdev); + + spin_unlock_irqrestore(&priv->tx_contexts_lock, irq_flags); +} + +static void kvaser_usb_hydra_rx_msg_std(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + struct kvaser_usb_net_priv *priv = NULL; + struct can_frame *cf; + struct sk_buff *skb; + struct skb_shared_hwtstamps *shhwtstamps; + struct net_device_stats *stats; + u8 flags; + ktime_t hwtstamp; + + priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd); + if (!priv) + return; + + stats = &priv->netdev->stats; + + flags = cmd->rx_can.flags; + hwtstamp = kvaser_usb_hydra_ktime_from_rx_cmd(dev->cfg, cmd); + + if (flags & KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME) { + kvaser_usb_hydra_error_frame(priv, &cmd->rx_can.err_frame_data, + hwtstamp); + return; + } + + skb = alloc_can_skb(priv->netdev, &cf); + if (!skb) { + stats->rx_dropped++; + return; + } + + shhwtstamps = skb_hwtstamps(skb); + shhwtstamps->hwtstamp = hwtstamp; + + cf->can_id = le32_to_cpu(cmd->rx_can.id); + + if (cf->can_id & KVASER_USB_HYDRA_EXTENDED_FRAME_ID) { + cf->can_id &= CAN_EFF_MASK; + cf->can_id |= CAN_EFF_FLAG; + } else { + cf->can_id &= CAN_SFF_MASK; + } + + if (flags & KVASER_USB_HYDRA_CF_FLAG_OVERRUN) + kvaser_usb_can_rx_over_error(priv->netdev); + + cf->can_dlc = get_can_dlc(cmd->rx_can.dlc); + + if (flags & KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME) + cf->can_id |= CAN_RTR_FLAG; + else + memcpy(cf->data, cmd->rx_can.data, cf->can_dlc); + + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + netif_rx(skb); +} + +static void kvaser_usb_hydra_rx_msg_ext(const struct kvaser_usb *dev, + const struct kvaser_cmd_ext *cmd) +{ + struct kvaser_cmd *std_cmd = (struct kvaser_cmd *)cmd; + struct kvaser_usb_net_priv *priv; + struct canfd_frame *cf; + struct sk_buff *skb; + struct skb_shared_hwtstamps *shhwtstamps; + struct net_device_stats *stats; + u32 flags; + u8 dlc; + u32 kcan_header; + ktime_t hwtstamp; + + priv = kvaser_usb_hydra_net_priv_from_cmd(dev, std_cmd); + if (!priv) + return; + + stats = &priv->netdev->stats; + + kcan_header = le32_to_cpu(cmd->rx_can.kcan_header); + dlc = (kcan_header & KVASER_USB_KCAN_DATA_DLC_MASK) >> + KVASER_USB_KCAN_DATA_DLC_SHIFT; + + flags = le32_to_cpu(cmd->rx_can.flags); + hwtstamp = kvaser_usb_hydra_ktime_from_rx_cmd(dev->cfg, std_cmd); + + if (flags & KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME) { + kvaser_usb_hydra_error_frame(priv, &cmd->rx_can.err_frame_data, + hwtstamp); + return; + } + + if (flags & KVASER_USB_HYDRA_CF_FLAG_FDF) + skb = alloc_canfd_skb(priv->netdev, &cf); + else + skb = alloc_can_skb(priv->netdev, (struct can_frame **)&cf); + + if (!skb) { + stats->rx_dropped++; + return; + } + + shhwtstamps = skb_hwtstamps(skb); + shhwtstamps->hwtstamp = hwtstamp; + + cf->can_id = le32_to_cpu(cmd->rx_can.id); + + if (flags & KVASER_USB_HYDRA_CF_FLAG_EXTENDED_ID) { + cf->can_id &= CAN_EFF_MASK; + cf->can_id |= CAN_EFF_FLAG; + } else { + cf->can_id &= CAN_SFF_MASK; + } + + if (flags & KVASER_USB_HYDRA_CF_FLAG_OVERRUN) + 
kvaser_usb_can_rx_over_error(priv->netdev); + + if (flags & KVASER_USB_HYDRA_CF_FLAG_FDF) { + cf->len = can_dlc2len(get_canfd_dlc(dlc)); + if (flags & KVASER_USB_HYDRA_CF_FLAG_BRS) + cf->flags |= CANFD_BRS; + if (flags & KVASER_USB_HYDRA_CF_FLAG_ESI) + cf->flags |= CANFD_ESI; + } else { + cf->len = get_can_dlc(dlc); + } + + if (flags & KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME) + cf->can_id |= CAN_RTR_FLAG; + else + memcpy(cf->data, cmd->rx_can.kcan_payload, cf->len); + + stats->rx_packets++; + stats->rx_bytes += cf->len; + netif_rx(skb); +} + +static void kvaser_usb_hydra_handle_cmd_std(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + switch (cmd->header.cmd_no) { + case CMD_START_CHIP_RESP: + kvaser_usb_hydra_start_chip_reply(dev, cmd); + break; + + case CMD_STOP_CHIP_RESP: + kvaser_usb_hydra_stop_chip_reply(dev, cmd); + break; + + case CMD_FLUSH_QUEUE_RESP: + kvaser_usb_hydra_flush_queue_reply(dev, cmd); + break; + + case CMD_CHIP_STATE_EVENT: + kvaser_usb_hydra_state_event(dev, cmd); + break; + + case CMD_ERROR_EVENT: + kvaser_usb_hydra_error_event(dev, cmd); + break; + + case CMD_TX_ACKNOWLEDGE: + kvaser_usb_hydra_tx_acknowledge(dev, cmd); + break; + + case CMD_RX_MESSAGE: + kvaser_usb_hydra_rx_msg_std(dev, cmd); + break; + + /* Ignored commands */ + case CMD_SET_BUSPARAMS_RESP: + case CMD_SET_BUSPARAMS_FD_RESP: + break; + + default: + dev_warn(&dev->intf->dev, "Unhandled command (%d)\n", + cmd->header.cmd_no); + break; + } +} + +static void kvaser_usb_hydra_handle_cmd_ext(const struct kvaser_usb *dev, + const struct kvaser_cmd_ext *cmd) +{ + switch (cmd->cmd_no_ext) { + case CMD_TX_ACKNOWLEDGE_FD: + kvaser_usb_hydra_tx_acknowledge(dev, (struct kvaser_cmd *)cmd); + break; + + case CMD_RX_MESSAGE_FD: + kvaser_usb_hydra_rx_msg_ext(dev, cmd); + break; + + default: + dev_warn(&dev->intf->dev, "Unhandled extended command (%d)\n", + cmd->header.cmd_no); + break; + } +} + +static void kvaser_usb_hydra_handle_cmd(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + if (cmd->header.cmd_no == CMD_EXTENDED) + kvaser_usb_hydra_handle_cmd_ext + (dev, (struct kvaser_cmd_ext *)cmd); + else + kvaser_usb_hydra_handle_cmd_std(dev, cmd); +} + +static void * +kvaser_usb_hydra_frame_to_cmd_ext(const struct kvaser_usb_net_priv *priv, + const struct sk_buff *skb, int *frame_len, + int *cmd_len, u16 transid) +{ + struct kvaser_usb *dev = priv->dev; + struct kvaser_cmd_ext *cmd; + struct canfd_frame *cf = (struct canfd_frame *)skb->data; + u8 dlc = can_len2dlc(cf->len); + u8 nbr_of_bytes = cf->len; + u32 flags; + u32 id; + u32 kcan_id; + u32 kcan_header; + + *frame_len = nbr_of_bytes; + + cmd = kcalloc(1, sizeof(struct kvaser_cmd_ext), GFP_ATOMIC); + if (!cmd) + return NULL; + + kvaser_usb_hydra_set_cmd_dest_he + ((struct kvaser_cmd *)cmd, + dev->card_data.hydra.channel_to_he[priv->channel]); + kvaser_usb_hydra_set_cmd_transid((struct kvaser_cmd *)cmd, transid); + + cmd->header.cmd_no = CMD_EXTENDED; + cmd->cmd_no_ext = CMD_TX_CAN_MESSAGE_FD; + + *cmd_len = ALIGN(sizeof(struct kvaser_cmd_ext) - + sizeof(cmd->tx_can.kcan_payload) + nbr_of_bytes, + 8); + + cmd->len = cpu_to_le16(*cmd_len); + + cmd->tx_can.databytes = nbr_of_bytes; + cmd->tx_can.dlc = dlc; + + if (cf->can_id & CAN_EFF_FLAG) { + id = cf->can_id & CAN_EFF_MASK; + flags = KVASER_USB_HYDRA_CF_FLAG_EXTENDED_ID; + kcan_id = (cf->can_id & CAN_EFF_MASK) | + KVASER_USB_KCAN_DATA_IDE | KVASER_USB_KCAN_DATA_SRR; + } else { + id = cf->can_id & CAN_SFF_MASK; + flags = 0; + kcan_id = cf->can_id & CAN_SFF_MASK; + } + + if 
(cf->can_id & CAN_ERR_FLAG) + flags |= KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME; + + kcan_header = ((dlc << KVASER_USB_KCAN_DATA_DLC_SHIFT) & + KVASER_USB_KCAN_DATA_DLC_MASK) | + KVASER_USB_KCAN_DATA_AREQ | + (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT ? + KVASER_USB_KCAN_DATA_OSM : 0); + + if (can_is_canfd_skb(skb)) { + kcan_header |= KVASER_USB_KCAN_DATA_FDF | + (cf->flags & CANFD_BRS ? + KVASER_USB_KCAN_DATA_BRS : 0); + } else { + if (cf->can_id & CAN_RTR_FLAG) { + kcan_id |= KVASER_USB_KCAN_DATA_RTR; + cmd->tx_can.databytes = 0; + flags |= KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME; + } + } + + cmd->tx_can.kcan_id = cpu_to_le32(kcan_id); + cmd->tx_can.id = cpu_to_le32(id); + cmd->tx_can.flags = cpu_to_le32(flags); + cmd->tx_can.kcan_header = cpu_to_le32(kcan_header); + + memcpy(cmd->tx_can.kcan_payload, cf->data, nbr_of_bytes); + + return cmd; +} + +static void * +kvaser_usb_hydra_frame_to_cmd_std(const struct kvaser_usb_net_priv *priv, + const struct sk_buff *skb, int *frame_len, + int *cmd_len, u16 transid) +{ + struct kvaser_usb *dev = priv->dev; + struct kvaser_cmd *cmd; + struct can_frame *cf = (struct can_frame *)skb->data; + u32 flags; + u32 id; + + *frame_len = cf->can_dlc; + + cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_ATOMIC); + if (!cmd) + return NULL; + + kvaser_usb_hydra_set_cmd_dest_he + (cmd, dev->card_data.hydra.channel_to_he[priv->channel]); + kvaser_usb_hydra_set_cmd_transid(cmd, transid); + + cmd->header.cmd_no = CMD_TX_CAN_MESSAGE; + + *cmd_len = ALIGN(sizeof(struct kvaser_cmd), 8); + + if (cf->can_id & CAN_EFF_FLAG) { + id = (cf->can_id & CAN_EFF_MASK); + id |= KVASER_USB_HYDRA_EXTENDED_FRAME_ID; + } else { + id = cf->can_id & CAN_SFF_MASK; + } + + cmd->tx_can.dlc = cf->can_dlc; + + flags = (cf->can_id & CAN_EFF_FLAG ? + KVASER_USB_HYDRA_CF_FLAG_EXTENDED_ID : 0); + + if (cf->can_id & CAN_RTR_FLAG) + flags |= KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME; + + flags |= (cf->can_id & CAN_ERR_FLAG ? 
+ KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME : 0); + + cmd->tx_can.id = cpu_to_le32(id); + cmd->tx_can.flags = flags; + + memcpy(cmd->tx_can.data, cf->data, *frame_len); + + return cmd; +} + +static int kvaser_usb_hydra_set_mode(struct net_device *netdev, + enum can_mode mode) +{ + int err = 0; + + switch (mode) { + case CAN_MODE_START: + /* CAN controller automatically recovers from BUS_OFF */ + break; + default: + err = -EOPNOTSUPP; + } + + return err; +} + +static int kvaser_usb_hydra_set_bittiming(struct net_device *netdev) +{ + struct kvaser_cmd *cmd; + struct kvaser_usb_net_priv *priv = netdev_priv(netdev); + struct can_bittiming *bt = &priv->can.bittiming; + struct kvaser_usb *dev = priv->dev; + int tseg1 = bt->prop_seg + bt->phase_seg1; + int tseg2 = bt->phase_seg2; + int sjw = bt->sjw; + int err; + + cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->header.cmd_no = CMD_SET_BUSPARAMS_REQ; + cmd->set_busparams_req.bitrate = cpu_to_le32(bt->bitrate); + cmd->set_busparams_req.sjw = (u8)sjw; + cmd->set_busparams_req.tseg1 = (u8)tseg1; + cmd->set_busparams_req.tseg2 = (u8)tseg2; + cmd->set_busparams_req.nsamples = 1; + + kvaser_usb_hydra_set_cmd_dest_he + (cmd, dev->card_data.hydra.channel_to_he[priv->channel]); + kvaser_usb_hydra_set_cmd_transid + (cmd, kvaser_usb_hydra_get_next_transid(dev)); + + err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); + + kfree(cmd); + + return err; +} + +static int kvaser_usb_hydra_set_data_bittiming(struct net_device *netdev) +{ + struct kvaser_cmd *cmd; + struct kvaser_usb_net_priv *priv = netdev_priv(netdev); + struct can_bittiming *dbt = &priv->can.data_bittiming; + struct kvaser_usb *dev = priv->dev; + int tseg1 = dbt->prop_seg + dbt->phase_seg1; + int tseg2 = dbt->phase_seg2; + int sjw = dbt->sjw; + int err; + + cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->header.cmd_no = CMD_SET_BUSPARAMS_FD_REQ; + cmd->set_busparams_req.bitrate_d = cpu_to_le32(dbt->bitrate); + cmd->set_busparams_req.sjw_d = (u8)sjw; + cmd->set_busparams_req.tseg1_d = (u8)tseg1; + cmd->set_busparams_req.tseg2_d = (u8)tseg2; + cmd->set_busparams_req.nsamples_d = 1; + + if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { + if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) + cmd->set_busparams_req.canfd_mode = + KVASER_USB_HYDRA_BUS_MODE_NONISO; + else + cmd->set_busparams_req.canfd_mode = + KVASER_USB_HYDRA_BUS_MODE_CANFD_ISO; + } + + kvaser_usb_hydra_set_cmd_dest_he + (cmd, dev->card_data.hydra.channel_to_he[priv->channel]); + kvaser_usb_hydra_set_cmd_transid + (cmd, kvaser_usb_hydra_get_next_transid(dev)); + + err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); + + kfree(cmd); + + return err; +} + +static int kvaser_usb_hydra_get_berr_counter(const struct net_device *netdev, + struct can_berr_counter *bec) +{ + struct kvaser_usb_net_priv *priv = netdev_priv(netdev); + int err; + + err = kvaser_usb_hydra_send_simple_cmd(priv->dev, + CMD_GET_CHIP_STATE_REQ, + priv->channel); + if (err) + return err; + + *bec = priv->bec; + + return 0; +} + +static int kvaser_usb_hydra_setup_endpoints(struct kvaser_usb *dev) +{ + const struct usb_host_interface *iface_desc; + struct usb_endpoint_descriptor *ep; + int i; + + iface_desc = &dev->intf->altsetting[0]; + + for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { + ep = &iface_desc->endpoint[i].desc; + + if (!dev->bulk_in && usb_endpoint_is_bulk_in(ep) && + ep->bEndpointAddress == KVASER_USB_HYDRA_BULK_EP_IN_ADDR) + 
dev->bulk_in = ep; + + if (!dev->bulk_out && usb_endpoint_is_bulk_out(ep) && + ep->bEndpointAddress == KVASER_USB_HYDRA_BULK_EP_OUT_ADDR) + dev->bulk_out = ep; + + if (dev->bulk_in && dev->bulk_out) + return 0; + } + + return -ENODEV; +} + +static int kvaser_usb_hydra_init_card(struct kvaser_usb *dev) +{ + int err; + unsigned int i; + struct kvaser_usb_dev_card_data_hydra *card_data = + &dev->card_data.hydra; + + card_data->transid = KVASER_USB_HYDRA_MIN_TRANSID; + spin_lock_init(&card_data->transid_lock); + + memset(card_data->usb_rx_leftover, 0, KVASER_USB_HYDRA_MAX_CMD_LEN); + card_data->usb_rx_leftover_len = 0; + spin_lock_init(&card_data->usb_rx_leftover_lock); + + memset(card_data->channel_to_he, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL, + sizeof(card_data->channel_to_he)); + card_data->sysdbg_he = 0; + + for (i = 0; i < KVASER_USB_MAX_NET_DEVICES; i++) { + err = kvaser_usb_hydra_map_channel + (dev, + (KVASER_USB_HYDRA_TRANSID_CANHE | i), + i, "CAN"); + if (err) { + dev_err(&dev->intf->dev, + "CMD_MAP_CHANNEL_REQ failed for CAN%u\n", i); + return err; + } + } + + err = kvaser_usb_hydra_map_channel(dev, KVASER_USB_HYDRA_TRANSID_SYSDBG, + 0, "SYSDBG"); + if (err) { + dev_err(&dev->intf->dev, + "CMD_MAP_CHANNEL_REQ failed for SYSDBG\n"); + return err; + } + + return 0; +} + +static int kvaser_usb_hydra_get_software_info(struct kvaser_usb *dev) +{ + struct kvaser_cmd cmd; + int err; + + err = kvaser_usb_hydra_send_simple_cmd(dev, CMD_GET_SOFTWARE_INFO_REQ, + -1); + if (err) + return err; + + memset(&cmd, 0, sizeof(struct kvaser_cmd)); + err = kvaser_usb_hydra_wait_cmd(dev, CMD_GET_SOFTWARE_INFO_RESP, &cmd); + if (err) + return err; + + dev->max_tx_urbs = min_t(unsigned int, KVASER_USB_MAX_TX_URBS, + le16_to_cpu(cmd.sw_info.max_outstanding_tx)); + + return 0; +} + +static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev) +{ + struct kvaser_cmd *cmd; + int err; + u32 flags; + struct kvaser_usb_dev_card_data *card_data = &dev->card_data; + + cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->header.cmd_no = CMD_GET_SOFTWARE_DETAILS_REQ; + cmd->sw_detail_req.use_ext_cmd = 1; + kvaser_usb_hydra_set_cmd_dest_he + (cmd, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL); + + kvaser_usb_hydra_set_cmd_transid + (cmd, kvaser_usb_hydra_get_next_transid(dev)); + + err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); + if (err) + goto end; + + err = kvaser_usb_hydra_wait_cmd(dev, CMD_GET_SOFTWARE_DETAILS_RESP, + cmd); + if (err) + goto end; + + dev->fw_version = le32_to_cpu(cmd->sw_detail_res.sw_version); + flags = le32_to_cpu(cmd->sw_detail_res.sw_flags); + + if (flags & KVASER_USB_HYDRA_SW_FLAG_FW_BAD) { + dev_err(&dev->intf->dev, + "Bad firmware, device refuses to run!\n"); + err = -EINVAL; + goto end; + } + + if (flags & KVASER_USB_HYDRA_SW_FLAG_FW_BETA) + dev_info(&dev->intf->dev, "Beta firmware in use\n"); + + if (flags & KVASER_USB_HYDRA_SW_FLAG_EXT_CAP) + card_data->capabilities |= KVASER_USB_CAP_EXT_CAP; + + if (flags & KVASER_USB_HYDRA_SW_FLAG_EXT_CMD) + card_data->capabilities |= KVASER_USB_HYDRA_CAP_EXT_CMD; + + if (flags & KVASER_USB_HYDRA_SW_FLAG_CANFD) + card_data->ctrlmode_supported |= CAN_CTRLMODE_FD; + + if (flags & KVASER_USB_HYDRA_SW_FLAG_NONISO) + card_data->ctrlmode_supported |= CAN_CTRLMODE_FD_NON_ISO; + + if (flags & KVASER_USB_HYDRA_SW_FLAG_FREQ_80M) + dev->cfg = &kvaser_usb_hydra_dev_cfg_kcan; + else + dev->cfg = &kvaser_usb_hydra_dev_cfg_flexc; + +end: + kfree(cmd); + + return err; +} + +static int
kvaser_usb_hydra_get_card_info(struct kvaser_usb *dev) +{ + struct kvaser_cmd cmd; + int err; + + err = kvaser_usb_hydra_send_simple_cmd(dev, CMD_GET_CARD_INFO_REQ, -1); + if (err) + return err; + + memset(&cmd, 0, sizeof(struct kvaser_cmd)); + err = kvaser_usb_hydra_wait_cmd(dev, CMD_GET_CARD_INFO_RESP, &cmd); + if (err) + return err; + + dev->nchannels = cmd.card_info.nchannels; + if (dev->nchannels > KVASER_USB_MAX_NET_DEVICES) + return -EINVAL; + + return 0; +} + +static int kvaser_usb_hydra_get_capabilities(struct kvaser_usb *dev) +{ + int err; + u16 status; + + if (!(dev->card_data.capabilities & KVASER_USB_CAP_EXT_CAP)) { + dev_info(&dev->intf->dev, + "No extended capability support. Upgrade your device.\n"); + return 0; + } + + err = kvaser_usb_hydra_get_single_capability + (dev, + KVASER_USB_HYDRA_CAP_CMD_LISTEN_MODE, + &status); + if (err) + return err; + if (status) + dev_info(&dev->intf->dev, + "KVASER_USB_HYDRA_CAP_CMD_LISTEN_MODE failed %u\n", + status); + + err = kvaser_usb_hydra_get_single_capability + (dev, + KVASER_USB_HYDRA_CAP_CMD_ERR_REPORT, + &status); + if (err) + return err; + if (status) + dev_info(&dev->intf->dev, + "KVASER_USB_HYDRA_CAP_CMD_ERR_REPORT failed %u\n", + status); + + err = kvaser_usb_hydra_get_single_capability + (dev, KVASER_USB_HYDRA_CAP_CMD_ONE_SHOT, + &status); + if (err) + return err; + if (status) + dev_info(&dev->intf->dev, + "KVASER_USB_HYDRA_CAP_CMD_ONE_SHOT failed %u\n", + status); + + return 0; +} + +static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv) +{ + struct kvaser_usb *dev = priv->dev; + struct kvaser_cmd *cmd; + int err; + + if ((priv->can.ctrlmode & + (CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO)) == + CAN_CTRLMODE_FD_NON_ISO) { + netdev_warn(priv->netdev, + "CTRLMODE_FD shall be on if CTRLMODE_FD_NON_ISO is on\n"); + return -EINVAL; + } + + cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->header.cmd_no = CMD_SET_DRIVERMODE_REQ; + kvaser_usb_hydra_set_cmd_dest_he + (cmd, dev->card_data.hydra.channel_to_he[priv->channel]); + kvaser_usb_hydra_set_cmd_transid + (cmd, kvaser_usb_hydra_get_next_transid(dev)); + if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) + cmd->set_ctrlmode.mode = KVASER_USB_HYDRA_CTRLMODE_LISTEN; + else + cmd->set_ctrlmode.mode = KVASER_USB_HYDRA_CTRLMODE_NORMAL; + + err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); + kfree(cmd); + + return err; +} + +static int kvaser_usb_hydra_start_chip(struct kvaser_usb_net_priv *priv) +{ + int err; + + init_completion(&priv->start_comp); + + err = kvaser_usb_hydra_send_simple_cmd(priv->dev, CMD_START_CHIP_REQ, + priv->channel); + if (err) + return err; + + if (!wait_for_completion_timeout(&priv->start_comp, + msecs_to_jiffies(KVASER_USB_TIMEOUT))) + return -ETIMEDOUT; + + return 0; +} + +static int kvaser_usb_hydra_stop_chip(struct kvaser_usb_net_priv *priv) +{ + int err; + + init_completion(&priv->stop_comp); + + /* Make sure we do not report invalid BUS_OFF from CMD_CHIP_STATE_EVENT + * see comment in kvaser_usb_hydra_update_state() + */ + priv->can.state = CAN_STATE_STOPPED; + + err = kvaser_usb_hydra_send_simple_cmd(priv->dev, CMD_STOP_CHIP_REQ, + priv->channel); + if (err) + return err; + + if (!wait_for_completion_timeout(&priv->stop_comp, + msecs_to_jiffies(KVASER_USB_TIMEOUT))) + return -ETIMEDOUT; + + return 0; +} + +static int kvaser_usb_hydra_flush_queue(struct kvaser_usb_net_priv *priv) +{ + int err; + + init_completion(&priv->flush_comp); + + err = 
kvaser_usb_hydra_send_simple_cmd(priv->dev, CMD_FLUSH_QUEUE, + priv->channel); + if (err) + return err; + + if (!wait_for_completion_timeout(&priv->flush_comp, + msecs_to_jiffies(KVASER_USB_TIMEOUT))) + return -ETIMEDOUT; + + return 0; +} + +/* A single extended hydra command can be transmitted in multiple transfers. + * We have to buffer partial hydra commands, and handle them on the next + * callback. + */ +static void kvaser_usb_hydra_read_bulk_callback(struct kvaser_usb *dev, + void *buf, int len) +{ + unsigned long irq_flags; + struct kvaser_cmd *cmd; + int pos = 0; + size_t cmd_len; + struct kvaser_usb_dev_card_data_hydra *card_data = + &dev->card_data.hydra; + int usb_rx_leftover_len; + spinlock_t *usb_rx_leftover_lock = &card_data->usb_rx_leftover_lock; + + spin_lock_irqsave(usb_rx_leftover_lock, irq_flags); + usb_rx_leftover_len = card_data->usb_rx_leftover_len; + if (usb_rx_leftover_len) { + int remaining_bytes; + + cmd = (struct kvaser_cmd *)card_data->usb_rx_leftover; + + cmd_len = kvaser_usb_hydra_cmd_size(cmd); + + remaining_bytes = min_t(unsigned int, len, + cmd_len - usb_rx_leftover_len); + /* Make sure we do not overflow usb_rx_leftover */ + if (remaining_bytes + usb_rx_leftover_len > + KVASER_USB_HYDRA_MAX_CMD_LEN) { + dev_err(&dev->intf->dev, "Format error\n"); + spin_unlock_irqrestore(usb_rx_leftover_lock, irq_flags); + return; + } + + memcpy(card_data->usb_rx_leftover + usb_rx_leftover_len, buf, + remaining_bytes); + pos += remaining_bytes; + + if (remaining_bytes + usb_rx_leftover_len == cmd_len) { + kvaser_usb_hydra_handle_cmd(dev, cmd); + usb_rx_leftover_len = 0; + } else { + /* Command still not complete */ + usb_rx_leftover_len += remaining_bytes; + } + card_data->usb_rx_leftover_len = usb_rx_leftover_len; + } + spin_unlock_irqrestore(usb_rx_leftover_lock, irq_flags); + + while (pos < len) { + cmd = buf + pos; + + cmd_len = kvaser_usb_hydra_cmd_size(cmd); + + if (pos + cmd_len > len) { + /* We got the first part of a command */ + int leftover_bytes; + + leftover_bytes = len - pos; + /* Make sure we do not overflow usb_rx_leftover */ + if (leftover_bytes > KVASER_USB_HYDRA_MAX_CMD_LEN) { + dev_err(&dev->intf->dev, "Format error\n"); + return; + } + spin_lock_irqsave(usb_rx_leftover_lock, irq_flags); + memcpy(card_data->usb_rx_leftover, buf + pos, + leftover_bytes); + card_data->usb_rx_leftover_len = leftover_bytes; + spin_unlock_irqrestore(usb_rx_leftover_lock, irq_flags); + break; + } + + kvaser_usb_hydra_handle_cmd(dev, cmd); + pos += cmd_len; + } +} + +static void * +kvaser_usb_hydra_frame_to_cmd(const struct kvaser_usb_net_priv *priv, + const struct sk_buff *skb, int *frame_len, + int *cmd_len, u16 transid) +{ + void *buf; + + if (priv->dev->card_data.capabilities & KVASER_USB_HYDRA_CAP_EXT_CMD) + buf = kvaser_usb_hydra_frame_to_cmd_ext(priv, skb, frame_len, + cmd_len, transid); + else + buf = kvaser_usb_hydra_frame_to_cmd_std(priv, skb, frame_len, + cmd_len, transid); + + return buf; +} + +const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops = { + .dev_set_mode = kvaser_usb_hydra_set_mode, + .dev_set_bittiming = kvaser_usb_hydra_set_bittiming, + .dev_set_data_bittiming = kvaser_usb_hydra_set_data_bittiming, + .dev_get_berr_counter = kvaser_usb_hydra_get_berr_counter, + .dev_setup_endpoints = kvaser_usb_hydra_setup_endpoints, + .dev_init_card = kvaser_usb_hydra_init_card, + .dev_get_software_info = kvaser_usb_hydra_get_software_info, + .dev_get_software_details = kvaser_usb_hydra_get_software_details, + .dev_get_card_info = kvaser_usb_hydra_get_card_info, + 
.dev_get_capabilities = kvaser_usb_hydra_get_capabilities, + .dev_set_opt_mode = kvaser_usb_hydra_set_opt_mode, + .dev_start_chip = kvaser_usb_hydra_start_chip, + .dev_stop_chip = kvaser_usb_hydra_stop_chip, + .dev_reset_chip = NULL, + .dev_flush_queue = kvaser_usb_hydra_flush_queue, + .dev_read_bulk_callback = kvaser_usb_hydra_read_bulk_callback, + .dev_frame_to_cmd = kvaser_usb_hydra_frame_to_cmd, +}; + +static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_kcan = { + .clock = { + .freq = 80000000, + }, + .timestamp_freq = 80, + .bittiming_const = &kvaser_usb_hydra_kcan_bittiming_c, + .data_bittiming_const = &kvaser_usb_hydra_kcan_bittiming_c, +}; + +static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_flexc = { + .clock = { + .freq = 24000000, + }, + .timestamp_freq = 1, + .bittiming_const = &kvaser_usb_hydra_flexc_bittiming_c, +}; diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c new file mode 100644 index 000000000000..07d2f3aa2c02 --- /dev/null +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c @@ -0,0 +1,1358 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Parts of this driver are based on the following: + * - Kvaser linux leaf driver (version 4.78) + * - CAN driver for esd CAN-USB/2 + * - Kvaser linux usbcanII driver (version 5.3) + * + * Copyright (C) 2002-2018 KVASER AB, Sweden. All rights reserved. + * Copyright (C) 2010 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh + * Copyright (C) 2012 Olivier Sobrie <olivier@sobrie.be> + * Copyright (C) 2015 Valeo S.A. + */ + +#include <linux/completion.h> +#include <linux/device.h> +#include <linux/gfp.h> +#include <linux/jiffies.h> +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/spinlock.h> +#include <linux/string.h> +#include <linux/types.h> +#include <linux/usb.h> + +#include <linux/can.h> +#include <linux/can/dev.h> +#include <linux/can/error.h> +#include <linux/can/netlink.h> + +#include "kvaser_usb.h" + +/* Forward declaration */ +static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg; + +#define CAN_USB_CLOCK 8000000 +#define MAX_USBCAN_NET_DEVICES 2 + +/* Command header size */ +#define CMD_HEADER_LEN 2 + +/* Kvaser CAN message flags */ +#define MSG_FLAG_ERROR_FRAME BIT(0) +#define MSG_FLAG_OVERRUN BIT(1) +#define MSG_FLAG_NERR BIT(2) +#define MSG_FLAG_WAKEUP BIT(3) +#define MSG_FLAG_REMOTE_FRAME BIT(4) +#define MSG_FLAG_RESERVED BIT(5) +#define MSG_FLAG_TX_ACK BIT(6) +#define MSG_FLAG_TX_REQUEST BIT(7) + +/* CAN states (M16C CxSTRH register) */ +#define M16C_STATE_BUS_RESET BIT(0) +#define M16C_STATE_BUS_ERROR BIT(4) +#define M16C_STATE_BUS_PASSIVE BIT(5) +#define M16C_STATE_BUS_OFF BIT(6) + +/* Leaf/usbcan command ids */ +#define CMD_RX_STD_MESSAGE 12 +#define CMD_TX_STD_MESSAGE 13 +#define CMD_RX_EXT_MESSAGE 14 +#define CMD_TX_EXT_MESSAGE 15 +#define CMD_SET_BUS_PARAMS 16 +#define CMD_CHIP_STATE_EVENT 20 +#define CMD_SET_CTRL_MODE 21 +#define CMD_RESET_CHIP 24 +#define CMD_START_CHIP 26 +#define CMD_START_CHIP_REPLY 27 +#define CMD_STOP_CHIP 28 +#define CMD_STOP_CHIP_REPLY 29 + +#define CMD_USBCAN_CLOCK_OVERFLOW_EVENT 33 + +#define CMD_GET_CARD_INFO 34 +#define CMD_GET_CARD_INFO_REPLY 35 +#define CMD_GET_SOFTWARE_INFO 38 +#define CMD_GET_SOFTWARE_INFO_REPLY 39 +#define CMD_FLUSH_QUEUE 48 +#define CMD_TX_ACKNOWLEDGE 50 +#define CMD_CAN_ERROR_EVENT 51 +#define CMD_FLUSH_QUEUE_REPLY 68 + +#define CMD_LEAF_LOG_MESSAGE 106 + +/* error factors */ +#define M16C_EF_ACKE BIT(0) +#define M16C_EF_CRCE BIT(1) +#define 
M16C_EF_FORME BIT(2) +#define M16C_EF_STFE BIT(3) +#define M16C_EF_BITE0 BIT(4) +#define M16C_EF_BITE1 BIT(5) +#define M16C_EF_RCVE BIT(6) +#define M16C_EF_TRE BIT(7) + +/* Only Leaf-based devices can report M16C error factors, + * thus define our own error status flags for USBCANII + */ +#define USBCAN_ERROR_STATE_NONE 0 +#define USBCAN_ERROR_STATE_TX_ERROR BIT(0) +#define USBCAN_ERROR_STATE_RX_ERROR BIT(1) +#define USBCAN_ERROR_STATE_BUSERROR BIT(2) + +/* bittiming parameters */ +#define KVASER_USB_TSEG1_MIN 1 +#define KVASER_USB_TSEG1_MAX 16 +#define KVASER_USB_TSEG2_MIN 1 +#define KVASER_USB_TSEG2_MAX 8 +#define KVASER_USB_SJW_MAX 4 +#define KVASER_USB_BRP_MIN 1 +#define KVASER_USB_BRP_MAX 64 +#define KVASER_USB_BRP_INC 1 + +/* ctrl modes */ +#define KVASER_CTRL_MODE_NORMAL 1 +#define KVASER_CTRL_MODE_SILENT 2 +#define KVASER_CTRL_MODE_SELFRECEPTION 3 +#define KVASER_CTRL_MODE_OFF 4 + +/* Extended CAN identifier flag */ +#define KVASER_EXTENDED_FRAME BIT(31) + +struct kvaser_cmd_simple { + u8 tid; + u8 channel; +} __packed; + +struct kvaser_cmd_cardinfo { + u8 tid; + u8 nchannels; + __le32 serial_number; + __le32 padding0; + __le32 clock_resolution; + __le32 mfgdate; + u8 ean[8]; + u8 hw_revision; + union { + struct { + u8 usb_hs_mode; + } __packed leaf1; + struct { + u8 padding; + } __packed usbcan1; + } __packed; + __le16 padding1; +} __packed; + +struct leaf_cmd_softinfo { + u8 tid; + u8 padding0; + __le32 sw_options; + __le32 fw_version; + __le16 max_outstanding_tx; + __le16 padding1[9]; +} __packed; + +struct usbcan_cmd_softinfo { + u8 tid; + u8 fw_name[5]; + __le16 max_outstanding_tx; + u8 padding[6]; + __le32 fw_version; + __le16 checksum; + __le16 sw_options; +} __packed; + +struct kvaser_cmd_busparams { + u8 tid; + u8 channel; + __le32 bitrate; + u8 tseg1; + u8 tseg2; + u8 sjw; + u8 no_samp; +} __packed; + +struct kvaser_cmd_tx_can { + u8 channel; + u8 tid; + u8 data[14]; + union { + struct { + u8 padding; + u8 flags; + } __packed leaf; + struct { + u8 flags; + u8 padding; + } __packed usbcan; + } __packed; +} __packed; + +struct kvaser_cmd_rx_can_header { + u8 channel; + u8 flag; +} __packed; + +struct leaf_cmd_rx_can { + u8 channel; + u8 flag; + + __le16 time[3]; + u8 data[14]; +} __packed; + +struct usbcan_cmd_rx_can { + u8 channel; + u8 flag; + + u8 data[14]; + __le16 time; +} __packed; + +struct leaf_cmd_chip_state_event { + u8 tid; + u8 channel; + + __le16 time[3]; + u8 tx_errors_count; + u8 rx_errors_count; + + u8 status; + u8 padding[3]; +} __packed; + +struct usbcan_cmd_chip_state_event { + u8 tid; + u8 channel; + + u8 tx_errors_count; + u8 rx_errors_count; + __le16 time; + + u8 status; + u8 padding[3]; +} __packed; + +struct kvaser_cmd_tx_acknowledge_header { + u8 channel; + u8 tid; +} __packed; + +struct leaf_cmd_error_event { + u8 tid; + u8 flags; + __le16 time[3]; + u8 channel; + u8 padding; + u8 tx_errors_count; + u8 rx_errors_count; + u8 status; + u8 error_factor; +} __packed; + +struct usbcan_cmd_error_event { + u8 tid; + u8 padding; + u8 tx_errors_count_ch0; + u8 rx_errors_count_ch0; + u8 tx_errors_count_ch1; + u8 rx_errors_count_ch1; + u8 status_ch0; + u8 status_ch1; + __le16 time; +} __packed; + +struct kvaser_cmd_ctrl_mode { + u8 tid; + u8 channel; + u8 ctrl_mode; + u8 padding[3]; +} __packed; + +struct kvaser_cmd_flush_queue { + u8 tid; + u8 channel; + u8 flags; + u8 padding[3]; +} __packed; + +struct leaf_cmd_log_message { + u8 channel; + u8 flags; + __le16 time[3]; + u8 dlc; + u8 time_offset; + __le32 id; + u8 data[8]; +} __packed; + +struct kvaser_cmd { 
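+ /* Every command starts with the same CMD_HEADER_LEN bytes: the total + * command length and the command id; the command-specific payload + * follows in the union below. */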
+ u8 len; + u8 id; + union { + struct kvaser_cmd_simple simple; + struct kvaser_cmd_cardinfo cardinfo; + struct kvaser_cmd_busparams busparams; + + struct kvaser_cmd_rx_can_header rx_can_header; + struct kvaser_cmd_tx_acknowledge_header tx_acknowledge_header; + + union { + struct leaf_cmd_softinfo softinfo; + struct leaf_cmd_rx_can rx_can; + struct leaf_cmd_chip_state_event chip_state_event; + struct leaf_cmd_error_event error_event; + struct leaf_cmd_log_message log_message; + } __packed leaf; + + union { + struct usbcan_cmd_softinfo softinfo; + struct usbcan_cmd_rx_can rx_can; + struct usbcan_cmd_chip_state_event chip_state_event; + struct usbcan_cmd_error_event error_event; + } __packed usbcan; + + struct kvaser_cmd_tx_can tx_can; + struct kvaser_cmd_ctrl_mode ctrl_mode; + struct kvaser_cmd_flush_queue flush_queue; + } u; +} __packed; + +/* Summary of a kvaser error event, for a unified Leaf/Usbcan error + * handling. Some discrepancies between the two families exist: + * + * - USBCAN firmware does not report M16C "error factors" + * - USBCAN controllers have difficulties reporting whether the raised error + * event is for ch0 or ch1. They leave such arbitration to the OS + * driver by letting it compare error counters with previous values + * and decide the error event's channel. Thus for USBCAN, the channel + * field is only advisory. + */ +struct kvaser_usb_err_summary { + u8 channel, status, txerr, rxerr; + union { + struct { + u8 error_factor; + } leaf; + struct { + u8 other_ch_status; + u8 error_state; + } usbcan; + }; +}; + +static void * +kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv, + const struct sk_buff *skb, int *frame_len, + int *cmd_len, u16 transid) +{ + struct kvaser_usb *dev = priv->dev; + struct kvaser_cmd *cmd; + u8 *cmd_tx_can_flags = NULL; /* GCC */ + struct can_frame *cf = (struct can_frame *)skb->data; + + *frame_len = cf->can_dlc; + + cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC); + if (cmd) { + cmd->u.tx_can.tid = transid & 0xff; + cmd->len = *cmd_len = CMD_HEADER_LEN + + sizeof(struct kvaser_cmd_tx_can); + cmd->u.tx_can.channel = priv->channel; + + switch (dev->card_data.leaf.family) { + case KVASER_LEAF: + cmd_tx_can_flags = &cmd->u.tx_can.leaf.flags; + break; + case KVASER_USBCAN: + cmd_tx_can_flags = &cmd->u.tx_can.usbcan.flags; + break; + } + + *cmd_tx_can_flags = 0; + + if (cf->can_id & CAN_EFF_FLAG) { + cmd->id = CMD_TX_EXT_MESSAGE; + cmd->u.tx_can.data[0] = (cf->can_id >> 24) & 0x1f; + cmd->u.tx_can.data[1] = (cf->can_id >> 18) & 0x3f; + cmd->u.tx_can.data[2] = (cf->can_id >> 14) & 0x0f; + cmd->u.tx_can.data[3] = (cf->can_id >> 6) & 0xff; + cmd->u.tx_can.data[4] = cf->can_id & 0x3f; + } else { + cmd->id = CMD_TX_STD_MESSAGE; + cmd->u.tx_can.data[0] = (cf->can_id >> 6) & 0x1f; + cmd->u.tx_can.data[1] = cf->can_id & 0x3f; + } + + cmd->u.tx_can.data[5] = cf->can_dlc; + memcpy(&cmd->u.tx_can.data[6], cf->data, cf->can_dlc); + + if (cf->can_id & CAN_RTR_FLAG) + *cmd_tx_can_flags |= MSG_FLAG_REMOTE_FRAME; + } + return cmd; +} + +static int kvaser_usb_leaf_wait_cmd(const struct kvaser_usb *dev, u8 id, + struct kvaser_cmd *cmd) +{ + struct kvaser_cmd *tmp; + void *buf; + int actual_len; + int err; + int pos; + unsigned long to = jiffies + msecs_to_jiffies(KVASER_USB_TIMEOUT); + + buf = kzalloc(KVASER_USB_RX_BUFFER_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + do { + err = kvaser_usb_recv_cmd(dev, buf, KVASER_USB_RX_BUFFER_SIZE, + &actual_len); + if (err < 0) + goto end; + + pos = 0; + while (pos <= actual_len - CMD_HEADER_LEN) { + tmp = buf +
pos; + + /* Handle commands crossing the USB endpoint max packet + * size boundary. Check kvaser_usb_read_bulk_callback() + * for further details. + */ + if (tmp->len == 0) { + pos = round_up(pos, + le16_to_cpu + (dev->bulk_in->wMaxPacketSize)); + continue; + } + + if (pos + tmp->len > actual_len) { + dev_err_ratelimited(&dev->intf->dev, + "Format error\n"); + break; + } + + if (tmp->id == id) { + memcpy(cmd, tmp, tmp->len); + goto end; + } + + pos += tmp->len; + } + } while (time_before(jiffies, to)); + + err = -EINVAL; + +end: + kfree(buf); + + return err; +} + +static int kvaser_usb_leaf_send_simple_cmd(const struct kvaser_usb *dev, + u8 cmd_id, int channel) +{ + struct kvaser_cmd *cmd; + int rc; + + cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->id = cmd_id; + cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_simple); + cmd->u.simple.channel = channel; + cmd->u.simple.tid = 0xff; + + rc = kvaser_usb_send_cmd(dev, cmd, cmd->len); + + kfree(cmd); + return rc; +} + +static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev) +{ + struct kvaser_cmd cmd; + int err; + + err = kvaser_usb_leaf_send_simple_cmd(dev, CMD_GET_SOFTWARE_INFO, 0); + if (err) + return err; + + err = kvaser_usb_leaf_wait_cmd(dev, CMD_GET_SOFTWARE_INFO_REPLY, &cmd); + if (err) + return err; + + switch (dev->card_data.leaf.family) { + case KVASER_LEAF: + dev->fw_version = le32_to_cpu(cmd.u.leaf.softinfo.fw_version); + dev->max_tx_urbs = + le16_to_cpu(cmd.u.leaf.softinfo.max_outstanding_tx); + break; + case KVASER_USBCAN: + dev->fw_version = le32_to_cpu(cmd.u.usbcan.softinfo.fw_version); + dev->max_tx_urbs = + le16_to_cpu(cmd.u.usbcan.softinfo.max_outstanding_tx); + break; + } + + return 0; +} + +static int kvaser_usb_leaf_get_software_info(struct kvaser_usb *dev) +{ + int err; + int retry = 3; + + /* On some x86 laptops, plugging a Kvaser device again after + * an unplug makes the firmware always ignore the very first + * command. For such a case, provide some room for retries + * instead of completely exiting the driver. 
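+ * The retry loop below makes at most three attempts and only retries + * when the command times out (-ETIMEDOUT); any other error is returned + * immediately.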
+ */ + do { + err = kvaser_usb_leaf_get_software_info_inner(dev); + } while (--retry && err == -ETIMEDOUT); + + return err; +} + +static int kvaser_usb_leaf_get_card_info(struct kvaser_usb *dev) +{ + struct kvaser_cmd cmd; + int err; + + err = kvaser_usb_leaf_send_simple_cmd(dev, CMD_GET_CARD_INFO, 0); + if (err) + return err; + + err = kvaser_usb_leaf_wait_cmd(dev, CMD_GET_CARD_INFO_REPLY, &cmd); + if (err) + return err; + + dev->nchannels = cmd.u.cardinfo.nchannels; + if (dev->nchannels > KVASER_USB_MAX_NET_DEVICES || + (dev->card_data.leaf.family == KVASER_USBCAN && + dev->nchannels > MAX_USBCAN_NET_DEVICES)) + return -EINVAL; + + return 0; +} + +static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + struct net_device_stats *stats; + struct kvaser_usb_tx_urb_context *context; + struct kvaser_usb_net_priv *priv; + unsigned long flags; + u8 channel, tid; + + channel = cmd->u.tx_acknowledge_header.channel; + tid = cmd->u.tx_acknowledge_header.tid; + + if (channel >= dev->nchannels) { + dev_err(&dev->intf->dev, + "Invalid channel number (%d)\n", channel); + return; + } + + priv = dev->nets[channel]; + + if (!netif_device_present(priv->netdev)) + return; + + stats = &priv->netdev->stats; + + context = &priv->tx_contexts[tid % dev->max_tx_urbs]; + + /* Sometimes the state change doesn't come after a bus-off event */ + if (priv->can.restart_ms && priv->can.state >= CAN_STATE_BUS_OFF) { + struct sk_buff *skb; + struct can_frame *cf; + + skb = alloc_can_err_skb(priv->netdev, &cf); + if (skb) { + cf->can_id |= CAN_ERR_RESTARTED; + + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + netif_rx(skb); + } else { + netdev_err(priv->netdev, + "No memory left for err_skb\n"); + } + + priv->can.can_stats.restarts++; + netif_carrier_on(priv->netdev); + + priv->can.state = CAN_STATE_ERROR_ACTIVE; + } + + stats->tx_packets++; + stats->tx_bytes += context->dlc; + + spin_lock_irqsave(&priv->tx_contexts_lock, flags); + + can_get_echo_skb(priv->netdev, context->echo_index); + context->echo_index = dev->max_tx_urbs; + --priv->active_tx_contexts; + netif_wake_queue(priv->netdev); + + spin_unlock_irqrestore(&priv->tx_contexts_lock, flags); +} + +static int kvaser_usb_leaf_simple_cmd_async(struct kvaser_usb_net_priv *priv, + u8 cmd_id) +{ + struct kvaser_cmd *cmd; + int err; + + cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC); + if (!cmd) + return -ENOMEM; + + cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_simple); + cmd->id = cmd_id; + cmd->u.simple.channel = priv->channel; + + err = kvaser_usb_send_cmd_async(priv, cmd, cmd->len); + if (err) + kfree(cmd); + + return err; +} + +static void +kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv, + const struct kvaser_usb_err_summary *es, + struct can_frame *cf) +{ + struct kvaser_usb *dev = priv->dev; + struct net_device_stats *stats = &priv->netdev->stats; + enum can_state cur_state, new_state, tx_state, rx_state; + + netdev_dbg(priv->netdev, "Error status: 0x%02x\n", es->status); + + new_state = priv->can.state; + cur_state = priv->can.state; + + if (es->status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) { + new_state = CAN_STATE_BUS_OFF; + } else if (es->status & M16C_STATE_BUS_PASSIVE) { + new_state = CAN_STATE_ERROR_PASSIVE; + } else if (es->status & M16C_STATE_BUS_ERROR) { + /* Guard against spurious error events after a busoff */ + if (cur_state < CAN_STATE_BUS_OFF) { + if (es->txerr >= 128 || es->rxerr >= 128) + new_state = CAN_STATE_ERROR_PASSIVE; + else if (es->txerr >= 96 
|| es->rxerr >= 96) + new_state = CAN_STATE_ERROR_WARNING; + else if (cur_state > CAN_STATE_ERROR_ACTIVE) + new_state = CAN_STATE_ERROR_ACTIVE; + } + } + + if (!es->status) + new_state = CAN_STATE_ERROR_ACTIVE; + + if (new_state != cur_state) { + tx_state = (es->txerr >= es->rxerr) ? new_state : 0; + rx_state = (es->txerr <= es->rxerr) ? new_state : 0; + + can_change_state(priv->netdev, cf, tx_state, rx_state); + } + + if (priv->can.restart_ms && + cur_state >= CAN_STATE_BUS_OFF && + new_state < CAN_STATE_BUS_OFF) + priv->can.can_stats.restarts++; + + switch (dev->card_data.leaf.family) { + case KVASER_LEAF: + if (es->leaf.error_factor) { + priv->can.can_stats.bus_error++; + stats->rx_errors++; + } + break; + case KVASER_USBCAN: + if (es->usbcan.error_state & USBCAN_ERROR_STATE_TX_ERROR) + stats->tx_errors++; + if (es->usbcan.error_state & USBCAN_ERROR_STATE_RX_ERROR) + stats->rx_errors++; + if (es->usbcan.error_state & USBCAN_ERROR_STATE_BUSERROR) + priv->can.can_stats.bus_error++; + break; + } + + priv->bec.txerr = es->txerr; + priv->bec.rxerr = es->rxerr; +} + +static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev, + const struct kvaser_usb_err_summary *es) +{ + struct can_frame *cf; + struct can_frame tmp_cf = { .can_id = CAN_ERR_FLAG, + .can_dlc = CAN_ERR_DLC }; + struct sk_buff *skb; + struct net_device_stats *stats; + struct kvaser_usb_net_priv *priv; + enum can_state old_state, new_state; + + if (es->channel >= dev->nchannels) { + dev_err(&dev->intf->dev, + "Invalid channel number (%d)\n", es->channel); + return; + } + + priv = dev->nets[es->channel]; + stats = &priv->netdev->stats; + + /* Update all of the CAN interface's state and error counters before + * trying any memory allocation that can actually fail with -ENOMEM. + * + * We send a temporary stack-allocated error CAN frame to + * can_change_state() for the very same reason. + * + * TODO: Split can_change_state() responsibility between updating the + * CAN interface's state and counters, and the setting up of CAN error + * frame ID and data to userspace. Remove stack allocation afterwards. 
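+ * + * Until then, the code below updates state and counters via the + * stack-allocated tmp_cf first, and copies tmp_cf into a real error + * frame only once the skb allocation has succeeded.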
+ */ + old_state = priv->can.state; + kvaser_usb_leaf_rx_error_update_can_state(priv, es, &tmp_cf); + new_state = priv->can.state; + + skb = alloc_can_err_skb(priv->netdev, &cf); + if (!skb) { + stats->rx_dropped++; + return; + } + memcpy(cf, &tmp_cf, sizeof(*cf)); + + if (new_state != old_state) { + if (es->status & + (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) { + if (!priv->can.restart_ms) + kvaser_usb_leaf_simple_cmd_async(priv, + CMD_STOP_CHIP); + netif_carrier_off(priv->netdev); + } + + if (priv->can.restart_ms && + old_state >= CAN_STATE_BUS_OFF && + new_state < CAN_STATE_BUS_OFF) { + cf->can_id |= CAN_ERR_RESTARTED; + netif_carrier_on(priv->netdev); + } + } + + switch (dev->card_data.leaf.family) { + case KVASER_LEAF: + if (es->leaf.error_factor) { + cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT; + + if (es->leaf.error_factor & M16C_EF_ACKE) + cf->data[3] = CAN_ERR_PROT_LOC_ACK; + if (es->leaf.error_factor & M16C_EF_CRCE) + cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; + if (es->leaf.error_factor & M16C_EF_FORME) + cf->data[2] |= CAN_ERR_PROT_FORM; + if (es->leaf.error_factor & M16C_EF_STFE) + cf->data[2] |= CAN_ERR_PROT_STUFF; + if (es->leaf.error_factor & M16C_EF_BITE0) + cf->data[2] |= CAN_ERR_PROT_BIT0; + if (es->leaf.error_factor & M16C_EF_BITE1) + cf->data[2] |= CAN_ERR_PROT_BIT1; + if (es->leaf.error_factor & M16C_EF_TRE) + cf->data[2] |= CAN_ERR_PROT_TX; + } + break; + case KVASER_USBCAN: + if (es->usbcan.error_state & USBCAN_ERROR_STATE_BUSERROR) + cf->can_id |= CAN_ERR_BUSERROR; + break; + } + + cf->data[6] = es->txerr; + cf->data[7] = es->rxerr; + + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + netif_rx(skb); +} + +/* For USBCAN, report error to userspace if the channel's error counter
+ */ +static void +kvaser_usb_leaf_usbcan_conditionally_rx_error(const struct kvaser_usb *dev, + struct kvaser_usb_err_summary *es) +{ + struct kvaser_usb_net_priv *priv; + unsigned int channel; + bool report_error; + + channel = es->channel; + if (channel >= dev->nchannels) { + dev_err(&dev->intf->dev, + "Invalid channel number (%d)\n", channel); + return; + } + + priv = dev->nets[channel]; + report_error = false; + + if (es->txerr != priv->bec.txerr) { + es->usbcan.error_state |= USBCAN_ERROR_STATE_TX_ERROR; + report_error = true; + } + if (es->rxerr != priv->bec.rxerr) { + es->usbcan.error_state |= USBCAN_ERROR_STATE_RX_ERROR; + report_error = true; + } + if ((es->status & M16C_STATE_BUS_ERROR) && + !(es->usbcan.other_ch_status & M16C_STATE_BUS_ERROR)) { + es->usbcan.error_state |= USBCAN_ERROR_STATE_BUSERROR; + report_error = true; + } + + if (report_error) + kvaser_usb_leaf_rx_error(dev, es); +} + +static void kvaser_usb_leaf_usbcan_rx_error(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + struct kvaser_usb_err_summary es = { }; + + switch (cmd->id) { + /* Sometimes errors are sent as unsolicited chip state events */ + case CMD_CHIP_STATE_EVENT: + es.channel = cmd->u.usbcan.chip_state_event.channel; + es.status = cmd->u.usbcan.chip_state_event.status; + es.txerr = cmd->u.usbcan.chip_state_event.tx_errors_count; + es.rxerr = cmd->u.usbcan.chip_state_event.rx_errors_count; + kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es); + break; + + case CMD_CAN_ERROR_EVENT: + es.channel = 0; + es.status = cmd->u.usbcan.error_event.status_ch0; + es.txerr = cmd->u.usbcan.error_event.tx_errors_count_ch0; + es.rxerr = cmd->u.usbcan.error_event.rx_errors_count_ch0; + es.usbcan.other_ch_status = + cmd->u.usbcan.error_event.status_ch1; + kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es); + + /* The USBCAN firmware supports up to 2 channels. + * Now that ch0 was checked, check if ch1 has any errors. 
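+ * For that second pass the channel fields swap roles: ch1 supplies the + * reported status and counters, while status_ch0 becomes + * other_ch_status.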
+ */ + if (dev->nchannels == MAX_USBCAN_NET_DEVICES) { + es.channel = 1; + es.status = cmd->u.usbcan.error_event.status_ch1; + es.txerr = + cmd->u.usbcan.error_event.tx_errors_count_ch1; + es.rxerr = + cmd->u.usbcan.error_event.rx_errors_count_ch1; + es.usbcan.other_ch_status = + cmd->u.usbcan.error_event.status_ch0; + kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es); + } + break; + + default: + dev_err(&dev->intf->dev, "Invalid cmd id (%d)\n", cmd->id); + } +} + +static void kvaser_usb_leaf_leaf_rx_error(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + struct kvaser_usb_err_summary es = { }; + + switch (cmd->id) { + case CMD_CAN_ERROR_EVENT: + es.channel = cmd->u.leaf.error_event.channel; + es.status = cmd->u.leaf.error_event.status; + es.txerr = cmd->u.leaf.error_event.tx_errors_count; + es.rxerr = cmd->u.leaf.error_event.rx_errors_count; + es.leaf.error_factor = cmd->u.leaf.error_event.error_factor; + break; + case CMD_LEAF_LOG_MESSAGE: + es.channel = cmd->u.leaf.log_message.channel; + es.status = cmd->u.leaf.log_message.data[0]; + es.txerr = cmd->u.leaf.log_message.data[2]; + es.rxerr = cmd->u.leaf.log_message.data[3]; + es.leaf.error_factor = cmd->u.leaf.log_message.data[1]; + break; + case CMD_CHIP_STATE_EVENT: + es.channel = cmd->u.leaf.chip_state_event.channel; + es.status = cmd->u.leaf.chip_state_event.status; + es.txerr = cmd->u.leaf.chip_state_event.tx_errors_count; + es.rxerr = cmd->u.leaf.chip_state_event.rx_errors_count; + es.leaf.error_factor = 0; + break; + default: + dev_err(&dev->intf->dev, "Invalid cmd id (%d)\n", cmd->id); + return; + } + + kvaser_usb_leaf_rx_error(dev, &es); +} + +static void kvaser_usb_leaf_rx_can_err(const struct kvaser_usb_net_priv *priv, + const struct kvaser_cmd *cmd) +{ + if (cmd->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME | + MSG_FLAG_NERR)) { + struct net_device_stats *stats = &priv->netdev->stats; + + netdev_err(priv->netdev, "Unknown error (flags: 0x%02x)\n", + cmd->u.rx_can_header.flag); + + stats->rx_errors++; + return; + } + + if (cmd->u.rx_can_header.flag & MSG_FLAG_OVERRUN) + kvaser_usb_can_rx_over_error(priv->netdev); +} + +static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + struct kvaser_usb_net_priv *priv; + struct can_frame *cf; + struct sk_buff *skb; + struct net_device_stats *stats; + u8 channel = cmd->u.rx_can_header.channel; + const u8 *rx_data = NULL; /* GCC */ + + if (channel >= dev->nchannels) { + dev_err(&dev->intf->dev, + "Invalid channel number (%d)\n", channel); + return; + } + + priv = dev->nets[channel]; + stats = &priv->netdev->stats; + + if ((cmd->u.rx_can_header.flag & MSG_FLAG_ERROR_FRAME) && + (dev->card_data.leaf.family == KVASER_LEAF && + cmd->id == CMD_LEAF_LOG_MESSAGE)) { + kvaser_usb_leaf_leaf_rx_error(dev, cmd); + return; + } else if (cmd->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME | + MSG_FLAG_NERR | + MSG_FLAG_OVERRUN)) { + kvaser_usb_leaf_rx_can_err(priv, cmd); + return; + } else if (cmd->u.rx_can_header.flag & ~MSG_FLAG_REMOTE_FRAME) { + netdev_warn(priv->netdev, + "Unhandled frame (flags: 0x%02x)\n", + cmd->u.rx_can_header.flag); + return; + } + + switch (dev->card_data.leaf.family) { + case KVASER_LEAF: + rx_data = cmd->u.leaf.rx_can.data; + break; + case KVASER_USBCAN: + rx_data = cmd->u.usbcan.rx_can.data; + break; + } + + skb = alloc_can_skb(priv->netdev, &cf); + if (!skb) { + stats->rx_dropped++; + return; + } + + if (dev->card_data.leaf.family == KVASER_LEAF && cmd->id == + CMD_LEAF_LOG_MESSAGE) { + cf->can_id = 
le32_to_cpu(cmd->u.leaf.log_message.id); + if (cf->can_id & KVASER_EXTENDED_FRAME) + cf->can_id &= CAN_EFF_MASK | CAN_EFF_FLAG; + else + cf->can_id &= CAN_SFF_MASK; + + cf->can_dlc = get_can_dlc(cmd->u.leaf.log_message.dlc); + + if (cmd->u.leaf.log_message.flags & MSG_FLAG_REMOTE_FRAME) + cf->can_id |= CAN_RTR_FLAG; + else + memcpy(cf->data, &cmd->u.leaf.log_message.data, + cf->can_dlc); + } else { + cf->can_id = ((rx_data[0] & 0x1f) << 6) | (rx_data[1] & 0x3f); + + if (cmd->id == CMD_RX_EXT_MESSAGE) { + cf->can_id <<= 18; + cf->can_id |= ((rx_data[2] & 0x0f) << 14) | + ((rx_data[3] & 0xff) << 6) | + (rx_data[4] & 0x3f); + cf->can_id |= CAN_EFF_FLAG; + } + + cf->can_dlc = get_can_dlc(rx_data[5]); + + if (cmd->u.rx_can_header.flag & MSG_FLAG_REMOTE_FRAME) + cf->can_id |= CAN_RTR_FLAG; + else + memcpy(cf->data, &rx_data[6], cf->can_dlc); + } + + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + netif_rx(skb); +} + +static void kvaser_usb_leaf_start_chip_reply(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + struct kvaser_usb_net_priv *priv; + u8 channel = cmd->u.simple.channel; + + if (channel >= dev->nchannels) { + dev_err(&dev->intf->dev, + "Invalid channel number (%d)\n", channel); + return; + } + + priv = dev->nets[channel]; + + if (completion_done(&priv->start_comp) && + netif_queue_stopped(priv->netdev)) { + netif_wake_queue(priv->netdev); + } else { + netif_start_queue(priv->netdev); + complete(&priv->start_comp); + } +} + +static void kvaser_usb_leaf_stop_chip_reply(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + struct kvaser_usb_net_priv *priv; + u8 channel = cmd->u.simple.channel; + + if (channel >= dev->nchannels) { + dev_err(&dev->intf->dev, + "Invalid channel number (%d)\n", channel); + return; + } + + priv = dev->nets[channel]; + + complete(&priv->stop_comp); +} + +static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + switch (cmd->id) { + case CMD_START_CHIP_REPLY: + kvaser_usb_leaf_start_chip_reply(dev, cmd); + break; + + case CMD_STOP_CHIP_REPLY: + kvaser_usb_leaf_stop_chip_reply(dev, cmd); + break; + + case CMD_RX_STD_MESSAGE: + case CMD_RX_EXT_MESSAGE: + kvaser_usb_leaf_rx_can_msg(dev, cmd); + break; + + case CMD_LEAF_LOG_MESSAGE: + if (dev->card_data.leaf.family != KVASER_LEAF) + goto warn; + kvaser_usb_leaf_rx_can_msg(dev, cmd); + break; + + case CMD_CHIP_STATE_EVENT: + case CMD_CAN_ERROR_EVENT: + if (dev->card_data.leaf.family == KVASER_LEAF) + kvaser_usb_leaf_leaf_rx_error(dev, cmd); + else + kvaser_usb_leaf_usbcan_rx_error(dev, cmd); + break; + + case CMD_TX_ACKNOWLEDGE: + kvaser_usb_leaf_tx_acknowledge(dev, cmd); + break; + + /* Ignored commands */ + case CMD_USBCAN_CLOCK_OVERFLOW_EVENT: + if (dev->card_data.leaf.family != KVASER_USBCAN) + goto warn; + break; + + case CMD_FLUSH_QUEUE_REPLY: + if (dev->card_data.leaf.family != KVASER_LEAF) + goto warn; + break; + + default: +warn: dev_warn(&dev->intf->dev, "Unhandled command (%d)\n", cmd->id); + break; + } +} + +static void kvaser_usb_leaf_read_bulk_callback(struct kvaser_usb *dev, + void *buf, int len) +{ + struct kvaser_cmd *cmd; + int pos = 0; + + while (pos <= len - CMD_HEADER_LEN) { + cmd = buf + pos; + + /* The Kvaser firmware can only read and write commands that + * do not cross the USB's endpoint wMaxPacketSize boundary. + * If a follow-up command crosses such a boundary, firmware puts + * a placeholder zero-length command in its place then aligns + * the real command to the next max packet size.
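+ * + * For example (illustrative numbers, not from a device trace): with + * wMaxPacketSize = 64 and a command ending at offset 60, a cmd->len == 0 + * placeholder is read at offset 60 and round_up(60, 64) advances the + * scan to offset 64, where the real command starts.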
+ * + * Handle such cases or we're going to miss a significant + * number of events in case of a heavy rx load on the bus. + */ + if (cmd->len == 0) { + pos = round_up(pos, le16_to_cpu + (dev->bulk_in->wMaxPacketSize)); + continue; + } + + if (pos + cmd->len > len) { + dev_err_ratelimited(&dev->intf->dev, "Format error\n"); + break; + } + + kvaser_usb_leaf_handle_command(dev, cmd); + pos += cmd->len; + } +} + +static int kvaser_usb_leaf_set_opt_mode(const struct kvaser_usb_net_priv *priv) +{ + struct kvaser_cmd *cmd; + int rc; + + cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->id = CMD_SET_CTRL_MODE; + cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_ctrl_mode); + cmd->u.ctrl_mode.tid = 0xff; + cmd->u.ctrl_mode.channel = priv->channel; + + if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) + cmd->u.ctrl_mode.ctrl_mode = KVASER_CTRL_MODE_SILENT; + else + cmd->u.ctrl_mode.ctrl_mode = KVASER_CTRL_MODE_NORMAL; + + rc = kvaser_usb_send_cmd(priv->dev, cmd, cmd->len); + + kfree(cmd); + return rc; +} + +static int kvaser_usb_leaf_start_chip(struct kvaser_usb_net_priv *priv) +{ + int err; + + init_completion(&priv->start_comp); + + err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_START_CHIP, + priv->channel); + if (err) + return err; + + if (!wait_for_completion_timeout(&priv->start_comp, + msecs_to_jiffies(KVASER_USB_TIMEOUT))) + return -ETIMEDOUT; + + return 0; +} + +static int kvaser_usb_leaf_stop_chip(struct kvaser_usb_net_priv *priv) +{ + int err; + + init_completion(&priv->stop_comp); + + err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_STOP_CHIP, + priv->channel); + if (err) + return err; + + if (!wait_for_completion_timeout(&priv->stop_comp, + msecs_to_jiffies(KVASER_USB_TIMEOUT))) + return -ETIMEDOUT; + + return 0; +} + +static int kvaser_usb_leaf_reset_chip(struct kvaser_usb *dev, int channel) +{ + return kvaser_usb_leaf_send_simple_cmd(dev, CMD_RESET_CHIP, channel); +} + +static int kvaser_usb_leaf_flush_queue(struct kvaser_usb_net_priv *priv) +{ + struct kvaser_cmd *cmd; + int rc; + + cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->id = CMD_FLUSH_QUEUE; + cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_flush_queue); + cmd->u.flush_queue.channel = priv->channel; + cmd->u.flush_queue.flags = 0x00; + + rc = kvaser_usb_send_cmd(priv->dev, cmd, cmd->len); + + kfree(cmd); + return rc; +} + +static int kvaser_usb_leaf_init_card(struct kvaser_usb *dev) +{ + struct kvaser_usb_dev_card_data *card_data = &dev->card_data; + + dev->cfg = &kvaser_usb_leaf_dev_cfg; + card_data->ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES; + + return 0; +} + +static const struct can_bittiming_const kvaser_usb_leaf_bittiming_const = { + .name = "kvaser_usb", + .tseg1_min = KVASER_USB_TSEG1_MIN, + .tseg1_max = KVASER_USB_TSEG1_MAX, + .tseg2_min = KVASER_USB_TSEG2_MIN, + .tseg2_max = KVASER_USB_TSEG2_MAX, + .sjw_max = KVASER_USB_SJW_MAX, + .brp_min = KVASER_USB_BRP_MIN, + .brp_max = KVASER_USB_BRP_MAX, + .brp_inc = KVASER_USB_BRP_INC, +}; + +static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev) +{ + struct kvaser_usb_net_priv *priv = netdev_priv(netdev); + struct can_bittiming *bt = &priv->can.bittiming; + struct kvaser_usb *dev = priv->dev; + struct kvaser_cmd *cmd; + int rc; + + cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->id = CMD_SET_BUS_PARAMS; + cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_busparams); + cmd->u.busparams.channel = priv->channel; + cmd->u.busparams.tid = 0xff; + 
cmd->u.busparams.bitrate = cpu_to_le32(bt->bitrate); + cmd->u.busparams.sjw = bt->sjw; + cmd->u.busparams.tseg1 = bt->prop_seg + bt->phase_seg1; + cmd->u.busparams.tseg2 = bt->phase_seg2; + + if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) + cmd->u.busparams.no_samp = 3; + else + cmd->u.busparams.no_samp = 1; + + rc = kvaser_usb_send_cmd(dev, cmd, cmd->len); + + kfree(cmd); + return rc; +} + +static int kvaser_usb_leaf_set_mode(struct net_device *netdev, + enum can_mode mode) +{ + struct kvaser_usb_net_priv *priv = netdev_priv(netdev); + int err; + + switch (mode) { + case CAN_MODE_START: + err = kvaser_usb_leaf_simple_cmd_async(priv, CMD_START_CHIP); + if (err) + return err; + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int kvaser_usb_leaf_get_berr_counter(const struct net_device *netdev, + struct can_berr_counter *bec) +{ + struct kvaser_usb_net_priv *priv = netdev_priv(netdev); + + *bec = priv->bec; + + return 0; +} + +static int kvaser_usb_leaf_setup_endpoints(struct kvaser_usb *dev) +{ + const struct usb_host_interface *iface_desc; + struct usb_endpoint_descriptor *endpoint; + int i; + + iface_desc = &dev->intf->altsetting[0]; + + for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { + endpoint = &iface_desc->endpoint[i].desc; + + if (!dev->bulk_in && usb_endpoint_is_bulk_in(endpoint)) + dev->bulk_in = endpoint; + + if (!dev->bulk_out && usb_endpoint_is_bulk_out(endpoint)) + dev->bulk_out = endpoint; + + /* use first bulk endpoint for in and out */ + if (dev->bulk_in && dev->bulk_out) + return 0; + } + + return -ENODEV; +} + +const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops = { + .dev_set_mode = kvaser_usb_leaf_set_mode, + .dev_set_bittiming = kvaser_usb_leaf_set_bittiming, + .dev_set_data_bittiming = NULL, + .dev_get_berr_counter = kvaser_usb_leaf_get_berr_counter, + .dev_setup_endpoints = kvaser_usb_leaf_setup_endpoints, + .dev_init_card = kvaser_usb_leaf_init_card, + .dev_get_software_info = kvaser_usb_leaf_get_software_info, + .dev_get_software_details = NULL, + .dev_get_card_info = kvaser_usb_leaf_get_card_info, + .dev_get_capabilities = NULL, + .dev_set_opt_mode = kvaser_usb_leaf_set_opt_mode, + .dev_start_chip = kvaser_usb_leaf_start_chip, + .dev_stop_chip = kvaser_usb_leaf_stop_chip, + .dev_reset_chip = kvaser_usb_leaf_reset_chip, + .dev_flush_queue = kvaser_usb_leaf_flush_queue, + .dev_read_bulk_callback = kvaser_usb_leaf_read_bulk_callback, + .dev_frame_to_cmd = kvaser_usb_leaf_frame_to_cmd, +}; + +static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg = { + .clock = { + .freq = CAN_USB_CLOCK, + }, + .timestamp_freq = 1, + .bittiming_const = &kvaser_usb_leaf_bittiming_const, +}; diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c index f530a80f5051..13238a72a338 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb.c @@ -423,6 +423,7 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n, new_state = CAN_STATE_ERROR_WARNING; break; } + /* else: fall through */ case CAN_STATE_ERROR_WARNING: if (n & PCAN_USB_ERROR_BUS_HEAVY) { diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c index 50e911428638..611f9d31be5d 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c @@ -353,6 +353,7 @@ static netdev_tx_t peak_usb_ndo_start_xmit(struct sk_buff *skb, default: netdev_warn(netdev, "tx urb submitting failed err=%d\n", err); + /* 
fall through */ case -ENOENT: /* cable unplugged */ stats->tx_dropped++; diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c index 0105fbfea273..d516def846ab 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c @@ -141,8 +141,10 @@ static int pcan_msg_add_rec(struct pcan_usb_pro_msg *pm, u8 id, ...) switch (id) { case PCAN_USBPRO_TXMSG8: i += 4; + /* fall through */ case PCAN_USBPRO_TXMSG4: i += 4; + /* fall through */ case PCAN_USBPRO_TXMSG0: *pc++ = va_arg(ap, int); *pc++ = va_arg(ap, int); diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c new file mode 100644 index 000000000000..0678a38b1af4 --- /dev/null +++ b/drivers/net/can/usb/ucan.c @@ -0,0 +1,1613 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Driver for Theobroma Systems UCAN devices, Protocol Version 3 + * + * Copyright (C) 2018 Theobroma Systems Design und Consulting GmbH + * + * + * General Description: + * + * The USB Device uses three Endpoints: + * + * CONTROL Endpoint: Is used to set up the device (start, stop, + * info, configure). + * + * IN Endpoint: The device sends CAN Frame Messages and Device + * Information using the IN endpoint. + * + * OUT Endpoint: The driver sends configuration requests and CAN + * frames on the OUT endpoint. + * + * Error Handling: + * + * If error reporting is turned on, the device encodes errors into CAN + * error frames (see uapi/linux/can/error.h) and sends them using the + * IN Endpoint. The driver updates statistics and forwards them. + */ + +#include <linux/can.h> +#include <linux/can/dev.h> +#include <linux/can/error.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/signal.h> +#include <linux/skbuff.h> +#include <linux/slab.h> +#include <linux/usb.h> + +#define UCAN_DRIVER_NAME "ucan" +#define UCAN_MAX_RX_URBS 8 +/* the CAN controller needs a while to enable/disable the bus */ +#define UCAN_USB_CTL_PIPE_TIMEOUT 1000 +/* this driver currently supports protocol version 3 only */ +#define UCAN_PROTOCOL_VERSION_MIN 3 +#define UCAN_PROTOCOL_VERSION_MAX 3 + +/* UCAN Message Definitions + * ------------------------ + * + * ucan_message_out_t and ucan_message_in_t define the messages + * transmitted on the OUT and IN endpoint.
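+ * + * Both directions share a common 4-byte header (a sketch derived + * from the struct definitions below, not an extra wire format): a + * little-endian u16 total length including the header itself, + * followed by a u8 type and a u8 subtype, then the payload.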
+ * + * Multibyte fields are transmitted in little-endian byte order + * + * INTR Endpoint: a single uint32_t storing the current space in the fifo + * + * OUT Endpoint: single message of type ucan_message_out_t is + * transmitted on the out endpoint + * + * IN Endpoint: multiple messages ucan_message_in_t concatenated in + * the following way: + * + * m[n].len <=> the length of message n (including the header), in bytes + * m[n] is aligned to a 4 byte boundary, hence + * offset(m[0]) := 0; + * offset(m[n+1]) := offset(m[n]) + ((m[n].len + 3) & ~3) + * (e.g. a message with len == 5 is padded to 8 bytes, so the + * following message starts 8 bytes later) + * + * this implies that + * offset(m[n]) % 4 <=> 0 + */ + +/* Device Global Commands */ +enum { + UCAN_DEVICE_GET_FW_STRING = 0, +}; + +/* UCAN Commands */ +enum { + /* start the can transceiver - val defines the operation mode */ + UCAN_COMMAND_START = 0, + /* cancel pending transmissions and stop the can transceiver */ + UCAN_COMMAND_STOP = 1, + /* send can transceiver into low-power sleep mode */ + UCAN_COMMAND_SLEEP = 2, + /* wake up can transceiver from low-power sleep mode */ + UCAN_COMMAND_WAKEUP = 3, + /* reset the can transceiver */ + UCAN_COMMAND_RESET = 4, + /* get piece of info from the can transceiver - subcmd defines what + * piece + */ + UCAN_COMMAND_GET = 5, + /* clear or disable hardware filter - subcmd defines which of the two */ + UCAN_COMMAND_FILTER = 6, + /* Setup bittiming */ + UCAN_COMMAND_SET_BITTIMING = 7, + /* recover from bus-off state */ + UCAN_COMMAND_RESTART = 8, +}; + +/* UCAN_COMMAND_START and UCAN_COMMAND_GET_INFO operation modes (bitmap). + * Undefined bits must be set to 0. + */ +enum { + UCAN_MODE_LOOPBACK = BIT(0), + UCAN_MODE_SILENT = BIT(1), + UCAN_MODE_3_SAMPLES = BIT(2), + UCAN_MODE_ONE_SHOT = BIT(3), + UCAN_MODE_BERR_REPORT = BIT(4), +}; + +/* UCAN_COMMAND_GET subcommands */ +enum { + UCAN_COMMAND_GET_INFO = 0, + UCAN_COMMAND_GET_PROTOCOL_VERSION = 1, +}; + +/* UCAN_COMMAND_FILTER subcommands */ +enum { + UCAN_FILTER_CLEAR = 0, + UCAN_FILTER_DISABLE = 1, + UCAN_FILTER_ENABLE = 2, +}; + +/* OUT endpoint message types */ +enum { + UCAN_OUT_TX = 2, /* transmit a CAN frame */ +}; + +/* IN endpoint message types */ +enum { + UCAN_IN_TX_COMPLETE = 1, /* CAN frame transmission completed */ + UCAN_IN_RX = 2, /* CAN frame received */ +}; + +struct ucan_ctl_cmd_start { + __le16 mode; /* OR-ing any of UCAN_MODE_* */ +} __packed; + +struct ucan_ctl_cmd_set_bittiming { + __le32 tq; /* Time quanta (TQ) in nanoseconds */ + __le16 brp; /* TQ Prescaler */ + __le16 sample_point; /* Sample point in tenths of a percent */ + u8 prop_seg; /* Propagation segment in TQs */ + u8 phase_seg1; /* Phase buffer segment 1 in TQs */ + u8 phase_seg2; /* Phase buffer segment 2 in TQs */ + u8 sjw; /* Synchronisation jump width in TQs */ +} __packed; + +struct ucan_ctl_cmd_device_info { + __le32 freq; /* Clock Frequency for tq generation */ + u8 tx_fifo; /* Size of the transmission fifo */ + u8 sjw_max; /* can_bittiming fields...
*/ + u8 tseg1_min; + u8 tseg1_max; + u8 tseg2_min; + u8 tseg2_max; + __le16 brp_inc; + __le32 brp_min; + __le32 brp_max; /* ...can_bittiming fields */ + __le16 ctrlmodes; /* supported control modes */ + __le16 hwfilter; /* Number of HW filter banks */ + __le16 rxmboxes; /* Number of receive Mailboxes */ +} __packed; + +struct ucan_ctl_cmd_get_protocol_version { + __le32 version; +} __packed; + +union ucan_ctl_payload { + /* Start the device + * bmRequest == UCAN_COMMAND_START + */ + struct ucan_ctl_cmd_start cmd_start; + /* Setup Bittiming + * bmRequest == UCAN_COMMAND_SET_BITTIMING + */ + struct ucan_ctl_cmd_set_bittiming cmd_set_bittiming; + /* Get Device Information + * bmRequest == UCAN_COMMAND_GET; wValue = UCAN_COMMAND_GET_INFO + */ + struct ucan_ctl_cmd_device_info cmd_get_device_info; + /* Get Protocol Version + * bmRequest == UCAN_COMMAND_GET; + * wValue = UCAN_COMMAND_GET_PROTOCOL_VERSION + */ + struct ucan_ctl_cmd_get_protocol_version cmd_get_protocol_version; + + u8 raw[128]; +} __packed; + +enum { + UCAN_TX_COMPLETE_SUCCESS = BIT(0), +}; + +/* Transmission Complete within ucan_message_in */ +struct ucan_tx_complete_entry_t { + u8 echo_index; + u8 flags; +} __packed __aligned(0x2); + +/* CAN Data message format within ucan_message_in/out */ +struct ucan_can_msg { + /* note DLC is computed by + * msg.len - sizeof (msg.len) + * - sizeof (msg.type) + * - sizeof (msg.can_msg.id) + */ + __le32 id; + + union { + u8 data[CAN_MAX_DLEN]; /* Data of CAN frames */ + u8 dlc; /* RTR dlc */ + }; +} __packed; + +/* OUT Endpoint, outbound messages */ +struct ucan_message_out { + __le16 len; /* Length of the content including the header */ + u8 type; /* UCAN_OUT_TX and friends */ + u8 subtype; /* command sub type */ + + union { + /* Transmit CAN frame + * (type == UCAN_OUT_TX) && ((msg.can_msg.id & CAN_RTR_FLAG) == 0) + * subtype stores the echo id + */ + struct ucan_can_msg can_msg; + } msg; +} __packed __aligned(0x4); + +/* IN Endpoint, inbound messages */ +struct ucan_message_in { + __le16 len; /* Length of the content including the header */ + u8 type; /* UCAN_IN_RX and friends */ + u8 subtype; /* command sub type */ + + union { + /* CAN Frame received + * (type == UCAN_IN_RX) + * && ((msg.can_msg.id & CAN_RTR_FLAG) == 0) + */ + struct ucan_can_msg can_msg; + + /* CAN transmission complete + * (type == UCAN_IN_TX_COMPLETE) + */ + struct ucan_tx_complete_entry_t can_tx_complete_msg[0]; + } __aligned(0x4) msg; +} __packed; + +/* Macros to calculate message lengths */ +#define UCAN_OUT_HDR_SIZE offsetof(struct ucan_message_out, msg) + +#define UCAN_IN_HDR_SIZE offsetof(struct ucan_message_in, msg) +#define UCAN_IN_LEN(member) (UCAN_IN_HDR_SIZE + sizeof(member)) + +struct ucan_priv; + +/* Context Information for transmission URBs */ +struct ucan_urb_context { + struct ucan_priv *up; + u8 dlc; + bool allocated; +}; + +/* Information reported by the USB device */ +struct ucan_device_info { + struct can_bittiming_const bittiming_const; + u8 tx_fifo; +}; + +/* Driver private data */ +struct ucan_priv { + /* must be the first member */ + struct can_priv can; + + /* linux USB device structures */ + struct usb_device *udev; + struct usb_interface *intf; + struct net_device *netdev; + + /* lock for can->echo_skb (used around + * can_put/get/free_echo_skb) + */ + spinlock_t echo_skb_lock; + + /* usb device information */ + u8 intf_index; + u8 in_ep_addr; + u8 out_ep_addr; + u16 in_ep_size; + + /* transmission and reception buffers */ + struct usb_anchor rx_urbs; + struct usb_anchor tx_urbs; + + union
ucan_ctl_payload *ctl_msg_buffer; + struct ucan_device_info device_info; + + /* transmission control information and locks */ + spinlock_t context_lock; + unsigned int available_tx_urbs; + struct ucan_urb_context *context_array; +}; + +static u8 ucan_get_can_dlc(struct ucan_can_msg *msg, u16 len) +{ + if (le32_to_cpu(msg->id) & CAN_RTR_FLAG) + return get_can_dlc(msg->dlc); + else + return get_can_dlc(len - (UCAN_IN_HDR_SIZE + sizeof(msg->id))); +} + +static void ucan_release_context_array(struct ucan_priv *up) +{ + if (!up->context_array) + return; + + /* lock is not needed because the driver is currently opening or closing */ + up->available_tx_urbs = 0; + + kfree(up->context_array); + up->context_array = NULL; +} + +static int ucan_alloc_context_array(struct ucan_priv *up) +{ + int i; + + /* release contexts if any */ + ucan_release_context_array(up); + + up->context_array = kcalloc(up->device_info.tx_fifo, + sizeof(*up->context_array), + GFP_KERNEL); + if (!up->context_array) { + netdev_err(up->netdev, + "Not enough memory to allocate tx contexts\n"); + return -ENOMEM; + } + + for (i = 0; i < up->device_info.tx_fifo; i++) { + up->context_array[i].allocated = false; + up->context_array[i].up = up; + } + + /* lock is not needed because the driver is currently opening */ + up->available_tx_urbs = up->device_info.tx_fifo; + + return 0; +} + +static struct ucan_urb_context *ucan_alloc_context(struct ucan_priv *up) +{ + int i; + unsigned long flags; + struct ucan_urb_context *ret = NULL; + + if (WARN_ON_ONCE(!up->context_array)) + return NULL; + + /* execute context operation atomically */ + spin_lock_irqsave(&up->context_lock, flags); + + for (i = 0; i < up->device_info.tx_fifo; i++) { + if (!up->context_array[i].allocated) { + /* update context */ + ret = &up->context_array[i]; + up->context_array[i].allocated = true; + + /* stop queue if necessary */ + up->available_tx_urbs--; + if (!up->available_tx_urbs) + netif_stop_queue(up->netdev); + + break; + } + } + + spin_unlock_irqrestore(&up->context_lock, flags); + return ret; +} + +static bool ucan_release_context(struct ucan_priv *up, + struct ucan_urb_context *ctx) +{ + unsigned long flags; + bool ret = false; + + if (WARN_ON_ONCE(!up->context_array)) + return false; + + /* execute context operation atomically */ + spin_lock_irqsave(&up->context_lock, flags); + + /* context was not allocated, maybe the device sent garbage */ + if (ctx->allocated) { + ctx->allocated = false; + + /* check if the queue needs to be woken */ + if (!up->available_tx_urbs) + netif_wake_queue(up->netdev); + up->available_tx_urbs++; + + ret = true; + } + + spin_unlock_irqrestore(&up->context_lock, flags); + return ret; +} + +static int ucan_ctrl_command_out(struct ucan_priv *up, + u8 cmd, u16 subcmd, u16 datalen) +{ + return usb_control_msg(up->udev, + usb_sndctrlpipe(up->udev, 0), + cmd, + USB_DIR_OUT | USB_TYPE_VENDOR | + USB_RECIP_INTERFACE, + subcmd, + up->intf_index, + up->ctl_msg_buffer, + datalen, + UCAN_USB_CTL_PIPE_TIMEOUT); +} + +static int ucan_device_request_in(struct ucan_priv *up, + u8 cmd, u16 subcmd, u16 datalen) +{ + return usb_control_msg(up->udev, + usb_rcvctrlpipe(up->udev, 0), + cmd, + USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + subcmd, + 0, + up->ctl_msg_buffer, + datalen, + UCAN_USB_CTL_PIPE_TIMEOUT); +} + +/* Parse the device information structure reported by the device and + * set up private variables accordingly + */ +static void ucan_parse_device_info(struct ucan_priv *up, + struct ucan_ctl_cmd_device_info *device_info) +{ + struct
can_bittiming_const *bittiming = + &up->device_info.bittiming_const; + u16 ctrlmodes; + + /* store the data */ + up->can.clock.freq = le32_to_cpu(device_info->freq); + up->device_info.tx_fifo = device_info->tx_fifo; + strcpy(bittiming->name, "ucan"); + bittiming->tseg1_min = device_info->tseg1_min; + bittiming->tseg1_max = device_info->tseg1_max; + bittiming->tseg2_min = device_info->tseg2_min; + bittiming->tseg2_max = device_info->tseg2_max; + bittiming->sjw_max = device_info->sjw_max; + bittiming->brp_min = le32_to_cpu(device_info->brp_min); + bittiming->brp_max = le32_to_cpu(device_info->brp_max); + bittiming->brp_inc = le16_to_cpu(device_info->brp_inc); + + ctrlmodes = le16_to_cpu(device_info->ctrlmodes); + + up->can.ctrlmode_supported = 0; + + if (ctrlmodes & UCAN_MODE_LOOPBACK) + up->can.ctrlmode_supported |= CAN_CTRLMODE_LOOPBACK; + if (ctrlmodes & UCAN_MODE_SILENT) + up->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY; + if (ctrlmodes & UCAN_MODE_3_SAMPLES) + up->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES; + if (ctrlmodes & UCAN_MODE_ONE_SHOT) + up->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT; + if (ctrlmodes & UCAN_MODE_BERR_REPORT) + up->can.ctrlmode_supported |= CAN_CTRLMODE_BERR_REPORTING; +} + +/* Handle a CAN error frame that we have received from the device. + * Returns true if the can state has changed. + */ +static bool ucan_handle_error_frame(struct ucan_priv *up, + struct ucan_message_in *m, + canid_t canid) +{ + enum can_state new_state = up->can.state; + struct net_device_stats *net_stats = &up->netdev->stats; + struct can_device_stats *can_stats = &up->can.can_stats; + + if (canid & CAN_ERR_LOSTARB) + can_stats->arbitration_lost++; + + if (canid & CAN_ERR_BUSERROR) + can_stats->bus_error++; + + if (canid & CAN_ERR_ACK) + net_stats->tx_errors++; + + if (canid & CAN_ERR_BUSOFF) + new_state = CAN_STATE_BUS_OFF; + + /* controller problems, details in data[1] */ + if (canid & CAN_ERR_CRTL) { + u8 d1 = m->msg.can_msg.data[1]; + + if (d1 & CAN_ERR_CRTL_RX_OVERFLOW) + net_stats->rx_over_errors++; + + /* controller state bits: if multiple are set the worst wins */ + if (d1 & CAN_ERR_CRTL_ACTIVE) + new_state = CAN_STATE_ERROR_ACTIVE; + + if (d1 & (CAN_ERR_CRTL_RX_WARNING | CAN_ERR_CRTL_TX_WARNING)) + new_state = CAN_STATE_ERROR_WARNING; + + if (d1 & (CAN_ERR_CRTL_RX_PASSIVE | CAN_ERR_CRTL_TX_PASSIVE)) + new_state = CAN_STATE_ERROR_PASSIVE; + } + + /* protocol error, details in data[2] */ + if (canid & CAN_ERR_PROT) { + u8 d2 = m->msg.can_msg.data[2]; + + if (d2 & CAN_ERR_PROT_TX) + net_stats->tx_errors++; + else + net_stats->rx_errors++; + } + + /* no state change - we are done */ + if (up->can.state == new_state) + return false; + + /* we switched into a better state */ + if (up->can.state > new_state) { + up->can.state = new_state; + return true; + } + + /* we switched into a worse state */ + up->can.state = new_state; + switch (new_state) { + case CAN_STATE_BUS_OFF: + can_stats->bus_off++; + can_bus_off(up->netdev); + break; + case CAN_STATE_ERROR_PASSIVE: + can_stats->error_passive++; + break; + case CAN_STATE_ERROR_WARNING: + can_stats->error_warning++; + break; + default: + break; + } + return true; +} + +/* Callback on reception of a can frame via the IN endpoint + * + * This function allocates an skb and transfers it to the Linux + * network stack + */ +static void ucan_rx_can_msg(struct ucan_priv *up, struct ucan_message_in *m) +{ + int len; + canid_t canid; + struct can_frame *cf; + struct sk_buff *skb; + struct net_device_stats *stats =
&up->netdev->stats; + + /* get the contents of the length field */ + len = le16_to_cpu(m->len); + + /* check sanity */ + if (len < UCAN_IN_HDR_SIZE + sizeof(m->msg.can_msg.id)) { + netdev_warn(up->netdev, "invalid input message len: %d\n", len); + return; + } + + /* handle error frames */ + canid = le32_to_cpu(m->msg.can_msg.id); + if (canid & CAN_ERR_FLAG) { + bool busstate_changed = ucan_handle_error_frame(up, m, canid); + + /* if berr-reporting is off only state changes get through */ + if (!(up->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) && + !busstate_changed) + return; + } else { + canid_t canid_mask; + /* compute the mask for canid */ + canid_mask = CAN_RTR_FLAG; + if (canid & CAN_EFF_FLAG) + canid_mask |= CAN_EFF_MASK | CAN_EFF_FLAG; + else + canid_mask |= CAN_SFF_MASK; + + if (canid & ~canid_mask) + netdev_warn(up->netdev, + "unexpected bits set (canid %x, mask %x)", + canid, canid_mask); + + canid &= canid_mask; + } + + /* allocate skb */ + skb = alloc_can_skb(up->netdev, &cf); + if (!skb) + return; + + /* fill the can frame */ + cf->can_id = canid; + + /* compute DLC taking RTR_FLAG into account */ + cf->can_dlc = ucan_get_can_dlc(&m->msg.can_msg, len); + + /* copy the payload of non RTR frames */ + if (!(cf->can_id & CAN_RTR_FLAG) || (cf->can_id & CAN_ERR_FLAG)) + memcpy(cf->data, m->msg.can_msg.data, cf->can_dlc); + + /* don't count error frames as real packets */ + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + + /* pass it to Linux */ + netif_rx(skb); +} + +/* callback indicating completed transmission */ +static void ucan_tx_complete_msg(struct ucan_priv *up, + struct ucan_message_in *m) +{ + unsigned long flags; + u16 count, i; + u8 echo_index, dlc; + u16 len = le16_to_cpu(m->len); + + struct ucan_urb_context *context; + + if (len < UCAN_IN_HDR_SIZE || (len % 2 != 0)) { + netdev_err(up->netdev, "invalid tx complete length\n"); + return; + } + + count = (len - UCAN_IN_HDR_SIZE) / 2; + for (i = 0; i < count; i++) { + /* we did not submit such echo ids */ + echo_index = m->msg.can_tx_complete_msg[i].echo_index; + if (echo_index >= up->device_info.tx_fifo) { + up->netdev->stats.tx_errors++; + netdev_err(up->netdev, + "invalid echo_index %d received\n", + echo_index); + continue; + } + + /* gather information from the context */ + context = &up->context_array[echo_index]; + dlc = READ_ONCE(context->dlc); + + /* Release context and restart queue if necessary. 
+ * Also check if the context was allocated + */ + if (!ucan_release_context(up, context)) + continue; + + spin_lock_irqsave(&up->echo_skb_lock, flags); + if (m->msg.can_tx_complete_msg[i].flags & + UCAN_TX_COMPLETE_SUCCESS) { + /* update statistics */ + up->netdev->stats.tx_packets++; + up->netdev->stats.tx_bytes += dlc; + can_get_echo_skb(up->netdev, echo_index); + } else { + up->netdev->stats.tx_dropped++; + can_free_echo_skb(up->netdev, echo_index); + } + spin_unlock_irqrestore(&up->echo_skb_lock, flags); + } +} + +/* callback on reception of a USB message */ +static void ucan_read_bulk_callback(struct urb *urb) +{ + int ret; + int pos; + struct ucan_priv *up = urb->context; + struct net_device *netdev = up->netdev; + struct ucan_message_in *m; + + /* the device is not up and the driver should not receive any + * data on the bulk in pipe + */ + if (WARN_ON(!up->context_array)) { + usb_free_coherent(up->udev, + up->in_ep_size, + urb->transfer_buffer, + urb->transfer_dma); + return; + } + + /* check URB status */ + switch (urb->status) { + case 0: + break; + case -ENOENT: + case -EPIPE: + case -EPROTO: + case -ESHUTDOWN: + case -ETIME: + /* urb is not resubmitted -> free dma data */ + usb_free_coherent(up->udev, + up->in_ep_size, + urb->transfer_buffer, + urb->transfer_dma); + netdev_dbg(up->netdev, "not resubmitting urb; status: %d\n", + urb->status); + return; + default: + goto resubmit; + } + + /* sanity check */ + if (!netif_device_present(netdev)) + return; + + /* iterate over input */ + pos = 0; + while (pos < urb->actual_length) { + int len; + + /* check sanity (length of header) */ + if ((urb->actual_length - pos) < UCAN_IN_HDR_SIZE) { + netdev_warn(up->netdev, + "invalid message (short; no hdr; l:%d)\n", + urb->actual_length); + goto resubmit; + } + + /* setup the message address */ + m = (struct ucan_message_in *) + ((u8 *)urb->transfer_buffer + pos); + len = le16_to_cpu(m->len); + + /* check sanity (length of content) */ + if (urb->actual_length - pos < len) { + netdev_warn(up->netdev, + "invalid message (short; no data; l:%d)\n", + urb->actual_length); + print_hex_dump(KERN_WARNING, + "raw data: ", + DUMP_PREFIX_ADDRESS, + 16, + 1, + urb->transfer_buffer, + urb->actual_length, + true); + + goto resubmit; + } + + switch (m->type) { + case UCAN_IN_RX: + ucan_rx_can_msg(up, m); + break; + case UCAN_IN_TX_COMPLETE: + ucan_tx_complete_msg(up, m); + break; + default: + netdev_warn(up->netdev, + "invalid message (type; t:%d)\n", + m->type); + break; + } + + /* proceed to next message */ + pos += len; + /* align to 4 byte boundary */ + pos = round_up(pos, 4); + } + +resubmit: + /* resubmit urb when done */ + usb_fill_bulk_urb(urb, up->udev, + usb_rcvbulkpipe(up->udev, + up->in_ep_addr), + urb->transfer_buffer, + up->in_ep_size, + ucan_read_bulk_callback, + up); + + usb_anchor_urb(urb, &up->rx_urbs); + ret = usb_submit_urb(urb, GFP_KERNEL); + + if (ret < 0) { + netdev_err(up->netdev, + "failed resubmitting read bulk urb: %d\n", + ret); + + usb_unanchor_urb(urb); + usb_free_coherent(up->udev, + up->in_ep_size, + urb->transfer_buffer, + urb->transfer_dma); + + if (ret == -ENODEV) + netif_device_detach(netdev); + } +} + +/* callback after transmission of a USB message */ +static void ucan_write_bulk_callback(struct urb *urb) +{ + unsigned long flags; + struct ucan_priv *up; + struct ucan_urb_context *context = urb->context; + + /* get the urb context */ + if (WARN_ON_ONCE(!context)) + return; + + /* free up our allocated buffer */ + usb_free_coherent(urb->dev, + sizeof(struct
ucan_message_out), + urb->transfer_buffer, + urb->transfer_dma); + + up = context->up; + if (WARN_ON_ONCE(!up)) + return; + + /* sanity check */ + if (!netif_device_present(up->netdev)) + return; + + /* transmission failed (USB - the device will not send a TX complete) */ + if (urb->status) { + netdev_warn(up->netdev, + "failed to transmit USB message to device: %d\n", + urb->status); + + /* update counters and clean up */ + spin_lock_irqsave(&up->echo_skb_lock, flags); + can_free_echo_skb(up->netdev, context - up->context_array); + spin_unlock_irqrestore(&up->echo_skb_lock, flags); + + up->netdev->stats.tx_dropped++; + + /* release context and restart the queue if necessary */ + if (!ucan_release_context(up, context)) + netdev_err(up->netdev, + "urb failed, failed to release context\n"); + } +} + +static void ucan_cleanup_rx_urbs(struct ucan_priv *up, struct urb **urbs) +{ + int i; + + for (i = 0; i < UCAN_MAX_RX_URBS; i++) { + if (urbs[i]) { + usb_unanchor_urb(urbs[i]); + usb_free_coherent(up->udev, + up->in_ep_size, + urbs[i]->transfer_buffer, + urbs[i]->transfer_dma); + usb_free_urb(urbs[i]); + } + } + + memset(urbs, 0, sizeof(*urbs) * UCAN_MAX_RX_URBS); +} + +static int ucan_prepare_and_anchor_rx_urbs(struct ucan_priv *up, + struct urb **urbs) +{ + int i; + + memset(urbs, 0, sizeof(*urbs) * UCAN_MAX_RX_URBS); + + for (i = 0; i < UCAN_MAX_RX_URBS; i++) { + void *buf; + + urbs[i] = usb_alloc_urb(0, GFP_KERNEL); + if (!urbs[i]) + goto err; + + buf = usb_alloc_coherent(up->udev, + up->in_ep_size, + GFP_KERNEL, &urbs[i]->transfer_dma); + if (!buf) { + /* cleanup this urb */ + usb_free_urb(urbs[i]); + urbs[i] = NULL; + goto err; + } + + usb_fill_bulk_urb(urbs[i], up->udev, + usb_rcvbulkpipe(up->udev, + up->in_ep_addr), + buf, + up->in_ep_size, + ucan_read_bulk_callback, + up); + + urbs[i]->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + + usb_anchor_urb(urbs[i], &up->rx_urbs); + } + return 0; + +err: + /* cleanup other unsubmitted urbs */ + ucan_cleanup_rx_urbs(up, urbs); + return -ENOMEM; +} + +/* Submits rx urbs with the semantics: either submit all, or clean up + * everything. In case of errors submitted urbs are killed and all urbs in + * the array are freed. In case of no errors every entry in the urb + * array is set to NULL. + */ +static int ucan_submit_rx_urbs(struct ucan_priv *up, struct urb **urbs) +{ + int i, ret; + + /* Iterate over all urbs to submit. On success remove the urb + * from the list.
+ */ + for (i = 0; i < UCAN_MAX_RX_URBS; i++) { + ret = usb_submit_urb(urbs[i], GFP_KERNEL); + if (ret) { + netdev_err(up->netdev, + "could not submit urb; code: %d\n", + ret); + goto err; + } + + /* Anchor URB and drop reference, USB core will take + * care of freeing it + */ + usb_free_urb(urbs[i]); + urbs[i] = NULL; + } + return 0; + +err: + /* Cleanup unsubmitted urbs */ + ucan_cleanup_rx_urbs(up, urbs); + + /* Kill urbs that are already submitted */ + usb_kill_anchored_urbs(&up->rx_urbs); + + return ret; +} + +/* Open the network device */ +static int ucan_open(struct net_device *netdev) +{ + int ret, ret_cleanup; + u16 ctrlmode; + struct urb *urbs[UCAN_MAX_RX_URBS]; + struct ucan_priv *up = netdev_priv(netdev); + + ret = ucan_alloc_context_array(up); + if (ret) + return ret; + + /* Allocate and prepare IN URBS - allocated and anchored + * urbs are stored in urbs[] for cleanup + */ + ret = ucan_prepare_and_anchor_rx_urbs(up, urbs); + if (ret) + goto err_contexts; + + /* Check the control mode */ + ctrlmode = 0; + if (up->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) + ctrlmode |= UCAN_MODE_LOOPBACK; + if (up->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) + ctrlmode |= UCAN_MODE_SILENT; + if (up->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) + ctrlmode |= UCAN_MODE_3_SAMPLES; + if (up->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) + ctrlmode |= UCAN_MODE_ONE_SHOT; + + /* Enable this in any case - filtering is done within the + * receive path + */ + ctrlmode |= UCAN_MODE_BERR_REPORT; + up->ctl_msg_buffer->cmd_start.mode = cpu_to_le16(ctrlmode); + + /* Driver is ready to receive data - start the USB device */ + ret = ucan_ctrl_command_out(up, UCAN_COMMAND_START, 0, 2); + if (ret < 0) { + netdev_err(up->netdev, + "could not start device, code: %d\n", + ret); + goto err_reset; + } + + /* Call CAN layer open */ + ret = open_candev(netdev); + if (ret) + goto err_stop; + + /* Driver is ready to receive data.
Submit RX URBS */ + ret = ucan_submit_rx_urbs(up, urbs); + if (ret) + goto err_stop; + + up->can.state = CAN_STATE_ERROR_ACTIVE; + + /* Start the network queue */ + netif_start_queue(netdev); + + return 0; + +err_stop: + /* The device has already been started; stop it */ + ret_cleanup = ucan_ctrl_command_out(up, UCAN_COMMAND_STOP, 0, 0); + if (ret_cleanup < 0) + netdev_err(up->netdev, + "could not stop device, code: %d\n", + ret_cleanup); + +err_reset: + /* The device might have received data, reset it for a + * consistent state + */ + ret_cleanup = ucan_ctrl_command_out(up, UCAN_COMMAND_RESET, 0, 0); + if (ret_cleanup < 0) + netdev_err(up->netdev, + "could not reset device, code: %d\n", + ret_cleanup); + + /* clean up unsubmitted urbs */ + ucan_cleanup_rx_urbs(up, urbs); + +err_contexts: + ucan_release_context_array(up); + return ret; +} + +static struct urb *ucan_prepare_tx_urb(struct ucan_priv *up, + struct ucan_urb_context *context, + struct can_frame *cf, + u8 echo_index) +{ + int mlen; + struct urb *urb; + struct ucan_message_out *m; + + /* create a URB, and a buffer for it, and copy the data to the URB */ + urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!urb) { + netdev_err(up->netdev, "no memory left for URBs\n"); + return NULL; + } + + m = usb_alloc_coherent(up->udev, + sizeof(struct ucan_message_out), + GFP_ATOMIC, + &urb->transfer_dma); + if (!m) { + netdev_err(up->netdev, "no memory left for USB buffer\n"); + usb_free_urb(urb); + return NULL; + } + + /* build the USB message */ + m->type = UCAN_OUT_TX; + m->msg.can_msg.id = cpu_to_le32(cf->can_id); + + if (cf->can_id & CAN_RTR_FLAG) { + mlen = UCAN_OUT_HDR_SIZE + + offsetof(struct ucan_can_msg, dlc) + + sizeof(m->msg.can_msg.dlc); + m->msg.can_msg.dlc = cf->can_dlc; + } else { + mlen = UCAN_OUT_HDR_SIZE + + sizeof(m->msg.can_msg.id) + cf->can_dlc; + memcpy(m->msg.can_msg.data, cf->data, cf->can_dlc); + } + m->len = cpu_to_le16(mlen); + + context->dlc = cf->can_dlc; + + m->subtype = echo_index; + + /* build the urb */ + usb_fill_bulk_urb(urb, up->udev, + usb_sndbulkpipe(up->udev, + up->out_ep_addr), + m, mlen, ucan_write_bulk_callback, context); + urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + + return urb; +} + +static void ucan_clean_up_tx_urb(struct ucan_priv *up, struct urb *urb) +{ + usb_free_coherent(up->udev, sizeof(struct ucan_message_out), + urb->transfer_buffer, urb->transfer_dma); + usb_free_urb(urb); +} + +/* callback when Linux needs to send a can frame */ +static netdev_tx_t ucan_start_xmit(struct sk_buff *skb, + struct net_device *netdev) +{ + unsigned long flags; + int ret; + u8 echo_index; + struct urb *urb; + struct ucan_urb_context *context; + struct ucan_priv *up = netdev_priv(netdev); + struct can_frame *cf = (struct can_frame *)skb->data; + + /* check skb */ + if (can_dropped_invalid_skb(netdev, skb)) + return NETDEV_TX_OK; + + /* allocate a context and slow down the tx path if the fifo state is low */ + context = ucan_alloc_context(up); + if (WARN_ON_ONCE(!context)) + return NETDEV_TX_BUSY; + + echo_index = context - up->context_array; + + /* prepare urb for transmission */ + urb = ucan_prepare_tx_urb(up, context, cf, echo_index); + if (!urb) + goto drop; + + /* put the skb on can loopback stack */ + spin_lock_irqsave(&up->echo_skb_lock, flags); + can_put_echo_skb(skb, up->netdev, echo_index); + spin_unlock_irqrestore(&up->echo_skb_lock, flags); + + /* transmit it */ + usb_anchor_urb(urb, &up->tx_urbs); + ret = usb_submit_urb(urb, GFP_ATOMIC); + + /* cleanup urb */ + if (ret) { + /* on error, clean up */ +
usb_unanchor_urb(urb); + ucan_clean_up_tx_urb(up, urb); + if (!ucan_release_context(up, context)) + netdev_err(up->netdev, + "xmit err: failed to release context\n"); + + /* remove the skb from the echo stack - this also + * frees the skb + */ + spin_lock_irqsave(&up->echo_skb_lock, flags); + can_free_echo_skb(up->netdev, echo_index); + spin_unlock_irqrestore(&up->echo_skb_lock, flags); + + if (ret == -ENODEV) { + netif_device_detach(up->netdev); + } else { + netdev_warn(up->netdev, + "xmit err: failed to submit urb %d\n", + ret); + up->netdev->stats.tx_dropped++; + } + return NETDEV_TX_OK; + } + + netif_trans_update(netdev); + + /* release ref, as we do not need the urb anymore */ + usb_free_urb(urb); + + return NETDEV_TX_OK; + +drop: + if (!ucan_release_context(up, context)) + netdev_err(up->netdev, + "xmit drop: failed to release context\n"); + dev_kfree_skb(skb); + up->netdev->stats.tx_dropped++; + + return NETDEV_TX_OK; +} + +/* Device goes down + * + * Clean up used resources + */ +static int ucan_close(struct net_device *netdev) +{ + int ret; + struct ucan_priv *up = netdev_priv(netdev); + + up->can.state = CAN_STATE_STOPPED; + + /* stop sending data */ + usb_kill_anchored_urbs(&up->tx_urbs); + + /* stop receiving data */ + usb_kill_anchored_urbs(&up->rx_urbs); + + /* stop and reset can device */ + ret = ucan_ctrl_command_out(up, UCAN_COMMAND_STOP, 0, 0); + if (ret < 0) + netdev_err(up->netdev, + "could not stop device, code: %d\n", + ret); + + ret = ucan_ctrl_command_out(up, UCAN_COMMAND_RESET, 0, 0); + if (ret < 0) + netdev_err(up->netdev, + "could not reset device, code: %d\n", + ret); + + netif_stop_queue(netdev); + + ucan_release_context_array(up); + + close_candev(up->netdev); + return 0; +} + +/* CAN driver callbacks */ +static const struct net_device_ops ucan_netdev_ops = { + .ndo_open = ucan_open, + .ndo_stop = ucan_close, + .ndo_start_xmit = ucan_start_xmit, + .ndo_change_mtu = can_change_mtu, +}; + +/* Request to set bittiming + * + * This function generates a USB set bittiming message and transmits + * it to the device + */ +static int ucan_set_bittiming(struct net_device *netdev) +{ + int ret; + struct ucan_priv *up = netdev_priv(netdev); + struct ucan_ctl_cmd_set_bittiming *cmd_set_bittiming; + + cmd_set_bittiming = &up->ctl_msg_buffer->cmd_set_bittiming; + cmd_set_bittiming->tq = cpu_to_le32(up->can.bittiming.tq); + cmd_set_bittiming->brp = cpu_to_le16(up->can.bittiming.brp); + cmd_set_bittiming->sample_point = + cpu_to_le16(up->can.bittiming.sample_point); + cmd_set_bittiming->prop_seg = up->can.bittiming.prop_seg; + cmd_set_bittiming->phase_seg1 = up->can.bittiming.phase_seg1; + cmd_set_bittiming->phase_seg2 = up->can.bittiming.phase_seg2; + cmd_set_bittiming->sjw = up->can.bittiming.sjw; + + ret = ucan_ctrl_command_out(up, UCAN_COMMAND_SET_BITTIMING, 0, + sizeof(*cmd_set_bittiming)); + return (ret < 0) ? ret : 0; +} + +/* Restart the device to get it out of BUS-OFF state. + * Called when the user runs "ip link set can1 type can restart".
+ */ +static int ucan_set_mode(struct net_device *netdev, enum can_mode mode) +{ + int ret; + unsigned long flags; + struct ucan_priv *up = netdev_priv(netdev); + + switch (mode) { + case CAN_MODE_START: + netdev_dbg(up->netdev, "restarting device\n"); + + ret = ucan_ctrl_command_out(up, UCAN_COMMAND_RESTART, 0, 0); + up->can.state = CAN_STATE_ERROR_ACTIVE; + + /* check if the queue can be restarted; + * up->available_tx_urbs must be protected by the + * lock + */ + spin_lock_irqsave(&up->context_lock, flags); + + if (up->available_tx_urbs > 0) + netif_wake_queue(up->netdev); + + spin_unlock_irqrestore(&up->context_lock, flags); + + return ret; + default: + return -EOPNOTSUPP; + } +} + +/* Probe the device, reset it and gather general device information */ +static int ucan_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + int ret; + int i; + u32 protocol_version; + struct usb_device *udev; + struct net_device *netdev; + struct usb_host_interface *iface_desc; + struct ucan_priv *up; + struct usb_endpoint_descriptor *ep; + u16 in_ep_size; + u16 out_ep_size; + u8 in_ep_addr; + u8 out_ep_addr; + union ucan_ctl_payload *ctl_msg_buffer; + char firmware_str[sizeof(union ucan_ctl_payload) + 1]; + + udev = interface_to_usbdev(intf); + + /* Stage 1 - Interface Parsing + * --------------------------- + * + * Identify the device USB interface descriptor and its + * endpoints. Probing is aborted on errors. + */ + + /* check if the interface is sane */ + iface_desc = intf->cur_altsetting; + if (!iface_desc) + return -ENODEV; + + dev_info(&udev->dev, + "%s: probing device on interface #%d\n", + UCAN_DRIVER_NAME, + iface_desc->desc.bInterfaceNumber); + + /* interface sanity check */ + if (iface_desc->desc.bNumEndpoints != 2) { + dev_err(&udev->dev, + "%s: invalid EP count (%d)\n", + UCAN_DRIVER_NAME, iface_desc->desc.bNumEndpoints); + goto err_firmware_needs_update; + } + + /* check interface endpoints */ + in_ep_addr = 0; + out_ep_addr = 0; + in_ep_size = 0; + out_ep_size = 0; + for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) { + ep = &iface_desc->endpoint[i].desc; + + if (((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK) != 0) && + ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == + USB_ENDPOINT_XFER_BULK)) { + /* In Endpoint */ + in_ep_addr = ep->bEndpointAddress; + in_ep_addr &= USB_ENDPOINT_NUMBER_MASK; + in_ep_size = le16_to_cpu(ep->wMaxPacketSize); + } else if (((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == + 0) && + ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == + USB_ENDPOINT_XFER_BULK)) { + /* Out Endpoint */ + out_ep_addr = ep->bEndpointAddress; + out_ep_addr &= USB_ENDPOINT_NUMBER_MASK; + out_ep_size = le16_to_cpu(ep->wMaxPacketSize); + } + } + + /* check if interface is sane */ + if (!in_ep_addr || !out_ep_addr) { + dev_err(&udev->dev, "%s: invalid endpoint configuration\n", + UCAN_DRIVER_NAME); + goto err_firmware_needs_update; + } + if (in_ep_size < sizeof(struct ucan_message_in)) { + dev_err(&udev->dev, "%s: invalid in_ep MaxPacketSize\n", + UCAN_DRIVER_NAME); + goto err_firmware_needs_update; + } + if (out_ep_size < sizeof(struct ucan_message_out)) { + dev_err(&udev->dev, "%s: invalid out_ep MaxPacketSize\n", + UCAN_DRIVER_NAME); + goto err_firmware_needs_update; + } + + /* Stage 2 - Device Identification + * ------------------------------- + * + * The device interface seems to be a ucan device. Do further + * compatibility checks.
On error probing is aborted, on + * success this stage leaves the ctl_msg_buffer with the + * reported contents of a GET_INFO command (supported + * bittimings, tx_fifo depth). This information is used in + * Stage 3 for the final driver initialisation. + */ + + /* Prepare memory for control transfers */ + ctl_msg_buffer = devm_kzalloc(&udev->dev, + sizeof(union ucan_ctl_payload), + GFP_KERNEL); + if (!ctl_msg_buffer) { + dev_err(&udev->dev, + "%s: failed to allocate control pipe memory\n", + UCAN_DRIVER_NAME); + return -ENOMEM; + } + + /* get protocol version + * + * note: ucan_ctrl_command_* wrappers cannot be used yet + * because `up` is initialised in Stage 3 + */ + ret = usb_control_msg(udev, + usb_rcvctrlpipe(udev, 0), + UCAN_COMMAND_GET, + USB_DIR_IN | USB_TYPE_VENDOR | + USB_RECIP_INTERFACE, + UCAN_COMMAND_GET_PROTOCOL_VERSION, + iface_desc->desc.bInterfaceNumber, + ctl_msg_buffer, + sizeof(union ucan_ctl_payload), + UCAN_USB_CTL_PIPE_TIMEOUT); + + /* older firmware versions do not support this command - those + * are not supported by this driver + */ + if (ret != 4) { + dev_err(&udev->dev, + "%s: could not read protocol version, ret=%d\n", + UCAN_DRIVER_NAME, ret); + if (ret >= 0) + ret = -EINVAL; + goto err_firmware_needs_update; + } + + /* this driver currently supports protocol version 3 only */ + protocol_version = + le32_to_cpu(ctl_msg_buffer->cmd_get_protocol_version.version); + if (protocol_version < UCAN_PROTOCOL_VERSION_MIN || + protocol_version > UCAN_PROTOCOL_VERSION_MAX) { + dev_err(&udev->dev, + "%s: device protocol version %d is not supported\n", + UCAN_DRIVER_NAME, protocol_version); + goto err_firmware_needs_update; + } + + /* request the device information and store it in ctl_msg_buffer + * + * note: ucan_ctrl_command_* wrappers cannot be used yet + * because `up` is initialised in Stage 3 + */ + ret = usb_control_msg(udev, + usb_rcvctrlpipe(udev, 0), + UCAN_COMMAND_GET, + USB_DIR_IN | USB_TYPE_VENDOR | + USB_RECIP_INTERFACE, + UCAN_COMMAND_GET_INFO, + iface_desc->desc.bInterfaceNumber, + ctl_msg_buffer, + sizeof(ctl_msg_buffer->cmd_get_device_info), + UCAN_USB_CTL_PIPE_TIMEOUT); + + if (ret < 0) { + dev_err(&udev->dev, "%s: failed to retrieve device info\n", + UCAN_DRIVER_NAME); + goto err_firmware_needs_update; + } + if (ret < sizeof(ctl_msg_buffer->cmd_get_device_info)) { + dev_err(&udev->dev, "%s: device reported invalid device info\n", + UCAN_DRIVER_NAME); + goto err_firmware_needs_update; + } + if (ctl_msg_buffer->cmd_get_device_info.tx_fifo == 0) { + dev_err(&udev->dev, + "%s: device reported invalid tx-fifo size\n", + UCAN_DRIVER_NAME); + goto err_firmware_needs_update; + } + + /* Stage 3 - Driver Initialisation + * ------------------------------- + * + * Register device to Linux, prepare private structures and + * reset the device.
+ */ + + /* allocate driver resources */ + netdev = alloc_candev(sizeof(struct ucan_priv), + ctl_msg_buffer->cmd_get_device_info.tx_fifo); + if (!netdev) { + dev_err(&udev->dev, + "%s: cannot allocate candev\n", UCAN_DRIVER_NAME); + return -ENOMEM; + } + + up = netdev_priv(netdev); + + /* initialize data */ + up->udev = udev; + up->intf = intf; + up->netdev = netdev; + up->intf_index = iface_desc->desc.bInterfaceNumber; + up->in_ep_addr = in_ep_addr; + up->out_ep_addr = out_ep_addr; + up->in_ep_size = in_ep_size; + up->ctl_msg_buffer = ctl_msg_buffer; + up->context_array = NULL; + up->available_tx_urbs = 0; + + up->can.state = CAN_STATE_STOPPED; + up->can.bittiming_const = &up->device_info.bittiming_const; + up->can.do_set_bittiming = ucan_set_bittiming; + up->can.do_set_mode = &ucan_set_mode; + spin_lock_init(&up->context_lock); + spin_lock_init(&up->echo_skb_lock); + netdev->netdev_ops = &ucan_netdev_ops; + + usb_set_intfdata(intf, up); + SET_NETDEV_DEV(netdev, &intf->dev); + + /* parse device information + * the data retrieved in Stage 2 is still available in + * up->ctl_msg_buffer + */ + ucan_parse_device_info(up, &ctl_msg_buffer->cmd_get_device_info); + + /* just print some device information - if available */ + ret = ucan_device_request_in(up, UCAN_DEVICE_GET_FW_STRING, 0, + sizeof(union ucan_ctl_payload)); + if (ret > 0) { + /* copy string while ensuring zero termination */ + strncpy(firmware_str, up->ctl_msg_buffer->raw, + sizeof(union ucan_ctl_payload)); + firmware_str[sizeof(union ucan_ctl_payload)] = '\0'; + } else { + strcpy(firmware_str, "unknown"); + } + + /* device is compatible, reset it */ + ret = ucan_ctrl_command_out(up, UCAN_COMMAND_RESET, 0, 0); + if (ret < 0) + goto err_free_candev; + + init_usb_anchor(&up->rx_urbs); + init_usb_anchor(&up->tx_urbs); + + up->can.state = CAN_STATE_STOPPED; + + /* register the device */ + ret = register_candev(netdev); + if (ret) + goto err_free_candev; + + /* initialisation complete, log device info */ + netdev_info(up->netdev, "registered device\n"); + netdev_info(up->netdev, "firmware string: %s\n", firmware_str); + + /* success */ + return 0; + +err_free_candev: + free_candev(netdev); + return ret; + +err_firmware_needs_update: + dev_err(&udev->dev, + "%s: probe failed; try to update the device firmware\n", + UCAN_DRIVER_NAME); + return -ENODEV; +} + +/* disconnect the device */ +static void ucan_disconnect(struct usb_interface *intf) +{ + struct usb_device *udev; + struct ucan_priv *up = usb_get_intfdata(intf); + + udev = interface_to_usbdev(intf); + + usb_set_intfdata(intf, NULL); + + if (up) { + unregister_netdev(up->netdev); + free_candev(up->netdev); + } +} + +static struct usb_device_id ucan_table[] = { + /* Mule (soldered onto compute modules) */ + {USB_DEVICE_INTERFACE_NUMBER(0x2294, 0x425a, 0)}, + /* Seal (standalone USB stick) */ + {USB_DEVICE_INTERFACE_NUMBER(0x2294, 0x425b, 0)}, + {} /* Terminating entry */ +}; + +MODULE_DEVICE_TABLE(usb, ucan_table); +/* driver callbacks */ +static struct usb_driver ucan_driver = { + .name = UCAN_DRIVER_NAME, + .probe = ucan_probe, + .disconnect = ucan_disconnect, + .id_table = ucan_table, +}; + +module_usb_driver(ucan_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Martin Elshuber <martin.elshuber@theobroma-systems.com>"); +MODULE_AUTHOR("Jakob Unterwurzacher <jakob.unterwurzacher@theobroma-systems.com>"); +MODULE_DESCRIPTION("Driver for Theobroma Systems UCAN devices"); diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index 5a24039733ef..045f0845e665
100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -2,7 +2,7 @@ * * Copyright (C) 2012 - 2014 Xilinx, Inc. * Copyright (C) 2009 PetaLogix. All rights reserved. - * Copyright (C) 2017 Sandvik Mining and Construction Oy + * Copyright (C) 2017 - 2018 Sandvik Mining and Construction Oy * * Description: * This driver is developed for Axi CAN IP and for Zynq CANPS Controller. @@ -51,16 +51,34 @@ enum xcan_reg { XCAN_ISR_OFFSET = 0x1C, /* Interrupt status */ XCAN_IER_OFFSET = 0x20, /* Interrupt enable */ XCAN_ICR_OFFSET = 0x24, /* Interrupt clear */ - XCAN_TXFIFO_ID_OFFSET = 0x30,/* TX FIFO ID */ - XCAN_TXFIFO_DLC_OFFSET = 0x34, /* TX FIFO DLC */ - XCAN_TXFIFO_DW1_OFFSET = 0x38, /* TX FIFO Data Word 1 */ - XCAN_TXFIFO_DW2_OFFSET = 0x3C, /* TX FIFO Data Word 2 */ - XCAN_RXFIFO_ID_OFFSET = 0x50, /* RX FIFO ID */ - XCAN_RXFIFO_DLC_OFFSET = 0x54, /* RX FIFO DLC */ - XCAN_RXFIFO_DW1_OFFSET = 0x58, /* RX FIFO Data Word 1 */ - XCAN_RXFIFO_DW2_OFFSET = 0x5C, /* RX FIFO Data Word 2 */ + + /* not on CAN FD cores */ + XCAN_TXFIFO_OFFSET = 0x30, /* TX FIFO base */ + XCAN_RXFIFO_OFFSET = 0x50, /* RX FIFO base */ + XCAN_AFR_OFFSET = 0x60, /* Acceptance Filter */ + + /* only on CAN FD cores */ + XCAN_TRR_OFFSET = 0x0090, /* TX Buffer Ready Request */ + XCAN_AFR_EXT_OFFSET = 0x00E0, /* Acceptance Filter */ + XCAN_FSR_OFFSET = 0x00E8, /* RX FIFO Status */ + XCAN_TXMSG_BASE_OFFSET = 0x0100, /* TX Message Space */ + XCAN_RXMSG_BASE_OFFSET = 0x1100, /* RX Message Space */ }; +#define XCAN_FRAME_ID_OFFSET(frame_base) ((frame_base) + 0x00) +#define XCAN_FRAME_DLC_OFFSET(frame_base) ((frame_base) + 0x04) +#define XCAN_FRAME_DW1_OFFSET(frame_base) ((frame_base) + 0x08) +#define XCAN_FRAME_DW2_OFFSET(frame_base) ((frame_base) + 0x0C) + +#define XCAN_CANFD_FRAME_SIZE 0x48 +#define XCAN_TXMSG_FRAME_OFFSET(n) (XCAN_TXMSG_BASE_OFFSET + \ + XCAN_CANFD_FRAME_SIZE * (n)) +#define XCAN_RXMSG_FRAME_OFFSET(n) (XCAN_RXMSG_BASE_OFFSET + \ + XCAN_CANFD_FRAME_SIZE * (n)) + +/* the single TX mailbox used by this driver on CAN FD HW */ +#define XCAN_TX_MAILBOX_IDX 0 + /* CAN register bit masks - XCAN_<REG>_<BIT>_MASK */ #define XCAN_SRR_CEN_MASK 0x00000002 /* CAN enable */ #define XCAN_SRR_RESET_MASK 0x00000001 /* Soft Reset the CAN core */ @@ -70,6 +88,9 @@ enum xcan_reg { #define XCAN_BTR_SJW_MASK 0x00000180 /* Synchronous jump width */ #define XCAN_BTR_TS2_MASK 0x00000070 /* Time segment 2 */ #define XCAN_BTR_TS1_MASK 0x0000000F /* Time segment 1 */ +#define XCAN_BTR_SJW_MASK_CANFD 0x000F0000 /* Synchronous jump width */ +#define XCAN_BTR_TS2_MASK_CANFD 0x00000F00 /* Time segment 2 */ +#define XCAN_BTR_TS1_MASK_CANFD 0x0000003F /* Time segment 1 */ #define XCAN_ECR_REC_MASK 0x0000FF00 /* Receive error counter */ #define XCAN_ECR_TEC_MASK 0x000000FF /* Transmit error counter */ #define XCAN_ESR_ACKER_MASK 0x00000010 /* ACK error */ @@ -83,6 +104,7 @@ enum xcan_reg { #define XCAN_SR_NORMAL_MASK 0x00000008 /* Normal mode */ #define XCAN_SR_LBACK_MASK 0x00000002 /* Loop back mode */ #define XCAN_SR_CONFIG_MASK 0x00000001 /* Configuration mode */ +#define XCAN_IXR_RXMNF_MASK 0x00020000 /* RX match not finished */ #define XCAN_IXR_TXFEMP_MASK 0x00004000 /* TX FIFO Empty */ #define XCAN_IXR_WKUP_MASK 0x00000800 /* Wake up interrupt */ #define XCAN_IXR_SLP_MASK 0x00000400 /* Sleep interrupt */ @@ -100,15 +122,15 @@ enum xcan_reg { #define XCAN_IDR_ID2_MASK 0x0007FFFE /* Extended message ident */ #define XCAN_IDR_RTR_MASK 0x00000001 /* Remote TX request */ #define XCAN_DLCR_DLC_MASK 0xF0000000 /* Data length code 
*/ - -#define XCAN_INTR_ALL (XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |\ - XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | \ - XCAN_IXR_RXNEMP_MASK | XCAN_IXR_ERROR_MASK | \ - XCAN_IXR_RXOFLW_MASK | XCAN_IXR_ARBLST_MASK) +#define XCAN_FSR_FL_MASK 0x00003F00 /* RX Fill Level */ +#define XCAN_FSR_IRI_MASK 0x00000080 /* RX Increment Read Index */ +#define XCAN_FSR_RI_MASK 0x0000001F /* RX Read Index */ /* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */ #define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */ #define XCAN_BTR_TS2_SHIFT 4 /* Time segment 2 */ +#define XCAN_BTR_SJW_SHIFT_CANFD 16 /* Synchronous jump width */ +#define XCAN_BTR_TS2_SHIFT_CANFD 8 /* Time segment 2 */ #define XCAN_IDR_ID1_SHIFT 21 /* Standard Messg Identifier */ #define XCAN_IDR_ID2_SHIFT 1 /* Extended Message Identifier */ #define XCAN_DLCR_DLC_SHIFT 28 /* Data length code */ @@ -118,6 +140,27 @@ enum xcan_reg { #define XCAN_FRAME_MAX_DATA_LEN 8 #define XCAN_TIMEOUT (1 * HZ) +/* TX-FIFO-empty interrupt available */ +#define XCAN_FLAG_TXFEMP 0x0001 +/* RX Match Not Finished interrupt available */ +#define XCAN_FLAG_RXMNF 0x0002 +/* Extended acceptance filters with control at 0xE0 */ +#define XCAN_FLAG_EXT_FILTERS 0x0004 +/* TX mailboxes instead of TX FIFO */ +#define XCAN_FLAG_TX_MAILBOXES 0x0008 +/* RX FIFO with each buffer in separate registers at 0x1100 + * instead of the regular FIFO at 0x50 + */ +#define XCAN_FLAG_RX_FIFO_MULTI 0x0010 + +struct xcan_devtype_data { + unsigned int flags; + const struct can_bittiming_const *bittiming_const; + const char *bus_clk_name; + unsigned int btr_ts2_shift; + unsigned int btr_sjw_shift; +}; + /** * struct xcan_priv - This definition define CAN driver instance * @can: CAN private data structure. @@ -133,6 +176,7 @@ enum xcan_reg { * @irq_flags: For request_irq() * @bus_clk: Pointer to struct clk * @can_clk: Pointer to struct clk + * @devtype: Device type specific constants */ struct xcan_priv { struct can_priv can; @@ -149,6 +193,7 @@ struct xcan_priv { unsigned long irq_flags; struct clk *bus_clk; struct clk *can_clk; + struct xcan_devtype_data devtype; }; /* CAN Bittiming constants as per Xilinx CAN specs */ @@ -164,9 +209,16 @@ static const struct can_bittiming_const xcan_bittiming_const = { .brp_inc = 1, }; -#define XCAN_CAP_WATERMARK 0x0001 -struct xcan_devtype_data { - unsigned int caps; +static const struct can_bittiming_const xcan_bittiming_const_canfd = { + .name = DRIVER_NAME, + .tseg1_min = 1, + .tseg1_max = 64, + .tseg2_min = 1, + .tseg2_max = 16, + .sjw_max = 16, + .brp_min = 1, + .brp_max = 256, + .brp_inc = 1, }; /** @@ -224,6 +276,23 @@ static u32 xcan_read_reg_be(const struct xcan_priv *priv, enum xcan_reg reg) } /** + * xcan_rx_int_mask - Get the mask for the receive interrupt + * @priv: Driver private data structure + * + * Return: The receive interrupt mask used by the driver on this HW + */ +static u32 xcan_rx_int_mask(const struct xcan_priv *priv) +{ + /* RXNEMP is better suited for our use case as it cannot be cleared + * while the FIFO is non-empty, but CAN FD HW does not have it + */ + if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI) + return XCAN_IXR_RXOK_MASK; + else + return XCAN_IXR_RXNEMP_MASK; +} + +/** * set_reset_mode - Resets the CAN device mode * @ndev: Pointer to net_device structure * @@ -287,10 +356,10 @@ static int xcan_set_bittiming(struct net_device *ndev) btr1 = (bt->prop_seg + bt->phase_seg1 - 1); /* Setting Time Segment 2 in BTR Register */ - btr1 |= (bt->phase_seg2 - 1) << XCAN_BTR_TS2_SHIFT; + btr1 |= (bt->phase_seg2 - 1) << 
priv->devtype.btr_ts2_shift; /* Setting Synchronous jump width in BTR Register */ - btr1 |= (bt->sjw - 1) << XCAN_BTR_SJW_SHIFT; + btr1 |= (bt->sjw - 1) << priv->devtype.btr_sjw_shift; priv->write_reg(priv, XCAN_BRPR_OFFSET, btr0); priv->write_reg(priv, XCAN_BTR_OFFSET, btr1); @@ -318,6 +387,7 @@ static int xcan_chip_start(struct net_device *ndev) u32 reg_msr, reg_sr_mask; int err; unsigned long timeout; + u32 ier; /* Check if it is in reset mode */ err = set_reset_mode(ndev); @@ -329,7 +399,15 @@ static int xcan_chip_start(struct net_device *ndev) return err; /* Enable interrupts */ - priv->write_reg(priv, XCAN_IER_OFFSET, XCAN_INTR_ALL); + ier = XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK | + XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | + XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK | + XCAN_IXR_ARBLST_MASK | xcan_rx_int_mask(priv); + + if (priv->devtype.flags & XCAN_FLAG_RXMNF) + ier |= XCAN_IXR_RXMNF_MASK; + + priv->write_reg(priv, XCAN_IER_OFFSET, ier); /* Check whether it is loopback mode or normal mode */ if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) { @@ -340,6 +418,12 @@ static int xcan_chip_start(struct net_device *ndev) reg_sr_mask = XCAN_SR_NORMAL_MASK; } + /* enable the first extended filter, if any, as cores with extended + * filtering default to non-receipt if all filters are disabled + */ + if (priv->devtype.flags & XCAN_FLAG_EXT_FILTERS) + priv->write_reg(priv, XCAN_AFR_EXT_OFFSET, 0x00000001); + priv->write_reg(priv, XCAN_MSR_OFFSET, reg_msr); priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK); @@ -390,34 +474,15 @@ static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode) } /** - * xcan_start_xmit - Starts the transmission - * @skb: sk_buff pointer that contains data to be Txed - * @ndev: Pointer to net_device structure - * - * This function is invoked from upper layers to initiate transmission. This - * function uses the next available free txbuff and populates their fields to - * start the transmission. 
- * - * Return: 0 on success and failure value on error + * xcan_write_frame - Write a frame to HW + * @priv: Driver private data structure + * @skb: sk_buff pointer that contains data to be Txed + * @frame_offset: Register offset to write the frame to */ -static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev) +static void xcan_write_frame(struct xcan_priv *priv, struct sk_buff *skb, + int frame_offset) { - struct xcan_priv *priv = netdev_priv(ndev); - struct net_device_stats *stats = &ndev->stats; - struct can_frame *cf = (struct can_frame *)skb->data; u32 id, dlc, data[2] = {0, 0}; - unsigned long flags; - - if (can_dropped_invalid_skb(ndev, skb)) - return NETDEV_TX_OK; - - /* Check if the TX buffer is full */ - if (unlikely(priv->read_reg(priv, XCAN_SR_OFFSET) & - XCAN_SR_TXFLL_MASK)) { - netif_stop_queue(ndev); - netdev_err(ndev, "BUG!, TX FIFO full when queue awake!\n"); - return NETDEV_TX_BUSY; - } + struct can_frame *cf = (struct can_frame *)skb->data; /* Watch carefully on the bit sequence */ if (cf->can_id & CAN_EFF_FLAG) { @@ -453,24 +518,44 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev) if (cf->can_dlc > 4) data[1] = be32_to_cpup((__be32 *)(cf->data + 4)); + priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id); + /* If the CAN frame is RTR frame this write triggers transmission + * (not on CAN FD) + */ + priv->write_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_offset), dlc); + if (!(cf->can_id & CAN_RTR_FLAG)) { + priv->write_reg(priv, XCAN_FRAME_DW1_OFFSET(frame_offset), + data[0]); + /* If the CAN frame is Standard/Extended frame this + * write triggers transmission (not on CAN FD) + */ + priv->write_reg(priv, XCAN_FRAME_DW2_OFFSET(frame_offset), + data[1]); + } +} + +/** + * xcan_start_xmit_fifo - Starts the transmission (FIFO mode) + * @skb: sk_buff pointer that contains data to be Txed + * @ndev: Pointer to net_device structure + * + * Return: 0 on success, -ENOSPC if FIFO is full.
+ */ +static int xcan_start_xmit_fifo(struct sk_buff *skb, struct net_device *ndev) +{ + struct xcan_priv *priv = netdev_priv(ndev); + unsigned long flags; + + /* Check if the TX buffer is full */ + if (unlikely(priv->read_reg(priv, XCAN_SR_OFFSET) & + XCAN_SR_TXFLL_MASK)) + return -ENOSPC; + can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max); spin_lock_irqsave(&priv->tx_lock, flags); priv->tx_head++; - /* Write the Frame to Xilinx CAN TX FIFO */ - priv->write_reg(priv, XCAN_TXFIFO_ID_OFFSET, id); - /* If the CAN frame is RTR frame this write triggers tranmission */ - priv->write_reg(priv, XCAN_TXFIFO_DLC_OFFSET, dlc); - if (!(cf->can_id & CAN_RTR_FLAG)) { - priv->write_reg(priv, XCAN_TXFIFO_DW1_OFFSET, data[0]); - /* If the CAN frame is Standard/Extended frame this - * write triggers tranmission - */ - priv->write_reg(priv, XCAN_TXFIFO_DW2_OFFSET, data[1]); - stats->tx_bytes += cf->can_dlc; - } + xcan_write_frame(priv, skb, XCAN_TXFIFO_OFFSET); /* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */ if (priv->tx_max > 1) @@ -482,6 +567,70 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev) spin_unlock_irqrestore(&priv->tx_lock, flags); + return 0; +} + +/** + * xcan_start_xmit_mailbox - Starts the transmission (mailbox mode) + * @skb: sk_buff pointer that contains data to be Txed + * @ndev: Pointer to net_device structure + * + * Return: 0 on success, -ENOSPC if there is no space + */ +static int xcan_start_xmit_mailbox(struct sk_buff *skb, struct net_device *ndev) +{ + struct xcan_priv *priv = netdev_priv(ndev); + unsigned long flags; + + if (unlikely(priv->read_reg(priv, XCAN_TRR_OFFSET) & + BIT(XCAN_TX_MAILBOX_IDX))) + return -ENOSPC; + + can_put_echo_skb(skb, ndev, 0); + + spin_lock_irqsave(&priv->tx_lock, flags); + + priv->tx_head++; + + xcan_write_frame(priv, skb, + XCAN_TXMSG_FRAME_OFFSET(XCAN_TX_MAILBOX_IDX)); + + /* Mark buffer as ready for transmit */ + priv->write_reg(priv, XCAN_TRR_OFFSET, BIT(XCAN_TX_MAILBOX_IDX)); + + netif_stop_queue(ndev); + + spin_unlock_irqrestore(&priv->tx_lock, flags); + + return 0; +} + +/** + * xcan_start_xmit - Starts the transmission + * @skb: sk_buff pointer that contains data to be Txed + * @ndev: Pointer to net_device structure + * + * This function is invoked from upper layers to initiate transmission. + * + * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY when the tx queue is full + */ +static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct xcan_priv *priv = netdev_priv(ndev); + int ret; + + if (can_dropped_invalid_skb(ndev, skb)) + return NETDEV_TX_OK; + + if (priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) + ret = xcan_start_xmit_mailbox(skb, ndev); + else + ret = xcan_start_xmit_fifo(skb, ndev); + + if (ret < 0) { + netdev_err(ndev, "BUG!, TX full when queue awake!\n"); + netif_stop_queue(ndev); + return NETDEV_TX_BUSY; + } + return NETDEV_TX_OK; } @@ -489,13 +638,14 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev) * xcan_rx - Is called from CAN isr to complete the received * frame processing * @ndev: Pointer to net_device structure + * @frame_base: Register offset to the frame to be read * * This function is invoked from the CAN isr(poll) to process the Rx frames. It * does minimal processing and invokes "netif_receive_skb" to complete further * processing. * Return: 1 on success and 0 on failure.
*/ -static int xcan_rx(struct net_device *ndev) +static int xcan_rx(struct net_device *ndev, int frame_base) { struct xcan_priv *priv = netdev_priv(ndev); struct net_device_stats *stats = &ndev->stats; @@ -510,9 +660,9 @@ static int xcan_rx(struct net_device *ndev) } /* Read a frame from Xilinx zynq CANPS */ - id_xcan = priv->read_reg(priv, XCAN_RXFIFO_ID_OFFSET); - dlc = priv->read_reg(priv, XCAN_RXFIFO_DLC_OFFSET) >> - XCAN_DLCR_DLC_SHIFT; + id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base)); + dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base)) >> + XCAN_DLCR_DLC_SHIFT; /* Change Xilinx CAN data length format to socketCAN data format */ cf->can_dlc = get_can_dlc(dlc); @@ -535,8 +685,8 @@ } /* DW1/DW2 must always be read to remove message from RXFIFO */ - data[0] = priv->read_reg(priv, XCAN_RXFIFO_DW1_OFFSET); - data[1] = priv->read_reg(priv, XCAN_RXFIFO_DW2_OFFSET); + data[0] = priv->read_reg(priv, XCAN_FRAME_DW1_OFFSET(frame_base)); + data[1] = priv->read_reg(priv, XCAN_FRAME_DW2_OFFSET(frame_base)); if (!(cf->can_id & CAN_RTR_FLAG)) { /* Change Xilinx CAN data format to socketCAN data format */ @@ -594,39 +744,19 @@ static void xcan_set_error_state(struct net_device *ndev, u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET); u32 txerr = ecr & XCAN_ECR_TEC_MASK; u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT; + enum can_state tx_state = txerr >= rxerr ? new_state : 0; + enum can_state rx_state = txerr <= rxerr ? new_state : 0; + + /* non-ERROR states are handled elsewhere */ + if (WARN_ON(new_state > CAN_STATE_ERROR_PASSIVE)) + return; - priv->can.state = new_state; + can_change_state(ndev, cf, tx_state, rx_state); if (cf) { - cf->can_id |= CAN_ERR_CRTL; cf->data[6] = txerr; cf->data[7] = rxerr; } - - switch (new_state) { - case CAN_STATE_ERROR_PASSIVE: - priv->can.can_stats.error_passive++; - if (cf) - cf->data[1] = (rxerr > 127) ? - CAN_ERR_CRTL_RX_PASSIVE : - CAN_ERR_CRTL_TX_PASSIVE; - break; - case CAN_STATE_ERROR_WARNING: - priv->can.can_stats.error_warning++; - if (cf) - cf->data[1] |= (txerr > rxerr) ? - CAN_ERR_CRTL_TX_WARNING : - CAN_ERR_CRTL_RX_WARNING; - break; - case CAN_STATE_ERROR_ACTIVE: - if (cf) - cf->data[1] |= CAN_ERR_CRTL_ACTIVE; - break; - default: - /* non-ERROR states are handled elsewhere */ - WARN_ON(1); - break; - } } /** @@ -703,7 +833,8 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr) } else { enum can_state new_state = xcan_current_error_state(ndev); - xcan_set_error_state(ndev, new_state, skb ? cf : NULL); + if (new_state != priv->can.state) + xcan_set_error_state(ndev, new_state, skb ? cf : NULL); } /* Check for Arbitration lost interrupt */ @@ -725,6 +856,17 @@ } } + /* Check for RX Match Not Finished interrupt */ + if (isr & XCAN_IXR_RXMNF_MASK) { + stats->rx_dropped++; + stats->rx_errors++; + netdev_err(ndev, "RX match not finished, frame discarded\n"); + if (skb) { + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] |= CAN_ERR_CRTL_UNSPEC; + } + } + /* Check for error interrupt */ if (isr & XCAN_IXR_ERROR_MASK) { if (skb) @@ -809,6 +951,44 @@ static void xcan_state_interrupt(struct net_device *ndev, u32 isr) } /** + * xcan_rx_fifo_get_next_frame - Get register offset of next RX frame + * @priv: Driver private data structure + * + * Return: Register offset of the next frame in RX FIFO.
+ */ +static int xcan_rx_fifo_get_next_frame(struct xcan_priv *priv) +{ + int offset; + + if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI) { + u32 fsr; + + /* clear RXOK before the is-empty check so that any newly + * received frame will reassert it without a race + */ + priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXOK_MASK); + + fsr = priv->read_reg(priv, XCAN_FSR_OFFSET); + + /* check if RX FIFO is empty */ + if (!(fsr & XCAN_FSR_FL_MASK)) + return -ENOENT; + + offset = XCAN_RXMSG_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK); + + } else { + /* check if RX FIFO is empty */ + if (!(priv->read_reg(priv, XCAN_ISR_OFFSET) & + XCAN_IXR_RXNEMP_MASK)) + return -ENOENT; + + /* frames are read from a static offset */ + offset = XCAN_RXFIFO_OFFSET; + } + + return offset; +} + +/** * xcan_rx_poll - Poll routine for rx packets (NAPI) * @napi: napi structure pointer * @quota: Max number of rx packets to be processed. @@ -822,14 +1002,24 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota) { struct net_device *ndev = napi->dev; struct xcan_priv *priv = netdev_priv(ndev); - u32 isr, ier; + u32 ier; int work_done = 0; - - isr = priv->read_reg(priv, XCAN_ISR_OFFSET); - while ((isr & XCAN_IXR_RXNEMP_MASK) && (work_done < quota)) { - work_done += xcan_rx(ndev); - priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXNEMP_MASK); - isr = priv->read_reg(priv, XCAN_ISR_OFFSET); + int frame_offset; + + while ((frame_offset = xcan_rx_fifo_get_next_frame(priv)) >= 0 && + (work_done < quota)) { + work_done += xcan_rx(ndev, frame_offset); + + if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI) + /* increment read index */ + priv->write_reg(priv, XCAN_FSR_OFFSET, + XCAN_FSR_IRI_MASK); + else + /* clear rx-not-empty (will actually clear only if + * empty) + */ + priv->write_reg(priv, XCAN_ICR_OFFSET, + XCAN_IXR_RXNEMP_MASK); } if (work_done) { @@ -840,7 +1030,7 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota) if (work_done < quota) { napi_complete_done(napi, work_done); ier = priv->read_reg(priv, XCAN_IER_OFFSET); - ier |= XCAN_IXR_RXNEMP_MASK; + ier |= xcan_rx_int_mask(priv); priv->write_reg(priv, XCAN_IER_OFFSET, ier); } return work_done; @@ -908,8 +1098,8 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr) } while (frames_sent--) { - can_get_echo_skb(ndev, priv->tx_tail % - priv->tx_max); + stats->tx_bytes += can_get_echo_skb(ndev, priv->tx_tail % + priv->tx_max); priv->tx_tail++; stats->tx_packets++; } @@ -939,6 +1129,7 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id) struct xcan_priv *priv = netdev_priv(ndev); u32 isr, ier; u32 isr_errors; + u32 rx_int_mask = xcan_rx_int_mask(priv); /* Get the interrupt status from Xilinx CAN */ isr = priv->read_reg(priv, XCAN_ISR_OFFSET); @@ -958,16 +1149,17 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id) /* Check for the type of error interrupt and Processing it */ isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK | - XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK); + XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK | + XCAN_IXR_RXMNF_MASK); if (isr_errors) { priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors); xcan_err_interrupt(ndev, isr); } /* Check for the type of receive interrupt and Processing it */ - if (isr & XCAN_IXR_RXNEMP_MASK) { + if (isr & rx_int_mask) { ier = priv->read_reg(priv, XCAN_IER_OFFSET); - ier &= ~XCAN_IXR_RXNEMP_MASK; + ier &= ~rx_int_mask; priv->write_reg(priv, XCAN_IER_OFFSET, ier); napi_schedule(&priv->napi); } @@ -1214,13 +1406,35 @@ static const struct dev_pm_ops xcan_dev_pm_ops = { }; 
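The RX poll rework above is easier to follow with the CAN FD FIFO protocol spelled out on its own. What follows is a minimal sketch of the consumer side only, using the FSR bits defined earlier in this patch; the demo_read_frame() helper is hypothetical (it stands in for xcan_rx()) and all interrupt bookkeeping, such as the RXOK clearing done by the real driver, is omitted:

static void demo_read_frame(struct xcan_priv *priv, int frame_offset);

static int demo_drain_rx_fifo(struct xcan_priv *priv, int quota)
{
	int work_done = 0;

	while (work_done < quota) {
		/* FSR.FL is the fill level, FSR.RI the index of the
		 * oldest unread receive buffer
		 */
		u32 fsr = priv->read_reg(priv, XCAN_FSR_OFFSET);

		if (!(fsr & XCAN_FSR_FL_MASK))
			break;	/* RX FIFO is empty */

		/* each buffer has its own register block at 0x1100 */
		demo_read_frame(priv,
				XCAN_RXMSG_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK));

		/* writing IRI advances the read index by one buffer */
		priv->write_reg(priv, XCAN_FSR_OFFSET, XCAN_FSR_IRI_MASK);
		work_done++;
	}

	return work_done;
}

On cores without XCAN_FLAG_RX_FIFO_MULTI the same loop degenerates to reading the fixed window at XCAN_RXFIFO_OFFSET while RXNEMP stays set, which is exactly the split xcan_rx_fifo_get_next_frame() encodes.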
static const struct xcan_devtype_data xcan_zynq_data = { - .caps = XCAN_CAP_WATERMARK, + .bittiming_const = &xcan_bittiming_const, + .btr_ts2_shift = XCAN_BTR_TS2_SHIFT, + .btr_sjw_shift = XCAN_BTR_SJW_SHIFT, + .bus_clk_name = "pclk", +}; + +static const struct xcan_devtype_data xcan_axi_data = { + .bittiming_const = &xcan_bittiming_const, + .btr_ts2_shift = XCAN_BTR_TS2_SHIFT, + .btr_sjw_shift = XCAN_BTR_SJW_SHIFT, + .bus_clk_name = "s_axi_aclk", +}; + +static const struct xcan_devtype_data xcan_canfd_data = { + .flags = XCAN_FLAG_EXT_FILTERS | + XCAN_FLAG_RXMNF | + XCAN_FLAG_TX_MAILBOXES | + XCAN_FLAG_RX_FIFO_MULTI, + .bittiming_const = &xcan_bittiming_const, + .btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD, + .btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD, + .bus_clk_name = "s_axi_aclk", }; /* Match table for OF platform binding */ static const struct of_device_id xcan_of_match[] = { { .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data }, - { .compatible = "xlnx,axi-can-1.00.a", }, + { .compatible = "xlnx,axi-can-1.00.a", .data = &xcan_axi_data }, + { .compatible = "xlnx,canfd-1.0", .data = &xcan_canfd_data }, { /* end of list */ }, }; MODULE_DEVICE_TABLE(of, xcan_of_match); @@ -1240,9 +1454,12 @@ static int xcan_probe(struct platform_device *pdev) struct net_device *ndev; struct xcan_priv *priv; const struct of_device_id *of_id; - int caps = 0; + const struct xcan_devtype_data *devtype = &xcan_axi_data; void __iomem *addr; - int ret, rx_max, tx_max, tx_fifo_depth; + int ret; + int rx_max, tx_max; + int hw_tx_max, hw_rx_max; + const char *hw_tx_max_property; /* Get the virtual base address for the device */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -1252,25 +1469,33 @@ static int xcan_probe(struct platform_device *pdev) goto err; } - ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", - &tx_fifo_depth); - if (ret < 0) - goto err; + of_id = of_match_device(xcan_of_match, &pdev->dev); + if (of_id && of_id->data) + devtype = of_id->data; - ret = of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth", &rx_max); - if (ret < 0) - goto err; + hw_tx_max_property = devtype->flags & XCAN_FLAG_TX_MAILBOXES ? + "tx-mailbox-count" : "tx-fifo-depth"; - of_id = of_match_device(xcan_of_match, &pdev->dev); - if (of_id) { - const struct xcan_devtype_data *devtype_data = of_id->data; + ret = of_property_read_u32(pdev->dev.of_node, hw_tx_max_property, + &hw_tx_max); + if (ret < 0) { + dev_err(&pdev->dev, "missing %s property\n", + hw_tx_max_property); + goto err; + } - if (devtype_data) - caps = devtype_data->caps; + ret = of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth", + &hw_rx_max); + if (ret < 0) { + dev_err(&pdev->dev, + "missing rx-fifo-depth property (mailbox mode is not supported)\n"); + goto err; } - /* There is no way to directly figure out how many frames have been - * sent when the TXOK interrupt is processed. If watermark programming + /* With TX FIFO: + * + * There is no way to directly figure out how many frames have been + * sent when the TXOK interrupt is processed. If TXFEMP * is supported, we can have 2 frames in the FIFO and use TXFEMP * to determine if 1 or 2 frames have been sent. * Theoretically we should be able to use TXFWMEMP to determine up @@ -1279,12 +1504,20 @@ static int xcan_probe(struct platform_device *pdev) * than 2 frames in FIFO) is set anyway with no TXOK (a frame was * sent), which is not a sensible state - possibly TXFWMEMP is not * completely synchronized with the rest of the bits? 
+ * + * With TX mailboxes: + * + * HW sends frames in CAN ID priority order. To preserve FIFO ordering + * we submit frames one at a time. */ - if (caps & XCAN_CAP_WATERMARK) - tx_max = min(tx_fifo_depth, 2); + if (!(devtype->flags & XCAN_FLAG_TX_MAILBOXES) && + (devtype->flags & XCAN_FLAG_TXFEMP)) + tx_max = min(hw_tx_max, 2); else tx_max = 1; + rx_max = hw_rx_max; + /* Create a CAN device instance */ ndev = alloc_candev(sizeof(struct xcan_priv), tx_max); if (!ndev) @@ -1292,13 +1525,14 @@ static int xcan_probe(struct platform_device *pdev) priv = netdev_priv(ndev); priv->dev = &pdev->dev; - priv->can.bittiming_const = &xcan_bittiming_const; + priv->can.bittiming_const = devtype->bittiming_const; priv->can.do_set_mode = xcan_do_set_mode; priv->can.do_get_berr_counter = xcan_get_berr_counter; priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_BERR_REPORTING; priv->reg_base = addr; priv->tx_max = tx_max; + priv->devtype = *devtype; spin_lock_init(&priv->tx_lock); /* Get IRQ for the device */ @@ -1316,22 +1550,12 @@ static int xcan_probe(struct platform_device *pdev) ret = PTR_ERR(priv->can_clk); goto err_free; } - /* Check for type of CAN device */ - if (of_device_is_compatible(pdev->dev.of_node, - "xlnx,zynq-can-1.0")) { - priv->bus_clk = devm_clk_get(&pdev->dev, "pclk"); - if (IS_ERR(priv->bus_clk)) { - dev_err(&pdev->dev, "bus clock not found\n"); - ret = PTR_ERR(priv->bus_clk); - goto err_free; - } - } else { - priv->bus_clk = devm_clk_get(&pdev->dev, "s_axi_aclk"); - if (IS_ERR(priv->bus_clk)) { - dev_err(&pdev->dev, "bus clock not found\n"); - ret = PTR_ERR(priv->bus_clk); - goto err_free; - } + + priv->bus_clk = devm_clk_get(&pdev->dev, devtype->bus_clk_name); + if (IS_ERR(priv->bus_clk)) { + dev_err(&pdev->dev, "bus clock not found\n"); + ret = PTR_ERR(priv->bus_clk); + goto err_free; } priv->write_reg = xcan_write_reg_le; @@ -1364,9 +1588,9 @@ static int xcan_probe(struct platform_device *pdev) pm_runtime_put(&pdev->dev); - netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth: actual %d, using %d\n", - priv->reg_base, ndev->irq, priv->can.clock.freq, - tx_fifo_depth, priv->tx_max); + netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx buffers: actual %d, using %d\n", + priv->reg_base, ndev->irq, priv->can.clock.freq, + hw_tx_max, priv->tx_max); return 0; diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index 2b81b97e994f..d3ce1e4cb4d3 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -5,7 +5,7 @@ source "drivers/net/dsa/b53/Kconfig" config NET_DSA_BCM_SF2 tristate "Broadcom Starfighter 2 Ethernet switch support" - depends on HAS_IOMEM && NET_DSA && OF_MDIO + depends on HAS_IOMEM && NET_DSA select NET_DSA_TAG_BRCM select FIXED_PHY select BCM7XXX_PHY @@ -52,6 +52,17 @@ config NET_DSA_QCA8K This enables support for the Qualcomm Atheros QCA8K Ethernet switch chips. +config NET_DSA_REALTEK_SMI + tristate "Realtek SMI Ethernet switch family support" + depends on NET_DSA + select FIXED_PHY + select IRQ_DOMAIN + select REALTEK_PHY + select REGMAP + ---help--- + This enables support for the Realtek SMI-based switch + chips, currently only RTL8366RB. + config NET_DSA_SMSC_LAN9303 tristate select NET_DSA_TAG_LAN9303 @@ -76,4 +87,15 @@ config NET_DSA_SMSC_LAN9303_MDIO Enable access functions if the SMSC/Microchip LAN9303 is configured for MDIO managed mode. 
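Stepping back to the xcan_probe() rework above before the remaining DSA Kconfig entries: every per-core difference (bittiming limits, BTR field shifts, bus clock name, feature flags) now comes from a devtype table bound to the compatible string, so the probe path itself carries no per-SoC conditionals. A compressed sketch of that OF match-data pattern, assuming the xcan structures from this patch (the demo_ helper name is illustrative):

static void demo_bind_devtype(struct platform_device *pdev,
			      struct xcan_priv *priv)
{
	/* the AXI CAN constants double as the default for old bindings
	 * whose of_device_id entry carries no .data pointer
	 */
	const struct xcan_devtype_data *devtype = &xcan_axi_data;
	const struct of_device_id *of_id;

	of_id = of_match_device(xcan_of_match, &pdev->dev);
	if (of_id && of_id->data)
		devtype = of_id->data;

	/* copy the table into priv so later code needs no NULL checks */
	priv->devtype = *devtype;
	priv->can.bittiming_const = devtype->bittiming_const;
}

The same idiom appears again in realtek_smi_probe() later in this series via of_device_get_match_data().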
+config NET_DSA_VITESSE_VSC73XX + tristate "Vitesse VSC7385/7388/7395/7398 support" + depends on OF && SPI + depends on NET_DSA + select FIXED_PHY + select VITESSE_PHY + select GPIOLIB + ---help--- + This enables support for the Vitesse VSC7385, VSC7388, + VSC7395 and VSC7398 SparX integrated ethernet switches. + endmenu diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile index 15c2a831edf1..46c1cba91ffe 100644 --- a/drivers/net/dsa/Makefile +++ b/drivers/net/dsa/Makefile @@ -8,9 +8,12 @@ endif obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o +obj-$(CONFIG_NET_DSA_REALTEK_SMI) += realtek.o +realtek-objs := realtek-smi.o rtl8366.o rtl8366rb.o obj-$(CONFIG_NET_DSA_SMSC_LAN9303) += lan9303-core.o obj-$(CONFIG_NET_DSA_SMSC_LAN9303_I2C) += lan9303_i2c.o obj-$(CONFIG_NET_DSA_SMSC_LAN9303_MDIO) += lan9303_mdio.o +obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX) += vitesse-vsc73xx.o obj-y += b53/ obj-y += microchip/ obj-y += mv88e6xxx/ diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 02e8982519ce..ac96ff40d37e 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -220,7 +220,7 @@ static void bcm_sf2_port_disable(struct dsa_switch *ds, int port, struct phy_device *phy) { struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); - u32 off, reg; + u32 reg; if (priv->wol_ports_mask & (1 << port)) return; @@ -231,11 +231,6 @@ static void bcm_sf2_port_disable(struct dsa_switch *ds, int port, if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1) bcm_sf2_gphy_enable_set(ds, false); - if (dsa_is_cpu_port(ds, port)) - off = CORE_IMP_CTL; - else - off = CORE_G_PCTL_PORT(port); - b53_disable_port(ds, port, phy); /* Power down the port memory */ diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index bb28c701381a..0b5a2c31f395 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -2819,6 +2819,8 @@ static const struct mv88e6xxx_ops mv88e6161_ops = { .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, + .avb_ops = &mv88e6165_avb_ops, + .ptp_ops = &mv88e6165_ptp_ops, }; static const struct mv88e6xxx_ops mv88e6165_ops = { @@ -2847,6 +2849,8 @@ static const struct mv88e6xxx_ops mv88e6165_ops = { .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, + .avb_ops = &mv88e6165_avb_ops, + .ptp_ops = &mv88e6165_ptp_ops, }; static const struct mv88e6xxx_ops mv88e6171_ops = { @@ -3142,6 +3146,8 @@ static const struct mv88e6xxx_ops mv88e6191_ops = { .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .serdes_power = mv88e6390_serdes_power, + .avb_ops = &mv88e6390_avb_ops, + .ptp_ops = &mv88e6352_ptp_ops, }; static const struct mv88e6xxx_ops mv88e6240_ops = { @@ -3184,6 +3190,7 @@ static const struct mv88e6xxx_ops mv88e6240_ops = { .serdes_power = mv88e6352_serdes_power, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6352_avb_ops, + .ptp_ops = &mv88e6352_ptp_ops, }; static const struct mv88e6xxx_ops mv88e6290_ops = { @@ -3223,6 +3230,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = { .serdes_power = mv88e6390_serdes_power, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6390_avb_ops, + .ptp_ops = &mv88e6352_ptp_ops, }; static const struct mv88e6xxx_ops mv88e6320_ops = { @@ -3261,6 +3269,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = { .vtu_loadpurge = 
mv88e6185_g1_vtu_loadpurge, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6352_avb_ops, + .ptp_ops = &mv88e6352_ptp_ops, }; static const struct mv88e6xxx_ops mv88e6321_ops = { @@ -3297,6 +3306,7 @@ static const struct mv88e6xxx_ops mv88e6321_ops = { .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6352_avb_ops, + .ptp_ops = &mv88e6352_ptp_ops, }; static const struct mv88e6xxx_ops mv88e6341_ops = { @@ -3338,6 +3348,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { .serdes_power = mv88e6341_serdes_power, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6390_avb_ops, + .ptp_ops = &mv88e6352_ptp_ops, }; static const struct mv88e6xxx_ops mv88e6350_ops = { @@ -3411,6 +3422,7 @@ static const struct mv88e6xxx_ops mv88e6351_ops = { .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .avb_ops = &mv88e6352_avb_ops, + .ptp_ops = &mv88e6352_ptp_ops, }; static const struct mv88e6xxx_ops mv88e6352_ops = { @@ -3453,6 +3465,7 @@ static const struct mv88e6xxx_ops mv88e6352_ops = { .serdes_power = mv88e6352_serdes_power, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6352_avb_ops, + .ptp_ops = &mv88e6352_ptp_ops, .serdes_get_sset_count = mv88e6352_serdes_get_sset_count, .serdes_get_strings = mv88e6352_serdes_get_strings, .serdes_get_stats = mv88e6352_serdes_get_stats, @@ -3497,6 +3510,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = { .serdes_power = mv88e6390_serdes_power, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6390_avb_ops, + .ptp_ops = &mv88e6352_ptp_ops, }; static const struct mv88e6xxx_ops mv88e6390x_ops = { @@ -3538,6 +3552,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = { .serdes_power = mv88e6390_serdes_power, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6390_avb_ops, + .ptp_ops = &mv88e6352_ptp_ops, }; static const struct mv88e6xxx_info mv88e6xxx_table[] = { @@ -3689,6 +3704,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .pvt = true, .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, + .ptp_support = true, .ops = &mv88e6161_ops, }, @@ -3711,6 +3727,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .pvt = true, .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_DSA, + .ptp_support = true, .ops = &mv88e6165_ops, }, diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index 8ac3fbb15352..6aa6197ddc10 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -155,6 +155,7 @@ struct mv88e6xxx_bus_ops; struct mv88e6xxx_irq_ops; struct mv88e6xxx_gpio_ops; struct mv88e6xxx_avb_ops; +struct mv88e6xxx_ptp_ops; struct mv88e6xxx_irq { u16 masked; @@ -273,6 +274,7 @@ struct mv88e6xxx_chip { struct ptp_pin_desc pin_config[MV88E6XXX_MAX_GPIO]; u16 trig_config; u16 evcap_config; + u16 enable_count; /* Per-port timestamping resources. 
*/ struct mv88e6xxx_port_hwtstamp port_hwtstamp[DSA_MAX_PORTS]; @@ -439,6 +441,9 @@ struct mv88e6xxx_ops { /* Remote Management Unit operations */ int (*rmu_disable)(struct mv88e6xxx_chip *chip); + + /* Precision Time Protocol operations */ + const struct mv88e6xxx_ptp_ops *ptp_ops; }; struct mv88e6xxx_irq_ops { @@ -486,6 +491,24 @@ struct mv88e6xxx_avb_ops { int (*tai_write)(struct mv88e6xxx_chip *chip, int addr, u16 data); }; +struct mv88e6xxx_ptp_ops { + u64 (*clock_read)(const struct cyclecounter *cc); + int (*ptp_enable)(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on); + int (*ptp_verify)(struct ptp_clock_info *ptp, unsigned int pin, + enum ptp_pin_function func, unsigned int chan); + void (*event_work)(struct work_struct *ugly); + int (*port_enable)(struct mv88e6xxx_chip *chip, int port); + int (*port_disable)(struct mv88e6xxx_chip *chip, int port); + int (*global_enable)(struct mv88e6xxx_chip *chip); + int (*global_disable)(struct mv88e6xxx_chip *chip); + int n_ext_ts; + int arr0_sts_reg; + int arr1_sts_reg; + int dep_sts_reg; + u32 rx_filters; +}; + #define STATS_TYPE_PORT BIT(0) #define STATS_TYPE_BANK0 BIT(1) #define STATS_TYPE_BANK1 BIT(2) diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h index 37e8ce2c72a0..194660d8c783 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.h +++ b/drivers/net/dsa/mv88e6xxx/global2.h @@ -160,6 +160,7 @@ #define MV88E6390_G2_AVB_CMD_OP_WRITE 0x6000 #define MV88E6352_G2_AVB_CMD_PORT_MASK 0x0f00 #define MV88E6352_G2_AVB_CMD_PORT_TAIGLOBAL 0xe +#define MV88E6165_G2_AVB_CMD_PORT_PTPGLOBAL 0xf #define MV88E6352_G2_AVB_CMD_PORT_PTPGLOBAL 0xf #define MV88E6390_G2_AVB_CMD_PORT_MASK 0x1f00 #define MV88E6390_G2_AVB_CMD_PORT_TAIGLOBAL 0x1e @@ -335,6 +336,7 @@ int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target, extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops; extern const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops; +extern const struct mv88e6xxx_avb_ops mv88e6165_avb_ops; extern const struct mv88e6xxx_avb_ops mv88e6352_avb_ops; extern const struct mv88e6xxx_avb_ops mv88e6390_avb_ops; @@ -484,6 +486,7 @@ static inline int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip) static const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops = {}; static const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops = {}; +static const struct mv88e6xxx_avb_ops mv88e6165_avb_ops = {}; static const struct mv88e6xxx_avb_ops mv88e6352_avb_ops = {}; static const struct mv88e6xxx_avb_ops mv88e6390_avb_ops = {}; diff --git a/drivers/net/dsa/mv88e6xxx/global2_avb.c b/drivers/net/dsa/mv88e6xxx/global2_avb.c index 2e398ccb88ca..672b503a67e1 100644 --- a/drivers/net/dsa/mv88e6xxx/global2_avb.c +++ b/drivers/net/dsa/mv88e6xxx/global2_avb.c @@ -130,6 +130,31 @@ const struct mv88e6xxx_avb_ops mv88e6352_avb_ops = { .tai_write = mv88e6352_g2_avb_tai_write, }; +static int mv88e6165_g2_avb_tai_read(struct mv88e6xxx_chip *chip, int addr, + u16 *data, int len) +{ + return mv88e6352_g2_avb_port_ptp_read(chip, + MV88E6165_G2_AVB_CMD_PORT_PTPGLOBAL, + addr, data, len); +} + +static int mv88e6165_g2_avb_tai_write(struct mv88e6xxx_chip *chip, int addr, + u16 data) +{ + return mv88e6352_g2_avb_port_ptp_write(chip, + MV88E6165_G2_AVB_CMD_PORT_PTPGLOBAL, + addr, data); +} + +const struct mv88e6xxx_avb_ops mv88e6165_avb_ops = { + .port_ptp_read = mv88e6352_g2_avb_port_ptp_read, + .port_ptp_write = mv88e6352_g2_avb_port_ptp_write, + .ptp_read = mv88e6352_g2_avb_ptp_read, + .ptp_write = mv88e6352_g2_avb_ptp_write, 
+ .tai_read = mv88e6165_g2_avb_tai_read, + .tai_write = mv88e6165_g2_avb_tai_write, +}; + static int mv88e6390_g2_avb_port_ptp_read(struct mv88e6xxx_chip *chip, int port, int addr, u16 *data, int len) diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.c b/drivers/net/dsa/mv88e6xxx/hwtstamp.c index a036c490b7ce..a17c16a2ab78 100644 --- a/drivers/net/dsa/mv88e6xxx/hwtstamp.c +++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.c @@ -51,17 +51,30 @@ static int mv88e6xxx_ptp_write(struct mv88e6xxx_chip *chip, int addr, return chip->info->ops->avb_ops->ptp_write(chip, addr, data); } +static int mv88e6xxx_ptp_read(struct mv88e6xxx_chip *chip, int addr, + u16 *data) +{ + if (!chip->info->ops->avb_ops->ptp_read) + return -EOPNOTSUPP; + + return chip->info->ops->avb_ops->ptp_read(chip, addr, data, 1); +} + /* TX_TSTAMP_TIMEOUT: This limits the time spent polling for a TX * timestamp. When working properly, hardware will produce a timestamp * within 1ms. Software may enounter delays due to MDIO contention, so * the timeout is set accordingly. */ -#define TX_TSTAMP_TIMEOUT msecs_to_jiffies(20) +#define TX_TSTAMP_TIMEOUT msecs_to_jiffies(40) int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port, struct ethtool_ts_info *info) { - struct mv88e6xxx_chip *chip = ds->priv; + const struct mv88e6xxx_ptp_ops *ptp_ops; + struct mv88e6xxx_chip *chip; + + chip = ds->priv; + ptp_ops = chip->info->ops->ptp_ops; if (!chip->info->ptp_support) return -EOPNOTSUPP; @@ -74,17 +87,7 @@ int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port, info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); - info->rx_filters = - (1 << HWTSTAMP_FILTER_NONE) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ); + info->rx_filters = ptp_ops->rx_filters; return 0; } @@ -92,10 +95,9 @@ int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port, static int mv88e6xxx_set_hwtstamp_config(struct mv88e6xxx_chip *chip, int port, struct hwtstamp_config *config) { + const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops; struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port]; bool tstamp_enable = false; - u16 port_config0; - int err; /* Prevent the TX/RX paths from trying to interact with the * timestamp hardware while we reconfigure it. @@ -120,6 +122,14 @@ static int mv88e6xxx_set_hwtstamp_config(struct mv88e6xxx_chip *chip, int port, /* The switch supports timestamping both L2 and L4; one cannot be * disabled independently of the other. */ + + if (!(BIT(config->rx_filter) & ptp_ops->rx_filters)) { + config->rx_filter = HWTSTAMP_FILTER_NONE; + dev_dbg(chip->dev, "Unsupported rx_filter %d\n", + config->rx_filter); + return -ERANGE; + } + switch (config->rx_filter) { case HWTSTAMP_FILTER_NONE: tstamp_enable = false; @@ -141,24 +151,22 @@ static int mv88e6xxx_set_hwtstamp_config(struct mv88e6xxx_chip *chip, int port, return -ERANGE; } + mutex_lock(&chip->reg_lock); if (tstamp_enable) { - /* Disable transportSpecific value matching, so that packets - * with either 1588 (0) and 802.1AS (1) will be timestamped. 
- */ - port_config0 = MV88E6XXX_PORT_PTP_CFG0_DISABLE_TSPEC_MATCH; + chip->enable_count += 1; + if (chip->enable_count == 1 && ptp_ops->global_enable) + ptp_ops->global_enable(chip); + if (ptp_ops->port_enable) + ptp_ops->port_enable(chip, port); } else { - /* Disable PTP. This disables both RX and TX timestamping. */ - port_config0 = MV88E6XXX_PORT_PTP_CFG0_DISABLE_PTP; + if (ptp_ops->port_disable) + ptp_ops->port_disable(chip, port); + chip->enable_count -= 1; + if (chip->enable_count == 0 && ptp_ops->global_disable) + ptp_ops->global_disable(chip); } - - mutex_lock(&chip->reg_lock); - err = mv88e6xxx_port_ptp_write(chip, port, MV88E6XXX_PORT_PTP_CFG0, - port_config0); mutex_unlock(&chip->reg_lock); - if (err < 0) - return err; - /* Once hardware has been configured, enable timestamp checks * in the RX/TX paths. */ @@ -338,17 +346,18 @@ static void mv88e6xxx_get_rxts(struct mv88e6xxx_chip *chip, static void mv88e6xxx_rxtstamp_work(struct mv88e6xxx_chip *chip, struct mv88e6xxx_port_hwtstamp *ps) { + const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops; struct sk_buff *skb; skb = skb_dequeue(&ps->rx_queue); if (skb) - mv88e6xxx_get_rxts(chip, ps, skb, MV88E6XXX_PORT_PTP_ARR0_STS, + mv88e6xxx_get_rxts(chip, ps, skb, ptp_ops->arr0_sts_reg, &ps->rx_queue); skb = skb_dequeue(&ps->rx_queue2); if (skb) - mv88e6xxx_get_rxts(chip, ps, skb, MV88E6XXX_PORT_PTP_ARR1_STS, + mv88e6xxx_get_rxts(chip, ps, skb, ptp_ops->arr1_sts_reg, &ps->rx_queue2); } @@ -389,6 +398,7 @@ bool mv88e6xxx_port_rxtstamp(struct dsa_switch *ds, int port, static int mv88e6xxx_txtstamp_work(struct mv88e6xxx_chip *chip, struct mv88e6xxx_port_hwtstamp *ps) { + const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops; struct skb_shared_hwtstamps shhwtstamps; u16 departure_block[4], status; struct sk_buff *tmp_skb; @@ -401,7 +411,7 @@ static int mv88e6xxx_txtstamp_work(struct mv88e6xxx_chip *chip, mutex_lock(&chip->reg_lock); err = mv88e6xxx_port_ptp_read(chip, ps->port_id, - MV88E6XXX_PORT_PTP_DEP_STS, + ptp_ops->dep_sts_reg, departure_block, ARRAY_SIZE(departure_block)); mutex_unlock(&chip->reg_lock); @@ -425,8 +435,7 @@ static int mv88e6xxx_txtstamp_work(struct mv88e6xxx_chip *chip, /* We have the timestamp; go ahead and clear valid now */ mutex_lock(&chip->reg_lock); - mv88e6xxx_port_ptp_write(chip, ps->port_id, - MV88E6XXX_PORT_PTP_DEP_STS, 0); + mv88e6xxx_port_ptp_write(chip, ps->port_id, ptp_ops->dep_sts_reg, 0); mutex_unlock(&chip->reg_lock); status = departure_block[0] & MV88E6XXX_PTP_TS_STATUS_MASK; @@ -522,8 +531,48 @@ bool mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port, return true; } +int mv88e6165_global_disable(struct mv88e6xxx_chip *chip) +{ + u16 val; + int err; + + err = mv88e6xxx_ptp_read(chip, MV88E6165_PTP_CFG, &val); + if (err) + return err; + val |= MV88E6165_PTP_CFG_DISABLE_PTP; + + return mv88e6xxx_ptp_write(chip, MV88E6165_PTP_CFG, val); +} + +int mv88e6165_global_enable(struct mv88e6xxx_chip *chip) +{ + u16 val; + int err; + + err = mv88e6xxx_ptp_read(chip, MV88E6165_PTP_CFG, &val); + if (err) + return err; + + val &= ~(MV88E6165_PTP_CFG_DISABLE_PTP | MV88E6165_PTP_CFG_TSPEC_MASK); + + return mv88e6xxx_ptp_write(chip, MV88E6165_PTP_CFG, val); +} + +int mv88e6352_hwtstamp_port_disable(struct mv88e6xxx_chip *chip, int port) +{ + return mv88e6xxx_port_ptp_write(chip, port, MV88E6XXX_PORT_PTP_CFG0, + MV88E6XXX_PORT_PTP_CFG0_DISABLE_PTP); +} + +int mv88e6352_hwtstamp_port_enable(struct mv88e6xxx_chip *chip, int port) +{ + return mv88e6xxx_port_ptp_write(chip, port, 
MV88E6XXX_PORT_PTP_CFG0, + MV88E6XXX_PORT_PTP_CFG0_DISABLE_TSPEC_MATCH); +} + static int mv88e6xxx_hwtstamp_port_setup(struct mv88e6xxx_chip *chip, int port) { + const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops; struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port]; ps->port_id = port; @@ -531,12 +580,15 @@ static int mv88e6xxx_hwtstamp_port_setup(struct mv88e6xxx_chip *chip, int port) skb_queue_head_init(&ps->rx_queue); skb_queue_head_init(&ps->rx_queue2); - return mv88e6xxx_port_ptp_write(chip, port, MV88E6XXX_PORT_PTP_CFG0, - MV88E6XXX_PORT_PTP_CFG0_DISABLE_PTP); + if (ptp_ops->port_disable) + return ptp_ops->port_disable(chip, port); + + return 0; } int mv88e6xxx_hwtstamp_setup(struct mv88e6xxx_chip *chip) { + const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops; int err; int i; @@ -547,6 +599,18 @@ int mv88e6xxx_hwtstamp_setup(struct mv88e6xxx_chip *chip) return err; } + /* Disable PTP globally */ + if (ptp_ops->global_disable) { + err = ptp_ops->global_disable(chip); + if (err) + return err; + } + + /* Set the ethertype of L2 PTP messages */ + err = mv88e6xxx_ptp_write(chip, MV88E6XXX_PTP_GC_ETYPE, ETH_P_1588); + if (err) + return err; + /* MV88E6XXX_PTP_MSG_TYPE is a mask of PTP message types to * timestamp. This affects all ports that have timestamping enabled, * but the timestamp config is per-port; thus we configure all events diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.h b/drivers/net/dsa/mv88e6xxx/hwtstamp.h index bc71c9212a08..b9a72661bcc4 100644 --- a/drivers/net/dsa/mv88e6xxx/hwtstamp.h +++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.h @@ -19,7 +19,7 @@ #include "chip.h" -/* Global PTP registers */ +/* Global 6352 PTP registers */ /* Offset 0x00: PTP EtherType */ #define MV88E6XXX_PTP_ETHERTYPE 0x00 @@ -34,6 +34,12 @@ /* Offset 0x02: Timestamp Arrival Capture Pointers */ #define MV88E6XXX_PTP_TS_ARRIVAL_PTR 0x02 +/* Offset 0x05: PTP Global Configuration */ +#define MV88E6165_PTP_CFG 0x05 +#define MV88E6165_PTP_CFG_TSPEC_MASK 0xf000 +#define MV88E6165_PTP_CFG_DISABLE_TS_OVERWRITE BIT(1) +#define MV88E6165_PTP_CFG_DISABLE_PTP BIT(0) + /* Offset 0x07: PTP Global Configuration */ #define MV88E6341_PTP_CFG 0x07 #define MV88E6341_PTP_CFG_UPDATE 0x8000 @@ -46,7 +52,7 @@ /* Offset 0x08: PTP Interrupt Status */ #define MV88E6XXX_PTP_IRQ_STATUS 0x08 -/* Per-Port PTP Registers */ +/* Per-Port 6352 PTP Registers */ /* Offset 0x00: PTP Configuration 0 */ #define MV88E6XXX_PORT_PTP_CFG0 0x00 #define MV88E6XXX_PORT_PTP_CFG0_TSPEC_SHIFT 12 @@ -123,6 +129,10 @@ int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port, int mv88e6xxx_hwtstamp_setup(struct mv88e6xxx_chip *chip); void mv88e6xxx_hwtstamp_free(struct mv88e6xxx_chip *chip); +int mv88e6352_hwtstamp_port_enable(struct mv88e6xxx_chip *chip, int port); +int mv88e6352_hwtstamp_port_disable(struct mv88e6xxx_chip *chip, int port); +int mv88e6165_global_enable(struct mv88e6xxx_chip *chip); +int mv88e6165_global_disable(struct mv88e6xxx_chip *chip); #else /* !CONFIG_NET_DSA_MV88E6XXX_PTP */ diff --git a/drivers/net/dsa/mv88e6xxx/ptp.c b/drivers/net/dsa/mv88e6xxx/ptp.c index bd85e2c390e1..4b336d8d4c67 100644 --- a/drivers/net/dsa/mv88e6xxx/ptp.c +++ b/drivers/net/dsa/mv88e6xxx/ptp.c @@ -16,6 +16,7 @@ #include "chip.h" #include "global2.h" +#include "hwtstamp.h" #include "ptp.h" /* Raw timestamps are in units of 8-ns clock periods. 
*/ @@ -50,7 +51,7 @@ static int mv88e6xxx_tai_write(struct mv88e6xxx_chip *chip, int addr, u16 data) } /* TODO: places where this are called should be using pinctrl */ -static int mv88e6xxx_set_gpio_func(struct mv88e6xxx_chip *chip, int pin, +static int mv88e6352_set_gpio_func(struct mv88e6xxx_chip *chip, int pin, int func, int input) { int err; @@ -65,7 +66,7 @@ static int mv88e6xxx_set_gpio_func(struct mv88e6xxx_chip *chip, int pin, return chip->info->ops->gpio_ops->set_pctl(chip, pin, func); } -static u64 mv88e6xxx_ptp_clock_read(const struct cyclecounter *cc) +static u64 mv88e6352_ptp_clock_read(const struct cyclecounter *cc) { struct mv88e6xxx_chip *chip = cc_to_chip(cc); u16 phc_time[2]; @@ -79,13 +80,27 @@ static u64 mv88e6xxx_ptp_clock_read(const struct cyclecounter *cc) return ((u32)phc_time[1] << 16) | phc_time[0]; } -/* mv88e6xxx_config_eventcap - configure TAI event capture +static u64 mv88e6165_ptp_clock_read(const struct cyclecounter *cc) +{ + struct mv88e6xxx_chip *chip = cc_to_chip(cc); + u16 phc_time[2]; + int err; + + err = mv88e6xxx_tai_read(chip, MV88E6XXX_PTP_GC_TIME_LO, phc_time, + ARRAY_SIZE(phc_time)); + if (err) + return 0; + else + return ((u32)phc_time[1] << 16) | phc_time[0]; +} + +/* mv88e6352_config_eventcap - configure TAI event capture * @event: PTP_CLOCK_PPS (internal) or PTP_CLOCK_EXTTS (external) * @rising: zero for falling-edge trigger, else rising-edge trigger * * This will also reset the capture sequence counter. */ -static int mv88e6xxx_config_eventcap(struct mv88e6xxx_chip *chip, int event, +static int mv88e6352_config_eventcap(struct mv88e6xxx_chip *chip, int event, int rising) { u16 global_config; @@ -118,7 +133,7 @@ static int mv88e6xxx_config_eventcap(struct mv88e6xxx_chip *chip, int event, return err; } -static void mv88e6xxx_tai_event_work(struct work_struct *ugly) +static void mv88e6352_tai_event_work(struct work_struct *ugly) { struct delayed_work *dw = to_delayed_work(ugly); struct mv88e6xxx_chip *chip = dw_tai_event_to_chip(dw); @@ -232,7 +247,7 @@ static int mv88e6xxx_ptp_settime(struct ptp_clock_info *ptp, return 0; } -static int mv88e6xxx_ptp_enable_extts(struct mv88e6xxx_chip *chip, +static int mv88e6352_ptp_enable_extts(struct mv88e6xxx_chip *chip, struct ptp_clock_request *rq, int on) { int rising = (rq->extts.flags & PTP_RISING_EDGE); @@ -250,18 +265,18 @@ static int mv88e6xxx_ptp_enable_extts(struct mv88e6xxx_chip *chip, if (on) { func = MV88E6352_G2_SCRATCH_GPIO_PCTL_EVREQ; - err = mv88e6xxx_set_gpio_func(chip, pin, func, true); + err = mv88e6352_set_gpio_func(chip, pin, func, true); if (err) goto out; schedule_delayed_work(&chip->tai_event_work, TAI_EVENT_WORK_INTERVAL); - err = mv88e6xxx_config_eventcap(chip, PTP_CLOCK_EXTTS, rising); + err = mv88e6352_config_eventcap(chip, PTP_CLOCK_EXTTS, rising); } else { func = MV88E6352_G2_SCRATCH_GPIO_PCTL_GPIO; - err = mv88e6xxx_set_gpio_func(chip, pin, func, true); + err = mv88e6352_set_gpio_func(chip, pin, func, true); cancel_delayed_work_sync(&chip->tai_event_work); } @@ -272,20 +287,20 @@ out: return err; } -static int mv88e6xxx_ptp_enable(struct ptp_clock_info *ptp, +static int mv88e6352_ptp_enable(struct ptp_clock_info *ptp, struct ptp_clock_request *rq, int on) { struct mv88e6xxx_chip *chip = ptp_to_chip(ptp); switch (rq->type) { case PTP_CLK_REQ_EXTTS: - return mv88e6xxx_ptp_enable_extts(chip, rq, on); + return mv88e6352_ptp_enable_extts(chip, rq, on); default: return -EOPNOTSUPP; } } -static int mv88e6xxx_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, +static int 
mv88e6352_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, enum ptp_pin_function func, unsigned int chan) { switch (func) { @@ -299,6 +314,55 @@ static int mv88e6xxx_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, return 0; } +const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = { + .clock_read = mv88e6352_ptp_clock_read, + .ptp_enable = mv88e6352_ptp_enable, + .ptp_verify = mv88e6352_ptp_verify, + .event_work = mv88e6352_tai_event_work, + .port_enable = mv88e6352_hwtstamp_port_enable, + .port_disable = mv88e6352_hwtstamp_port_disable, + .n_ext_ts = 1, + .arr0_sts_reg = MV88E6XXX_PORT_PTP_ARR0_STS, + .arr1_sts_reg = MV88E6XXX_PORT_PTP_ARR1_STS, + .dep_sts_reg = MV88E6XXX_PORT_PTP_DEP_STS, + .rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ), +}; + +const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = { + .clock_read = mv88e6165_ptp_clock_read, + .global_enable = mv88e6165_global_enable, + .global_disable = mv88e6165_global_disable, + .arr0_sts_reg = MV88E6165_PORT_PTP_ARR0_STS, + .arr1_sts_reg = MV88E6165_PORT_PTP_ARR1_STS, + .dep_sts_reg = MV88E6165_PORT_PTP_DEP_STS, + .rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ), +}; + +static u64 mv88e6xxx_ptp_clock_read(const struct cyclecounter *cc) +{ + struct mv88e6xxx_chip *chip = cc_to_chip(cc); + + if (chip->info->ops->ptp_ops->clock_read) + return chip->info->ops->ptp_ops->clock_read(cc); + + return 0; +} + /* With a 125MHz input clock, the 32-bit timestamp counter overflows in ~34.3 * seconds; this task forces periodic reads so that we don't miss any. 
*/ @@ -317,6 +381,7 @@ static void mv88e6xxx_ptp_overflow_check(struct work_struct *work) int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip) { + const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops; int i; /* Set up the cycle counter */ @@ -330,14 +395,15 @@ int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip) ktime_to_ns(ktime_get_real())); INIT_DELAYED_WORK(&chip->overflow_work, mv88e6xxx_ptp_overflow_check); - INIT_DELAYED_WORK(&chip->tai_event_work, mv88e6xxx_tai_event_work); + if (ptp_ops->event_work) + INIT_DELAYED_WORK(&chip->tai_event_work, ptp_ops->event_work); chip->ptp_clock_info.owner = THIS_MODULE; snprintf(chip->ptp_clock_info.name, sizeof(chip->ptp_clock_info.name), dev_name(chip->dev)); chip->ptp_clock_info.max_adj = 1000000; - chip->ptp_clock_info.n_ext_ts = 1; + chip->ptp_clock_info.n_ext_ts = ptp_ops->n_ext_ts; chip->ptp_clock_info.n_per_out = 0; chip->ptp_clock_info.n_pins = mv88e6xxx_num_gpio(chip); chip->ptp_clock_info.pps = 0; @@ -355,8 +421,8 @@ int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip) chip->ptp_clock_info.adjtime = mv88e6xxx_ptp_adjtime; chip->ptp_clock_info.gettime64 = mv88e6xxx_ptp_gettime; chip->ptp_clock_info.settime64 = mv88e6xxx_ptp_settime; - chip->ptp_clock_info.enable = mv88e6xxx_ptp_enable; - chip->ptp_clock_info.verify = mv88e6xxx_ptp_verify; + chip->ptp_clock_info.enable = ptp_ops->ptp_enable; + chip->ptp_clock_info.verify = ptp_ops->ptp_verify; chip->ptp_clock_info.do_aux_work = mv88e6xxx_hwtstamp_work; chip->ptp_clock = ptp_clock_register(&chip->ptp_clock_info, chip->dev); @@ -373,7 +439,8 @@ void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip) { if (chip->ptp_clock) { cancel_delayed_work_sync(&chip->overflow_work); - cancel_delayed_work_sync(&chip->tai_event_work); + if (chip->info->ops->ptp_ops->event_work) + cancel_delayed_work_sync(&chip->tai_event_work); ptp_clock_unregister(chip->ptp_clock); chip->ptp_clock = NULL; diff --git a/drivers/net/dsa/mv88e6xxx/ptp.h b/drivers/net/dsa/mv88e6xxx/ptp.h index 10f271ab650d..28a030840517 100644 --- a/drivers/net/dsa/mv88e6xxx/ptp.h +++ b/drivers/net/dsa/mv88e6xxx/ptp.h @@ -78,6 +78,71 @@ /* Offset 0x12: Lock Status */ #define MV88E6XXX_TAI_LOCK_STATUS 0x12 +/* Offset 0x00: Ether Type */ +#define MV88E6XXX_PTP_GC_ETYPE 0x00 + +/* 6165 Global Control Registers */ +/* Offset 0x00: Ether Type */ +#define MV88E6XXX_PTP_GC_ETYPE 0x00 + +/* Offset 0x01: Message ID */ +#define MV88E6XXX_PTP_GC_MESSAGE_ID 0x01 + +/* Offset 0x02: Time Stamp Arrive Time */ +#define MV88E6XXX_PTP_GC_TS_ARR_PTR 0x02 + +/* Offset 0x03: Port Arrival Interrupt Enable */ +#define MV88E6XXX_PTP_GC_PORT_ARR_INT_EN 0x03 + +/* Offset 0x04: Port Departure Interrupt Enable */ +#define MV88E6XXX_PTP_GC_PORT_DEP_INT_EN 0x04 + +/* Offset 0x05: Configuration */ +#define MV88E6XXX_PTP_GC_CONFIG 0x05 +#define MV88E6XXX_PTP_GC_CONFIG_DIS_OVERWRITE BIT(1) +#define MV88E6XXX_PTP_GC_CONFIG_DIS_TS BIT(0) + +/* Offset 0x8: Interrupt Status */ +#define MV88E6XXX_PTP_GC_INT_STATUS 0x08 + +/* Offset 0x9/0xa: Global Time */ +#define MV88E6XXX_PTP_GC_TIME_LO 0x09 +#define MV88E6XXX_PTP_GC_TIME_HI 0x0A + +/* 6165 Per Port Registers */ +/* Offset 0: Arrival Time 0 Status */ +#define MV88E6165_PORT_PTP_ARR0_STS 0x00 + +/* Offset 0x01/0x02: PTP Arrival 0 Time */ +#define MV88E6165_PORT_PTP_ARR0_TIME_LO 0x01 +#define MV88E6165_PORT_PTP_ARR0_TIME_HI 0x02 + +/* Offset 0x03: PTP Arrival 0 Sequence ID */ +#define MV88E6165_PORT_PTP_ARR0_SEQID 0x03 + +/* Offset 0x04: PTP Arrival 1 Status */ +#define MV88E6165_PORT_PTP_ARR1_STS 0x04 + +/* Offset 
0x05/0x06: PTP Arrival 1 Time */ +#define MV88E6165_PORT_PTP_ARR1_TIME_LO 0x05 +#define MV88E6165_PORT_PTP_ARR1_TIME_HI 0x06 + +/* Offset 0x07: PTP Arrival 1 Sequence ID */ +#define MV88E6165_PORT_PTP_ARR1_SEQID 0x07 + +/* Offset 0x08: PTP Departure Status */ +#define MV88E6165_PORT_PTP_DEP_STS 0x08 + +/* Offset 0x09/0x0a: PTP Departure Time */ +#define MV88E6165_PORT_PTP_DEP_TIME_LO 0x09 +#define MV88E6165_PORT_PTP_DEP_TIME_HI 0x0a + +/* Offset 0x0b: PTP Departure Sequence ID */ +#define MV88E6165_PORT_PTP_DEP_SEQID 0x0b + +/* Offset 0x0d: Port Status */ +#define MV88E6165_PORT_STATUS 0x0d + #ifdef CONFIG_NET_DSA_MV88E6XXX_PTP long mv88e6xxx_hwtstamp_work(struct ptp_clock_info *ptp); @@ -87,6 +152,9 @@ void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip); #define ptp_to_chip(ptp) container_of(ptp, struct mv88e6xxx_chip, \ ptp_clock_info) +extern const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops; +extern const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops; + #else /* !CONFIG_NET_DSA_MV88E6XXX_PTP */ static inline long mv88e6xxx_hwtstamp_work(struct ptp_clock_info *ptp) @@ -103,6 +171,9 @@ static inline void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip) { } +static const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {}; +static const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = {}; + #endif /* CONFIG_NET_DSA_MV88E6XXX_PTP */ #endif /* _MV88E6XXX_PTP_H */ diff --git a/drivers/net/dsa/realtek-smi.c b/drivers/net/dsa/realtek-smi.c new file mode 100644 index 000000000000..b4b839a1d095 --- /dev/null +++ b/drivers/net/dsa/realtek-smi.c @@ -0,0 +1,489 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Realtek Simple Management Interface (SMI) driver + * It can be discussed how "simple" this interface is. + * + * The SMI protocol piggy-backs the MDIO MDC and MDIO signal levels + * but the protocol is not MDIO at all. Instead it is a Realtek + * peculiarity that needs to bit-bang the lines in a special way to + * communicate with the switch. + * + * ASICs we intend to support with this driver: + * + * RTL8366 - The original version, apparently + * RTL8369 - Similar enough to have the same datasheet as RTL8366 + * RTL8366RB - Probably reads out "RTL8366 revision B", has a quite + * different register layout from the other two + * RTL8366S - Is this "RTL8366 super"?
+ * RTL8367 - Has an OpenWRT driver as well + * RTL8368S - Seems to be an alternative name for RTL8366RB + * RTL8370 - Also uses SMI + * + * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org> + * Copyright (C) 2010 Antti Seppälä <a.seppala@gmail.com> + * Copyright (C) 2010 Roman Yeryomin <roman@advem.lv> + * Copyright (C) 2011 Colin Leitner <colin.leitner@googlemail.com> + * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org> + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/spinlock.h> +#include <linux/skbuff.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_mdio.h> +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/bitops.h> +#include <linux/if_bridge.h> + +#include "realtek-smi.h" + +#define REALTEK_SMI_ACK_RETRY_COUNT 5 +#define REALTEK_SMI_HW_STOP_DELAY 25 /* msecs */ +#define REALTEK_SMI_HW_START_DELAY 100 /* msecs */ + +static inline void realtek_smi_clk_delay(struct realtek_smi *smi) +{ + ndelay(smi->clk_delay); +} + +static void realtek_smi_start(struct realtek_smi *smi) +{ + /* Set GPIO pins to output mode, with initial state: + * SCK = 0, SDA = 1 + */ + gpiod_direction_output(smi->mdc, 0); + gpiod_direction_output(smi->mdio, 1); + realtek_smi_clk_delay(smi); + + /* CLK 1: 0 -> 1, 1 -> 0 */ + gpiod_set_value(smi->mdc, 1); + realtek_smi_clk_delay(smi); + gpiod_set_value(smi->mdc, 0); + realtek_smi_clk_delay(smi); + + /* CLK 2: */ + gpiod_set_value(smi->mdc, 1); + realtek_smi_clk_delay(smi); + gpiod_set_value(smi->mdio, 0); + realtek_smi_clk_delay(smi); + gpiod_set_value(smi->mdc, 0); + realtek_smi_clk_delay(smi); + gpiod_set_value(smi->mdio, 1); +} + +static void realtek_smi_stop(struct realtek_smi *smi) +{ + realtek_smi_clk_delay(smi); + gpiod_set_value(smi->mdio, 0); + gpiod_set_value(smi->mdc, 1); + realtek_smi_clk_delay(smi); + gpiod_set_value(smi->mdio, 1); + realtek_smi_clk_delay(smi); + gpiod_set_value(smi->mdc, 1); + realtek_smi_clk_delay(smi); + gpiod_set_value(smi->mdc, 0); + realtek_smi_clk_delay(smi); + gpiod_set_value(smi->mdc, 1); + + /* Add a click */ + realtek_smi_clk_delay(smi); + gpiod_set_value(smi->mdc, 0); + realtek_smi_clk_delay(smi); + gpiod_set_value(smi->mdc, 1); + + /* Set GPIO pins to input mode */ + gpiod_direction_input(smi->mdio); + gpiod_direction_input(smi->mdc); +} + +static void realtek_smi_write_bits(struct realtek_smi *smi, u32 data, u32 len) +{ + for (; len > 0; len--) { + realtek_smi_clk_delay(smi); + + /* Prepare data */ + gpiod_set_value(smi->mdio, !!(data & (1 << (len - 1)))); + realtek_smi_clk_delay(smi); + + /* Clocking */ + gpiod_set_value(smi->mdc, 1); + realtek_smi_clk_delay(smi); + gpiod_set_value(smi->mdc, 0); + } +} + +static void realtek_smi_read_bits(struct realtek_smi *smi, u32 len, u32 *data) +{ + gpiod_direction_input(smi->mdio); + + for (*data = 0; len > 0; len--) { + u32 u; + + realtek_smi_clk_delay(smi); + + /* Clocking */ + gpiod_set_value(smi->mdc, 1); + realtek_smi_clk_delay(smi); + u = !!gpiod_get_value(smi->mdio); + gpiod_set_value(smi->mdc, 0); + + *data |= (u << (len - 1)); + } + + gpiod_direction_output(smi->mdio, 0); +} + +static int realtek_smi_wait_for_ack(struct realtek_smi *smi) +{ + int retry_cnt; + + retry_cnt = 0; + do { + u32 ack; + + realtek_smi_read_bits(smi, 1, &ack); + if (ack == 0) + break; + + if (++retry_cnt > REALTEK_SMI_ACK_RETRY_COUNT) { + dev_err(smi->dev, "ACK timeout\n"); + return -ETIMEDOUT; + } + } while (1); + + 
return 0; +} + +static int realtek_smi_write_byte(struct realtek_smi *smi, u8 data) +{ + realtek_smi_write_bits(smi, data, 8); + return realtek_smi_wait_for_ack(smi); +} + +static int realtek_smi_write_byte_noack(struct realtek_smi *smi, u8 data) +{ + realtek_smi_write_bits(smi, data, 8); + return 0; +} + +static int realtek_smi_read_byte0(struct realtek_smi *smi, u8 *data) +{ + u32 t; + + /* Read data */ + realtek_smi_read_bits(smi, 8, &t); + *data = (t & 0xff); + + /* Send an ACK */ + realtek_smi_write_bits(smi, 0x00, 1); + + return 0; +} + +static int realtek_smi_read_byte1(struct realtek_smi *smi, u8 *data) +{ + u32 t; + + /* Read data */ + realtek_smi_read_bits(smi, 8, &t); + *data = (t & 0xff); + + /* Send an ACK */ + realtek_smi_write_bits(smi, 0x01, 1); + + return 0; +} + +static int realtek_smi_read_reg(struct realtek_smi *smi, u32 addr, u32 *data) +{ + unsigned long flags; + u8 lo = 0; + u8 hi = 0; + int ret; + + spin_lock_irqsave(&smi->lock, flags); + + realtek_smi_start(smi); + + /* Send READ command */ + ret = realtek_smi_write_byte(smi, smi->cmd_read); + if (ret) + goto out; + + /* Set ADDR[7:0] */ + ret = realtek_smi_write_byte(smi, addr & 0xff); + if (ret) + goto out; + + /* Set ADDR[15:8] */ + ret = realtek_smi_write_byte(smi, addr >> 8); + if (ret) + goto out; + + /* Read DATA[7:0] */ + realtek_smi_read_byte0(smi, &lo); + /* Read DATA[15:8] */ + realtek_smi_read_byte1(smi, &hi); + + *data = ((u32)lo) | (((u32)hi) << 8); + + ret = 0; + + out: + realtek_smi_stop(smi); + spin_unlock_irqrestore(&smi->lock, flags); + + return ret; +} + +static int realtek_smi_write_reg(struct realtek_smi *smi, + u32 addr, u32 data, bool ack) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&smi->lock, flags); + + realtek_smi_start(smi); + + /* Send WRITE command */ + ret = realtek_smi_write_byte(smi, smi->cmd_write); + if (ret) + goto out; + + /* Set ADDR[7:0] */ + ret = realtek_smi_write_byte(smi, addr & 0xff); + if (ret) + goto out; + + /* Set ADDR[15:8] */ + ret = realtek_smi_write_byte(smi, addr >> 8); + if (ret) + goto out; + + /* Write DATA[7:0] */ + ret = realtek_smi_write_byte(smi, data & 0xff); + if (ret) + goto out; + + /* Write DATA[15:8] */ + if (ack) + ret = realtek_smi_write_byte(smi, data >> 8); + else + ret = realtek_smi_write_byte_noack(smi, data >> 8); + if (ret) + goto out; + + ret = 0; + + out: + realtek_smi_stop(smi); + spin_unlock_irqrestore(&smi->lock, flags); + + return ret; +} + +/* There is one single case when we need to use this accessor and that + * is when issuing soft reset. Since the device resets as soon as we write + * that bit, no ACK will come back for natural reasons.
+ */ +int realtek_smi_write_reg_noack(struct realtek_smi *smi, u32 addr, + u32 data) +{ + return realtek_smi_write_reg(smi, addr, data, false); +} +EXPORT_SYMBOL_GPL(realtek_smi_write_reg_noack); + +/* Regmap accessors */ + +static int realtek_smi_write(void *ctx, u32 reg, u32 val) +{ + struct realtek_smi *smi = ctx; + + return realtek_smi_write_reg(smi, reg, val, true); +} + +static int realtek_smi_read(void *ctx, u32 reg, u32 *val) +{ + struct realtek_smi *smi = ctx; + + return realtek_smi_read_reg(smi, reg, val); +} + +static const struct regmap_config realtek_smi_mdio_regmap_config = { + .reg_bits = 10, /* A4..A0 R4..R0 */ + .val_bits = 16, + .reg_stride = 1, + /* PHY regs are at 0x8000 */ + .max_register = 0xffff, + .reg_format_endian = REGMAP_ENDIAN_BIG, + .reg_read = realtek_smi_read, + .reg_write = realtek_smi_write, + .cache_type = REGCACHE_NONE, +}; + +static int realtek_smi_mdio_read(struct mii_bus *bus, int addr, int regnum) +{ + struct realtek_smi *smi = bus->priv; + + return smi->ops->phy_read(smi, addr, regnum); +} + +static int realtek_smi_mdio_write(struct mii_bus *bus, int addr, int regnum, + u16 val) +{ + struct realtek_smi *smi = bus->priv; + + return smi->ops->phy_write(smi, addr, regnum, val); +} + +int realtek_smi_setup_mdio(struct realtek_smi *smi) +{ + struct device_node *mdio_np; + int ret; + + mdio_np = of_find_compatible_node(smi->dev->of_node, NULL, + "realtek,smi-mdio"); + if (!mdio_np) { + dev_err(smi->dev, "no MDIO bus node\n"); + return -ENODEV; + } + + smi->slave_mii_bus = devm_mdiobus_alloc(smi->dev); + if (!smi->slave_mii_bus) + return -ENOMEM; + smi->slave_mii_bus->priv = smi; + smi->slave_mii_bus->name = "SMI slave MII"; + smi->slave_mii_bus->read = realtek_smi_mdio_read; + smi->slave_mii_bus->write = realtek_smi_mdio_write; + snprintf(smi->slave_mii_bus->id, MII_BUS_ID_SIZE, "SMI-%d", + smi->ds->index); + smi->slave_mii_bus->dev.of_node = mdio_np; + smi->slave_mii_bus->parent = smi->dev; + smi->ds->slave_mii_bus = smi->slave_mii_bus; + + ret = of_mdiobus_register(smi->slave_mii_bus, mdio_np); + if (ret) { + dev_err(smi->dev, "unable to register MDIO bus %s\n", + smi->slave_mii_bus->id); + of_node_put(mdio_np); + return ret; + } + + return 0; +} + +static int realtek_smi_probe(struct platform_device *pdev) +{ + const struct realtek_smi_variant *var; + struct device *dev = &pdev->dev; + struct realtek_smi *smi; + struct device_node *np; + int ret; + + var = of_device_get_match_data(dev); + np = dev->of_node; + + smi = devm_kzalloc(dev, sizeof(*smi), GFP_KERNEL); + if (!smi) + return -ENOMEM; + smi->map = devm_regmap_init(dev, NULL, smi, + &realtek_smi_mdio_regmap_config); + if (IS_ERR(smi->map)) { + ret = PTR_ERR(smi->map); + dev_err(dev, "regmap init failed: %d\n", ret); + return ret; + } + + /* Link forward and backward */ + smi->dev = dev; + smi->clk_delay = var->clk_delay; + smi->cmd_read = var->cmd_read; + smi->cmd_write = var->cmd_write; + smi->ops = var->ops; + + dev_set_drvdata(dev, smi); + spin_lock_init(&smi->lock); + + /* TODO: if power is software controlled, set up any regulators here */ + + /* Assert then deassert RESET */ + smi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(smi->reset)) { + dev_err(dev, "failed to get RESET GPIO\n"); + return PTR_ERR(smi->reset); + } + msleep(REALTEK_SMI_HW_STOP_DELAY); + gpiod_set_value(smi->reset, 0); + msleep(REALTEK_SMI_HW_START_DELAY); + dev_info(dev, "deasserted RESET\n"); + + /* Fetch MDIO pins */ + smi->mdc = devm_gpiod_get_optional(dev, "mdc", GPIOD_OUT_LOW); + if
(IS_ERR(smi->mdc)) + return PTR_ERR(smi->mdc); + smi->mdio = devm_gpiod_get_optional(dev, "mdio", GPIOD_OUT_LOW); + if (IS_ERR(smi->mdio)) + return PTR_ERR(smi->mdio); + + smi->leds_disabled = of_property_read_bool(np, "realtek,disable-leds"); + + ret = smi->ops->detect(smi); + if (ret) { + dev_err(dev, "unable to detect switch\n"); + return ret; + } + + smi->ds = dsa_switch_alloc(dev, smi->num_ports); + if (!smi->ds) + return -ENOMEM; + smi->ds->priv = smi; + + smi->ds->ops = var->ds_ops; + ret = dsa_register_switch(smi->ds); + if (ret) { + dev_err(dev, "unable to register switch ret = %d\n", ret); + return ret; + } + return 0; +} + +static int realtek_smi_remove(struct platform_device *pdev) +{ + struct realtek_smi *smi = dev_get_drvdata(&pdev->dev); + + dsa_unregister_switch(smi->ds); + gpiod_set_value(smi->reset, 1); + + return 0; +} + +static const struct of_device_id realtek_smi_of_match[] = { + { + .compatible = "realtek,rtl8366rb", + .data = &rtl8366rb_variant, + }, + { + /* FIXME: add support for RTL8366S and more */ + .compatible = "realtek,rtl8366s", + .data = NULL, + }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, realtek_smi_of_match); + +static struct platform_driver realtek_smi_driver = { + .driver = { + .name = "realtek-smi", + .of_match_table = of_match_ptr(realtek_smi_of_match), + }, + .probe = realtek_smi_probe, + .remove = realtek_smi_remove, +}; +module_platform_driver(realtek_smi_driver); + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/dsa/realtek-smi.h b/drivers/net/dsa/realtek-smi.h new file mode 100644 index 000000000000..9a63b51e1d82 --- /dev/null +++ b/drivers/net/dsa/realtek-smi.h @@ -0,0 +1,144 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Realtek SMI interface driver defines + * + * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org> + * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org> + */ + +#ifndef _REALTEK_SMI_H +#define _REALTEK_SMI_H + +#include <linux/phy.h> +#include <linux/platform_device.h> +#include <linux/gpio/consumer.h> +#include <net/dsa.h> + +struct realtek_smi_ops; +struct dentry; +struct inode; +struct file; + +struct rtl8366_mib_counter { + unsigned int base; + unsigned int offset; + unsigned int length; + const char *name; +}; + +struct rtl8366_vlan_mc { + u16 vid; + u16 untag; + u16 member; + u8 fid; + u8 priority; +}; + +struct rtl8366_vlan_4k { + u16 vid; + u16 untag; + u16 member; + u8 fid; +}; + +struct realtek_smi { + struct device *dev; + struct gpio_desc *reset; + struct gpio_desc *mdc; + struct gpio_desc *mdio; + struct regmap *map; + struct mii_bus *slave_mii_bus; + + unsigned int clk_delay; + u8 cmd_read; + u8 cmd_write; + spinlock_t lock; /* Locks around command writes */ + struct dsa_switch *ds; + struct irq_domain *irqdomain; + bool leds_disabled; + + unsigned int cpu_port; + unsigned int num_ports; + unsigned int num_vlan_mc; + unsigned int num_mib_counters; + struct rtl8366_mib_counter *mib_counters; + + const struct realtek_smi_ops *ops; + + int vlan_enabled; + int vlan4k_enabled; + + char buf[4096]; +}; + +/** + * struct realtek_smi_ops - vtable for the per-SMI-chiptype operations + * @detect: detects the chiptype + */ +struct realtek_smi_ops { + int (*detect)(struct realtek_smi *smi); + int (*reset_chip)(struct realtek_smi *smi); + int (*setup)(struct realtek_smi *smi); + void (*cleanup)(struct realtek_smi *smi); + int (*get_mib_counter)(struct realtek_smi *smi, + int port, + struct rtl8366_mib_counter *mib, + u64 *mibvalue); + int (*get_vlan_mc)(struct realtek_smi *smi, u32 index, + struct 
rtl8366_vlan_mc *vlanmc); + int (*set_vlan_mc)(struct realtek_smi *smi, u32 index, + const struct rtl8366_vlan_mc *vlanmc); + int (*get_vlan_4k)(struct realtek_smi *smi, u32 vid, + struct rtl8366_vlan_4k *vlan4k); + int (*set_vlan_4k)(struct realtek_smi *smi, + const struct rtl8366_vlan_4k *vlan4k); + int (*get_mc_index)(struct realtek_smi *smi, int port, int *val); + int (*set_mc_index)(struct realtek_smi *smi, int port, int index); + bool (*is_vlan_valid)(struct realtek_smi *smi, unsigned int vlan); + int (*enable_vlan)(struct realtek_smi *smi, bool enable); + int (*enable_vlan4k)(struct realtek_smi *smi, bool enable); + int (*enable_port)(struct realtek_smi *smi, int port, bool enable); + int (*phy_read)(struct realtek_smi *smi, int phy, int regnum); + int (*phy_write)(struct realtek_smi *smi, int phy, int regnum, + u16 val); +}; + +struct realtek_smi_variant { + const struct dsa_switch_ops *ds_ops; + const struct realtek_smi_ops *ops; + unsigned int clk_delay; + u8 cmd_read; + u8 cmd_write; +}; + +/* SMI core calls */ +int realtek_smi_write_reg_noack(struct realtek_smi *smi, u32 addr, + u32 data); +int realtek_smi_setup_mdio(struct realtek_smi *smi); + +/* RTL8366 library helpers */ +int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used); +int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member, + u32 untag, u32 fid); +int rtl8366_get_pvid(struct realtek_smi *smi, int port, int *val); +int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port, + unsigned int vid); +int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable); +int rtl8366_enable_vlan(struct realtek_smi *smi, bool enable); +int rtl8366_reset_vlan(struct realtek_smi *smi); +int rtl8366_init_vlan(struct realtek_smi *smi); +int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, + bool vlan_filtering); +int rtl8366_vlan_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan); +void rtl8366_vlan_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan); +int rtl8366_vlan_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan); +void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset, + uint8_t *data); +int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset); +void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data); + +extern const struct realtek_smi_variant rtl8366rb_variant; + +#endif /* _REALTEK_SMI_H */ diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c new file mode 100644 index 000000000000..6dedd43442cc --- /dev/null +++ b/drivers/net/dsa/rtl8366.c @@ -0,0 +1,515 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Realtek SMI library helpers for the RTL8366x variants + * RTL8366RB and RTL8366S + * + * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org> + * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org> + * Copyright (C) 2010 Antti Seppälä <a.seppala@gmail.com> + * Copyright (C) 2010 Roman Yeryomin <roman@advem.lv> + * Copyright (C) 2011 Colin Leitner <colin.leitner@googlemail.com> + */ +#include <linux/if_bridge.h> +#include <net/dsa.h> + +#include "realtek-smi.h" + +int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used) +{ + int ret; + int i; + + *used = 0; + for (i = 0; i < smi->num_ports; i++) { + int index = 0; + + ret = smi->ops->get_mc_index(smi, i, &index); + if (ret) + return ret; + + if (mc_index == index) { + *used = 1; + break; + } + } + + return 0; +} 
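+ +/* The RTL8366 uses two VLAN tables: a 4K table indexed by VID and a small + * set of "member config" (MC) entries that the ports point at for their + * PVID. The helpers below keep the two in sync. A hypothetical caller + * (sketch only, not taken from this driver) would plumb a VLAN through + * them like: + * + *	err = rtl8366_set_vlan(smi, 10, BIT(0) | BIT(1), BIT(0) | BIT(1), 0); + *	if (!err) + *		err = rtl8366_set_pvid(smi, 0, 10); + * + * which makes ports 0 and 1 untagged members of VID 10 in FID 0 and sets + * VID 10 as the PVID of port 0. + */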
+EXPORT_SYMBOL_GPL(rtl8366_mc_is_used); + +int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member, + u32 untag, u32 fid) +{ + struct rtl8366_vlan_4k vlan4k; + int ret; + int i; + + /* Update the 4K table */ + ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k); + if (ret) + return ret; + + vlan4k.member = member; + vlan4k.untag = untag; + vlan4k.fid = fid; + ret = smi->ops->set_vlan_4k(smi, &vlan4k); + if (ret) + return ret; + + /* Try to find an existing MC entry for this VID */ + for (i = 0; i < smi->num_vlan_mc; i++) { + struct rtl8366_vlan_mc vlanmc; + + ret = smi->ops->get_vlan_mc(smi, i, &vlanmc); + if (ret) + return ret; + + if (vid == vlanmc.vid) { + /* update the MC entry */ + vlanmc.member = member; + vlanmc.untag = untag; + vlanmc.fid = fid; + + ret = smi->ops->set_vlan_mc(smi, i, &vlanmc); + break; + } + } + + return ret; +} +EXPORT_SYMBOL_GPL(rtl8366_set_vlan); + +int rtl8366_get_pvid(struct realtek_smi *smi, int port, int *val) +{ + struct rtl8366_vlan_mc vlanmc; + int ret; + int index; + + ret = smi->ops->get_mc_index(smi, port, &index); + if (ret) + return ret; + + ret = smi->ops->get_vlan_mc(smi, index, &vlanmc); + if (ret) + return ret; + + *val = vlanmc.vid; + return 0; +} +EXPORT_SYMBOL_GPL(rtl8366_get_pvid); + +int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port, + unsigned int vid) +{ + struct rtl8366_vlan_mc vlanmc; + struct rtl8366_vlan_4k vlan4k; + int ret; + int i; + + /* Try to find an existing MC entry for this VID */ + for (i = 0; i < smi->num_vlan_mc; i++) { + ret = smi->ops->get_vlan_mc(smi, i, &vlanmc); + if (ret) + return ret; + + if (vid == vlanmc.vid) { + ret = smi->ops->set_vlan_mc(smi, i, &vlanmc); + if (ret) + return ret; + + ret = smi->ops->set_mc_index(smi, port, i); + return ret; + } + } + + /* We have no MC entry for this VID, try to find an empty one */ + for (i = 0; i < smi->num_vlan_mc; i++) { + ret = smi->ops->get_vlan_mc(smi, i, &vlanmc); + if (ret) + return ret; + + if (vlanmc.vid == 0 && vlanmc.member == 0) { + /* Update the entry from the 4K table */ + ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k); + if (ret) + return ret; + + vlanmc.vid = vid; + vlanmc.member = vlan4k.member; + vlanmc.untag = vlan4k.untag; + vlanmc.fid = vlan4k.fid; + ret = smi->ops->set_vlan_mc(smi, i, &vlanmc); + if (ret) + return ret; + + ret = smi->ops->set_mc_index(smi, port, i); + return ret; + } + } + + /* MC table is full, try to find an unused entry and replace it */ + for (i = 0; i < smi->num_vlan_mc; i++) { + int used; + + ret = rtl8366_mc_is_used(smi, i, &used); + if (ret) + return ret; + + if (!used) { + /* Update the entry from the 4K table */ + ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k); + if (ret) + return ret; + + vlanmc.vid = vid; + vlanmc.member = vlan4k.member; + vlanmc.untag = vlan4k.untag; + vlanmc.fid = vlan4k.fid; + ret = smi->ops->set_vlan_mc(smi, i, &vlanmc); + if (ret) + return ret; + + ret = smi->ops->set_mc_index(smi, port, i); + return ret; + } + } + + dev_err(smi->dev, + "all VLAN member configurations are in use\n"); + + return -ENOSPC; +} +EXPORT_SYMBOL_GPL(rtl8366_set_pvid); + +int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable) +{ + int ret; + + /* To enable 4k VLAN, ordinary VLAN must be enabled first, + * but if we disable 4k VLAN it is fine to leave ordinary + * VLAN enabled. 
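+ * The converse coupling lives in rtl8366_enable_vlan() below: turning + * ordinary VLAN off also forces 4k VLAN off.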
+ */ + if (enable) { + /* Make sure VLAN is ON */ + ret = smi->ops->enable_vlan(smi, true); + if (ret) + return ret; + + smi->vlan_enabled = true; + } + + ret = smi->ops->enable_vlan4k(smi, enable); + if (ret) + return ret; + + smi->vlan4k_enabled = enable; + return 0; +} +EXPORT_SYMBOL_GPL(rtl8366_enable_vlan4k); + +int rtl8366_enable_vlan(struct realtek_smi *smi, bool enable) +{ + int ret; + + ret = smi->ops->enable_vlan(smi, enable); + if (ret) + return ret; + + smi->vlan_enabled = enable; + + /* If we turn VLAN off, make sure that we turn off + * 4k VLAN as well, if that happened to be on. + */ + if (!enable) { + smi->vlan4k_enabled = false; + ret = smi->ops->enable_vlan4k(smi, false); + } + + return ret; +} +EXPORT_SYMBOL_GPL(rtl8366_enable_vlan); + +int rtl8366_reset_vlan(struct realtek_smi *smi) +{ + struct rtl8366_vlan_mc vlanmc; + int ret; + int i; + + rtl8366_enable_vlan(smi, false); + rtl8366_enable_vlan4k(smi, false); + + /* Clear the 16 VLAN member configurations */ + vlanmc.vid = 0; + vlanmc.priority = 0; + vlanmc.member = 0; + vlanmc.untag = 0; + vlanmc.fid = 0; + for (i = 0; i < smi->num_vlan_mc; i++) { + ret = smi->ops->set_vlan_mc(smi, i, &vlanmc); + if (ret) + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(rtl8366_reset_vlan); + +int rtl8366_init_vlan(struct realtek_smi *smi) +{ + int port; + int ret; + + ret = rtl8366_reset_vlan(smi); + if (ret) + return ret; + + /* Loop over the available ports, for each port, associate + * it with the VLAN (port+1) + */ + for (port = 0; port < smi->num_ports; port++) { + u32 mask; + + if (port == smi->cpu_port) + /* For the CPU port, make all ports members of this + * VLAN. + */ + mask = GENMASK(smi->num_ports - 1, 0); + else + /* For all other ports, enable the port itself plus + * the CPU port. + */ + mask = BIT(port) | BIT(smi->cpu_port); + + /* For each port, set the port as member of VLAN (port+1) + * and untagged, except for the CPU port: the CPU port (5) is + * member of VLAN 6 and so are ALL the other ports as well. + * Use filter 0 (no filter). + */ + dev_info(smi->dev, "VLAN%d port mask for port %d, %08x\n", + (port + 1), port, mask); + ret = rtl8366_set_vlan(smi, (port + 1), mask, mask, 0); + if (ret) + return ret; + + dev_info(smi->dev, "VLAN%d port %d, PVID set to %d\n", + (port + 1), port, (port + 1)); + ret = rtl8366_set_pvid(smi, port, (port + 1)); + if (ret) + return ret; + } + + return rtl8366_enable_vlan(smi, true); +} +EXPORT_SYMBOL_GPL(rtl8366_init_vlan); + +int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering) +{ + struct realtek_smi *smi = ds->priv; + struct rtl8366_vlan_4k vlan4k; + int ret; + + if (!smi->ops->is_vlan_valid(smi, port)) + return -EINVAL; + + dev_info(smi->dev, "%s filtering on port %d\n", + vlan_filtering ? "enable" : "disable", + port); + + /* TODO: + * The hardware supports filter IDs (FID) 0..7, but I have no clue how + * to support this in the driver when the callback only says on/off.
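+ * For now, every VLAN touched by this callback is simply placed in + * FID 1, see below.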
+ */ + ret = smi->ops->get_vlan_4k(smi, port, &vlan4k); + if (ret) + return ret; + + /* Just set the filter to FID 1 for now then */ + ret = rtl8366_set_vlan(smi, port, + vlan4k.member, + vlan4k.untag, + 1); + if (ret) + return ret; + + return 0; +} +EXPORT_SYMBOL_GPL(rtl8366_vlan_filtering); + +int rtl8366_vlan_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + struct realtek_smi *smi = ds->priv; + int ret; + + if (!smi->ops->is_vlan_valid(smi, port)) + return -EINVAL; + + dev_info(smi->dev, "prepare VLANs %04x..%04x\n", + vlan->vid_begin, vlan->vid_end); + + /* Enable VLAN in the hardware + * FIXME: what's with this 4k business? + * Just rtl8366_enable_vlan() seems inconclusive. + */ + ret = rtl8366_enable_vlan4k(smi, true); + if (ret) + return ret; + + return 0; +} +EXPORT_SYMBOL_GPL(rtl8366_vlan_prepare); + +void rtl8366_vlan_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + bool untagged = !!(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED); + bool pvid = !!(vlan->flags & BRIDGE_VLAN_INFO_PVID); + struct realtek_smi *smi = ds->priv; + u32 member = 0; + u32 untag = 0; + u16 vid; + int ret; + + if (!smi->ops->is_vlan_valid(smi, port)) + return; + + dev_info(smi->dev, "add VLAN on port %d, %s, %s\n", + port, + untagged ? "untagged" : "tagged", + pvid ? "PVID" : "no PVID"); + + if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port)) + dev_err(smi->dev, "port is DSA or CPU port\n"); + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { + int pvid_val = 0; + + dev_info(smi->dev, "add VLAN %04x\n", vid); + member |= BIT(port); + + if (untagged) + untag |= BIT(port); + + /* To ensure that we have a valid MC entry for this VLAN, + * initialize the port VLAN ID here. + */ + ret = rtl8366_get_pvid(smi, port, &pvid_val); + if (ret < 0) { + dev_err(smi->dev, "could not lookup PVID for port %d\n", + port); + return; + } + if (pvid_val == 0) { + ret = rtl8366_set_pvid(smi, port, vid); + if (ret < 0) + return; + } + } + + ret = rtl8366_set_vlan(smi, port, member, untag, 0); + if (ret) + dev_err(smi->dev, + "failed to set up VLAN %04x\n", + vid); +} +EXPORT_SYMBOL_GPL(rtl8366_vlan_add); + +int rtl8366_vlan_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + struct realtek_smi *smi = ds->priv; + u16 vid; + int ret; + + dev_info(smi->dev, "del VLAN on port %d\n", port); + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { + int i; + + dev_info(smi->dev, "del VLAN %04x\n", vid); + + for (i = 0; i < smi->num_vlan_mc; i++) { + struct rtl8366_vlan_mc vlanmc; + + ret = smi->ops->get_vlan_mc(smi, i, &vlanmc); + if (ret) + return ret; + + if (vid == vlanmc.vid) { + /* clear VLAN member configurations */ + vlanmc.vid = 0; + vlanmc.priority = 0; + vlanmc.member = 0; + vlanmc.untag = 0; + vlanmc.fid = 0; + + ret = smi->ops->set_vlan_mc(smi, i, &vlanmc); + if (ret) { + dev_err(smi->dev, + "failed to remove VLAN %04x\n", + vid); + return ret; + } + break; + } + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(rtl8366_vlan_del); + +void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset, + uint8_t *data) +{ + struct realtek_smi *smi = ds->priv; + struct rtl8366_mib_counter *mib; + int i; + + if (port >= smi->num_ports) + return; + + for (i = 0; i < smi->num_mib_counters; i++) { + mib = &smi->mib_counters[i]; + strncpy(data + i * ETH_GSTRING_LEN, + mib->name, ETH_GSTRING_LEN); + } +} +EXPORT_SYMBOL_GPL(rtl8366_get_strings); + +int rtl8366_get_sset_count(struct dsa_switch *ds, int
port, int sset) +{ + struct realtek_smi *smi = ds->priv; + + /* We only support ETH_SS_STATS */ + if (sset != ETH_SS_STATS) + return 0; + if (port >= smi->num_ports) + return -EINVAL; + + return smi->num_mib_counters; +} +EXPORT_SYMBOL_GPL(rtl8366_get_sset_count); + +void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data) +{ + struct realtek_smi *smi = ds->priv; + int i; + int ret; + + if (port >= smi->num_ports) + return; + + for (i = 0; i < smi->num_mib_counters; i++) { + struct rtl8366_mib_counter *mib; + u64 mibvalue = 0; + + mib = &smi->mib_counters[i]; + ret = smi->ops->get_mib_counter(smi, port, mib, &mibvalue); + if (ret) { + dev_err(smi->dev, "error reading MIB counter %s\n", + mib->name); + } + data[i] = mibvalue; + } +} +EXPORT_SYMBOL_GPL(rtl8366_get_ethtool_stats); diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c new file mode 100644 index 000000000000..1e55b9bf8b56 --- /dev/null +++ b/drivers/net/dsa/rtl8366rb.c @@ -0,0 +1,1424 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Realtek SMI subdriver for the Realtek RTL8366RB ethernet switch + * + * This is a sparsely documented chip; the only viable documentation seems + * to be a patched-up code drop from the vendor that appears in various + * GPL source trees. + * + * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org> + * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org> + * Copyright (C) 2010 Antti Seppälä <a.seppala@gmail.com> + * Copyright (C) 2010 Roman Yeryomin <roman@advem.lv> + * Copyright (C) 2011 Colin Leitner <colin.leitner@googlemail.com> + */ + +#include <linux/bitops.h> +#include <linux/etherdevice.h> +#include <linux/interrupt.h> +#include <linux/irqdomain.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/of_irq.h> +#include <linux/regmap.h> + +#include "realtek-smi.h" + +#define RTL8366RB_PORT_NUM_CPU 5 +#define RTL8366RB_NUM_PORTS 6 +#define RTL8366RB_PHY_NO_MAX 4 +#define RTL8366RB_PHY_ADDR_MAX 31 + +/* Switch Global Configuration register */ +#define RTL8366RB_SGCR 0x0000 +#define RTL8366RB_SGCR_EN_BC_STORM_CTRL BIT(0) +#define RTL8366RB_SGCR_MAX_LENGTH(a) ((a) << 4) +#define RTL8366RB_SGCR_MAX_LENGTH_MASK RTL8366RB_SGCR_MAX_LENGTH(0x3) +#define RTL8366RB_SGCR_MAX_LENGTH_1522 RTL8366RB_SGCR_MAX_LENGTH(0x0) +#define RTL8366RB_SGCR_MAX_LENGTH_1536 RTL8366RB_SGCR_MAX_LENGTH(0x1) +#define RTL8366RB_SGCR_MAX_LENGTH_1552 RTL8366RB_SGCR_MAX_LENGTH(0x2) +#define RTL8366RB_SGCR_MAX_LENGTH_9216 RTL8366RB_SGCR_MAX_LENGTH(0x3) +#define RTL8366RB_SGCR_EN_VLAN BIT(13) +#define RTL8366RB_SGCR_EN_VLAN_4KTB BIT(14) + +/* Port Enable Control register */ +#define RTL8366RB_PECR 0x0001 + +/* Switch Security Control registers */ +#define RTL8366RB_SSCR0 0x0002 +#define RTL8366RB_SSCR1 0x0003 +#define RTL8366RB_SSCR2 0x0004 +#define RTL8366RB_SSCR2_DROP_UNKNOWN_DA BIT(0) + +/* Port Mirror Control Register */ +#define RTL8366RB_PMCR 0x0007 +#define RTL8366RB_PMCR_SOURCE_PORT(a) (a) +#define RTL8366RB_PMCR_SOURCE_PORT_MASK 0x000f +#define RTL8366RB_PMCR_MONITOR_PORT(a) ((a) << 4) +#define RTL8366RB_PMCR_MONITOR_PORT_MASK 0x00f0 +#define RTL8366RB_PMCR_MIRROR_RX BIT(8) +#define RTL8366RB_PMCR_MIRROR_TX BIT(9) +#define RTL8366RB_PMCR_MIRROR_SPC BIT(10) +#define RTL8366RB_PMCR_MIRROR_ISO BIT(11) + +/* bits 0..7 = port 0, bits 8..15 = port 1 */ +#define RTL8366RB_PAACR0 0x0010 +/* bits 0..7 = port 2, bits 8..15 = port 3 */ +#define RTL8366RB_PAACR1 0x0011 +/* bits 0..7 = port 4, bits 8..15 = port 5 */ +#define RTL8366RB_PAACR2 0x0012 +#define RTL8366RB_PAACR_SPEED_10M 0
+#define RTL8366RB_PAACR_SPEED_100M 1 +#define RTL8366RB_PAACR_SPEED_1000M 2 +#define RTL8366RB_PAACR_FULL_DUPLEX BIT(2) +#define RTL8366RB_PAACR_LINK_UP BIT(4) +#define RTL8366RB_PAACR_TX_PAUSE BIT(5) +#define RTL8366RB_PAACR_RX_PAUSE BIT(6) +#define RTL8366RB_PAACR_AN BIT(7) + +#define RTL8366RB_PAACR_CPU_PORT (RTL8366RB_PAACR_SPEED_1000M | \ + RTL8366RB_PAACR_FULL_DUPLEX | \ + RTL8366RB_PAACR_LINK_UP | \ + RTL8366RB_PAACR_TX_PAUSE | \ + RTL8366RB_PAACR_RX_PAUSE) + +/* bits 0..7 = port 0, bits 8..15 = port 1 */ +#define RTL8366RB_PSTAT0 0x0014 +/* bits 0..7 = port 2, bits 8..15 = port 3 */ +#define RTL8366RB_PSTAT1 0x0015 +/* bits 0..7 = port 4, bits 8..15 = port 5 */ +#define RTL8366RB_PSTAT2 0x0016 + +#define RTL8366RB_POWER_SAVING_REG 0x0021 + +/* CPU port control reg */ +#define RTL8368RB_CPU_CTRL_REG 0x0061 +#define RTL8368RB_CPU_PORTS_MSK 0x00FF +/* Enables inserting custom tag length/type 0x8899 */ +#define RTL8368RB_CPU_INSTAG BIT(15) + +#define RTL8366RB_SMAR0 0x0070 /* bits 0..15 */ +#define RTL8366RB_SMAR1 0x0071 /* bits 16..31 */ +#define RTL8366RB_SMAR2 0x0072 /* bits 32..47 */ + +#define RTL8366RB_RESET_CTRL_REG 0x0100 +#define RTL8366RB_CHIP_CTRL_RESET_HW BIT(0) +#define RTL8366RB_CHIP_CTRL_RESET_SW BIT(1) + +#define RTL8366RB_CHIP_ID_REG 0x0509 +#define RTL8366RB_CHIP_ID_8366 0x5937 +#define RTL8366RB_CHIP_VERSION_CTRL_REG 0x050A +#define RTL8366RB_CHIP_VERSION_MASK 0xf + +/* PHY registers control */ +#define RTL8366RB_PHY_ACCESS_CTRL_REG 0x8000 +#define RTL8366RB_PHY_CTRL_READ BIT(0) +#define RTL8366RB_PHY_CTRL_WRITE 0 +#define RTL8366RB_PHY_ACCESS_BUSY_REG 0x8001 +#define RTL8366RB_PHY_INT_BUSY BIT(0) +#define RTL8366RB_PHY_EXT_BUSY BIT(4) +#define RTL8366RB_PHY_ACCESS_DATA_REG 0x8002 +#define RTL8366RB_PHY_EXT_CTRL_REG 0x8010 +#define RTL8366RB_PHY_EXT_WRDATA_REG 0x8011 +#define RTL8366RB_PHY_EXT_RDDATA_REG 0x8012 + +#define RTL8366RB_PHY_REG_MASK 0x1f +#define RTL8366RB_PHY_PAGE_OFFSET 5 +#define RTL8366RB_PHY_PAGE_MASK (0xf << 5) +#define RTL8366RB_PHY_NO_OFFSET 9 +#define RTL8366RB_PHY_NO_MASK (0x1f << 9) + +#define RTL8366RB_VLAN_INGRESS_CTRL2_REG 0x037f + +/* LED control registers */ +#define RTL8366RB_LED_BLINKRATE_REG 0x0430 +#define RTL8366RB_LED_BLINKRATE_MASK 0x0007 +#define RTL8366RB_LED_BLINKRATE_28MS 0x0000 +#define RTL8366RB_LED_BLINKRATE_56MS 0x0001 +#define RTL8366RB_LED_BLINKRATE_84MS 0x0002 +#define RTL8366RB_LED_BLINKRATE_111MS 0x0003 +#define RTL8366RB_LED_BLINKRATE_222MS 0x0004 +#define RTL8366RB_LED_BLINKRATE_446MS 0x0005 + +#define RTL8366RB_LED_CTRL_REG 0x0431 +#define RTL8366RB_LED_OFF 0x0 +#define RTL8366RB_LED_DUP_COL 0x1 +#define RTL8366RB_LED_LINK_ACT 0x2 +#define RTL8366RB_LED_SPD1000 0x3 +#define RTL8366RB_LED_SPD100 0x4 +#define RTL8366RB_LED_SPD10 0x5 +#define RTL8366RB_LED_SPD1000_ACT 0x6 +#define RTL8366RB_LED_SPD100_ACT 0x7 +#define RTL8366RB_LED_SPD10_ACT 0x8 +#define RTL8366RB_LED_SPD100_10_ACT 0x9 +#define RTL8366RB_LED_FIBER 0xa +#define RTL8366RB_LED_AN_FAULT 0xb +#define RTL8366RB_LED_LINK_RX 0xc +#define RTL8366RB_LED_LINK_TX 0xd +#define RTL8366RB_LED_MASTER 0xe +#define RTL8366RB_LED_FORCE 0xf +#define RTL8366RB_LED_0_1_CTRL_REG 0x0432 +#define RTL8366RB_LED_1_OFFSET 6 +#define RTL8366RB_LED_2_3_CTRL_REG 0x0433 +#define RTL8366RB_LED_3_OFFSET 6 + +#define RTL8366RB_MIB_COUNT 33 +#define RTL8366RB_GLOBAL_MIB_COUNT 1 +#define RTL8366RB_MIB_COUNTER_PORT_OFFSET 0x0050 +#define RTL8366RB_MIB_COUNTER_BASE 0x1000 +#define RTL8366RB_MIB_CTRL_REG 0x13F0 +#define RTL8366RB_MIB_CTRL_USER_MASK 0x0FFC +#define 
RTL8366RB_MIB_CTRL_BUSY_MASK BIT(0) +#define RTL8366RB_MIB_CTRL_RESET_MASK BIT(1) +#define RTL8366RB_MIB_CTRL_PORT_RESET(_p) BIT(2 + (_p)) +#define RTL8366RB_MIB_CTRL_GLOBAL_RESET BIT(11) + +#define RTL8366RB_PORT_VLAN_CTRL_BASE 0x0063 +#define RTL8366RB_PORT_VLAN_CTRL_REG(_p) \ + (RTL8366RB_PORT_VLAN_CTRL_BASE + (_p) / 4) +#define RTL8366RB_PORT_VLAN_CTRL_MASK 0xf +#define RTL8366RB_PORT_VLAN_CTRL_SHIFT(_p) (4 * ((_p) % 4)) + +#define RTL8366RB_VLAN_TABLE_READ_BASE 0x018C +#define RTL8366RB_VLAN_TABLE_WRITE_BASE 0x0185 + +#define RTL8366RB_TABLE_ACCESS_CTRL_REG 0x0180 +#define RTL8366RB_TABLE_VLAN_READ_CTRL 0x0E01 +#define RTL8366RB_TABLE_VLAN_WRITE_CTRL 0x0F01 + +#define RTL8366RB_VLAN_MC_BASE(_x) (0x0020 + (_x) * 3) + +#define RTL8366RB_PORT_LINK_STATUS_BASE 0x0014 +#define RTL8366RB_PORT_STATUS_SPEED_MASK 0x0003 +#define RTL8366RB_PORT_STATUS_DUPLEX_MASK 0x0004 +#define RTL8366RB_PORT_STATUS_LINK_MASK 0x0010 +#define RTL8366RB_PORT_STATUS_TXPAUSE_MASK 0x0020 +#define RTL8366RB_PORT_STATUS_RXPAUSE_MASK 0x0040 +#define RTL8366RB_PORT_STATUS_AN_MASK 0x0080 + +#define RTL8366RB_NUM_VLANS 16 +#define RTL8366RB_NUM_LEDGROUPS 4 +#define RTL8366RB_NUM_VIDS 4096 +#define RTL8366RB_PRIORITYMAX 7 +#define RTL8366RB_FIDMAX 7 + +#define RTL8366RB_PORT_1 BIT(0) /* In userspace port 0 */ +#define RTL8366RB_PORT_2 BIT(1) /* In userspace port 1 */ +#define RTL8366RB_PORT_3 BIT(2) /* In userspace port 2 */ +#define RTL8366RB_PORT_4 BIT(3) /* In userspace port 3 */ +#define RTL8366RB_PORT_5 BIT(4) /* In userspace port 4 */ + +#define RTL8366RB_PORT_CPU BIT(5) /* CPU port */ + +#define RTL8366RB_PORT_ALL (RTL8366RB_PORT_1 | \ + RTL8366RB_PORT_2 | \ + RTL8366RB_PORT_3 | \ + RTL8366RB_PORT_4 | \ + RTL8366RB_PORT_5 | \ + RTL8366RB_PORT_CPU) + +#define RTL8366RB_PORT_ALL_BUT_CPU (RTL8366RB_PORT_1 | \ + RTL8366RB_PORT_2 | \ + RTL8366RB_PORT_3 | \ + RTL8366RB_PORT_4 | \ + RTL8366RB_PORT_5) + +#define RTL8366RB_PORT_ALL_EXTERNAL (RTL8366RB_PORT_1 | \ + RTL8366RB_PORT_2 | \ + RTL8366RB_PORT_3 | \ + RTL8366RB_PORT_4) + +#define RTL8366RB_PORT_ALL_INTERNAL RTL8366RB_PORT_CPU + +/* First configuration word per member config, VID and prio */ +#define RTL8366RB_VLAN_VID_MASK 0xfff +#define RTL8366RB_VLAN_PRIORITY_SHIFT 12 +#define RTL8366RB_VLAN_PRIORITY_MASK 0x7 +/* Second configuration word per member config, member and untagged */ +#define RTL8366RB_VLAN_UNTAG_SHIFT 8 +#define RTL8366RB_VLAN_UNTAG_MASK 0xff +#define RTL8366RB_VLAN_MEMBER_MASK 0xff +/* Third config word per member config, STAG currently unused */ +#define RTL8366RB_VLAN_STAG_MBR_MASK 0xff +#define RTL8366RB_VLAN_STAG_MBR_SHIFT 8 +#define RTL8366RB_VLAN_STAG_IDX_MASK 0x7 +#define RTL8366RB_VLAN_STAG_IDX_SHIFT 5 +#define RTL8366RB_VLAN_FID_MASK 0x7 + +/* Port ingress bandwidth control */ +#define RTL8366RB_IB_BASE 0x0200 +#define RTL8366RB_IB_REG(pnum) (RTL8366RB_IB_BASE + (pnum)) +#define RTL8366RB_IB_BDTH_MASK 0x3fff +#define RTL8366RB_IB_PREIFG BIT(14) + +/* Port egress bandwidth control */ +#define RTL8366RB_EB_BASE 0x02d1 +#define RTL8366RB_EB_REG(pnum) (RTL8366RB_EB_BASE + (pnum)) +#define RTL8366RB_EB_BDTH_MASK 0x3fff +#define RTL8366RB_EB_PREIFG_REG 0x02f8 +#define RTL8366RB_EB_PREIFG BIT(9) + +#define RTL8366RB_BDTH_SW_MAX 1048512 /* 1048576? */ +#define RTL8366RB_BDTH_UNIT 64 +#define RTL8366RB_BDTH_REG_DEFAULT 16383 + +/* QOS */ +#define RTL8366RB_QOS BIT(15) +/* Include/Exclude Preamble and IFG (20 bytes). 0:Exclude, 1:Include. 
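+ * (i.e. whether the 20 bytes of preamble and inter-frame gap count + * against the configured ingress/egress bandwidth)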
*/ +#define RTL8366RB_QOS_DEFAULT_PREIFG 1 + +/* Interrupt handling */ +#define RTL8366RB_INTERRUPT_CONTROL_REG 0x0440 +#define RTL8366RB_INTERRUPT_POLARITY BIT(0) +#define RTL8366RB_P4_RGMII_LED BIT(2) +#define RTL8366RB_INTERRUPT_MASK_REG 0x0441 +#define RTL8366RB_INTERRUPT_LINK_CHGALL GENMASK(11, 0) +#define RTL8366RB_INTERRUPT_ACLEXCEED BIT(8) +#define RTL8366RB_INTERRUPT_STORMEXCEED BIT(9) +#define RTL8366RB_INTERRUPT_P4_FIBER BIT(12) +#define RTL8366RB_INTERRUPT_P4_UTP BIT(13) +#define RTL8366RB_INTERRUPT_VALID (RTL8366RB_INTERRUPT_LINK_CHGALL | \ + RTL8366RB_INTERRUPT_ACLEXCEED | \ + RTL8366RB_INTERRUPT_STORMEXCEED | \ + RTL8366RB_INTERRUPT_P4_FIBER | \ + RTL8366RB_INTERRUPT_P4_UTP) +#define RTL8366RB_INTERRUPT_STATUS_REG 0x0442 +#define RTL8366RB_NUM_INTERRUPT 14 /* 0..13 */ + +/* bits 0..5 enable force when cleared */ +#define RTL8366RB_MAC_FORCE_CTRL_REG 0x0F11 + +#define RTL8366RB_OAM_PARSER_REG 0x0F14 +#define RTL8366RB_OAM_MULTIPLEXER_REG 0x0F15 + +#define RTL8366RB_GREEN_FEATURE_REG 0x0F51 +#define RTL8366RB_GREEN_FEATURE_MSK 0x0007 +#define RTL8366RB_GREEN_FEATURE_TX BIT(0) +#define RTL8366RB_GREEN_FEATURE_RX BIT(2) + +static struct rtl8366_mib_counter rtl8366rb_mib_counters[] = { + { 0, 0, 4, "IfInOctets" }, + { 0, 4, 4, "EtherStatsOctets" }, + { 0, 8, 2, "EtherStatsUnderSizePkts" }, + { 0, 10, 2, "EtherFragments" }, + { 0, 12, 2, "EtherStatsPkts64Octets" }, + { 0, 14, 2, "EtherStatsPkts65to127Octets" }, + { 0, 16, 2, "EtherStatsPkts128to255Octets" }, + { 0, 18, 2, "EtherStatsPkts256to511Octets" }, + { 0, 20, 2, "EtherStatsPkts512to1023Octets" }, + { 0, 22, 2, "EtherStatsPkts1024to1518Octets" }, + { 0, 24, 2, "EtherOversizeStats" }, + { 0, 26, 2, "EtherStatsJabbers" }, + { 0, 28, 2, "IfInUcastPkts" }, + { 0, 30, 2, "EtherStatsMulticastPkts" }, + { 0, 32, 2, "EtherStatsBroadcastPkts" }, + { 0, 34, 2, "EtherStatsDropEvents" }, + { 0, 36, 2, "Dot3StatsFCSErrors" }, + { 0, 38, 2, "Dot3StatsSymbolErrors" }, + { 0, 40, 2, "Dot3InPauseFrames" }, + { 0, 42, 2, "Dot3ControlInUnknownOpcodes" }, + { 0, 44, 4, "IfOutOctets" }, + { 0, 48, 2, "Dot3StatsSingleCollisionFrames" }, + { 0, 50, 2, "Dot3StatMultipleCollisionFrames" }, + { 0, 52, 2, "Dot3sDeferredTransmissions" }, + { 0, 54, 2, "Dot3StatsLateCollisions" }, + { 0, 56, 2, "EtherStatsCollisions" }, + { 0, 58, 2, "Dot3StatsExcessiveCollisions" }, + { 0, 60, 2, "Dot3OutPauseFrames" }, + { 0, 62, 2, "Dot1dBasePortDelayExceededDiscards" }, + { 0, 64, 2, "Dot1dTpPortInDiscards" }, + { 0, 66, 2, "IfOutUcastPkts" }, + { 0, 68, 2, "IfOutMulticastPkts" }, + { 0, 70, 2, "IfOutBroadcastPkts" }, +}; + +static int rtl8366rb_get_mib_counter(struct realtek_smi *smi, + int port, + struct rtl8366_mib_counter *mib, + u64 *mibvalue) +{ + u32 addr, val; + int ret; + int i; + + addr = RTL8366RB_MIB_COUNTER_BASE + + RTL8366RB_MIB_COUNTER_PORT_OFFSET * (port) + + mib->offset; + + /* Write the accessed counter address first; the ASIC will then + * prepare the 64-bit counter value for retrieval + */ + ret = regmap_write(smi->map, addr, 0); /* Write whatever */ + if (ret) + return ret; + + /* Read MIB control register */ + ret = regmap_read(smi->map, RTL8366RB_MIB_CTRL_REG, &val); + if (ret) + return -EIO; + + if (val & RTL8366RB_MIB_CTRL_BUSY_MASK) + return -EBUSY; + + if (val & RTL8366RB_MIB_CTRL_RESET_MASK) + return -EIO; + + /* Read each individual MIB 16 bits at a time */ + *mibvalue = 0; + for (i = mib->length; i > 0; i--) { + ret = regmap_read(smi->map, addr + (i - 1), &val); + if (ret) + return ret; + *mibvalue = (*mibvalue << 16) | (val & 0xFFFF); + } +
return 0; +} + +static u32 rtl8366rb_get_irqmask(struct irq_data *d) +{ + int line = irqd_to_hwirq(d); + u32 val; + + /* For line interrupts we combine link down in bits + * 6..11 with link up in bits 0..5 into one interrupt. + */ + if (line < 12) + val = BIT(line) | BIT(line + 6); + else + val = BIT(line); + return val; +} + +static void rtl8366rb_mask_irq(struct irq_data *d) +{ + struct realtek_smi *smi = irq_data_get_irq_chip_data(d); + int ret; + + ret = regmap_update_bits(smi->map, RTL8366RB_INTERRUPT_MASK_REG, + rtl8366rb_get_irqmask(d), 0); + if (ret) + dev_err(smi->dev, "could not mask IRQ\n"); +} + +static void rtl8366rb_unmask_irq(struct irq_data *d) +{ + struct realtek_smi *smi = irq_data_get_irq_chip_data(d); + int ret; + + ret = regmap_update_bits(smi->map, RTL8366RB_INTERRUPT_MASK_REG, + rtl8366rb_get_irqmask(d), + rtl8366rb_get_irqmask(d)); + if (ret) + dev_err(smi->dev, "could not unmask IRQ\n"); +} + +static irqreturn_t rtl8366rb_irq(int irq, void *data) +{ + struct realtek_smi *smi = data; + u32 stat; + int ret; + + /* This clears the IRQ status register */ + ret = regmap_read(smi->map, RTL8366RB_INTERRUPT_STATUS_REG, + &stat); + if (ret) { + dev_err(smi->dev, "can't read interrupt status\n"); + return IRQ_NONE; + } + stat &= RTL8366RB_INTERRUPT_VALID; + if (!stat) + return IRQ_NONE; + while (stat) { + int line = __ffs(stat); + int child_irq; + + stat &= ~BIT(line); + /* For line interrupts we combine link down in bits + * 6..11 with link up in bits 0..5 into one interrupt. + */ + if (line < 12 && line > 5) + line -= 5; + child_irq = irq_find_mapping(smi->irqdomain, line); + handle_nested_irq(child_irq); + } + return IRQ_HANDLED; +} + +static struct irq_chip rtl8366rb_irq_chip = { + .name = "RTL8366RB", + .irq_mask = rtl8366rb_mask_irq, + .irq_unmask = rtl8366rb_unmask_irq, +}; + +static int rtl8366rb_irq_map(struct irq_domain *domain, unsigned int irq, + irq_hw_number_t hwirq) +{ + irq_set_chip_data(irq, domain->host_data); + irq_set_chip_and_handler(irq, &rtl8366rb_irq_chip, handle_simple_irq); + irq_set_nested_thread(irq, 1); + irq_set_noprobe(irq); + + return 0; +} + +static void rtl8366rb_irq_unmap(struct irq_domain *d, unsigned int irq) +{ + irq_set_nested_thread(irq, 0); + irq_set_chip_and_handler(irq, NULL, NULL); + irq_set_chip_data(irq, NULL); +} + +static const struct irq_domain_ops rtl8366rb_irqdomain_ops = { + .map = rtl8366rb_irq_map, + .unmap = rtl8366rb_irq_unmap, + .xlate = irq_domain_xlate_onecell, +}; + +static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi) +{ + struct device_node *intc; + unsigned long irq_trig; + int irq; + int ret; + u32 val; + int i; + + intc = of_get_child_by_name(smi->dev->of_node, "interrupt-controller"); + if (!intc) { + dev_err(smi->dev, "missing child interrupt-controller node\n"); + return -EINVAL; + } + /* RTL8366RB IRQs cascade off this one */ + irq = of_irq_get(intc, 0); + if (irq <= 0) { + dev_err(smi->dev, "failed to get parent IRQ\n"); + return irq ?
irq : -EINVAL; + } + + /* This clears the IRQ status register */ + ret = regmap_read(smi->map, RTL8366RB_INTERRUPT_STATUS_REG, + &val); + if (ret) { + dev_err(smi->dev, "can't read interrupt status\n"); + return ret; + } + + /* Fetch IRQ edge information from the descriptor */ + irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq)); + switch (irq_trig) { + case IRQF_TRIGGER_RISING: + case IRQF_TRIGGER_HIGH: + dev_info(smi->dev, "active high/rising IRQ\n"); + val = 0; + break; + case IRQF_TRIGGER_FALLING: + case IRQF_TRIGGER_LOW: + dev_info(smi->dev, "active low/falling IRQ\n"); + val = RTL8366RB_INTERRUPT_POLARITY; + break; + } + ret = regmap_update_bits(smi->map, RTL8366RB_INTERRUPT_CONTROL_REG, + RTL8366RB_INTERRUPT_POLARITY, + val); + if (ret) { + dev_err(smi->dev, "could not configure IRQ polarity\n"); + return ret; + } + + ret = devm_request_threaded_irq(smi->dev, irq, NULL, + rtl8366rb_irq, IRQF_ONESHOT, + "RTL8366RB", smi); + if (ret) { + dev_err(smi->dev, "unable to request irq: %d\n", ret); + return ret; + } + smi->irqdomain = irq_domain_add_linear(intc, + RTL8366RB_NUM_INTERRUPT, + &rtl8366rb_irqdomain_ops, + smi); + if (!smi->irqdomain) { + dev_err(smi->dev, "failed to create IRQ domain\n"); + return -EINVAL; + } + for (i = 0; i < smi->num_ports; i++) + irq_set_parent(irq_create_mapping(smi->irqdomain, i), irq); + + return 0; +} + +static int rtl8366rb_set_addr(struct realtek_smi *smi) +{ + u8 addr[ETH_ALEN]; + u16 val; + int ret; + + eth_random_addr(addr); + + dev_info(smi->dev, "set MAC: %02X:%02X:%02X:%02X:%02X:%02X\n", + addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); + val = addr[0] << 8 | addr[1]; + ret = regmap_write(smi->map, RTL8366RB_SMAR0, val); + if (ret) + return ret; + val = addr[2] << 8 | addr[3]; + ret = regmap_write(smi->map, RTL8366RB_SMAR1, val); + if (ret) + return ret; + val = addr[4] << 8 | addr[5]; + ret = regmap_write(smi->map, RTL8366RB_SMAR2, val); + if (ret) + return ret; + + return 0; +} + +/* Found in a vendor driver */ + +/* For the "version 0" early silicon, appear in most source releases */ +static const u16 rtl8366rb_init_jam_ver_0[] = { + 0x000B, 0x0001, 0x03A6, 0x0100, 0x03A7, 0x0001, 0x02D1, 0x3FFF, + 0x02D2, 0x3FFF, 0x02D3, 0x3FFF, 0x02D4, 0x3FFF, 0x02D5, 0x3FFF, + 0x02D6, 0x3FFF, 0x02D7, 0x3FFF, 0x02D8, 0x3FFF, 0x022B, 0x0688, + 0x022C, 0x0FAC, 0x03D0, 0x4688, 0x03D1, 0x01F5, 0x0000, 0x0830, + 0x02F9, 0x0200, 0x02F7, 0x7FFF, 0x02F8, 0x03FF, 0x0080, 0x03E8, + 0x0081, 0x00CE, 0x0082, 0x00DA, 0x0083, 0x0230, 0xBE0F, 0x2000, + 0x0231, 0x422A, 0x0232, 0x422A, 0x0233, 0x422A, 0x0234, 0x422A, + 0x0235, 0x422A, 0x0236, 0x422A, 0x0237, 0x422A, 0x0238, 0x422A, + 0x0239, 0x422A, 0x023A, 0x422A, 0x023B, 0x422A, 0x023C, 0x422A, + 0x023D, 0x422A, 0x023E, 0x422A, 0x023F, 0x422A, 0x0240, 0x422A, + 0x0241, 0x422A, 0x0242, 0x422A, 0x0243, 0x422A, 0x0244, 0x422A, + 0x0245, 0x422A, 0x0246, 0x422A, 0x0247, 0x422A, 0x0248, 0x422A, + 0x0249, 0x0146, 0x024A, 0x0146, 0x024B, 0x0146, 0xBE03, 0xC961, + 0x024D, 0x0146, 0x024E, 0x0146, 0x024F, 0x0146, 0x0250, 0x0146, + 0xBE64, 0x0226, 0x0252, 0x0146, 0x0253, 0x0146, 0x024C, 0x0146, + 0x0251, 0x0146, 0x0254, 0x0146, 0xBE62, 0x3FD0, 0x0084, 0x0320, + 0x0255, 0x0146, 0x0256, 0x0146, 0x0257, 0x0146, 0x0258, 0x0146, + 0x0259, 0x0146, 0x025A, 0x0146, 0x025B, 0x0146, 0x025C, 0x0146, + 0x025D, 0x0146, 0x025E, 0x0146, 0x025F, 0x0146, 0x0260, 0x0146, + 0x0261, 0xA23F, 0x0262, 0x0294, 0x0263, 0xA23F, 0x0264, 0x0294, + 0x0265, 0xA23F, 0x0266, 0x0294, 0x0267, 0xA23F, 0x0268, 0x0294, + 0x0269, 0xA23F, 0x026A, 0x0294, 0x026B, 
0xA23F, 0x026C, 0x0294, + 0x026D, 0xA23F, 0x026E, 0x0294, 0x026F, 0xA23F, 0x0270, 0x0294, + 0x02F5, 0x0048, 0xBE09, 0x0E00, 0xBE1E, 0x0FA0, 0xBE14, 0x8448, + 0xBE15, 0x1007, 0xBE4A, 0xA284, 0xC454, 0x3F0B, 0xC474, 0x3F0B, + 0xBE48, 0x3672, 0xBE4B, 0x17A7, 0xBE4C, 0x0B15, 0xBE52, 0x0EDD, + 0xBE49, 0x8C00, 0xBE5B, 0x785C, 0xBE5C, 0x785C, 0xBE5D, 0x785C, + 0xBE61, 0x368A, 0xBE63, 0x9B84, 0xC456, 0xCC13, 0xC476, 0xCC13, + 0xBE65, 0x307D, 0xBE6D, 0x0005, 0xBE6E, 0xE120, 0xBE2E, 0x7BAF, +}; + +/* This v1 init sequence is from Belkin F5D8235 U-Boot release */ +static const u16 rtl8366rb_init_jam_ver_1[] = { + 0x0000, 0x0830, 0x0001, 0x8000, 0x0400, 0x8130, 0xBE78, 0x3C3C, + 0x0431, 0x5432, 0xBE37, 0x0CE4, 0x02FA, 0xFFDF, 0x02FB, 0xFFE0, + 0xC44C, 0x1585, 0xC44C, 0x1185, 0xC44C, 0x1585, 0xC46C, 0x1585, + 0xC46C, 0x1185, 0xC46C, 0x1585, 0xC451, 0x2135, 0xC471, 0x2135, + 0xBE10, 0x8140, 0xBE15, 0x0007, 0xBE6E, 0xE120, 0xBE69, 0xD20F, + 0xBE6B, 0x0320, 0xBE24, 0xB000, 0xBE23, 0xFF51, 0xBE22, 0xDF20, + 0xBE21, 0x0140, 0xBE20, 0x00BB, 0xBE24, 0xB800, 0xBE24, 0x0000, + 0xBE24, 0x7000, 0xBE23, 0xFF51, 0xBE22, 0xDF60, 0xBE21, 0x0140, + 0xBE20, 0x0077, 0xBE24, 0x7800, 0xBE24, 0x0000, 0xBE2E, 0x7B7A, + 0xBE36, 0x0CE4, 0x02F5, 0x0048, 0xBE77, 0x2940, 0x000A, 0x83E0, + 0xBE79, 0x3C3C, 0xBE00, 0x1340, +}; + +/* This v2 init sequence is from Belkin F5D8235 U-Boot release */ +static const u16 rtl8366rb_init_jam_ver_2[] = { + 0x0450, 0x0000, 0x0400, 0x8130, 0x000A, 0x83ED, 0x0431, 0x5432, + 0xC44F, 0x6250, 0xC46F, 0x6250, 0xC456, 0x0C14, 0xC476, 0x0C14, + 0xC44C, 0x1C85, 0xC44C, 0x1885, 0xC44C, 0x1C85, 0xC46C, 0x1C85, + 0xC46C, 0x1885, 0xC46C, 0x1C85, 0xC44C, 0x0885, 0xC44C, 0x0881, + 0xC44C, 0x0885, 0xC46C, 0x0885, 0xC46C, 0x0881, 0xC46C, 0x0885, + 0xBE2E, 0x7BA7, 0xBE36, 0x1000, 0xBE37, 0x1000, 0x8000, 0x0001, + 0xBE69, 0xD50F, 0x8000, 0x0000, 0xBE69, 0xD50F, 0xBE6E, 0x0320, + 0xBE77, 0x2940, 0xBE78, 0x3C3C, 0xBE79, 0x3C3C, 0xBE6E, 0xE120, + 0x8000, 0x0001, 0xBE15, 0x1007, 0x8000, 0x0000, 0xBE15, 0x1007, + 0xBE14, 0x0448, 0xBE1E, 0x00A0, 0xBE10, 0x8160, 0xBE10, 0x8140, + 0xBE00, 0x1340, 0x0F51, 0x0010, +}; + +/* Appears in a DDWRT code dump */ +static const u16 rtl8366rb_init_jam_ver_3[] = { + 0x0000, 0x0830, 0x0400, 0x8130, 0x000A, 0x83ED, 0x0431, 0x5432, + 0x0F51, 0x0017, 0x02F5, 0x0048, 0x02FA, 0xFFDF, 0x02FB, 0xFFE0, + 0xC456, 0x0C14, 0xC476, 0x0C14, 0xC454, 0x3F8B, 0xC474, 0x3F8B, + 0xC450, 0x2071, 0xC470, 0x2071, 0xC451, 0x226B, 0xC471, 0x226B, + 0xC452, 0xA293, 0xC472, 0xA293, 0xC44C, 0x1585, 0xC44C, 0x1185, + 0xC44C, 0x1585, 0xC46C, 0x1585, 0xC46C, 0x1185, 0xC46C, 0x1585, + 0xC44C, 0x0185, 0xC44C, 0x0181, 0xC44C, 0x0185, 0xC46C, 0x0185, + 0xC46C, 0x0181, 0xC46C, 0x0185, 0xBE24, 0xB000, 0xBE23, 0xFF51, + 0xBE22, 0xDF20, 0xBE21, 0x0140, 0xBE20, 0x00BB, 0xBE24, 0xB800, + 0xBE24, 0x0000, 0xBE24, 0x7000, 0xBE23, 0xFF51, 0xBE22, 0xDF60, + 0xBE21, 0x0140, 0xBE20, 0x0077, 0xBE24, 0x7800, 0xBE24, 0x0000, + 0xBE2E, 0x7BA7, 0xBE36, 0x1000, 0xBE37, 0x1000, 0x8000, 0x0001, + 0xBE69, 0xD50F, 0x8000, 0x0000, 0xBE69, 0xD50F, 0xBE6B, 0x0320, + 0xBE77, 0x2800, 0xBE78, 0x3C3C, 0xBE79, 0x3C3C, 0xBE6E, 0xE120, + 0x8000, 0x0001, 0xBE10, 0x8140, 0x8000, 0x0000, 0xBE10, 0x8140, + 0xBE15, 0x1007, 0xBE14, 0x0448, 0xBE1E, 0x00A0, 0xBE10, 0x8160, + 0xBE10, 0x8140, 0xBE00, 0x1340, 0x0450, 0x0000, 0x0401, 0x0000, +}; + +/* Belkin F5D8235 v1, "belkin,f5d8235-v1" */ +static const u16 rtl8366rb_init_jam_f5d8235[] = { + 0x0242, 0x02BF, 0x0245, 0x02BF, 0x0248, 0x02BF, 0x024B, 0x02BF, + 0x024E, 0x02BF, 0x0251, 0x02BF, 0x0254, 0x0A3F, 
0x0256, 0x0A3F, + 0x0258, 0x0A3F, 0x025A, 0x0A3F, 0x025C, 0x0A3F, 0x025E, 0x0A3F, + 0x0263, 0x007C, 0x0100, 0x0004, 0xBE5B, 0x3500, 0x800E, 0x200F, + 0xBE1D, 0x0F00, 0x8001, 0x5011, 0x800A, 0xA2F4, 0x800B, 0x17A3, + 0xBE4B, 0x17A3, 0xBE41, 0x5011, 0xBE17, 0x2100, 0x8000, 0x8304, + 0xBE40, 0x8304, 0xBE4A, 0xA2F4, 0x800C, 0xA8D5, 0x8014, 0x5500, + 0x8015, 0x0004, 0xBE4C, 0xA8D5, 0xBE59, 0x0008, 0xBE09, 0x0E00, + 0xBE36, 0x1036, 0xBE37, 0x1036, 0x800D, 0x00FF, 0xBE4D, 0x00FF, +}; + +/* DGN3500, "netgear,dgn3500", "netgear,dgn3500b" */ +static const u16 rtl8366rb_init_jam_dgn3500[] = { + 0x0000, 0x0830, 0x0400, 0x8130, 0x000A, 0x83ED, 0x0F51, 0x0017, + 0x02F5, 0x0048, 0x02FA, 0xFFDF, 0x02FB, 0xFFE0, 0x0450, 0x0000, + 0x0401, 0x0000, 0x0431, 0x0960, +}; + +/* This jam table activates "green ethernet", which means low power mode + * and is claimed to detect the cable length and not use more power than + * necessary, and the ports should enter power saving mode 10 seconds after + * a cable is disconnected. Seems to always be the same. + */ +static const u16 rtl8366rb_green_jam[][2] = { + {0xBE78, 0x323C}, {0xBE77, 0x5000}, {0xBE2E, 0x7BA7}, + {0xBE59, 0x3459}, {0xBE5A, 0x745A}, {0xBE5B, 0x785C}, + {0xBE5C, 0x785C}, {0xBE6E, 0xE120}, {0xBE79, 0x323C}, +}; + +static int rtl8366rb_setup(struct dsa_switch *ds) +{ + struct realtek_smi *smi = ds->priv; + const u16 *jam_table; + u32 chip_ver = 0; + u32 chip_id = 0; + int jam_size; + u32 val; + int ret; + int i; + + ret = regmap_read(smi->map, RTL8366RB_CHIP_ID_REG, &chip_id); + if (ret) { + dev_err(smi->dev, "unable to read chip id\n"); + return ret; + } + + switch (chip_id) { + case RTL8366RB_CHIP_ID_8366: + break; + default: + dev_err(smi->dev, "unknown chip id (%04x)\n", chip_id); + return -ENODEV; + } + + ret = regmap_read(smi->map, RTL8366RB_CHIP_VERSION_CTRL_REG, + &chip_ver); + if (ret) { + dev_err(smi->dev, "unable to read chip version\n"); + return ret; + } + + dev_info(smi->dev, "RTL%04x ver %u chip found\n", + chip_id, chip_ver & RTL8366RB_CHIP_VERSION_MASK); + + /* Do the init dance using the right jam table */ + switch (chip_ver) { + case 0: + jam_table = rtl8366rb_init_jam_ver_0; + jam_size = ARRAY_SIZE(rtl8366rb_init_jam_ver_0); + break; + case 1: + jam_table = rtl8366rb_init_jam_ver_1; + jam_size = ARRAY_SIZE(rtl8366rb_init_jam_ver_1); + break; + case 2: + jam_table = rtl8366rb_init_jam_ver_2; + jam_size = ARRAY_SIZE(rtl8366rb_init_jam_ver_2); + break; + default: + jam_table = rtl8366rb_init_jam_ver_3; + jam_size = ARRAY_SIZE(rtl8366rb_init_jam_ver_3); + break; + } + + /* Special jam tables for special routers + * TODO: are these necessary? Maintainers, please test + * without them, using just the off-the-shelf tables. 
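+ * The board-specific tables (F5D8235, DGN3500) come from the respective + * vendor code drops and, when the machine compatible matches below, + * take precedence over the version-keyed tables selected above.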
+ */ + if (of_machine_is_compatible("belkin,f5d8235-v1")) { + jam_table = rtl8366rb_init_jam_f5d8235; + jam_size = ARRAY_SIZE(rtl8366rb_init_jam_f5d8235); + } + if (of_machine_is_compatible("netgear,dgn3500") || + of_machine_is_compatible("netgear,dgn3500b")) { + jam_table = rtl8366rb_init_jam_dgn3500; + jam_size = ARRAY_SIZE(rtl8366rb_init_jam_dgn3500); + } + + i = 0; + while (i < jam_size) { + if ((jam_table[i] & 0xBE00) == 0xBE00) { + ret = regmap_read(smi->map, + RTL8366RB_PHY_ACCESS_BUSY_REG, + &val); + if (ret) + return ret; + if (!(val & RTL8366RB_PHY_INT_BUSY)) { + ret = regmap_write(smi->map, + RTL8366RB_PHY_ACCESS_CTRL_REG, + RTL8366RB_PHY_CTRL_WRITE); + if (ret) + return ret; + } + } + dev_dbg(smi->dev, "jam %04x into register %04x\n", + jam_table[i + 1], + jam_table[i]); + ret = regmap_write(smi->map, + jam_table[i], + jam_table[i + 1]); + if (ret) + return ret; + i += 2; + } + + /* Set up the "green ethernet" feature */ + i = 0; + while (i < ARRAY_SIZE(rtl8366rb_green_jam)) { + ret = regmap_read(smi->map, RTL8366RB_PHY_ACCESS_BUSY_REG, + &val); + if (ret) + return ret; + if (!(val & RTL8366RB_PHY_INT_BUSY)) { + ret = regmap_write(smi->map, + RTL8366RB_PHY_ACCESS_CTRL_REG, + RTL8366RB_PHY_CTRL_WRITE); + if (ret) + return ret; + ret = regmap_write(smi->map, + rtl8366rb_green_jam[i][0], + rtl8366rb_green_jam[i][1]); + if (ret) + return ret; + i++; + } + } + ret = regmap_write(smi->map, + RTL8366RB_GREEN_FEATURE_REG, + (chip_ver == 1) ? 0x0007 : 0x0003); + if (ret) + return ret; + + /* Vendor driver sets 0x240 in registers 0xc and 0xd (undocumented) */ + ret = regmap_write(smi->map, 0x0c, 0x240); + if (ret) + return ret; + ret = regmap_write(smi->map, 0x0d, 0x240); + if (ret) + return ret; + + /* Set some random MAC address */ + ret = rtl8366rb_set_addr(smi); + if (ret) + return ret; + + /* Enable CPU port and enable inserting CPU tag + * + * Disabling RTL8368RB_CPU_INSTAG here will change the behaviour + * of the switch totally and it will start talking Realtek RRCP + * internally. It is probably possible to experiment with this, + * but then the kernel needs to understand and handle RRCP first. + */ + ret = regmap_update_bits(smi->map, RTL8368RB_CPU_CTRL_REG, + 0xFFFF, + RTL8368RB_CPU_INSTAG | BIT(smi->cpu_port)); + if (ret) + return ret; + + /* Make sure we default-enable the fixed CPU port */ + ret = regmap_update_bits(smi->map, RTL8366RB_PECR, + BIT(smi->cpu_port), + 0); + if (ret) + return ret; + + /* Set maximum packet length to 1536 bytes */ + ret = regmap_update_bits(smi->map, RTL8366RB_SGCR, + RTL8366RB_SGCR_MAX_LENGTH_MASK, + RTL8366RB_SGCR_MAX_LENGTH_1536); + if (ret) + return ret; + + /* Enable learning for all ports */ + ret = regmap_write(smi->map, RTL8366RB_SSCR0, 0); + if (ret) + return ret; + + /* Enable auto ageing for all ports */ + ret = regmap_write(smi->map, RTL8366RB_SSCR1, 0); + if (ret) + return ret; + + /* Discard VLAN tagged packets if the port is not a member of + * the VLAN with which the packet is associated.
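+ * (The register takes a port bitmask; the filter is enabled for all + * ports here, CPU port included.)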
+ */ + ret = regmap_write(smi->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG, + RTL8366RB_PORT_ALL); + if (ret) + return ret; + + /* Don't drop packets whose DA has not been learned */ + ret = regmap_update_bits(smi->map, RTL8366RB_SSCR2, + RTL8366RB_SSCR2_DROP_UNKNOWN_DA, 0); + if (ret) + return ret; + + /* Set blinking, TODO: make this configurable */ + ret = regmap_update_bits(smi->map, RTL8366RB_LED_BLINKRATE_REG, + RTL8366RB_LED_BLINKRATE_MASK, + RTL8366RB_LED_BLINKRATE_56MS); + if (ret) + return ret; + + /* Set up LED activity: + * Each port has 4 LEDs, we configure all ports to the same + * behaviour (no individual config) but we can set up each + * LED separately. + */ + if (smi->leds_disabled) { + /* Turn everything off */ + regmap_update_bits(smi->map, + RTL8366RB_LED_0_1_CTRL_REG, + 0x0FFF, 0); + regmap_update_bits(smi->map, + RTL8366RB_LED_2_3_CTRL_REG, + 0x0FFF, 0); + regmap_update_bits(smi->map, + RTL8366RB_INTERRUPT_CONTROL_REG, + RTL8366RB_P4_RGMII_LED, + 0); + val = RTL8366RB_LED_OFF; + } else { + /* TODO: make this configurable per LED */ + val = RTL8366RB_LED_FORCE; + } + for (i = 0; i < 4; i++) { + ret = regmap_update_bits(smi->map, + RTL8366RB_LED_CTRL_REG, + 0xf << (i * 4), + val << (i * 4)); + if (ret) + return ret; + } + + ret = rtl8366_init_vlan(smi); + if (ret) + return ret; + + ret = rtl8366rb_setup_cascaded_irq(smi); + if (ret) + dev_info(smi->dev, "no interrupt support\n"); + + ret = realtek_smi_setup_mdio(smi); + if (ret) { + dev_info(smi->dev, "could not set up MDIO bus\n"); + return -ENODEV; + } + + return 0; +} + +static enum dsa_tag_protocol rtl8366_get_tag_protocol(struct dsa_switch *ds, + int port) +{ + /* For now, the RTL switches are handled without any custom tags. + * + * It is possible to turn on "custom tags" by removing the + * RTL8368RB_CPU_INSTAG flag when enabling the port but what it + * does is unfamiliar to DSA: Ethernet frames of type 0x8899, the + * Realtek Remote Control Protocol (RRCP), start to appear on the CPU + * port of the device. So this is not the ordinary few extra bytes in + * the frame. Instead it appears that the switch starts to talk Realtek + * RRCP internally, which means a pretty complex RRCP implementation + * decoding and responding to the RRCP protocol is needed to exploit + * this. + * + * The OpenRRCP project (dormant since 2009) has reverse-engineered + * parts of the protocol. + */ + return DSA_TAG_PROTO_NONE; +} + +static void rtl8366rb_adjust_link(struct dsa_switch *ds, int port, + struct phy_device *phydev) +{ + struct realtek_smi *smi = ds->priv; + int ret; + + if (port != smi->cpu_port) + return; + + dev_info(smi->dev, "adjust link on CPU port (%d)\n", port); + + /* Force the fixed CPU port into 1Gbit mode, no autonegotiation */ + ret = regmap_update_bits(smi->map, RTL8366RB_MAC_FORCE_CTRL_REG, + BIT(port), BIT(port)); + if (ret) + return; + + ret = regmap_update_bits(smi->map, RTL8366RB_PAACR2, + 0xFF00U, + RTL8366RB_PAACR_CPU_PORT << 8); + if (ret) + return; + + /* Enable the CPU port */ + ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port), + 0); + if (ret) + return; +} + +static void rb8366rb_set_port_led(struct realtek_smi *smi, + int port, bool enable) +{ + u16 val = enable ?
0x3f : 0; + int ret; + + if (smi->leds_disabled) + return; + + switch (port) { + case 0: + ret = regmap_update_bits(smi->map, + RTL8366RB_LED_0_1_CTRL_REG, + 0x3F, val); + break; + case 1: + ret = regmap_update_bits(smi->map, + RTL8366RB_LED_0_1_CTRL_REG, + 0x3F << RTL8366RB_LED_1_OFFSET, + val << RTL8366RB_LED_1_OFFSET); + break; + case 2: + ret = regmap_update_bits(smi->map, + RTL8366RB_LED_2_3_CTRL_REG, + 0x3F, val); + break; + case 3: + ret = regmap_update_bits(smi->map, + RTL8366RB_LED_2_3_CTRL_REG, + 0x3F << RTL8366RB_LED_3_OFFSET, + val << RTL8366RB_LED_3_OFFSET); + break; + case 4: + ret = regmap_update_bits(smi->map, + RTL8366RB_INTERRUPT_CONTROL_REG, + RTL8366RB_P4_RGMII_LED, + enable ? RTL8366RB_P4_RGMII_LED : 0); + break; + default: + dev_err(smi->dev, "no LED for port %d\n", port); + return; + } + if (ret) + dev_err(smi->dev, "error updating LED on port %d\n", port); +} + +static int +rtl8366rb_port_enable(struct dsa_switch *ds, int port, + struct phy_device *phy) +{ + struct realtek_smi *smi = ds->priv; + int ret; + + dev_dbg(smi->dev, "enable port %d\n", port); + ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port), + 0); + if (ret) + return ret; + + rb8366rb_set_port_led(smi, port, true); + return 0; +} + +static void +rtl8366rb_port_disable(struct dsa_switch *ds, int port, + struct phy_device *phy) +{ + struct realtek_smi *smi = ds->priv; + int ret; + + dev_dbg(smi->dev, "disable port %d\n", port); + ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port), + BIT(port)); + if (ret) + return; + + rb8366rb_set_port_led(smi, port, false); +} + +static int rtl8366rb_get_vlan_4k(struct realtek_smi *smi, u32 vid, + struct rtl8366_vlan_4k *vlan4k) +{ + u32 data[3]; + int ret; + int i; + + memset(vlan4k, '\0', sizeof(struct rtl8366_vlan_4k)); + + if (vid >= RTL8366RB_NUM_VIDS) + return -EINVAL; + + /* write VID */ + ret = regmap_write(smi->map, RTL8366RB_VLAN_TABLE_WRITE_BASE, + vid & RTL8366RB_VLAN_VID_MASK); + if (ret) + return ret; + + /* write table access control word */ + ret = regmap_write(smi->map, RTL8366RB_TABLE_ACCESS_CTRL_REG, + RTL8366RB_TABLE_VLAN_READ_CTRL); + if (ret) + return ret; + + for (i = 0; i < 3; i++) { + ret = regmap_read(smi->map, + RTL8366RB_VLAN_TABLE_READ_BASE + i, + &data[i]); + if (ret) + return ret; + } + + vlan4k->vid = vid; + vlan4k->untag = (data[1] >> RTL8366RB_VLAN_UNTAG_SHIFT) & + RTL8366RB_VLAN_UNTAG_MASK; + vlan4k->member = data[1] & RTL8366RB_VLAN_MEMBER_MASK; + vlan4k->fid = data[2] & RTL8366RB_VLAN_FID_MASK; + + return 0; +} + +static int rtl8366rb_set_vlan_4k(struct realtek_smi *smi, + const struct rtl8366_vlan_4k *vlan4k) +{ + u32 data[3]; + int ret; + int i; + + if (vlan4k->vid >= RTL8366RB_NUM_VIDS || + vlan4k->member > RTL8366RB_VLAN_MEMBER_MASK || + vlan4k->untag > RTL8366RB_VLAN_UNTAG_MASK || + vlan4k->fid > RTL8366RB_FIDMAX) + return -EINVAL; + + data[0] = vlan4k->vid & RTL8366RB_VLAN_VID_MASK; + data[1] = (vlan4k->member & RTL8366RB_VLAN_MEMBER_MASK) | + ((vlan4k->untag & RTL8366RB_VLAN_UNTAG_MASK) << + RTL8366RB_VLAN_UNTAG_SHIFT); + data[2] = vlan4k->fid & RTL8366RB_VLAN_FID_MASK; + + for (i = 0; i < 3; i++) { + ret = regmap_write(smi->map, + RTL8366RB_VLAN_TABLE_WRITE_BASE + i, + data[i]); + if (ret) + return ret; + } + + /* write table access control word */ + ret = regmap_write(smi->map, RTL8366RB_TABLE_ACCESS_CTRL_REG, + RTL8366RB_TABLE_VLAN_WRITE_CTRL); + + return ret; +} + +static int rtl8366rb_get_vlan_mc(struct realtek_smi *smi, u32 index, + struct rtl8366_vlan_mc *vlanmc) +{ + u32 data[3]; + int ret; + int 
i; + + memset(vlanmc, '\0', sizeof(struct rtl8366_vlan_mc)); + + if (index >= RTL8366RB_NUM_VLANS) + return -EINVAL; + + for (i = 0; i < 3; i++) { + ret = regmap_read(smi->map, + RTL8366RB_VLAN_MC_BASE(index) + i, + &data[i]); + if (ret) + return ret; + } + + vlanmc->vid = data[0] & RTL8366RB_VLAN_VID_MASK; + vlanmc->priority = (data[0] >> RTL8366RB_VLAN_PRIORITY_SHIFT) & + RTL8366RB_VLAN_PRIORITY_MASK; + vlanmc->untag = (data[1] >> RTL8366RB_VLAN_UNTAG_SHIFT) & + RTL8366RB_VLAN_UNTAG_MASK; + vlanmc->member = data[1] & RTL8366RB_VLAN_MEMBER_MASK; + vlanmc->fid = data[2] & RTL8366RB_VLAN_FID_MASK; + + return 0; +} + +static int rtl8366rb_set_vlan_mc(struct realtek_smi *smi, u32 index, + const struct rtl8366_vlan_mc *vlanmc) +{ + u32 data[3]; + int ret; + int i; + + if (index >= RTL8366RB_NUM_VLANS || + vlanmc->vid >= RTL8366RB_NUM_VIDS || + vlanmc->priority > RTL8366RB_PRIORITYMAX || + vlanmc->member > RTL8366RB_VLAN_MEMBER_MASK || + vlanmc->untag > RTL8366RB_VLAN_UNTAG_MASK || + vlanmc->fid > RTL8366RB_FIDMAX) + return -EINVAL; + + data[0] = (vlanmc->vid & RTL8366RB_VLAN_VID_MASK) | + ((vlanmc->priority & RTL8366RB_VLAN_PRIORITY_MASK) << + RTL8366RB_VLAN_PRIORITY_SHIFT); + data[1] = (vlanmc->member & RTL8366RB_VLAN_MEMBER_MASK) | + ((vlanmc->untag & RTL8366RB_VLAN_UNTAG_MASK) << + RTL8366RB_VLAN_UNTAG_SHIFT); + data[2] = vlanmc->fid & RTL8366RB_VLAN_FID_MASK; + + for (i = 0; i < 3; i++) { + ret = regmap_write(smi->map, + RTL8366RB_VLAN_MC_BASE(index) + i, + data[i]); + if (ret) + return ret; + } + + return 0; +} + +static int rtl8366rb_get_mc_index(struct realtek_smi *smi, int port, int *val) +{ + u32 data; + int ret; + + if (port >= smi->num_ports) + return -EINVAL; + + ret = regmap_read(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port), + &data); + if (ret) + return ret; + + *val = (data >> RTL8366RB_PORT_VLAN_CTRL_SHIFT(port)) & + RTL8366RB_PORT_VLAN_CTRL_MASK; + + return 0; +} + +static int rtl8366rb_set_mc_index(struct realtek_smi *smi, int port, int index) +{ + if (port >= smi->num_ports || index >= RTL8366RB_NUM_VLANS) + return -EINVAL; + + return regmap_update_bits(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port), + RTL8366RB_PORT_VLAN_CTRL_MASK << + RTL8366RB_PORT_VLAN_CTRL_SHIFT(port), + (index & RTL8366RB_PORT_VLAN_CTRL_MASK) << + RTL8366RB_PORT_VLAN_CTRL_SHIFT(port)); +} + +static bool rtl8366rb_is_vlan_valid(struct realtek_smi *smi, unsigned int vlan) +{ + unsigned int max = RTL8366RB_NUM_VLANS; + + if (smi->vlan4k_enabled) + max = RTL8366RB_NUM_VIDS - 1; + + if (vlan == 0 || vlan >= max) + return false; + + return true; +} + +static int rtl8366rb_enable_vlan(struct realtek_smi *smi, bool enable) +{ + dev_dbg(smi->dev, "%s VLAN\n", enable ? "enable" : "disable"); + return regmap_update_bits(smi->map, + RTL8366RB_SGCR, RTL8366RB_SGCR_EN_VLAN, + enable ? RTL8366RB_SGCR_EN_VLAN : 0); +} + +static int rtl8366rb_enable_vlan4k(struct realtek_smi *smi, bool enable) +{ + dev_dbg(smi->dev, "%s VLAN 4k\n", enable ? "enable" : "disable"); + return regmap_update_bits(smi->map, RTL8366RB_SGCR, + RTL8366RB_SGCR_EN_VLAN_4KTB, + enable ? 
RTL8366RB_SGCR_EN_VLAN_4KTB : 0); +} + +static int rtl8366rb_phy_read(struct realtek_smi *smi, int phy, int regnum) +{ + u32 val; + u32 reg; + int ret; + + if (phy > RTL8366RB_PHY_NO_MAX) + return -EINVAL; + + ret = regmap_write(smi->map, RTL8366RB_PHY_ACCESS_CTRL_REG, + RTL8366RB_PHY_CTRL_READ); + if (ret) + return ret; + + reg = 0x8000 | (1 << (phy + RTL8366RB_PHY_NO_OFFSET)) | regnum; + + ret = regmap_write(smi->map, reg, 0); + if (ret) { + dev_err(smi->dev, + "failed to write PHY%d reg %04x @ %04x, ret %d\n", + phy, regnum, reg, ret); + return ret; + } + + ret = regmap_read(smi->map, RTL8366RB_PHY_ACCESS_DATA_REG, &val); + if (ret) + return ret; + + dev_dbg(smi->dev, "read PHY%d register 0x%04x @ %08x, val <- %04x\n", + phy, regnum, reg, val); + + return val; +} + +static int rtl8366rb_phy_write(struct realtek_smi *smi, int phy, int regnum, + u16 val) +{ + u32 reg; + int ret; + + if (phy > RTL8366RB_PHY_NO_MAX) + return -EINVAL; + + ret = regmap_write(smi->map, RTL8366RB_PHY_ACCESS_CTRL_REG, + RTL8366RB_PHY_CTRL_WRITE); + if (ret) + return ret; + + reg = 0x8000 | (1 << (phy + RTL8366RB_PHY_NO_OFFSET)) | regnum; + + dev_dbg(smi->dev, "write PHY%d register 0x%04x @ %04x, val -> %04x\n", + phy, regnum, reg, val); + + ret = regmap_write(smi->map, reg, val); + if (ret) + return ret; + + return 0; +} + +static int rtl8366rb_reset_chip(struct realtek_smi *smi) +{ + int timeout = 10; + u32 val; + int ret; + + realtek_smi_write_reg_noack(smi, RTL8366RB_RESET_CTRL_REG, + RTL8366RB_CHIP_CTRL_RESET_HW); + do { + usleep_range(20000, 25000); + ret = regmap_read(smi->map, RTL8366RB_RESET_CTRL_REG, &val); + if (ret) + return ret; + + if (!(val & RTL8366RB_CHIP_CTRL_RESET_HW)) + break; + } while (--timeout); + + if (!timeout) { + dev_err(smi->dev, "timeout waiting for the switch to reset\n"); + return -EIO; + } + + return 0; +} + +static int rtl8366rb_detect(struct realtek_smi *smi) +{ + struct device *dev = smi->dev; + int ret; + u32 val; + + /* Detect device */ + ret = regmap_read(smi->map, 0x5c, &val); + if (ret) { + dev_err(dev, "can't get chip ID (%d)\n", ret); + return ret; + } + + switch (val) { + case 0x6027: + dev_info(dev, "found an RTL8366S switch\n"); + dev_err(dev, "this switch is not yet supported, submit patches!\n"); + return -ENODEV; + case 0x5937: + dev_info(dev, "found an RTL8366RB switch\n"); + smi->cpu_port = RTL8366RB_PORT_NUM_CPU; + smi->num_ports = RTL8366RB_NUM_PORTS; + smi->num_vlan_mc = RTL8366RB_NUM_VLANS; + smi->mib_counters = rtl8366rb_mib_counters; + smi->num_mib_counters = ARRAY_SIZE(rtl8366rb_mib_counters); + break; + default: + dev_info(dev, "found an Unknown Realtek switch (id=0x%04x)\n", + val); + break; + } + + ret = rtl8366rb_reset_chip(smi); + if (ret) + return ret; + + return 0; +} + +static const struct dsa_switch_ops rtl8366rb_switch_ops = { + .get_tag_protocol = rtl8366_get_tag_protocol, + .setup = rtl8366rb_setup, + .adjust_link = rtl8366rb_adjust_link, + .get_strings = rtl8366_get_strings, + .get_ethtool_stats = rtl8366_get_ethtool_stats, + .get_sset_count = rtl8366_get_sset_count, + .port_vlan_filtering = rtl8366_vlan_filtering, + .port_vlan_prepare = rtl8366_vlan_prepare, + .port_vlan_add = rtl8366_vlan_add, + .port_vlan_del = rtl8366_vlan_del, + .port_enable = rtl8366rb_port_enable, + .port_disable = rtl8366rb_port_disable, +}; + +static const struct realtek_smi_ops rtl8366rb_smi_ops = { + .detect = rtl8366rb_detect, + .get_vlan_mc = rtl8366rb_get_vlan_mc, + .set_vlan_mc = rtl8366rb_set_vlan_mc, + .get_vlan_4k = rtl8366rb_get_vlan_4k, + .set_vlan_4k = 
rtl8366rb_set_vlan_4k, + .get_mc_index = rtl8366rb_get_mc_index, + .set_mc_index = rtl8366rb_set_mc_index, + .get_mib_counter = rtl8366rb_get_mib_counter, + .is_vlan_valid = rtl8366rb_is_vlan_valid, + .enable_vlan = rtl8366rb_enable_vlan, + .enable_vlan4k = rtl8366rb_enable_vlan4k, + .phy_read = rtl8366rb_phy_read, + .phy_write = rtl8366rb_phy_write, +}; + +const struct realtek_smi_variant rtl8366rb_variant = { + .ds_ops = &rtl8366rb_switch_ops, + .ops = &rtl8366rb_smi_ops, + .clk_delay = 10, + .cmd_read = 0xa9, + .cmd_write = 0xa8, +}; +EXPORT_SYMBOL_GPL(rtl8366rb_variant); diff --git a/drivers/net/dsa/vitesse-vsc73xx.c b/drivers/net/dsa/vitesse-vsc73xx.c new file mode 100644 index 000000000000..9f1b5f2e8a64 --- /dev/null +++ b/drivers/net/dsa/vitesse-vsc73xx.c @@ -0,0 +1,1365 @@ +// SPDX-License-Identifier: GPL-2.0 +/* DSA driver for: + * Vitesse VSC7385 SparX-G5 5+1-port Integrated Gigabit Ethernet Switch + * Vitesse VSC7388 SparX-G8 8-port Integrated Gigabit Ethernet Switch + * Vitesse VSC7395 SparX-G5e 5+1-port Integrated Gigabit Ethernet Switch + * Vitesse VSC7398 SparX-G8e 8-port Integrated Gigabit Ethernet Switch + * + * These switches have a built-in 8051 CPU and can download and execute a + * firmware in this CPU. They can also be configured to use an external CPU + * handling the switch in a memory-mapped manner by connecting to that external + * CPU's memory bus. + * + * This driver (currently) only takes control of the switch chip over SPI and + * configures it to route packets around when connected to a CPU port. The + * chip has embedded PHYs and VLAN support so we model it using DSA. + * + * Copyright (C) 2018 Linus Walleij <linus.walleij@linaro.org> + * Includes portions of code from the firmware uploader by: + * Copyright (C) 2009 Gabor Juhos <juhosg@openwrt.org> + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_mdio.h> +#include <linux/platform_device.h> +#include <linux/spi/spi.h> +#include <linux/bitops.h> +#include <linux/if_bridge.h> +#include <linux/etherdevice.h> +#include <linux/gpio/consumer.h> +#include <linux/gpio/driver.h> +#include <linux/random.h> +#include <net/dsa.h> + +#define VSC73XX_BLOCK_MAC 0x1 /* Subblocks 0-4, 6 (CPU port) */ +#define VSC73XX_BLOCK_ANALYZER 0x2 /* Only subblock 0 */ +#define VSC73XX_BLOCK_MII 0x3 /* Subblocks 0 and 1 */ +#define VSC73XX_BLOCK_MEMINIT 0x3 /* Only subblock 2 */ +#define VSC73XX_BLOCK_CAPTURE 0x4 /* Only subblock 2 */ +#define VSC73XX_BLOCK_ARBITER 0x5 /* Only subblock 0 */ +#define VSC73XX_BLOCK_SYSTEM 0x7 /* Only subblock 0 */ + +#define CPU_PORT 6 /* CPU port */ + +/* MAC Block registers */ +#define VSC73XX_MAC_CFG 0x00 +#define VSC73XX_MACHDXGAP 0x02 +#define VSC73XX_FCCONF 0x04 +#define VSC73XX_FCMACHI 0x08 +#define VSC73XX_FCMACLO 0x0c +#define VSC73XX_MAXLEN 0x10 +#define VSC73XX_ADVPORTM 0x19 +#define VSC73XX_TXUPDCFG 0x24 +#define VSC73XX_TXQ_SELECT_CFG 0x28 +#define VSC73XX_RXOCT 0x50 +#define VSC73XX_TXOCT 0x51 +#define VSC73XX_C_RX0 0x52 +#define VSC73XX_C_RX1 0x53 +#define VSC73XX_C_RX2 0x54 +#define VSC73XX_C_TX0 0x55 +#define VSC73XX_C_TX1 0x56 +#define VSC73XX_C_TX2 0x57 +#define VSC73XX_C_CFG 0x58 +#define VSC73XX_CAT_DROP 0x6e +#define VSC73XX_CAT_PR_MISC_L2 0x6f +#define VSC73XX_CAT_PR_USR_PRIO 0x75 +#define VSC73XX_Q_MISC_CONF 0xdf + +/* MAC_CFG register bits */ +#define VSC73XX_MAC_CFG_WEXC_DIS BIT(31) +#define VSC73XX_MAC_CFG_PORT_RST BIT(29) +#define VSC73XX_MAC_CFG_TX_EN BIT(28) +#define 
VSC73XX_MAC_CFG_SEED_LOAD BIT(27) +#define VSC73XX_MAC_CFG_SEED_MASK GENMASK(26, 19) +#define VSC73XX_MAC_CFG_SEED_OFFSET 19 +#define VSC73XX_MAC_CFG_FDX BIT(18) +#define VSC73XX_MAC_CFG_GIGA_MODE BIT(17) +#define VSC73XX_MAC_CFG_RX_EN BIT(16) +#define VSC73XX_MAC_CFG_VLAN_DBLAWR BIT(15) +#define VSC73XX_MAC_CFG_VLAN_AWR BIT(14) +#define VSC73XX_MAC_CFG_100_BASE_T BIT(13) /* Not in manual */ +#define VSC73XX_MAC_CFG_TX_IPG_MASK GENMASK(10, 6) +#define VSC73XX_MAC_CFG_TX_IPG_OFFSET 6 +#define VSC73XX_MAC_CFG_TX_IPG_1000M (6 << VSC73XX_MAC_CFG_TX_IPG_OFFSET) +#define VSC73XX_MAC_CFG_TX_IPG_100_10M (17 << VSC73XX_MAC_CFG_TX_IPG_OFFSET) +#define VSC73XX_MAC_CFG_MAC_RX_RST BIT(5) +#define VSC73XX_MAC_CFG_MAC_TX_RST BIT(4) +#define VSC73XX_MAC_CFG_CLK_SEL_MASK GENMASK(2, 0) +#define VSC73XX_MAC_CFG_CLK_SEL_OFFSET 0 +#define VSC73XX_MAC_CFG_CLK_SEL_1000M 1 +#define VSC73XX_MAC_CFG_CLK_SEL_100M 2 +#define VSC73XX_MAC_CFG_CLK_SEL_10M 3 +#define VSC73XX_MAC_CFG_CLK_SEL_EXT 4 + +#define VSC73XX_MAC_CFG_1000M_F_PHY (VSC73XX_MAC_CFG_FDX | \ + VSC73XX_MAC_CFG_GIGA_MODE | \ + VSC73XX_MAC_CFG_TX_IPG_1000M | \ + VSC73XX_MAC_CFG_CLK_SEL_EXT) +#define VSC73XX_MAC_CFG_100_10M_F_PHY (VSC73XX_MAC_CFG_FDX | \ + VSC73XX_MAC_CFG_TX_IPG_100_10M | \ + VSC73XX_MAC_CFG_CLK_SEL_EXT) +#define VSC73XX_MAC_CFG_100_10M_H_PHY (VSC73XX_MAC_CFG_TX_IPG_100_10M | \ + VSC73XX_MAC_CFG_CLK_SEL_EXT) +#define VSC73XX_MAC_CFG_1000M_F_RGMII (VSC73XX_MAC_CFG_FDX | \ + VSC73XX_MAC_CFG_GIGA_MODE | \ + VSC73XX_MAC_CFG_TX_IPG_1000M | \ + VSC73XX_MAC_CFG_CLK_SEL_1000M) +#define VSC73XX_MAC_CFG_RESET (VSC73XX_MAC_CFG_PORT_RST | \ + VSC73XX_MAC_CFG_MAC_RX_RST | \ + VSC73XX_MAC_CFG_MAC_TX_RST) + +/* Flow control register bits */ +#define VSC73XX_FCCONF_ZERO_PAUSE_EN BIT(17) +#define VSC73XX_FCCONF_FLOW_CTRL_OBEY BIT(16) +#define VSC73XX_FCCONF_PAUSE_VAL_MASK GENMASK(15, 0) + +/* ADVPORTM advanced port setup register bits */ +#define VSC73XX_ADVPORTM_IFG_PPM BIT(7) +#define VSC73XX_ADVPORTM_EXC_COL_CONT BIT(6) +#define VSC73XX_ADVPORTM_EXT_PORT BIT(5) +#define VSC73XX_ADVPORTM_INV_GTX BIT(4) +#define VSC73XX_ADVPORTM_ENA_GTX BIT(3) +#define VSC73XX_ADVPORTM_DDR_MODE BIT(2) +#define VSC73XX_ADVPORTM_IO_LOOPBACK BIT(1) +#define VSC73XX_ADVPORTM_HOST_LOOPBACK BIT(0) + +/* CAT_DROP categorizer frame dropping register bits */ +#define VSC73XX_CAT_DROP_DROP_MC_SMAC_ENA BIT(6) +#define VSC73XX_CAT_DROP_FWD_CTRL_ENA BIT(4) +#define VSC73XX_CAT_DROP_FWD_PAUSE_ENA BIT(3) +#define VSC73XX_CAT_DROP_UNTAGGED_ENA BIT(2) +#define VSC73XX_CAT_DROP_TAGGED_ENA BIT(1) +#define VSC73XX_CAT_DROP_NULL_MAC_ENA BIT(0) + +#define VSC73XX_Q_MISC_CONF_EXTENT_MEM BIT(31) +#define VSC73XX_Q_MISC_CONF_EARLY_TX_MASK GENMASK(4, 1) +#define VSC73XX_Q_MISC_CONF_EARLY_TX_512 (1 << 1) +#define VSC73XX_Q_MISC_CONF_MAC_PAUSE_MODE BIT(0) + +/* Frame analyzer block 2 registers */ +#define VSC73XX_STORMLIMIT 0x02 +#define VSC73XX_ADVLEARN 0x03 +#define VSC73XX_IFLODMSK 0x04 +#define VSC73XX_VLANMASK 0x05 +#define VSC73XX_MACHDATA 0x06 +#define VSC73XX_MACLDATA 0x07 +#define VSC73XX_ANMOVED 0x08 +#define VSC73XX_ANAGEFIL 0x09 +#define VSC73XX_ANEVENTS 0x0a +#define VSC73XX_ANCNTMASK 0x0b +#define VSC73XX_ANCNTVAL 0x0c +#define VSC73XX_LEARNMASK 0x0d +#define VSC73XX_UFLODMASK 0x0e +#define VSC73XX_MFLODMASK 0x0f +#define VSC73XX_RECVMASK 0x10 +#define VSC73XX_AGGRCTRL 0x20 +#define VSC73XX_AGGRMSKS 0x30 /* Until 0x3f */ +#define VSC73XX_DSTMASKS 0x40 /* Until 0x7f */ +#define VSC73XX_SRCMASKS 0x80 /* Until 0x87 */ +#define VSC73XX_CAPENAB 0xa0 +#define VSC73XX_MACACCESS 0xb0 +#define 
VSC73XX_IPMCACCESS 0xb1 +#define VSC73XX_MACTINDX 0xc0 +#define VSC73XX_VLANACCESS 0xd0 +#define VSC73XX_VLANTIDX 0xe0 +#define VSC73XX_AGENCTRL 0xf0 +#define VSC73XX_CAPRST 0xff + +#define VSC73XX_MACACCESS_CPU_COPY BIT(14) +#define VSC73XX_MACACCESS_FWD_KILL BIT(13) +#define VSC73XX_MACACCESS_IGNORE_VLAN BIT(12) +#define VSC73XX_MACACCESS_AGED_FLAG BIT(11) +#define VSC73XX_MACACCESS_VALID BIT(10) +#define VSC73XX_MACACCESS_LOCKED BIT(9) +#define VSC73XX_MACACCESS_DEST_IDX_MASK GENMASK(8, 3) +#define VSC73XX_MACACCESS_CMD_MASK GENMASK(2, 0) +#define VSC73XX_MACACCESS_CMD_IDLE 0 +#define VSC73XX_MACACCESS_CMD_LEARN 1 +#define VSC73XX_MACACCESS_CMD_FORGET 2 +#define VSC73XX_MACACCESS_CMD_AGE_TABLE 3 +#define VSC73XX_MACACCESS_CMD_FLUSH_TABLE 4 +#define VSC73XX_MACACCESS_CMD_CLEAR_TABLE 5 +#define VSC73XX_MACACCESS_CMD_READ_ENTRY 6 +#define VSC73XX_MACACCESS_CMD_WRITE_ENTRY 7 + +#define VSC73XX_VLANACCESS_LEARN_DISABLED BIT(30) +#define VSC73XX_VLANACCESS_VLAN_MIRROR BIT(29) +#define VSC73XX_VLANACCESS_VLAN_SRC_CHECK BIT(28) +#define VSC73XX_VLANACCESS_VLAN_PORT_MASK GENMASK(9, 2) +#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_MASK GENMASK(2, 0) +#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_IDLE 0 +#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_READ_ENTRY 1 +#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_WRITE_ENTRY 2 +#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_CLEAR_TABLE 3 + +/* MII block 3 registers */ +#define VSC73XX_MII_STAT 0x0 +#define VSC73XX_MII_CMD 0x1 +#define VSC73XX_MII_DATA 0x2 + +/* Arbiter block 5 registers */ +#define VSC73XX_ARBEMPTY 0x0c +#define VSC73XX_ARBDISC 0x0e +#define VSC73XX_SBACKWDROP 0x12 +#define VSC73XX_DBACKWDROP 0x13 +#define VSC73XX_ARBBURSTPROB 0x15 + +/* System block 7 registers */ +#define VSC73XX_ICPU_SIPAD 0x01 +#define VSC73XX_GMIIDELAY 0x05 +#define VSC73XX_ICPU_CTRL 0x10 +#define VSC73XX_ICPU_ADDR 0x11 +#define VSC73XX_ICPU_SRAM 0x12 +#define VSC73XX_HWSEM 0x13 +#define VSC73XX_GLORESET 0x14 +#define VSC73XX_ICPU_MBOX_VAL 0x15 +#define VSC73XX_ICPU_MBOX_SET 0x16 +#define VSC73XX_ICPU_MBOX_CLR 0x17 +#define VSC73XX_CHIPID 0x18 +#define VSC73XX_GPIO 0x34 + +#define VSC73XX_GMIIDELAY_GMII0_GTXDELAY_NONE 0 +#define VSC73XX_GMIIDELAY_GMII0_GTXDELAY_1_4_NS 1 +#define VSC73XX_GMIIDELAY_GMII0_GTXDELAY_1_7_NS 2 +#define VSC73XX_GMIIDELAY_GMII0_GTXDELAY_2_0_NS 3 + +#define VSC73XX_GMIIDELAY_GMII0_RXDELAY_NONE (0 << 4) +#define VSC73XX_GMIIDELAY_GMII0_RXDELAY_1_4_NS (1 << 4) +#define VSC73XX_GMIIDELAY_GMII0_RXDELAY_1_7_NS (2 << 4) +#define VSC73XX_GMIIDELAY_GMII0_RXDELAY_2_0_NS (3 << 4) + +#define VSC73XX_ICPU_CTRL_WATCHDOG_RST BIT(31) +#define VSC73XX_ICPU_CTRL_CLK_DIV_MASK GENMASK(12, 8) +#define VSC73XX_ICPU_CTRL_SRST_HOLD BIT(7) +#define VSC73XX_ICPU_CTRL_ICPU_PI_EN BIT(6) +#define VSC73XX_ICPU_CTRL_BOOT_EN BIT(3) +#define VSC73XX_ICPU_CTRL_EXT_ACC_EN BIT(2) +#define VSC73XX_ICPU_CTRL_CLK_EN BIT(1) +#define VSC73XX_ICPU_CTRL_SRST BIT(0) + +#define VSC73XX_CHIPID_ID_SHIFT 12 +#define VSC73XX_CHIPID_ID_MASK 0xffff +#define VSC73XX_CHIPID_REV_SHIFT 28 +#define VSC73XX_CHIPID_REV_MASK 0xf +#define VSC73XX_CHIPID_ID_7385 0x7385 +#define VSC73XX_CHIPID_ID_7388 0x7388 +#define VSC73XX_CHIPID_ID_7395 0x7395 +#define VSC73XX_CHIPID_ID_7398 0x7398 + +#define VSC73XX_GLORESET_STROBE BIT(4) +#define VSC73XX_GLORESET_ICPU_LOCK BIT(3) +#define VSC73XX_GLORESET_MEM_LOCK BIT(2) +#define VSC73XX_GLORESET_PHY_RESET BIT(1) +#define VSC73XX_GLORESET_MASTER_RESET BIT(0) + +#define VSC73XX_CMD_MODE_READ 0 +#define VSC73XX_CMD_MODE_WRITE 1 +#define VSC73XX_CMD_MODE_SHIFT 4 +#define VSC73XX_CMD_BLOCK_SHIFT 
5 +#define VSC73XX_CMD_BLOCK_MASK 0x7 +#define VSC73XX_CMD_SUBBLOCK_MASK 0xf + +#define VSC7385_CLOCK_DELAY ((3 << 4) | 3) +#define VSC7385_CLOCK_DELAY_MASK ((3 << 4) | 3) + +#define VSC73XX_ICPU_CTRL_STOP (VSC73XX_ICPU_CTRL_SRST_HOLD | \ + VSC73XX_ICPU_CTRL_BOOT_EN | \ + VSC73XX_ICPU_CTRL_EXT_ACC_EN) + +#define VSC73XX_ICPU_CTRL_START (VSC73XX_ICPU_CTRL_CLK_DIV | \ + VSC73XX_ICPU_CTRL_BOOT_EN | \ + VSC73XX_ICPU_CTRL_CLK_EN | \ + VSC73XX_ICPU_CTRL_SRST) + +/** + * struct vsc73xx - VSC73xx state container + */ +struct vsc73xx { + struct device *dev; + struct gpio_desc *reset; + struct spi_device *spi; + struct dsa_switch *ds; + struct gpio_chip gc; + u16 chipid; + u8 addr[ETH_ALEN]; + struct mutex lock; /* Protects SPI traffic */ +}; + +#define IS_7385(a) ((a)->chipid == VSC73XX_CHIPID_ID_7385) +#define IS_7388(a) ((a)->chipid == VSC73XX_CHIPID_ID_7388) +#define IS_7395(a) ((a)->chipid == VSC73XX_CHIPID_ID_7395) +#define IS_7398(a) ((a)->chipid == VSC73XX_CHIPID_ID_7398) +#define IS_739X(a) (IS_7395(a) || IS_7398(a)) + +struct vsc73xx_counter { + u8 counter; + const char *name; +}; + +/* Counters are named according to the MIB standards where applicable. + * Some counters are custom, non-standard. The standard counters are + * named in accordance with RFC2819, RFC2021 and IEEE Std 802.3-2002 Annex + * 30A Counters. + */ +static const struct vsc73xx_counter vsc73xx_rx_counters[] = { + { 0, "RxEtherStatsPkts" }, + { 1, "RxBroadcast+MulticastPkts" }, /* non-standard counter */ + { 2, "RxTotalErrorPackets" }, /* non-standard counter */ + { 3, "RxEtherStatsBroadcastPkts" }, + { 4, "RxEtherStatsMulticastPkts" }, + { 5, "RxEtherStatsPkts64Octets" }, + { 6, "RxEtherStatsPkts65to127Octets" }, + { 7, "RxEtherStatsPkts128to255Octets" }, + { 8, "RxEtherStatsPkts256to511Octets" }, + { 9, "RxEtherStatsPkts512to1023Octets" }, + { 10, "RxEtherStatsPkts1024to1518Octets" }, + { 11, "RxJumboFrames" }, /* non-standard counter */ + { 12, "RxaPauseMACControlFramesTransmitted" }, + { 13, "RxFIFODrops" }, /* non-standard counter */ + { 14, "RxBackwardDrops" }, /* non-standard counter */ + { 15, "RxClassifierDrops" }, /* non-standard counter */ + { 16, "RxEtherStatsCRCAlignErrors" }, + { 17, "RxEtherStatsUndersizePkts" }, + { 18, "RxEtherStatsOversizePkts" }, + { 19, "RxEtherStatsFragments" }, + { 20, "RxEtherStatsJabbers" }, + { 21, "RxaMACControlFramesReceived" }, + /* 22-24 are undefined */ + { 25, "RxaFramesReceivedOK" }, + { 26, "RxQoSClass0" }, /* non-standard counter */ + { 27, "RxQoSClass1" }, /* non-standard counter */ + { 28, "RxQoSClass2" }, /* non-standard counter */ + { 29, "RxQoSClass3" }, /* non-standard counter */ +}; + +static const struct vsc73xx_counter vsc73xx_tx_counters[] = { + { 0, "TxEtherStatsPkts" }, + { 1, "TxBroadcast+MulticastPkts" }, /* non-standard counter */ + { 2, "TxTotalErrorPackets" }, /* non-standard counter */ + { 3, "TxEtherStatsBroadcastPkts" }, + { 4, "TxEtherStatsMulticastPkts" }, + { 5, "TxEtherStatsPkts64Octets" }, + { 6, "TxEtherStatsPkts65to127Octets" }, + { 7, "TxEtherStatsPkts128to255Octets" }, + { 8, "TxEtherStatsPkts256to511Octets" }, + { 9, "TxEtherStatsPkts512to1023Octets" }, + { 10, "TxEtherStatsPkts1024to1518Octets" }, + { 11, "TxJumboFrames" }, /* non-standard counter */ + { 12, "TxaPauseMACControlFramesTransmitted" }, + { 13, "TxFIFODrops" }, /* non-standard counter */ + { 14, "TxDrops" }, /* non-standard counter */ + { 15, "TxEtherStatsCollisions" }, + { 16, "TxEtherStatsCRCAlignErrors" }, + { 17, "TxEtherStatsUndersizePkts" }, + { 18, 
"TxEtherStatsOversizePkts" }, + { 19, "TxEtherStatsFragments" }, + { 20, "TxEtherStatsJabbers" }, + /* 21-24 are undefined */ + { 25, "TxaFramesReceivedOK" }, + { 26, "TxQoSClass0" }, /* non-standard counter */ + { 27, "TxQoSClass1" }, /* non-standard counter */ + { 28, "TxQoSClass2" }, /* non-standard counter */ + { 29, "TxQoSClass3" }, /* non-standard counter */ +}; + +static int vsc73xx_is_addr_valid(u8 block, u8 subblock) +{ + switch (block) { + case VSC73XX_BLOCK_MAC: + switch (subblock) { + case 0 ... 4: + case 6: + return 1; + } + break; + + case VSC73XX_BLOCK_ANALYZER: + case VSC73XX_BLOCK_SYSTEM: + switch (subblock) { + case 0: + return 1; + } + break; + + case VSC73XX_BLOCK_MII: + case VSC73XX_BLOCK_CAPTURE: + case VSC73XX_BLOCK_ARBITER: + switch (subblock) { + case 0 ... 1: + return 1; + } + break; + } + + return 0; +} + +static u8 vsc73xx_make_addr(u8 mode, u8 block, u8 subblock) +{ + u8 ret; + + ret = (block & VSC73XX_CMD_BLOCK_MASK) << VSC73XX_CMD_BLOCK_SHIFT; + ret |= (mode & 1) << VSC73XX_CMD_MODE_SHIFT; + ret |= subblock & VSC73XX_CMD_SUBBLOCK_MASK; + + return ret; +} + +static int vsc73xx_read(struct vsc73xx *vsc, u8 block, u8 subblock, u8 reg, + u32 *val) +{ + struct spi_transfer t[2]; + struct spi_message m; + u8 cmd[4]; + u8 buf[4]; + int ret; + + if (!vsc73xx_is_addr_valid(block, subblock)) + return -EINVAL; + + spi_message_init(&m); + + memset(&t, 0, sizeof(t)); + + t[0].tx_buf = cmd; + t[0].len = sizeof(cmd); + spi_message_add_tail(&t[0], &m); + + t[1].rx_buf = buf; + t[1].len = sizeof(buf); + spi_message_add_tail(&t[1], &m); + + cmd[0] = vsc73xx_make_addr(VSC73XX_CMD_MODE_READ, block, subblock); + cmd[1] = reg; + cmd[2] = 0; + cmd[3] = 0; + + mutex_lock(&vsc->lock); + ret = spi_sync(vsc->spi, &m); + mutex_unlock(&vsc->lock); + + if (ret) + return ret; + + *val = (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3]; + + return 0; +} + +static int vsc73xx_write(struct vsc73xx *vsc, u8 block, u8 subblock, u8 reg, + u32 val) +{ + struct spi_transfer t[2]; + struct spi_message m; + u8 cmd[2]; + u8 buf[4]; + int ret; + + if (!vsc73xx_is_addr_valid(block, subblock)) + return -EINVAL; + + spi_message_init(&m); + + memset(&t, 0, sizeof(t)); + + t[0].tx_buf = cmd; + t[0].len = sizeof(cmd); + spi_message_add_tail(&t[0], &m); + + t[1].tx_buf = buf; + t[1].len = sizeof(buf); + spi_message_add_tail(&t[1], &m); + + cmd[0] = vsc73xx_make_addr(VSC73XX_CMD_MODE_WRITE, block, subblock); + cmd[1] = reg; + + buf[0] = (val >> 24) & 0xff; + buf[1] = (val >> 16) & 0xff; + buf[2] = (val >> 8) & 0xff; + buf[3] = val & 0xff; + + mutex_lock(&vsc->lock); + ret = spi_sync(vsc->spi, &m); + mutex_unlock(&vsc->lock); + + return ret; +} + +static int vsc73xx_update_bits(struct vsc73xx *vsc, u8 block, u8 subblock, + u8 reg, u32 mask, u32 val) +{ + u32 tmp, orig; + int ret; + + /* Same read-modify-write algorithm as e.g. 
regmap */ + ret = vsc73xx_read(vsc, block, subblock, reg, &orig); + if (ret) + return ret; + tmp = orig & ~mask; + tmp |= val & mask; + return vsc73xx_write(vsc, block, subblock, reg, tmp); +} + +static int vsc73xx_detect(struct vsc73xx *vsc) +{ + bool icpu_si_boot_en; + bool icpu_pi_en; + u32 val; + u32 rev; + int ret; + u32 id; + + ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0, + VSC73XX_ICPU_MBOX_VAL, &val); + if (ret) { + dev_err(vsc->dev, "unable to read mailbox (%d)\n", ret); + return ret; + } + + if (val == 0xffffffff) { + dev_info(vsc->dev, "chip seems dead, assert reset\n"); + gpiod_set_value_cansleep(vsc->reset, 1); + /* Reset pulse should be 20ns minimum, according to datasheet + * table 245, so 10us should be fine + */ + usleep_range(10, 100); + gpiod_set_value_cansleep(vsc->reset, 0); + /* Wait 20ms according to datasheet table 245 */ + msleep(20); + + ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0, + VSC73XX_ICPU_MBOX_VAL, &val); + if (val == 0xffffffff) { + dev_err(vsc->dev, "seems not to help, giving up\n"); + return -ENODEV; + } + } + + ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0, + VSC73XX_CHIPID, &val); + if (ret) { + dev_err(vsc->dev, "unable to read chip id (%d)\n", ret); + return ret; + } + + id = (val >> VSC73XX_CHIPID_ID_SHIFT) & + VSC73XX_CHIPID_ID_MASK; + switch (id) { + case VSC73XX_CHIPID_ID_7385: + case VSC73XX_CHIPID_ID_7388: + case VSC73XX_CHIPID_ID_7395: + case VSC73XX_CHIPID_ID_7398: + break; + default: + dev_err(vsc->dev, "unsupported chip, id=%04x\n", id); + return -ENODEV; + } + + vsc->chipid = id; + rev = (val >> VSC73XX_CHIPID_REV_SHIFT) & + VSC73XX_CHIPID_REV_MASK; + dev_info(vsc->dev, "VSC%04X (rev: %d) switch found\n", id, rev); + + ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0, + VSC73XX_ICPU_CTRL, &val); + if (ret) { + dev_err(vsc->dev, "unable to read iCPU control\n"); + return ret; + } + + /* The iCPU can always be used but can boot in different ways. + * If it is initially disabled and has no external memory, + * we are in control and can do whatever we like, else we + * are probably in trouble (we need some way to communicate + * with the running firmware) so we bail out for now. 
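 + * The SI boot-enable and PI external-memory bits read above give + * four combinations, decoded below; only the "iCPU disabled, no + * external memory" case is supported by this driver.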
+ */ + icpu_pi_en = !!(val & VSC73XX_ICPU_CTRL_ICPU_PI_EN); + icpu_si_boot_en = !!(val & VSC73XX_ICPU_CTRL_BOOT_EN); + if (icpu_si_boot_en && icpu_pi_en) { + dev_err(vsc->dev, + "iCPU enabled boots from SI, has external memory\n"); + dev_err(vsc->dev, "no idea how to deal with this\n"); + return -ENODEV; + } + if (icpu_si_boot_en && !icpu_pi_en) { + dev_err(vsc->dev, + "iCPU enabled boots from SI, no external memory\n"); + dev_err(vsc->dev, "no idea how to deal with this\n"); + return -ENODEV; + } + if (!icpu_si_boot_en && icpu_pi_en) { + dev_err(vsc->dev, + "iCPU enabled, boots from PI external memory\n"); + dev_err(vsc->dev, "no idea how to deal with this\n"); + return -ENODEV; + } + /* !icpu_si_boot_en && !icpu_pi_en */ + dev_info(vsc->dev, "iCPU disabled, no external memory\n"); + + return 0; +} + +static int vsc73xx_phy_read(struct dsa_switch *ds, int phy, int regnum) +{ + struct vsc73xx *vsc = ds->priv; + u32 cmd; + u32 val; + int ret; + + /* Setting bit 26 means "read" */ + cmd = BIT(26) | (phy << 21) | (regnum << 16); + ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, 0, 1, cmd); + if (ret) + return ret; + msleep(2); + ret = vsc73xx_read(vsc, VSC73XX_BLOCK_MII, 0, 2, &val); + if (ret) + return ret; + if (val & BIT(16)) { + dev_err(vsc->dev, "reading reg %02x from phy%d failed\n", + regnum, phy); + return -EIO; + } + val &= 0xFFFFU; + + dev_dbg(vsc->dev, "read reg %02x from phy%d = %04x\n", + regnum, phy, val); + + return val; +} + +static int vsc73xx_phy_write(struct dsa_switch *ds, int phy, int regnum, + u16 val) +{ + struct vsc73xx *vsc = ds->priv; + u32 cmd; + int ret; + + /* It was found through tedious experiments that this router + * chip really hates to have its PHYs reset. They + * never recover if that happens: autonegotiation stops + * working after a reset. Just filter out this command. + * (Resetting the whole chip is OK.) + */ + if (regnum == 0 && (val & BIT(15))) { + dev_info(vsc->dev, "reset PHY - disallowed\n"); + return 0; + } + + cmd = (phy << 21) | (regnum << 16) | val; + ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, 0, 1, cmd); + if (ret) + return ret; + + dev_dbg(vsc->dev, "write %04x to reg %02x in phy%d\n", + val, regnum, phy); + return 0; +} + +static enum dsa_tag_protocol vsc73xx_get_tag_protocol(struct dsa_switch *ds, + int port) +{ + /* The switch internally uses an 8 byte header with length, + * source port, tag, LPA and priority. This is supposedly + * only accessible when operating the switch using the internal + * CPU or with an external CPU mapping the device in, but not + * when operating the switch over SPI and putting frames in/out + * on port 6 (the CPU port). So far we must assume that we + * cannot access the tag. (See "Internal frame header" section + * 3.9.1 in the manual.) + */ + return DSA_TAG_PROTO_NONE; +} + +static int vsc73xx_setup(struct dsa_switch *ds) +{ + struct vsc73xx *vsc = ds->priv; + int i; + + dev_info(vsc->dev, "set up the switch\n"); + + /* Issue RESET */ + vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GLORESET, + VSC73XX_GLORESET_MASTER_RESET); + usleep_range(125, 200); + + /* Initialize memory, initialize RAM bank 0..15 except 6 and 7. + * This sequence appears in the + * VSC7385 SparX-G5 datasheet section 6.6.1 + * VSC7395 SparX-G5e datasheet section 6.6.1 + * "initialization sequence". + * No explanation is given for the 0x1010400 magic number. 
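 + * A guess, not from the datasheet: the low bits select the RAM + * bank, since the loop below writes 0x1010400 + i for each bank i.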
+ */ + for (i = 0; i <= 15; i++) { + if (i != 6 && i != 7) { + vsc73xx_write(vsc, VSC73XX_BLOCK_MEMINIT, + 2, + 0, 0x1010400 + i); + mdelay(1); + } + } + mdelay(30); + + /* Clear MAC table */ + vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, + VSC73XX_MACACCESS, + VSC73XX_MACACCESS_CMD_CLEAR_TABLE); + + /* Clear VLAN table */ + vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, + VSC73XX_VLANACCESS, + VSC73XX_VLANACCESS_VLAN_TBL_CMD_CLEAR_TABLE); + + msleep(40); + + /* Use 20KiB buffers on all ports on VSC7395 + * The VSC7385 has 16KiB buffers and that is the + * default if we don't set this up explicitly. + * Port "31" is "all ports". + */ + if (IS_739X(vsc)) + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, 0x1f, + VSC73XX_Q_MISC_CONF, + VSC73XX_Q_MISC_CONF_EXTENT_MEM); + + /* Put all ports into reset until enabled */ + for (i = 0; i < 7; i++) { + if (i == 5) + continue; + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, i, + VSC73XX_MAC_CFG, VSC73XX_MAC_CFG_RESET); + } + + /* MII delay, set both GTX and RX delay to 2 ns */ + vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GMIIDELAY, + VSC73XX_GMIIDELAY_GMII0_GTXDELAY_2_0_NS | + VSC73XX_GMIIDELAY_GMII0_RXDELAY_2_0_NS); + /* Enable reception of frames on all ports */ + vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_RECVMASK, + 0x5f); + /* IP multicast flood mask (table 144) */ + vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_IFLODMSK, + 0xff); + + mdelay(50); + + /* Release reset from the internal PHYs */ + vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GLORESET, + VSC73XX_GLORESET_PHY_RESET); + + udelay(4); + + return 0; +} + +static void vsc73xx_init_port(struct vsc73xx *vsc, int port) +{ + u32 val; + + /* MAC configure, first reset the port and then write defaults */ + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, + port, + VSC73XX_MAC_CFG, + VSC73XX_MAC_CFG_RESET); + + /* Take up the port in 1Gbit mode by default, this will be + * augmented after auto-negotiation on the PHY-facing + * ports. + */ + if (port == CPU_PORT) + val = VSC73XX_MAC_CFG_1000M_F_RGMII; + else + val = VSC73XX_MAC_CFG_1000M_F_PHY; + + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, + port, + VSC73XX_MAC_CFG, + val | + VSC73XX_MAC_CFG_TX_EN | + VSC73XX_MAC_CFG_RX_EN); + + /* Max length, we can do up to 9.6 KiB, so allow that. + * According to the application note "VSC7398 Jumbo Frames", setting + * the MTU to 9.6 KB does not affect the performance on standard + * frames, so just enable it. It is clear from the application note + * that "9.6 kilobytes" == 9600 bytes. + */ + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, + port, + VSC73XX_MAXLEN, 9600); + + /* Flow control for the CPU port: + * Use a zero delay pause frame when pause condition is left + * Obey pause control frames + */ + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, + port, + VSC73XX_FCCONF, + VSC73XX_FCCONF_ZERO_PAUSE_EN | + VSC73XX_FCCONF_FLOW_CTRL_OBEY); + + /* Issue pause control frames on PHY facing ports. + * Allow early initiation of MAC transmission if the amount + * of egress data is below 512 bytes on CPU port. + * FIXME: enable 20KiB buffers? 
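 + * (VSC73XX_Q_MISC_CONF_EXTENT_MEM is ORed in unconditionally just + * below, so the extended 20KiB buffer mode appears to be enabled + * here already; the FIXME may be stale.)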
+ */ + if (port == CPU_PORT) + val = VSC73XX_Q_MISC_CONF_EARLY_TX_512; + else + val = VSC73XX_Q_MISC_CONF_MAC_PAUSE_MODE; + val |= VSC73XX_Q_MISC_CONF_EXTENT_MEM; + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, + port, + VSC73XX_Q_MISC_CONF, + val); + + /* Flow control MAC: a MAC address used in flow control frames */ + val = (vsc->addr[5] << 16) | (vsc->addr[4] << 8) | (vsc->addr[3]); + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, + port, + VSC73XX_FCMACHI, + val); + val = (vsc->addr[2] << 16) | (vsc->addr[1] << 8) | (vsc->addr[0]); + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, + port, + VSC73XX_FCMACLO, + val); + + /* Tell the categorizer to forward pause frames, not control + * frames. Do not drop anything. + */ + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, + port, + VSC73XX_CAT_DROP, + VSC73XX_CAT_DROP_FWD_PAUSE_ENA); + + /* Clear all counters */ + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, + port, VSC73XX_C_RX0, 0); +} + +static void vsc73xx_adjust_enable_port(struct vsc73xx *vsc, + int port, struct phy_device *phydev, + u32 initval) +{ + u32 val = initval; + u8 seed; + + /* Reset this port. FIXME: break out subroutine */ + val |= VSC73XX_MAC_CFG_RESET; + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG, val); + + /* Seed the port randomness with randomness */ + get_random_bytes(&seed, 1); + val |= seed << VSC73XX_MAC_CFG_SEED_OFFSET; + val |= VSC73XX_MAC_CFG_SEED_LOAD; + val |= VSC73XX_MAC_CFG_WEXC_DIS; + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG, val); + + /* Flow control for the PHY facing ports: + * Use a zero delay pause frame when pause condition is left + * Obey pause control frames + * When generating pause frames, use 0xff as pause value + */ + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_FCCONF, + VSC73XX_FCCONF_ZERO_PAUSE_EN | + VSC73XX_FCCONF_FLOW_CTRL_OBEY | + 0xff); + + /* Disallow backward dropping of frames from this port */ + vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0, + VSC73XX_SBACKWDROP, BIT(port), 0); + + /* Enable TX, RX, deassert reset, stop loading seed */ + vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port, + VSC73XX_MAC_CFG, + VSC73XX_MAC_CFG_RESET | VSC73XX_MAC_CFG_SEED_LOAD | + VSC73XX_MAC_CFG_TX_EN | VSC73XX_MAC_CFG_RX_EN, + VSC73XX_MAC_CFG_TX_EN | VSC73XX_MAC_CFG_RX_EN); +} + +static void vsc73xx_adjust_link(struct dsa_switch *ds, int port, + struct phy_device *phydev) +{ + struct vsc73xx *vsc = ds->priv; + u32 val; + + /* Special handling of the CPU-facing port */ + if (port == CPU_PORT) { + /* Other ports are already initialized but not this one */ + vsc73xx_init_port(vsc, CPU_PORT); + /* Select the external port for this interface (EXT_PORT) + * Enable the GMII GTX external clock + * Use double data rate (DDR mode) + */ + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, + CPU_PORT, + VSC73XX_ADVPORTM, + VSC73XX_ADVPORTM_EXT_PORT | + VSC73XX_ADVPORTM_ENA_GTX | + VSC73XX_ADVPORTM_DDR_MODE); + } + + /* This is the MAC configuration that always needs to happen + * after a PHY or the CPU port comes up or down. 
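 + * On link-down the code below stops RX, has the arbiter discard + * frames from the port, waits for its queue to drain, resets the + * MAC and removes the port from the receive (forwarding) mask; on + * link-up it re-enables the port with the negotiated speed/duplex.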
+ */ + if (!phydev->link) { + int maxloop = 10; + + dev_dbg(vsc->dev, "port %d: went down\n", + port); + + /* Disable RX on this port */ + vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port, + VSC73XX_MAC_CFG, + VSC73XX_MAC_CFG_RX_EN, 0); + + /* Discard packets */ + vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0, + VSC73XX_ARBDISC, BIT(port), BIT(port)); + + /* Wait until queue is empty */ + vsc73xx_read(vsc, VSC73XX_BLOCK_ARBITER, 0, + VSC73XX_ARBEMPTY, &val); + while (!(val & BIT(port))) { + msleep(1); + vsc73xx_read(vsc, VSC73XX_BLOCK_ARBITER, 0, + VSC73XX_ARBEMPTY, &val); + if (--maxloop == 0) { + dev_err(vsc->dev, + "timeout waiting for block arbiter\n"); + /* Continue anyway */ + break; + } + } + + /* Put this port into reset */ + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG, + VSC73XX_MAC_CFG_RESET); + + /* Accept packets again */ + vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0, + VSC73XX_ARBDISC, BIT(port), 0); + + /* Allow backward dropping of frames from this port */ + vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0, + VSC73XX_SBACKWDROP, BIT(port), BIT(port)); + + /* Receive mask (disable forwarding) */ + vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0, + VSC73XX_RECVMASK, BIT(port), 0); + + return; + } + + /* Figure out what speed was negotiated */ + if (phydev->speed == SPEED_1000) { + dev_dbg(vsc->dev, "port %d: 1000 Mbit mode full duplex\n", + port); + + /* Set up default for internal port or external RGMII */ + if (phydev->interface == PHY_INTERFACE_MODE_RGMII) + val = VSC73XX_MAC_CFG_1000M_F_RGMII; + else + val = VSC73XX_MAC_CFG_1000M_F_PHY; + vsc73xx_adjust_enable_port(vsc, port, phydev, val); + } else if (phydev->speed == SPEED_100) { + if (phydev->duplex == DUPLEX_FULL) { + val = VSC73XX_MAC_CFG_100_10M_F_PHY; + dev_dbg(vsc->dev, + "port %d: 100 Mbit full duplex mode\n", + port); + } else { + val = VSC73XX_MAC_CFG_100_10M_H_PHY; + dev_dbg(vsc->dev, + "port %d: 100 Mbit half duplex mode\n", + port); + } + vsc73xx_adjust_enable_port(vsc, port, phydev, val); + } else if (phydev->speed == SPEED_10) { + if (phydev->duplex == DUPLEX_FULL) { + val = VSC73XX_MAC_CFG_100_10M_F_PHY; + dev_dbg(vsc->dev, + "port %d: 10 Mbit full duplex mode\n", + port); + } else { + val = VSC73XX_MAC_CFG_100_10M_H_PHY; + dev_dbg(vsc->dev, + "port %d: 10 Mbit half duplex mode\n", + port); + } + vsc73xx_adjust_enable_port(vsc, port, phydev, val); + } else { + dev_err(vsc->dev, + "could not adjust link: unknown speed\n"); + } + + /* Enable port (forwarding) in the receive mask */ + vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0, + VSC73XX_RECVMASK, BIT(port), BIT(port)); +} + +static int vsc73xx_port_enable(struct dsa_switch *ds, int port, + struct phy_device *phy) +{ + struct vsc73xx *vsc = ds->priv; + + dev_info(vsc->dev, "enable port %d\n", port); + vsc73xx_init_port(vsc, port); + + return 0; +} + +static void vsc73xx_port_disable(struct dsa_switch *ds, int port, + struct phy_device *phy) +{ + struct vsc73xx *vsc = ds->priv; + + /* Just put the port into reset */ + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, + VSC73XX_MAC_CFG, VSC73XX_MAC_CFG_RESET); +} + +static const struct vsc73xx_counter * +vsc73xx_find_counter(struct vsc73xx *vsc, + u8 counter, + bool tx) +{ + const struct vsc73xx_counter *cnts; + int num_cnts; + int i; + + if (tx) { + cnts = vsc73xx_tx_counters; + num_cnts = ARRAY_SIZE(vsc73xx_tx_counters); + } else { + cnts = vsc73xx_rx_counters; + num_cnts = ARRAY_SIZE(vsc73xx_rx_counters); + } + + for (i = 0; i < num_cnts; i++) { + const struct vsc73xx_counter 
*cnt; + + cnt = &cnts[i]; + if (cnt->counter == counter) + return cnt; + } + + return NULL; +} + +static void vsc73xx_get_strings(struct dsa_switch *ds, int port, u32 stringset, + uint8_t *data) +{ + const struct vsc73xx_counter *cnt; + struct vsc73xx *vsc = ds->priv; + u8 indices[6]; + int i, j; + u32 val; + int ret; + + if (stringset != ETH_SS_STATS) + return; + + ret = vsc73xx_read(vsc, VSC73XX_BLOCK_MAC, port, + VSC73XX_C_CFG, &val); + if (ret) + return; + + indices[0] = (val & 0x1f); /* RX counter 0 */ + indices[1] = ((val >> 5) & 0x1f); /* RX counter 1 */ + indices[2] = ((val >> 10) & 0x1f); /* RX counter 2 */ + indices[3] = ((val >> 16) & 0x1f); /* TX counter 0 */ + indices[4] = ((val >> 21) & 0x1f); /* TX counter 1 */ + indices[5] = ((val >> 26) & 0x1f); /* TX counter 2 */ + + /* The first counter is the RX octets */ + j = 0; + strncpy(data + j * ETH_GSTRING_LEN, + "RxEtherStatsOctets", ETH_GSTRING_LEN); + j++; + + /* Each port supports recording 3 RX counters and 3 TX counters; + * figure out which counters we use in this set-up and return + * their names. The hardware default counters will be number of + * packets on RX/TX, combined broadcast+multicast packets RX/TX and + * total error packets RX/TX. + */ + for (i = 0; i < 3; i++) { + cnt = vsc73xx_find_counter(vsc, indices[i], false); + if (cnt) + strncpy(data + j * ETH_GSTRING_LEN, + cnt->name, ETH_GSTRING_LEN); + j++; + } + + /* TX stats begin with the number of TX octets */ + strncpy(data + j * ETH_GSTRING_LEN, + "TxEtherStatsOctets", ETH_GSTRING_LEN); + j++; + + for (i = 3; i < 6; i++) { + cnt = vsc73xx_find_counter(vsc, indices[i], true); + if (cnt) + strncpy(data + j * ETH_GSTRING_LEN, + cnt->name, ETH_GSTRING_LEN); + j++; + } +} + +static int vsc73xx_get_sset_count(struct dsa_switch *ds, int port, int sset) +{ + /* We only support SS_STATS */ + if (sset != ETH_SS_STATS) + return 0; + /* RX and TX packets, then 3 RX counters, 3 TX counters */ + return 8; +} + +static void vsc73xx_get_ethtool_stats(struct dsa_switch *ds, int port, + uint64_t *data) +{ + struct vsc73xx *vsc = ds->priv; + u8 regs[] = { + VSC73XX_RXOCT, + VSC73XX_C_RX0, + VSC73XX_C_RX1, + VSC73XX_C_RX2, + VSC73XX_TXOCT, + VSC73XX_C_TX0, + VSC73XX_C_TX1, + VSC73XX_C_TX2, + }; + u32 val; + int ret; + int i; + + for (i = 0; i < ARRAY_SIZE(regs); i++) { + ret = vsc73xx_read(vsc, VSC73XX_BLOCK_MAC, port, + regs[i], &val); + if (ret) { + dev_err(vsc->dev, "error reading counter %d\n", i); + return; + } + data[i] = val; + } +} + +static const struct dsa_switch_ops vsc73xx_ds_ops = { + .get_tag_protocol = vsc73xx_get_tag_protocol, + .setup = vsc73xx_setup, + .phy_read = vsc73xx_phy_read, + .phy_write = vsc73xx_phy_write, + .adjust_link = vsc73xx_adjust_link, + .get_strings = vsc73xx_get_strings, + .get_ethtool_stats = vsc73xx_get_ethtool_stats, + .get_sset_count = vsc73xx_get_sset_count, + .port_enable = vsc73xx_port_enable, + .port_disable = vsc73xx_port_disable, +}; + +static int vsc73xx_gpio_get(struct gpio_chip *chip, unsigned int offset) +{ + struct vsc73xx *vsc = gpiochip_get_data(chip); + u32 val; + int ret; + + ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0, + VSC73XX_GPIO, &val); + if (ret) + return ret; + + return !!(val & BIT(offset)); +} + +static void vsc73xx_gpio_set(struct gpio_chip *chip, unsigned int offset, + int val) +{ + struct vsc73xx *vsc = gpiochip_get_data(chip); + u32 tmp = val ? 
BIT(offset) : 0; + + vsc73xx_update_bits(vsc, VSC73XX_BLOCK_SYSTEM, 0, + VSC73XX_GPIO, BIT(offset), tmp); +} + +static int vsc73xx_gpio_direction_output(struct gpio_chip *chip, + unsigned int offset, int val) +{ + struct vsc73xx *vsc = gpiochip_get_data(chip); + u32 tmp = val ? BIT(offset) : 0; + + return vsc73xx_update_bits(vsc, VSC73XX_BLOCK_SYSTEM, 0, + VSC73XX_GPIO, BIT(offset + 4) | BIT(offset), + BIT(offset + 4) | tmp); +} + +static int vsc73xx_gpio_direction_input(struct gpio_chip *chip, + unsigned int offset) +{ + struct vsc73xx *vsc = gpiochip_get_data(chip); + + return vsc73xx_update_bits(vsc, VSC73XX_BLOCK_SYSTEM, 0, + VSC73XX_GPIO, BIT(offset + 4), + 0); +} + +static int vsc73xx_gpio_get_direction(struct gpio_chip *chip, + unsigned int offset) +{ + struct vsc73xx *vsc = gpiochip_get_data(chip); + u32 val; + int ret; + + ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0, + VSC73XX_GPIO, &val); + if (ret) + return ret; + + return !(val & BIT(offset + 4)); +} + +static int vsc73xx_gpio_probe(struct vsc73xx *vsc) +{ + int ret; + + vsc->gc.label = devm_kasprintf(vsc->dev, GFP_KERNEL, "VSC%04x", + vsc->chipid); + vsc->gc.ngpio = 4; + vsc->gc.owner = THIS_MODULE; + vsc->gc.parent = vsc->dev; + vsc->gc.of_node = vsc->dev->of_node; + vsc->gc.base = -1; + vsc->gc.get = vsc73xx_gpio_get; + vsc->gc.set = vsc73xx_gpio_set; + vsc->gc.direction_input = vsc73xx_gpio_direction_input; + vsc->gc.direction_output = vsc73xx_gpio_direction_output; + vsc->gc.get_direction = vsc73xx_gpio_get_direction; + vsc->gc.can_sleep = true; + ret = devm_gpiochip_add_data(vsc->dev, &vsc->gc, vsc); + if (ret) { + dev_err(vsc->dev, "unable to register GPIO chip\n"); + return ret; + } + return 0; +} + +static int vsc73xx_probe(struct spi_device *spi) +{ + struct device *dev = &spi->dev; + struct vsc73xx *vsc; + int ret; + + vsc = devm_kzalloc(dev, sizeof(*vsc), GFP_KERNEL); + if (!vsc) + return -ENOMEM; + + spi_set_drvdata(spi, vsc); + vsc->spi = spi_dev_get(spi); + vsc->dev = dev; + mutex_init(&vsc->lock); + + /* Release reset, if any */ + vsc->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(vsc->reset)) { + dev_err(dev, "failed to get RESET GPIO\n"); + return PTR_ERR(vsc->reset); + } + if (vsc->reset) + /* Wait 20ms according to datasheet table 245 */ + msleep(20); + + spi->mode = SPI_MODE_0; + spi->bits_per_word = 8; + ret = spi_setup(spi); + if (ret < 0) { + dev_err(dev, "spi setup failed.\n"); + return ret; + } + + ret = vsc73xx_detect(vsc); + if (ret) { + dev_err(dev, "no chip found (%d)\n", ret); + return -ENODEV; + } + + eth_random_addr(vsc->addr); + dev_info(vsc->dev, + "MAC for control frames: %02X:%02X:%02X:%02X:%02X:%02X\n", + vsc->addr[0], vsc->addr[1], vsc->addr[2], + vsc->addr[3], vsc->addr[4], vsc->addr[5]); + + /* The VSC7395 switch chips have 5+1 ports which means 5 + * ordinary ports and a sixth CPU port facing the processor + * with an RGMII interface. These ports are numbered 0..4 + * and 6, so they leave a "hole" in the port map for port 5, + * which is invalid. + * + * The VSC7398 has 8 ports; port 7 is again the CPU port. + * + * We allocate 8 ports and avoid access to the nonexistent + * ports. 
+ */ + vsc->ds = dsa_switch_alloc(dev, 8); + if (!vsc->ds) + return -ENOMEM; + vsc->ds->priv = vsc; + + vsc->ds->ops = &vsc73xx_ds_ops; + ret = dsa_register_switch(vsc->ds); + if (ret) { + dev_err(dev, "unable to register switch (%d)\n", ret); + return ret; + } + + ret = vsc73xx_gpio_probe(vsc); + if (ret) { + dsa_unregister_switch(vsc->ds); + return ret; + } + + return 0; +} + +static int vsc73xx_remove(struct spi_device *spi) +{ + struct vsc73xx *vsc = spi_get_drvdata(spi); + + dsa_unregister_switch(vsc->ds); + gpiod_set_value(vsc->reset, 1); + + return 0; +} + +static const struct of_device_id vsc73xx_of_match[] = { + { + .compatible = "vitesse,vsc7385", + }, + { + .compatible = "vitesse,vsc7388", + }, + { + .compatible = "vitesse,vsc7395", + }, + { + .compatible = "vitesse,vsc7398", + }, + { }, +}; +MODULE_DEVICE_TABLE(of, vsc73xx_of_match); + +static struct spi_driver vsc73xx_driver = { + .probe = vsc73xx_probe, + .remove = vsc73xx_remove, + .driver = { + .name = "vsc73xx", + .of_match_table = vsc73xx_of_match, + }, +}; +module_spi_driver(vsc73xx_driver); + +MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>"); +MODULE_DESCRIPTION("Vitesse VSC7385/7388/7395/7398 driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index af766fd61151..6fde68aa13a4 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -81,7 +81,6 @@ source "drivers/net/ethernet/huawei/Kconfig" source "drivers/net/ethernet/i825xx/Kconfig" source "drivers/net/ethernet/ibm/Kconfig" source "drivers/net/ethernet/intel/Kconfig" -source "drivers/net/ethernet/neterion/Kconfig" source "drivers/net/ethernet/xscale/Kconfig" config JME @@ -128,6 +127,7 @@ config FEALNX cards. <http://www.myson.com.tw/> source "drivers/net/ethernet/natsemi/Kconfig" +source "drivers/net/ethernet/neterion/Kconfig" source "drivers/net/ethernet/netronome/Kconfig" source "drivers/net/ethernet/ni/Kconfig" source "drivers/net/ethernet/8390/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 8fbfe9ce2fa5..b45d5f626b59 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -20,7 +20,7 @@ obj-$(CONFIG_NET_VENDOR_AQUANTIA) += aquantia/ obj-$(CONFIG_NET_VENDOR_ARC) += arc/ obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/ obj-$(CONFIG_NET_VENDOR_AURORA) += aurora/ -obj-$(CONFIG_NET_CADENCE) += cadence/ +obj-$(CONFIG_NET_VENDOR_CADENCE) += cadence/ obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/ obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/ obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/ @@ -36,7 +36,6 @@ obj-$(CONFIG_NET_VENDOR_DEC) += dec/ obj-$(CONFIG_NET_VENDOR_DLINK) += dlink/ obj-$(CONFIG_NET_VENDOR_EMULEX) += emulex/ obj-$(CONFIG_NET_VENDOR_EZCHIP) += ezchip/ -obj-$(CONFIG_NET_VENDOR_EXAR) += neterion/ obj-$(CONFIG_NET_VENDOR_FARADAY) += faraday/ obj-$(CONFIG_NET_VENDOR_FREESCALE) += freescale/ obj-$(CONFIG_NET_VENDOR_FUJITSU) += fujitsu/ @@ -60,6 +59,7 @@ obj-$(CONFIG_NET_VENDOR_MOXART) += moxa/ obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/ obj-$(CONFIG_FEALNX) += fealnx.o obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/ +obj-$(CONFIG_NET_VENDOR_NETERION) += neterion/ obj-$(CONFIG_NET_VENDOR_NETRONOME) += netronome/ obj-$(CONFIG_NET_VENDOR_NI) += ni/ obj-$(CONFIG_NET_NETX) += netx-eth.o @@ -68,7 +68,7 @@ obj-$(CONFIG_NET_VENDOR_NVIDIA) += nvidia/ obj-$(CONFIG_LPC_ENET) += nxp/ obj-$(CONFIG_NET_VENDOR_OKI) += oki-semi/ obj-$(CONFIG_ETHOC) += ethoc.o -obj-$(CONFIG_NET_PACKET_ENGINE) += packetengines/ 
+obj-$(CONFIG_NET_VENDOR_PACKET_ENGINES) += packetengines/ obj-$(CONFIG_NET_VENDOR_PASEMI) += pasemi/ obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/ obj-$(CONFIG_NET_VENDOR_QUALCOMM) += qualcomm/ @@ -80,8 +80,7 @@ obj-$(CONFIG_NET_VENDOR_SAMSUNG) += samsung/ obj-$(CONFIG_NET_VENDOR_SEEQ) += seeq/ obj-$(CONFIG_NET_VENDOR_SILAN) += silan/ obj-$(CONFIG_NET_VENDOR_SIS) += sis/ -obj-$(CONFIG_SFC) += sfc/ -obj-$(CONFIG_SFC_FALCON) += sfc/falcon/ +obj-$(CONFIG_NET_VENDOR_SOLARFLARE) += sfc/ obj-$(CONFIG_NET_VENDOR_SGI) += sgi/ obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/ obj-$(CONFIG_NET_VENDOR_SOCIONEXT) += socionext/ diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c index 3872ab96b80a..097467f44b0d 100644 --- a/drivers/net/ethernet/adaptec/starfire.c +++ b/drivers/net/ethernet/adaptec/starfire.c @@ -802,7 +802,7 @@ static int starfire_init_one(struct pci_dev *pdev, int mii_status; for (phy = 0; phy < 32 && phy_idx < PHY_CNT; phy++) { mdio_write(dev, phy, MII_BMCR, BMCR_RESET); - mdelay(100); + msleep(100); boguscnt = 1000; while (--boguscnt > 0) if ((mdio_read(dev, phy, MII_BMCR) & BMCR_RESET) == 0) diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c index 8f71b79b4949..08945baee48a 100644 --- a/drivers/net/ethernet/alteon/acenic.c +++ b/drivers/net/ethernet/alteon/acenic.c @@ -1933,7 +1933,7 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm) while (idx != rxretprd) { struct ring_info *rip; struct sk_buff *skb; - struct rx_desc *rxdesc, *retdesc; + struct rx_desc *retdesc; u32 skbidx; int bd_flags, desc_type, mapsize; u16 csum; @@ -1959,19 +1959,16 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm) case 0: rip = &ap->skb->rx_std_skbuff[skbidx]; mapsize = ACE_STD_BUFSIZE; - rxdesc = &ap->rx_std_ring[skbidx]; std_count++; break; case BD_FLG_JUMBO: rip = &ap->skb->rx_jumbo_skbuff[skbidx]; mapsize = ACE_JUMBO_BUFSIZE; - rxdesc = &ap->rx_jumbo_ring[skbidx]; atomic_dec(&ap->cur_jumbo_bufs); break; case BD_FLG_MINI: rip = &ap->skb->rx_mini_skbuff[skbidx]; mapsize = ACE_MINI_BUFSIZE; - rxdesc = &ap->rx_mini_ring[skbidx]; mini_count++; break; default: diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index f2af87d70594..c673ac2df65b 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -2213,7 +2213,8 @@ static void ena_netpoll(struct net_device *netdev) #endif /* CONFIG_NET_POLL_CONTROLLER */ static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { u16 qid; /* we suspect that this is good for in-kernel network services that @@ -2223,7 +2224,7 @@ static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb, if (skb_rx_queue_recorded(skb)) qid = skb_get_rx_queue(skb); else - qid = fallback(dev, skb); + qid = fallback(dev, skb, NULL); return qid; } diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index be198cc0b10c..f5ad12c10934 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -2036,22 +2036,22 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name) } lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t), - GFP_ATOMIC); + GFP_KERNEL); if (!lp->tx_dma_addr) return -ENOMEM; lp->rx_dma_addr = kcalloc(lp->rx_ring_size, 
sizeof(dma_addr_t), - GFP_ATOMIC); + GFP_KERNEL); if (!lp->rx_dma_addr) return -ENOMEM; lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *), - GFP_ATOMIC); + GFP_KERNEL); if (!lp->tx_skbuff) return -ENOMEM; lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *), - GFP_ATOMIC); + GFP_KERNEL); if (!lp->rx_skbuff) return -ENOMEM; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c index cc1e4f820e64..533094233659 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c @@ -289,7 +289,7 @@ static int xgbe_alloc_pages(struct xgbe_prv_data *pdata, struct page *pages = NULL; dma_addr_t pages_dma; gfp_t gfp; - int order, ret; + int order; again: order = alloc_order; @@ -316,10 +316,9 @@ again: /* Map the pages */ pages_dma = dma_map_page(pdata->dev, pages, 0, PAGE_SIZE << order, DMA_FROM_DEVICE); - ret = dma_mapping_error(pdata->dev, pages_dma); - if (ret) { + if (dma_mapping_error(pdata->dev, pages_dma)) { put_page(pages); - return ret; + return -ENOMEM; } pa->pages = pages; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c index f2d8063a2cef..08c9fa6ca71f 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c @@ -11,6 +11,7 @@ #include "aq_ethtool.h" #include "aq_nic.h" +#include "aq_vec.h" static void aq_ethtool_get_regs(struct net_device *ndev, struct ethtool_regs *regs, void *p) @@ -284,6 +285,117 @@ static int aq_ethtool_set_coalesce(struct net_device *ndev, return aq_nic_update_interrupt_moderation_settings(aq_nic); } +static int aq_ethtool_nway_reset(struct net_device *ndev) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + + if (unlikely(!aq_nic->aq_fw_ops->renegotiate)) + return -EOPNOTSUPP; + + if (netif_running(ndev)) + return aq_nic->aq_fw_ops->renegotiate(aq_nic->aq_hw); + + return 0; +} + +static void aq_ethtool_get_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *pause) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + + pause->autoneg = 0; + + if (aq_nic->aq_hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX) + pause->rx_pause = 1; + if (aq_nic->aq_hw->aq_nic_cfg->flow_control & AQ_NIC_FC_TX) + pause->tx_pause = 1; +} + +static int aq_ethtool_set_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *pause) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + int err = 0; + + if (!aq_nic->aq_fw_ops->set_flow_control) + return -EOPNOTSUPP; + + if (pause->autoneg == AUTONEG_ENABLE) + return -EOPNOTSUPP; + + if (pause->rx_pause) + aq_nic->aq_hw->aq_nic_cfg->flow_control |= AQ_NIC_FC_RX; + else + aq_nic->aq_hw->aq_nic_cfg->flow_control &= ~AQ_NIC_FC_RX; + + if (pause->tx_pause) + aq_nic->aq_hw->aq_nic_cfg->flow_control |= AQ_NIC_FC_TX; + else + aq_nic->aq_hw->aq_nic_cfg->flow_control &= ~AQ_NIC_FC_TX; + + err = aq_nic->aq_fw_ops->set_flow_control(aq_nic->aq_hw); + + return err; +} + +static void aq_get_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ring) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_nic_cfg_s *aq_nic_cfg = aq_nic_get_cfg(aq_nic); + + ring->rx_pending = aq_nic_cfg->rxds; + ring->tx_pending = aq_nic_cfg->txds; + + ring->rx_max_pending = aq_nic_cfg->aq_hw_caps->rxds_max; + ring->tx_max_pending = aq_nic_cfg->aq_hw_caps->txds_max; +} + +static int aq_set_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ring) +{ + int err = 0; + bool ndev_running = false; 
+ struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_nic_cfg_s *aq_nic_cfg = aq_nic_get_cfg(aq_nic); + const struct aq_hw_caps_s *hw_caps = aq_nic_cfg->aq_hw_caps; + + if (ring->rx_mini_pending || ring->rx_jumbo_pending) { + err = -EOPNOTSUPP; + goto err_exit; + } + + if (netif_running(ndev)) { + ndev_running = true; + dev_close(ndev); + } + + aq_nic_free_vectors(aq_nic); + + aq_nic_cfg->rxds = max(ring->rx_pending, hw_caps->rxds_min); + aq_nic_cfg->rxds = min(aq_nic_cfg->rxds, hw_caps->rxds_max); + aq_nic_cfg->rxds = ALIGN(aq_nic_cfg->rxds, AQ_HW_RXD_MULTIPLE); + + aq_nic_cfg->txds = max(ring->tx_pending, hw_caps->txds_min); + aq_nic_cfg->txds = min(aq_nic_cfg->txds, hw_caps->txds_max); + aq_nic_cfg->txds = ALIGN(aq_nic_cfg->txds, AQ_HW_TXD_MULTIPLE); + + for (aq_nic->aq_vecs = 0; aq_nic->aq_vecs < aq_nic_cfg->vecs; + aq_nic->aq_vecs++) { + aq_nic->aq_vec[aq_nic->aq_vecs] = + aq_vec_alloc(aq_nic, aq_nic->aq_vecs, aq_nic_cfg); + if (unlikely(!aq_nic->aq_vec[aq_nic->aq_vecs])) { + err = -ENOMEM; + goto err_exit; + } + } + if (ndev_running) + err = dev_open(ndev); + +err_exit: + return err; +} + const struct ethtool_ops aq_ethtool_ops = { .get_link = aq_ethtool_get_link, .get_regs_len = aq_ethtool_get_regs_len, @@ -291,6 +403,11 @@ const struct ethtool_ops aq_ethtool_ops = { .get_drvinfo = aq_ethtool_get_drvinfo, .get_strings = aq_ethtool_get_strings, .get_rxfh_indir_size = aq_ethtool_get_rss_indir_size, + .nway_reset = aq_ethtool_nway_reset, + .get_ringparam = aq_get_ringparam, + .set_ringparam = aq_set_ringparam, + .get_pauseparam = aq_ethtool_get_pauseparam, + .set_pauseparam = aq_ethtool_set_pauseparam, .get_rxfh_key_size = aq_ethtool_get_rss_key_size, .get_rxfh = aq_ethtool_get_rss, .get_rxnfc = aq_ethtool_get_rxnfc, diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h index 2c6ebd91a9f2..5c00671f248d 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h @@ -24,8 +24,10 @@ struct aq_hw_caps_s { u64 link_speed_msk; unsigned int hw_priv_flags; u32 media_type; - u32 rxds; - u32 txds; + u32 rxds_max; + u32 txds_max; + u32 rxds_min; + u32 txds_min; u32 txhwb_alignment; u32 irq_mask; u32 vecs; @@ -98,6 +100,9 @@ struct aq_stats_s { #define AQ_HW_MEDIA_TYPE_TP 1U #define AQ_HW_MEDIA_TYPE_FIBRE 2U +#define AQ_HW_TXD_MULTIPLE 8U +#define AQ_HW_RXD_MULTIPLE 8U + #define AQ_HW_MULTICAST_ADDRESS_MAX 32U struct aq_hw_s { @@ -199,25 +204,30 @@ struct aq_hw_ops { int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version); - int (*hw_deinit)(struct aq_hw_s *self); - int (*hw_set_power)(struct aq_hw_s *self, unsigned int power_state); }; struct aq_fw_ops { int (*init)(struct aq_hw_s *self); + int (*deinit)(struct aq_hw_s *self); + int (*reset)(struct aq_hw_s *self); + int (*renegotiate)(struct aq_hw_s *self); + int (*get_mac_permanent)(struct aq_hw_s *self, u8 *mac); int (*set_link_speed)(struct aq_hw_s *self, u32 speed); - int (*set_state)(struct aq_hw_s *self, enum hal_atl_utils_fw_state_e state); + int (*set_state)(struct aq_hw_s *self, + enum hal_atl_utils_fw_state_e state); int (*update_link_status)(struct aq_hw_s *self); int (*update_stats)(struct aq_hw_s *self); + + int (*set_flow_control)(struct aq_hw_s *self); }; #endif /* AQ_HW_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 7a22d0257e04..26dc6782b475 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ 
b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -89,8 +89,8 @@ void aq_nic_cfg_start(struct aq_nic_s *self) aq_nic_rss_init(self, cfg->num_rss_queues); /*descriptors */ - cfg->rxds = min(cfg->aq_hw_caps->rxds, AQ_CFG_RXDS_DEF); - cfg->txds = min(cfg->aq_hw_caps->txds, AQ_CFG_TXDS_DEF); + cfg->rxds = min(cfg->aq_hw_caps->rxds_max, AQ_CFG_RXDS_DEF); + cfg->txds = min(cfg->aq_hw_caps->txds_max, AQ_CFG_TXDS_DEF); /*rss rings */ cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF); @@ -768,10 +768,14 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self, ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full); - if (self->aq_nic_cfg.flow_control) + if (self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX) ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); + if (self->aq_nic_cfg.flow_control & AQ_NIC_FC_TX) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Asym_Pause); + if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE) ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); else @@ -886,7 +890,7 @@ void aq_nic_deinit(struct aq_nic_s *self) aq_vec_deinit(aq_vec); if (self->power_state == AQ_HW_POWER_STATE_D0) { - (void)self->aq_hw_ops->hw_deinit(self->aq_hw); + (void)self->aq_fw_ops->deinit(self->aq_hw); } else { (void)self->aq_hw_ops->hw_set_power(self->aq_hw, self->power_state); diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c index 8cc6abadc03b..97addfa6f895 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c @@ -19,29 +19,31 @@ #include "hw_atl_a0_internal.h" #define DEFAULT_A0_BOARD_BASIC_CAPABILITIES \ - .is_64_dma = true, \ - .msix_irqs = 4U, \ - .irq_mask = ~0U, \ - .vecs = HW_ATL_A0_RSS_MAX, \ - .tcs = HW_ATL_A0_TC_MAX, \ - .rxd_alignment = 1U, \ - .rxd_size = HW_ATL_A0_RXD_SIZE, \ - .rxds = 248U, \ - .txd_alignment = 1U, \ - .txd_size = HW_ATL_A0_TXD_SIZE, \ - .txds = 8U * 1024U, \ - .txhwb_alignment = 4096U, \ - .tx_rings = HW_ATL_A0_TX_RINGS, \ - .rx_rings = HW_ATL_A0_RX_RINGS, \ - .hw_features = NETIF_F_HW_CSUM | \ - NETIF_F_RXHASH | \ - NETIF_F_RXCSUM | \ - NETIF_F_SG | \ - NETIF_F_TSO, \ + .is_64_dma = true, \ + .msix_irqs = 4U, \ + .irq_mask = ~0U, \ + .vecs = HW_ATL_A0_RSS_MAX, \ + .tcs = HW_ATL_A0_TC_MAX, \ + .rxd_alignment = 1U, \ + .rxd_size = HW_ATL_A0_RXD_SIZE, \ + .rxds_max = HW_ATL_A0_MAX_RXD, \ + .rxds_min = HW_ATL_A0_MIN_RXD, \ + .txd_alignment = 1U, \ + .txd_size = HW_ATL_A0_TXD_SIZE, \ + .txds_max = HW_ATL_A0_MAX_TXD, \ + .txds_min = HW_ATL_A0_MIN_RXD, \ + .txhwb_alignment = 4096U, \ + .tx_rings = HW_ATL_A0_TX_RINGS, \ + .rx_rings = HW_ATL_A0_RX_RINGS, \ + .hw_features = NETIF_F_HW_CSUM | \ + NETIF_F_RXHASH | \ + NETIF_F_RXCSUM | \ + NETIF_F_SG | \ + NETIF_F_TSO, \ .hw_priv_flags = IFF_UNICAST_FLT, \ - .flow_control = true, \ - .mtu = HW_ATL_A0_MTU_JUMBO, \ - .mac_regs_count = 88, \ + .flow_control = true, \ + .mtu = HW_ATL_A0_MTU_JUMBO, \ + .mac_regs_count = 88, \ .hw_alive_check_addr = 0x10U const struct aq_hw_caps_s hw_atl_a0_caps_aqc100 = { @@ -875,7 +877,6 @@ static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self, const struct aq_hw_ops hw_atl_ops_a0 = { .hw_set_mac_address = hw_atl_a0_hw_mac_addr_set, .hw_init = hw_atl_a0_hw_init, - .hw_deinit = hw_atl_utils_hw_deinit, .hw_set_power = hw_atl_utils_hw_set_power, .hw_reset = hw_atl_a0_hw_reset, .hw_start = hw_atl_a0_hw_start, diff --git 
a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h index 1d8855558d74..3c94cff57876 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h @@ -88,4 +88,12 @@ #define HW_ATL_A0_FW_VER_EXPECTED 0x01050006U +#define HW_ATL_A0_MIN_RXD \ + (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_RXD_MULTIPLE)) +#define HW_ATL_A0_MIN_TXD \ + (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_TXD_MULTIPLE)) + +#define HW_ATL_A0_MAX_RXD 8184U +#define HW_ATL_A0_MAX_TXD 8184U + #endif /* HW_ATL_A0_INTERNAL_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index 956860a69797..4809bf4baa34 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -20,30 +20,32 @@ #include "hw_atl_llh_internal.h" #define DEFAULT_B0_BOARD_BASIC_CAPABILITIES \ - .is_64_dma = true, \ - .msix_irqs = 4U, \ - .irq_mask = ~0U, \ - .vecs = HW_ATL_B0_RSS_MAX, \ - .tcs = HW_ATL_B0_TC_MAX, \ - .rxd_alignment = 1U, \ - .rxd_size = HW_ATL_B0_RXD_SIZE, \ - .rxds = 4U * 1024U, \ - .txd_alignment = 1U, \ - .txd_size = HW_ATL_B0_TXD_SIZE, \ - .txds = 8U * 1024U, \ - .txhwb_alignment = 4096U, \ - .tx_rings = HW_ATL_B0_TX_RINGS, \ - .rx_rings = HW_ATL_B0_RX_RINGS, \ - .hw_features = NETIF_F_HW_CSUM | \ - NETIF_F_RXCSUM | \ - NETIF_F_RXHASH | \ - NETIF_F_SG | \ - NETIF_F_TSO | \ - NETIF_F_LRO, \ - .hw_priv_flags = IFF_UNICAST_FLT, \ - .flow_control = true, \ - .mtu = HW_ATL_B0_MTU_JUMBO, \ - .mac_regs_count = 88, \ + .is_64_dma = true, \ + .msix_irqs = 4U, \ + .irq_mask = ~0U, \ + .vecs = HW_ATL_B0_RSS_MAX, \ + .tcs = HW_ATL_B0_TC_MAX, \ + .rxd_alignment = 1U, \ + .rxd_size = HW_ATL_B0_RXD_SIZE, \ + .rxds_max = HW_ATL_B0_MAX_RXD, \ + .rxds_min = HW_ATL_B0_MIN_RXD, \ + .txd_alignment = 1U, \ + .txd_size = HW_ATL_B0_TXD_SIZE, \ + .txds_max = HW_ATL_B0_MAX_TXD, \ + .txds_min = HW_ATL_B0_MIN_TXD, \ + .txhwb_alignment = 4096U, \ + .tx_rings = HW_ATL_B0_TX_RINGS, \ + .rx_rings = HW_ATL_B0_RX_RINGS, \ + .hw_features = NETIF_F_HW_CSUM | \ + NETIF_F_RXCSUM | \ + NETIF_F_RXHASH | \ + NETIF_F_SG | \ + NETIF_F_TSO | \ + NETIF_F_LRO, \ + .hw_priv_flags = IFF_UNICAST_FLT, \ + .flow_control = true, \ + .mtu = HW_ATL_B0_MTU_JUMBO, \ + .mac_regs_count = 88, \ .hw_alive_check_addr = 0x10U const struct aq_hw_caps_s hw_atl_b0_caps_aqc100 = { @@ -933,7 +935,6 @@ static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self, const struct aq_hw_ops hw_atl_ops_b0 = { .hw_set_mac_address = hw_atl_b0_hw_mac_addr_set, .hw_init = hw_atl_b0_hw_init, - .hw_deinit = hw_atl_utils_hw_deinit, .hw_set_power = hw_atl_utils_hw_set_power, .hw_reset = hw_atl_b0_hw_reset, .hw_start = hw_atl_b0_hw_start, diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h index 405d1455c222..28568f5fa74b 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h @@ -142,6 +142,14 @@ #define HW_ATL_INTR_MODER_MAX 0x1FF #define HW_ATL_INTR_MODER_MIN 0xFF +#define HW_ATL_B0_MIN_RXD \ + (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_RXD_MULTIPLE)) +#define HW_ATL_B0_MIN_TXD \ + (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_TXD_MULTIPLE)) + +#define HW_ATL_B0_MAX_RXD 8184U +#define HW_ATL_B0_MAX_TXD 8184U + /* 
HW layer capabilities */ #endif /* HW_ATL_B0_INTERNAL_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c index e652d86b87d4..c965e65d07db 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c @@ -30,10 +30,11 @@ #define HW_ATL_MPI_CONTROL_ADR 0x0368U #define HW_ATL_MPI_STATE_ADR 0x036CU -#define HW_ATL_MPI_STATE_MSK 0x00FFU -#define HW_ATL_MPI_STATE_SHIFT 0U -#define HW_ATL_MPI_SPEED_MSK 0xFFFF0000U -#define HW_ATL_MPI_SPEED_SHIFT 16U +#define HW_ATL_MPI_STATE_MSK 0x00FFU +#define HW_ATL_MPI_STATE_SHIFT 0U +#define HW_ATL_MPI_SPEED_MSK 0x00FF0000U +#define HW_ATL_MPI_SPEED_SHIFT 16U +#define HW_ATL_MPI_DIRTY_WAKE_MSK 0x02000000U #define HW_ATL_MPI_DAISY_CHAIN_STATUS 0x704 #define HW_ATL_MPI_BOOT_EXIT_CODE 0x388 @@ -525,19 +526,20 @@ static int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed) { u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR); - val = (val & HW_ATL_MPI_STATE_MSK) | (speed << HW_ATL_MPI_SPEED_SHIFT); + val = val & ~HW_ATL_MPI_SPEED_MSK; + val |= speed << HW_ATL_MPI_SPEED_SHIFT; aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, val); return 0; } -void hw_atl_utils_mpi_set(struct aq_hw_s *self, - enum hal_atl_utils_fw_state_e state, - u32 speed) +static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self, + enum hal_atl_utils_fw_state_e state) { int err = 0; u32 transaction_id = 0; struct hw_aq_atl_utils_mbox_header mbox; + u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR); if (state == MPI_RESET) { hw_atl_utils_mpi_read_mbox(self, &mbox); @@ -551,21 +553,21 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self, if (err < 0) goto err_exit; } + /* On interface DEINIT we disable DW (raise bit) + * Otherwise enable DW (clear bit) + */ + if (state == MPI_DEINIT || state == MPI_POWER) + val |= HW_ATL_MPI_DIRTY_WAKE_MSK; + else + val &= ~HW_ATL_MPI_DIRTY_WAKE_MSK; - aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, - (speed << HW_ATL_MPI_SPEED_SHIFT) | state); - -err_exit:; -} - -static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self, - enum hal_atl_utils_fw_state_e state) -{ - u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR); + /* Set new state bits */ + val = val & ~HW_ATL_MPI_STATE_MSK; + val |= state & HW_ATL_MPI_STATE_MSK; - val = state | (val & HW_ATL_MPI_SPEED_MSK); aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, val); - return 0; +err_exit: + return err; } int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self) @@ -721,16 +723,18 @@ void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p) *p = chip_features; } -int hw_atl_utils_hw_deinit(struct aq_hw_s *self) +static int hw_atl_fw1x_deinit(struct aq_hw_s *self) { - hw_atl_utils_mpi_set(self, MPI_DEINIT, 0x0U); + hw_atl_utils_mpi_set_speed(self, 0); + hw_atl_utils_mpi_set_state(self, MPI_DEINIT); return 0; } int hw_atl_utils_hw_set_power(struct aq_hw_s *self, unsigned int power_state) { - hw_atl_utils_mpi_set(self, MPI_POWER, 0x0U); + hw_atl_utils_mpi_set_speed(self, 0); + hw_atl_utils_mpi_set_state(self, MPI_POWER); return 0; } @@ -823,10 +827,12 @@ int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version) const struct aq_fw_ops aq_fw_1x_ops = { .init = hw_atl_utils_mpi_create, + .deinit = hw_atl_fw1x_deinit, .reset = NULL, .get_mac_permanent = hw_atl_utils_get_mac_permanent, .set_link_speed = hw_atl_utils_mpi_set_speed, .set_state = hw_atl_utils_mpi_set_state, .update_link_status = 
hw_atl_utils_mpi_get_link_status, .update_stats = hw_atl_utils_update_stats, + .set_flow_control = NULL, }; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h index cd8f18f39c61..b875590efcbd 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h @@ -239,6 +239,41 @@ enum hw_atl_fw2x_caps_hi { CAPS_HI_TRANSACTION_ID, }; +enum hw_atl_fw2x_ctrl { + CTRL_RESERVED1 = 0x00, + CTRL_RESERVED2, + CTRL_RESERVED3, + CTRL_PAUSE, + CTRL_ASYMMETRIC_PAUSE, + CTRL_RESERVED4, + CTRL_RESERVED5, + CTRL_RESERVED6, + CTRL_1GBASET_FD_EEE, + CTRL_2P5GBASET_FD_EEE, + CTRL_5GBASET_FD_EEE, + CTRL_10GBASET_FD_EEE, + CTRL_THERMAL_SHUTDOWN, + CTRL_PHY_LOGS, + CTRL_EEE_AUTO_DISABLE, + CTRL_PFC, + CTRL_WAKE_ON_LINK, + CTRL_CABLE_DIAG, + CTRL_TEMPERATURE, + CTRL_DOWNSHIFT, + CTRL_PTP_AVB, + CTRL_RESERVED7, + CTRL_LINK_DROP, + CTRL_SLEEP_PROXY, + CTRL_WOL, + CTRL_MAC_STOP, + CTRL_EXT_LOOPBACK, + CTRL_INT_LOOPBACK, + CTRL_RESERVED8, + CTRL_WOL_TIMER, + CTRL_STATISTICS, + CTRL_FORCE_RECONNECT, +}; + struct aq_hw_s; struct aq_fw_ops; struct aq_hw_caps_s; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c index 39cd3a27fe77..e37943760a58 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c @@ -28,6 +28,10 @@ #define HW_ATL_FW2X_MPI_STATE_ADDR 0x370 #define HW_ATL_FW2X_MPI_STATE2_ADDR 0x374 +static int aq_fw2x_set_link_speed(struct aq_hw_s *self, u32 speed); +static int aq_fw2x_set_state(struct aq_hw_s *self, + enum hal_atl_utils_fw_state_e state); + static int aq_fw2x_init(struct aq_hw_s *self) { int err = 0; @@ -39,6 +43,16 @@ static int aq_fw2x_init(struct aq_hw_s *self) return err; } +static int aq_fw2x_deinit(struct aq_hw_s *self) +{ + int err = aq_fw2x_set_link_speed(self, 0); + + if (!err) + err = aq_fw2x_set_state(self, MPI_DEINIT); + + return err; +} + static enum hw_atl_fw2x_rate link_speed_mask_2fw2x_ratemask(u32 speed) { enum hw_atl_fw2x_rate rate = 0; @@ -73,10 +87,38 @@ static int aq_fw2x_set_link_speed(struct aq_hw_s *self, u32 speed) return 0; } +static void aq_fw2x_set_mpi_flow_control(struct aq_hw_s *self, u32 *mpi_state) +{ + if (self->aq_nic_cfg->flow_control & AQ_NIC_FC_RX) + *mpi_state |= BIT(CAPS_HI_PAUSE); + else + *mpi_state &= ~BIT(CAPS_HI_PAUSE); + + if (self->aq_nic_cfg->flow_control & AQ_NIC_FC_TX) + *mpi_state |= BIT(CAPS_HI_ASYMMETRIC_PAUSE); + else + *mpi_state &= ~BIT(CAPS_HI_ASYMMETRIC_PAUSE); +} + static int aq_fw2x_set_state(struct aq_hw_s *self, enum hal_atl_utils_fw_state_e state) { - /* No explicit state in 2x fw */ + u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR); + + switch (state) { + case MPI_INIT: + mpi_state &= ~BIT(CAPS_HI_LINK_DROP); + aq_fw2x_set_mpi_flow_control(self, &mpi_state); + break; + case MPI_DEINIT: + mpi_state |= BIT(CAPS_HI_LINK_DROP); + break; + case MPI_RESET: + case MPI_POWER: + /* No actions */ + break; + } + aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_state); return 0; } @@ -173,12 +215,37 @@ static int aq_fw2x_update_stats(struct aq_hw_s *self) return hw_atl_utils_update_stats(self); } +static int aq_fw2x_renegotiate(struct aq_hw_s *self) +{ + u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR); + + mpi_opts |= BIT(CTRL_FORCE_RECONNECT); + + 
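Both firmware-ops reworks in these hunks (hw_atl_utils_mpi_set_speed()/_set_state() above, and the fw2x CTRL bit being OR-ed in here) follow the same read-modify-write discipline: read the MPI control word, touch only your own field, write it back. A self-contained model of that pattern, with the mask/shift constants copied from the hunk above and pure functions standing in for aq_hw_read_reg()/aq_hw_write_reg():

#include <stdint.h>
#include <stdio.h>

/* Illustrative copy of the MPI control layout shown above:
 * state in bits 0-7, speed in bits 16-23, dirty-wake flag in bit 25. */
#define MPI_STATE_MSK		0x000000FFu
#define MPI_SPEED_MSK		0x00FF0000u
#define MPI_SPEED_SHIFT		16
#define MPI_DIRTY_WAKE_MSK	0x02000000u

static uint32_t mpi_set_speed(uint32_t reg, uint32_t speed)
{
	reg &= ~MPI_SPEED_MSK;			/* clear only the speed field */
	reg |= speed << MPI_SPEED_SHIFT;	/* other fields stay intact */
	return reg;
}

static uint32_t mpi_set_state(uint32_t reg, uint32_t state, int deinit)
{
	if (deinit)
		reg |= MPI_DIRTY_WAKE_MSK;	/* raise DW on DEINIT/POWER */
	else
		reg &= ~MPI_DIRTY_WAKE_MSK;	/* enable DW otherwise */
	return (reg & ~MPI_STATE_MSK) | (state & MPI_STATE_MSK);
}

int main(void)
{
	uint32_t reg = 0x00020001u;		/* speed=2, state=1 */

	reg = mpi_set_speed(reg, 0x10);
	reg = mpi_set_state(reg, 0x04, 1);
	printf("0x%08x\n", (unsigned)reg);	/* 0x02100004 */
	return 0;
}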
aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts); + + return 0; +} + +static int aq_fw2x_set_flow_control(struct aq_hw_s *self) +{ + u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR); + + aq_fw2x_set_mpi_flow_control(self, &mpi_state); + + aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_state); + + return 0; +} + const struct aq_fw_ops aq_fw_2x_ops = { .init = aq_fw2x_init, + .deinit = aq_fw2x_deinit, .reset = NULL, + .renegotiate = aq_fw2x_renegotiate, .get_mac_permanent = aq_fw2x_get_mac_permanent, .set_link_speed = aq_fw2x_set_link_speed, .set_state = aq_fw2x_set_state, .update_link_status = aq_fw2x_update_link_status, .update_stats = aq_fw2x_update_stats, + .set_flow_control = aq_fw2x_set_flow_control, }; diff --git a/drivers/net/ethernet/aquantia/atlantic/ver.h b/drivers/net/ethernet/aquantia/atlantic/ver.h index a445de6837a6..94efc6477bdc 100644 --- a/drivers/net/ethernet/aquantia/atlantic/ver.h +++ b/drivers/net/ethernet/aquantia/atlantic/ver.h @@ -12,8 +12,8 @@ #define NIC_MAJOR_DRIVER_VERSION 2 #define NIC_MINOR_DRIVER_VERSION 0 -#define NIC_BUILD_DRIVER_VERSION 2 -#define NIC_REVISION_DRIVER_VERSION 1 +#define NIC_BUILD_DRIVER_VERSION 3 +#define NIC_REVISION_DRIVER_VERSION 0 #define AQ_CFG_DRV_VERSION_SUFFIX "-kern" diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 5e5022fa1d04..6d3221134927 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -1279,7 +1279,6 @@ static void alx_check_link(struct alx_priv *alx) struct alx_hw *hw = &alx->hw; unsigned long flags; int old_speed; - u8 old_duplex; int err; /* clear PHY internal interrupt status, otherwise the main @@ -1288,7 +1287,6 @@ static void alx_check_link(struct alx_priv *alx) alx_clear_phy_intr(hw); old_speed = hw->link_speed; - old_duplex = hw->duplex; err = alx_read_phy_link(hw); if (err < 0) goto reset; diff --git a/drivers/net/ethernet/aurora/Kconfig b/drivers/net/ethernet/aurora/Kconfig index 8ba7f8ff3434..392f564d8fd4 100644 --- a/drivers/net/ethernet/aurora/Kconfig +++ b/drivers/net/ethernet/aurora/Kconfig @@ -1,5 +1,6 @@ config NET_VENDOR_AURORA bool "Aurora VLSI devices" + default y help If you have a network (Ethernet) device belonging to this class, say Y. 
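The Broadcom SYSTEMPORT hunks just below move ndo_select_queue() to the new sb_dev-based signature while keeping the driver's own DSA logic: the Broadcom tag carries a (port, queue) pair, and the TX ring is looked up in a flattened ring_map with a fallback when no ring is provisioned. A compact userspace sketch of that lookup; the port/queue dimensions and the sample mapping are invented for illustration:

#include <stdio.h>

#define PORTS		4	/* hypothetical switch port count */
#define QUEUES_PER_PORT	8	/* TX queues provisioned per port */

/* Flattened [port][queue] -> ring index map, a stand-in for
 * priv->ring_map in the SYSTEMPORT hunk below. -1 = unmapped. */
static int ring_map[PORTS * QUEUES_PER_PORT];

static int select_tx_ring(unsigned int port, unsigned int q,
			  int fallback_ring)
{
	int ring;

	if (port >= PORTS || q >= QUEUES_PER_PORT)
		return fallback_ring;	/* malformed tag: fall back */

	ring = ring_map[q + port * QUEUES_PER_PORT];
	if (ring < 0)
		return fallback_ring;	/* ring not configured yet */
	return ring;
}

int main(void)
{
	int i;

	for (i = 0; i < PORTS * QUEUES_PER_PORT; i++)
		ring_map[i] = -1;
	ring_map[2 + 1 * QUEUES_PER_PORT] = 5;	 /* port 1, queue 2 -> ring 5 */

	printf("%d\n", select_tx_ring(1, 2, 0)); /* 5 */
	printf("%d\n", select_tx_ring(3, 7, 0)); /* 0: unmapped, fallback */
	return 0;
}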
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c index e94159507847..c8d1f8fa4713 100644 --- a/drivers/net/ethernet/aurora/nb8800.c +++ b/drivers/net/ethernet/aurora/nb8800.c @@ -304,12 +304,10 @@ static int nb8800_poll(struct napi_struct *napi, int budget) again: do { - struct nb8800_rx_buf *rxb; unsigned int len; next = (last + 1) % RX_DESC_COUNT; - rxb = &priv->rx_bufs[next]; rxd = &priv->rx_descs[next]; if (!rxd->report) diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index 4c3bfde6e8de..b7aa8ad96dfb 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -61,7 +61,7 @@ config BCM63XX_ENET config BCMGENET tristate "Broadcom GENET internal MAC support" - depends on OF && HAS_IOMEM + depends on HAS_IOMEM select MII select PHYLIB select FIXED_PHY @@ -181,7 +181,7 @@ config BGMAC_PLATFORM config SYSTEMPORT tristate "Broadcom SYSTEMPORT internal MAC support" - depends on OF + depends on HAS_IOMEM depends on NET_DSA || !NET_DSA select MII select PHYLIB diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index a1f60f89e059..631617d95769 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -2107,7 +2107,7 @@ static const struct ethtool_ops bcm_sysport_ethtool_ops = { }; static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, + struct net_device *sb_dev, select_queue_fallback_t fallback) { struct bcm_sysport_priv *priv = netdev_priv(dev); @@ -2116,7 +2116,7 @@ static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb, unsigned int q, port; if (!netdev_uses_dsa(dev)) - return fallback(dev, skb); + return fallback(dev, skb, NULL); /* DSA tagging layer will have configured the correct queue */ q = BRCM_TAG_GET_QUEUE(queue); @@ -2124,7 +2124,7 @@ static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb, tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues]; if (unlikely(!tx_ring)) - return fallback(dev, skb); + return fallback(dev, skb, NULL); return tx_ring->index; } diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index e6ea8e61f96d..4c94d9218bba 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -236,7 +236,6 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring) { struct device *dma_dev = bgmac->dma_dev; int empty_slot; - bool freed = false; unsigned bytes_compl = 0, pkts_compl = 0; /* The last slot that hardware didn't consume yet */ @@ -279,7 +278,6 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring) slot->dma_addr = 0; ring->start++; - freed = true; } if (!pkts_compl) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index af7b5a4d8ba0..5a727d4729da 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -1910,7 +1910,8 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw) } u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { struct bnx2x *bp = netdev_priv(dev); @@ -1932,7 +1933,8 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, } /* select 
a non-FCoE queue */ - return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos); + return fallback(dev, skb, NULL) % + (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos); } void bnx2x_set_num_queues(struct bnx2x *bp) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index a8ce5c55bbb0..0e508e5defce 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -497,7 +497,8 @@ int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, /* select_queue callback */ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback); + struct net_device *sb_dev, + select_queue_fallback_t fallback); static inline void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 22243c480a05..98d4c5a3ff21 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -6339,6 +6339,7 @@ int bnx2x_set_led(struct link_params *params, */ if (!vars->link_up) break; + /* else: fall through */ case LED_MODE_ON: if (((params->phy[EXT_PHY1].type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) || @@ -12521,11 +12522,13 @@ static void bnx2x_phy_def_cfg(struct link_params *params, switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) { case PORT_FEATURE_LINK_SPEED_10M_HALF: phy->req_duplex = DUPLEX_HALF; + /* fall through */ case PORT_FEATURE_LINK_SPEED_10M_FULL: phy->req_line_speed = SPEED_10; break; case PORT_FEATURE_LINK_SPEED_100M_HALF: phy->req_duplex = DUPLEX_HALF; + /* fall through */ case PORT_FEATURE_LINK_SPEED_100M_FULL: phy->req_line_speed = SPEED_100; break; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 57348f2b49a3..71362b7f6040 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -8561,11 +8561,11 @@ int bnx2x_set_int_mode(struct bnx2x *bp) bp->num_queues, 1 + bp->num_cnic_queues); - /* falling through... */ + /* fall through */ case BNX2X_INT_MODE_MSI: bnx2x_enable_msi(bp); - /* falling through... 
*/ + /* fall through */ case BNX2X_INT_MODE_INTX: bp->num_ethernet_queues = 1; bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 8baf9d3eb4b1..3f4d2c8da21a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -3258,7 +3258,7 @@ static int bnx2x_mcast_validate_e2(struct bnx2x *bp, /* DEL command deletes all currently configured MACs */ case BNX2X_MCAST_CMD_DEL: o->set_registry_size(o, 0); - /* Don't break */ + /* fall through */ /* RESTORE command will restore the entire multicast configuration */ case BNX2X_MCAST_CMD_RESTORE: @@ -3592,7 +3592,7 @@ static int bnx2x_mcast_validate_e1(struct bnx2x *bp, /* DEL command deletes all currently configured MACs */ case BNX2X_MCAST_CMD_DEL: o->set_registry_size(o, 0); - /* Don't break */ + /* fall through */ /* RESTORE command will restore the entire multicast configuration */ case BNX2X_MCAST_CMD_RESTORE: diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index dc77bfded865..62da46537734 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -1827,6 +1827,7 @@ get_vf: DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n", vf->abs_vfid, qidx); bnx2x_vf_handle_rss_update_eqe(bp, vf); + /* fall through */ case EVENT_RING_OPCODE_VF_FLR: /* Do nothing for now */ return 0; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 4394c1162be4..c612d74451a7 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1727,7 +1727,7 @@ static int bnxt_async_event_process(struct bnxt *bp, speed); } set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event); - /* fall thru */ + /* fall through */ } case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event); @@ -3012,13 +3012,6 @@ static void bnxt_free_hwrm_resources(struct bnxt *bp) bp->hwrm_cmd_resp_dma_addr); bp->hwrm_cmd_resp_addr = NULL; - if (bp->hwrm_dbg_resp_addr) { - dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE, - bp->hwrm_dbg_resp_addr, - bp->hwrm_dbg_resp_dma_addr); - - bp->hwrm_dbg_resp_addr = NULL; - } } static int bnxt_alloc_hwrm_resources(struct bnxt *bp) @@ -3030,12 +3023,6 @@ static int bnxt_alloc_hwrm_resources(struct bnxt *bp) GFP_KERNEL); if (!bp->hwrm_cmd_resp_addr) return -ENOMEM; - bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev, - HWRM_DBG_REG_BUF_SIZE, - &bp->hwrm_dbg_resp_dma_addr, - GFP_KERNEL); - if (!bp->hwrm_dbg_resp_addr) - netdev_warn(bp->dev, "fail to alloc debug register dma mem\n"); return 0; } @@ -7991,7 +7978,7 @@ static int bnxt_setup_tc_block(struct net_device *dev, switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, bnxt_setup_tc_block_cb, - bp, bp); + bp, bp, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, bnxt_setup_tc_block_cb, bp); return 0; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 91575ef97c8c..934aa11c82eb 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1287,9 +1287,6 @@ struct bnxt { dma_addr_t hwrm_short_cmd_req_dma_addr; void *hwrm_cmd_resp_addr; dma_addr_t hwrm_cmd_resp_dma_addr; - void *hwrm_dbg_resp_addr; - dma_addr_t 
hwrm_dbg_resp_dma_addr; -#define HWRM_DBG_REG_BUF_SIZE 128 struct rx_port_stats *hw_rx_port_stats; struct tx_port_stats *hw_tx_port_stats; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index 402fa32f7a88..7bd96ab4f7c5 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -21,16 +21,99 @@ static const struct devlink_ops bnxt_dl_ops = { #endif /* CONFIG_BNXT_SRIOV */ }; +static const struct bnxt_dl_nvm_param nvm_params[] = { + {DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV, NVM_OFF_ENABLE_SRIOV, + BNXT_NVM_SHARED_CFG, 1}, +}; + +static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, + int msg_len, union devlink_param_value *val) +{ + struct hwrm_nvm_variable_input *req = msg; + void *data_addr = NULL, *buf = NULL; + struct bnxt_dl_nvm_param nvm_param; + int bytesize, idx = 0, rc, i; + dma_addr_t data_dma_addr; + + /* Get/Set NVM CFG parameter is supported only on PFs */ + if (BNXT_VF(bp)) + return -EPERM; + + for (i = 0; i < ARRAY_SIZE(nvm_params); i++) { + if (nvm_params[i].id == param_id) { + nvm_param = nvm_params[i]; + break; + } + } + + if (nvm_param.dir_type == BNXT_NVM_PORT_CFG) + idx = bp->pf.port_id; + else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG) + idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID; + + bytesize = roundup(nvm_param.num_bits, BITS_PER_BYTE) / BITS_PER_BYTE; + if (nvm_param.num_bits == 1) + buf = &val->vbool; + + data_addr = dma_zalloc_coherent(&bp->pdev->dev, bytesize, + &data_dma_addr, GFP_KERNEL); + if (!data_addr) + return -ENOMEM; + + req->data_addr = cpu_to_le64(data_dma_addr); + req->data_len = cpu_to_le16(nvm_param.num_bits); + req->option_num = cpu_to_le16(nvm_param.offset); + req->index_0 = cpu_to_le16(idx); + if (idx) + req->dimensions = cpu_to_le16(1); + + if (req->req_type == HWRM_NVM_SET_VARIABLE) + memcpy(data_addr, buf, bytesize); + + rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT); + if (!rc && req->req_type == HWRM_NVM_GET_VARIABLE) + memcpy(buf, data_addr, bytesize); + + dma_free_coherent(&bp->pdev->dev, bytesize, data_addr, data_dma_addr); + if (rc) + return -EIO; + return 0; +} + +static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct hwrm_nvm_get_variable_input req = {0}; + struct bnxt *bp = bnxt_get_bp_from_dl(dl); + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_VARIABLE, -1, -1); + return bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val); +} + +static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct hwrm_nvm_set_variable_input req = {0}; + struct bnxt *bp = bnxt_get_bp_from_dl(dl); + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_SET_VARIABLE, -1, -1); + return bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val); +} + +static const struct devlink_param bnxt_dl_params[] = { + DEVLINK_PARAM_GENERIC(ENABLE_SRIOV, + BIT(DEVLINK_PARAM_CMODE_PERMANENT), + bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set, + NULL), +}; + int bnxt_dl_register(struct bnxt *bp) { struct devlink *dl; int rc; - if (!pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV)) - return 0; - - if (bp->hwrm_spec_code < 0x10803) { - netdev_warn(bp->dev, "Firmware does not support SR-IOV E-Switch SWITCHDEV mode.\n"); + if (bp->hwrm_spec_code < 0x10600) { + netdev_warn(bp->dev, "Firmware does not support NVM params"); return -ENOTSUPP; } @@ -41,16 +124,34 @@ int bnxt_dl_register(struct bnxt *bp) } bnxt_link_bp_to_dl(bp, 
dl); - bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY; + + /* Add switchdev eswitch mode setting, if SRIOV supported */ + if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV) && + bp->hwrm_spec_code > 0x10803) + bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY; + rc = devlink_register(dl, &bp->pdev->dev); if (rc) { - bnxt_link_bp_to_dl(bp, NULL); - devlink_free(dl); netdev_warn(bp->dev, "devlink_register failed. rc=%d", rc); - return rc; + goto err_dl_free; + } + + rc = devlink_params_register(dl, bnxt_dl_params, + ARRAY_SIZE(bnxt_dl_params)); + if (rc) { + netdev_warn(bp->dev, "devlink_params_register failed. rc=%d", + rc); + goto err_dl_unreg; } return 0; + +err_dl_unreg: + devlink_unregister(dl); +err_dl_free: + bnxt_link_bp_to_dl(bp, NULL); + devlink_free(dl); + return rc; } void bnxt_dl_unregister(struct bnxt *bp) @@ -60,6 +161,8 @@ void bnxt_dl_unregister(struct bnxt *bp) if (!dl) return; + devlink_params_unregister(dl, bnxt_dl_params, + ARRAY_SIZE(bnxt_dl_params)); devlink_unregister(dl); devlink_free(dl); } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h index e92a35d8b642..2f68dc048390 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h @@ -33,6 +33,21 @@ static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl) } } +#define NVM_OFF_ENABLE_SRIOV 401 + +enum bnxt_nvm_dir_type { + BNXT_NVM_SHARED_CFG = 40, + BNXT_NVM_PORT_CFG, + BNXT_NVM_FUNC_CFG, +}; + +struct bnxt_dl_nvm_param { + u16 id; + u16 offset; + u16 dir_type; + u16 num_bits; +}; + int bnxt_dl_register(struct bnxt *bp); void bnxt_dl_unregister(struct bnxt *bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index 0fe0ea8dce6c..c75d7fa6dab6 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -6201,6 +6201,19 @@ struct hwrm_nvm_install_update_cmd_err { u8 unused_0[7]; }; +struct hwrm_nvm_variable_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 data_addr; + __le16 data_len; + __le16 option_num; + __le16 dimensions; + __le16 index_0; +}; + /* hwrm_nvm_get_variable_input (size:320b/40B) */ struct hwrm_nvm_get_variable_input { __le16 req_type; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 491bd40a254d..139d96c5a023 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -1568,22 +1568,16 @@ void bnxt_tc_flow_stats_work(struct bnxt *bp) int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid, struct tc_cls_flower_offload *cls_flower) { - int rc = 0; - switch (cls_flower->command) { case TC_CLSFLOWER_REPLACE: - rc = bnxt_tc_add_flow(bp, src_fid, cls_flower); - break; - + return bnxt_tc_add_flow(bp, src_fid, cls_flower); case TC_CLSFLOWER_DESTROY: - rc = bnxt_tc_del_flow(bp, cls_flower); - break; - + return bnxt_tc_del_flow(bp, cls_flower); case TC_CLSFLOWER_STATS: - rc = bnxt_tc_get_flow_stats(bp, cls_flower); - break; + return bnxt_tc_get_flow_stats(bp, cls_flower); + default: + return -EOPNOTSUPP; } - return rc; } static const struct rhashtable_params bnxt_tc_flow_ht_params = { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c index 05d405905906..e31f5d803c13 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c 
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -173,7 +173,7 @@ static int bnxt_vf_rep_setup_tc_block(struct net_device *dev, case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, bnxt_vf_rep_setup_tc_block_cb, - vf_rep, vf_rep); + vf_rep, vf_rep, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, bnxt_vf_rep_setup_tc_block_cb, vf_rep); @@ -543,9 +543,14 @@ int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode) break; case DEVLINK_ESWITCH_MODE_SWITCHDEV: + if (bp->hwrm_spec_code < 0x10803) { + netdev_warn(bp->dev, "FW does not support SRIOV E-Switch SWITCHDEV mode\n"); + rc = -ENOTSUPP; + goto done; + } + if (pci_num_vf(bp->pdev) == 0) { - netdev_info(bp->dev, - "Enable VFs before setting switchdev mode"); + netdev_info(bp->dev, "Enable VFs before setting switchdev mode"); rc = -EPERM; goto done; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index 1f0e872d0667..0584d07c8c33 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -219,7 +219,6 @@ int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp) rc = bnxt_xdp_set(bp, xdp->prog); break; case XDP_QUERY_PROG: - xdp->prog_attached = !!bp->xdp_prog; xdp->prog_id = bp->xdp_prog ? bp->xdp_prog->aux->id : 0; rc = 0; break; diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 4fd829b5e65d..d83233ae4a15 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -2562,7 +2562,6 @@ static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid) static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe) { - struct fcoe_kwqe_destroy *req; union l5cm_specific_data l5_data; struct cnic_local *cp = dev->cnic_priv; struct bnx2x *bp = netdev_priv(dev->netdev); @@ -2571,7 +2570,6 @@ static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe) cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ); - req = (struct fcoe_kwqe_destroy *) kwqe; cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid); memset(&l5_data, 0, sizeof(l5_data)); @@ -4090,7 +4088,7 @@ static void cnic_cm_free_mem(struct cnic_dev *dev) { struct cnic_local *cp = dev->cnic_priv; - kfree(cp->csk_tbl); + kvfree(cp->csk_tbl); cp->csk_tbl = NULL; cnic_free_id_tbl(&cp->csk_port_tbl); } @@ -4100,8 +4098,8 @@ static int cnic_cm_alloc_mem(struct cnic_dev *dev) struct cnic_local *cp = dev->cnic_priv; u32 port_id; - cp->csk_tbl = kcalloc(MAX_CM_SK_TBL_SZ, sizeof(struct cnic_sock), - GFP_KERNEL); + cp->csk_tbl = kvcalloc(MAX_CM_SK_TBL_SZ, sizeof(struct cnic_sock), + GFP_KERNEL); if (!cp->csk_tbl) return -ENOMEM; @@ -5091,13 +5089,12 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev) struct cnic_local *cp = dev->cnic_priv; struct bnx2x *bp = netdev_priv(dev->netdev); struct cnic_eth_dev *ethdev = cp->ethdev; - int func, ret; + int ret; u32 pfid; dev->stats_addr = ethdev->addr_drv_info_to_mcp; cp->func = bp->pf_num; - func = CNIC_FUNC(cp); pfid = bp->pfid; ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ, diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index aa1374d0af93..d8dad07f826a 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -725,6 +725,7 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum) case TG3_APE_LOCK_GPIO: if (tg3_asic_rev(tp) == ASIC_REV_5761) return 0; + /* else: fall through */ case TG3_APE_LOCK_GRC: case TG3_APE_LOCK_MEM: 
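The tg3 hunks around here annotate long deliberate switch cascades; the clearest is the MAC_RCV_RULE sequence in tg3_reset_hw() below, where each case clears one receive rule and falls into the next, so a single entry point wipes a whole tail of rules. A standalone model of that cascade, with a plain array in place of the tw32() register writes:

#include <stdio.h>

/* Starting at 'limit', every case deliberately falls through
 * and clears one more slot, as in the MAC_RCV_RULE hunk below. */
static void clear_rules_from(unsigned int limit, unsigned int *rules)
{
	switch (limit) {
	case 16: rules[15] = 0; /* fall through */
	case 15: rules[14] = 0; /* fall through */
	case 14: rules[13] = 0; /* fall through */
	case 13: rules[12] = 0; /* fall through */
	case 12: rules[11] = 0; /* fall through */
	case 11: rules[10] = 0; /* fall through */
	case 10: rules[9] = 0;  /* fall through */
	case 9:  rules[8] = 0;  /* fall through */
	case 8:  rules[7] = 0;  /* fall through */
	case 7:  rules[6] = 0;  /* fall through */
	case 6:  rules[5] = 0;  /* fall through */
	case 5:  rules[4] = 0;
	default: break;
	}
}

int main(void)
{
	unsigned int rules[16];
	unsigned int i;

	for (i = 0; i < 16; i++)
		rules[i] = 1;
	clear_rules_from(12, rules);	/* clears slots 11 down to 4 */
	for (i = 0; i < 16; i++)
		printf("%u", rules[i]);
	printf("\n");			/* 1111000000001111 */
	return 0;
}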
if (!tp->pci_fn) @@ -785,6 +786,7 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum) case TG3_APE_LOCK_GPIO: if (tg3_asic_rev(tp) == ASIC_REV_5761) return; + /* else: fall through */ case TG3_APE_LOCK_GRC: case TG3_APE_LOCK_MEM: if (!tp->pci_fn) @@ -10719,28 +10721,40 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy) switch (limit) { case 16: tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0); + /* fall through */ case 15: tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0); + /* fall through */ case 14: tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0); + /* fall through */ case 13: tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0); + /* fall through */ case 12: tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0); + /* fall through */ case 11: tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0); + /* fall through */ case 10: tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0); + /* fall through */ case 9: tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0); + /* fall through */ case 8: tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0); + /* fall through */ case 7: tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0); + /* fall through */ case 6: tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0); + /* fall through */ case 5: tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0); + /* fall through */ case 4: /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */ case 3: diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig index 427d65a1a126..b9984015ca8c 100644 --- a/drivers/net/ethernet/cadence/Kconfig +++ b/drivers/net/ethernet/cadence/Kconfig @@ -2,7 +2,7 @@ # Atmel device configuration # -config NET_CADENCE +config NET_VENDOR_CADENCE bool "Cadence devices" depends on HAS_IOMEM default y @@ -16,7 +16,7 @@ config NET_CADENCE the remaining Atmel network card questions. If you say Y, you will be asked for your specific card in the following questions. -if NET_CADENCE +if NET_VENDOR_CADENCE config MACB tristate "Cadence MACB/GEM support" @@ -48,4 +48,4 @@ config MACB_PCI To compile this driver as a module, choose M here: the module will be called macb_pci. -endif # NET_CADENCE +endif # NET_VENDOR_CADENCE diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig index 92d88c5f76fb..5f03199a3acf 100644 --- a/drivers/net/ethernet/cavium/Kconfig +++ b/drivers/net/ethernet/cavium/Kconfig @@ -4,7 +4,6 @@ config NET_VENDOR_CAVIUM bool "Cavium ethernet drivers" - depends on PCI default y ---help--- Select this option if you want enable Cavium network support. @@ -36,7 +35,7 @@ config THUNDER_NIC_BGX tristate "Thunder MAC interface driver (BGX)" depends on 64BIT && PCI select PHYLIB - select MDIO_THUNDER + select MDIO_THUNDER if PCI select THUNDER_NIC_RGX ---help--- This driver supports programming and controlling of MAC @@ -46,7 +45,7 @@ config THUNDER_NIC_RGX tristate "Thunder MAC interface driver (RGX)" depends on 64BIT && PCI select PHYLIB - select MDIO_THUNDER + select MDIO_THUNDER if PCI ---help--- This driver supports configuring XCV block of RGX interface present on CN81XX chip. 
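In the cn23xx hunks that follow, both the PF and VF setup paths walk a queue range and rewrite each output-queue control CSR the same way: clear the info-pointer (IPTR) mode bit, set the data-pointer (DPTR) mode bit. A minimal sketch of that per-queue clear/set loop; the bit positions and the array standing in for octeon_read_csr()/octeon_write_csr() are invented:

#include <stdint.h>
#include <stdio.h>

#define MAX_QUEUES	8
#define CTL_IPTR	(1u << 4)	/* hypothetical positions; the real */
#define CTL_DPTR	(1u << 5)	/* CN23XX_PKT_OUTPUT_CTL_* differ */

static uint32_t oq_pkt_control[MAX_QUEUES];	/* stands in for the CSRs */

/* Mirror of the setup loop below: for every queue in [srn, ern),
 * clear IPTR mode and set DPTR mode in one read-modify-write pass. */
static void setup_output_regs(unsigned int srn, unsigned int ern)
{
	unsigned int q_no;
	uint32_t reg_val;

	for (q_no = srn; q_no < ern; q_no++) {
		reg_val = oq_pkt_control[q_no];	/* octeon_read_csr() */
		reg_val &= ~CTL_IPTR;		/* clear IPTR */
		reg_val |= CTL_DPTR;		/* set DPTR */
		oq_pkt_control[q_no] = reg_val;	/* octeon_write_csr() */
	}
}

int main(void)
{
	unsigned int q;

	for (q = 0; q < MAX_QUEUES; q++)
		oq_pkt_control[q] = CTL_IPTR;	/* pretend IPTR was on */
	setup_output_regs(0, MAX_QUEUES);
	for (q = 0; q < MAX_QUEUES; q++)
		printf("q%u: 0x%02x\n", q, (unsigned)oq_pkt_control[q]);
	return 0;				/* every queue ends at 0x20 */
}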
@@ -67,6 +66,7 @@ config LIQUIDIO tristate "Cavium LiquidIO support" depends on 64BIT && PCI depends on MAY_USE_DEVLINK + depends on PCI imply PTP_1588_CLOCK select FW_LOADER select LIBCRC32C diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c index 929d485a3a2f..9f4f3c1d5043 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c @@ -493,6 +493,9 @@ static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct) for (q_no = srn; q_no < ern; q_no++) { reg_val = octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no)); + /* clear IPTR */ + reg_val &= ~CN23XX_PKT_OUTPUT_CTL_IPTR; + /* set DPTR */ reg_val |= CN23XX_PKT_OUTPUT_CTL_DPTR; @@ -1414,50 +1417,6 @@ int validate_cn23xx_pf_config_info(struct octeon_device *oct, return 0; } -void cn23xx_dump_iq_regs(struct octeon_device *oct) -{ - u32 regval, q_no; - - dev_dbg(&oct->pci_dev->dev, "SLI_IQ_DOORBELL_0 [0x%x]: 0x%016llx\n", - CN23XX_SLI_IQ_DOORBELL(0), - CVM_CAST64(octeon_read_csr64 - (oct, CN23XX_SLI_IQ_DOORBELL(0)))); - - dev_dbg(&oct->pci_dev->dev, "SLI_IQ_BASEADDR_0 [0x%x]: 0x%016llx\n", - CN23XX_SLI_IQ_BASE_ADDR64(0), - CVM_CAST64(octeon_read_csr64 - (oct, CN23XX_SLI_IQ_BASE_ADDR64(0)))); - - dev_dbg(&oct->pci_dev->dev, "SLI_IQ_FIFO_RSIZE_0 [0x%x]: 0x%016llx\n", - CN23XX_SLI_IQ_SIZE(0), - CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_IQ_SIZE(0)))); - - dev_dbg(&oct->pci_dev->dev, "SLI_CTL_STATUS [0x%x]: 0x%016llx\n", - CN23XX_SLI_CTL_STATUS, - CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_CTL_STATUS))); - - for (q_no = 0; q_no < CN23XX_MAX_INPUT_QUEUES; q_no++) { - dev_dbg(&oct->pci_dev->dev, "SLI_PKT[%d]_INPUT_CTL [0x%x]: 0x%016llx\n", - q_no, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), - CVM_CAST64(octeon_read_csr64 - (oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)))); - } - - pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, &regval); - dev_dbg(&oct->pci_dev->dev, "Config DevCtl [0x%x]: 0x%08x\n", - CN23XX_CONFIG_PCIE_DEVCTL, regval); - - dev_dbg(&oct->pci_dev->dev, "SLI_PRT[%d]_CFG [0x%llx]: 0x%016llx\n", - oct->pcie_port, CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port), - CVM_CAST64(lio_pci_readq( - oct, CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port)))); - - dev_dbg(&oct->pci_dev->dev, "SLI_S2M_PORT[%d]_CTL [0x%x]: 0x%016llx\n", - oct->pcie_port, CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port), - CVM_CAST64(octeon_read_csr64( - oct, CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port)))); -} - int cn23xx_fw_loaded(struct octeon_device *oct) { u64 val; diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c index 9338a0008378..962bb62933db 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c @@ -165,6 +165,9 @@ static void cn23xx_vf_setup_global_output_regs(struct octeon_device *oct) reg_val = octeon_read_csr(oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no)); + /* clear IPTR */ + reg_val &= ~CN23XX_PKT_OUTPUT_CTL_IPTR; + /* set DPTR */ reg_val |= CN23XX_PKT_OUTPUT_CTL_DPTR; @@ -379,7 +382,7 @@ void cn23xx_vf_ask_pf_to_do_flr(struct octeon_device *oct) mbox_cmd.recv_len = 0; mbox_cmd.recv_status = 0; mbox_cmd.fn = NULL; - mbox_cmd.fn_arg = 0; + mbox_cmd.fn_arg = NULL; octeon_mbox_write(oct, &mbox_cmd); } @@ -679,33 +682,3 @@ int cn23xx_setup_octeon_vf_device(struct octeon_device *oct) return 0; } - -void cn23xx_dump_vf_iq_regs(struct octeon_device *oct) -{ - u32 regval, q_no; - - 
dev_dbg(&oct->pci_dev->dev, "SLI_IQ_DOORBELL_0 [0x%x]: 0x%016llx\n", - CN23XX_VF_SLI_IQ_DOORBELL(0), - CVM_CAST64(octeon_read_csr64( - oct, CN23XX_VF_SLI_IQ_DOORBELL(0)))); - - dev_dbg(&oct->pci_dev->dev, "SLI_IQ_BASEADDR_0 [0x%x]: 0x%016llx\n", - CN23XX_VF_SLI_IQ_BASE_ADDR64(0), - CVM_CAST64(octeon_read_csr64( - oct, CN23XX_VF_SLI_IQ_BASE_ADDR64(0)))); - - dev_dbg(&oct->pci_dev->dev, "SLI_IQ_FIFO_RSIZE_0 [0x%x]: 0x%016llx\n", - CN23XX_VF_SLI_IQ_SIZE(0), - CVM_CAST64(octeon_read_csr64(oct, CN23XX_VF_SLI_IQ_SIZE(0)))); - - for (q_no = 0; q_no < oct->sriov_info.rings_per_vf; q_no++) { - dev_dbg(&oct->pci_dev->dev, "SLI_PKT[%d]_INPUT_CTL [0x%x]: 0x%016llx\n", - q_no, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no), - CVM_CAST64(octeon_read_csr64( - oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no)))); - } - - pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, &regval); - dev_dbg(&oct->pci_dev->dev, "Config DevCtl [0x%x]: 0x%08x\n", - CN23XX_CONFIG_PCIE_DEVCTL, regval); -} diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 7e8454d3b1ad..8ef87a76692b 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -687,7 +687,7 @@ static void lio_sync_octeon_time(struct work_struct *work) lt = (struct lio_time *)sc->virtdptr; /* Get time of the day */ - getnstimeofday64(&ts); + ktime_get_real_ts64(&ts); lt->sec = ts.tv_sec; lt->nsec = ts.tv_nsec; octeon_swap_8B_data((u64 *)lt, (sizeof(struct lio_time)) / 8); @@ -2631,7 +2631,7 @@ static int liquidio_vlan_rx_kill_vid(struct net_device *netdev, ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); if (ret < 0) { - dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n", + dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n", ret); } return ret; @@ -2909,7 +2909,7 @@ static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx, vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */ nctrl.ncmd.s.more = 0; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.cb_fn = 0; + nctrl.cb_fn = NULL; nctrl.wait_time = LIO_CMD_WAIT_TM; octnet_send_nic_ctrl_pkt(oct, &nctrl); @@ -3068,7 +3068,7 @@ static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx, nctrl.ncmd.s.param2 = linkstate; nctrl.ncmd.s.more = 0; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.cb_fn = 0; + nctrl.cb_fn = NULL; nctrl.wait_time = LIO_CMD_WAIT_TM; octnet_send_nic_ctrl_pkt(oct, &nctrl); @@ -3302,7 +3302,9 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) { struct lio *lio = NULL; struct net_device *netdev; - u8 mac[6], i, j, *fw_ver; + u8 mac[6], i, j, *fw_ver, *micro_ver; + unsigned long micro; + u32 cur_ver; struct octeon_soft_command *sc; struct liquidio_if_cfg_context *ctx; struct liquidio_if_cfg_resp *resp; @@ -3432,6 +3434,14 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) fw_ver); } + /* extract micro version field; point past '<maj>.<min>.' 
*/ + micro_ver = fw_ver + strlen(LIQUIDIO_BASE_VERSION) + 1; + if (kstrtoul(micro_ver, 10, &micro) != 0) + micro = 0; + octeon_dev->fw_info.ver.maj = LIQUIDIO_BASE_MAJOR_VERSION; + octeon_dev->fw_info.ver.min = LIQUIDIO_BASE_MINOR_VERSION; + octeon_dev->fw_info.ver.rev = micro; + octeon_swap_8B_data((u64 *)(&resp->cfg_info), (sizeof(struct liquidio_if_cfg_info)) >> 3); @@ -3572,9 +3582,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) { u8 vfmac[ETH_ALEN]; - random_ether_addr(&vfmac[0]); - if (__liquidio_set_vf_mac(netdev, j, - &vfmac[0], false)) { + eth_random_addr(vfmac); + if (__liquidio_set_vf_mac(netdev, j, vfmac, false)) { dev_err(&octeon_dev->pci_dev->dev, "Error setting VF%d MAC address\n", j); @@ -3675,7 +3684,19 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) OCTEON_CN2350_25GB_SUBSYS_ID || octeon_dev->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) { - liquidio_get_speed(lio); + cur_ver = OCT_FW_VER(octeon_dev->fw_info.ver.maj, + octeon_dev->fw_info.ver.min, + octeon_dev->fw_info.ver.rev); + + /* speed control unsupported in f/w older than 1.7.2 */ + if (cur_ver < OCT_FW_VER(1, 7, 2)) { + dev_info(&octeon_dev->pci_dev->dev, + "speed setting not supported by f/w."); + octeon_dev->speed_setting = 25; + octeon_dev->no_speed_setting = 1; + } else { + liquidio_get_speed(lio); + } if (octeon_dev->speed_setting == 0) { octeon_dev->speed_setting = 25; diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index 7fa0212873ac..b77835724dc8 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -1693,7 +1693,7 @@ liquidio_vlan_rx_kill_vid(struct net_device *netdev, ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); if (ret < 0) { - dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n", + dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n", ret); } return ret; diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c index 7f97ae48efed..0cc2338d8d2a 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_console.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c @@ -902,7 +902,7 @@ int octeon_download_firmware(struct octeon_device *oct, const u8 *data, * * Octeon always uses UTC time. so timezone information is not sent. 
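The octeon_device.h hunk a little further on packs the three parsed fields into one comparable integer via OCT_FW_VER(maj, min, rev), which is what lets the "older than 1.7.2" gate above be a single unsigned compare. A standalone check of that encoding; FW_VER here is a local stand-in for the real macro:

#include <stdint.h>
#include <stdio.h>

/* Same packing as the OCT_FW_VER() macro introduced below:
 * one byte each for major, minor and micro revision. */
#define FW_VER(maj, min, rev) \
	(((uint32_t)(maj) << 16) | ((uint32_t)(min) << 8) | ((uint32_t)(rev)))

int main(void)
{
	uint32_t cur = FW_VER(1, 6, 9);

	/* an ordinary integer compare now orders versions correctly,
	 * as long as every field stays below 256 */
	printf("%s\n", cur < FW_VER(1, 7, 2) ? "too old for speed control"
					     : "speed control supported");
	printf("0x%06x 0x%06x\n", (unsigned)cur,
	       (unsigned)FW_VER(1, 7, 2));	/* 0x010609 0x010702 */
	return 0;
}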
*/ - getnstimeofday64(&ts); + ktime_get_real_ts64(&ts); ret = snprintf(boottime, MAX_BOOTTIME_SIZE, " time_sec=%lld time_nsec=%ld", (s64)ts.tv_sec, ts.tv_nsec); diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h index 94a4ed88d618..d99ca6ba23a4 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h @@ -288,8 +288,17 @@ struct oct_fw_info { */ u32 app_mode; char liquidio_firmware_version[32]; + /* Fields extracted from legacy string 'liquidio_firmware_version' */ + struct { + u8 maj; + u8 min; + u8 rev; + } ver; }; +#define OCT_FW_VER(maj, min, rev) \ + (((u32)(maj) << 16) | ((u32)(min) << 8) | ((u32)(rev))) + /* wrappers around work structs */ struct cavium_wk { struct delayed_work work; diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c index 1f2e75da28f8..d5d9e47daa4b 100644 --- a/drivers/net/ethernet/cavium/liquidio/request_manager.c +++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c @@ -110,8 +110,8 @@ int octeon_init_instr_queue(struct octeon_device *oct, memset(iq->request_list, 0, sizeof(*iq->request_list) * num_descs); - dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %llx count: %d\n", - iq_no, iq->base_addr, iq->base_addr_dma, iq->max_count); + dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %pad count: %d\n", + iq_no, iq->base_addr, &iq->base_addr_dma, iq->max_count); iq->txpciq.u64 = txpciq.u64; iq->fill_threshold = (u32)conf->db_min; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 135766c4296b..768f584f8392 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -1848,7 +1848,6 @@ static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp) case XDP_SETUP_PROG: return nicvf_xdp_setup(nic, xdp->prog); case XDP_QUERY_PROG: - xdp->prog_attached = !!nic->xdp_prog; xdp->prog_id = nic->xdp_prog ? 
nic->xdp_prog->aux->id : 0; return 0; default: diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h index 3c5057868ab3..36d25883d123 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h @@ -120,6 +120,8 @@ struct cudbg_mem_desc { u32 idx; }; +#define CUDBG_MEMINFO_REV 1 + struct cudbg_meminfo { struct cudbg_mem_desc avail[4]; struct cudbg_mem_desc mem[ARRAY_SIZE(cudbg_region) + 3]; @@ -137,6 +139,9 @@ struct cudbg_meminfo { u32 port_alloc[4]; u32 loopback_used[NCHAN]; u32 loopback_alloc[NCHAN]; + u32 p_structs_free_cnt; + u32 free_rx_cnt; + u32 free_tx_cnt; }; struct cudbg_cim_pif_la { @@ -281,12 +286,18 @@ struct cudbg_tid_data { #define CUDBG_NUM_ULPTX 11 #define CUDBG_NUM_ULPTX_READ 512 +#define CUDBG_NUM_ULPTX_ASIC 6 +#define CUDBG_NUM_ULPTX_ASIC_READ 128 + +#define CUDBG_ULPTX_LA_REV 1 struct cudbg_ulptx_la { u32 rdptr[CUDBG_NUM_ULPTX]; u32 wrptr[CUDBG_NUM_ULPTX]; u32 rddata[CUDBG_NUM_ULPTX]; u32 rd_data[CUDBG_NUM_ULPTX][CUDBG_NUM_ULPTX_READ]; + u32 rdptr_asic[CUDBG_NUM_ULPTX_ASIC_READ]; + u32 rddata_asic[CUDBG_NUM_ULPTX_ASIC_READ][CUDBG_NUM_ULPTX_ASIC]; }; #define CUDBG_CHAC_PBT_ADDR 0x2800 diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index 0afcfe99bff3..d97e0d7e541a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -349,6 +349,11 @@ int cudbg_fill_meminfo(struct adapter *padap, meminfo_buff->up_extmem2_hi = hi; lo = t4_read_reg(padap, TP_PMM_RX_MAX_PAGE_A); + for (i = 0, meminfo_buff->free_rx_cnt = 0; i < 2; i++) + meminfo_buff->free_rx_cnt += + FREERXPAGECOUNT_G(t4_read_reg(padap, + TP_FLM_FREE_RX_CNT_A)); + meminfo_buff->rx_pages_data[0] = PMRXMAXPAGE_G(lo); meminfo_buff->rx_pages_data[1] = t4_read_reg(padap, TP_PMM_RX_PAGE_SIZE_A) >> 10; @@ -356,6 +361,11 @@ int cudbg_fill_meminfo(struct adapter *padap, lo = t4_read_reg(padap, TP_PMM_TX_MAX_PAGE_A); hi = t4_read_reg(padap, TP_PMM_TX_PAGE_SIZE_A); + for (i = 0, meminfo_buff->free_tx_cnt = 0; i < 4; i++) + meminfo_buff->free_tx_cnt += + FREETXPAGECOUNT_G(t4_read_reg(padap, + TP_FLM_FREE_TX_CNT_A)); + meminfo_buff->tx_pages_data[0] = PMTXMAXPAGE_G(lo); meminfo_buff->tx_pages_data[1] = hi >= (1 << 20) ? 
(hi >> 20) : (hi >> 10); @@ -364,6 +374,8 @@ int cudbg_fill_meminfo(struct adapter *padap, meminfo_buff->tx_pages_data[3] = 1 << PMTXNUMCHN_G(lo); meminfo_buff->p_structs = t4_read_reg(padap, TP_CMM_MM_MAX_PSTRUCT_A); + meminfo_buff->p_structs_free_cnt = + FREEPSTRUCTCOUNT_G(t4_read_reg(padap, TP_FLM_FREE_PS_CNT_A)); for (i = 0; i < 4; i++) { if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5) @@ -1465,14 +1477,23 @@ int cudbg_collect_meminfo(struct cudbg_init *pdbg_init, struct adapter *padap = pdbg_init->adap; struct cudbg_buffer temp_buff = { 0 }; struct cudbg_meminfo *meminfo_buff; + struct cudbg_ver_hdr *ver_hdr; int rc; - rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_meminfo), + rc = cudbg_get_buff(pdbg_init, dbg_buff, + sizeof(struct cudbg_ver_hdr) + + sizeof(struct cudbg_meminfo), &temp_buff); if (rc) return rc; - meminfo_buff = (struct cudbg_meminfo *)temp_buff.data; + ver_hdr = (struct cudbg_ver_hdr *)temp_buff.data; + ver_hdr->signature = CUDBG_ENTITY_SIGNATURE; + ver_hdr->revision = CUDBG_MEMINFO_REV; + ver_hdr->size = sizeof(struct cudbg_meminfo); + + meminfo_buff = (struct cudbg_meminfo *)(temp_buff.data + + sizeof(*ver_hdr)); rc = cudbg_fill_meminfo(padap, meminfo_buff); if (rc) { cudbg_err->sys_err = rc; @@ -2586,15 +2607,24 @@ int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init, struct adapter *padap = pdbg_init->adap; struct cudbg_buffer temp_buff = { 0 }; struct cudbg_ulptx_la *ulptx_la_buff; + struct cudbg_ver_hdr *ver_hdr; u32 i, j; int rc; - rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_ulptx_la), + rc = cudbg_get_buff(pdbg_init, dbg_buff, + sizeof(struct cudbg_ver_hdr) + + sizeof(struct cudbg_ulptx_la), &temp_buff); if (rc) return rc; - ulptx_la_buff = (struct cudbg_ulptx_la *)temp_buff.data; + ver_hdr = (struct cudbg_ver_hdr *)temp_buff.data; + ver_hdr->signature = CUDBG_ENTITY_SIGNATURE; + ver_hdr->revision = CUDBG_ULPTX_LA_REV; + ver_hdr->size = sizeof(struct cudbg_ulptx_la); + + ulptx_la_buff = (struct cudbg_ulptx_la *)(temp_buff.data + + sizeof(*ver_hdr)); for (i = 0; i < CUDBG_NUM_ULPTX; i++) { ulptx_la_buff->rdptr[i] = t4_read_reg(padap, ULP_TX_LA_RDPTR_0_A + @@ -2610,6 +2640,25 @@ int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init, t4_read_reg(padap, ULP_TX_LA_RDDATA_0_A + 0x10 * i); } + + for (i = 0; i < CUDBG_NUM_ULPTX_ASIC_READ; i++) { + t4_write_reg(padap, ULP_TX_ASIC_DEBUG_CTRL_A, 0x1); + ulptx_la_buff->rdptr_asic[i] = + t4_read_reg(padap, ULP_TX_ASIC_DEBUG_CTRL_A); + ulptx_la_buff->rddata_asic[i][0] = + t4_read_reg(padap, ULP_TX_ASIC_DEBUG_0_A); + ulptx_la_buff->rddata_asic[i][1] = + t4_read_reg(padap, ULP_TX_ASIC_DEBUG_1_A); + ulptx_la_buff->rddata_asic[i][2] = + t4_read_reg(padap, ULP_TX_ASIC_DEBUG_2_A); + ulptx_la_buff->rddata_asic[i][3] = + t4_read_reg(padap, ULP_TX_ASIC_DEBUG_3_A); + ulptx_la_buff->rddata_asic[i][4] = + t4_read_reg(padap, ULP_TX_ASIC_DEBUG_4_A); + ulptx_la_buff->rddata_asic[i][5] = + t4_read_reg(padap, PM_RX_BASE_ADDR); + } + return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 0dbe2d9e22d6..3da9299cd786 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -46,6 +46,7 @@ #include <linux/spinlock.h> #include <linux/timer.h> #include <linux/vmalloc.h> +#include <linux/rhashtable.h> #include <linux/etherdevice.h> #include <linux/net_tstamp.h> #include <linux/ptp_clock_kernel.h> @@ -319,6 +320,21 @@ struct 
vpd_params { u8 na[MACADDR_LEN + 1]; }; +/* Maximum resources provisioned for a PCI PF. + */ +struct pf_resources { + unsigned int nvi; /* N virtual interfaces */ + unsigned int neq; /* N egress Qs */ + unsigned int nethctrl; /* N egress ETH or CTRL Qs */ + unsigned int niqflint; /* N ingress Qs/w free list(s) & intr */ + unsigned int niq; /* N ingress Qs */ + unsigned int tc; /* PCI-E traffic class */ + unsigned int pmask; /* port access rights mask */ + unsigned int nexactf; /* N exact MPS filters */ + unsigned int r_caps; /* read capabilities */ + unsigned int wx_caps; /* write/execute capabilities */ +}; + struct pci_params { unsigned int vpd_cap_addr; unsigned char speed; @@ -346,6 +362,7 @@ struct adapter_params { struct sge_params sge; struct tp_params tp; struct vpd_params vpd; + struct pf_resources pfres; struct pci_params pci; struct devlog_params devlog; enum pcie_memwin drv_memwin; @@ -521,6 +538,15 @@ enum { MAX_INGQ = MAX_ETH_QSETS + INGQ_EXTRAS, }; +enum { + PRIV_FLAG_PORT_TX_VM_BIT, +}; + +#define PRIV_FLAG_PORT_TX_VM BIT(PRIV_FLAG_PORT_TX_VM_BIT) + +#define PRIV_FLAGS_ADAP 0 +#define PRIV_FLAGS_PORT PRIV_FLAG_PORT_TX_VM + struct adapter; struct sge_rspq; @@ -557,6 +583,7 @@ struct port_info { struct hwtstamp_config tstamp_config; bool ptp_enable; struct sched_table *sched_tbl; + u32 eth_flags; }; struct dentry; @@ -867,6 +894,7 @@ struct adapter { unsigned int flags; unsigned int adap_idx; enum chip_type chip; + u32 eth_flags; int msg_enable; __be16 vxlan_port; @@ -956,6 +984,7 @@ struct adapter { struct chcr_stats_debug chcr_stats; /* TC flower offload */ + bool tc_flower_initialized; struct rhashtable flower_tbl; struct rhashtable_params flower_ht_params; struct timer_list flower_stats_timer; @@ -1333,7 +1362,7 @@ void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat); void t4_free_sge_resources(struct adapter *adap); void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q); irq_handler_t t4_intr_handler(struct adapter *adap); -netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev); +netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev); int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, const struct pkt_gl *gl); int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb); @@ -1555,6 +1584,7 @@ int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz); int t4_seeprom_wp(struct adapter *adapter, bool enable); int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p); int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p); +int t4_get_pfres(struct adapter *adapter); int t4_read_flash(struct adapter *adapter, unsigned int addr, unsigned int nwords, u32 *data, int byte_oriented); int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c index 8d751efcb90e..5f01c0a7fd98 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c @@ -224,7 +224,8 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) len = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64); break; case CUDBG_MEMINFO: - len = sizeof(struct cudbg_meminfo); + len = sizeof(struct cudbg_ver_hdr) + + sizeof(struct cudbg_meminfo); break; case CUDBG_CIM_PIF_LA: len = sizeof(struct cudbg_cim_pif_la); @@ -273,7 +274,8 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) } 
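Several cudbg entities in these hunks (meminfo, ulptx_la) now get a cudbg_ver_hdr written in front of the payload in the flat dump buffer, so a reader can verify signature and revision before trusting the payload layout, and cxgb4_get_entity_length() grows by sizeof(struct cudbg_ver_hdr) to match. A small sketch of that header-then-payload framing; the signature value and the two-field payload are invented for the example:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Invented stand-ins for CUDBG_ENTITY_SIGNATURE / CUDBG_MEMINFO_REV */
#define ENTITY_SIGNATURE	0x43554442u
#define MEMINFO_REV		1

struct ver_hdr {		/* shape of cudbg_ver_hdr */
	uint32_t signature;
	uint32_t revision;
	uint32_t size;		/* payload bytes that follow the header */
};

struct meminfo {		/* trivial payload for the example */
	uint32_t free_rx_cnt;
	uint32_t free_tx_cnt;
};

int main(void)
{
	unsigned char buf[sizeof(struct ver_hdr) + sizeof(struct meminfo)];
	struct ver_hdr hdr = { ENTITY_SIGNATURE, MEMINFO_REV,
			       sizeof(struct meminfo) };
	struct meminfo mi = { 128, 256 };
	struct ver_hdr parsed;

	/* writer: header first, payload right behind it */
	memcpy(buf, &hdr, sizeof(hdr));
	memcpy(buf + sizeof(hdr), &mi, sizeof(mi));

	/* reader: check signature and revision before parsing the payload */
	memcpy(&parsed, buf, sizeof(parsed));
	if (parsed.signature == ENTITY_SIGNATURE &&
	    parsed.revision == MEMINFO_REV)
		printf("meminfo rev %u, %u payload bytes\n",
		       (unsigned)parsed.revision, (unsigned)parsed.size);
	return 0;
}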
break; case CUDBG_ULPTX_LA: - len = sizeof(struct cudbg_ulptx_la); + len = sizeof(struct cudbg_ver_hdr) + + sizeof(struct cudbg_ulptx_la); break; case CUDBG_UP_CIM_INDIRECT: n = 0; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index c301aaf79d64..6f312e03432f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -2414,6 +2414,44 @@ static const struct file_operations rss_vf_config_debugfs_fops = { .release = seq_release_private }; +static int resources_show(struct seq_file *seq, void *v) +{ + struct adapter *adapter = seq->private; + struct pf_resources *pfres = &adapter->params.pfres; + + #define S(desc, fmt, var) \ + seq_printf(seq, "%-60s " fmt "\n", \ + desc " (" #var "):", pfres->var) + + S("Virtual Interfaces", "%d", nvi); + S("Egress Queues", "%d", neq); + S("Ethernet Control", "%d", nethctrl); + S("Ingress Queues/w Free Lists/Interrupts", "%d", niqflint); + S("Ingress Queues", "%d", niq); + S("Traffic Class", "%d", tc); + S("Port Access Rights Mask", "%#x", pmask); + S("MAC Address Filters", "%d", nexactf); + S("Firmware Command Read Capabilities", "%#x", r_caps); + S("Firmware Command Write/Execute Capabilities", "%#x", wx_caps); + + #undef S + + return 0; +} + +static int resources_open(struct inode *inode, struct file *file) +{ + return single_open(file, resources_show, inode->i_private); +} + +static const struct file_operations resources_debugfs_fops = { + .owner = THIS_MODULE, + .open = resources_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + /** * ethqset2pinfo - return port_info of an Ethernet Queue Set * @adap: the adapter @@ -2436,16 +2474,64 @@ static inline struct port_info *ethqset2pinfo(struct adapter *adap, int qset) return NULL; } +static int sge_qinfo_uld_txq_entries(const struct adapter *adap, int uld) +{ + const struct sge_uld_txq_info *utxq_info = adap->sge.uld_txq_info[uld]; + + if (!utxq_info) + return 0; + + return DIV_ROUND_UP(utxq_info->ntxq, 4); +} + +static int sge_qinfo_uld_rspq_entries(const struct adapter *adap, int uld, + bool ciq) +{ + const struct sge_uld_rxq_info *urxq_info = adap->sge.uld_rxq_info[uld]; + + if (!urxq_info) + return 0; + + return ciq ? 
DIV_ROUND_UP(urxq_info->nciq, 4) : + DIV_ROUND_UP(urxq_info->nrxq, 4); +} + +static int sge_qinfo_uld_rxq_entries(const struct adapter *adap, int uld) +{ + return sge_qinfo_uld_rspq_entries(adap, uld, false); +} + +static int sge_qinfo_uld_ciq_entries(const struct adapter *adap, int uld) +{ + return sge_qinfo_uld_rspq_entries(adap, uld, true); +} + static int sge_qinfo_show(struct seq_file *seq, void *v) { + int uld_rxq_entries[CXGB4_ULD_MAX] = { 0 }; + int uld_ciq_entries[CXGB4_ULD_MAX] = { 0 }; + int uld_txq_entries[CXGB4_TX_MAX] = { 0 }; + const struct sge_uld_txq_info *utxq_info; + const struct sge_uld_rxq_info *urxq_info; struct adapter *adap = seq->private; - int eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4); - int ofld_entries = DIV_ROUND_UP(adap->sge.ofldqsets, 4); - int ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4); - int i, r = (uintptr_t)v - 1; - int ofld_idx = r - eth_entries; - int ctrl_idx = ofld_idx - ofld_entries; - int fq_idx = ctrl_idx - ctrl_entries; + int i, n, r = (uintptr_t)v - 1; + int eth_entries, ctrl_entries; + struct sge *s = &adap->sge; + + eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4); + ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4); + + mutex_lock(&uld_mutex); + if (s->uld_txq_info) + for (i = 0; i < ARRAY_SIZE(uld_txq_entries); i++) + uld_txq_entries[i] = sge_qinfo_uld_txq_entries(adap, i); + + if (s->uld_rxq_info) { + for (i = 0; i < ARRAY_SIZE(uld_rxq_entries); i++) { + uld_rxq_entries[i] = sge_qinfo_uld_rxq_entries(adap, i); + uld_ciq_entries[i] = sge_qinfo_uld_ciq_entries(adap, i); + } + } if (r) seq_putc(seq, '\n'); @@ -2467,9 +2553,10 @@ do { \ if (r < eth_entries) { int base_qset = r * 4; - const struct sge_eth_rxq *rx = &adap->sge.ethrxq[base_qset]; - const struct sge_eth_txq *tx = &adap->sge.ethtxq[base_qset]; - int n = min(4, adap->sge.ethqsets - 4 * r); + const struct sge_eth_rxq *rx = &s->ethrxq[base_qset]; + const struct sge_eth_txq *tx = &s->ethtxq[base_qset]; + + n = min(4, s->ethqsets - 4 * r); S("QType:", "Ethernet"); S("Interface:", @@ -2494,8 +2581,7 @@ do { \ R("RspQ CIDX:", rspq.cidx); R("RspQ Gen:", rspq.gen); S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq)); - S3("u", "Intr pktcnt:", - adap->sge.counter_val[rx[i].rspq.pktcnt_idx]); + S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]); R("FL ID:", fl.cntxt_id); R("FL size:", fl.size - 8); R("FL pend:", fl.pend_cred); @@ -2520,9 +2606,196 @@ do { \ RL("FLLow:", fl.low); RL("FLStarving:", fl.starving); - } else if (ctrl_idx < ctrl_entries) { - const struct sge_ctrl_txq *tx = &adap->sge.ctrlq[ctrl_idx * 4]; - int n = min(4, adap->params.nports - 4 * ctrl_idx); + goto unlock; + } + + r -= eth_entries; + if (r < uld_txq_entries[CXGB4_TX_OFLD]) { + const struct sge_uld_txq *tx; + + utxq_info = s->uld_txq_info[CXGB4_TX_OFLD]; + tx = &utxq_info->uldtxq[r * 4]; + n = min(4, utxq_info->ntxq - 4 * r); + + S("QType:", "OFLD-TXQ"); + T("TxQ ID:", q.cntxt_id); + T("TxQ size:", q.size); + T("TxQ inuse:", q.in_use); + T("TxQ CIDX:", q.cidx); + T("TxQ PIDX:", q.pidx); + + goto unlock; + } + + r -= uld_txq_entries[CXGB4_TX_OFLD]; + if (r < uld_rxq_entries[CXGB4_ULD_RDMA]) { + const struct sge_ofld_rxq *rx; + + urxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA]; + rx = &urxq_info->uldrxq[r * 4]; + n = min(4, urxq_info->nrxq - 4 * r); + + S("QType:", "RDMA-CPL"); + S("Interface:", + rx[i].rspq.netdev ? 
rx[i].rspq.netdev->name : "N/A"); + R("RspQ ID:", rspq.abs_id); + R("RspQ size:", rspq.size); + R("RspQE size:", rspq.iqe_len); + R("RspQ CIDX:", rspq.cidx); + R("RspQ Gen:", rspq.gen); + S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq)); + S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]); + R("FL ID:", fl.cntxt_id); + R("FL size:", fl.size - 8); + R("FL pend:", fl.pend_cred); + R("FL avail:", fl.avail); + R("FL PIDX:", fl.pidx); + R("FL CIDX:", fl.cidx); + + goto unlock; + } + + r -= uld_rxq_entries[CXGB4_ULD_RDMA]; + if (r < uld_ciq_entries[CXGB4_ULD_RDMA]) { + const struct sge_ofld_rxq *rx; + int ciq_idx = 0; + + urxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA]; + ciq_idx = urxq_info->nrxq + (r * 4); + rx = &urxq_info->uldrxq[ciq_idx]; + n = min(4, urxq_info->nciq - 4 * r); + + S("QType:", "RDMA-CIQ"); + S("Interface:", + rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A"); + R("RspQ ID:", rspq.abs_id); + R("RspQ size:", rspq.size); + R("RspQE size:", rspq.iqe_len); + R("RspQ CIDX:", rspq.cidx); + R("RspQ Gen:", rspq.gen); + S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq)); + S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]); + + goto unlock; + } + + r -= uld_ciq_entries[CXGB4_ULD_RDMA]; + if (r < uld_rxq_entries[CXGB4_ULD_ISCSI]) { + const struct sge_ofld_rxq *rx; + + urxq_info = s->uld_rxq_info[CXGB4_ULD_ISCSI]; + rx = &urxq_info->uldrxq[r * 4]; + n = min(4, urxq_info->nrxq - 4 * r); + + S("QType:", "iSCSI"); + R("RspQ ID:", rspq.abs_id); + R("RspQ size:", rspq.size); + R("RspQE size:", rspq.iqe_len); + R("RspQ CIDX:", rspq.cidx); + R("RspQ Gen:", rspq.gen); + S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq)); + S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]); + R("FL ID:", fl.cntxt_id); + R("FL size:", fl.size - 8); + R("FL pend:", fl.pend_cred); + R("FL avail:", fl.avail); + R("FL PIDX:", fl.pidx); + R("FL CIDX:", fl.cidx); + + goto unlock; + } + + r -= uld_rxq_entries[CXGB4_ULD_ISCSI]; + if (r < uld_rxq_entries[CXGB4_ULD_ISCSIT]) { + const struct sge_ofld_rxq *rx; + + urxq_info = s->uld_rxq_info[CXGB4_ULD_ISCSIT]; + rx = &urxq_info->uldrxq[r * 4]; + n = min(4, urxq_info->nrxq - 4 * r); + + S("QType:", "iSCSIT"); + R("RspQ ID:", rspq.abs_id); + R("RspQ size:", rspq.size); + R("RspQE size:", rspq.iqe_len); + R("RspQ CIDX:", rspq.cidx); + R("RspQ Gen:", rspq.gen); + S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq)); + S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]); + R("FL ID:", fl.cntxt_id); + R("FL size:", fl.size - 8); + R("FL pend:", fl.pend_cred); + R("FL avail:", fl.avail); + R("FL PIDX:", fl.pidx); + R("FL CIDX:", fl.cidx); + + goto unlock; + } + + r -= uld_rxq_entries[CXGB4_ULD_ISCSIT]; + if (r < uld_rxq_entries[CXGB4_ULD_TLS]) { + const struct sge_ofld_rxq *rx; + + urxq_info = s->uld_rxq_info[CXGB4_ULD_TLS]; + rx = &urxq_info->uldrxq[r * 4]; + n = min(4, urxq_info->nrxq - 4 * r); + + S("QType:", "TLS"); + R("RspQ ID:", rspq.abs_id); + R("RspQ size:", rspq.size); + R("RspQE size:", rspq.iqe_len); + R("RspQ CIDX:", rspq.cidx); + R("RspQ Gen:", rspq.gen); + S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq)); + S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]); + R("FL ID:", fl.cntxt_id); + R("FL size:", fl.size - 8); + R("FL pend:", fl.pend_cred); + R("FL avail:", fl.avail); + R("FL PIDX:", fl.pidx); + R("FL CIDX:", fl.cidx); + + goto unlock; + } + + r -= uld_rxq_entries[CXGB4_ULD_TLS]; + if (r < uld_txq_entries[CXGB4_TX_CRYPTO]) { + const struct sge_ofld_rxq *rx; + const struct sge_uld_txq 
*tx; + + utxq_info = s->uld_txq_info[CXGB4_TX_CRYPTO]; + urxq_info = s->uld_rxq_info[CXGB4_ULD_CRYPTO]; + tx = &utxq_info->uldtxq[r * 4]; + rx = &urxq_info->uldrxq[r * 4]; + n = min(4, utxq_info->ntxq - 4 * r); + + S("QType:", "Crypto"); + T("TxQ ID:", q.cntxt_id); + T("TxQ size:", q.size); + T("TxQ inuse:", q.in_use); + T("TxQ CIDX:", q.cidx); + T("TxQ PIDX:", q.pidx); + R("RspQ ID:", rspq.abs_id); + R("RspQ size:", rspq.size); + R("RspQE size:", rspq.iqe_len); + R("RspQ CIDX:", rspq.cidx); + R("RspQ Gen:", rspq.gen); + S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq)); + S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]); + R("FL ID:", fl.cntxt_id); + R("FL size:", fl.size - 8); + R("FL pend:", fl.pend_cred); + R("FL avail:", fl.avail); + R("FL PIDX:", fl.pidx); + R("FL CIDX:", fl.cidx); + + goto unlock; + } + + r -= uld_txq_entries[CXGB4_TX_CRYPTO]; + if (r < ctrl_entries) { + const struct sge_ctrl_txq *tx = &s->ctrlq[r * 4]; + + n = min(4, adap->params.nports - 4 * r); S("QType:", "Control"); T("TxQ ID:", q.cntxt_id); @@ -2532,8 +2805,13 @@ do { \ T("TxQ PIDX:", q.pidx); TL("TxQFull:", q.stops); TL("TxQRestarts:", q.restarts); - } else if (fq_idx == 0) { - const struct sge_rspq *evtq = &adap->sge.fw_evtq; + + goto unlock; + } + + r -= ctrl_entries; + if (r < 1) { + const struct sge_rspq *evtq = &s->fw_evtq; seq_printf(seq, "%-12s %16s\n", "QType:", "FW event queue"); seq_printf(seq, "%-12s %16u\n", "RspQ ID:", evtq->abs_id); @@ -2544,8 +2822,13 @@ do { \ seq_printf(seq, "%-12s %16u\n", "Intr delay:", qtimer_val(adap, evtq)); seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:", - adap->sge.counter_val[evtq->pktcnt_idx]); + s->counter_val[evtq->pktcnt_idx]); + + goto unlock; } + +unlock: + mutex_unlock(&uld_mutex); #undef R #undef RL #undef T @@ -2559,8 +2842,21 @@ do { \ static int sge_queue_entries(const struct adapter *adap) { + int tot_uld_entries = 0; + int i; + + mutex_lock(&uld_mutex); + for (i = 0; i < CXGB4_TX_MAX; i++) + tot_uld_entries += sge_qinfo_uld_txq_entries(adap, i); + + for (i = 0; i < CXGB4_ULD_MAX; i++) { + tot_uld_entries += sge_qinfo_uld_rxq_entries(adap, i); + tot_uld_entries += sge_qinfo_uld_ciq_entries(adap, i); + } + mutex_unlock(&uld_mutex); + return DIV_ROUND_UP(adap->sge.ethqsets, 4) + - DIV_ROUND_UP(adap->sge.ofldqsets, 4) + + tot_uld_entries + DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1; } @@ -2851,15 +3147,17 @@ static int meminfo_show(struct seq_file *seq, void *v) mem_region_show(seq, "uP Extmem2:", meminfo.up_extmem2_lo, meminfo.up_extmem2_hi); - seq_printf(seq, "\n%u Rx pages of size %uKiB for %u channels\n", - meminfo.rx_pages_data[0], meminfo.rx_pages_data[1], - meminfo.rx_pages_data[2]); + seq_printf(seq, "\n%u Rx pages (%u free) of size %uKiB for %u channels\n", + meminfo.rx_pages_data[0], meminfo.free_rx_cnt, + meminfo.rx_pages_data[1], meminfo.rx_pages_data[2]); - seq_printf(seq, "%u Tx pages of size %u%ciB for %u channels\n", - meminfo.tx_pages_data[0], meminfo.tx_pages_data[1], - meminfo.tx_pages_data[2], meminfo.tx_pages_data[3]); + seq_printf(seq, "%u Tx pages (%u free) of size %u%ciB for %u channels\n", + meminfo.tx_pages_data[0], meminfo.free_tx_cnt, + meminfo.tx_pages_data[1], meminfo.tx_pages_data[2], + meminfo.tx_pages_data[3]); - seq_printf(seq, "%u p-structs\n\n", meminfo.p_structs); + seq_printf(seq, "%u p-structs (%u free)\n\n", + meminfo.p_structs, meminfo.p_structs_free_cnt); for (i = 0; i < 4; i++) /* For T6 these are MAC buffer groups */ @@ -2924,6 +3222,169 @@ static const struct file_operations chcr_stats_debugfs_fops = { 
.llseek = seq_lseek, .release = single_release, }; + +#define PRINT_ADAP_STATS(string, value) \ + seq_printf(seq, "%-25s %-20llu\n", (string), \ + (unsigned long long)(value)) + +#define PRINT_CH_STATS(string, value) \ +do { \ + seq_printf(seq, "%-25s ", (string)); \ + for (i = 0; i < adap->params.arch.nchan; i++) \ + seq_printf(seq, "%-20llu ", \ + (unsigned long long)stats.value[i]); \ + seq_printf(seq, "\n"); \ +} while (0) + +#define PRINT_CH_STATS2(string, value) \ +do { \ + seq_printf(seq, "%-25s ", (string)); \ + for (i = 0; i < adap->params.arch.nchan; i++) \ + seq_printf(seq, "%-20llu ", \ + (unsigned long long)stats[i].value); \ + seq_printf(seq, "\n"); \ +} while (0) + +static void show_tcp_stats(struct seq_file *seq) +{ + struct adapter *adap = seq->private; + struct tp_tcp_stats v4, v6; + + spin_lock(&adap->stats_lock); + t4_tp_get_tcp_stats(adap, &v4, &v6, false); + spin_unlock(&adap->stats_lock); + + PRINT_ADAP_STATS("tcp_ipv4_out_rsts:", v4.tcp_out_rsts); + PRINT_ADAP_STATS("tcp_ipv4_in_segs:", v4.tcp_in_segs); + PRINT_ADAP_STATS("tcp_ipv4_out_segs:", v4.tcp_out_segs); + PRINT_ADAP_STATS("tcp_ipv4_retrans_segs:", v4.tcp_retrans_segs); + PRINT_ADAP_STATS("tcp_ipv6_out_rsts:", v6.tcp_out_rsts); + PRINT_ADAP_STATS("tcp_ipv6_in_segs:", v6.tcp_in_segs); + PRINT_ADAP_STATS("tcp_ipv6_out_segs:", v6.tcp_out_segs); + PRINT_ADAP_STATS("tcp_ipv6_retrans_segs:", v6.tcp_retrans_segs); +} + +static void show_ddp_stats(struct seq_file *seq) +{ + struct adapter *adap = seq->private; + struct tp_usm_stats stats; + + spin_lock(&adap->stats_lock); + t4_get_usm_stats(adap, &stats, false); + spin_unlock(&adap->stats_lock); + + PRINT_ADAP_STATS("usm_ddp_frames:", stats.frames); + PRINT_ADAP_STATS("usm_ddp_octets:", stats.octets); + PRINT_ADAP_STATS("usm_ddp_drops:", stats.drops); +} + +static void show_rdma_stats(struct seq_file *seq) +{ + struct adapter *adap = seq->private; + struct tp_rdma_stats stats; + + spin_lock(&adap->stats_lock); + t4_tp_get_rdma_stats(adap, &stats, false); + spin_unlock(&adap->stats_lock); + + PRINT_ADAP_STATS("rdma_no_rqe_mod_defer:", stats.rqe_dfr_mod); + PRINT_ADAP_STATS("rdma_no_rqe_pkt_defer:", stats.rqe_dfr_pkt); +} + +static void show_tp_err_adapter_stats(struct seq_file *seq) +{ + struct adapter *adap = seq->private; + struct tp_err_stats stats; + + spin_lock(&adap->stats_lock); + t4_tp_get_err_stats(adap, &stats, false); + spin_unlock(&adap->stats_lock); + + PRINT_ADAP_STATS("tp_err_ofld_no_neigh:", stats.ofld_no_neigh); + PRINT_ADAP_STATS("tp_err_ofld_cong_defer:", stats.ofld_cong_defer); +} + +static void show_cpl_stats(struct seq_file *seq) +{ + struct adapter *adap = seq->private; + struct tp_cpl_stats stats; + u8 i; + + spin_lock(&adap->stats_lock); + t4_tp_get_cpl_stats(adap, &stats, false); + spin_unlock(&adap->stats_lock); + + PRINT_CH_STATS("tp_cpl_requests:", req); + PRINT_CH_STATS("tp_cpl_responses:", rsp); +} + +static void show_tp_err_channel_stats(struct seq_file *seq) +{ + struct adapter *adap = seq->private; + struct tp_err_stats stats; + u8 i; + + spin_lock(&adap->stats_lock); + t4_tp_get_err_stats(adap, &stats, false); + spin_unlock(&adap->stats_lock); + + PRINT_CH_STATS("tp_mac_in_errs:", mac_in_errs); + PRINT_CH_STATS("tp_hdr_in_errs:", hdr_in_errs); + PRINT_CH_STATS("tp_tcp_in_errs:", tcp_in_errs); + PRINT_CH_STATS("tp_tcp6_in_errs:", tcp6_in_errs); + PRINT_CH_STATS("tp_tnl_cong_drops:", tnl_cong_drops); + PRINT_CH_STATS("tp_tnl_tx_drops:", tnl_tx_drops); + PRINT_CH_STATS("tp_ofld_vlan_drops:", ofld_vlan_drops); + 
PRINT_CH_STATS("tp_ofld_chan_drops:", ofld_chan_drops); +} + +static void show_fcoe_stats(struct seq_file *seq) +{ + struct adapter *adap = seq->private; + struct tp_fcoe_stats stats[NCHAN]; + u8 i; + + spin_lock(&adap->stats_lock); + for (i = 0; i < adap->params.arch.nchan; i++) + t4_get_fcoe_stats(adap, i, &stats[i], false); + spin_unlock(&adap->stats_lock); + + PRINT_CH_STATS2("fcoe_octets_ddp", octets_ddp); + PRINT_CH_STATS2("fcoe_frames_ddp", frames_ddp); + PRINT_CH_STATS2("fcoe_frames_drop", frames_drop); +} + +#undef PRINT_CH_STATS2 +#undef PRINT_CH_STATS +#undef PRINT_ADAP_STATS + +static int tp_stats_show(struct seq_file *seq, void *v) +{ + struct adapter *adap = seq->private; + + seq_puts(seq, "\n--------Adapter Stats--------\n"); + show_tcp_stats(seq); + show_ddp_stats(seq); + show_rdma_stats(seq); + show_tp_err_adapter_stats(seq); + + seq_puts(seq, "\n-------- Channel Stats --------\n"); + if (adap->params.arch.nchan == NCHAN) + seq_printf(seq, "%-25s %-20s %-20s %-20s %-20s\n", + " ", "channel 0", "channel 1", + "channel 2", "channel 3"); + else + seq_printf(seq, "%-25s %-20s %-20s\n", + " ", "channel 0", "channel 1"); + show_cpl_stats(seq); + show_tp_err_channel_stats(seq); + show_fcoe_stats(seq); + + return 0; +} + +DEFINE_SIMPLE_DEBUGFS_FILE(tp_stats); + /* Add an array of Debug FS files. */ void add_debugfs_files(struct adapter *adap, @@ -2973,6 +3434,7 @@ int t4_setup_debugfs(struct adapter *adap) { "rss_key", &rss_key_debugfs_fops, 0400, 0 }, { "rss_pf_config", &rss_pf_config_debugfs_fops, 0400, 0 }, { "rss_vf_config", &rss_vf_config_debugfs_fops, 0400, 0 }, + { "resources", &resources_debugfs_fops, 0400, 0 }, { "sge_qinfo", &sge_qinfo_debugfs_fops, 0400, 0 }, { "ibq_tp0", &cim_ibq_fops, 0400, 0 }, { "ibq_tp1", &cim_ibq_fops, 0400, 1 }, @@ -2999,6 +3461,7 @@ int t4_setup_debugfs(struct adapter *adap) { "blocked_fl", &blocked_fl_fops, 0600, 0 }, { "meminfo", &meminfo_fops, 0400, 0 }, { "crypto", &chcr_stats_debugfs_fops, 0400, 0 }, + { "tp_stats", &tp_stats_debugfs_fops, 0400, 0 }, }; /* Debug FS nodes common to all T5 and later adapters. 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index f7eef93ffc87..d07230c892a5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -115,42 +115,10 @@ static char adapter_stats_strings[][ETH_GSTRING_LEN] = { "db_drop ", "db_full ", "db_empty ", - "tcp_ipv4_out_rsts ", - "tcp_ipv4_in_segs ", - "tcp_ipv4_out_segs ", - "tcp_ipv4_retrans_segs ", - "tcp_ipv6_out_rsts ", - "tcp_ipv6_in_segs ", - "tcp_ipv6_out_segs ", - "tcp_ipv6_retrans_segs ", - "usm_ddp_frames ", - "usm_ddp_octets ", - "usm_ddp_drops ", - "rdma_no_rqe_mod_defer ", - "rdma_no_rqe_pkt_defer ", - "tp_err_ofld_no_neigh ", - "tp_err_ofld_cong_defer ", "write_coal_success ", "write_coal_fail ", }; -static char channel_stats_strings[][ETH_GSTRING_LEN] = { - "--------Channel--------- ", - "tp_cpl_requests ", - "tp_cpl_responses ", - "tp_mac_in_errs ", - "tp_hdr_in_errs ", - "tp_tcp_in_errs ", - "tp_tcp6_in_errs ", - "tp_tnl_cong_drops ", - "tp_tnl_tx_drops ", - "tp_ofld_vlan_drops ", - "tp_ofld_chan_drops ", - "fcoe_octets_ddp ", - "fcoe_frames_ddp ", - "fcoe_frames_drop ", -}; - static char loopback_stats_strings[][ETH_GSTRING_LEN] = { "-------Loopback----------- ", "octets_ok ", @@ -177,14 +145,19 @@ static char loopback_stats_strings[][ETH_GSTRING_LEN] = { "bg3_frames_trunc ", }; +static const char cxgb4_priv_flags_strings[][ETH_GSTRING_LEN] = { + [PRIV_FLAG_PORT_TX_VM_BIT] = "port_tx_vm_wr", +}; + static int get_sset_count(struct net_device *dev, int sset) { switch (sset) { case ETH_SS_STATS: return ARRAY_SIZE(stats_strings) + ARRAY_SIZE(adapter_stats_strings) + - ARRAY_SIZE(channel_stats_strings) + ARRAY_SIZE(loopback_stats_strings); + case ETH_SS_PRIV_FLAGS: + return ARRAY_SIZE(cxgb4_priv_flags_strings); default: return -EOPNOTSUPP; } @@ -235,6 +208,7 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) FW_HDR_FW_VER_MINOR_G(exprom_vers), FW_HDR_FW_VER_MICRO_G(exprom_vers), FW_HDR_FW_VER_BUILD_G(exprom_vers)); + info->n_priv_flags = ARRAY_SIZE(cxgb4_priv_flags_strings); } static void get_strings(struct net_device *dev, u32 stringset, u8 *data) @@ -245,11 +219,11 @@ static void get_strings(struct net_device *dev, u32 stringset, u8 *data) memcpy(data, adapter_stats_strings, sizeof(adapter_stats_strings)); data += sizeof(adapter_stats_strings); - memcpy(data, channel_stats_strings, - sizeof(channel_stats_strings)); - data += sizeof(channel_stats_strings); memcpy(data, loopback_stats_strings, sizeof(loopback_stats_strings)); + } else if (stringset == ETH_SS_PRIV_FLAGS) { + memcpy(data, cxgb4_priv_flags_strings, + sizeof(cxgb4_priv_flags_strings)); } } @@ -270,41 +244,10 @@ struct adapter_stats { u64 db_drop; u64 db_full; u64 db_empty; - u64 tcp_v4_out_rsts; - u64 tcp_v4_in_segs; - u64 tcp_v4_out_segs; - u64 tcp_v4_retrans_segs; - u64 tcp_v6_out_rsts; - u64 tcp_v6_in_segs; - u64 tcp_v6_out_segs; - u64 tcp_v6_retrans_segs; - u64 frames; - u64 octets; - u64 drops; - u64 rqe_dfr_mod; - u64 rqe_dfr_pkt; - u64 ofld_no_neigh; - u64 ofld_cong_defer; u64 wc_success; u64 wc_fail; }; -struct channel_stats { - u64 cpl_req; - u64 cpl_rsp; - u64 mac_in_errs; - u64 hdr_in_errs; - u64 tcp_in_errs; - u64 tcp6_in_errs; - u64 tnl_cong_drops; - u64 tnl_tx_drops; - u64 ofld_vlan_drops; - u64 ofld_chan_drops; - u64 octets_ddp; - u64 frames_ddp; - u64 frames_drop; -}; - static void collect_sge_port_stats(const struct adapter *adap, const struct port_info *p, struct queue_port_stats *s) @@ 
-327,45 +270,14 @@ static void collect_sge_port_stats(const struct adapter *adap, static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s) { - struct tp_tcp_stats v4, v6; - struct tp_rdma_stats rdma_stats; - struct tp_err_stats err_stats; - struct tp_usm_stats usm_stats; u64 val1, val2; memset(s, 0, sizeof(*s)); - spin_lock(&adap->stats_lock); - t4_tp_get_tcp_stats(adap, &v4, &v6, false); - t4_tp_get_rdma_stats(adap, &rdma_stats, false); - t4_get_usm_stats(adap, &usm_stats, false); - t4_tp_get_err_stats(adap, &err_stats, false); - spin_unlock(&adap->stats_lock); - s->db_drop = adap->db_stats.db_drop; s->db_full = adap->db_stats.db_full; s->db_empty = adap->db_stats.db_empty; - s->tcp_v4_out_rsts = v4.tcp_out_rsts; - s->tcp_v4_in_segs = v4.tcp_in_segs; - s->tcp_v4_out_segs = v4.tcp_out_segs; - s->tcp_v4_retrans_segs = v4.tcp_retrans_segs; - s->tcp_v6_out_rsts = v6.tcp_out_rsts; - s->tcp_v6_in_segs = v6.tcp_in_segs; - s->tcp_v6_out_segs = v6.tcp_out_segs; - s->tcp_v6_retrans_segs = v6.tcp_retrans_segs; - - if (is_offload(adap)) { - s->frames = usm_stats.frames; - s->octets = usm_stats.octets; - s->drops = usm_stats.drops; - s->rqe_dfr_mod = rdma_stats.rqe_dfr_mod; - s->rqe_dfr_pkt = rdma_stats.rqe_dfr_pkt; - } - - s->ofld_no_neigh = err_stats.ofld_no_neigh; - s->ofld_cong_defer = err_stats.ofld_cong_defer; - if (!is_t4(adap->params.chip)) { int v; @@ -379,36 +291,6 @@ static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s) } } -static void collect_channel_stats(struct adapter *adap, struct channel_stats *s, - u8 i) -{ - struct tp_cpl_stats cpl_stats; - struct tp_err_stats err_stats; - struct tp_fcoe_stats fcoe_stats; - - memset(s, 0, sizeof(*s)); - - spin_lock(&adap->stats_lock); - t4_tp_get_cpl_stats(adap, &cpl_stats, false); - t4_tp_get_err_stats(adap, &err_stats, false); - t4_get_fcoe_stats(adap, i, &fcoe_stats, false); - spin_unlock(&adap->stats_lock); - - s->cpl_req = cpl_stats.req[i]; - s->cpl_rsp = cpl_stats.rsp[i]; - s->mac_in_errs = err_stats.mac_in_errs[i]; - s->hdr_in_errs = err_stats.hdr_in_errs[i]; - s->tcp_in_errs = err_stats.tcp_in_errs[i]; - s->tcp6_in_errs = err_stats.tcp6_in_errs[i]; - s->tnl_cong_drops = err_stats.tnl_cong_drops[i]; - s->tnl_tx_drops = err_stats.tnl_tx_drops[i]; - s->ofld_vlan_drops = err_stats.ofld_vlan_drops[i]; - s->ofld_chan_drops = err_stats.ofld_chan_drops[i]; - s->octets_ddp = fcoe_stats.octets_ddp; - s->frames_ddp = fcoe_stats.frames_ddp; - s->frames_drop = fcoe_stats.frames_drop; -} - static void get_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { @@ -429,11 +311,6 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats, data += sizeof(struct adapter_stats) / sizeof(u64); *data++ = (u64)pi->port_id; - collect_channel_stats(adapter, (struct channel_stats *)data, - pi->port_id); - data += sizeof(struct channel_stats) / sizeof(u64); - - *data++ = (u64)pi->port_id; memset(&s, 0, sizeof(s)); t4_get_lb_stats(adapter, pi->port_id, &s); @@ -751,13 +628,10 @@ static int get_link_ksettings(struct net_device *dev, fw_caps_to_lmm(pi->port_type, pi->link_cfg.lpacaps, link_ksettings->link_modes.lp_advertising); - if (netif_carrier_ok(dev)) { - base->speed = pi->link_cfg.speed; - base->duplex = DUPLEX_FULL; - } else { - base->speed = SPEED_UNKNOWN; - base->duplex = DUPLEX_UNKNOWN; - } + base->speed = (netif_carrier_ok(dev) + ? 
pi->link_cfg.speed + : SPEED_UNKNOWN); + base->duplex = DUPLEX_FULL; if (pi->link_cfg.fc & PAUSE_RX) { if (pi->link_cfg.fc & PAUSE_TX) { @@ -1499,6 +1373,36 @@ static int cxgb4_get_module_eeprom(struct net_device *dev, offset, len, &data[eprom->len - len]); } +static u32 cxgb4_get_priv_flags(struct net_device *netdev) +{ + struct port_info *pi = netdev_priv(netdev); + struct adapter *adapter = pi->adapter; + + return (adapter->eth_flags | pi->eth_flags); +} + +/** + * set_flags - set/unset specified flags if passed in new_flags + * @cur_flags: pointer to current flags + * @new_flags: new incoming flags + * @flags: set of flags to set/unset + */ +static inline void set_flags(u32 *cur_flags, u32 new_flags, u32 flags) +{ + *cur_flags = (*cur_flags & ~flags) | (new_flags & flags); +} + +static int cxgb4_set_priv_flags(struct net_device *netdev, u32 flags) +{ + struct port_info *pi = netdev_priv(netdev); + struct adapter *adapter = pi->adapter; + + set_flags(&adapter->eth_flags, flags, PRIV_FLAGS_ADAP); + set_flags(&pi->eth_flags, flags, PRIV_FLAGS_PORT); + + return 0; +} + static const struct ethtool_ops cxgb_ethtool_ops = { .get_link_ksettings = get_link_ksettings, .set_link_ksettings = set_link_ksettings, @@ -1535,6 +1439,8 @@ static const struct ethtool_ops cxgb_ethtool_ops = { .get_dump_data = get_dump_data, .get_module_info = cxgb4_get_module_info, .get_module_eeprom = cxgb4_get_module_eeprom, + .get_priv_flags = cxgb4_get_priv_flags, + .set_priv_flags = cxgb4_set_priv_flags, }; void cxgb4_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index a8926e97935e..0f7ce71205e6 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -554,10 +554,9 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp, dev = q->adap->port[q->adap->chan_map[port]]; dcbxdis = (action == FW_PORT_ACTION_GET_PORT_INFO - ? !!(pcmd->u.info.dcbxdis_pkd & - FW_PORT_CMD_DCBXDIS_F) - : !!(pcmd->u.info32.lstatus32_to_cbllen32 & - FW_PORT_CMD_DCBXDIS32_F)); + ? !!(pcmd->u.info.dcbxdis_pkd & FW_PORT_CMD_DCBXDIS_F) + : !!(be32_to_cpu(pcmd->u.info32.lstatus32_to_cbllen32) + & FW_PORT_CMD_DCBXDIS32_F)); state_input = (dcbxdis ? 
CXGB4_DCB_INPUT_FW_DISABLED : CXGB4_DCB_INPUT_FW_ENABLED); @@ -924,12 +923,14 @@ static int setup_sge_queues(struct adapter *adap) QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id)); return 0; freeout: + dev_err(adap->pdev_dev, "Can't allocate queues, err=%d\n", -err); t4_free_sge_resources(adap); return err; } static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { int txq; @@ -971,7 +972,7 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb, return txq; } - return fallback(dev, skb) % dev->real_num_tx_queues; + return fallback(dev, skb, NULL) % dev->real_num_tx_queues; } static int closest_timer(const struct sge *s, int time) @@ -3016,7 +3017,7 @@ static int cxgb_setup_tc_block(struct net_device *dev, switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, cxgb_setup_tc_block_cb, - pi, dev); + pi, dev, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, cxgb_setup_tc_block_cb, pi); return 0; @@ -3219,7 +3220,7 @@ static netdev_features_t cxgb_fix_features(struct net_device *dev, static const struct net_device_ops cxgb4_netdev_ops = { .ndo_open = cxgb_open, .ndo_stop = cxgb_close, - .ndo_start_xmit = t4_start_xmit, + .ndo_start_xmit = t4_start_xmit, .ndo_select_queue = cxgb_select_queue, .ndo_get_stats64 = cxgb_get_stats, .ndo_set_rx_mode = cxgb_set_rxmode, @@ -3538,6 +3539,16 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c) u32 v; int ret; + /* Now that we've successfully configured and initialized the adapter, we + * can ask the Firmware what resources it has provisioned for us. + */ + ret = t4_get_pfres(adap); + if (ret) { + dev_err(adap->pdev_dev, + "Unable to retrieve resource provisioning information\n"); + return ret; + } + /* get device capabilities */ memset(c, 0, sizeof(*c)); c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | @@ -4172,32 +4183,6 @@ static int adap_init0(struct adapter *adap) goto bye; } - /* - * Grab VPD parameters. This should be done after we establish a - * connection to the firmware since some of the VPD parameters - * (notably the Core Clock frequency) are retrieved via requests to - * the firmware. On the other hand, we need these fairly early on - * so we do this right after getting ahold of the firmware. - */ - ret = t4_get_vpd_params(adap, &adap->params.vpd); - if (ret < 0) - goto bye; - - /* - * Find out what ports are available to us. Note that we need to do - * this before calling adap_init0_no_config() since it needs nports - * and portvec ... - */ - v = - FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | - FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC); - ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec); - if (ret < 0) - goto bye; - - adap->params.nports = hweight32(port_vec); - adap->params.portvec = port_vec; - /* If the firmware is initialized already, emit a simple note to that * effect. Otherwise, it's time to try initializing the adapter. */ @@ -4248,6 +4233,45 @@ static int adap_init0(struct adapter *adap) } } + /* Now that we've successfully configured and initialized the adapter + * (or found it already initialized), we can ask the Firmware what + * resources it has provisioned for us. + */ + ret = t4_get_pfres(adap); + if (ret) { + dev_err(adap->pdev_dev, + "Unable to retrieve resource provisioning information\n"); + goto bye; + } + + /* Grab VPD parameters. 
This should be done after we establish a + * connection to the firmware since some of the VPD parameters + * (notably the Core Clock frequency) are retrieved via requests to + * the firmware. On the other hand, we need these fairly early on + * so we do this right after getting ahold of the firmware. + * + * We need to do this after initializing the adapter because someone + * could have FLASHed a new VPD which won't be read by the firmware + * until we do the RESET ... + */ + ret = t4_get_vpd_params(adap, &adap->params.vpd); + if (ret < 0) + goto bye; + + /* Find out what ports are available to us. Note that we need to do + * this before calling adap_init0_no_config() since it needs nports + * and portvec ... + */ + v = + FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC); + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec); + if (ret < 0) + goto bye; + + adap->params.nports = hweight32(port_vec); + adap->params.portvec = port_vec; + /* Give the SGE code a chance to pull in anything that it needs ... * Note that this must be called after we retrieve our VPD parameters * in order to know how to convert core ticks to seconds, etc. @@ -4799,10 +4823,12 @@ static inline bool is_x_10g_port(const struct link_config *lc) * of ports we found and the number of available CPUs. Most settings can be * modified by the admin prior to actual use. */ -static void cfg_queues(struct adapter *adap) +static int cfg_queues(struct adapter *adap) { struct sge *s = &adap->sge; - int i = 0, n10g = 0, qidx = 0; + int i, n10g = 0, qidx = 0; + int niqflint, neq, avail_eth_qsets; + int max_eth_qsets = 32; #ifndef CONFIG_CHELSIO_T4_DCB int q10g = 0; #endif @@ -4814,16 +4840,46 @@ static void cfg_queues(struct adapter *adap) adap->params.crypto = 0; } - n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg); + /* Calculate the number of Ethernet Queue Sets available based on + * resources provisioned for us. We always have an Asynchronous + * Firmware Event Ingress Queue. If we're operating in MSI or Legacy + * IRQ Pin Interrupt mode, then we'll also have a Forwarded Interrupt + * Ingress Queue. Meanwhile, we need two Egress Queues for each + * Queue Set: one for the Free List and one for the Ethernet TX Queue. + * + * Note that we should also take into account all of the various + * Offload Queues. But, in any situation where we're operating in + * a Resource Constrained Provisioning environment, doing any Offload + * at all is problematic ... + */ + niqflint = adap->params.pfres.niqflint - 1; + if (!(adap->flags & USING_MSIX)) + niqflint--; + neq = adap->params.pfres.neq / 2; + avail_eth_qsets = min(niqflint, neq); + + if (avail_eth_qsets > max_eth_qsets) + avail_eth_qsets = max_eth_qsets; + + if (avail_eth_qsets < adap->params.nports) { + dev_err(adap->pdev_dev, "avail_eth_qsets=%d < nports=%d\n", + avail_eth_qsets, adap->params.nports); + return -ENOMEM; + } + + /* Count the number of 10Gb/s or better ports */ + for_each_port(adap, i) + n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg); + #ifdef CONFIG_CHELSIO_T4_DCB /* For Data Center Bridging support we need to be able to support up * to 8 Traffic Priorities; each of which will be assigned to its * own TX Queue in order to prevent Head-Of-Line Blocking. 
*/ - if (adap->params.nports * 8 > MAX_ETH_QSETS) { - dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n", - MAX_ETH_QSETS, adap->params.nports * 8); - BUG_ON(1); + if (adap->params.nports * 8 > avail_eth_qsets) { + dev_err(adap->pdev_dev, "DCB avail_eth_qsets=%d < %d!\n", + avail_eth_qsets, adap->params.nports * 8); + return -ENOMEM; } for_each_port(adap, i) { @@ -4839,7 +4895,7 @@ static void cfg_queues(struct adapter *adap) * per 10G port. */ if (n10g) - q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g; + q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g; if (q10g > netif_get_num_default_rss_queues()) q10g = netif_get_num_default_rss_queues(); @@ -4890,6 +4946,8 @@ static void cfg_queues(struct adapter *adap) init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64); init_rspq(adap, &s->intrq, 0, 1, 512, 64); + + return 0; } /* @@ -5630,10 +5688,15 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } } + if (!(adapter->flags & FW_OK)) + goto fw_attach_fail; + /* Configure queues and allocate tables now, they can be needed as * soon as the first register_netdev completes. */ - cfg_queues(adapter); + err = cfg_queues(adapter); + if (err) + goto out_free_dev; adapter->smt = t4_init_smt(); if (!adapter->smt) { @@ -5705,7 +5768,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) { u32 hash_base, hash_reg; - if (chip <= CHELSIO_T5) { + if (chip_ver <= CHELSIO_T5) { hash_reg = LE_DB_TID_HASHBASE_A; hash_base = t4_read_reg(adapter, hash_reg); adapter->tids.hash_base = hash_base / 4; @@ -5740,6 +5803,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_free_dev; } +fw_attach_fail: /* * The card is now ready to go. 
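The queue-set arithmetic introduced in cfg_queues() above is worth seeing in isolation: one ingress queue is always reserved for the async firmware event queue, a second one when not using MSI-X, and every Ethernet queue set consumes two egress queues (free list plus TX queue). A standalone sketch under those assumptions, with the 32-qset cap mirroring max_eth_qsets; avail_eth_qsets() here is an invented stand-alone helper, not the driver's code:

#include <stdio.h>

static int avail_eth_qsets(unsigned int niqflint, unsigned int neq,
			   int using_msix)
{
	int iq = (int)niqflint - 1;	/* reserve the FW event queue */
	int eq, avail;

	if (!using_msix)		/* plus a forwarded-interrupt queue */
		iq--;
	eq = (int)(neq / 2);		/* FL + TXQ per Ethernet queue set */
	avail = iq < eq ? iq : eq;
	return avail > 32 ? 32 : avail;	/* cap mirrors max_eth_qsets */
}

int main(void)
{
	/* e.g. firmware provisions 68 IQs-with-FL and 124 EQs under MSI-X:
	 * min(68 - 1, 124 / 2) = 62, capped to 32 queue sets.
	 */
	printf("%d\n", avail_eth_qsets(68, 124, 1));
	return 0;
}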
If any errors occur during device * registration we do not fail the whole card but rather proceed only diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index 3ddd2c4acf68..623f73dd7738 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -874,6 +874,9 @@ int cxgb4_init_tc_flower(struct adapter *adap) { int ret; + if (adap->tc_flower_initialized) + return -EEXIST; + adap->flower_ht_params = cxgb4_tc_flower_ht_params; ret = rhashtable_init(&adap->flower_tbl, &adap->flower_ht_params); if (ret) @@ -882,13 +885,18 @@ int cxgb4_init_tc_flower(struct adapter *adap) INIT_WORK(&adap->flower_stats_work, ch_flower_stats_handler); timer_setup(&adap->flower_stats_timer, ch_flower_stats_cb, 0); mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD); + adap->tc_flower_initialized = true; return 0; } void cxgb4_cleanup_tc_flower(struct adapter *adap) { + if (!adap->tc_flower_initialized) + return; + if (adap->flower_stats_timer.function) del_timer_sync(&adap->flower_stats_timer); cancel_work_sync(&adap->flower_stats_work); rhashtable_destroy(&adap->flower_tbl); + adap->tc_flower_initialized = false; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c index 9148abb7994c..7fc656680299 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sched.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c @@ -539,6 +539,9 @@ void t4_cleanup_sched(struct adapter *adap) struct port_info *pi = netdev2pinfo(adap->port[j]); s = pi->sched_tbl; + if (!s) + continue; + for (i = 0; i < s->sched_size; i++) { struct sched_class *e; diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 395e2a0e8d7f..6807bc3a44fb 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -1288,13 +1288,13 @@ static inline void t6_fill_tnl_lso(struct sk_buff *skb, } /** - * t4_eth_xmit - add a packet to an Ethernet Tx queue + * cxgb4_eth_xmit - add a packet to an Ethernet Tx queue * @skb: the packet * @dev: the egress net device * * Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled. */ -netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev) { u32 wr_mid, ctrl0, op; u64 cntrl, *end, *sgl; @@ -1547,6 +1547,374 @@ out_free: dev_kfree_skb_any(skb); return NETDEV_TX_OK; } +/* Constants ... */ +enum { + /* Egress Queue sizes, producer and consumer indices are all in units + * of Egress Context Units bytes. Note that as far as the hardware is + * concerned, the free list is an Egress Queue (the host produces free + * buffers which the hardware consumes) and free list entries are + * 64-bit PCI DMA addresses. + */ + EQ_UNIT = SGE_EQ_IDXSIZE, + FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64), + TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64), + + T4VF_ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) + + sizeof(struct cpl_tx_pkt_lso_core) + + sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64), +}; + +/** + * t4vf_is_eth_imm - can an Ethernet packet be sent as immediate data? + * @skb: the packet + * + * Returns whether an Ethernet packet is small enough to fit completely as + * immediate data. 
+ */ +static inline int t4vf_is_eth_imm(const struct sk_buff *skb) +{ + /* The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request + * which does not accommodate immediate data. We could dike out all + * of the support code for immediate data but that would tie our hands + * too much if we ever want to enhance the firmware. It would also + * create more differences between the PF and VF Drivers. + */ + return false; +} + +/** + * t4vf_calc_tx_flits - calculate the number of flits for a packet TX WR + * @skb: the packet + * + * Returns the number of flits needed for a TX Work Request for the + * given Ethernet packet, including the needed WR and CPL headers. + */ +static inline unsigned int t4vf_calc_tx_flits(const struct sk_buff *skb) +{ + unsigned int flits; + + /* If the skb is small enough, we can pump it out as a work request + * with only immediate data. In that case we just have to have the + * TX Packet header plus the skb data in the Work Request. + */ + if (t4vf_is_eth_imm(skb)) + return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), + sizeof(__be64)); + + /* Otherwise, we're going to have to construct a Scatter gather list + * of the skb body and fragments. We also include the flits necessary + * for the TX Packet Work Request and CPL. We always have a firmware + * Write Header (incorporated as part of the cpl_tx_pkt_lso and + * cpl_tx_pkt structures), followed by either a TX Packet Write CPL + * message or, if we're doing a Large Send Offload, an LSO CPL message + * with an embedded TX Packet Write CPL message. + */ + flits = sgl_len(skb_shinfo(skb)->nr_frags + 1); + if (skb_shinfo(skb)->gso_size) + flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) + + sizeof(struct cpl_tx_pkt_lso_core) + + sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); + else + flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) + + sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); + return flits; +} + +/** + * cxgb4_vf_eth_xmit - add a packet to an Ethernet TX queue + * @skb: the packet + * @dev: the egress net device + * + * Add a packet to an SGE Ethernet TX queue. Runs with softirqs disabled. + */ +static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + dma_addr_t addr[MAX_SKB_FRAGS + 1]; + const struct skb_shared_info *ssi; + struct fw_eth_tx_pkt_vm_wr *wr; + int qidx, credits, max_pkt_len; + struct cpl_tx_pkt_core *cpl; + const struct port_info *pi; + unsigned int flits, ndesc; + struct sge_eth_txq *txq; + struct adapter *adapter; + u64 cntrl, *end; + u32 wr_mid; + const size_t fw_hdr_copy_len = sizeof(wr->ethmacdst) + + sizeof(wr->ethmacsrc) + + sizeof(wr->ethtype) + + sizeof(wr->vlantci); + + /* The chip minimum packet length is 10 octets but the firmware + * command that we are using requires that we copy the Ethernet header + * (including the VLAN tag) into the header so we reject anything + * smaller than that ... + */ + if (unlikely(skb->len < fw_hdr_copy_len)) + goto out_free; + + /* Discard the packet if the length is greater than mtu */ + max_pkt_len = ETH_HLEN + dev->mtu; + if (skb_vlan_tag_present(skb)) + max_pkt_len += VLAN_HLEN; + if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len))) + goto out_free; + + /* Figure out which TX Queue we're going to use. */ + pi = netdev_priv(dev); + adapter = pi->adapter; + qidx = skb_get_queue_mapping(skb); + WARN_ON(qidx >= pi->nqsets); + txq = &adapter->sge.ethtxq[pi->first_qset + qidx]; + + /* Take this opportunity to reclaim any TX Descriptors whose DMA + * transfers have completed. 
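t4vf_calc_tx_flits() above reduces to two pieces of arithmetic from sge.c: sgl_len(), the flit count of a ULPTX scatter/gather list, and flits_to_desc(), which rounds flits up to 64-byte descriptors. A standalone restatement of that math, assuming the usual cxgb4 definitions (eight 8-byte flits per descriptor) and the 8-flit TSO header implied by T4VF_ETHTXQ_MAX_HDR; the _sketch names are invented:

#include <stdio.h>

/* Flits for a ULPTX SGL with n DMA addresses: the first address rides in
 * the two header flits; each later pair of addresses costs three flits,
 * an odd leftover costs two.
 */
static unsigned int sgl_len_sketch(unsigned int n)
{
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}

static unsigned int flits_to_desc_sketch(unsigned int flits)
{
	return (flits + 7) / 8;	/* 8 flits (64 bytes) per TX descriptor */
}

int main(void)
{
	/* skb with linear data plus two page fragments: 3 SGL addresses */
	unsigned int flits = sgl_len_sketch(3);

	/* TSO case: VM WR + LSO CPL + TX packet CPL, assumed 8 flits */
	flits += 8;
	printf("%u flits -> %u descriptors\n",
	       flits, flits_to_desc_sketch(flits));	/* 13 -> 2 */
	return 0;
}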
+ */ + cxgb4_reclaim_completed_tx(adapter, &txq->q, true); + + /* Calculate the number of flits and TX Descriptors we're going to + * need along with how many TX Descriptors will be left over after + * we inject our Work Request. + */ + flits = t4vf_calc_tx_flits(skb); + ndesc = flits_to_desc(flits); + credits = txq_avail(&txq->q) - ndesc; + + if (unlikely(credits < 0)) { + /* Not enough room for this packet's Work Request. Stop the + * TX Queue and return a "busy" condition. The queue will get + * started later on when the firmware informs us that space + * has opened up. + */ + eth_txq_stop(txq); + dev_err(adapter->pdev_dev, + "%s: TX ring %u full while queue awake!\n", + dev->name, qidx); + return NETDEV_TX_BUSY; + } + + if (!t4vf_is_eth_imm(skb) && + unlikely(cxgb4_map_skb(adapter->pdev_dev, skb, addr) < 0)) { + /* We need to map the skb into PCI DMA space (because it can't + * be in-lined directly into the Work Request) and the mapping + * operation failed. Record the error and drop the packet. + */ + txq->mapping_err++; + goto out_free; + } + + wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2)); + if (unlikely(credits < ETHTXQ_STOP_THRES)) { + /* After we're done injecting the Work Request for this + * packet, we'll be below our "stop threshold" so stop the TX + * Queue now and schedule a request for an SGE Egress Queue + * Update message. The queue will get started later on when + * the firmware processes this Work Request and sends us an + * Egress Queue Status Update message indicating that space + * has opened up. + */ + eth_txq_stop(txq); + wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; + } + + /* Start filling in our Work Request. Note that we do _not_ handle + * the WR Header wrapping around the TX Descriptor Ring. If our + * maximum header size ever exceeds one TX Descriptor, we'll need to + * do something else here. + */ + WARN_ON(DIV_ROUND_UP(T4VF_ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1); + wr = (void *)&txq->q.desc[txq->q.pidx]; + wr->equiq_to_len16 = cpu_to_be32(wr_mid); + wr->r3[0] = cpu_to_be32(0); + wr->r3[1] = cpu_to_be32(0); + skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len); + end = (u64 *)wr + flits; + + /* If this is a Large Send Offload packet we'll put in an LSO CPL + * message with an encapsulated TX Packet CPL message. Otherwise we + * just use a TX Packet CPL message. + */ + ssi = skb_shinfo(skb); + if (ssi->gso_size) { + struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); + bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0; + int l3hdr_len = skb_network_header_len(skb); + int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; + + wr->op_immdlen = + cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) | + FW_WR_IMMDLEN_V(sizeof(*lso) + + sizeof(*cpl))); + /* Fill in the LSO CPL message. */ + lso->lso_ctrl = + cpu_to_be32(LSO_OPCODE_V(CPL_TX_PKT_LSO) | + LSO_FIRST_SLICE_F | + LSO_LAST_SLICE_F | + LSO_IPV6_V(v6) | + LSO_ETHHDR_LEN_V(eth_xtra_len / 4) | + LSO_IPHDR_LEN_V(l3hdr_len / 4) | + LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff)); + lso->ipid_ofst = cpu_to_be16(0); + lso->mss = cpu_to_be16(ssi->gso_size); + lso->seqno_offset = cpu_to_be32(0); + if (is_t4(adapter->params.chip)) + lso->len = cpu_to_be32(skb->len); + else + lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len)); + + /* Set up TX Packet CPL pointer, control word and perform + * accounting. 
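The credit handling just above is the standard cxgb4 admission pattern: compute the descriptors a packet needs, subtract from what is free, stop the queue outright when there is no room, or send but stop and request an egress-queue update when dipping under the reclaim headroom. Condensed into a hedged, self-contained form (txq_avail() semantics and ETHTXQ_STOP_THRES are assumed; the enum and helper are invented):

enum tx_verdict { TX_SEND, TX_SEND_THEN_STOP, TX_STOP_BUSY };

/* avail: free descriptors in the ring; ndesc: descriptors this packet
 * needs; stop_thres: headroom below which we stop and wait for reclaim.
 */
static enum tx_verdict tx_admit_sketch(int avail, int ndesc, int stop_thres)
{
	int credits = avail - ndesc;

	if (credits < 0)
		return TX_STOP_BUSY;		/* ring full: NETDEV_TX_BUSY */
	if (credits < stop_thres)
		return TX_SEND_THEN_STOP;	/* send, stop, ask for EQ update */
	return TX_SEND;
}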
+ */ + cpl = (void *)(lso + 1); + + if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) + cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len); + else + cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len); + + cntrl |= TXPKT_CSUM_TYPE_V(v6 ? + TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | + TXPKT_IPHDR_LEN_V(l3hdr_len); + txq->tso++; + txq->tx_cso += ssi->gso_segs; + } else { + int len; + + len = (t4vf_is_eth_imm(skb) + ? skb->len + sizeof(*cpl) + : sizeof(*cpl)); + wr->op_immdlen = + cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) | + FW_WR_IMMDLEN_V(len)); + + /* Set up TX Packet CPL pointer, control word and perform + * accounting. + */ + cpl = (void *)(wr + 1); + if (skb->ip_summed == CHECKSUM_PARTIAL) { + cntrl = hwcsum(adapter->params.chip, skb) | + TXPKT_IPCSUM_DIS_F; + txq->tx_cso++; + } else { + cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F; + } + } + + /* If there's a VLAN tag present, add that to the list of things to + * do in this Work Request. + */ + if (skb_vlan_tag_present(skb)) { + txq->vlan_ins++; + cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb)); + } + + /* Fill in the TX Packet CPL message header. */ + cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) | + TXPKT_INTF_V(pi->port_id) | + TXPKT_PF_V(0)); + cpl->pack = cpu_to_be16(0); + cpl->len = cpu_to_be16(skb->len); + cpl->ctrl1 = cpu_to_be64(cntrl); + + /* Fill in the body of the TX Packet CPL message with either in-lined + * data or a Scatter/Gather List. + */ + if (t4vf_is_eth_imm(skb)) { + /* In-line the packet's data and free the skb since we don't + * need it any longer. + */ + cxgb4_inline_tx_skb(skb, &txq->q, cpl + 1); + dev_consume_skb_any(skb); + } else { + /* Write the skb's Scatter/Gather list into the TX Packet CPL + * message and retain a pointer to the skb so we can free it + * later when its DMA completes. (We store the skb pointer + * in the Software Descriptor corresponding to the last TX + * Descriptor used by the Work Request.) + * + * The retained skb will be freed when the corresponding TX + * Descriptors are reclaimed after their DMAs complete. + * However, this could take quite a while since, in general, + * the hardware is set up to be lazy about sending DMA + * completion notifications to us and we mostly perform TX + * reclaims in the transmit routine. + * + * This is good for performance but means that we rely on new + * TX packets arriving to run the destructors of completed + * packets, which open up space in their sockets' send queues. + * Sometimes we do not get such new packets causing TX to + * stall. A single UDP transmitter is a good example of this + * situation. We have a clean up timer that periodically + * reclaims completed packets but it doesn't run often enough + * (nor do we want it to) to prevent lengthy stalls. A + * solution to this problem is to run the destructor early, + * after the packet is queued but before it's DMAd. A con is + * that we lie to socket memory accounting, but the amount of + * extra memory is reasonable (limited by the number of TX + * descriptors), the packets do actually get freed quickly by + * new packets almost always, and for protocols like TCP that + * wait for acks to really free up the data the extra memory + * is even less. On the positive side we run the destructors + * on the sending CPU rather than on a potentially different + * completing CPU, usually a good thing. + * + * Run the destructor before telling the DMA engine about the + * packet to make sure it doesn't complete and get freed + * prematurely. 
+ */ + struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1); + struct sge_txq *tq = &txq->q; + int last_desc; + + /* If the Work Request header was an exact multiple of our TX + * Descriptor length, then it's possible that the starting SGL + * pointer lines up exactly with the end of our TX Descriptor + * ring. If that's the case, wrap around to the beginning + * here ... + */ + if (unlikely((void *)sgl == (void *)tq->stat)) { + sgl = (void *)tq->desc; + end = (void *)((void *)tq->desc + + ((void *)end - (void *)tq->stat)); + } + + cxgb4_write_sgl(skb, tq, sgl, end, 0, addr); + skb_orphan(skb); + + last_desc = tq->pidx + ndesc - 1; + if (last_desc >= tq->size) + last_desc -= tq->size; + tq->sdesc[last_desc].skb = skb; + tq->sdesc[last_desc].sgl = sgl; + } + + /* Advance our internal TX Queue state, tell the hardware about + * the new TX descriptors and return success. + */ + txq_advance(&txq->q, ndesc); + + cxgb4_ring_tx_db(adapter, &txq->q, ndesc); + return NETDEV_TX_OK; + +out_free: + /* An error of some sort happened. Free the TX skb and tell the + * OS that we've "dealt" with the packet ... + */ + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct port_info *pi = netdev_priv(dev); + + if (unlikely(pi->eth_flags & PRIV_FLAG_PORT_TX_VM)) + return cxgb4_vf_eth_xmit(skb, dev); + + return cxgb4_eth_xmit(skb, dev); +} + /** * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs * @q: the SGE control Tx queue @@ -3044,7 +3412,9 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, c.iqsize = htons(iq->size); c.iqaddr = cpu_to_be64(iq->phys_addr); if (cong >= 0) - c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F); + c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F | + FW_IQ_CMD_IQTYPE_V(cong ? FW_IQ_IQTYPE_NIC + : FW_IQ_IQTYPE_OFLD)); if (fl) { enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 3720c3e11ebb..2d9943f90a75 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -2882,6 +2882,57 @@ int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p) return 0; } +/** + * t4_get_pfres - retrieve PF resource limits + * @adapter: the adapter + * + * Retrieves configured resource limits and capabilities for a physical + * function. The results are stored in @adapter->pfres. + */ +int t4_get_pfres(struct adapter *adapter) +{ + struct pf_resources *pfres = &adapter->params.pfres; + struct fw_pfvf_cmd cmd, rpl; + int v; + u32 word; + + /* Execute PFVF Read command to get PF resource limits; bail out early + * with error on command failure. + */ + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F | + FW_PFVF_CMD_PFN_V(adapter->pf) | + FW_PFVF_CMD_VFN_V(0)); + cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); + v = t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &rpl); + if (v != FW_SUCCESS) + return v; + + /* Extract PF resource limits and return success. 
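The field extraction that follows, like the new FREEPSTRUCTCOUNT_*/FREETXPAGECOUNT_* register fields further below, uses the cxgb4 S/M/V/G macro convention: _S is the shift, _M the field mask, _V packs a value, _G unpacks one. A self-contained illustration with an invented two-bit field:

#include <stdio.h>

/* FOOBAR is invented; it mimics FW_IQ_CMD_IQTYPE_* (bits 25:24). */
#define FOOBAR_S	24
#define FOOBAR_M	0x3U
#define FOOBAR_V(x)	((x) << FOOBAR_S)
#define FOOBAR_G(x)	(((x) >> FOOBAR_S) & FOOBAR_M)

int main(void)
{
	unsigned int word = FOOBAR_V(2U) | 0xbeefU;	/* pack the field */

	/* prints "FOOBAR=2 low16=0xbeef" */
	printf("FOOBAR=%u low16=0x%x\n", FOOBAR_G(word), word & 0xffffU);
	return 0;
}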
+ */ + word = be32_to_cpu(rpl.niqflint_niq); + pfres->niqflint = FW_PFVF_CMD_NIQFLINT_G(word); + pfres->niq = FW_PFVF_CMD_NIQ_G(word); + + word = be32_to_cpu(rpl.type_to_neq); + pfres->neq = FW_PFVF_CMD_NEQ_G(word); + pfres->pmask = FW_PFVF_CMD_PMASK_G(word); + + word = be32_to_cpu(rpl.tc_to_nexactf); + pfres->tc = FW_PFVF_CMD_TC_G(word); + pfres->nvi = FW_PFVF_CMD_NVI_G(word); + pfres->nexactf = FW_PFVF_CMD_NEXACTF_G(word); + + word = be32_to_cpu(rpl.r_caps_to_nethctrl); + pfres->r_caps = FW_PFVF_CMD_R_CAPS_G(word); + pfres->wx_caps = FW_PFVF_CMD_WX_CAPS_G(word); + pfres->nethctrl = FW_PFVF_CMD_NETHCTRL_G(word); + + return 0; +} + /* serial flash and firmware constants */ enum { SF_ATTEMPTS = 10, /* max retries for SF operations */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index c7f8d0441278..e3adf435913e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h @@ -188,6 +188,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x50ab), /* Custom T520-CR */ CH_PCI_ID_TABLE_FENTRY(0x50ac), /* Custom T540-BT */ CH_PCI_ID_TABLE_FENTRY(0x50ad), /* Custom T520-CR */ + CH_PCI_ID_TABLE_FENTRY(0x50ae), /* Custom T540-XL-SO */ /* T6 adapters: */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index 6b55aa2eb2a5..eb222d40ddbf 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -1502,6 +1502,25 @@ #define TP_MIB_DATA_A 0x7e54 #define TP_INT_CAUSE_A 0x7e74 +#define TP_FLM_FREE_PS_CNT_A 0x7e80 +#define TP_FLM_FREE_RX_CNT_A 0x7e84 + +#define FREEPSTRUCTCOUNT_S 0 +#define FREEPSTRUCTCOUNT_M 0x1fffffU +#define FREEPSTRUCTCOUNT_G(x) (((x) >> FREEPSTRUCTCOUNT_S) & FREEPSTRUCTCOUNT_M) + +#define FREERXPAGECOUNT_S 0 +#define FREERXPAGECOUNT_M 0x1fffffU +#define FREERXPAGECOUNT_V(x) ((x) << FREERXPAGECOUNT_S) +#define FREERXPAGECOUNT_G(x) (((x) >> FREERXPAGECOUNT_S) & FREERXPAGECOUNT_M) + +#define TP_FLM_FREE_TX_CNT_A 0x7e88 + +#define FREETXPAGECOUNT_S 0 +#define FREETXPAGECOUNT_M 0x1fffffU +#define FREETXPAGECOUNT_V(x) ((x) << FREETXPAGECOUNT_S) +#define FREETXPAGECOUNT_G(x) (((x) >> FREETXPAGECOUNT_S) & FREETXPAGECOUNT_M) + #define FLMTXFLSTEMPTY_S 30 #define FLMTXFLSTEMPTY_V(x) ((x) << FLMTXFLSTEMPTY_S) #define FLMTXFLSTEMPTY_F FLMTXFLSTEMPTY_V(1U) @@ -1683,6 +1702,16 @@ #define ULP_TX_LA_RDPTR_0_A 0x8ec0 #define ULP_TX_LA_RDDATA_0_A 0x8ec4 #define ULP_TX_LA_WRPTR_0_A 0x8ec8 +#define ULP_TX_ASIC_DEBUG_CTRL_A 0x8f70 + +#define ULP_TX_ASIC_DEBUG_0_A 0x8f74 +#define ULP_TX_ASIC_DEBUG_1_A 0x8f78 +#define ULP_TX_ASIC_DEBUG_2_A 0x8f7c +#define ULP_TX_ASIC_DEBUG_3_A 0x8f80 +#define ULP_TX_ASIC_DEBUG_4_A 0x8f84 + +/* registers for module PM_RX */ +#define PM_RX_BASE_ADDR 0x8fc0 #define PMRX_E_PCMD_PAR_ERROR_S 0 #define PMRX_E_PCMD_PAR_ERROR_V(x) ((x) << PMRX_E_PCMD_PAR_ERROR_S) diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index f1967cf6d43c..5dc6c4154af8 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -1472,6 +1472,12 @@ enum fw_iq_type { FW_IQ_TYPE_NO_FL_INT_CAP }; +enum fw_iq_iqtype { + FW_IQ_IQTYPE_OTHER, + FW_IQ_IQTYPE_NIC, + FW_IQ_IQTYPE_OFLD, +}; + struct fw_iq_cmd { __be32 op_to_vfn; __be32 alloc_to_len16; @@ -1586,6 +1592,12 @@ struct fw_iq_cmd { #define FW_IQ_CMD_IQFLINTISCSIC_S 26 #define 
FW_IQ_CMD_IQFLINTISCSIC_V(x) ((x) << FW_IQ_CMD_IQFLINTISCSIC_S) +#define FW_IQ_CMD_IQTYPE_S 24 +#define FW_IQ_CMD_IQTYPE_M 0x3 +#define FW_IQ_CMD_IQTYPE_V(x) ((x) << FW_IQ_CMD_IQTYPE_S) +#define FW_IQ_CMD_IQTYPE_G(x) \ + (((x) >> FW_IQ_CMD_IQTYPE_S) & FW_IQ_CMD_IQTYPE_M) + #define FW_IQ_CMD_FL0CNGCHMAP_S 20 #define FW_IQ_CMD_FL0CNGCHMAP_V(x) ((x) << FW_IQ_CMD_FL0CNGCHMAP_S) diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c index 0ed161642371..74849be5f004 100644 --- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c +++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c @@ -412,12 +412,10 @@ int cxgbi_ppm_init(void **ppm_pp, struct net_device *ndev, ppmax * (sizeof(struct cxgbi_ppod_data)) + ppod_bmap_size * sizeof(unsigned long); - ppm = vmalloc(alloc_sz); + ppm = vzalloc(alloc_sz); if (!ppm) goto release_ppm_pool; - memset(ppm, 0, alloc_sz); - ppm->ppod_bmap = (unsigned long *)(&ppm->ppod_data[ppmax]); if ((ppod_bmap_size >> 3) > (ppmax - ppmax_pool)) { diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index 6d7404f66f84..1c9ad3630c77 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -46,6 +46,11 @@ #define DRV_NAME "gmac-gemini" #define DRV_VERSION "1.0" +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + #define HSIZE_8 0x00 #define HSIZE_16 0x01 #define HSIZE_32 0x02 @@ -146,6 +151,7 @@ struct gemini_ethernet { void __iomem *base; struct gemini_ethernet_port *port0; struct gemini_ethernet_port *port1; + bool initialized; spinlock_t irq_lock; /* Locks IRQ-related registers */ unsigned int freeq_order; @@ -300,23 +306,26 @@ static void gmac_speed_set(struct net_device *netdev) status.bits.speed = GMAC_SPEED_1000; if (phydev->interface == PHY_INTERFACE_MODE_RGMII) status.bits.mii_rmii = GMAC_PHY_RGMII_1000; - netdev_info(netdev, "connect to RGMII @ 1Gbit\n"); + netdev_dbg(netdev, "connect %s to RGMII @ 1Gbit\n", + phydev_name(phydev)); break; case 100: status.bits.speed = GMAC_SPEED_100; if (phydev->interface == PHY_INTERFACE_MODE_RGMII) status.bits.mii_rmii = GMAC_PHY_RGMII_100_10; - netdev_info(netdev, "connect to RGMII @ 100 Mbit\n"); + netdev_dbg(netdev, "connect %s to RGMII @ 100 Mbit\n", + phydev_name(phydev)); break; case 10: status.bits.speed = GMAC_SPEED_10; if (phydev->interface == PHY_INTERFACE_MODE_RGMII) status.bits.mii_rmii = GMAC_PHY_RGMII_100_10; - netdev_info(netdev, "connect to RGMII @ 10 Mbit\n"); + netdev_dbg(netdev, "connect %s to RGMII @ 10 Mbit\n", + phydev_name(phydev)); break; default: - netdev_warn(netdev, "Not supported PHY speed (%d)\n", - phydev->speed); + netdev_warn(netdev, "Unsupported PHY speed (%d) on %s\n", + phydev->speed, phydev_name(phydev)); } if (phydev->duplex == DUPLEX_FULL) { @@ -363,12 +372,6 @@ static int gmac_setup_phy(struct net_device *netdev) return -ENODEV; netdev->phydev = phy; - netdev_info(netdev, "connected to PHY \"%s\"\n", - phydev_name(phy)); - phy_attached_print(phy, "phy_id=0x%.8lx, phy_mode=%s\n", - (unsigned long)phy->phy_id, - phy_modes(phy->interface)); - phy->supported &= PHY_GBIT_FEATURES; phy->supported |= SUPPORTED_Asym_Pause | SUPPORTED_Pause; phy->advertising = phy->supported; @@ -376,19 +379,19 @@ static int gmac_setup_phy(struct net_device *netdev) /* set PHY interface type */ switch (phy->interface) { case 
PHY_INTERFACE_MODE_MII: - netdev_info(netdev, "set GMAC0 to GMII mode, GMAC1 disabled\n"); + netdev_dbg(netdev, + "MII: set GMAC0 to GMII mode, GMAC1 disabled\n"); status.bits.mii_rmii = GMAC_PHY_MII; - netdev_info(netdev, "connect to MII\n"); break; case PHY_INTERFACE_MODE_GMII: - netdev_info(netdev, "set GMAC0 to GMII mode, GMAC1 disabled\n"); + netdev_dbg(netdev, + "GMII: set GMAC0 to GMII mode, GMAC1 disabled\n"); status.bits.mii_rmii = GMAC_PHY_GMII; - netdev_info(netdev, "connect to GMII\n"); break; case PHY_INTERFACE_MODE_RGMII: - dev_info(dev, "set GMAC0 and GMAC1 to MII/RGMII mode\n"); + netdev_dbg(netdev, + "RGMII: set GMAC0 and GMAC1 to MII/RGMII mode\n"); status.bits.mii_rmii = GMAC_PHY_RGMII_100_10; - netdev_info(netdev, "connect to RGMII\n"); break; default: netdev_err(netdev, "Unsupported MII interface\n"); @@ -398,29 +401,63 @@ static int gmac_setup_phy(struct net_device *netdev) } writel(status.bits32, port->gmac_base + GMAC_STATUS); + if (netif_msg_link(port)) + phy_attached_info(phy); + return 0; } -static int gmac_pick_rx_max_len(int max_l3_len) -{ - /* index = CONFIG_MAXLEN_XXX values */ - static const int max_len[8] = { - 1536, 1518, 1522, 1542, - 9212, 10236, 1518, 1518 - }; - int i, n = 5; +/* The maximum frame length is not logically enumerated in the + * hardware, so we do a table lookup to find the applicable max + * frame length. + */ +struct gmac_max_framelen { + unsigned int max_l3_len; + u8 val; +}; + +static const struct gmac_max_framelen gmac_maxlens[] = { + { + .max_l3_len = 1518, + .val = CONFIG0_MAXLEN_1518, + }, + { + .max_l3_len = 1522, + .val = CONFIG0_MAXLEN_1522, + }, + { + .max_l3_len = 1536, + .val = CONFIG0_MAXLEN_1536, + }, + { + .max_l3_len = 1542, + .val = CONFIG0_MAXLEN_1542, + }, + { + .max_l3_len = 9212, + .val = CONFIG0_MAXLEN_9k, + }, + { + .max_l3_len = 10236, + .val = CONFIG0_MAXLEN_10k, + }, +}; - max_l3_len += ETH_HLEN + VLAN_HLEN; +static int gmac_pick_rx_max_len(unsigned int max_l3_len) +{ + const struct gmac_max_framelen *maxlen; + int maxtot; + int i; - if (max_l3_len > max_len[n]) - return -1; + maxtot = max_l3_len + ETH_HLEN + VLAN_HLEN; - for (i = 0; i < 5; i++) { - if (max_len[i] >= max_l3_len && max_len[i] < max_len[n]) - n = i; + for (i = 0; i < ARRAY_SIZE(gmac_maxlens); i++) { + maxlen = &gmac_maxlens[i]; + if (maxtot <= maxlen->max_l3_len) + return maxlen->val; } - return n; + return -1; } static int gmac_init(struct net_device *netdev) @@ -1276,8 +1313,8 @@ static void gmac_enable_irq(struct net_device *netdev, int enable) unsigned long flags; u32 val, mask; - netdev_info(netdev, "%s device %d %s\n", __func__, - netdev->dev_id, enable ? "enable" : "disable"); + netdev_dbg(netdev, "%s device %d %s\n", __func__, + netdev->dev_id, enable ? "enable" : "disable"); spin_lock_irqsave(&geth->irq_lock, flags); mask = GMAC0_IRQ0_2 << (netdev->dev_id * 2); @@ -1753,7 +1790,10 @@ static int gmac_open(struct net_device *netdev) phy_start(netdev->phydev); err = geth_resize_freeq(port); - if (err) { + /* It's fine if it's just busy, the other port has set up + * the freeq in that case. 
+ */ + if (err && (err != -EBUSY)) { netdev_err(netdev, "could not resize freeq\n"); goto err_stop_phy; } @@ -1782,7 +1822,7 @@ static int gmac_open(struct net_device *netdev) HRTIMER_MODE_REL); port->rx_coalesce_timer.function = &gmac_coalesce_delay_expired; - netdev_info(netdev, "opened\n"); + netdev_dbg(netdev, "opened\n"); return 0; @@ -2264,6 +2304,14 @@ static void gemini_port_remove(struct gemini_ethernet_port *port) static void gemini_ethernet_init(struct gemini_ethernet *geth) { + /* Only do this once both ports are online */ + if (geth->initialized) + return; + if (geth->port0 && geth->port1) + geth->initialized = true; + else + return; + writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG); writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG); writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_2_REG); @@ -2354,6 +2402,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) port->id = id; port->geth = geth; port->dev = dev; + port->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); /* DMA memory */ dmares = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -2410,6 +2459,10 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) geth->port0 = port; else geth->port1 = port; + + /* This will just be done once both ports are up and reset */ + gemini_ethernet_init(geth); + platform_set_drvdata(pdev, port); /* Set up and register the netdev */ @@ -2423,6 +2476,11 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) netdev->hw_features = GMAC_OFFLOAD_FEATURES; netdev->features |= GMAC_OFFLOAD_FEATURES | NETIF_F_GRO; + /* We can handle jumbo frames up to 10236 bytes, so let's accept + * payloads of 10236 bytes minus VLAN and ethernet header + */ + netdev->min_mtu = ETH_MIN_MTU; + netdev->max_mtu = 10236 - VLAN_ETH_HLEN; port->freeq_refill = 0; netif_napi_add(netdev, &port->napi, gmac_napi_poll, @@ -2435,7 +2493,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) port->mac_addr[0], port->mac_addr[1], port->mac_addr[2]); dev_info(dev, "using a random ethernet address\n"); - random_ether_addr(netdev->dev_addr); + eth_random_addr(netdev->dev_addr); } gmac_write_mac_address(netdev); @@ -2527,7 +2585,6 @@ static int gemini_ethernet_probe(struct platform_device *pdev) spin_lock_init(&geth->irq_lock); spin_lock_init(&geth->freeq_lock); - gemini_ethernet_init(geth); /* The children will use this */ platform_set_drvdata(pdev, geth); @@ -2540,8 +2597,8 @@ static int gemini_ethernet_remove(struct platform_device *pdev) { struct gemini_ethernet *geth = platform_get_drvdata(pdev); - gemini_ethernet_init(geth); geth_cleanup_freeq(geth); + geth->initialized = false; return 0; } diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 382891f81e09..d80fe03d3107 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -37,7 +37,7 @@ #include "be_hw.h" #include "be_roce.h" -#define DRV_VER "11.4.0.0" +#define DRV_VER "12.0.0.0" #define DRV_NAME "be2net" #define BE_NAME "Emulex BladeEngine2" #define BE3_NAME "Emulex BladeEngine3" @@ -185,34 +185,13 @@ static inline void queue_tail_inc(struct be_queue_info *q) struct be_eq_obj { struct be_queue_info q; - char desc[32]; - - /* Adaptive interrupt coalescing (AIC) info */ - bool enable_aic; - u32 min_eqd; /* in usecs */ - u32 max_eqd; /* in usecs */ - u32 eqd; /* configured val when aic is off */ - u32 cur_eqd; /* in usecs */ + struct be_adapter *adapter; + struct napi_struct napi; u8 idx; /* array index 
*/ u8 msix_idx; u16 spurious_intr; - struct napi_struct napi; - struct be_adapter *adapter; cpumask_var_t affinity_mask; - -#ifdef CONFIG_NET_RX_BUSY_POLL -#define BE_EQ_IDLE 0 -#define BE_EQ_NAPI 1 /* napi owns this EQ */ -#define BE_EQ_POLL 2 /* poll owns this EQ */ -#define BE_EQ_LOCKED (BE_EQ_NAPI | BE_EQ_POLL) -#define BE_EQ_NAPI_YIELD 4 /* napi yielded this EQ */ -#define BE_EQ_POLL_YIELD 8 /* poll yielded this EQ */ -#define BE_EQ_YIELD (BE_EQ_NAPI_YIELD | BE_EQ_POLL_YIELD) -#define BE_EQ_USER_PEND (BE_EQ_POLL | BE_EQ_POLL_YIELD) - unsigned int state; - spinlock_t lock; /* lock to serialize napi and busy-poll */ -#endif /* CONFIG_NET_RX_BUSY_POLL */ } ____cacheline_aligned_in_smp; struct be_aic_obj { /* Adaptive interrupt coalescing (AIC) info */ @@ -238,7 +217,6 @@ struct be_tx_stats { u64 tx_vxlan_offload_pkts; u64 tx_reqs; u64 tx_compl; - ulong tx_jiffies; u32 tx_stops; u32 tx_drv_drops; /* pkts dropped by driver */ /* the error counters are described in be_ethtool.c */ @@ -261,9 +239,9 @@ struct be_tx_compl_info { struct be_tx_obj { u32 db_offset; + struct be_tx_compl_info txcp; struct be_queue_info q; struct be_queue_info cq; - struct be_tx_compl_info txcp; /* Remember the skbs that were transmitted */ struct sk_buff *sent_skb_list[TX_Q_LEN]; struct be_tx_stats stats; @@ -458,10 +436,10 @@ struct be_port_resources { #define be_is_os2bmc_enabled(adapter) (adapter->flags & BE_FLAGS_OS2BMC) struct rss_info { - u64 rss_flags; u8 rsstable[RSS_INDIR_TABLE_LEN]; u8 rss_queue[RSS_INDIR_TABLE_LEN]; u8 rss_hkey[RSS_HASH_KEY_LEN]; + u64 rss_flags; }; #define BE_INVALID_DIE_TEMP 0xFF @@ -544,11 +522,13 @@ enum { }; struct be_error_recovery { - /* Lancer error recovery variables */ - u8 recovery_retries; + union { + u8 recovery_retries; /* used for Lancer */ + u8 recovery_state; /* used for BEx and Skyhawk */ + }; /* BEx/Skyhawk error recovery variables */ - u8 recovery_state; + bool recovery_supported; u16 ue_to_reset_time; /* Time after UE, to soft reset * the chip - PF0 only */ @@ -556,7 +536,6 @@ struct be_error_recovery { * of SLIPORT_SEMAPHORE reg */ u16 last_err_code; - bool recovery_supported; unsigned long probe_time; unsigned long last_recovery_time; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 8f755009ff38..d0b9415d9ae7 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1412,6 +1412,83 @@ drop: return NETDEV_TX_OK; } +static void be_tx_timeout(struct net_device *netdev) +{ + struct be_adapter *adapter = netdev_priv(netdev); + struct device *dev = &adapter->pdev->dev; + struct be_tx_obj *txo; + struct sk_buff *skb; + struct tcphdr *tcphdr; + struct udphdr *udphdr; + u32 *entry; + int status; + int i, j; + + for_all_tx_queues(adapter, txo, i) { + dev_info(dev, "TXQ Dump: %d H: %d T: %d used: %d, qid: 0x%x\n", + i, txo->q.head, txo->q.tail, + atomic_read(&txo->q.used), txo->q.id); + + entry = txo->q.dma_mem.va; + for (j = 0; j < TX_Q_LEN * 4; j += 4) { + if (entry[j] != 0 || entry[j + 1] != 0 || + entry[j + 2] != 0 || entry[j + 3] != 0) { + dev_info(dev, "Entry %d 0x%x 0x%x 0x%x 0x%x\n", + j, entry[j], entry[j + 1], + entry[j + 2], entry[j + 3]); + } + } + + entry = txo->cq.dma_mem.va; + dev_info(dev, "TXCQ Dump: %d H: %d T: %d used: %d\n", + i, txo->cq.head, txo->cq.tail, + atomic_read(&txo->cq.used)); + for (j = 0; j < TX_CQ_LEN * 4; j += 4) { + if (entry[j] != 0 || entry[j + 1] != 0 || + entry[j + 2] != 0 || entry[j + 3] != 0) { + dev_info(dev, "Entry %d 
0x%x 0x%x 0x%x 0x%x\n", + j, entry[j], entry[j + 1], + entry[j + 2], entry[j + 3]); + } + } + + for (j = 0; j < TX_Q_LEN; j++) { + if (txo->sent_skb_list[j]) { + skb = txo->sent_skb_list[j]; + if (ip_hdr(skb)->protocol == IPPROTO_TCP) { + tcphdr = tcp_hdr(skb); + dev_info(dev, "TCP source port %d\n", + ntohs(tcphdr->source)); + dev_info(dev, "TCP dest port %d\n", + ntohs(tcphdr->dest)); + dev_info(dev, "TCP sequence num %u\n", + ntohl(tcphdr->seq)); + dev_info(dev, "TCP ack_seq %u\n", + ntohl(tcphdr->ack_seq)); + } else if (ip_hdr(skb)->protocol == + IPPROTO_UDP) { + udphdr = udp_hdr(skb); + dev_info(dev, "UDP source port %d\n", + ntohs(udphdr->source)); + dev_info(dev, "UDP dest port %d\n", + ntohs(udphdr->dest)); + } + dev_info(dev, "skb[%d] %p len %d proto 0x%x\n", + j, skb, skb->len, skb->protocol); + } + } + } + + if (lancer_chip(adapter)) { + dev_info(dev, "Initiating reset due to tx timeout\n"); + dev_info(dev, "Resetting adapter\n"); + status = lancer_physdev_ctrl(adapter, + PHYSDEV_CONTROL_FW_RESET_MASK); + if (status) + dev_err(dev, "Reset failed .. Reboot server\n"); + } +} + static inline bool be_in_all_promisc(struct be_adapter *adapter) { return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) == @@ -3274,7 +3351,7 @@ void be_detect_error(struct be_adapter *adapter) /* Do not log error messages if its a FW reset */ if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 && sliport_err2 == SLIPORT_ERROR_FW_RESET2) { - dev_info(dev, "Firmware update in progress\n"); + dev_info(dev, "Reset is in progress\n"); } else { dev_err(dev, "Error detected in the card\n"); dev_err(dev, "ERR: sliport status 0x%x\n", @@ -3403,9 +3480,11 @@ static int be_msix_register(struct be_adapter *adapter) int status, i, vec; for_all_evt_queues(adapter, eqo, i) { - sprintf(eqo->desc, "%s-q%d", netdev->name, i); + char irq_name[IFNAMSIZ+4]; + + snprintf(irq_name, sizeof(irq_name), "%s-q%d", netdev->name, i); vec = be_msix_vec_get(adapter, eqo); - status = request_irq(vec, be_msix, 0, eqo->desc, eqo); + status = request_irq(vec, be_msix, 0, irq_name, eqo); if (status) goto err_msix; @@ -5216,6 +5295,7 @@ static const struct net_device_ops be_netdev_ops = { .ndo_get_vf_config = be_get_vf_config, .ndo_set_vf_link_state = be_set_vf_link_state, .ndo_set_vf_spoofchk = be_set_vf_spoofchk, + .ndo_tx_timeout = be_tx_timeout, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = be_netpoll, #endif diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index ab02057ac730..65a22cd9aef2 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -1171,7 +1171,7 @@ static int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq, buf_prefix_content.priv_data_size = buf_layout->priv_data_size; buf_prefix_content.pass_prs_result = true; buf_prefix_content.pass_hash_result = true; - buf_prefix_content.pass_time_stamp = false; + buf_prefix_content.pass_time_stamp = true; buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT; params.specific_params.non_rx_params.err_fqid = errq->fqid; @@ -1213,7 +1213,7 @@ static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps, buf_prefix_content.priv_data_size = buf_layout->priv_data_size; buf_prefix_content.pass_prs_result = true; buf_prefix_content.pass_hash_result = true; - buf_prefix_content.pass_time_stamp = false; + buf_prefix_content.pass_time_stamp = true; buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT; rx_p = &params.specific_params.rx_params;
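The dpaa hunks around this point wire hardware TX timestamping end to end: dpaa_start_xmit() (below) marks the skb with SKBTX_IN_PROGRESS and asks the FMan to prepend a timestamp, and dpaa_cleanup_tx_fd() reads it back and hands it to the stack. A minimal sketch of the generic reporting step, assuming the nanosecond value has already been obtained from a device-specific read such as fman_port_get_tstamp(); toy_report_tx_tstamp() is illustrative, not driver code:

#include <linux/skbuff.h>
#include <linux/string.h>

static void toy_report_tx_tstamp(struct sk_buff *skb, u64 ns)
{
	struct skb_shared_hwtstamps shhwtstamps;

	/* only report if the stack asked for a HW timestamp on this skb */
	if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
		return;

	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ns_to_ktime(ns);
	skb_tstamp_tx(skb, &shhwtstamps);	/* clones the skb onto the socket error queue */
}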
@@ -1610,14 +1610,28 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv, { const enum dma_data_direction dma_dir = DMA_TO_DEVICE; struct device *dev = priv->net_dev->dev.parent; + struct skb_shared_hwtstamps shhwtstamps; dma_addr_t addr = qm_fd_addr(fd); const struct qm_sg_entry *sgt; struct sk_buff **skbh, *skb; int nr_frags, i; + u64 ns; skbh = (struct sk_buff **)phys_to_virt(addr); skb = *skbh; + if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + + if (!fman_port_get_tstamp(priv->mac_dev->port[TX], (void *)skbh, + &ns)) { + shhwtstamps.hwtstamp = ns_to_ktime(ns); + skb_tstamp_tx(skb, &shhwtstamps); + } else { + dev_warn(dev, "fman_port_get_tstamp failed!\n"); + } + } + if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) { nr_frags = skb_shinfo(skb)->nr_frags; dma_unmap_single(dev, addr, @@ -2087,6 +2101,11 @@ static int dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev) if (unlikely(err < 0)) goto skb_to_fd_failed; + if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { + fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD); + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + } + if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0)) return NETDEV_TX_OK; @@ -2228,6 +2247,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal, struct qman_fq *fq, const struct qm_dqrr_entry *dq) { + struct skb_shared_hwtstamps *shhwtstamps; struct rtnl_link_stats64 *percpu_stats; struct dpaa_percpu_priv *percpu_priv; const struct qm_fd *fd = &dq->fd; @@ -2241,6 +2261,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal, struct sk_buff *skb; int *count_ptr; void *vaddr; + u64 ns; fd_status = be32_to_cpu(fd->status); fd_format = qm_fd_get_format(fd); @@ -2305,6 +2326,16 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal, if (!skb) return qman_cb_dqrr_consume; + if (priv->rx_tstamp) { + shhwtstamps = skb_hwtstamps(skb); + memset(shhwtstamps, 0, sizeof(*shhwtstamps)); + + if (!fman_port_get_tstamp(priv->mac_dev->port[RX], vaddr, &ns)) + shhwtstamps->hwtstamp = ns_to_ktime(ns); + else + dev_warn(net_dev->dev.parent, "fman_port_get_tstamp failed!\n"); + } + skb->protocol = eth_type_trans(skb, net_dev); if (net_dev->features & NETIF_F_RXHASH && priv->keygen_in_use && @@ -2524,11 +2555,58 @@ static int dpaa_eth_stop(struct net_device *net_dev) return err; } +static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct dpaa_priv *priv = netdev_priv(dev); + struct hwtstamp_config config; + + if (copy_from_user(&config, rq->ifr_data, sizeof(config))) + return -EFAULT; + + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + /* Couldn't disable rx/tx timestamping separately. + * Do nothing here. + */ + priv->tx_tstamp = false; + break; + case HWTSTAMP_TX_ON: + priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true); + priv->tx_tstamp = true; + break; + default: + return -ERANGE; + } + + if (config.rx_filter == HWTSTAMP_FILTER_NONE) { + /* Couldn't disable rx/tx timestamping separately. + * Do nothing here. + */ + priv->rx_tstamp = false; + } else { + priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true); + priv->rx_tstamp = true; + /* TS is set for all frame types, not only those requested */ + config.rx_filter = HWTSTAMP_FILTER_ALL; + } + + return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0; +} + static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd) { - if (!net_dev->phydev) - return -EINVAL; - return phy_mii_ioctl(net_dev->phydev, rq, cmd); + int ret = -EINVAL; + + if (cmd == SIOCGMIIREG) { + if (net_dev->phydev) + return phy_mii_ioctl(net_dev->phydev, rq, cmd); + } + + if (cmd == SIOCSHWTSTAMP) + return dpaa_ts_ioctl(net_dev, rq, cmd); + + return ret; } static const struct net_device_ops dpaa_ops = { diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h index bd9422082f83..af320f83c742 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h @@ -182,6 +182,9 @@ struct dpaa_priv { struct dpaa_buffer_layout buf_layout[2]; u16 rx_headroom; + + bool tx_tstamp; /* Tx timestamping enabled */ + bool rx_tstamp; /* Rx timestamping enabled */ }; /* from dpaa_ethtool.c */ diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c index 2f933b6b2f4e..3184c8f7cdd0 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c @@ -32,6 +32,9 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/string.h> +#include <linux/of_platform.h> +#include <linux/net_tstamp.h> +#include <linux/fsl/ptp_qoriq.h> #include "dpaa_eth.h" #include "mac.h" @@ -515,6 +518,41 @@ static int dpaa_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) return ret; } +static int dpaa_get_ts_info(struct net_device *net_dev, + struct ethtool_ts_info *info) +{ + struct device *dev = net_dev->dev.parent; + struct device_node *mac_node = dev->of_node; + struct device_node *fman_node = NULL, *ptp_node = NULL; + struct platform_device *ptp_dev = NULL; + struct qoriq_ptp *ptp = NULL; + + info->phc_index = -1; + + fman_node = of_get_parent(mac_node); + if (fman_node) + ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0); + + if (ptp_node) + ptp_dev = of_find_device_by_node(ptp_node); + + if (ptp_dev) + ptp = platform_get_drvdata(ptp_dev); + + if (ptp) + info->phc_index = ptp->phc_index; + + info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + info->tx_types = (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON); + info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_ALL); + + return 0; +} + const struct ethtool_ops dpaa_ethtool_ops = { .get_drvinfo = dpaa_get_drvinfo, .get_msglevel = dpaa_get_msglevel, @@ -530,4 +568,5 @@ const struct ethtool_ops dpaa_ethtool_ops = { .set_link_ksettings = dpaa_set_link_ksettings, .get_rxnfc = dpaa_get_rxnfc, .set_rxnfc = dpaa_set_rxnfc, + .get_ts_info = dpaa_get_ts_info, }; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index c729665107f5..76366c735831 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -48,6 +48,7 @@ #include <linux/io.h> #include <linux/irq.h> #include <linux/clk.h> +#include <linux/crc32.h> #include <linux/platform_device.h> #include <linux/mdio.h> #include <linux/phy.h> @@ -2955,7 +2956,7 @@ static void set_multicast_list(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); struct netdev_hw_addr *ha; - unsigned int i, bit, data, crc, tmp; + unsigned int crc, tmp; unsigned char hash; unsigned int hash_high = 0, hash_low = 0; @@ -2983,15 +2984,7 @@ static void 
set_multicast_list(struct net_device *ndev) /* Add the addresses in hash register */ netdev_for_each_mc_addr(ha, ndev) { /* calculate crc32 value of mac address */ - crc = 0xffffffff; - - for (i = 0; i < ndev->addr_len; i++) { - data = ha->addr[i]; - for (bit = 0; bit < 8; bit++, data >>= 1) { - crc = (crc >> 1) ^ - (((crc ^ data) & 1) ? CRC32_POLY : 0); - } - } + crc = ether_crc_le(ndev->addr_len, ha->addr); /* only upper 6 bits (FEC_HASH_BITS) are used * which point to specific bit in the hash registers @@ -3136,6 +3129,7 @@ static int fec_enet_init(struct net_device *ndev) unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) : sizeof(struct bufdesc); unsigned dsize_log2 = __fls(dsize); + int ret; WARN_ON(dsize != (1 << dsize_log2)); #if defined(CONFIG_ARM) || defined(CONFIG_ARM64) @@ -3146,6 +3140,13 @@ static int fec_enet_init(struct net_device *ndev) fep->tx_align = 0x3; #endif + /* Check mask of the streaming and coherent API */ + ret = dma_set_mask_and_coherent(&fep->pdev->dev, DMA_BIT_MASK(32)); + if (ret < 0) { + dev_warn(&fep->pdev->dev, "No suitable DMA available\n"); + return ret; + } + fec_enet_alloc_queue(ndev); bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize; diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index 36c2d7d6ee1b..7e892b1cbd3d 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -99,7 +99,6 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable) { unsigned long flags; u32 val, tempval; - int inc; struct timespec64 ts; u64 ns; val = 0; @@ -114,7 +113,6 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable) fep->pps_channel = DEFAULT_PPS_CHANNEL; fep->reload_period = PPS_OUPUT_RELOAD_PERIOD; - inc = fep->ptp_inc; spin_lock_irqsave(&fep->tmreg_lock, flags); diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c index 9530405030a7..c415ac67cb7b 100644 --- a/drivers/net/ethernet/freescale/fman/fman.c +++ b/drivers/net/ethernet/freescale/fman/fman.c @@ -2801,7 +2801,8 @@ static struct fman *read_dts_node(struct platform_device *of_dev) of_node_put(muram_node); of_node_put(fm_node); - err = devm_request_irq(&of_dev->dev, irq, fman_irq, 0, "fman", fman); + err = devm_request_irq(&of_dev->dev, irq, fman_irq, IRQF_SHARED, + "fman", fman); if (err < 0) { dev_err(&of_dev->dev, "%s: irq %d allocation failed (error = %d)\n", __func__, irq, err); diff --git a/drivers/net/ethernet/freescale/fman/fman.h b/drivers/net/ethernet/freescale/fman/fman.h index bfa02e0014ae..935c317fa696 100644 --- a/drivers/net/ethernet/freescale/fman/fman.h +++ b/drivers/net/ethernet/freescale/fman/fman.h @@ -41,6 +41,7 @@ /* Frame queue Context Override */ #define FM_FD_CMD_FCO 0x80000000 #define FM_FD_CMD_RPD 0x40000000 /* Read Prepended Data */ +#define FM_FD_CMD_UPD 0x20000000 /* Update Prepended Data */ #define FM_FD_CMD_DTC 0x10000000 /* Do L4 Checksum */ /* TX-Port: Unsupported Format */ diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c index 57b1e2b47c0a..1ca543ac8f2c 100644 --- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c +++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c @@ -123,11 +123,13 @@ #define DTSEC_ECNTRL_R100M 0x00000008 #define DTSEC_ECNTRL_QSGMIIM 0x00000001 +#define TCTRL_TTSE 0x00000040 #define TCTRL_GTS 0x00000020 #define RCTRL_PAL_MASK 0x001f0000 #define RCTRL_PAL_SHIFT 16 #define RCTRL_GHTX 
0x00000400 +#define RCTRL_RTSE 0x00000040 #define RCTRL_GRS 0x00000020 #define RCTRL_MPROM 0x00000008 #define RCTRL_RSF 0x00000004 @@ -1136,6 +1138,31 @@ int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable) return 0; } +int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable) +{ + struct dtsec_regs __iomem *regs = dtsec->regs; + u32 rctrl, tctrl; + + if (!is_init_done(dtsec->dtsec_drv_param)) + return -EINVAL; + + rctrl = ioread32be(®s->rctrl); + tctrl = ioread32be(®s->tctrl); + + if (enable) { + rctrl |= RCTRL_RTSE; + tctrl |= TCTRL_TTSE; + } else { + rctrl &= ~RCTRL_RTSE; + tctrl &= ~TCTRL_TTSE; + } + + iowrite32be(rctrl, ®s->rctrl); + iowrite32be(tctrl, ®s->tctrl); + + return 0; +} + int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr) { struct dtsec_regs __iomem *regs = dtsec->regs; diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.h b/drivers/net/ethernet/freescale/fman/fman_dtsec.h index 1a689adf5a22..5149d96ec2c1 100644 --- a/drivers/net/ethernet/freescale/fman/fman_dtsec.h +++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.h @@ -56,5 +56,6 @@ int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr); int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr); int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version); int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable); +int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable); #endif /* __DTSEC_H */ diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c index 446a97b792e3..bc6eb30aa20f 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.c +++ b/drivers/net/ethernet/freescale/fman/fman_memac.c @@ -964,6 +964,11 @@ int memac_set_allmulti(struct fman_mac *memac, bool enable) return 0; } +int memac_set_tstamp(struct fman_mac *memac, bool enable) +{ + return 0; /* Always enabled. 
*/ +} + int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr) { struct memac_regs __iomem *regs = memac->regs; diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.h b/drivers/net/ethernet/freescale/fman/fman_memac.h index b5a50338ed9a..b2c671ec0ce7 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.h +++ b/drivers/net/ethernet/freescale/fman/fman_memac.h @@ -58,5 +58,6 @@ int memac_set_exception(struct fman_mac *memac, int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr); int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr); int memac_set_allmulti(struct fman_mac *memac, bool enable); +int memac_set_tstamp(struct fman_mac *memac, bool enable); #endif /* __MEMAC_H */ diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c index ecbf6187e13a..ee82ee1384eb 100644 --- a/drivers/net/ethernet/freescale/fman/fman_port.c +++ b/drivers/net/ethernet/freescale/fman/fman_port.c @@ -1739,6 +1739,18 @@ int fman_port_get_hash_result_offset(struct fman_port *port, u32 *offset) } EXPORT_SYMBOL(fman_port_get_hash_result_offset); +int fman_port_get_tstamp(struct fman_port *port, const void *data, u64 *tstamp) +{ + if (port->buffer_offsets.time_stamp_offset == ILLEGAL_BASE) + return -EINVAL; + + *tstamp = be64_to_cpu(*(__be64 *)(data + + port->buffer_offsets.time_stamp_offset)); + + return 0; +} +EXPORT_SYMBOL(fman_port_get_tstamp); + static int fman_port_probe(struct platform_device *of_dev) { struct fman_port *port; diff --git a/drivers/net/ethernet/freescale/fman/fman_port.h b/drivers/net/ethernet/freescale/fman/fman_port.h index e86ca6a34e4e..9dbb69f40121 100644 --- a/drivers/net/ethernet/freescale/fman/fman_port.h +++ b/drivers/net/ethernet/freescale/fman/fman_port.h @@ -153,6 +153,8 @@ u32 fman_port_get_qman_channel_id(struct fman_port *port); int fman_port_get_hash_result_offset(struct fman_port *port, u32 *offset); +int fman_port_get_tstamp(struct fman_port *port, const void *data, u64 *tstamp); + struct fman_port *fman_port_bind(struct device *dev); #endif /* __FMAN_PORT_H */ diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c index 284735d4ebe9..40705938eecc 100644 --- a/drivers/net/ethernet/freescale/fman/fman_tgec.c +++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c @@ -44,6 +44,7 @@ #define TGEC_TX_IPG_LENGTH_MASK 0x000003ff /* Command and Configuration Register (COMMAND_CONFIG) */ +#define CMD_CFG_EN_TIMESTAMP 0x00100000 #define CMD_CFG_NO_LEN_CHK 0x00020000 #define CMD_CFG_PAUSE_IGNORE 0x00000100 #define CMF_CFG_CRC_FWD 0x00000040 @@ -588,6 +589,26 @@ int tgec_set_allmulti(struct fman_mac *tgec, bool enable) return 0; } +int tgec_set_tstamp(struct fman_mac *tgec, bool enable) +{ + struct tgec_regs __iomem *regs = tgec->regs; + u32 tmp; + + if (!is_init_done(tgec->cfg)) + return -EINVAL; + + tmp = ioread32be(®s->command_config); + + if (enable) + tmp |= CMD_CFG_EN_TIMESTAMP; + else + tmp &= ~CMD_CFG_EN_TIMESTAMP; + + iowrite32be(tmp, ®s->command_config); + + return 0; +} + int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr) { struct tgec_regs __iomem *regs = tgec->regs; diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.h b/drivers/net/ethernet/freescale/fman/fman_tgec.h index cbbd3b422a98..3bfd1062b386 100644 --- a/drivers/net/ethernet/freescale/fman/fman_tgec.h +++ b/drivers/net/ethernet/freescale/fman/fman_tgec.h @@ -52,5 +52,6 @@ int 
tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr); int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr); int tgec_get_version(struct fman_mac *tgec, u32 *mac_version); int tgec_set_allmulti(struct fman_mac *tgec, bool enable); +int tgec_set_tstamp(struct fman_mac *tgec, bool enable); #endif /* __TGEC_H */ diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index 7b5b95f52c09..a847b9c3b31a 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -471,6 +471,7 @@ static void setup_dtsec(struct mac_device *mac_dev) mac_dev->set_rx_pause = dtsec_accept_rx_pause_frames; mac_dev->set_exception = dtsec_set_exception; mac_dev->set_allmulti = dtsec_set_allmulti; + mac_dev->set_tstamp = dtsec_set_tstamp; mac_dev->set_multi = set_multi; mac_dev->start = start; mac_dev->stop = stop; @@ -490,6 +491,7 @@ static void setup_tgec(struct mac_device *mac_dev) mac_dev->set_rx_pause = tgec_accept_rx_pause_frames; mac_dev->set_exception = tgec_set_exception; mac_dev->set_allmulti = tgec_set_allmulti; + mac_dev->set_tstamp = tgec_set_tstamp; mac_dev->set_multi = set_multi; mac_dev->start = start; mac_dev->stop = stop; @@ -509,6 +511,7 @@ static void setup_memac(struct mac_device *mac_dev) mac_dev->set_rx_pause = memac_accept_rx_pause_frames; mac_dev->set_exception = memac_set_exception; mac_dev->set_allmulti = memac_set_allmulti; + mac_dev->set_tstamp = memac_set_tstamp; mac_dev->set_multi = set_multi; mac_dev->start = start; mac_dev->stop = stop; diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h index b520cec120ee..824a81a9f350 100644 --- a/drivers/net/ethernet/freescale/fman/mac.h +++ b/drivers/net/ethernet/freescale/fman/mac.h @@ -68,6 +68,7 @@ struct mac_device { int (*set_promisc)(struct fman_mac *mac_dev, bool enable); int (*change_addr)(struct fman_mac *mac_dev, enet_addr_t *enet_addr); int (*set_allmulti)(struct fman_mac *mac_dev, bool enable); + int (*set_tstamp)(struct fman_mac *mac_dev, bool enable); int (*set_multi)(struct net_device *net_dev, struct mac_device *mac_dev); int (*set_rx_pause)(struct fman_mac *mac_dev, bool en); diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c index 1fc27c97e3b2..99fe2c210d0f 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c @@ -18,6 +18,7 @@ #include <linux/string.h> #include <linux/ptrace.h> #include <linux/errno.h> +#include <linux/crc32.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/delay.h> @@ -176,21 +177,10 @@ static void set_multicast_start(struct net_device *dev) static void set_multicast_one(struct net_device *dev, const u8 *mac) { struct fs_enet_private *fep = netdev_priv(dev); - int temp, hash_index, i, j; + int temp, hash_index; u32 crc, csrVal; - u8 byte, msb; - - crc = 0xffffffff; - for (i = 0; i < 6; i++) { - byte = mac[i]; - for (j = 0; j < 8; j++) { - msb = crc >> 31; - crc <<= 1; - if (msb ^ (byte & 0x1)) - crc ^= FEC_CRC_POLY; - byte >>= 1; - } - } + + crc = ether_crc(6, mac); temp = (crc & 0x3f) >> 1; hash_index = ((temp & 0x01) << 4) | diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 8cb98cae0a6f..395a5266ea30 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -740,7 
+740,6 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow) static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u64 class) { - unsigned int last_rule_idx = priv->cur_filer_idx; unsigned int cmp_rqfpr; unsigned int *local_rqfpr; unsigned int *local_rqfcr; @@ -819,7 +818,6 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, } priv->cur_filer_idx = l - 1; - last_rule_idx = l; /* hash rules */ ethflow_to_filer_rules(priv, ethflow); diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 42fca3208c0b..22a817da861e 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -3096,6 +3096,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) ugeth_vdbg("%s: IN", __func__); + netdev_sent_queue(dev, skb->len); spin_lock_irqsave(&ugeth->lock, flags); dev->stats.tx_bytes += skb->len; @@ -3240,6 +3241,8 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ) { /* Start from the next BD that should be filled */ struct ucc_geth_private *ugeth = netdev_priv(dev); + unsigned int bytes_sent = 0; + int howmany = 0; u8 __iomem *bd; /* BD pointer */ u32 bd_status; @@ -3257,7 +3260,8 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ) skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]]; if (!skb) break; - + howmany++; + bytes_sent += skb->len; dev->stats.tx_packets++; dev_consume_skb_any(skb); @@ -3279,6 +3283,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ) bd_status = in_be32((u32 __iomem *)bd); } ugeth->confBd[txQ] = bd; + netdev_completed_queue(dev, howmany, bytes_sent); return 0; } @@ -3479,6 +3484,7 @@ static int ucc_geth_open(struct net_device *dev) phy_start(ugeth->phydev); napi_enable(&ugeth->napi); + netdev_reset_queue(dev); netif_start_queue(dev); device_set_wakeup_capable(&dev->dev, @@ -3509,6 +3515,7 @@ static int ucc_geth_close(struct net_device *dev) free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev); netif_stop_queue(dev); + netdev_reset_queue(dev); return 0; } diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig index fb1a7251f45d..25152715396b 100644 --- a/drivers/net/ethernet/hisilicon/Kconfig +++ b/drivers/net/ethernet/hisilicon/Kconfig @@ -85,10 +85,12 @@ config HNS3 drivers(like ODP)to register with HNAE devices and their associated operations. +if HNS3 + config HNS3_HCLGE tristate "Hisilicon HNS3 HCLGE Acceleration Engine & Compatibility Layer Support" + default m depends on PCI_MSI - depends on HNS3 ---help--- This selects the HNS3_HCLGE network acceleration engine & its hardware compatibility layer. The engine would be used in Hisilicon hip08 family of @@ -97,16 +99,15 @@ config HNS3_HCLGE config HNS3_DCB bool "Hisilicon HNS3 Data Center Bridge Support" default n - depends on HNS3 && HNS3_HCLGE && DCB + depends on HNS3_HCLGE && DCB ---help--- Say Y here if you want to use Data Center Bridging (DCB) in the HNS3 driver. If unsure, say N. 
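Stepping back to the ucc_geth hunks above: they add byte queue limits (BQL) accounting, which lets the stack bound how much data sits in the driver's TX ring. The pattern is symmetric and worth seeing in isolation; a condensed sketch (toy_xmit() and toy_tx_reclaim() are illustrative, not driver functions):

#include <linux/netdevice.h>

static netdev_tx_t toy_xmit(struct sk_buff *skb, struct net_device *dev)
{
	netdev_sent_queue(dev, skb->len);	/* account before queueing to HW */
	/* ... post the skb to the hardware ring here ... */
	return NETDEV_TX_OK;
}

static void toy_tx_reclaim(struct net_device *dev)
{
	unsigned int pkts = 0, bytes = 0;

	/* ... walk completed descriptors, summing pkts and bytes ... */
	netdev_completed_queue(dev, pkts, bytes);
}

/* and, as in ucc_geth_open()/ucc_geth_close() above, call
 * netdev_reset_queue(dev) so stale counts never survive a ring reset.
 */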
config HNS3_HCLGEVF - tristate "Hisilicon HNS3VF Acceleration Engine & Compatibility Layer Support" - depends on PCI_MSI - depends on HNS3 + tristate "Hisilicon HNS3VF Acceleration Engine & Compatibility Layer Support" + depends on PCI_MSI depends on HNS3_HCLGE ---help--- This selects the HNS3 VF drivers network acceleration engine & its hardware @@ -115,11 +116,13 @@ config HNS3_HCLGEVF config HNS3_ENET tristate "Hisilicon HNS3 Ethernet Device Support" + default m depends on 64BIT && PCI - depends on HNS3 ---help--- This selects the Ethernet Driver for Hisilicon Network Subsystem 3 for hip08 family of SoCs. This module depends upon HNAE3 driver to access the HNAE3 devices and their associated operations. +endif #HNS3 + endif # NET_VENDOR_HISILICON diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index 340e28211135..14374a856d30 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -904,7 +904,7 @@ static int hip04_mac_probe(struct platform_device *pdev) hip04_config_port(ndev, SPEED_100, DUPLEX_FULL); hip04_config_fifo(priv); - random_ether_addr(ndev->dev_addr); + eth_random_addr(ndev->dev_addr); hip04_update_mac_address(ndev); ret = hip04_alloc_ring(ndev, d); diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c index 25a6c8722eca..c5727003af8c 100644 --- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c +++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c @@ -1006,12 +1006,11 @@ static int hix5hd2_init_hw_desc_queue(struct hix5hd2_priv *priv) for (i = 0; i < QUEUE_NUMS; i++) { size = priv->pool[i].count * sizeof(struct hix5hd2_desc); - virt_addr = dma_alloc_coherent(dev, size, &phys_addr, - GFP_KERNEL); + virt_addr = dma_zalloc_coherent(dev, size, &phys_addr, + GFP_KERNEL); if (virt_addr == NULL) goto error_free_pool; - memset(virt_addr, 0, size); priv->pool[i].size = size; priv->pool[i].desc = virt_addr; priv->pool[i].phys_addr = phys_addr; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c index bd68379d2bea..e6aad30e7e69 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c @@ -70,8 +70,8 @@ static struct ring_pair_cb *hns_ae_get_ring_pair(struct hnae_queue *q) return container_of(q, struct ring_pair_cb, q); } -struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev, - u32 port_id) +static struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev, + u32 port_id) { int vfnum_per_port; int qnum_per_vf; @@ -329,7 +329,7 @@ static int hns_ae_start(struct hnae_handle *handle) return 0; } -void hns_ae_stop(struct hnae_handle *handle) +static void hns_ae_stop(struct hnae_handle *handle) { struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); @@ -357,7 +357,7 @@ static void hns_ae_reset(struct hnae_handle *handle) } } -void hns_ae_toggle_ring_irq(struct hnae_ring *ring, u32 mask) +static void hns_ae_toggle_ring_irq(struct hnae_ring *ring, u32 mask) { u32 flag; @@ -577,8 +577,8 @@ static void hns_ae_get_coalesce_range(struct hnae_handle *handle, *rx_usecs_high = HNS_RCB_RX_USECS_HIGH; } -void hns_ae_update_stats(struct hnae_handle *handle, - struct net_device_stats *net_stats) +static void hns_ae_update_stats(struct hnae_handle *handle, + struct net_device_stats *net_stats) { int port; int idx; @@ -660,7 +660,7 @@ void hns_ae_update_stats(struct hnae_handle *handle, net_stats->multicast = 
mac_cb->hw_stats.rx_mc_pkts; } -void hns_ae_get_stats(struct hnae_handle *handle, u64 *data) +static void hns_ae_get_stats(struct hnae_handle *handle, u64 *data) { int idx; struct hns_mac_cb *mac_cb; @@ -692,8 +692,8 @@ void hns_ae_get_stats(struct hnae_handle *handle, u64 *data) hns_dsaf_get_stats(vf_cb->dsaf_dev, p, vf_cb->port_index); } -void hns_ae_get_strings(struct hnae_handle *handle, - u32 stringset, u8 *data) +static void hns_ae_get_strings(struct hnae_handle *handle, + u32 stringset, u8 *data) { int port; int idx; @@ -725,7 +725,7 @@ void hns_ae_get_strings(struct hnae_handle *handle, hns_dsaf_get_strings(stringset, p, port, dsaf_dev); } -int hns_ae_get_sset_count(struct hnae_handle *handle, int stringset) +static int hns_ae_get_sset_count(struct hnae_handle *handle, int stringset) { u32 sset_count = 0; struct hns_mac_cb *mac_cb; @@ -771,7 +771,7 @@ static int hns_ae_config_loopback(struct hnae_handle *handle, return ret; } -void hns_ae_update_led_status(struct hnae_handle *handle) +static void hns_ae_update_led_status(struct hnae_handle *handle) { struct hns_mac_cb *mac_cb; @@ -783,8 +783,8 @@ void hns_ae_update_led_status(struct hnae_handle *handle) hns_set_led_opt(mac_cb); } -int hns_ae_cpld_set_led_id(struct hnae_handle *handle, - enum hnae_led_state status) +static int hns_ae_cpld_set_led_id(struct hnae_handle *handle, + enum hnae_led_state status) { struct hns_mac_cb *mac_cb; @@ -795,7 +795,7 @@ int hns_ae_cpld_set_led_id(struct hnae_handle *handle, return hns_cpld_led_set_id(mac_cb, status); } -void hns_ae_get_regs(struct hnae_handle *handle, void *data) +static void hns_ae_get_regs(struct hnae_handle *handle, void *data) { u32 *p = data; int i; @@ -820,7 +820,7 @@ void hns_ae_get_regs(struct hnae_handle *handle, void *data) hns_dsaf_get_regs(vf_cb->dsaf_dev, vf_cb->port_index, p); } -int hns_ae_get_regs_len(struct hnae_handle *handle) +static int hns_ae_get_regs_len(struct hnae_handle *handle) { u32 total_num; struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c index 74bd260ca02a..5488c6e89f21 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c @@ -339,7 +339,7 @@ static void hns_gmac_init(void *mac_drv) GMAC_TX_WATER_LINE_SHIFT, 8); } -void hns_gmac_update_stats(void *mac_drv) +static void hns_gmac_update_stats(void *mac_drv) { struct mac_hw_stats *hw_stats = NULL; struct mac_driver *drv = (struct mac_driver *)mac_drv; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index 9dcc5765f11f..6e5107d428c7 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -931,8 +931,9 @@ static int hns_mac_get_mode(phy_interface_t phy_if) } } -u8 __iomem *hns_mac_get_vaddr(struct dsaf_device *dsaf_dev, - struct hns_mac_cb *mac_cb, u32 mac_mode_idx) +static u8 __iomem * +hns_mac_get_vaddr(struct dsaf_device *dsaf_dev, + struct hns_mac_cb *mac_cb, u32 mac_mode_idx) { u8 __iomem *base = dsaf_dev->io_base; int mac_id = mac_cb->mac_id; @@ -950,7 +951,8 @@ u8 __iomem *hns_mac_get_vaddr(struct dsaf_device *dsaf_dev, * @mac_cb: mac control block * return 0 - success , negative --fail */ -int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, struct hns_mac_cb *mac_cb) +static int +hns_mac_get_cfg(struct dsaf_device *dsaf_dev, struct hns_mac_cb *mac_cb) { int ret; u32 mac_mode_idx; diff 
--git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index 0ce07f6eb1e6..619e6ce465c2 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -28,7 +28,7 @@ #include "hns_dsaf_rcb.h" #include "hns_dsaf_misc.h" -const char *g_dsaf_mode_match[DSAF_MODE_MAX] = { +static const char *g_dsaf_mode_match[DSAF_MODE_MAX] = { [DSAF_MODE_DISABLE_2PORT_64VM] = "2port-64vf", [DSAF_MODE_DISABLE_6PORT_0VM] = "6port-16rss", [DSAF_MODE_DISABLE_6PORT_16VM] = "6port-16vf", @@ -42,7 +42,7 @@ static const struct acpi_device_id hns_dsaf_acpi_match[] = { }; MODULE_DEVICE_TABLE(acpi, hns_dsaf_acpi_match); -int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) +static int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) { int ret, i; u32 desc_num; @@ -959,7 +959,8 @@ static void hns_dsaf_tcam_mc_invld(struct dsaf_device *dsaf_dev, u32 address) spin_unlock_bh(&dsaf_dev->tcam_lock); } -void hns_dsaf_tcam_addr_get(struct dsaf_drv_tbl_tcam_key *mac_key, u8 *addr) +static void +hns_dsaf_tcam_addr_get(struct dsaf_drv_tbl_tcam_key *mac_key, u8 *addr) { addr[0] = mac_key->high.bits.mac_0; addr[1] = mac_key->high.bits.mac_1; @@ -2084,8 +2085,9 @@ static void hns_dsaf_pfc_unit_cnt(struct dsaf_device *dsaf_dev, int mac_id, * @dsaf_id: dsa fabric id * @xge_ge_work_mode */ -void hns_dsaf_port_work_rate_cfg(struct dsaf_device *dsaf_dev, int mac_id, - enum dsaf_port_rate_mode rate_mode) +static void +hns_dsaf_port_work_rate_cfg(struct dsaf_device *dsaf_dev, int mac_id, + enum dsaf_port_rate_mode rate_mode) { u32 port_work_mode; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c index acf29633ec79..16294cd3c954 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c @@ -340,7 +340,8 @@ static void hns_dsaf_xge_srst_by_port_acpi(struct dsaf_device *dsaf_dev, * bit18-19 for com/dfx * @enable: false - request reset , true - drop reset */ -void hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool dereset) +static void +hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool dereset) { u32 reg_addr; @@ -362,7 +363,7 @@ void hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool dereset) * bit18-19 for com/dfx * @enable: false - request reset , true - drop reset */ -void +static void hns_dsaf_srst_chns_acpi(struct dsaf_device *dsaf_dev, u32 msk, bool dereset) { hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC, @@ -370,7 +371,7 @@ hns_dsaf_srst_chns_acpi(struct dsaf_device *dsaf_dev, u32 msk, bool dereset) msk, dereset); } -void hns_dsaf_roce_srst(struct dsaf_device *dsaf_dev, bool dereset) +static void hns_dsaf_roce_srst(struct dsaf_device *dsaf_dev, bool dereset) { if (!dereset) { dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_ROCEE_RESET_REQ_REG, 1); @@ -384,7 +385,7 @@ void hns_dsaf_roce_srst(struct dsaf_device *dsaf_dev, bool dereset) } } -void hns_dsaf_roce_srst_acpi(struct dsaf_device *dsaf_dev, bool dereset) +static void hns_dsaf_roce_srst_acpi(struct dsaf_device *dsaf_dev, bool dereset) { hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC, HNS_ROCE_RESET_FUNC, 0, dereset); @@ -568,7 +569,7 @@ static phy_interface_t hns_mac_get_phy_if_acpi(struct hns_mac_cb *mac_cb) return phy_if; } -int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt) +static int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt) { u32 val = 0; int ret; @@ 
-586,7 +587,7 @@ int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt) return 0; } -int hns_mac_get_sfp_prsnt_acpi(struct hns_mac_cb *mac_cb, int *sfp_prsnt) +static int hns_mac_get_sfp_prsnt_acpi(struct hns_mac_cb *mac_cb, int *sfp_prsnt) { union acpi_object *obj; union acpi_object obj_args, argv4; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c index 93e71e27401b..d160d8c9e45b 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c @@ -73,7 +73,7 @@ hns_ppe_common_get_ioaddr(struct ppe_common_cb *ppe_common) * comm_index: common index * retuen 0 - success , negative --fail */ -int hns_ppe_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index) +static int hns_ppe_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index) { struct ppe_common_cb *ppe_common; int ppe_num; @@ -104,7 +104,8 @@ int hns_ppe_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index) return 0; } -void hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index) +static void +hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index) { dsaf_dev->ppe_common[comm_index] = NULL; } @@ -203,9 +204,9 @@ static int hns_ppe_common_init_hw(struct ppe_common_cb *ppe_common) enum dsaf_mode dsaf_mode = dsaf_dev->dsaf_mode; dsaf_dev->misc_op->ppe_comm_srst(dsaf_dev, 0); - mdelay(100); + msleep(100); dsaf_dev->misc_op->ppe_comm_srst(dsaf_dev, 1); - mdelay(100); + msleep(100); if (ppe_common->ppe_mode == PPE_COMMON_MODE_SERVICE) { switch (dsaf_mode) { @@ -337,7 +338,7 @@ static void hns_ppe_uninit_hw(struct hns_ppe_cb *ppe_cb) } } -void hns_ppe_uninit_ex(struct ppe_common_cb *ppe_common) +static void hns_ppe_uninit_ex(struct ppe_common_cb *ppe_common) { u32 i; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c index e2e28532e4dc..9d76e2e54f9d 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c @@ -705,7 +705,7 @@ void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, u16 *max_vfn, } } -int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev) +static int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev) { switch (dsaf_dev->dsaf_mode) { case DSAF_MODE_ENABLE_FIX: @@ -741,7 +741,7 @@ int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev) } } -void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common) +static void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common) { struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c index 51e7e9f5af49..ba4316910dea 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c @@ -215,10 +215,10 @@ static void hns_xgmac_init(void *mac_drv) u32 port = drv->mac_id; dsaf_dev->misc_op->xge_srst(dsaf_dev, port, 0); - mdelay(100); + msleep(100); dsaf_dev->misc_op->xge_srst(dsaf_dev, port, 1); - mdelay(100); + msleep(100); hns_xgmac_lf_rf_control_init(drv); hns_xgmac_exc_irq_en(drv, 0); @@ -311,7 +311,7 @@ static void hns_xgmac_config_max_frame_length(void *mac_drv, u16 newval) dsaf_write_dev(drv, XGMAC_MAC_MAX_PKT_SIZE_REG, newval); } -void hns_xgmac_update_stats(void *mac_drv) +static void hns_xgmac_update_stats(void *mac_drv) { struct mac_driver *drv = (struct mac_driver *)mac_drv; struct 
mac_hw_stats *hw_stats = &drv->mac_cb->hw_stats; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index ef9ef703d13a..c2ac187ec8fc 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -1300,7 +1300,7 @@ static int hns_nic_net_set_mac_address(struct net_device *ndev, void *p) return 0; } -void hns_nic_update_stats(struct net_device *netdev) +static void hns_nic_update_stats(struct net_device *netdev) { struct hns_nic_priv *priv = netdev_priv(netdev); struct hnae_handle *h = priv->ae_handle; @@ -1582,7 +1582,7 @@ static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr, /* use only for netconsole to poll with the device without interrupt */ #ifdef CONFIG_NET_POLL_CONTROLLER -void hns_nic_poll_controller(struct net_device *ndev) +static void hns_nic_poll_controller(struct net_device *ndev) { struct hns_nic_priv *priv = netdev_priv(ndev); unsigned long flags; @@ -1935,7 +1935,7 @@ static int hns_nic_uc_unsync(struct net_device *netdev, * * return void */ -void hns_set_multicast_list(struct net_device *ndev) +static void hns_set_multicast_list(struct net_device *ndev) { struct hns_nic_priv *priv = netdev_priv(ndev); struct hnae_handle *h = priv->ae_handle; @@ -1957,7 +1957,7 @@ void hns_set_multicast_list(struct net_device *ndev) } } -void hns_nic_set_rx_mode(struct net_device *ndev) +static void hns_nic_set_rx_mode(struct net_device *ndev) { struct hns_nic_priv *priv = netdev_priv(ndev); struct hnae_handle *h = priv->ae_handle; @@ -2022,7 +2022,8 @@ static void hns_nic_get_stats64(struct net_device *ndev, static u16 hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { struct ethhdr *eth_hdr = (struct ethhdr *)skb->data; struct hns_nic_priv *priv = netdev_priv(ndev); @@ -2032,7 +2033,7 @@ hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb, is_multicast_ether_addr(eth_hdr->h_dest)) return 0; else - return fallback(ndev, skb); + return fallback(ndev, skb, NULL); } static const struct net_device_ops hns_nic_netdev_ops = { diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index 2e14a3ae1d8b..3957205abb81 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -658,8 +658,8 @@ static void hns_nic_get_drvinfo(struct net_device *net_dev, * @dev: net device * @param: ethtool parameter */ -void hns_get_ringparam(struct net_device *net_dev, - struct ethtool_ringparam *param) +static void hns_get_ringparam(struct net_device *net_dev, + struct ethtool_ringparam *param) { struct hns_nic_priv *priv = netdev_priv(net_dev); struct hnae_ae_ops *ops; @@ -808,7 +808,8 @@ static int hns_set_coalesce(struct net_device *net_dev, * @dev: net device * @ch: channel info. */ -void hns_get_channels(struct net_device *net_dev, struct ethtool_channels *ch) +static void +hns_get_channels(struct net_device *net_dev, struct ethtool_channels *ch) { struct hns_nic_priv *priv = netdev_priv(net_dev); @@ -825,8 +826,8 @@ void hns_get_channels(struct net_device *net_dev, struct ethtool_channels *ch) * @stats: statistics info. * @data: statistics data. 
*/ -void hns_get_ethtool_stats(struct net_device *netdev, - struct ethtool_stats *stats, u64 *data) +static void hns_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) { u64 *p = data; struct hns_nic_priv *priv = netdev_priv(netdev); @@ -883,7 +884,7 @@ void hns_get_ethtool_stats(struct net_device *netdev, * @stats: string set ID. * @data: objects data. */ -void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +static void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { struct hns_nic_priv *priv = netdev_priv(netdev); struct hnae_handle *h = priv->ae_handle; @@ -973,7 +974,7 @@ void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data) * * Return string set count. */ -int hns_get_sset_count(struct net_device *netdev, int stringset) +static int hns_get_sset_count(struct net_device *netdev, int stringset) { struct hns_nic_priv *priv = netdev_priv(netdev); struct hnae_handle *h = priv->ae_handle; @@ -1007,7 +1008,7 @@ int hns_get_sset_count(struct net_device *netdev, int stringset) * * Return 0 on success, negative on failure. */ -int hns_phy_led_set(struct net_device *netdev, int value) +static int hns_phy_led_set(struct net_device *netdev, int value) { int retval; struct phy_device *phy_dev = netdev->phydev; @@ -1029,7 +1030,8 @@ int hns_phy_led_set(struct net_device *netdev, int value) * * Return 0 on success, negative on failure. */ -int hns_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) +static int +hns_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) { struct hns_nic_priv *priv = netdev_priv(netdev); struct hnae_handle *h = priv->ae_handle; @@ -1103,8 +1105,8 @@ int hns_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) * @cmd: ethtool cmd * @date: register data */ -void hns_get_regs(struct net_device *net_dev, struct ethtool_regs *cmd, - void *data) +static void hns_get_regs(struct net_device *net_dev, struct ethtool_regs *cmd, + void *data) { struct hns_nic_priv *priv = netdev_priv(net_dev); struct hnae_ae_ops *ops; diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c index 9d79dad2c6aa..fff5be8078ac 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c @@ -1,14 +1,7 @@ -/* - * Copyright (c) 2016-2017 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. 
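The hnae3 hunks that follow rename the hnae_* bit helpers to hnae3_* without changing their semantics. A self-contained sketch of the flag bookkeeping they implement, using the macros from hnae3.h (the bit index and the toy_* helpers are illustrative only):

#include <linux/types.h>
#include "hnae3.h"

#define TOY_CLIENT_INITED_B	3	/* illustrative bit index */

static void toy_mark_inited(unsigned long *flag, bool inited)
{
	hnae3_set_bit(*flag, TOY_CLIENT_INITED_B, inited ? 1 : 0);
}

static bool toy_is_inited(unsigned long flag)
{
	return hnae3_get_bit(flag, TOY_CLIENT_INITED_B);
}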
#include <linux/list.h> -#include <linux/slab.h> #include <linux/spinlock.h> #include "hnae3.h" @@ -41,13 +34,13 @@ static void hnae3_set_client_init_flag(struct hnae3_client *client, { switch (client->type) { case HNAE3_CLIENT_KNIC: - hnae_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited); + hnae3_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited); break; case HNAE3_CLIENT_UNIC: - hnae_set_bit(ae_dev->flag, HNAE3_UNIC_CLIENT_INITED_B, inited); + hnae3_set_bit(ae_dev->flag, HNAE3_UNIC_CLIENT_INITED_B, inited); break; case HNAE3_CLIENT_ROCE: - hnae_set_bit(ae_dev->flag, HNAE3_ROCE_CLIENT_INITED_B, inited); + hnae3_set_bit(ae_dev->flag, HNAE3_ROCE_CLIENT_INITED_B, inited); break; default: break; @@ -61,16 +54,16 @@ static int hnae3_get_client_init_flag(struct hnae3_client *client, switch (client->type) { case HNAE3_CLIENT_KNIC: - inited = hnae_get_bit(ae_dev->flag, + inited = hnae3_get_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B); break; case HNAE3_CLIENT_UNIC: - inited = hnae_get_bit(ae_dev->flag, + inited = hnae3_get_bit(ae_dev->flag, HNAE3_UNIC_CLIENT_INITED_B); break; case HNAE3_CLIENT_ROCE: - inited = hnae_get_bit(ae_dev->flag, - HNAE3_ROCE_CLIENT_INITED_B); + inited = hnae3_get_bit(ae_dev->flag, + HNAE3_ROCE_CLIENT_INITED_B); break; default: break; @@ -86,7 +79,7 @@ static int hnae3_match_n_instantiate(struct hnae3_client *client, /* check if this client matches the type of ae_dev */ if (!(hnae3_client_match(client->type, ae_dev->dev_type) && - hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) { + hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) { return 0; } @@ -95,7 +88,7 @@ static int hnae3_match_n_instantiate(struct hnae3_client *client, ret = ae_dev->ops->init_client_instance(client, ae_dev); if (ret) { dev_err(&ae_dev->pdev->dev, - "fail to instantiate client\n"); + "fail to instantiate client, ret = %d\n", ret); return ret; } @@ -135,7 +128,8 @@ int hnae3_register_client(struct hnae3_client *client) ret = hnae3_match_n_instantiate(client, ae_dev, true); if (ret) dev_err(&ae_dev->pdev->dev, - "match and instantiation failed for port\n"); + "match and instantiation failed for port, ret = %d\n", + ret); } exit: @@ -185,11 +179,12 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo) ae_dev->ops = ae_algo->ops; ret = ae_algo->ops->init_ae_dev(ae_dev); if (ret) { - dev_err(&ae_dev->pdev->dev, "init ae_dev error.\n"); + dev_err(&ae_dev->pdev->dev, + "init ae_dev error, ret = %d\n", ret); continue; } - hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1); + hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1); /* check the client list for the match with this ae_dev type and * initialize the figure out client instance @@ -198,7 +193,8 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo) ret = hnae3_match_n_instantiate(client, ae_dev, true); if (ret) dev_err(&ae_dev->pdev->dev, - "match and instantiation failed\n"); + "match and instantiation failed, ret = %d\n", + ret); } } @@ -218,7 +214,7 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo) mutex_lock(&hnae3_common_lock); /* Check if there are matched ae_dev */ list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) { - if (!hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B)) + if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B)) continue; id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev); @@ -232,7 +228,7 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo) hnae3_match_n_instantiate(client, ae_dev, false); ae_algo->ops->uninit_ae_dev(ae_dev); - hnae_set_bit(ae_dev->flag, 
HNAE3_DEV_INITED_B, 0); + hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0); } list_del(&ae_algo->node); @@ -271,11 +267,12 @@ void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) /* ae_dev init should set flag */ ret = ae_dev->ops->init_ae_dev(ae_dev); if (ret) { - dev_err(&ae_dev->pdev->dev, "init ae_dev error\n"); + dev_err(&ae_dev->pdev->dev, + "init ae_dev error, ret = %d\n", ret); goto out_err; } - hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1); + hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1); break; } @@ -286,7 +283,8 @@ void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) ret = hnae3_match_n_instantiate(client, ae_dev, true); if (ret) dev_err(&ae_dev->pdev->dev, - "match and instantiation failed\n"); + "match and instantiation failed, ret = %d\n", + ret); } out_err: @@ -306,7 +304,7 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev) mutex_lock(&hnae3_common_lock); /* Check if there are matched ae_algo */ list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) { - if (!hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B)) + if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B)) continue; id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev); @@ -317,7 +315,7 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev) hnae3_match_n_instantiate(client, ae_dev, false); ae_algo->ops->uninit_ae_dev(ae_dev); - hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0); + hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0); } list_del(&ae_dev->node); diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 8acb1d116a02..67befff0bfc5 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -1,11 +1,5 @@ -/* - * Copyright (c) 2016-2017 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. 
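Further down in this patch, hns3_alloc_desc()/hns3_free_desc() and hclge_alloc_cmd_desc()/hclge_free_cmd_desc() replace the kzalloc() plus dma_map_single() pairing with a single coherent allocation for the descriptor rings. A minimal sketch of that pattern, assuming a hypothetical example_ring structure (not a structure from this patch):

#include <linux/dma-mapping.h>

struct example_ring {
	struct device *dev;
	void *desc;
	dma_addr_t desc_dma_addr;
	int desc_num;
};

static int example_alloc_desc(struct example_ring *ring, size_t desc_size)
{
	int size = ring->desc_num * desc_size;

	/* one call hands back zeroed memory plus its DMA address, so no
	 * separate dma_map_single()/dma_mapping_error() step is needed
	 */
	ring->desc = dma_zalloc_coherent(ring->dev, size,
					 &ring->desc_dma_addr, GFP_KERNEL);
	return ring->desc ? 0 : -ENOMEM;
}

static void example_free_desc(struct example_ring *ring, size_t desc_size)
{
	if (ring->desc) {
		dma_free_coherent(ring->dev, ring->desc_num * desc_size,
				  ring->desc, ring->desc_dma_addr);
		ring->desc = NULL;
	}
}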
#ifndef __HNAE3_H #define __HNAE3_H @@ -62,10 +56,10 @@ BIT(HNAE3_DEV_SUPPORT_ROCE_B)) #define hnae3_dev_roce_supported(hdev) \ - hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B) + hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B) #define hnae3_dev_dcb_supported(hdev) \ - hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B) + hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B) #define ring_ptr_move_fw(ring, p) \ ((ring)->p = ((ring)->p + 1) % (ring)->desc_num) @@ -167,7 +161,6 @@ struct hnae3_client_ops { #define HNAE3_CLIENT_NAME_LENGTH 16 struct hnae3_client { char name[HNAE3_CLIENT_NAME_LENGTH]; - u16 version; unsigned long state; enum hnae3_client_type type; const struct hnae3_client_ops *ops; @@ -436,7 +429,6 @@ struct hnae3_dcb_ops { struct hnae3_ae_algo { const struct hnae3_ae_ops *ops; struct list_head node; - char name[HNAE3_CLASS_NAME_SIZE]; const struct pci_device_id *pdev_id_table; }; @@ -509,17 +501,17 @@ struct hnae3_handle { u32 numa_node_mask; /* for multi-chip support */ }; -#define hnae_set_field(origin, mask, shift, val) \ +#define hnae3_set_field(origin, mask, shift, val) \ do { \ (origin) &= (~(mask)); \ (origin) |= ((val) << (shift)) & (mask); \ } while (0) -#define hnae_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift)) +#define hnae3_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift)) -#define hnae_set_bit(origin, shift, val) \ - hnae_set_field((origin), (0x1 << (shift)), (shift), (val)) -#define hnae_get_bit(origin, shift) \ - hnae_get_field((origin), (0x1 << (shift)), (shift)) +#define hnae3_set_bit(origin, shift, val) \ + hnae3_set_field((origin), (0x1 << (shift)), (shift), (val)) +#define hnae3_get_bit(origin, shift) \ + hnae3_get_field((origin), (0x1 << (shift)), (shift)) void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev); void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c index eb82700da7d0..ea5f8a84070d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c @@ -1,11 +1,5 @@ -/* - * Copyright (c) 2016-2017 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. #include "hnae3.h" #include "hns3_enet.h" diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 25a73bb2e642..6c9e5d62455b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -1,11 +1,5 @@ -/* - * Copyright (c) 2016~2017 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. 
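The hnae3.h hunk above renames the bit/field helpers from hnae_* to hnae3_* without changing their semantics: plain clear-mask, shift and OR operations. A small usage sketch, with made-up mask and shift values:

#include <linux/bitops.h>	/* GENMASK */
#include <linux/bug.h>		/* WARN_ON */
#include <linux/types.h>

#define EXAMPLE_SPEED_S	4
#define EXAMPLE_SPEED_M	GENMASK(7, 4)	/* bits 7:4, i.e. 0xf0 */

static void example_field_usage(void)
{
	u32 reg = 0;

	/* clears bits 7:4 of reg, then stores 0x5 there: reg == 0x50 */
	hnae3_set_field(reg, EXAMPLE_SPEED_M, EXAMPLE_SPEED_S, 0x5);

	/* masks and shifts back down: yields 0x5 */
	WARN_ON(hnae3_get_field(reg, EXAMPLE_SPEED_M, EXAMPLE_SPEED_S) != 0x5);

	/* single-bit convenience wrappers: reg == 0x51 afterwards */
	hnae3_set_bit(reg, 0, 1);
	WARN_ON(hnae3_get_bit(reg, 0) != 1);
}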
#include <linux/dma-mapping.h> #include <linux/etherdevice.h> @@ -62,9 +56,9 @@ static const struct pci_device_id hns3_pci_tbl[] = { }; MODULE_DEVICE_TABLE(pci, hns3_pci_tbl); -static irqreturn_t hns3_irq_handle(int irq, void *dev) +static irqreturn_t hns3_irq_handle(int irq, void *vector) { - struct hns3_enet_tqp_vector *tqp_vector = dev; + struct hns3_enet_tqp_vector *tqp_vector = vector; napi_schedule(&tqp_vector->napi); @@ -239,7 +233,28 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev) struct hnae3_handle *h = hns3_get_handle(netdev); struct hnae3_knic_private_info *kinfo = &h->kinfo; unsigned int queue_size = kinfo->rss_size * kinfo->num_tc; - int ret; + int i, ret; + + if (kinfo->num_tc <= 1) { + netdev_reset_tc(netdev); + } else { + ret = netdev_set_num_tc(netdev, kinfo->num_tc); + if (ret) { + netdev_err(netdev, + "netdev_set_num_tc fail, ret=%d!\n", ret); + return ret; + } + + for (i = 0; i < HNAE3_MAX_TC; i++) { + if (!kinfo->tc_info[i].enable) + continue; + + netdev_set_tc_queue(netdev, + kinfo->tc_info[i].tc, + kinfo->tc_info[i].tqp_count, + kinfo->tc_info[i].tqp_offset); + } + } ret = netif_set_real_num_tx_queues(netdev, queue_size); if (ret) { @@ -312,7 +327,9 @@ out_start_err: static int hns3_nic_net_open(struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); - int ret; + struct hnae3_handle *h = hns3_get_handle(netdev); + struct hnae3_knic_private_info *kinfo; + int i, ret; netif_carrier_off(netdev); @@ -327,6 +344,12 @@ static int hns3_nic_net_open(struct net_device *netdev) return ret; } + kinfo = &h->kinfo; + for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) { + netdev_set_prio_tc_map(netdev, i, + kinfo->prio_tc[i]); + } + priv->ae_handle->last_reset_time = jiffies; return 0; } @@ -493,8 +516,8 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, /* find the txbd field values */ *paylen = skb->len - hdr_len; - hnae_set_bit(*type_cs_vlan_tso, - HNS3_TXD_TSO_B, 1); + hnae3_set_bit(*type_cs_vlan_tso, + HNS3_TXD_TSO_B, 1); /* get MSS for TSO */ *mss = skb_shinfo(skb)->gso_size; @@ -586,21 +609,21 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto, /* compute L2 header size for normal packet, defined in 2 Bytes */ l2_len = l3.hdr - skb->data; - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M, - HNS3_TXD_L2LEN_S, l2_len >> 1); + hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M, + HNS3_TXD_L2LEN_S, l2_len >> 1); /* tunnel packet*/ if (skb->encapsulation) { /* compute OL2 header size, defined in 2 Bytes */ ol2_len = l2_len; - hnae_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_L2LEN_M, - HNS3_TXD_L2LEN_S, ol2_len >> 1); + hnae3_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_L2LEN_M, + HNS3_TXD_L2LEN_S, ol2_len >> 1); /* compute OL3 header size, defined in 4 Bytes */ ol3_len = l4.hdr - l3.hdr; - hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M, - HNS3_TXD_L3LEN_S, ol3_len >> 2); + hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M, + HNS3_TXD_L3LEN_S, ol3_len >> 2); /* MAC in UDP, MAC in GRE (0x6558)*/ if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) { @@ -609,16 +632,17 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto, /* compute OL4 header size, defined in 4 Bytes. 
*/ ol4_len = l2_hdr - l4.hdr; - hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_M, - HNS3_TXD_L4LEN_S, ol4_len >> 2); + hnae3_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S, + ol4_len >> 2); /* switch IP header ptr from outer to inner header */ l3.hdr = skb_inner_network_header(skb); /* compute inner l2 header size, defined in 2 Bytes. */ l2_len = l3.hdr - l2_hdr; - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M, - HNS3_TXD_L2LEN_S, l2_len >> 1); + hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M, + HNS3_TXD_L2LEN_S, l2_len >> 1); } else { /* skb packet types not supported by hardware, * txbd len field isn't filled. @@ -634,22 +658,24 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto, /* compute inner(/normal) L3 header size, defined in 4 Bytes */ l3_len = l4.hdr - l3.hdr; - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M, - HNS3_TXD_L3LEN_S, l3_len >> 2); + hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M, + HNS3_TXD_L3LEN_S, l3_len >> 2); /* compute inner(/normal) L4 header size, defined in 4 Bytes */ switch (l4_proto) { case IPPROTO_TCP: - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, - HNS3_TXD_L4LEN_S, l4.tcp->doff); + hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, + HNS3_TXD_L4LEN_S, l4.tcp->doff); break; case IPPROTO_SCTP: - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, - HNS3_TXD_L4LEN_S, (sizeof(struct sctphdr) >> 2)); + hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, + HNS3_TXD_L4LEN_S, + (sizeof(struct sctphdr) >> 2)); break; case IPPROTO_UDP: - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, - HNS3_TXD_L4LEN_S, (sizeof(struct udphdr) >> 2)); + hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, + HNS3_TXD_L4LEN_S, + (sizeof(struct udphdr) >> 2)); break; default: /* skb packet types not supported by hardware, @@ -703,32 +729,34 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, /* define outer network header type. */ if (skb->protocol == htons(ETH_P_IP)) { if (skb_is_gso(skb)) - hnae_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S, - HNS3_OL3T_IPV4_CSUM); + hnae3_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_OL3T_M, + HNS3_TXD_OL3T_S, + HNS3_OL3T_IPV4_CSUM); else - hnae_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S, - HNS3_OL3T_IPV4_NO_CSUM); + hnae3_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_OL3T_M, + HNS3_TXD_OL3T_S, + HNS3_OL3T_IPV4_NO_CSUM); } else if (skb->protocol == htons(ETH_P_IPV6)) { - hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M, - HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6); + hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M, + HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6); } /* define tunnel type (OL4). */ switch (l4_proto) { case IPPROTO_UDP: - hnae_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_TUNTYPE_M, - HNS3_TXD_TUNTYPE_S, - HNS3_TUN_MAC_IN_UDP); + hnae3_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_TUNTYPE_M, + HNS3_TXD_TUNTYPE_S, + HNS3_TUN_MAC_IN_UDP); break; case IPPROTO_GRE: - hnae_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_TUNTYPE_M, - HNS3_TXD_TUNTYPE_S, - HNS3_TUN_NVGRE); + hnae3_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_TUNTYPE_M, + HNS3_TXD_TUNTYPE_S, + HNS3_TUN_NVGRE); break; default: /* drop the skb tunnel packet if hardware doesn't support, @@ -749,43 +777,43 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, } if (l3.v4->version == 4) { - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, - HNS3_TXD_L3T_S, HNS3_L3T_IPV4); + hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, +
HNS3_TXD_L3T_S, HNS3_L3T_IPV4); /* the stack computes the IP header already, the only time we * need the hardware to recompute it is in the case of TSO. */ if (skb_is_gso(skb)) - hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1); - - hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); + hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1); } else if (l3.v6->version == 6) { - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, - HNS3_TXD_L3T_S, HNS3_L3T_IPV6); - hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); + hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, + HNS3_TXD_L3T_S, HNS3_L3T_IPV6); } switch (l4_proto) { case IPPROTO_TCP: - hnae_set_field(*type_cs_vlan_tso, - HNS3_TXD_L4T_M, - HNS3_TXD_L4T_S, - HNS3_L4T_TCP); + hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); + hnae3_set_field(*type_cs_vlan_tso, + HNS3_TXD_L4T_M, + HNS3_TXD_L4T_S, + HNS3_L4T_TCP); break; case IPPROTO_UDP: if (hns3_tunnel_csum_bug(skb)) break; - hnae_set_field(*type_cs_vlan_tso, - HNS3_TXD_L4T_M, - HNS3_TXD_L4T_S, - HNS3_L4T_UDP); + hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); + hnae3_set_field(*type_cs_vlan_tso, + HNS3_TXD_L4T_M, + HNS3_TXD_L4T_S, + HNS3_L4T_UDP); break; case IPPROTO_SCTP: - hnae_set_field(*type_cs_vlan_tso, - HNS3_TXD_L4T_M, - HNS3_TXD_L4T_S, - HNS3_L4T_SCTP); + hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); + hnae3_set_field(*type_cs_vlan_tso, + HNS3_TXD_L4T_M, + HNS3_TXD_L4T_S, + HNS3_L4T_SCTP); break; default: /* drop the skb tunnel packet if hardware don't support, @@ -807,11 +835,11 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end) { /* Config bd buffer end */ - hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M, - HNS3_TXD_BDTYPE_S, 0); - hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end); - hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1); - hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0); + hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M, + HNS3_TXD_BDTYPE_S, 0); + hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end); + hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1); + hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0); } static int hns3_fill_desc_vtags(struct sk_buff *skb, @@ -844,10 +872,10 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb, * and use inner_vtag in one tag case. 
*/ if (skb->protocol == htons(ETH_P_8021Q)) { - hnae_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1); + hnae3_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1); *out_vtag = vlan_tag; } else { - hnae_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1); + hnae3_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1); *inner_vtag = vlan_tag; } } else if (skb->protocol == htons(ETH_P_8021Q)) { @@ -880,7 +908,6 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, u16 out_vtag = 0; u32 paylen = 0; u16 mss = 0; - __be16 protocol; u8 ol4_proto; u8 il4_proto; int ret; @@ -909,7 +936,6 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, if (skb->ip_summed == CHECKSUM_PARTIAL) { skb_reset_mac_len(skb); - protocol = skb->protocol; ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); if (ret) @@ -1135,7 +1161,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) wmb(); /* Commit all data before submit */ - hnae_queue_xmit(ring->tqp, buf_num); + hnae3_queue_xmit(ring->tqp, buf_num); return NETDEV_TX_OK; @@ -1304,7 +1330,6 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data) u16 mode = mqprio_qopt->mode; u8 hw = mqprio_qopt->qopt.hw; bool if_running; - unsigned int i; int ret; if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS && @@ -1328,24 +1353,6 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data) if (ret) goto out; - if (tc <= 1) { - netdev_reset_tc(netdev); - } else { - ret = netdev_set_num_tc(netdev, tc); - if (ret) - goto out; - - for (i = 0; i < HNAE3_MAX_TC; i++) { - if (!kinfo->tc_info[i].enable) - continue; - - netdev_set_tc_queue(netdev, - kinfo->tc_info[i].tc, - kinfo->tc_info[i].tqp_count, - kinfo->tc_info[i].tqp_offset); - } - } - ret = hns3_nic_set_real_num_queue(netdev); out: @@ -1703,7 +1710,7 @@ static void hns3_set_default_feature(struct net_device *netdev) static int hns3_alloc_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb) { - unsigned int order = hnae_page_order(ring); + unsigned int order = hnae3_page_order(ring); struct page *p; p = dev_alloc_pages(order); @@ -1714,7 +1721,7 @@ static int hns3_alloc_buffer(struct hns3_enet_ring *ring, cb->page_offset = 0; cb->reuse_flag = 0; cb->buf = page_address(p); - cb->length = hnae_page_size(ring); + cb->length = hnae3_page_size(ring); cb->type = DESC_TYPE_PAGE; return 0; @@ -1780,33 +1787,27 @@ static void hns3_free_buffers(struct hns3_enet_ring *ring) /* free desc along with its attached buffer */ static void hns3_free_desc(struct hns3_enet_ring *ring) { + int size = ring->desc_num * sizeof(ring->desc[0]); + hns3_free_buffers(ring); - dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr, - ring->desc_num * sizeof(ring->desc[0]), - DMA_BIDIRECTIONAL); - ring->desc_dma_addr = 0; - kfree(ring->desc); - ring->desc = NULL; + if (ring->desc) { + dma_free_coherent(ring_to_dev(ring), size, + ring->desc, ring->desc_dma_addr); + ring->desc = NULL; + } } static int hns3_alloc_desc(struct hns3_enet_ring *ring) { int size = ring->desc_num * sizeof(ring->desc[0]); - ring->desc = kzalloc(size, GFP_KERNEL); + ring->desc = dma_zalloc_coherent(ring_to_dev(ring), size, + &ring->desc_dma_addr, + GFP_KERNEL); if (!ring->desc) return -ENOMEM; - ring->desc_dma_addr = dma_map_single(ring_to_dev(ring), ring->desc, - size, DMA_BIDIRECTIONAL); - if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) { - ring->desc_dma_addr = 0; - kfree(ring->desc); - ring->desc = NULL; - return -ENOMEM; - } - return 0; } @@ -1887,7 +1888,7 @@ static void hns3_nic_reclaim_one_desc(struct 
hns3_enet_ring *ring, int *bytes, (*pkts) += (desc_cb->type == DESC_TYPE_SKB); (*bytes) += desc_cb->length; - /* desc_cb will be cleaned, after hnae_free_buffer_detach*/ + /* desc_cb will be cleaned, after hnae3_free_buffer_detach*/ hns3_free_buffer_detach(ring, ring->next_to_clean); ring_ptr_move_fw(ring, next_to_clean); @@ -1917,7 +1918,7 @@ bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget) if (is_ring_empty(ring) || head == ring->next_to_clean) return true; /* no data to poll */ - if (!is_valid_clean_head(ring, head)) { + if (unlikely(!is_valid_clean_head(ring, head))) { netdev_err(netdev, "wrong head (%d, %d-%d)\n", head, ring->next_to_use, ring->next_to_clean); @@ -2016,15 +2017,15 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i, bool twobufs; twobufs = ((PAGE_SIZE < 8192) && - hnae_buf_size(ring) == HNS3_BUFFER_SIZE_2048); + hnae3_buf_size(ring) == HNS3_BUFFER_SIZE_2048); desc = &ring->desc[ring->next_to_clean]; size = le16_to_cpu(desc->rx.size); - truesize = hnae_buf_size(ring); + truesize = hnae3_buf_size(ring); if (!twobufs) - last_offset = hnae_page_size(ring) - hnae_buf_size(ring); + last_offset = hnae3_page_size(ring) - hnae3_buf_size(ring); skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len, size - pull_len, truesize); @@ -2076,13 +2077,13 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, return; /* check if hardware has done checksum */ - if (!hnae_get_bit(bd_base_info, HNS3_RXD_L3L4P_B)) + if (!hnae3_get_bit(bd_base_info, HNS3_RXD_L3L4P_B)) return; - if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L3E_B) || - hnae_get_bit(l234info, HNS3_RXD_L4E_B) || - hnae_get_bit(l234info, HNS3_RXD_OL3E_B) || - hnae_get_bit(l234info, HNS3_RXD_OL4E_B))) { + if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L3E_B) || + hnae3_get_bit(l234info, HNS3_RXD_L4E_B) || + hnae3_get_bit(l234info, HNS3_RXD_OL3E_B) || + hnae3_get_bit(l234info, HNS3_RXD_OL4E_B))) { netdev_err(netdev, "L3/L4 error pkt\n"); u64_stats_update_begin(&ring->syncp); ring->stats.l3l4_csum_err++; @@ -2091,23 +2092,24 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, return; } - l3_type = hnae_get_field(l234info, HNS3_RXD_L3ID_M, - HNS3_RXD_L3ID_S); - l4_type = hnae_get_field(l234info, HNS3_RXD_L4ID_M, - HNS3_RXD_L4ID_S); + l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, + HNS3_RXD_L3ID_S); + l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M, + HNS3_RXD_L4ID_S); - ol4_type = hnae_get_field(l234info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S); + ol4_type = hnae3_get_field(l234info, HNS3_RXD_OL4ID_M, + HNS3_RXD_OL4ID_S); switch (ol4_type) { case HNS3_OL4_TYPE_MAC_IN_UDP: case HNS3_OL4_TYPE_NVGRE: skb->csum_level = 1; case HNS3_OL4_TYPE_NO_TUN: /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */ - if (l3_type == HNS3_L3_TYPE_IPV4 || - (l3_type == HNS3_L3_TYPE_IPV6 && - (l4_type == HNS3_L4_TYPE_UDP || - l4_type == HNS3_L4_TYPE_TCP || - l4_type == HNS3_L4_TYPE_SCTP))) + if ((l3_type == HNS3_L3_TYPE_IPV4 || + l3_type == HNS3_L3_TYPE_IPV6) && + (l4_type == HNS3_L4_TYPE_UDP || + l4_type == HNS3_L4_TYPE_TCP || + l4_type == HNS3_L4_TYPE_SCTP)) skb->ip_summed = CHECKSUM_UNNECESSARY; break; } @@ -2135,8 +2137,8 @@ static u16 hns3_parse_vlan_tag(struct hns3_enet_ring *ring, #define HNS3_STRP_OUTER_VLAN 0x1 #define HNS3_STRP_INNER_VLAN 0x2 - switch (hnae_get_field(l234info, HNS3_RXD_STRP_TAGP_M, - HNS3_RXD_STRP_TAGP_S)) { + switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M, + HNS3_RXD_STRP_TAGP_S)) { case HNS3_STRP_OUTER_VLAN: 
vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); break; @@ -2174,7 +2176,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, bd_base_info = le32_to_cpu(desc->rx.bd_base_info); /* Check valid BD */ - if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B)) + if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) return -EFAULT; va = (unsigned char *)desc_cb->buf + desc_cb->page_offset; @@ -2229,7 +2231,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb); ring_ptr_move_fw(ring, next_to_clean); - while (!hnae_get_bit(bd_base_info, HNS3_RXD_FE_B)) { + while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) { desc = &ring->desc[ring->next_to_clean]; desc_cb = &ring->desc_cb[ring->next_to_clean]; bd_base_info = le32_to_cpu(desc->rx.bd_base_info); @@ -2257,7 +2259,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, vlan_tag); } - if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) { + if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) { netdev_err(netdev, "no valid bd,%016llx,%016llx\n", ((u64 *)desc)[0], ((u64 *)desc)[1]); u64_stats_update_begin(&ring->syncp); @@ -2269,7 +2271,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, } if (unlikely((!desc->rx.pkt_len) || - hnae_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) { + hnae3_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) { netdev_err(netdev, "truncated pkt\n"); u64_stats_update_begin(&ring->syncp); ring->stats.err_pkt_len++; @@ -2279,7 +2281,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, return -EFAULT; } - if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L2E_B))) { + if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L2E_B))) { netdev_err(netdev, "L2 error pkt\n"); u64_stats_update_begin(&ring->syncp); ring->stats.l2_err++; @@ -2532,10 +2534,10 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, tx_ring = tqp_vector->tx_group.ring; if (tx_ring) { cur_chain->tqp_index = tx_ring->tqp->tqp_index; - hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, - HNAE3_RING_TYPE_TX); - hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, - HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX); + hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, + HNAE3_RING_TYPE_TX); + hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX); cur_chain->next = NULL; @@ -2549,12 +2551,12 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, cur_chain->next = chain; chain->tqp_index = tx_ring->tqp->tqp_index; - hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B, - HNAE3_RING_TYPE_TX); - hnae_set_field(chain->int_gl_idx, - HNAE3_RING_GL_IDX_M, - HNAE3_RING_GL_IDX_S, - HNAE3_RING_GL_TX); + hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B, + HNAE3_RING_TYPE_TX); + hnae3_set_field(chain->int_gl_idx, + HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S, + HNAE3_RING_GL_TX); cur_chain = chain; } @@ -2564,10 +2566,10 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, if (!tx_ring && rx_ring) { cur_chain->next = NULL; cur_chain->tqp_index = rx_ring->tqp->tqp_index; - hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, - HNAE3_RING_TYPE_RX); - hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, - HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); + hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, + HNAE3_RING_TYPE_RX); + hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); rx_ring = rx_ring->next; } @@ -2579,10 +2581,10 @@ static int 
hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, cur_chain->next = chain; chain->tqp_index = rx_ring->tqp->tqp_index; - hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B, - HNAE3_RING_TYPE_RX); - hnae_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M, - HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); + hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B, + HNAE3_RING_TYPE_RX); + hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); cur_chain = chain; @@ -2745,10 +2747,6 @@ static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) if (ret) return ret; - ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq); - if (ret) - return ret; - hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) { @@ -2809,7 +2807,7 @@ static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, ring->io_base = q->io_base; } - hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type); + hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type); ring->tqp = q; ring->desc = NULL; @@ -3081,7 +3079,6 @@ static int hns3_client_init(struct hnae3_handle *handle) priv->dev = &pdev->dev; priv->netdev = netdev; priv->ae_handle = handle; - priv->ae_handle->reset_level = HNAE3_NONE_RESET; priv->ae_handle->last_reset_time = jiffies; priv->tx_timeout_count = 0; @@ -3102,6 +3099,11 @@ static int hns3_client_init(struct hnae3_handle *handle) /* Carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); + if (handle->flags & HNAE3_SUPPORT_VF) + handle->reset_level = HNAE3_VF_RESET; + else + handle->reset_level = HNAE3_FUNC_RESET; + ret = hns3_get_ring_config(priv); if (ret) { ret = -ENOMEM; @@ -3208,7 +3210,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc) struct net_device *ndev = kinfo->netdev; bool if_running; int ret; - u8 i; if (tc > HNAE3_MAX_TC) return -EINVAL; @@ -3218,10 +3219,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc) if_running = netif_running(ndev); - ret = netdev_set_num_tc(ndev, tc); - if (ret) - return ret; - if (if_running) { (void)hns3_nic_net_stop(ndev); msleep(100); @@ -3232,27 +3229,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc) if (ret) goto err_out; - if (tc <= 1) { - netdev_reset_tc(ndev); - goto out; - } - - for (i = 0; i < HNAE3_MAX_TC; i++) { - struct hnae3_tc_info *tc_info = &kinfo->tc_info[i]; - - if (tc_info->enable) - netdev_set_tc_queue(ndev, - tc_info->tc, - tc_info->tqp_count, - tc_info->tqp_offset); - } - - for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) { - netdev_set_prio_tc_map(ndev, i, - kinfo->prio_tc[i]); - } - -out: ret = hns3_nic_set_real_num_queue(ndev); err_out: @@ -3418,7 +3394,7 @@ static int hns3_reset_notify_down_enet(struct hnae3_handle *handle) struct net_device *ndev = kinfo->netdev; if (!netif_running(ndev)) - return -EIO; + return 0; return hns3_nic_net_stop(ndev); } @@ -3458,10 +3434,6 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) /* Carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); - ret = hns3_get_ring_config(priv); - if (ret) - return ret; - ret = hns3_nic_init_vector_data(priv); if (ret) return ret; @@ -3493,10 +3465,6 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) if (ret) netdev_err(netdev, "uninit ring error\n"); - hns3_put_ring_config(priv); - - priv->ring_data = NULL; - hns3_uninit_mac_addr(netdev); return ret; diff --git 
a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 3b083d5ae9ce..e4b4a8f2ceaa 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -1,11 +1,5 @@ -/* - * Copyright (c) 2016 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. #ifndef __HNS3_ENET_H #define __HNS3_ENET_H @@ -499,7 +493,6 @@ struct hns3_enet_tqp_vector { u16 num_tqps; /* total number of tqps in TQP vector */ - cpumask_t affinity_mask; char name[HNAE3_INT_NAME_LEN]; /* when 0 should adjust interrupt coalesce parameter */ @@ -591,7 +584,7 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value) #define hns3_write_dev(a, reg, value) \ hns3_write_reg((a)->io_base, (reg), (value)) -#define hnae_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \ +#define hnae3_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \ (tqp)->io_base + HNS3_RING_TX_RING_TAIL_REG) #define ring_to_dev(ring) (&(ring)->tqp->handle->pdev->dev) @@ -601,9 +594,9 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value) #define tx_ring_data(priv, idx) ((priv)->ring_data[idx]) -#define hnae_buf_size(_ring) ((_ring)->buf_size) -#define hnae_page_order(_ring) (get_order(hnae_buf_size(_ring))) -#define hnae_page_size(_ring) (PAGE_SIZE << hnae_page_order(_ring)) +#define hnae3_buf_size(_ring) ((_ring)->buf_size) +#define hnae3_page_order(_ring) (get_order(hnae3_buf_size(_ring))) +#define hnae3_page_size(_ring) (PAGE_SIZE << hnae3_page_order(_ring)) /* iterator for handling rings in ring group */ #define hns3_for_each_ring(pos, head) \ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index 40c0425b4023..80ba95d76260 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -1,11 +1,5 @@ -/* - * Copyright (c) 2016~2017 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. #include <linux/etherdevice.h> #include <linux/string.h> @@ -201,7 +195,9 @@ static u32 hns3_lb_check_rx_ring(struct hns3_nic_priv *priv, u32 budget) rx_group = &ring->tqp_vector->rx_group; pre_rx_pkt = rx_group->total_packets; + preempt_disable(); hns3_clean_rx_ring(ring, budget, hns3_lb_check_skb_data); + preempt_enable(); rcv_good_pkt_total += (rx_group->total_packets - pre_rx_pkt); rx_group->total_packets = pre_rx_pkt; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index c36d64710fa6..ac13cb2b168e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -1,11 +1,5 @@ -/* - * Copyright (c) 2016~2017 Hisilicon Limited. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. #include <linux/dma-mapping.h> #include <linux/slab.h> @@ -18,8 +12,7 @@ #include "hclge_main.h" #define hclge_is_csq(ring) ((ring)->flag & HCLGE_TYPE_CSQ) -#define hclge_ring_to_dma_dir(ring) (hclge_is_csq(ring) ? \ - DMA_TO_DEVICE : DMA_FROM_DEVICE) + #define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev) static int hclge_ring_space(struct hclge_cmq_ring *ring) @@ -46,31 +39,24 @@ static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring) { int size = ring->desc_num * sizeof(struct hclge_desc); - ring->desc = kzalloc(size, GFP_KERNEL); + ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring), + size, &ring->desc_dma_addr, + GFP_KERNEL); if (!ring->desc) return -ENOMEM; - ring->desc_dma_addr = dma_map_single(cmq_ring_to_dev(ring), ring->desc, - size, DMA_BIDIRECTIONAL); - if (dma_mapping_error(cmq_ring_to_dev(ring), ring->desc_dma_addr)) { - ring->desc_dma_addr = 0; - kfree(ring->desc); - ring->desc = NULL; - return -ENOMEM; - } - return 0; } static void hclge_free_cmd_desc(struct hclge_cmq_ring *ring) { - dma_unmap_single(cmq_ring_to_dev(ring), ring->desc_dma_addr, - ring->desc_num * sizeof(ring->desc[0]), - DMA_BIDIRECTIONAL); + int size = ring->desc_num * sizeof(struct hclge_desc); - ring->desc_dma_addr = 0; - kfree(ring->desc); - ring->desc = NULL; + if (ring->desc) { + dma_free_coherent(cmq_ring_to_dev(ring), size, + ring->desc, ring->desc_dma_addr); + ring->desc = NULL; + } } static int hclge_alloc_cmd_queue(struct hclge_dev *hdev, int ring_type) @@ -80,7 +66,7 @@ static int hclge_alloc_cmd_queue(struct hclge_dev *hdev, int ring_type) (ring_type == HCLGE_TYPE_CSQ) ? 
&hw->cmq.csq : &hw->cmq.crq; int ret; - ring->flag = ring_type; + ring->ring_type = ring_type; ring->dev = hdev; ret = hclge_alloc_cmd_desc(ring); @@ -111,8 +97,6 @@ void hclge_cmd_setup_basic_desc(struct hclge_desc *desc, if (is_read) desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR); - else - desc->flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR); } static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring) @@ -121,26 +105,26 @@ static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring) struct hclge_dev *hdev = ring->dev; struct hclge_hw *hw = &hdev->hw; - if (ring->flag == HCLGE_TYPE_CSQ) { + if (ring->ring_type == HCLGE_TYPE_CSQ) { hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG, - (u32)dma); + lower_32_bits(dma)); hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG, - (u32)((dma >> 31) >> 1)); + upper_32_bits(dma)); hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, (ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) | HCLGE_NIC_CMQ_ENABLE); - hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0); hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0); } else { hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG, - (u32)dma); + lower_32_bits(dma)); hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG, - (u32)((dma >> 31) >> 1)); + upper_32_bits(dma)); hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG, (ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) | HCLGE_NIC_CMQ_ENABLE); - hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0); hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0); } } @@ -152,33 +136,27 @@ static void hclge_cmd_init_regs(struct hclge_hw *hw) static int hclge_cmd_csq_clean(struct hclge_hw *hw) { - struct hclge_dev *hdev = (struct hclge_dev *)hw->back; + struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw); struct hclge_cmq_ring *csq = &hw->cmq.csq; - u16 ntc = csq->next_to_clean; - struct hclge_desc *desc; - int clean = 0; u32 head; + int clean; - desc = &csq->desc[ntc]; head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG); rmb(); /* Make sure head is ready before touch any data */ if (!is_valid_csq_clean_head(csq, head)) { - dev_warn(&hdev->pdev->dev, "wrong head (%d, %d-%d)\n", head, - csq->next_to_use, csq->next_to_clean); - return 0; - } - - while (head != ntc) { - memset(desc, 0, sizeof(*desc)); - ntc++; - if (ntc == csq->desc_num) - ntc = 0; - desc = &csq->desc[ntc]; - clean++; + dev_warn(&hdev->pdev->dev, "wrong cmd head (%d, %d-%d)\n", head, + csq->next_to_use, csq->next_to_clean); + dev_warn(&hdev->pdev->dev, + "Disabling any further commands to IMP firmware\n"); + set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); + dev_warn(&hdev->pdev->dev, + "IMP firmware watchdog reset soon expected!\n"); + return -EIO; } - csq->next_to_clean = ntc; + clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num; + csq->next_to_clean = head; return clean; } @@ -216,7 +194,7 @@ static bool hclge_is_special_opcode(u16 opcode) **/ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) { - struct hclge_dev *hdev = (struct hclge_dev *)hw->back; + struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw); struct hclge_desc *desc_to_use; bool complete = false; u32 timeout = 0; @@ -227,7 +205,8 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) spin_lock_bh(&hw->cmq.csq.lock); - if (num > hclge_ring_space(&hw->cmq.csq)) { + if (num > hclge_ring_space(&hw->cmq.csq) || + test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) { spin_unlock_bh(&hw->cmq.csq.lock); return -EBUSY; } @@ -256,33 
+235,34 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) */ if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) { do { - if (hclge_cmd_csq_done(hw)) + if (hclge_cmd_csq_done(hw)) { + complete = true; break; + } udelay(1); timeout++; } while (timeout < hw->cmq.tx_timeout); } - if (hclge_cmd_csq_done(hw)) { - complete = true; + if (!complete) { + retval = -EAGAIN; + } else { handle = 0; while (handle < num) { /* Get the result of hardware write back */ desc_to_use = &hw->cmq.csq.desc[ntc]; desc[handle] = *desc_to_use; - pr_debug("Get cmd desc:\n"); if (likely(!hclge_is_special_opcode(opcode))) desc_ret = le16_to_cpu(desc[handle].retval); else desc_ret = le16_to_cpu(desc[0].retval); - if ((enum hclge_cmd_return_status)desc_ret == - HCLGE_CMD_EXEC_SUCCESS) + if (desc_ret == HCLGE_CMD_EXEC_SUCCESS) retval = 0; else retval = -EIO; - hw->cmq.last_status = (enum hclge_cmd_status)desc_ret; + hw->cmq.last_status = desc_ret; ntc++; handle++; if (ntc == hw->cmq.csq.desc_num) @@ -290,15 +270,13 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) } } - if (!complete) - retval = -EAGAIN; - /* Clean the command send queue */ handle = hclge_cmd_csq_clean(hw); - if (handle != num) { + if (handle < 0) + retval = handle; + else if (handle != num) dev_warn(&hdev->pdev->dev, "cleaned %d, need to clean %d\n", handle, num); - } spin_unlock_bh(&hw->cmq.csq.lock); @@ -369,6 +347,7 @@ int hclge_cmd_init(struct hclge_dev *hdev) spin_lock_init(&hdev->hw.cmq.crq.lock); hclge_cmd_init_regs(&hdev->hw); + clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); ret = hclge_cmd_query_firmware_version(&hdev->hw, &version); if (ret) { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index d9aaa76c76eb..5cd22f9bbdec 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -1,11 +1,5 @@ -/* - * Copyright (c) 2016~2017 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. 
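The hclge_cmd_csq_clean() rewrite above drops the descriptor-by-descriptor zeroing loop in favour of one modular-arithmetic step. A worked sketch of that computation (the function name and ring size are illustrative only):

/* descriptors completed by firmware on a ring of desc_num entries;
 * mirrors the expression in the hclge_cmd_csq_clean() hunk above
 */
static int example_csq_cleaned(int head, int next_to_clean, int desc_num)
{
	/* adding desc_num before taking the modulo keeps the result
	 * non-negative when head has already wrapped past the ring end
	 */
	return (head - next_to_clean + desc_num) % desc_num;
}

/* e.g. desc_num = 1024, next_to_clean = 1020, head = 3:
 * (3 - 1020 + 1024) % 1024 = 7 descriptors to reclaim
 */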
#ifndef __HCLGE_CMD_H #define __HCLGE_CMD_H @@ -27,17 +21,10 @@ struct hclge_desc { __le32 data[6]; }; -struct hclge_desc_cb { - dma_addr_t dma; - void *va; - u32 length; -}; - struct hclge_cmq_ring { dma_addr_t desc_dma_addr; struct hclge_desc *desc; - struct hclge_desc_cb *desc_cb; - struct hclge_dev *dev; + struct hclge_dev *dev; u32 head; u32 tail; @@ -45,7 +32,7 @@ struct hclge_cmq_ring { u16 desc_num; int next_to_use; int next_to_clean; - u8 flag; + u8 ring_type; /* cmq ring type */ spinlock_t lock; /* Command queue lock */ }; @@ -71,26 +58,19 @@ struct hclge_misc_vector { struct hclge_cmq { struct hclge_cmq_ring csq; struct hclge_cmq_ring crq; - u16 tx_timeout; /* Tx timeout */ + u16 tx_timeout; enum hclge_cmd_status last_status; }; -#define HCLGE_CMD_FLAG_IN_VALID_SHIFT 0 -#define HCLGE_CMD_FLAG_OUT_VALID_SHIFT 1 -#define HCLGE_CMD_FLAG_NEXT_SHIFT 2 -#define HCLGE_CMD_FLAG_WR_OR_RD_SHIFT 3 -#define HCLGE_CMD_FLAG_NO_INTR_SHIFT 4 -#define HCLGE_CMD_FLAG_ERR_INTR_SHIFT 5 - -#define HCLGE_CMD_FLAG_IN BIT(HCLGE_CMD_FLAG_IN_VALID_SHIFT) -#define HCLGE_CMD_FLAG_OUT BIT(HCLGE_CMD_FLAG_OUT_VALID_SHIFT) -#define HCLGE_CMD_FLAG_NEXT BIT(HCLGE_CMD_FLAG_NEXT_SHIFT) -#define HCLGE_CMD_FLAG_WR BIT(HCLGE_CMD_FLAG_WR_OR_RD_SHIFT) -#define HCLGE_CMD_FLAG_NO_INTR BIT(HCLGE_CMD_FLAG_NO_INTR_SHIFT) -#define HCLGE_CMD_FLAG_ERR_INTR BIT(HCLGE_CMD_FLAG_ERR_INTR_SHIFT) +#define HCLGE_CMD_FLAG_IN BIT(0) +#define HCLGE_CMD_FLAG_OUT BIT(1) +#define HCLGE_CMD_FLAG_NEXT BIT(2) +#define HCLGE_CMD_FLAG_WR BIT(3) +#define HCLGE_CMD_FLAG_NO_INTR BIT(4) +#define HCLGE_CMD_FLAG_ERR_INTR BIT(5) enum hclge_opcode_type { - /* Generic command */ + /* Generic commands */ HCLGE_OPC_QUERY_FW_VER = 0x0001, HCLGE_OPC_CFG_RST_TRIGGER = 0x0020, HCLGE_OPC_GBL_RST_STATUS = 0x0021, @@ -106,18 +86,16 @@ enum hclge_opcode_type { HCLGE_OPC_QUERY_REG_NUM = 0x0040, HCLGE_OPC_QUERY_32_BIT_REG = 0x0041, HCLGE_OPC_QUERY_64_BIT_REG = 0x0042, - /* Device management command */ - /* MAC commond */ + /* MAC command */ HCLGE_OPC_CONFIG_MAC_MODE = 0x0301, HCLGE_OPC_CONFIG_AN_MODE = 0x0304, HCLGE_OPC_QUERY_AN_RESULT = 0x0306, HCLGE_OPC_QUERY_LINK_STATUS = 0x0307, HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308, HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309, - /* MACSEC command */ - /* PFC/Pause CMD*/ + /* PFC/Pause commands */ HCLGE_OPC_CFG_MAC_PAUSE_EN = 0x0701, HCLGE_OPC_CFG_PFC_PAUSE_EN = 0x0702, HCLGE_OPC_CFG_MAC_PARA = 0x0703, @@ -148,7 +126,7 @@ enum hclge_opcode_type { HCLGE_OPC_TM_QS_SCH_MODE_CFG = 0x0814, HCLGE_OPC_TM_BP_TO_QSET_MAPPING = 0x0815, - /* Packet buffer allocate command */ + /* Packet buffer allocate commands */ HCLGE_OPC_TX_BUFF_ALLOC = 0x0901, HCLGE_OPC_RX_PRIV_BUFF_ALLOC = 0x0902, HCLGE_OPC_RX_PRIV_WL_ALLOC = 0x0903, @@ -156,11 +134,10 @@ enum hclge_opcode_type { HCLGE_OPC_RX_COM_WL_ALLOC = 0x0905, HCLGE_OPC_RX_GBL_PKT_CNT = 0x0906, - /* PTP command */ /* TQP management command */ HCLGE_OPC_SET_TQP_MAP = 0x0A01, - /* TQP command */ + /* TQP commands */ HCLGE_OPC_CFG_TX_QUEUE = 0x0B01, HCLGE_OPC_QUERY_TX_POINTER = 0x0B02, HCLGE_OPC_QUERY_TX_STATUS = 0x0B03, @@ -172,10 +149,10 @@ enum hclge_opcode_type { HCLGE_OPC_CFG_COM_TQP_QUEUE = 0x0B20, HCLGE_OPC_RESET_TQP_QUEUE = 0x0B22, - /* TSO cmd */ + /* TSO command */ HCLGE_OPC_TSO_GENERIC_CONFIG = 0x0C01, - /* RSS cmd */ + /* RSS commands */ HCLGE_OPC_RSS_GENERIC_CONFIG = 0x0D01, HCLGE_OPC_RSS_INDIR_TABLE = 0x0D07, HCLGE_OPC_RSS_TC_MODE = 0x0D08, @@ -184,15 +161,15 @@ enum hclge_opcode_type { /* Promisuous mode command */ HCLGE_OPC_CFG_PROMISC_MODE = 0x0E01, - /* Vlan offload command */ + /* Vlan 
offload commands */ HCLGE_OPC_VLAN_PORT_TX_CFG = 0x0F01, HCLGE_OPC_VLAN_PORT_RX_CFG = 0x0F02, - /* Interrupts cmd */ + /* Interrupts commands */ HCLGE_OPC_ADD_RING_TO_VECTOR = 0x1503, HCLGE_OPC_DEL_RING_TO_VECTOR = 0x1504, - /* MAC command */ + /* MAC commands */ HCLGE_OPC_MAC_VLAN_ADD = 0x1000, HCLGE_OPC_MAC_VLAN_REMOVE = 0x1001, HCLGE_OPC_MAC_VLAN_TYPE_ID = 0x1002, @@ -201,13 +178,13 @@ enum hclge_opcode_type { HCLGE_OPC_MAC_ETHTYPE_REMOVE = 0x1011, HCLGE_OPC_MAC_VLAN_MASK_SET = 0x1012, - /* Multicast linear table cmd */ + /* Multicast linear table commands */ HCLGE_OPC_MTA_MAC_MODE_CFG = 0x1020, HCLGE_OPC_MTA_MAC_FUNC_CFG = 0x1021, HCLGE_OPC_MTA_TBL_ITEM_CFG = 0x1022, HCLGE_OPC_MTA_TBL_ITEM_QUERY = 0x1023, - /* VLAN command */ + /* VLAN commands */ HCLGE_OPC_VLAN_FILTER_CTRL = 0x1100, HCLGE_OPC_VLAN_FILTER_PF_CFG = 0x1101, HCLGE_OPC_VLAN_FILTER_VF_CFG = 0x1102, @@ -215,7 +192,7 @@ enum hclge_opcode_type { /* MDIO command */ HCLGE_OPC_MDIO_CONFIG = 0x1900, - /* QCN command */ + /* QCN commands */ HCLGE_OPC_QCN_MOD_CFG = 0x1A01, HCLGE_OPC_QCN_GRP_TMPLT_CFG = 0x1A02, HCLGE_OPC_QCN_SHAPPING_IR_CFG = 0x1A03, @@ -225,7 +202,7 @@ enum hclge_opcode_type { HCLGE_OPC_QCN_AJUST_INIT = 0x1A07, HCLGE_OPC_QCN_DFX_CNT_STATUS = 0x1A08, - /* Mailbox cmd */ + /* Mailbox command */ HCLGEVF_OPC_MBX_PF_TO_VF = 0x2000, /* Led command */ @@ -382,7 +359,7 @@ struct hclge_pf_res_cmd { __le16 msixcap_localid_ba_nic; __le16 msixcap_localid_ba_rocee; #define HCLGE_PF_VEC_NUM_S 0 -#define HCLGE_PF_VEC_NUM_M (0xff << HCLGE_PF_VEC_NUM_S) +#define HCLGE_PF_VEC_NUM_M GENMASK(7, 0) __le16 pf_intr_vector_number; __le16 pf_own_fun_number; __le32 rsv[3]; @@ -471,8 +448,8 @@ struct hclge_rss_tc_mode_cmd { u8 rsv[8]; }; -#define HCLGE_LINK_STS_B 0 -#define HCLGE_LINK_STATUS BIT(HCLGE_LINK_STS_B) +#define HCLGE_LINK_STATUS_UP_B 0 +#define HCLGE_LINK_STATUS_UP_M BIT(HCLGE_LINK_STATUS_UP_B) struct hclge_link_status_cmd { u8 status; u8 rsv[23]; @@ -571,7 +548,8 @@ struct hclge_config_auto_neg_cmd { struct hclge_config_max_frm_size_cmd { __le16 max_frm_size; - u8 rsv[22]; + u8 min_frm_size; + u8 rsv[21]; }; enum hclge_mac_vlan_tbl_opcode { @@ -581,13 +559,13 @@ enum hclge_mac_vlan_tbl_opcode { HCLGE_MAC_VLAN_LKUP, /* Lookup a entry through mac_vlan key */ }; -#define HCLGE_MAC_VLAN_BIT0_EN_B 0x0 -#define HCLGE_MAC_VLAN_BIT1_EN_B 0x1 -#define HCLGE_MAC_EPORT_SW_EN_B 0xc -#define HCLGE_MAC_EPORT_TYPE_B 0xb -#define HCLGE_MAC_EPORT_VFID_S 0x3 +#define HCLGE_MAC_VLAN_BIT0_EN_B 0 +#define HCLGE_MAC_VLAN_BIT1_EN_B 1 +#define HCLGE_MAC_EPORT_SW_EN_B 12 +#define HCLGE_MAC_EPORT_TYPE_B 11 +#define HCLGE_MAC_EPORT_VFID_S 3 #define HCLGE_MAC_EPORT_VFID_M GENMASK(10, 3) -#define HCLGE_MAC_EPORT_PFID_S 0x0 +#define HCLGE_MAC_EPORT_PFID_S 0 #define HCLGE_MAC_EPORT_PFID_M GENMASK(2, 0) struct hclge_mac_vlan_tbl_entry_cmd { u8 flags; @@ -603,7 +581,7 @@ struct hclge_mac_vlan_tbl_entry_cmd { u8 rsv2[6]; }; -#define HCLGE_VLAN_MASK_EN_B 0x0 +#define HCLGE_VLAN_MASK_EN_B 0 struct hclge_mac_vlan_mask_entry_cmd { u8 rsv0[2]; u8 vlan_mask; @@ -634,23 +612,23 @@ struct hclge_mac_mgr_tbl_entry_cmd { u8 rsv3[2]; }; -#define HCLGE_CFG_MTA_MAC_SEL_S 0x0 +#define HCLGE_CFG_MTA_MAC_SEL_S 0 #define HCLGE_CFG_MTA_MAC_SEL_M GENMASK(1, 0) -#define HCLGE_CFG_MTA_MAC_EN_B 0x7 +#define HCLGE_CFG_MTA_MAC_EN_B 7 struct hclge_mta_filter_mode_cmd { u8 dmac_sel_en; /* Use lowest 2 bit as sel_mode, bit 7 as enable */ u8 rsv[23]; }; -#define HCLGE_CFG_FUNC_MTA_ACCEPT_B 0x0 +#define HCLGE_CFG_FUNC_MTA_ACCEPT_B 0 struct hclge_cfg_func_mta_filter_cmd { u8 accept; /* Only 
used lowest 1 bit */ u8 function_id; u8 rsv[22]; }; -#define HCLGE_CFG_MTA_ITEM_ACCEPT_B 0x0 -#define HCLGE_CFG_MTA_ITEM_IDX_S 0x0 +#define HCLGE_CFG_MTA_ITEM_ACCEPT_B 0 +#define HCLGE_CFG_MTA_ITEM_IDX_S 0 #define HCLGE_CFG_MTA_ITEM_IDX_M GENMASK(11, 0) struct hclge_cfg_func_mta_item_cmd { __le16 item_idx; /* Only used lowest 12 bit */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c index 955f0e3d5c95..f08ebb7caaaf 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c @@ -1,11 +1,5 @@ -/* - * Copyright (c) 2016-2017 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. #include "hclge_main.h" #include "hclge_tm.h" diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h index 7d808ee96694..278f21e02736 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h @@ -1,11 +1,5 @@ -/* - * Copyright (c) 2016~2017 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. #ifndef __HCLGE_DCB_H__ #define __HCLGE_DCB_H__ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index d318d35e598f..a9b888f1544a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -1,11 +1,5 @@ -/* - * Copyright (c) 2016-2017 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. #include <linux/acpi.h> #include <linux/device.h> @@ -939,8 +933,8 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev) if (hnae3_dev_roce_supported(hdev)) { hdev->num_roce_msi = - hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number), - HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); + hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number), + HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); /* PF should have NIC vectors and Roce vectors, * NIC vectors are queued before Roce vectors. 
@@ -948,8 +942,8 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev) hdev->num_msi = hdev->num_roce_msi + HCLGE_ROCE_VECTOR_OFFSET; } else { hdev->num_msi = - hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number), - HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); + hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number), + HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); } return 0; @@ -1038,38 +1032,38 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) req = (struct hclge_cfg_param_cmd *)desc[0].data; /* get the configuration */ - cfg->vmdq_vport_num = hnae_get_field(__le32_to_cpu(req->param[0]), - HCLGE_CFG_VMDQ_M, - HCLGE_CFG_VMDQ_S); - cfg->tc_num = hnae_get_field(__le32_to_cpu(req->param[0]), - HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S); - cfg->tqp_desc_num = hnae_get_field(__le32_to_cpu(req->param[0]), - HCLGE_CFG_TQP_DESC_N_M, - HCLGE_CFG_TQP_DESC_N_S); - - cfg->phy_addr = hnae_get_field(__le32_to_cpu(req->param[1]), - HCLGE_CFG_PHY_ADDR_M, - HCLGE_CFG_PHY_ADDR_S); - cfg->media_type = hnae_get_field(__le32_to_cpu(req->param[1]), - HCLGE_CFG_MEDIA_TP_M, - HCLGE_CFG_MEDIA_TP_S); - cfg->rx_buf_len = hnae_get_field(__le32_to_cpu(req->param[1]), - HCLGE_CFG_RX_BUF_LEN_M, - HCLGE_CFG_RX_BUF_LEN_S); + cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]), + HCLGE_CFG_VMDQ_M, + HCLGE_CFG_VMDQ_S); + cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]), + HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S); + cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]), + HCLGE_CFG_TQP_DESC_N_M, + HCLGE_CFG_TQP_DESC_N_S); + + cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_PHY_ADDR_M, + HCLGE_CFG_PHY_ADDR_S); + cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_MEDIA_TP_M, + HCLGE_CFG_MEDIA_TP_S); + cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_RX_BUF_LEN_M, + HCLGE_CFG_RX_BUF_LEN_S); /* get mac_address */ mac_addr_tmp = __le32_to_cpu(req->param[2]); - mac_addr_tmp_high = hnae_get_field(__le32_to_cpu(req->param[3]), - HCLGE_CFG_MAC_ADDR_H_M, - HCLGE_CFG_MAC_ADDR_H_S); + mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]), + HCLGE_CFG_MAC_ADDR_H_M, + HCLGE_CFG_MAC_ADDR_H_S); mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1; - cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]), - HCLGE_CFG_DEFAULT_SPEED_M, - HCLGE_CFG_DEFAULT_SPEED_S); - cfg->rss_size_max = hnae_get_field(__le32_to_cpu(req->param[3]), - HCLGE_CFG_RSS_SIZE_M, - HCLGE_CFG_RSS_SIZE_S); + cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]), + HCLGE_CFG_DEFAULT_SPEED_M, + HCLGE_CFG_DEFAULT_SPEED_S); + cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]), + HCLGE_CFG_RSS_SIZE_M, + HCLGE_CFG_RSS_SIZE_S); for (i = 0; i < ETH_ALEN; i++) cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff; @@ -1077,9 +1071,9 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) req = (struct hclge_cfg_param_cmd *)desc[1].data; cfg->numa_node_map = __le32_to_cpu(req->param[0]); - cfg->speed_ability = hnae_get_field(__le32_to_cpu(req->param[1]), - HCLGE_CFG_SPEED_ABILITY_M, - HCLGE_CFG_SPEED_ABILITY_S); + cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_SPEED_ABILITY_M, + HCLGE_CFG_SPEED_ABILITY_S); } /* hclge_get_cfg: query the static parameter from flash @@ -1098,22 +1092,22 @@ static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg) req = (struct hclge_cfg_param_cmd *)desc[i].data; hclge_cmd_setup_basic_desc(&desc[i], 
HCLGE_OPC_GET_CFG_PARAM, true); - hnae_set_field(offset, HCLGE_CFG_OFFSET_M, - HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES); + hnae3_set_field(offset, HCLGE_CFG_OFFSET_M, + HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES); /* Len should be in units of 4 bytes when sent to hardware */ - hnae_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S, - HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT); + hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S, + HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT); req->offset = cpu_to_le32(offset); } ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM); if (ret) { - dev_err(&hdev->pdev->dev, - "get config failed %d.\n", ret); + dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret); return ret; } hclge_parse_cfg(hcfg, desc); + return 0; } @@ -1130,13 +1124,10 @@ static int hclge_get_cap(struct hclge_dev *hdev) /* get pf resource */ ret = hclge_query_pf_resource(hdev); - if (ret) { - dev_err(&hdev->pdev->dev, - "query pf resource error %d.\n", ret); - return ret; - } + if (ret) + dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret); - return 0; + return ret; } static int hclge_configure(struct hclge_dev *hdev) @@ -1189,7 +1180,7 @@ static int hclge_configure(struct hclge_dev *hdev) /* Non-contiguous TCs are currently not supported */ for (i = 0; i < hdev->tm_info.num_tc; i++) - hnae_set_bit(hdev->hw_tc_map, i, 1); + hnae3_set_bit(hdev->hw_tc_map, i, 1); hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE; @@ -1208,13 +1199,13 @@ static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min, req = (struct hclge_cfg_tso_status_cmd *)desc.data; tso_mss = 0; - hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M, - HCLGE_TSO_MSS_MIN_S, tso_mss_min); + hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M, + HCLGE_TSO_MSS_MIN_S, tso_mss_min); req->tso_mss_min = cpu_to_le16(tso_mss); tso_mss = 0; - hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M, - HCLGE_TSO_MSS_MIN_S, tso_mss_max); + hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M, + HCLGE_TSO_MSS_MIN_S, tso_mss_max); req->tso_mss_max = cpu_to_le16(tso_mss); return hclge_cmd_send(&hdev->hw, &desc, 1); @@ -1265,13 +1256,10 @@ static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id, req->tqp_vid = cpu_to_le16(tqp_vid); ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { - dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", - ret); - return ret; - } + if (ret) + dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret); - return 0; + return ret; } static int hclge_assign_tqp(struct hclge_vport *vport, @@ -1330,12 +1318,10 @@ static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps) return -ENOMEM; ret = hclge_assign_tqp(vport, kinfo->tqp, kinfo->num_tqps); - if (ret) { + if (ret) dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret); - return -EINVAL; - } - return 0; + return ret; } static int hclge_map_tqp_to_vport(struct hclge_dev *hdev, @@ -1487,13 +1473,11 @@ static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev, } ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { + if (ret) dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n", ret); - return ret; - } - return 0; + return ret; } static int hclge_tx_buffer_alloc(struct hclge_dev *hdev, @@ -1501,13 +1485,10 @@ static int hclge_tx_buffer_alloc(struct hclge_dev *hdev, { int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc); - if (ret) { - dev_err(&hdev->pdev->dev, - "tx buffer alloc failed %d\n", ret); - return ret; - } + if (ret) + dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret); - return 0; + return
ret; } static int hclge_get_tc_num(struct hclge_dev *hdev) @@ -1825,17 +1806,13 @@ static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev, (1 << HCLGE_TC0_PRI_BUF_EN_B)); ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { + if (ret) dev_err(&hdev->pdev->dev, "rx private buffer alloc cmd failed %d\n", ret); - return ret; - } - return 0; + return ret; } -#define HCLGE_PRIV_ENABLE(a) ((a) > 0 ? 1 : 0) - static int hclge_rx_priv_wl_config(struct hclge_dev *hdev, struct hclge_pkt_buf_alloc *buf_alloc) { @@ -1863,25 +1840,21 @@ static int hclge_rx_priv_wl_config(struct hclge_dev *hdev, req->tc_wl[j].high = cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S); req->tc_wl[j].high |= - cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.high) << - HCLGE_RX_PRIV_EN_B); + cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); req->tc_wl[j].low = cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S); req->tc_wl[j].low |= - cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.low) << - HCLGE_RX_PRIV_EN_B); + cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); } } /* Send 2 descriptors at one time */ ret = hclge_cmd_send(&hdev->hw, desc, 2); - if (ret) { + if (ret) dev_err(&hdev->pdev->dev, "rx private waterline config cmd failed %d\n", ret); - return ret; - } - return 0; + return ret; } static int hclge_common_thrd_config(struct hclge_dev *hdev, @@ -1911,24 +1884,20 @@ static int hclge_common_thrd_config(struct hclge_dev *hdev, req->com_thrd[j].high = cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S); req->com_thrd[j].high |= - cpu_to_le16(HCLGE_PRIV_ENABLE(tc->high) << - HCLGE_RX_PRIV_EN_B); + cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); req->com_thrd[j].low = cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S); req->com_thrd[j].low |= - cpu_to_le16(HCLGE_PRIV_ENABLE(tc->low) << - HCLGE_RX_PRIV_EN_B); + cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); } } /* Send 2 descriptors at one time */ ret = hclge_cmd_send(&hdev->hw, desc, 2); - if (ret) { + if (ret) dev_err(&hdev->pdev->dev, "common threshold config cmd failed %d\n", ret); - return ret; - } - return 0; + return ret; } static int hclge_common_wl_config(struct hclge_dev *hdev, @@ -1943,23 +1912,17 @@ static int hclge_common_wl_config(struct hclge_dev *hdev, req = (struct hclge_rx_com_wl *)desc.data; req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S); - req->com_wl.high |= - cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.high) << - HCLGE_RX_PRIV_EN_B); + req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S); - req->com_wl.low |= - cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.low) << - HCLGE_RX_PRIV_EN_B); + req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { + if (ret) dev_err(&hdev->pdev->dev, "common waterline config cmd failed %d\n", ret); - return ret; - } - return 0; + return ret; } int hclge_buffer_alloc(struct hclge_dev *hdev) @@ -2118,48 +2081,48 @@ int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex) hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false); - hnae_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex); + hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex); switch (speed) { case HCLGE_MAC_SPEED_10M: - hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 6); + hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 6); break; case HCLGE_MAC_SPEED_100M: - hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 7); + hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 7); break; case
HCLGE_MAC_SPEED_1G: - hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 0); + hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 0); break; case HCLGE_MAC_SPEED_10G: - hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 1); + hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 1); break; case HCLGE_MAC_SPEED_25G: - hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 2); + hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 2); break; case HCLGE_MAC_SPEED_40G: - hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 3); + hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 3); break; case HCLGE_MAC_SPEED_50G: - hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 4); + hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 4); break; case HCLGE_MAC_SPEED_100G: - hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 5); + hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 5); break; default: dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed); return -EINVAL; } - hnae_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B, - 1); + hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B, + 1); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { @@ -2201,18 +2164,16 @@ static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed, return ret; } - *duplex = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B); - speed_tmp = hnae_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M, - HCLGE_QUERY_SPEED_S); + *duplex = hnae3_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B); + speed_tmp = hnae3_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M, + HCLGE_QUERY_SPEED_S); ret = hclge_parse_speed(speed_tmp, speed); - if (ret) { + if (ret) dev_err(&hdev->pdev->dev, "could not parse speed(=%d), %d\n", speed_tmp, ret); - return -EIO; - } - return 0; + return ret; } static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable) @@ -2225,17 +2186,15 @@ static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable) hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false); req = (struct hclge_config_auto_neg_cmd *)desc.data; - hnae_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable); + hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable); req->cfg_an_cmd_flag = cpu_to_le32(flag); ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { + if (ret) dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n", ret); - return ret; - } - return 0; + return ret; } static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable) @@ -2269,8 +2228,8 @@ static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev, req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false); - hnae_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B, - mask_vlan ? 1 : 0); + hnae3_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B, + mask_vlan ? 
1 : 0); ether_addr_copy(req->mac_mask, mac_mask); status = hclge_cmd_send(&hdev->hw, &desc, 1); @@ -2341,13 +2300,11 @@ static int hclge_mac_init(struct hclge_dev *hdev) mtu = ETH_DATA_LEN; ret = hclge_set_mtu(handle, mtu); - if (ret) { + if (ret) dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret); - return ret; - } - return 0; + return ret; } static void hclge_mbx_task_schedule(struct hclge_dev *hdev) @@ -2386,7 +2343,7 @@ static int hclge_get_mac_link_status(struct hclge_dev *hdev) } req = (struct hclge_link_status_cmd *)desc.data; - link_status = req->status & HCLGE_LINK_STATUS; + link_status = req->status & HCLGE_LINK_STATUS_UP_M; return !!link_status; } @@ -2505,7 +2462,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) u32 cmdq_src_reg; /* fetch the events from their corresponding regs */ - rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG); + rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS); cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG); /* Assumption: If by any chance reset and mailbox events are reported @@ -2517,12 +2474,14 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) /* check for vector0 reset event sources */ if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) { + set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending); *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); return HCLGE_VECTOR0_EVENT_RST; } if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) { + set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); set_bit(HNAE3_CORE_RESET, &hdev->reset_pending); *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B); return HCLGE_VECTOR0_EVENT_RST; @@ -2614,6 +2573,12 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data) static void hclge_free_vector(struct hclge_dev *hdev, int vector_id) { + if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) { + dev_warn(&hdev->pdev->dev, + "vector(vector_id %d) has been freed.\n", vector_id); + return; + } + hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT; hdev->num_msi_left += 1; hdev->num_msi_used -= 1; @@ -2705,7 +2670,7 @@ static int hclge_reset_wait(struct hclge_dev *hdev) } val = hclge_read_dev(&hdev->hw, reg); - while (hnae_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) { + while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) { msleep(HCLGE_RESET_WATI_MS); val = hclge_read_dev(&hdev->hw, reg); cnt++; @@ -2727,8 +2692,7 @@ int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id) int ret; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false); - hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_MAC_B, 0); - hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1); + hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1); req->fun_reset_vfid = func_id; ret = hclge_cmd_send(&hdev->hw, &desc, 1); @@ -2747,13 +2711,13 @@ static void hclge_do_reset(struct hclge_dev *hdev) switch (hdev->reset_type) { case HNAE3_GLOBAL_RESET: val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); - hnae_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1); + hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1); hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); dev_info(&pdev->dev, "Global Reset requested\n"); break; case HNAE3_CORE_RESET: val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); - hnae_set_bit(val, HCLGE_CORE_RESET_BIT, 1); + hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1); hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); 
dev_info(&pdev->dev, "Core Reset requested\n"); break; @@ -2810,8 +2774,6 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev) clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B); break; default: - dev_warn(&hdev->pdev->dev, "Unsupported reset event to clear:%d", - hdev->reset_type); break; } @@ -2824,16 +2786,17 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev) static void hclge_reset(struct hclge_dev *hdev) { - /* perform reset of the stack & ae device for a client */ + struct hnae3_handle *handle; + /* perform reset of the stack & ae device for a client */ + handle = &hdev->vport[0].nic; + rtnl_lock(); hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); if (!hclge_reset_wait(hdev)) { - rtnl_lock(); hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); hclge_reset_ae_dev(hdev->ae_dev); hclge_notify_client(hdev, HNAE3_INIT_CLIENT); - rtnl_unlock(); hclge_clear_reset_cause(hdev); } else { @@ -2843,6 +2806,8 @@ static void hclge_reset(struct hclge_dev *hdev) } hclge_notify_client(hdev, HNAE3_UP_CLIENT); + handle->last_reset_time = jiffies; + rtnl_unlock(); } static void hclge_reset_event(struct hnae3_handle *handle) @@ -2855,8 +2820,13 @@ static void hclge_reset_event(struct hnae3_handle *handle) * know this if last reset request did not occur very recently (watchdog * timer = 5*HZ, let us check after sufficiently large time, say 4*5*HZ) * In case of new request we reset the "reset level" to PF reset. + * And if it is a repeat reset request of the most recent one then we + * want to make sure we throttle the reset request. Therefore, we will + * not allow it again within 3*HZ jiffies. */ - if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ))) + if (time_before(jiffies, (handle->last_reset_time + 3 * HZ))) + return; + else if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ))) handle->reset_level = HNAE3_FUNC_RESET; dev_info(&hdev->pdev->dev, "received reset event, reset type is %d", @@ -2868,8 +2838,6 @@ static void hclge_reset_event(struct hnae3_handle *handle) if (handle->reset_level < HNAE3_GLOBAL_RESET) handle->reset_level++; - - handle->last_reset_time = jiffies; } static void hclge_reset_subtask(struct hclge_dev *hdev) @@ -3110,23 +3078,21 @@ static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid, for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { u16 mode = 0; - hnae_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1)); - hnae_set_field(mode, HCLGE_RSS_TC_SIZE_M, - HCLGE_RSS_TC_SIZE_S, tc_size[i]); - hnae_set_field(mode, HCLGE_RSS_TC_OFFSET_M, - HCLGE_RSS_TC_OFFSET_S, tc_offset[i]); + hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1)); + hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M, + HCLGE_RSS_TC_SIZE_S, tc_size[i]); + hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M, + HCLGE_RSS_TC_OFFSET_S, tc_offset[i]); req->rss_tc_mode[i] = cpu_to_le16(mode); } ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { + if (ret) dev_err(&hdev->pdev->dev, "Configure rss tc mode fail, status = %d\n", ret); - return ret; - } - return 0; + return ret; } static int hclge_set_rss_input_tuple(struct hclge_dev *hdev) @@ -3149,13 +3115,10 @@ static int hclge_set_rss_input_tuple(struct hclge_dev *hdev) req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en; req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en; ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { + if (ret) dev_err(&hdev->pdev->dev, "Configure rss input fail, status = %d\n", ret); - return ret; - } - - return 0; + return ret; } static int hclge_get_rss(struct hnae3_handle
*handle, u32 *indir, @@ -3491,16 +3454,16 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport, i = 0; for (node = ring_chain; node; node = node->next) { tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]); - hnae_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M, - HCLGE_INT_TYPE_S, - hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); - hnae_set_field(tqp_type_and_id, HCLGE_TQP_ID_M, - HCLGE_TQP_ID_S, node->tqp_index); - hnae_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M, - HCLGE_INT_GL_IDX_S, - hnae_get_field(node->int_gl_idx, - HNAE3_RING_GL_IDX_M, - HNAE3_RING_GL_IDX_S)); + hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M, + HCLGE_INT_TYPE_S, + hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B)); + hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M, + HCLGE_TQP_ID_S, node->tqp_index); + hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M, + HCLGE_INT_GL_IDX_S, + hnae3_get_field(node->int_gl_idx, + HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S)); req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id); if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; @@ -3603,12 +3566,11 @@ int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B; ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { + if (ret) dev_err(&hdev->pdev->dev, "Set promisc mode fail, status is %d.\n", ret); - return ret; - } - return 0; + + return ret; } void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, @@ -3648,20 +3610,20 @@ static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) int ret; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false); - hnae_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable); - hnae_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable); - hnae_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable); - hnae_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable); - hnae_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0); - hnae_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0); - hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0); - hnae_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0); - hnae_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable); - hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable); - hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable); - hnae_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable); - hnae_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable); - hnae_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable); + hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable); + hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable); + hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable); + hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable); + hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0); + hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0); + hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0); + hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0); + hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable); + hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable); + hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable); + hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable); + hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable); + hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable); req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); ret = hclge_cmd_send(&hdev->hw, &desc, 1); @@ -3689,7 +3651,7 @@ static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en) /* 2 Then setup the loopback flag */ loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); - hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 
1 : 0); + hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0); req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); @@ -3953,20 +3915,18 @@ static int hclge_set_mta_filter_mode(struct hclge_dev *hdev, req = (struct hclge_mta_filter_mode_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false); - hnae_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B, - enable); - hnae_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M, - HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel); + hnae3_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B, + enable); + hnae3_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M, + HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel); ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { + if (ret) dev_err(&hdev->pdev->dev, "Config mta filter mode failed for cmd_send, ret =%d.\n", ret); - return ret; - } - return 0; + return ret; } int hclge_cfg_func_mta_filter(struct hclge_dev *hdev, @@ -3980,19 +3940,17 @@ int hclge_cfg_func_mta_filter(struct hclge_dev *hdev, req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false); - hnae_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B, - enable); + hnae3_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B, + enable); req->function_id = func_id; ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { + if (ret) dev_err(&hdev->pdev->dev, "Config func_id enable failed for cmd_send, ret =%d.\n", ret); - return ret; - } - return 0; + return ret; } static int hclge_set_mta_table_item(struct hclge_vport *vport, @@ -4007,10 +3965,10 @@ static int hclge_set_mta_table_item(struct hclge_vport *vport, req = (struct hclge_cfg_func_mta_item_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false); - hnae_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable); + hnae3_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable); - hnae_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M, - HCLGE_CFG_MTA_ITEM_IDX_S, idx); + hnae3_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M, + HCLGE_CFG_MTA_ITEM_IDX_S, idx); req->item_idx = cpu_to_le16(item_idx); ret = hclge_cmd_send(&hdev->hw, &desc, 1); @@ -4257,17 +4215,10 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport, } memset(&req, 0, sizeof(req)); - hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); - hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); - hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 0); - hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); - - hnae_set_bit(egress_port, HCLGE_MAC_EPORT_SW_EN_B, 0); - hnae_set_bit(egress_port, HCLGE_MAC_EPORT_TYPE_B, 0); - hnae_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M, - HCLGE_MAC_EPORT_VFID_S, vport->vport_id); - hnae_set_field(egress_port, HCLGE_MAC_EPORT_PFID_M, - HCLGE_MAC_EPORT_PFID_S, 0); + hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); + + hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M, + HCLGE_MAC_EPORT_VFID_S, vport->vport_id); req.egress_port = cpu_to_le16(egress_port); @@ -4318,8 +4269,8 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport, } memset(&req, 0, sizeof(req)); - hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); - hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); + hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); hclge_prepare_mac_addr(&req, addr); ret = hclge_remove_mac_vlan_tbl(vport, &req); @@ -4331,7 +4282,7 @@ static int hclge_add_mc_addr(struct hnae3_handle *handle, { struct hclge_vport *vport =
hclge_get_vport(handle); - return hclge_add_mc_addr_common(vport, addr); + return hclge_add_mc_addr_common(vport, addr); } int hclge_add_mc_addr_common(struct hclge_vport *vport, @@ -4351,10 +4302,10 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport, return -EINVAL; } memset(&req, 0, sizeof(req)); - hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); - hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); - hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); - hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); + hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); + hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); hclge_prepare_mac_addr(&req, addr); status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); if (!status) { @@ -4418,10 +4369,10 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport, } memset(&req, 0, sizeof(req)); - hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); - hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); - hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); - hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); + hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); + hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); hclge_prepare_mac_addr(&req, addr); status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); if (!status) { @@ -4604,13 +4555,11 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, req->vlan_fe = filter_en; ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { + if (ret) dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n", ret); - return ret; - } - return 0; + return ret; } #define HCLGE_FILTER_TYPE_VF 0 @@ -4802,19 +4751,19 @@ static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport) req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data; req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1); req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2); - hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B, - vcfg->accept_tag1 ? 1 : 0); - hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B, - vcfg->accept_untag1 ? 1 : 0); - hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B, - vcfg->accept_tag2 ? 1 : 0); - hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B, - vcfg->accept_untag2 ? 1 : 0); - hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B, - vcfg->insert_tag1_en ? 1 : 0); - hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B, - vcfg->insert_tag2_en ? 1 : 0); - hnae_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B, + vcfg->accept_tag1 ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B, + vcfg->accept_untag1 ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B, + vcfg->accept_tag2 ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B, + vcfg->accept_untag2 ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B, + vcfg->insert_tag1_en ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B, + vcfg->insert_tag2_en ? 
1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0); req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; req->vf_bitmap[req->vf_offset] = @@ -4840,14 +4789,14 @@ static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport) hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false); req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data; - hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B, - vcfg->strip_tag1_en ? 1 : 0); - hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B, - vcfg->strip_tag2_en ? 1 : 0); - hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B, - vcfg->vlan1_vlan_prionly ? 1 : 0); - hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B, - vcfg->vlan2_vlan_prionly ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B, + vcfg->strip_tag1_en ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B, + vcfg->strip_tag2_en ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B, + vcfg->vlan1_vlan_prionly ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B, + vcfg->vlan2_vlan_prionly ? 1 : 0); req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; req->vf_bitmap[req->vf_offset] = @@ -4999,16 +4948,15 @@ static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mtu) req = (struct hclge_config_max_frm_size_cmd *)desc.data; req->max_frm_size = cpu_to_le16(max_frm_size); + req->min_frm_size = HCLGE_MAC_MIN_FRAME; ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { + if (ret) dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret); - return ret; - } - - hdev->mps = max_frm_size; + else + hdev->mps = max_frm_size; - return 0; + return ret; } static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) @@ -5043,7 +4991,7 @@ static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id, req = (struct hclge_reset_tqp_queue_cmd *)desc.data; req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); - hnae_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable); + hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { @@ -5073,7 +5021,7 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id) return ret; } - return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B); + return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B); } static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, @@ -5380,12 +5328,12 @@ static void hclge_get_mdix_mode(struct hnae3_handle *handle, phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX); retval = phy_read(phydev, HCLGE_PHY_CSC_REG); - mdix_ctrl = hnae_get_field(retval, HCLGE_PHY_MDIX_CTRL_M, - HCLGE_PHY_MDIX_CTRL_S); + mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M, + HCLGE_PHY_MDIX_CTRL_S); retval = phy_read(phydev, HCLGE_PHY_CSS_REG); - mdix = hnae_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B); - is_resolved = hnae_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B); + mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B); + is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B); phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER); @@ -5531,7 +5479,6 @@ static int hclge_pci_init(struct hclge_dev *hdev) pci_set_master(pdev); hw = &hdev->hw; - hw->back = hdev; hw->io_base = pcim_iomap(pdev, 2, 0); if (!hw->io_base) { dev_err(&pdev->dev, "Can't map configuration register space\n"); @@ -5562,6 +5509,30 @@ static void hclge_pci_uninit(struct hclge_dev *hdev) pci_disable_device(pdev); } +static void 
hclge_state_init(struct hclge_dev *hdev) +{ + set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state); + set_bit(HCLGE_STATE_DOWN, &hdev->state); + clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); + clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); + clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); + clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); +} + +static void hclge_state_uninit(struct hclge_dev *hdev) +{ + set_bit(HCLGE_STATE_DOWN, &hdev->state); + + if (hdev->service_timer.function) + del_timer_sync(&hdev->service_timer); + if (hdev->service_task.func) + cancel_work_sync(&hdev->service_task); + if (hdev->rst_service_task.func) + cancel_work_sync(&hdev->rst_service_task); + if (hdev->mbx_service_task.func) + cancel_work_sync(&hdev->mbx_service_task); +} + static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) { struct pci_dev *pdev = ae_dev->pdev; @@ -5577,8 +5548,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) hdev->pdev = pdev; hdev->ae_dev = ae_dev; hdev->reset_type = HNAE3_NONE_RESET; - hdev->reset_request = 0; - hdev->reset_pending = 0; ae_dev->priv = hdev; ret = hclge_pci_init(hdev); @@ -5702,12 +5671,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) /* Enable MISC vector(vector0) */ hclge_enable_vector(&hdev->misc_vector, true); - set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state); - set_bit(HCLGE_STATE_DOWN, &hdev->state); - clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); - clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); - clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); - clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); + hclge_state_init(hdev); pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME); return 0; @@ -5812,16 +5776,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) struct hclge_dev *hdev = ae_dev->priv; struct hclge_mac *mac = &hdev->hw.mac; - set_bit(HCLGE_STATE_DOWN, &hdev->state); - - if (hdev->service_timer.function) - del_timer_sync(&hdev->service_timer); - if (hdev->service_task.func) - cancel_work_sync(&hdev->service_task); - if (hdev->rst_service_task.func) - cancel_work_sync(&hdev->rst_service_task); - if (hdev->mbx_service_task.func) - cancel_work_sync(&hdev->mbx_service_task); + hclge_state_uninit(hdev); if (mac->phydev) mdiobus_unregister(mac->mdio_bus); @@ -5905,6 +5860,7 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num) u32 *rss_indir; int ret, i; + /* Free old tqps, and reallocate with the new tqp number when the nic is set up */ hclge_release_tqp(vport); ret = hclge_knic_setup(vport, new_tqps_num); @@ -6149,8 +6105,8 @@ static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status) hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false); req = (struct hclge_set_led_state_cmd *)desc.data; - hnae_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M, - HCLGE_LED_LOCATE_STATE_S, locate_led_status); + hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M, + HCLGE_LED_LOCATE_STATE_S, locate_led_status); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) @@ -6280,7 +6236,6 @@ static const struct hnae3_ae_ops hclge_ops = { static struct hnae3_ae_algo ae_algo = { .ops = &hclge_ops, - .name = HCLGE_NAME, .pdev_id_table = ae_algo_pci_tbl, }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 7488534528cd..dfa5c9456d22 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++
b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -1,11 +1,5 @@ -/* - * Copyright (c) 2016~2017 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. #ifndef __HCLGE_MAIN_H #define __HCLGE_MAIN_H @@ -40,7 +34,7 @@ #define HCLGE_RSS_HASH_ALGO_TOEPLITZ 0 #define HCLGE_RSS_HASH_ALGO_SIMPLE 1 #define HCLGE_RSS_HASH_ALGO_SYMMETRIC 2 -#define HCLGE_RSS_HASH_ALGO_MASK 0xf +#define HCLGE_RSS_HASH_ALGO_MASK GENMASK(3, 0) #define HCLGE_RSS_CFG_TBL_NUM \ (HCLGE_RSS_IND_TBL_SIZE / HCLGE_RSS_CFG_TBL_SIZE) @@ -77,11 +71,11 @@ /* Copper Specific Status Register */ #define HCLGE_PHY_CSS_REG 17 -#define HCLGE_PHY_MDIX_CTRL_S (5) +#define HCLGE_PHY_MDIX_CTRL_S 5 #define HCLGE_PHY_MDIX_CTRL_M GENMASK(6, 5) -#define HCLGE_PHY_MDIX_STATUS_B (6) -#define HCLGE_PHY_SPEED_DUP_RESOLVE_B (11) +#define HCLGE_PHY_MDIX_STATUS_B 6 +#define HCLGE_PHY_SPEED_DUP_RESOLVE_B 11 /* Factor used to calculate offset and bitmap of VF num */ #define HCLGE_VF_NUM_PER_CMD 64 @@ -89,9 +83,10 @@ /* Reset related Registers */ #define HCLGE_MISC_RESET_STS_REG 0x20700 +#define HCLGE_MISC_VECTOR_INT_STS 0x20800 #define HCLGE_GLOBAL_RESET_REG 0x20A00 -#define HCLGE_GLOBAL_RESET_BIT 0x0 -#define HCLGE_CORE_RESET_BIT 0x1 +#define HCLGE_GLOBAL_RESET_BIT 0 +#define HCLGE_CORE_RESET_BIT 1 #define HCLGE_FUN_RST_ING 0x20C00 #define HCLGE_FUN_RST_ING_B 0 @@ -128,6 +123,7 @@ enum HCLGE_DEV_STATE { HCLGE_STATE_MBX_SERVICE_SCHED, HCLGE_STATE_MBX_HANDLING, HCLGE_STATE_STATISTICS_UPDATING, + HCLGE_STATE_CMD_DISABLE, HCLGE_STATE_MAX }; @@ -138,12 +134,6 @@ enum hclge_evt_cause { }; #define HCLGE_MPF_ENBALE 1 -struct hclge_caps { - u16 num_tqp; - u16 num_buffer_cell; - u32 flag; - u16 vmdq; -}; enum HCLGE_MAC_SPEED { HCLGE_MAC_SPEED_10M = 10, /* 10 Mbps */ @@ -189,8 +179,6 @@ struct hclge_hw { struct hclge_mac mac; int num_vec; struct hclge_cmq cmq; - struct hclge_caps caps; - void *back; }; /* TQP stats */ @@ -202,7 +190,10 @@ struct hlcge_tqp_stats { }; struct hclge_tqp { - struct device *dev; /* Device for DMA mapping */ + /* copy of device pointer from pci_dev, + * used when performing DMA mapping + */ + struct device *dev; struct hnae3_queue q; struct hlcge_tqp_stats tqp_stats; u16 index; /* Global index in a NIC controller */ @@ -492,13 +483,11 @@ struct hclge_dev { u16 num_tqps; /* Num task queue pairs of this PF */ u16 num_req_vfs; /* Num VFs requested for this PF */ - /* Base task tqp physical id of this PF */ - u16 base_tqp_pid; + u16 base_tqp_pid; /* Base task tqp physical id of this PF */ u16 alloc_rss_size; /* Allocated RSS task queue */ u16 rss_size_max; /* HW defined max RSS task queue */ - /* Num of guaranteed filters for this PF */ - u16 fdir_pf_filter_count; + u16 fdir_pf_filter_count; /* Num of guaranteed filters for this PF */ u16 num_alloc_vport; /* Num vports this driver supports */ u32 numa_node_mask; u16 rx_buf_len; @@ -560,7 +549,7 @@ struct hclge_dev { u32 mps; /* Max packet size */ enum hclge_mta_dmac_sel_type mta_mac_sel_type; - bool enable_mta; /* Mutilcast filter enable */ + bool enable_mta; /* Multicast filter enable */ struct hclge_vlan_type_cfg vlan_type_cfg; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index
7541cb9b71ce..f34851c91eb3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -104,13 +104,15 @@ static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head) } } -/* hclge_get_ring_chain_from_mbx: get ring type & tqpid from mailbox message +/* hclge_get_ring_chain_from_mbx: get ring type & tqp id & int_gl idx + * from mailbox message * msg[0]: opcode * msg[1]: <not relevant to this function> * msg[2]: ring_num * msg[3]: first ring type (TX|RX) * msg[4]: first tqp id - * msg[5] ~ msg[14]: other ring type and tqp id + * msg[5]: first int_gl idx + * msg[6] ~ msg[14]: other ring type, tqp id and int_gl idx */ static int hclge_get_ring_chain_from_mbx( struct hclge_mbx_vf_to_pf_cmd *req, @@ -128,12 +130,12 @@ static int hclge_get_ring_chain_from_mbx( HCLGE_MBX_RING_NODE_VARIABLE_NUM)) return -ENOMEM; - hnae_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]); + hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]); ring_chain->tqp_index = hclge_get_queue_id(vport->nic.kinfo.tqp[req->msg[4]]); - hnae_set_field(ring_chain->int_gl_idx, HCLGE_INT_GL_IDX_M, - HCLGE_INT_GL_IDX_S, - req->msg[5]); + hnae3_set_field(ring_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S, + req->msg[5]); cur_chain = ring_chain; @@ -142,19 +144,19 @@ static int hclge_get_ring_chain_from_mbx( if (!new_chain) goto err; - hnae_set_bit(new_chain->flag, HNAE3_RING_TYPE_B, - req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i + - HCLGE_MBX_RING_MAP_BASIC_MSG_NUM]); + hnae3_set_bit(new_chain->flag, HNAE3_RING_TYPE_B, + req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i + + HCLGE_MBX_RING_MAP_BASIC_MSG_NUM]); new_chain->tqp_index = hclge_get_queue_id(vport->nic.kinfo.tqp [req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i + HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 1]]); - hnae_set_field(new_chain->int_gl_idx, HCLGE_INT_GL_IDX_M, - HCLGE_INT_GL_IDX_S, - req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i + - HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 2]); + hnae3_set_field(new_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S, + req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i + + HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 2]); cur_chain->next = new_chain; cur_chain = new_chain; @@ -460,7 +462,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev) req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data; flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); - if (unlikely(!hnae_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) { + if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) { dev_warn(&hdev->pdev->dev, "dropped invalid mailbox message, code = %d\n", req->msg[0]); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c index 9f7932e423b5..2065ee2fd358 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c @@ -1,11 +1,5 @@ -/* - * Copyright (c) 2016~2017 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. 
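/*
 * [Editor's note] The hnae_*() -> hnae3_*() conversions throughout this
 * patch are a mechanical rename of the HNS3 bit-field accessors. For
 * reference, here is a minimal standalone sketch of their semantics,
 * assuming definitions equivalent to the hnae3.h macros (not part of
 * the patch; compiles with any C compiler):
 */
#include <stdint.h>
#include <stdio.h>

#define hnae3_set_field(origin, mask, shift, val) \
	do { \
		(origin) &= ~(mask); \
		(origin) |= ((val) << (shift)) & (mask); \
	} while (0)
#define hnae3_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))
#define hnae3_set_bit(origin, shift, val) \
	hnae3_set_field(origin, 0x1U << (shift), shift, val)
#define hnae3_get_bit(origin, shift) \
	hnae3_get_field(origin, 0x1U << (shift), shift)

int main(void)
{
	uint32_t reg = 0;

	hnae3_set_field(reg, 0x70U, 4, 5);	/* 3-bit field at bits [6:4] */
	hnae3_set_bit(reg, 0, 1);		/* flag at bit 0 */
	printf("reg=0x%x field=%u bit=%u\n", reg,
	       hnae3_get_field(reg, 0x70U, 4), hnae3_get_bit(reg, 0));
	return 0;	/* prints: reg=0x51 field=5 bit=1 */
}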
#include <linux/etherdevice.h> #include <linux/kernel.h> @@ -67,16 +61,16 @@ static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum, mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data; - hnae_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M, - HCLGE_MDIO_PHYID_S, phyid); - hnae_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M, - HCLGE_MDIO_PHYREG_S, regnum); + hnae3_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M, + HCLGE_MDIO_PHYID_S, phyid); + hnae3_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M, + HCLGE_MDIO_PHYREG_S, regnum); - hnae_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1); - hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M, - HCLGE_MDIO_CTRL_ST_S, 1); - hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M, - HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_WRITE); + hnae3_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1); + hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M, + HCLGE_MDIO_CTRL_ST_S, 1); + hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M, + HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_WRITE); mdio_cmd->data_wr = cpu_to_le16(data); @@ -105,16 +99,16 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum) mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data; - hnae_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M, - HCLGE_MDIO_PHYID_S, phyid); - hnae_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M, - HCLGE_MDIO_PHYREG_S, regnum); + hnae3_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M, + HCLGE_MDIO_PHYID_S, phyid); + hnae3_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M, + HCLGE_MDIO_PHYREG_S, regnum); - hnae_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1); - hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M, - HCLGE_MDIO_CTRL_ST_S, 1); - hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M, - HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_READ); + hnae3_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1); + hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M, + HCLGE_MDIO_CTRL_ST_S, 1); + hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M, + HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_READ); /* Read out phy data */ ret = hclge_cmd_send(&hdev->hw, &desc, 1); @@ -125,7 +119,7 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum) return ret; } - if (hnae_get_bit(le16_to_cpu(mdio_cmd->sta), HCLGE_MDIO_STA_B)) { + if (hnae3_get_bit(le16_to_cpu(mdio_cmd->sta), HCLGE_MDIO_STA_B)) { dev_err(&hdev->pdev->dev, "mdio read data error\n"); return -EIO; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h index c5e91cfb8f2c..bb3ce35e0d66 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h @@ -1,11 +1,5 @@ -/* - * Copyright (c) 2016-2017 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. 
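/*
 * [Editor's note] To make the field packing in hclge_mdio_write() above
 * concrete: a clause-22 MDIO command word carries a START flag, an ST
 * field selecting clause 22, and an OP field selecting read or write.
 * The shift/mask values below are illustrative placeholders, not the
 * driver's real HCLGE_MDIO_* constants:
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t pack_field(uint32_t word, uint32_t mask,
			   unsigned int shift, uint32_t val)
{
	word &= ~mask;			/* clear the target field */
	word |= (val << shift) & mask;	/* insert the new value */
	return word;
}

int main(void)
{
	enum { OP_WRITE = 1, OP_READ = 2 };	/* placeholder opcodes */
	uint32_t ctrl = 0;

	ctrl = pack_field(ctrl, 0x1U << 0, 0, 1);	 /* START bit */
	ctrl = pack_field(ctrl, 0x3U << 1, 1, 1);	 /* ST = clause 22 */
	ctrl = pack_field(ctrl, 0x3U << 3, 3, OP_WRITE); /* OP = write */

	printf("ctrl=0x%x\n", ctrl);	/* prints: ctrl=0xb */
	return 0;
}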
#ifndef __HCLGE_MDIO_H #define __HCLGE_MDIO_H diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index 262c125f8137..5db70a1451c5 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -1,11 +1,5 @@ -/* - * Copyright (c) 2016~2017 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. #include <linux/etherdevice.h> @@ -1184,10 +1178,10 @@ static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc) u16 qs_id = vport->qs_offset + tc; u8 grp, sub_grp; - grp = hnae_get_field(qs_id, HCLGE_BP_GRP_ID_M, - HCLGE_BP_GRP_ID_S); - sub_grp = hnae_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M, - HCLGE_BP_SUB_GRP_ID_S); + grp = hnae3_get_field(qs_id, HCLGE_BP_GRP_ID_M, + HCLGE_BP_GRP_ID_S); + sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M, + HCLGE_BP_SUB_GRP_ID_S); if (i == grp) qs_bitmap |= (1 << sub_grp); @@ -1223,6 +1217,10 @@ static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev) tx_en = true; rx_en = true; break; + case HCLGE_FC_PFC: + tx_en = false; + rx_en = false; + break; default: tx_en = true; rx_en = true; @@ -1240,8 +1238,9 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev) if (ret) return ret; - if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) - return hclge_mac_pause_setup_hw(hdev); + ret = hclge_mac_pause_setup_hw(hdev); + if (ret) + return ret; /* Only DCB-supported dev supports qset back pressure and pfc cmd */ if (!hnae3_dev_dcb_supported(hdev)) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h index c2b6e8a6700f..dd4c194747c1 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h @@ -1,11 +1,5 @@ -/* - * Copyright (c) 2016~2017 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2016-2017 Hisilicon Limited. 
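/*
 * [Editor's note] hclge_bp_setup_hw() above splits each qset id into a
 * group index and a bit position inside that group's back-pressure
 * bitmap. Assuming a 5-bit sub-group field (i.e. 32 qsets per bitmap,
 * which is what the GRP/SUB_GRP masks suggest), the decomposition is a
 * plain divide/modulo, sketched standalone here:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t qs_id = 70;			/* example qset id */
	unsigned int grp = qs_id >> 5;		/* which 32-entry bitmap */
	unsigned int sub_grp = qs_id & 0x1f;	/* bit inside that bitmap */
	uint32_t qs_bitmap = 1U << sub_grp;

	/* prints: qs_id 70 -> grp 2, bit 6, bitmap 0x00000040 */
	printf("qs_id %u -> grp %u, bit %u, bitmap 0x%08x\n",
	       qs_id, grp, sub_grp, qs_bitmap);
	return 0;
}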
#ifndef __HCLGE_TM_H #define __HCLGE_TM_H @@ -123,10 +117,11 @@ struct hclge_port_shapping_cmd { }; #define hclge_tm_set_field(dest, string, val) \ - hnae_set_field((dest), (HCLGE_TM_SHAP_##string##_MSK), \ - (HCLGE_TM_SHAP_##string##_LSH), val) + hnae3_set_field((dest), \ + (HCLGE_TM_SHAP_##string##_MSK), \ + (HCLGE_TM_SHAP_##string##_LSH), val) #define hclge_tm_get_field(src, string) \ - hnae_get_field((src), (HCLGE_TM_SHAP_##string##_MSK), \ + hnae3_get_field((src), (HCLGE_TM_SHAP_##string##_MSK), \ (HCLGE_TM_SHAP_##string##_LSH)) int hclge_tm_schd_init(struct hclge_dev *hdev); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c index 1bbfe131b596..fb471fe2c494 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c @@ -76,32 +76,24 @@ static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring) { int size = ring->desc_num * sizeof(struct hclgevf_desc); - ring->desc = kzalloc(size, GFP_KERNEL); + ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring), + size, &ring->desc_dma_addr, + GFP_KERNEL); if (!ring->desc) return -ENOMEM; - ring->desc_dma_addr = dma_map_single(cmq_ring_to_dev(ring), ring->desc, - size, DMA_BIDIRECTIONAL); - - if (dma_mapping_error(cmq_ring_to_dev(ring), ring->desc_dma_addr)) { - ring->desc_dma_addr = 0; - kfree(ring->desc); - ring->desc = NULL; - return -ENOMEM; - } - return 0; } static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring) { - dma_unmap_single(cmq_ring_to_dev(ring), ring->desc_dma_addr, - ring->desc_num * sizeof(ring->desc[0]), - hclgevf_ring_to_dma_dir(ring)); + int size = ring->desc_num * sizeof(struct hclgevf_desc); - ring->desc_dma_addr = 0; - kfree(ring->desc); - ring->desc = NULL; + if (ring->desc) { + dma_free_coherent(cmq_ring_to_dev(ring), size, + ring->desc, ring->desc_dma_addr); + ring->desc = NULL; + } } static int hclgevf_init_cmd_queue(struct hclgevf_dev *hdev, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index a17872aab168..d1f16f0c1646 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -330,6 +330,12 @@ static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id) { + if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) { + dev_warn(&hdev->pdev->dev, + "vector(vector_id %d) has been freed.\n", vector_id); + return; + } + hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT; hdev->num_msi_left += 1; hdev->num_msi_used -= 1; @@ -444,12 +450,12 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size) hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false); for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { - hnae_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B, - (tc_valid[i] & 0x1)); - hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M, - HCLGEVF_RSS_TC_SIZE_S, tc_size[i]); - hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M, - HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]); + hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B, + (tc_valid[i] & 0x1)); + hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M, + HCLGEVF_RSS_TC_SIZE_S, tc_size[i]); + hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M, + HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]); } status = 
hclgevf_cmd_send(&hdev->hw, &desc, 1); if (status) @@ -547,24 +553,18 @@ static int hclgevf_get_tc_size(struct hnae3_handle *handle) } static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, - int vector, + int vector_id, struct hnae3_ring_chain_node *ring_chain) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); struct hnae3_ring_chain_node *node; struct hclge_mbx_vf_to_pf_cmd *req; struct hclgevf_desc desc; - int i = 0, vector_id; + int i = 0; int status; u8 type; req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data; - vector_id = hclgevf_get_vector_index(hdev, vector); - if (vector_id < 0) { - dev_err(&handle->pdev->dev, - "Get vector index fail. ret =%d\n", vector_id); - return vector_id; - } for (node = ring_chain; node; node = node->next) { int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + @@ -582,11 +582,11 @@ static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, } req->msg[idx_offset] = - hnae_get_bit(node->flag, HNAE3_RING_TYPE_B); + hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B); req->msg[idx_offset + 1] = node->tqp_index; - req->msg[idx_offset + 2] = hnae_get_field(node->int_gl_idx, - HNAE3_RING_GL_IDX_M, - HNAE3_RING_GL_IDX_S); + req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx, + HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S); i++; if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM - @@ -617,7 +617,17 @@ static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector, struct hnae3_ring_chain_node *ring_chain) { - return hclgevf_bind_ring_to_vector(handle, true, vector, ring_chain); + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + int vector_id; + + vector_id = hclgevf_get_vector_index(hdev, vector); + if (vector_id < 0) { + dev_err(&handle->pdev->dev, + "Get vector index fail. ret =%d\n", vector_id); + return vector_id; + } + + return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain); } static int hclgevf_unmap_ring_from_vector( @@ -635,7 +645,7 @@ static int hclgevf_unmap_ring_from_vector( return vector_id; } - ret = hclgevf_bind_ring_to_vector(handle, false, vector, ring_chain); + ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain); if (ret) dev_err(&handle->pdev->dev, "Unmap ring from vector fail. vector=%d, ret =%d\n", @@ -648,8 +658,17 @@ static int hclgevf_unmap_ring_from_vector( static int hclgevf_put_vector(struct hnae3_handle *handle, int vector) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + int vector_id; - hclgevf_free_vector(hdev, vector); + vector_id = hclgevf_get_vector_index(hdev, vector); + if (vector_id < 0) { + dev_err(&handle->pdev->dev, + "hclgevf_put_vector get vector index fail. 
ret =%d\n", + vector_id); + return vector_id; + } + + hclgevf_free_vector(hdev, vector_id); return 0; } @@ -990,8 +1009,8 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev) /* wait to check the hardware reset completion status */ val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING); - while (hnae_get_bit(val, HCLGEVF_FUN_RST_ING_B) && - (cnt < HCLGEVF_RESET_WAIT_CNT)) { + while (hnae3_get_bit(val, HCLGEVF_FUN_RST_ING_B) && + (cnt < HCLGEVF_RESET_WAIT_CNT)) { msleep(HCLGEVF_RESET_WAIT_MS); val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING); cnt++; @@ -1582,9 +1601,10 @@ static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev) hclgevf_free_vector(hdev, 0); } -static int hclgevf_init_instance(struct hclgevf_dev *hdev, - struct hnae3_client *client) +static int hclgevf_init_client_instance(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev) { + struct hclgevf_dev *hdev = ae_dev->priv; int ret; switch (client->type) { @@ -1635,9 +1655,11 @@ static int hclgevf_init_instance(struct hclgevf_dev *hdev, return 0; } -static void hclgevf_uninit_instance(struct hclgevf_dev *hdev, - struct hnae3_client *client) +static void hclgevf_uninit_client_instance(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev) { + struct hclgevf_dev *hdev = ae_dev->priv; + /* un-init roce, if it exists */ if (hdev->roce_client) hdev->roce_client->ops->uninit_instance(&hdev->roce, 0); @@ -1648,22 +1670,6 @@ static void hclgevf_uninit_instance(struct hclgevf_dev *hdev, client->ops->uninit_instance(&hdev->nic, 0); } -static int hclgevf_register_client(struct hnae3_client *client, - struct hnae3_ae_dev *ae_dev) -{ - struct hclgevf_dev *hdev = ae_dev->priv; - - return hclgevf_init_instance(hdev, client); -} - -static void hclgevf_unregister_client(struct hnae3_client *client, - struct hnae3_ae_dev *ae_dev) -{ - struct hclgevf_dev *hdev = ae_dev->priv; - - hclgevf_uninit_instance(hdev, client); -} - static int hclgevf_pci_init(struct hclgevf_dev *hdev) { struct pci_dev *pdev = hdev->pdev; @@ -1924,8 +1930,8 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, static const struct hnae3_ae_ops hclgevf_ops = { .init_ae_dev = hclgevf_init_ae_dev, .uninit_ae_dev = hclgevf_uninit_ae_dev, - .init_client_instance = hclgevf_register_client, - .uninit_client_instance = hclgevf_unregister_client, + .init_client_instance = hclgevf_init_client_instance, + .uninit_client_instance = hclgevf_uninit_client_instance, .start = hclgevf_ae_start, .stop = hclgevf_ae_stop, .map_ring_to_vector = hclgevf_map_ring_to_vector, @@ -1962,7 +1968,6 @@ static const struct hnae3_ae_ops hclgevf_ops = { static struct hnae3_ae_algo ae_algovf = { .ops = &hclgevf_ops, - .name = HCLGEVF_NAME, .pdev_id_table = ae_algovf_pci_tbl, }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c index b598c06af8e0..e9d5a4f96304 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c @@ -152,7 +152,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data; flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); - if (unlikely(!hnae_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) { + if (unlikely(!hnae3_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) { dev_warn(&hdev->pdev->dev, "dropped invalid mailbox message, code = %d\n", req->msg[0]); @@ -208,7 +208,8 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) /* tail the async message in arq */ 
msg_q = hdev->arq.msg_q[hdev->arq.tail]; - memcpy(&msg_q[0], req->msg, HCLGE_MBX_MAX_ARQ_MSG_SIZE); + memcpy(&msg_q[0], req->msg, + HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16)); hclge_mbx_tail_ptr_move_arq(hdev->arq); hdev->arq.count++; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c index 79b567447084..6b19607a4caa 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c @@ -264,7 +264,6 @@ static int init_fw_ctxt(struct hinic_hwdev *hwdev) struct hinic_hwif *hwif = hwdev->hwif; struct pci_dev *pdev = hwif->pdev; struct hinic_cmd_fw_ctxt fw_ctxt; - struct hinic_pfhwdev *pfhwdev; u16 out_size; int err; @@ -276,8 +275,6 @@ static int init_fw_ctxt(struct hinic_hwdev *hwdev) fw_ctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwif); fw_ctxt.rx_buf_sz = HINIC_RX_BUF_SZ; - pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); - err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_FWCTXT_INIT, &fw_ctxt, sizeof(fw_ctxt), &fw_ctxt, &out_size); diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index c944bd10b03d..51762428b40e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -7522,7 +7522,7 @@ static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np, case TC_CLSFLOWER_STATS: return -EOPNOTSUPP; default: - return -EINVAL; + return -EOPNOTSUPP; } } @@ -7554,7 +7554,7 @@ static int i40e_setup_tc_block(struct net_device *dev, switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, i40e_setup_tc_block_cb, - np, np); + np, np, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, i40e_setup_tc_block_cb, np); return 0; @@ -11841,7 +11841,6 @@ static int i40e_xdp(struct net_device *dev, case XDP_SETUP_PROG: return i40e_xdp_setup(vsi, xdp->prog); case XDP_QUERY_PROG: - xdp->prog_attached = i40e_enabled_xdp_vsi(vsi); xdp->prog_id = vsi->xdp_prog ? 
vsi->xdp_prog->aux->id : 0; return 0; default: @@ -11978,7 +11977,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d", IFNAMSIZ - 4, pf->vsi[pf->lan_vsi]->netdev->name); - random_ether_addr(mac_addr); + eth_random_addr(mac_addr); spin_lock_bh(&vsi->mac_filter_hash_lock); i40e_add_mac_filter(vsi, mac_addr); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index a7b87f935411..5906c1c1d19d 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -2884,7 +2884,7 @@ static int i40evf_setup_tc_cls_flower(struct i40evf_adapter *adapter, case TC_CLSFLOWER_STATS: return -EOPNOTSUPP; default: - return -EINVAL; + return -EOPNOTSUPP; } } @@ -2926,7 +2926,7 @@ static int i40evf_setup_tc_block(struct net_device *dev, switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, i40evf_setup_tc_block_cb, - adapter, adapter); + adapter, adapter, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, i40evf_setup_tc_block_cb, adapter); diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index b13b42e5a1d9..a795c07d0df7 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -225,19 +225,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw) hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT; - /* Make sure the PHY is in a good state. Several people have reported - * firmware leaving the PHY's page select register set to something - * other than the default of zero, which causes the PHY ID read to - * access something other than the intended register. - */ - ret_val = hw->phy.ops.reset(hw); - if (ret_val) { - hw_dbg("Error resetting the PHY.\n"); - goto out; - } - /* Set phy->phy_addr and phy->id. */ - igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, 0); ret_val = igb_get_phy_id_82575(hw); if (ret_val) return ret_val; diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index 252440a418dc..8a28f3388f69 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -1048,6 +1048,22 @@ #define E1000_TQAVCTRL_XMIT_MODE BIT(0) #define E1000_TQAVCTRL_DATAFETCHARB BIT(4) #define E1000_TQAVCTRL_DATATRANARB BIT(8) +#define E1000_TQAVCTRL_DATATRANTIM BIT(9) +#define E1000_TQAVCTRL_SP_WAIT_SR BIT(10) +/* Fetch Time Delta - bits 31:16 + * + * This field holds the value to be reduced from the launch time for + * fetch time decision. The FetchTimeDelta value is defined in 32 ns + * granularity. + * + * This field is 16 bits wide, and so the maximum value is: + * + * 65535 * 32 = 2097120 ~= 2.1 msec + * + * XXX: We are configuring the max value here since we couldn't come up + * with a reason for not doing so. + */ +#define E1000_TQAVCTRL_FETCHTIME_DELTA (0xFFFF << 16) /* TX Qav Credit Control fields */ #define E1000_TQAVCC_IDLESLOPE_MASK 0xFFFF diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 9643b5b3d444..ca54e268d157 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -262,6 +262,7 @@ struct igb_ring { u16 count; /* number of desc. 
in the ring */ u8 queue_index; /* logical index of the ring*/ u8 reg_idx; /* physical index of the ring */ + bool launchtime_enable; /* true if LaunchTime is enabled */ bool cbs_enable; /* indicates if CBS is enabled */ s32 idleslope; /* idleSlope in kbps */ s32 sendslope; /* sendSlope in kbps */ diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index f707709969ac..25720d95d4ea 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1654,33 +1654,65 @@ static void set_queue_mode(struct e1000_hw *hw, int queue, enum queue_mode mode) wr32(E1000_I210_TQAVCC(queue), val); } +static bool is_any_cbs_enabled(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + if (adapter->tx_ring[i]->cbs_enable) + return true; + } + + return false; +} + +static bool is_any_txtime_enabled(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + if (adapter->tx_ring[i]->launchtime_enable) + return true; + } + + return false; +} + /** - * igb_configure_cbs - Configure Credit-Based Shaper (CBS) + * igb_config_tx_modes - Configure "Qav Tx mode" features on igb * @adapter: pointer to adapter struct * @queue: queue number - * @enable: true = enable CBS, false = disable CBS - * @idleslope: idleSlope in kbps - * @sendslope: sendSlope in kbps - * @hicredit: hiCredit in bytes - * @locredit: loCredit in bytes * - * Configure CBS for a given hardware queue. When disabling, idleslope, - * sendslope, hicredit, locredit arguments are ignored. Returns 0 if - * success. Negative otherwise. + * Configure CBS and Launchtime for a given hardware queue. + * Parameters are retrieved from the correct Tx ring, so + * igb_save_cbs_params() and igb_save_txtime_params() should be used + * for setting those correctly prior to this function being called. **/ -static void igb_configure_cbs(struct igb_adapter *adapter, int queue, - bool enable, int idleslope, int sendslope, - int hicredit, int locredit) +static void igb_config_tx_modes(struct igb_adapter *adapter, int queue) { + struct igb_ring *ring = adapter->tx_ring[queue]; struct net_device *netdev = adapter->netdev; struct e1000_hw *hw = &adapter->hw; - u32 tqavcc; + u32 tqavcc, tqavctrl; u16 value; WARN_ON(hw->mac.type != e1000_i210); WARN_ON(queue < 0 || queue > 1); - if (enable || queue == 0) { + /* If any of the Qav features is enabled, configure queues as SR and + * with HIGH PRIO. If none is, then configure them with LOW PRIO and + * as SP. + */ + if (ring->cbs_enable || ring->launchtime_enable) { + set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH); + set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION); + } else { + set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW); + set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY); + } + + /* If CBS is enabled, set DataTranARB and config its parameters. */ + if (ring->cbs_enable || queue == 0) { /* i210 does not allow the queue 0 to be in the Strict * Priority mode while the Qav mode is enabled, so, * instead of disabling strict priority mode, we give @@ -1690,14 +1722,19 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue, * Queue0 QueueMode must be set to 1b when * TransmitMode is set to Qav." 
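Two scaling constants in this igb hunk are easy to sanity-check: the FetchTimeDelta field defined above is 16 bits of 32 ns units, and igb_config_tx_modes() encodes the idleSlope (given in kbps) into TQAVCC with the 61034/1000000 factor applied a few lines below. A minimal userspace sketch of both computations; the 20 Mbps reservation is an illustrative value, not something taken from the patch:

#include <stdio.h>
#include <stdint.h>

/* Mirrors the kernel's DIV_ROUND_UP_ULL(). */
static uint64_t div_round_up(uint64_t n, uint64_t d)
{
        return (n + d - 1) / d;
}

int main(void)
{
        /* FetchTimeDelta: 16-bit field, 32 ns granularity. */
        printf("max fetch delta = %u ns (~%.2f ms)\n",
               65535u * 32, 65535u * 32 / 1e6);

        /* TQAVCC.idleSlope encoding for an example 20 Mbps (20000 kbps)
         * reservation, rounded up the same way the driver does. */
        uint64_t kbps = 20000;
        printf("idleSlope field = %llu\n",
               (unsigned long long)div_round_up(kbps * 61034ULL, 1000000));

        /* Full 1 Gbps line rate, used by the queue 0 fallback below. */
        printf("line-rate field = %llu\n",
               (unsigned long long)div_round_up(1000000ULL * 61034ULL, 1000000));
        return 0;
}

The first line prints 2097120 ns, matching the "~= 2.1 msec" noted in the FetchTimeDelta comment, and the last prints 61034, the encoding of the "max linkspeed" idleslope that queue 0 falls back to when CBS is disabled.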
*/ - if (queue == 0 && !enable) { + if (queue == 0 && !ring->cbs_enable) { /* max "linkspeed" idleslope in kbps */ - idleslope = 1000000; - hicredit = ETH_FRAME_LEN; + ring->idleslope = 1000000; + ring->hicredit = ETH_FRAME_LEN; } - set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH); - set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION); + /* Always set data transfer arbitration to credit-based + * shaper algorithm on TQAVCTRL if CBS is enabled for any of + * the queues. + */ + tqavctrl = rd32(E1000_I210_TQAVCTRL); + tqavctrl |= E1000_TQAVCTRL_DATATRANARB; + wr32(E1000_I210_TQAVCTRL, tqavctrl); /* According to i210 datasheet section 7.2.7.7, we should set * the 'idleSlope' field from TQAVCC register following the @@ -1756,17 +1793,16 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue, * calculated value, so the resulting bandwidth might * be slightly higher for some configurations. */ - value = DIV_ROUND_UP_ULL(idleslope * 61034ULL, 1000000); + value = DIV_ROUND_UP_ULL(ring->idleslope * 61034ULL, 1000000); tqavcc = rd32(E1000_I210_TQAVCC(queue)); tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK; tqavcc |= value; wr32(E1000_I210_TQAVCC(queue), tqavcc); - wr32(E1000_I210_TQAVHC(queue), 0x80000000 + hicredit * 0x7735); + wr32(E1000_I210_TQAVHC(queue), + 0x80000000 + ring->hicredit * 0x7735); } else { - set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW); - set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY); /* Set idleSlope to zero. */ tqavcc = rd32(E1000_I210_TQAVCC(queue)); @@ -1775,6 +1811,43 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue, /* Set hiCredit to zero. */ wr32(E1000_I210_TQAVHC(queue), 0); + + /* If CBS is not enabled for any queues anymore, then return to + * the default state of Data Transmission Arbitration on + * TQAVCTRL. + */ + if (!is_any_cbs_enabled(adapter)) { + tqavctrl = rd32(E1000_I210_TQAVCTRL); + tqavctrl &= ~E1000_TQAVCTRL_DATATRANARB; + wr32(E1000_I210_TQAVCTRL, tqavctrl); + } + } + + /* If LaunchTime is enabled, set DataTranTIM. */ + if (ring->launchtime_enable) { + /* Always set DataTranTIM on TQAVCTRL if LaunchTime is enabled + * for any of the SR queues, and configure fetchtime delta. + * XXX NOTE: + * - LaunchTime will be enabled for all SR queues. + * - A fixed offset can be added relative to the launch + * time of all packets if configured at reg LAUNCH_OS0. + * We are keeping it as 0 for now (default value). + */ + tqavctrl = rd32(E1000_I210_TQAVCTRL); + tqavctrl |= E1000_TQAVCTRL_DATATRANTIM | + E1000_TQAVCTRL_FETCHTIME_DELTA; + wr32(E1000_I210_TQAVCTRL, tqavctrl); + } else { + /* If Launchtime is not enabled for any SR queues anymore, + * then clear DataTranTIM on TQAVCTRL and clear fetchtime delta, + * effectively disabling Launchtime. + */ + if (!is_any_txtime_enabled(adapter)) { + tqavctrl = rd32(E1000_I210_TQAVCTRL); + tqavctrl &= ~E1000_TQAVCTRL_DATATRANTIM; + tqavctrl &= ~E1000_TQAVCTRL_FETCHTIME_DELTA; + wr32(E1000_I210_TQAVCTRL, tqavctrl); + } } /* XXX: In i210 controller the sendSlope and loCredit parameters from @@ -1782,9 +1855,27 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue, * configuration' in respect to these parameters. */ - netdev_dbg(netdev, "CBS %s: queue %d idleslope %d sendslope %d hiCredit %d locredit %d\n", - (enable) ? "enabled" : "disabled", queue, - idleslope, sendslope, hicredit, locredit); + netdev_dbg(netdev, "Qav Tx mode: cbs %s, launchtime %s, queue %d \ + idleslope %d sendslope %d hiCredit %d \ + locredit %d\n", + (ring->cbs_enable) ? 
"enabled" : "disabled", + (ring->launchtime_enable) ? "enabled" : "disabled", queue, + ring->idleslope, ring->sendslope, ring->hicredit, + ring->locredit); +} + +static int igb_save_txtime_params(struct igb_adapter *adapter, int queue, + bool enable) +{ + struct igb_ring *ring; + + if (queue < 0 || queue > adapter->num_tx_queues) + return -EINVAL; + + ring = adapter->tx_ring[queue]; + ring->launchtime_enable = enable; + + return 0; } static int igb_save_cbs_params(struct igb_adapter *adapter, int queue, @@ -1807,21 +1898,15 @@ static int igb_save_cbs_params(struct igb_adapter *adapter, int queue, return 0; } -static bool is_any_cbs_enabled(struct igb_adapter *adapter) -{ - struct igb_ring *ring; - int i; - - for (i = 0; i < adapter->num_tx_queues; i++) { - ring = adapter->tx_ring[i]; - - if (ring->cbs_enable) - return true; - } - - return false; -} - +/** + * igb_setup_tx_mode - Switch to/from Qav Tx mode when applicable + * @adapter: pointer to adapter struct + * + * Configure TQAVCTRL register switching the controller's Tx mode + * if FQTSS mode is enabled or disabled. Additionally, will issue + * a call to igb_config_tx_modes() per queue so any previously saved + * Tx parameters are applied. + **/ static void igb_setup_tx_mode(struct igb_adapter *adapter) { struct net_device *netdev = adapter->netdev; @@ -1836,11 +1921,11 @@ static void igb_setup_tx_mode(struct igb_adapter *adapter) int i, max_queue; /* Configure TQAVCTRL register: set transmit mode to 'Qav', - * set data fetch arbitration to 'round robin' and set data - * transfer arbitration to 'credit shaper algorithm. + * set data fetch arbitration to 'round robin', set SP_WAIT_SR + * so SP queues wait for SR ones. */ val = rd32(E1000_I210_TQAVCTRL); - val |= E1000_TQAVCTRL_XMIT_MODE | E1000_TQAVCTRL_DATATRANARB; + val |= E1000_TQAVCTRL_XMIT_MODE | E1000_TQAVCTRL_SP_WAIT_SR; val &= ~E1000_TQAVCTRL_DATAFETCHARB; wr32(E1000_I210_TQAVCTRL, val); @@ -1881,11 +1966,7 @@ static void igb_setup_tx_mode(struct igb_adapter *adapter) adapter->num_tx_queues : I210_SR_QUEUES_NUM; for (i = 0; i < max_queue; i++) { - struct igb_ring *ring = adapter->tx_ring[i]; - - igb_configure_cbs(adapter, i, ring->cbs_enable, - ring->idleslope, ring->sendslope, - ring->hicredit, ring->locredit); + igb_config_tx_modes(adapter, i); } } else { wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT); @@ -2459,6 +2540,19 @@ igb_features_check(struct sk_buff *skb, struct net_device *dev, return features; } +static void igb_offload_apply(struct igb_adapter *adapter, s32 queue) +{ + if (!is_fqtss_enabled(adapter)) { + enable_fqtss(adapter, true); + return; + } + + igb_config_tx_modes(adapter, queue); + + if (!is_any_cbs_enabled(adapter) && !is_any_txtime_enabled(adapter)) + enable_fqtss(adapter, false); +} + static int igb_offload_cbs(struct igb_adapter *adapter, struct tc_cbs_qopt_offload *qopt) { @@ -2479,17 +2573,7 @@ static int igb_offload_cbs(struct igb_adapter *adapter, if (err) return err; - if (is_fqtss_enabled(adapter)) { - igb_configure_cbs(adapter, qopt->queue, qopt->enable, - qopt->idleslope, qopt->sendslope, - qopt->hicredit, qopt->locredit); - - if (!is_any_cbs_enabled(adapter)) - enable_fqtss(adapter, false); - - } else { - enable_fqtss(adapter, true); - } + igb_offload_apply(adapter, qopt->queue); return 0; } @@ -2698,7 +2782,7 @@ static int igb_setup_tc_cls_flower(struct igb_adapter *adapter, case TC_CLSFLOWER_STATS: return -EOPNOTSUPP; default: - return -EINVAL; + return -EOPNOTSUPP; } } @@ -2728,7 +2812,7 @@ static int igb_setup_tc_block(struct igb_adapter 
*adapter, switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, igb_setup_tc_block_cb, - adapter, adapter); + adapter, adapter, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, igb_setup_tc_block_cb, adapter); @@ -2738,6 +2822,29 @@ static int igb_setup_tc_block(struct igb_adapter *adapter, } } +static int igb_offload_txtime(struct igb_adapter *adapter, + struct tc_etf_qopt_offload *qopt) +{ + struct e1000_hw *hw = &adapter->hw; + int err; + + /* Launchtime offloading is only supported by i210 controller. */ + if (hw->mac.type != e1000_i210) + return -EOPNOTSUPP; + + /* Launchtime offloading is only supported by queues 0 and 1. */ + if (qopt->queue < 0 || qopt->queue > 1) + return -EINVAL; + + err = igb_save_txtime_params(adapter, qopt->queue, qopt->enable); + if (err) + return err; + + igb_offload_apply(adapter, qopt->queue); + + return 0; +} + static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { @@ -2748,6 +2855,8 @@ static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type, return igb_offload_cbs(adapter, type_data); case TC_SETUP_BLOCK: return igb_setup_tc_block(adapter, type_data); + case TC_SETUP_QDISC_ETF: + return igb_offload_txtime(adapter, type_data); default: return -EOPNOTSUPP; @@ -5568,11 +5677,14 @@ set_itr_now: } } -static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens, - u32 type_tucmd, u32 mss_l4len_idx) +static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, + struct igb_tx_buffer *first, + u32 vlan_macip_lens, u32 type_tucmd, + u32 mss_l4len_idx) { struct e1000_adv_tx_context_desc *context_desc; u16 i = tx_ring->next_to_use; + struct timespec64 ts; context_desc = IGB_TX_CTXTDESC(tx_ring, i); @@ -5587,9 +5699,18 @@ static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens, mss_l4len_idx |= tx_ring->reg_idx << 4; context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); - context_desc->seqnum_seed = 0; context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); + + /* We assume there is always a valid tx time available. Invalid times + * should have been handled by the upper layers. + */ + if (tx_ring->launchtime_enable) { + ts = ns_to_timespec64(first->skb->tstamp); + context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32); + } else { + context_desc->seqnum_seed = 0; + } } static int igb_tso(struct igb_ring *tx_ring, @@ -5672,7 +5793,8 @@ static int igb_tso(struct igb_ring *tx_ring, vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK; - igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx); + igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, + type_tucmd, mss_l4len_idx); return 1; } @@ -5727,7 +5849,7 @@ no_csum: vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK; - igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0); + igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0); } #define IGB_SET_FLAG(_input, _flag, _result) \ @@ -5909,7 +6031,7 @@ static int igb_tx_map(struct igb_ring *tx_ring, * We also need this memory barrier to make certain all of the * status bits have been updated before next_to_watch is written. 
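The context-descriptor change in this hunk reuses the otherwise-zero seqnum_seed field to carry the launch time: only the nanosecond part of skb->tstamp is kept, in the same 32 ns granularity as FetchTimeDelta. A small sketch of that encoding, assuming a plain 64-bit nanosecond value stands in for the ktime_t timestamp:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Illustrative absolute timestamp in ns, standing in for skb->tstamp. */
        int64_t tstamp_ns = 1533000000123456789LL;

        /* Mirror ns_to_timespec64(): split into seconds and nanoseconds. */
        int64_t tv_sec = tstamp_ns / 1000000000LL;
        int32_t tv_nsec = (int32_t)(tstamp_ns % 1000000000LL);

        /* igb_tx_ctxtdesc() keeps only the sub-second part, in 32 ns units. */
        uint32_t launchtime = (uint32_t)tv_nsec / 32;

        printf("tv_sec=%lld tv_nsec=%d -> seqnum_seed=%u\n",
               (long long)tv_sec, (int)tv_nsec, launchtime);
        return 0;
}

Discarding tv_sec presumes the launch time is always imminent when the driver sees the packet, which is exactly what the in-code comment asks of the upper layers ("invalid times should have been handled" before this point).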
*/ - wmb(); + dma_wmb(); /* set next_to_watch value indicating a packet is present */ first->next_to_watch = tx_desc; @@ -6015,8 +6137,6 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, } } - skb_tx_timestamp(skb); - if (skb_vlan_tag_present(skb)) { tx_flags |= IGB_TX_FLAGS_VLAN; tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT); @@ -6032,6 +6152,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, else if (!tso) igb_tx_csum(tx_ring, first); + skb_tx_timestamp(skb); + if (igb_tx_map(tx_ring, first, hdr_len)) goto cleanup_tx_tstamp; @@ -8409,7 +8531,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) * applicable for weak-ordered memory model archs, * such as IA-64). */ - wmb(); + dma_wmb(); writel(i, rx_ring->tail); } } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 144d5fe6b944..4fc906c6166b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -855,7 +855,8 @@ void ixgbe_free_rx_resources(struct ixgbe_ring *); void ixgbe_free_tx_resources(struct ixgbe_ring *); void ixgbe_configure_rx_ring(struct ixgbe_adapter *, struct ixgbe_ring *); void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *); -void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_ring *); +void ixgbe_disable_rx(struct ixgbe_adapter *adapter); +void ixgbe_disable_tx(struct ixgbe_adapter *adapter); void ixgbe_update_stats(struct ixgbe_adapter *adapter); int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter); bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index bd1ba88ec1d5..e5a8461fe6a9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -511,7 +511,7 @@ static void ixgbe_set_msglevel(struct net_device *netdev, u32 data) static int ixgbe_get_regs_len(struct net_device *netdev) { -#define IXGBE_REGS_LEN 1139 +#define IXGBE_REGS_LEN 1145 return IXGBE_REGS_LEN * sizeof(u32); } @@ -874,6 +874,14 @@ static void ixgbe_get_regs(struct net_device *netdev, /* X540 specific DCB registers */ regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR); regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG); + + /* Security config registers */ + regs_buff[1139] = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); + regs_buff[1140] = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT); + regs_buff[1141] = IXGBE_READ_REG(hw, IXGBE_SECTXBUFFAF); + regs_buff[1142] = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); + regs_buff[1143] = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); + regs_buff[1144] = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT); } static int ixgbe_get_eeprom_len(struct net_device *netdev) @@ -1690,35 +1698,17 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data) static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter) { - struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; - struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; - struct ixgbe_hw *hw = &adapter->hw; - u32 reg_ctl; - - /* shut down the DMA engines now so they can be reinitialized later */ + /* Shut down the DMA engines now so they can be reinitialized later, + * since the test rings and normally used rings should overlap on + * queue 0 we can just use the standard disable Rx/Tx calls and they + * will take care of disabling the test rings for us. 
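The IXGBE_REGS_LEN bump in the ethtool hunk above pairs with exactly six new reads: the old dump ended at index 1138, the appended SECTX/SECRX registers occupy 1139..1144, so the length macro moves to 1145. A compile-time check of that arithmetic (C11; the macro here is local to the sketch):

#include <assert.h>

#define IXGBE_REGS_LEN  1145    /* new value from the patch */

/* Old dump ended at regs_buff[1138]; six security registers were appended. */
static_assert(1138 + 6 + 1 == IXGBE_REGS_LEN,
              "regs dump length must cover the appended security registers");

int main(void)
{
        return 0;
}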
+ */ /* first Rx */ - hw->mac.ops.disable_rx(hw); - ixgbe_disable_rx_queue(adapter, rx_ring); + ixgbe_disable_rx(adapter); /* now Tx */ - reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx)); - reg_ctl &= ~IXGBE_TXDCTL_ENABLE; - IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl); - - switch (hw->mac.type) { - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: - reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); - reg_ctl &= ~IXGBE_DMATXCTL_TE; - IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl); - break; - default: - break; - } + ixgbe_disable_tx(adapter); ixgbe_reset(adapter); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 62e57b05a0ae..447098005490 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -4022,38 +4022,6 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, } } -void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, - struct ixgbe_ring *ring) -{ - struct ixgbe_hw *hw = &adapter->hw; - int wait_loop = IXGBE_MAX_RX_DESC_POLL; - u32 rxdctl; - u8 reg_idx = ring->reg_idx; - - if (ixgbe_removed(hw->hw_addr)) - return; - rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); - rxdctl &= ~IXGBE_RXDCTL_ENABLE; - - /* write value back with RXDCTL.ENABLE bit cleared */ - IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); - - if (hw->mac.type == ixgbe_mac_82598EB && - !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) - return; - - /* the hardware may take up to 100us to really disable the rx queue */ - do { - udelay(10); - rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); - } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE)); - - if (!wait_loop) { - e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within " - "the polling period\n", reg_idx); - } -} - void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, struct ixgbe_ring *ring) { @@ -4063,9 +4031,13 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, u32 rxdctl; u8 reg_idx = ring->reg_idx; - /* disable queue to avoid issues while updating state */ + /* disable queue to avoid use of these values while updating state */ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); - ixgbe_disable_rx_queue(adapter, ring); + rxdctl &= ~IXGBE_RXDCTL_ENABLE; + + /* write value back with RXDCTL.ENABLE bit cleared */ + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); + IXGBE_WRITE_FLUSH(hw); IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32))); IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32)); @@ -5275,6 +5247,8 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter, struct ixgbe_fwd_adapter *accel) { + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + int num_tc = netdev_get_num_tc(adapter->netdev); struct net_device *vdev = accel->netdev; int i, baseq, err; @@ -5286,6 +5260,11 @@ static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter, accel->rx_base_queue = baseq; accel->tx_base_queue = baseq; + /* record configuration for macvlan interface in vdev */ + for (i = 0; i < num_tc; i++) + netdev_bind_sb_channel_queue(adapter->netdev, vdev, + i, rss_i, baseq + (rss_i * i)); + for (i = 0; i < adapter->num_rx_queues_per_pool; i++) adapter->rx_ring[baseq + i]->netdev = vdev; @@ -5310,6 +5289,10 @@ static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter, netdev_err(vdev, "L2FW offload disabled due to L2 
filter error\n"); + /* unbind the queues and drop the subordinate channel config */ + netdev_unbind_sb_channel(adapter->netdev, vdev); + netdev_set_sb_channel(vdev, 0); + clear_bit(accel->pool, adapter->fwd_bitmask); kfree(accel); @@ -5622,6 +5605,212 @@ void ixgbe_up(struct ixgbe_adapter *adapter) ixgbe_up_complete(adapter); } +static unsigned long ixgbe_get_completion_timeout(struct ixgbe_adapter *adapter) +{ + u16 devctl2; + + pcie_capability_read_word(adapter->pdev, PCI_EXP_DEVCTL2, &devctl2); + + switch (devctl2 & IXGBE_PCIDEVCTRL2_TIMEO_MASK) { + case IXGBE_PCIDEVCTRL2_17_34s: + case IXGBE_PCIDEVCTRL2_4_8s: + /* For now we cap the upper limit on delay to 2 seconds + * as we end up going up to 34 seconds of delay in worst + * case timeout value. + */ + case IXGBE_PCIDEVCTRL2_1_2s: + return 2000000ul; /* 2.0 s */ + case IXGBE_PCIDEVCTRL2_260_520ms: + return 520000ul; /* 520 ms */ + case IXGBE_PCIDEVCTRL2_65_130ms: + return 130000ul; /* 130 ms */ + case IXGBE_PCIDEVCTRL2_16_32ms: + return 32000ul; /* 32 ms */ + case IXGBE_PCIDEVCTRL2_1_2ms: + return 2000ul; /* 2 ms */ + case IXGBE_PCIDEVCTRL2_50_100us: + return 100ul; /* 100 us */ + case IXGBE_PCIDEVCTRL2_16_32ms_def: + return 32000ul; /* 32 ms */ + default: + break; + } + + /* We shouldn't need to hit this path, but just in case default as + * though completion timeout is not supported and support 32ms. + */ + return 32000ul; +} + +void ixgbe_disable_rx(struct ixgbe_adapter *adapter) +{ + unsigned long wait_delay, delay_interval; + struct ixgbe_hw *hw = &adapter->hw; + int i, wait_loop; + u32 rxdctl; + + /* disable receives */ + hw->mac.ops.disable_rx(hw); + + if (ixgbe_removed(hw->hw_addr)) + return; + + /* disable all enabled Rx queues */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct ixgbe_ring *ring = adapter->rx_ring[i]; + u8 reg_idx = ring->reg_idx; + + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); + rxdctl &= ~IXGBE_RXDCTL_ENABLE; + rxdctl |= IXGBE_RXDCTL_SWFLSH; + + /* write value back with RXDCTL.ENABLE bit cleared */ + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); + } + + /* RXDCTL.EN may not change on 82598 if link is down, so skip it */ + if (hw->mac.type == ixgbe_mac_82598EB && + !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) + return; + + /* Determine our minimum delay interval. We will increase this value + * with each subsequent test. This way if the device returns quickly + * we should spend as little time as possible waiting, however as + * the time increases we will wait for larger periods of time. + * + * The trick here is that we increase the interval using the + * following pattern: 1x 3x 5x 7x 9x 11x 13x 15x 17x 19x. The result + * of that wait is that it totals up to 100x whatever interval we + * choose. Since our minimum wait is 100us we can just divide the + * total timeout by 100 to get our minimum delay interval. + */ + delay_interval = ixgbe_get_completion_timeout(adapter) / 100; + + wait_loop = IXGBE_MAX_RX_DESC_POLL; + wait_delay = delay_interval; + + while (wait_loop--) { + usleep_range(wait_delay, wait_delay + 10); + wait_delay += delay_interval * 2; + rxdctl = 0; + + /* OR together the reading of all the active RXDCTL registers, + * and then test the result. We need the disable to complete + * before we start freeing the memory and invalidating the + * DMA mappings. 
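The "1x 3x 5x 7x ... 19x" comment above is worth making concrete: wait_delay starts at delay_interval and grows by 2 * delay_interval after each poll, so ten polls (IXGBE_MAX_RX_DESC_POLL is 10 in this driver) sum to exactly 100 * delay_interval, which is why the completion timeout is divided by 100. A quick model, using the 32 ms default timeout as the example:

#include <stdio.h>

int main(void)
{
        unsigned long delay_interval = 32000 / 100;     /* 32 ms timeout -> 320 us */
        unsigned long wait_delay = delay_interval;
        unsigned long total = 0;
        int wait_loop = 10;                             /* IXGBE_MAX_RX_DESC_POLL */

        while (wait_loop--) {
                total += wait_delay;    /* stands in for usleep_range() */
                wait_delay += delay_interval * 2;
        }

        /* 1 + 3 + 5 + ... + 19 == 100, so total equals the full timeout. */
        printf("worst-case wait = %lu us\n", total);    /* prints 32000 */
        return 0;
}

So a device that deasserts RXDCTL.ENABLE quickly costs only one short sleep, while a slow one is given the whole advertised completion timeout before the driver complains.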
+ */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct ixgbe_ring *ring = adapter->rx_ring[i]; + u8 reg_idx = ring->reg_idx; + + rxdctl |= IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); + } + + if (!(rxdctl & IXGBE_RXDCTL_ENABLE)) + return; + } + + e_err(drv, + "RXDCTL.ENABLE for one or more queues not cleared within the polling period\n"); +} + +void ixgbe_disable_tx(struct ixgbe_adapter *adapter) +{ + unsigned long wait_delay, delay_interval; + struct ixgbe_hw *hw = &adapter->hw; + int i, wait_loop; + u32 txdctl; + + if (ixgbe_removed(hw->hw_addr)) + return; + + /* disable all enabled Tx queues */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ixgbe_ring *ring = adapter->tx_ring[i]; + u8 reg_idx = ring->reg_idx; + + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); + } + + /* disable all enabled XDP Tx queues */ + for (i = 0; i < adapter->num_xdp_queues; i++) { + struct ixgbe_ring *ring = adapter->xdp_ring[i]; + u8 reg_idx = ring->reg_idx; + + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); + } + + /* If the link is not up there shouldn't be much in the way of + * pending transactions. Those that are left will be flushed out + * when the reset logic goes through the flush sequence to clean out + * the pending Tx transactions. + */ + if (!(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) + goto dma_engine_disable; + + /* Determine our minimum delay interval. We will increase this value + * with each subsequent test. This way if the device returns quickly + * we should spend as little time as possible waiting, however as + * the time increases we will wait for larger periods of time. + * + * The trick here is that we increase the interval using the + * following pattern: 1x 3x 5x 7x 9x 11x 13x 15x 17x 19x. The result + * of that wait is that it totals up to 100x whatever interval we + * choose. Since our minimum wait is 100us we can just divide the + * total timeout by 100 to get our minimum delay interval. + */ + delay_interval = ixgbe_get_completion_timeout(adapter) / 100; + + wait_loop = IXGBE_MAX_RX_DESC_POLL; + wait_delay = delay_interval; + + while (wait_loop--) { + usleep_range(wait_delay, wait_delay + 10); + wait_delay += delay_interval * 2; + txdctl = 0; + + /* OR together the reading of all the active TXDCTL registers, + * and then test the result. We need the disable to complete + * before we start freeing the memory and invalidating the + * DMA mappings. 
+ */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ixgbe_ring *ring = adapter->tx_ring[i]; + u8 reg_idx = ring->reg_idx; + + txdctl |= IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); + } + for (i = 0; i < adapter->num_xdp_queues; i++) { + struct ixgbe_ring *ring = adapter->xdp_ring[i]; + u8 reg_idx = ring->reg_idx; + + txdctl |= IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); + } + + if (!(txdctl & IXGBE_TXDCTL_ENABLE)) + goto dma_engine_disable; + } + + e_err(drv, + "TXDCTL.ENABLE for one or more queues not cleared within the polling period\n"); + +dma_engine_disable: + /* Disable the Tx DMA engine on 82599 and later MAC */ + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, + (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & + ~IXGBE_DMATXCTL_TE)); + /* fall through */ + default: + break; + } +} + void ixgbe_reset(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; @@ -5803,24 +5992,19 @@ void ixgbe_down(struct ixgbe_adapter *adapter) if (test_and_set_bit(__IXGBE_DOWN, &adapter->state)) return; /* do nothing if already down */ - /* disable receives */ - hw->mac.ops.disable_rx(hw); + /* Shut off incoming Tx traffic */ + netif_tx_stop_all_queues(netdev); - /* disable all enabled rx queues */ - for (i = 0; i < adapter->num_rx_queues; i++) - /* this call also flushes the previous write */ - ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]); + /* call carrier off first to avoid false dev_watchdog timeouts */ + netif_carrier_off(netdev); + netif_tx_disable(netdev); - usleep_range(10000, 20000); + /* Disable Rx */ + ixgbe_disable_rx(adapter); /* synchronize_sched() needed for pending XDP buffers to drain */ if (adapter->xdp_ring[0]) synchronize_sched(); - netif_tx_stop_all_queues(netdev); - - /* call carrier off first to avoid false dev_watchdog timeouts */ - netif_carrier_off(netdev); - netif_tx_disable(netdev); ixgbe_irq_disable(adapter); @@ -5848,30 +6032,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter) } /* disable transmits in the hardware now that interrupts are off */ - for (i = 0; i < adapter->num_tx_queues; i++) { - u8 reg_idx = adapter->tx_ring[i]->reg_idx; - IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); - } - for (i = 0; i < adapter->num_xdp_queues; i++) { - u8 reg_idx = adapter->xdp_ring[i]->reg_idx; - - IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); - } - - /* Disable the Tx DMA engine on 82599 and later MAC */ - switch (hw->mac.type) { - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: - IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, - (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & - ~IXGBE_DMATXCTL_TE)); - break; - default: - break; - } + ixgbe_disable_tx(adapter); if (!pci_channel_offline(adapter->pdev)) ixgbe_reset(adapter); @@ -6458,6 +6619,11 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) { struct ixgbe_adapter *adapter = netdev_priv(netdev); + if (adapter->xdp_prog) { + e_warn(probe, "MTU cannot be changed while XDP program is loaded\n"); + return -EPERM; + } + /* * For 82599EB we cannot allow legacy VFs to enable their receive * paths when MTU greater than 1500 is configured. 
So display a @@ -8197,25 +8363,25 @@ static void ixgbe_atr(struct ixgbe_ring *ring, input, common, ring->queue_index); } +#ifdef IXGBE_FCOE static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { - struct ixgbe_fwd_adapter *fwd_adapter = accel_priv; struct ixgbe_adapter *adapter; - int txq; -#ifdef IXGBE_FCOE struct ixgbe_ring_feature *f; -#endif + int txq; - if (fwd_adapter) { - adapter = netdev_priv(dev); - txq = reciprocal_scale(skb_get_hash(skb), - adapter->num_rx_queues_per_pool); + if (sb_dev) { + u8 tc = netdev_get_prio_tc_map(dev, skb->priority); + struct net_device *vdev = sb_dev; - return txq + fwd_adapter->tx_base_queue; - } + txq = vdev->tc_to_txq[tc].offset; + txq += reciprocal_scale(skb_get_hash(skb), + vdev->tc_to_txq[tc].count); -#ifdef IXGBE_FCOE + return txq; + } /* * only execute the code below if protocol is FCoE @@ -8226,11 +8392,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, case htons(ETH_P_FIP): adapter = netdev_priv(dev); - if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) + if (!sb_dev && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) break; /* fall through */ default: - return fallback(dev, skb); + return fallback(dev, skb, sb_dev); } f = &adapter->ring_feature[RING_F_FCOE]; @@ -8242,11 +8408,9 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, txq -= f->indices; return txq + f->offset; -#else - return fallback(dev, skb); -#endif } +#endif static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter, struct xdp_frame *xdpf) { @@ -8766,6 +8930,11 @@ static int ixgbe_reassign_macvlan_pool(struct net_device *vdev, void *data) /* if we cannot find a free pool then disable the offload */ netdev_err(vdev, "L2FW offload disabled due to lack of queue resources\n"); macvlan_release_l2fw_offload(vdev); + + /* unbind the queues and drop the subordinate channel config */ + netdev_unbind_sb_channel(adapter->netdev, vdev); + netdev_set_sb_channel(vdev, 0); + kfree(accel); return 0; @@ -9329,7 +9498,7 @@ static int ixgbe_setup_tc_block(struct net_device *dev, switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, ixgbe_setup_tc_block_cb, - adapter, adapter); + adapter, adapter, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, ixgbe_setup_tc_block_cb, adapter); @@ -9393,6 +9562,11 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev, if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) features &= ~NETIF_F_LRO; + if (adapter->xdp_prog && (features & NETIF_F_LRO)) { + e_dev_err("LRO is not supported with XDP\n"); + features &= ~NETIF_F_LRO; + } + return features; } @@ -9769,6 +9943,13 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) if (!macvlan_supports_dest_filter(vdev)) return ERR_PTR(-EMEDIUMTYPE); + /* We need to lock down the macvlan to be a single queue device so that + * we can reuse the tc_to_txq field in the macvlan netdev to represent + * the queue mapping to our netdev. 
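With the macvlan locked down to a single queue, the new sb_dev branch of ixgbe_select_queue() below becomes plain arithmetic: look up the traffic class's queue block in tc_to_txq, then spread flows across that block with reciprocal_scale(). A userspace model of the selection; reciprocal_scale() is reproduced from the kernel, while the queue layout and hash value are illustrative:

#include <stdio.h>
#include <stdint.h>

/* Kernel's reciprocal_scale(): map a 32-bit value onto [0, ep_ro)
 * with a multiply and shift instead of a modulo. */
static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
{
        return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
}

int main(void)
{
        /* Illustrative layout: the sb_dev's TC owns 4 queues starting at 8. */
        uint32_t offset = 8, count = 4;
        uint32_t hash = 0x9e3779b9;     /* stands in for skb_get_hash() */

        uint32_t txq = offset + reciprocal_scale(hash, count);
        printf("hash %#x -> txq %u\n", hash, txq);      /* always in 8..11 */
        return 0;
}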
+ */ + if (netif_is_multiqueue(vdev)) + return ERR_PTR(-ERANGE); + pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools); if (pool == adapter->num_rx_pools) { u16 used_pools = adapter->num_vfs + adapter->num_rx_pools; @@ -9825,6 +10006,7 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) return ERR_PTR(-ENOMEM); set_bit(pool, adapter->fwd_bitmask); + netdev_set_sb_channel(vdev, pool); accel->pool = pool; accel->netdev = vdev; @@ -9866,6 +10048,10 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv) ring->netdev = NULL; } + /* unbind the queues and drop the subordinate channel config */ + netdev_unbind_sb_channel(pdev, accel->netdev); + netdev_set_sb_channel(accel->netdev, 0); + clear_bit(accel->pool, adapter->fwd_bitmask); kfree(accel); } @@ -9966,7 +10152,6 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp) case XDP_SETUP_PROG: return ixgbe_xdp_setup(dev, xdp->prog); case XDP_QUERY_PROG: - xdp->prog_attached = !!(adapter->xdp_prog); xdp->prog_id = adapter->xdp_prog ? adapter->xdp_prog->aux->id : 0; return 0; @@ -10026,7 +10211,6 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_open = ixgbe_open, .ndo_stop = ixgbe_close, .ndo_start_xmit = ixgbe_xmit_frame, - .ndo_select_queue = ixgbe_select_queue, .ndo_set_rx_mode = ixgbe_set_rx_mode, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = ixgbe_set_mac, @@ -10049,6 +10233,7 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_poll_controller = ixgbe_netpoll, #endif #ifdef IXGBE_FCOE + .ndo_select_queue = ixgbe_select_queue, .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target, .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put, diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 59416eddd840..d86446d202d5 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -4462,7 +4462,6 @@ static int ixgbevf_xdp(struct net_device *dev, struct netdev_bpf *xdp) case XDP_SETUP_PROG: return ixgbevf_xdp_setup(dev, xdp->prog); case XDP_QUERY_PROG: - xdp->prog_attached = !!(adapter->xdp_prog); xdp->prog_id = adapter->xdp_prog ? 
adapter->xdp_prog->aux->id : 0; return 0; diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index 06ff185eb188..a5ab6f3403ae 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -1911,10 +1911,10 @@ jme_wait_link(struct jme_adapter *jme) { u32 phylink, to = JME_WAIT_LINK_TIME; - mdelay(1000); + msleep(1000); phylink = jme_linkstat_from_phy(jme); while (!(phylink & PHY_LINK_UP) && (to -= 10) > 0) { - mdelay(10); + usleep_range(10000, 11000); phylink = jme_linkstat_from_phy(jme); } } diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index afc810069440..7a637b51c7d2 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -563,14 +563,6 @@ ltq_etop_set_multicast_list(struct net_device *dev) spin_unlock_irqrestore(&priv->lock, flags); } -static u16 -ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) -{ - /* we are currently only using the first queue */ - return 0; -} - static int ltq_etop_init(struct net_device *dev) { @@ -641,7 +633,7 @@ static const struct net_device_ops ltq_eth_netdev_ops = { .ndo_set_mac_address = ltq_etop_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = ltq_etop_set_multicast_list, - .ndo_select_queue = ltq_etop_select_queue, + .ndo_select_queue = dev_pick_tx_zero, .ndo_init = ltq_etop_init, .ndo_tx_timeout = ltq_etop_tx_timeout, }; diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 0ad2f3f7da85..55c2a56c5dae 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -295,10 +295,10 @@ #define MVNETA_RSS_LU_TABLE_SIZE 1 /* Max number of Rx descriptors */ -#define MVNETA_MAX_RXD 128 +#define MVNETA_MAX_RXD 512 /* Max number of Tx descriptors */ -#define MVNETA_MAX_TXD 532 +#define MVNETA_MAX_TXD 1024 /* Max number of allowed TCP segments for software TSO */ #define MVNETA_MAX_TSO_SEGS 100 @@ -328,6 +328,8 @@ enum { ETHTOOL_STAT_EEE_WAKEUP, + ETHTOOL_STAT_SKB_ALLOC_ERR, + ETHTOOL_STAT_REFILL_ERR, ETHTOOL_MAX_STATS, }; @@ -375,6 +377,8 @@ static const struct mvneta_statistic mvneta_statistics[] = { { 0x3054, T_REG_32, "fc_sent", }, { 0x300c, T_REG_32, "internal_mac_transmit_err", }, { ETHTOOL_STAT_EEE_WAKEUP, T_SW, "eee_wakeup_errors", }, + { ETHTOOL_STAT_SKB_ALLOC_ERR, T_SW, "skb_alloc_errors", }, + { ETHTOOL_STAT_REFILL_ERR, T_SW, "refill_errors", }, }; struct mvneta_pcpu_stats { @@ -479,7 +483,10 @@ struct mvneta_port { #define MVNETA_RXD_ERR_RESOURCE (BIT(17) | BIT(18)) #define MVNETA_RXD_ERR_CODE_MASK (BIT(17) | BIT(18)) #define MVNETA_RXD_L3_IP4 BIT(25) -#define MVNETA_RXD_FIRST_LAST_DESC (BIT(26) | BIT(27)) +#define MVNETA_RXD_LAST_DESC BIT(26) +#define MVNETA_RXD_FIRST_DESC BIT(27) +#define MVNETA_RXD_FIRST_LAST_DESC (MVNETA_RXD_FIRST_DESC | \ + MVNETA_RXD_LAST_DESC) #define MVNETA_RXD_L4_CSUM_OK BIT(30) #if defined(__LITTLE_ENDIAN) @@ -589,9 +596,6 @@ struct mvneta_rx_queue { /* num of rx descriptors in the rx descriptor ring */ int size; - /* counter of times when mvneta_refill() failed */ - int missed; - u32 pkts_coal; u32 time_coal; @@ -609,6 +613,18 @@ struct mvneta_rx_queue { /* Index of the next RX DMA descriptor to process */ int next_desc_to_proc; + + /* Index of first RX DMA descriptor to refill */ + int first_to_refill; + u32 refill_num; + + /* pointer to uncomplete skb buffer */ + struct sk_buff *skb; + int left_size; + + /* error counters */ + u32 skb_alloc_err; + 
u32 refill_err; }; static enum cpuhp_state online_hpstate; @@ -621,6 +637,7 @@ static int txq_number = 8; static int rxq_def; static int rx_copybreak __read_mostly = 256; +static int rx_header_size __read_mostly = 128; /* HW BM need that each port be identify by a unique ID */ static int global_port_id; @@ -1684,13 +1701,6 @@ static void mvneta_rx_error(struct mvneta_port *pp, { u32 status = rx_desc->status; - if (!mvneta_rxq_desc_is_first_last(status)) { - netdev_err(pp->dev, - "bad rx status %08x (buffer oversize), size=%d\n", - status, rx_desc->data_size); - return; - } - switch (status & MVNETA_RXD_ERR_CODE_MASK) { case MVNETA_RXD_ERR_CRC: netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n", @@ -1715,7 +1725,8 @@ static void mvneta_rx_error(struct mvneta_port *pp, static void mvneta_rx_csum(struct mvneta_port *pp, u32 status, struct sk_buff *skb) { - if ((status & MVNETA_RXD_L3_IP4) && + if ((pp->dev->features & NETIF_F_RXCSUM) && + (status & MVNETA_RXD_L3_IP4) && (status & MVNETA_RXD_L4_CSUM_OK)) { skb->csum = 0; skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -1790,47 +1801,30 @@ static void mvneta_txq_done(struct mvneta_port *pp, } } -void *mvneta_frag_alloc(unsigned int frag_size) -{ - if (likely(frag_size <= PAGE_SIZE)) - return netdev_alloc_frag(frag_size); - else - return kmalloc(frag_size, GFP_ATOMIC); -} -EXPORT_SYMBOL_GPL(mvneta_frag_alloc); - -void mvneta_frag_free(unsigned int frag_size, void *data) -{ - if (likely(frag_size <= PAGE_SIZE)) - skb_free_frag(data); - else - kfree(data); -} -EXPORT_SYMBOL_GPL(mvneta_frag_free); - /* Refill processing for SW buffer management */ +/* Allocate page per descriptor */ static int mvneta_rx_refill(struct mvneta_port *pp, struct mvneta_rx_desc *rx_desc, - struct mvneta_rx_queue *rxq) - + struct mvneta_rx_queue *rxq, + gfp_t gfp_mask) { dma_addr_t phys_addr; - void *data; + struct page *page; - data = mvneta_frag_alloc(pp->frag_size); - if (!data) + page = __dev_alloc_page(gfp_mask); + if (!page) return -ENOMEM; - phys_addr = dma_map_single(pp->dev->dev.parent, data, - MVNETA_RX_BUF_SIZE(pp->pkt_size), - DMA_FROM_DEVICE); + /* map page for use */ + phys_addr = dma_map_page(pp->dev->dev.parent, page, 0, PAGE_SIZE, + DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) { - mvneta_frag_free(pp->frag_size, data); + __free_page(page); return -ENOMEM; } phys_addr += pp->rx_offset_correction; - mvneta_rx_desc_fill(rx_desc, phys_addr, data, rxq); + mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq); return 0; } @@ -1893,115 +1887,192 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp, for (i = 0; i < rxq->size; i++) { struct mvneta_rx_desc *rx_desc = rxq->descs + i; void *data = rxq->buf_virt_addr[i]; + if (!data || !(rx_desc->buf_phys_addr)) + continue; dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr, MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); - mvneta_frag_free(pp->frag_size, data); + __free_page(data); } } +static inline +int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq) +{ + struct mvneta_rx_desc *rx_desc; + int curr_desc = rxq->first_to_refill; + int i; + + for (i = 0; (i < rxq->refill_num) && (i < 64); i++) { + rx_desc = rxq->descs + curr_desc; + if (!(rx_desc->buf_phys_addr)) { + if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) { + pr_err("Can't refill queue %d. 
Done %d from %d\n", + rxq->id, i, rxq->refill_num); + rxq->refill_err++; + break; + } + } + curr_desc = MVNETA_QUEUE_NEXT_DESC(rxq, curr_desc); + } + rxq->refill_num -= i; + rxq->first_to_refill = curr_desc; + + return i; +} + /* Main rx processing when using software buffer management */ -static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo, +static int mvneta_rx_swbm(struct napi_struct *napi, + struct mvneta_port *pp, int budget, struct mvneta_rx_queue *rxq) { - struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports); struct net_device *dev = pp->dev; - int rx_done; + int rx_todo, rx_proc; + int refill = 0; u32 rcvd_pkts = 0; u32 rcvd_bytes = 0; /* Get number of received packets */ - rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); - - if (rx_todo > rx_done) - rx_todo = rx_done; - - rx_done = 0; + rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq); + rx_proc = 0; /* Fairness NAPI loop */ - while (rx_done < rx_todo) { + while ((rcvd_pkts < budget) && (rx_proc < rx_todo)) { struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq); - struct sk_buff *skb; unsigned char *data; + struct page *page; dma_addr_t phys_addr; - u32 rx_status, frag_size; - int rx_bytes, err, index; + u32 rx_status, index; + int rx_bytes, skb_size, copy_size; + int frag_num, frag_size, frag_offset; - rx_done++; - rx_status = rx_desc->status; - rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); index = rx_desc - rxq->descs; - data = rxq->buf_virt_addr[index]; - phys_addr = rx_desc->buf_phys_addr - pp->rx_offset_correction; - - if (!mvneta_rxq_desc_is_first_last(rx_status) || - (rx_status & MVNETA_RXD_ERR_SUMMARY)) { - mvneta_rx_error(pp, rx_desc); -err_drop_frame: - dev->stats.rx_errors++; - /* leave the descriptor untouched */ - continue; - } - - if (rx_bytes <= rx_copybreak) { - /* better copy a small frame and not unmap the DMA region */ - skb = netdev_alloc_skb_ip_align(dev, rx_bytes); - if (unlikely(!skb)) - goto err_drop_frame; - - dma_sync_single_range_for_cpu(dev->dev.parent, - phys_addr, - MVNETA_MH_SIZE + NET_SKB_PAD, - rx_bytes, - DMA_FROM_DEVICE); - skb_put_data(skb, data + MVNETA_MH_SIZE + NET_SKB_PAD, - rx_bytes); + page = (struct page *)rxq->buf_virt_addr[index]; + data = page_address(page); + /* Prefetch header */ + prefetch(data); - skb->protocol = eth_type_trans(skb, dev); - mvneta_rx_csum(pp, rx_status, skb); - napi_gro_receive(&port->napi, skb); - - rcvd_pkts++; - rcvd_bytes += rx_bytes; + phys_addr = rx_desc->buf_phys_addr; + rx_status = rx_desc->status; + rx_proc++; + rxq->refill_num++; + + if (rx_status & MVNETA_RXD_FIRST_DESC) { + /* Check errors only for FIRST descriptor */ + if (rx_status & MVNETA_RXD_ERR_SUMMARY) { + mvneta_rx_error(pp, rx_desc); + dev->stats.rx_errors++; + /* leave the descriptor untouched */ + continue; + } + rx_bytes = rx_desc->data_size - + (ETH_FCS_LEN + MVNETA_MH_SIZE); + + /* Allocate small skb for each new packet */ + skb_size = max(rx_copybreak, rx_header_size); + rxq->skb = netdev_alloc_skb_ip_align(dev, skb_size); + if (unlikely(!rxq->skb)) { + netdev_err(dev, + "Can't allocate skb on queue %d\n", + rxq->id); + dev->stats.rx_dropped++; + rxq->skb_alloc_err++; + continue; + } + copy_size = min(skb_size, rx_bytes); + + /* Copy data from buffer to SKB, skip Marvell header */ + memcpy(rxq->skb->data, data + MVNETA_MH_SIZE, + copy_size); + skb_put(rxq->skb, copy_size); + rxq->left_size = rx_bytes - copy_size; + + mvneta_rx_csum(pp, rx_status, rxq->skb); + if (rxq->left_size == 0) { + int size = copy_size + MVNETA_MH_SIZE; + + 
dma_sync_single_range_for_cpu(dev->dev.parent, + phys_addr, 0, + size, + DMA_FROM_DEVICE); + + /* leave the descriptor and buffer untouched */ + } else { + /* refill descriptor with new buffer later */ + rx_desc->buf_phys_addr = 0; + + frag_num = 0; + frag_offset = copy_size + MVNETA_MH_SIZE; + frag_size = min(rxq->left_size, + (int)(PAGE_SIZE - frag_offset)); + skb_add_rx_frag(rxq->skb, frag_num, page, + frag_offset, frag_size, + PAGE_SIZE); + dma_unmap_single(dev->dev.parent, phys_addr, + PAGE_SIZE, DMA_FROM_DEVICE); + rxq->left_size -= frag_size; + } + } else { + /* Middle or Last descriptor */ + if (unlikely(!rxq->skb)) { + pr_debug("no skb for rx_status 0x%x\n", + rx_status); + continue; + } + if (!rxq->left_size) { + /* last descriptor has only FCS */ + /* and can be discarded */ + dma_sync_single_range_for_cpu(dev->dev.parent, + phys_addr, 0, + ETH_FCS_LEN, + DMA_FROM_DEVICE); + /* leave the descriptor and buffer untouched */ + } else { + /* refill descriptor with new buffer later */ + rx_desc->buf_phys_addr = 0; + + frag_num = skb_shinfo(rxq->skb)->nr_frags; + frag_offset = 0; + frag_size = min(rxq->left_size, + (int)(PAGE_SIZE - frag_offset)); + skb_add_rx_frag(rxq->skb, frag_num, page, + frag_offset, frag_size, + PAGE_SIZE); + + dma_unmap_single(dev->dev.parent, phys_addr, + PAGE_SIZE, + DMA_FROM_DEVICE); + + rxq->left_size -= frag_size; + } + } /* Middle or Last descriptor */ - /* leave the descriptor and buffer untouched */ + if (!(rx_status & MVNETA_RXD_LAST_DESC)) + /* no last descriptor this time */ continue; - } - /* Refill processing */ - err = mvneta_rx_refill(pp, rx_desc, rxq); - if (err) { - netdev_err(dev, "Linux processing - Can't refill\n"); - rxq->missed++; - goto err_drop_frame; + if (rxq->left_size) { + pr_err("get last desc, but left_size (%d) != 0\n", + rxq->left_size); + dev_kfree_skb_any(rxq->skb); + rxq->left_size = 0; + rxq->skb = NULL; + continue; } - - frag_size = pp->frag_size; - - skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size); - - /* After refill old buffer has to be unmapped regardless - * the skb is successfully built or not. 
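The reworked SWBM receive path above splits every frame the same way: copy up to max(rx_copybreak, rx_header_size) bytes of headers into a freshly allocated skb, then attach whatever remains of the page (and of any middle/last descriptors) as page fragments. A userspace model of the size bookkeeping for a single-descriptor frame, assuming the 2-byte Marvell header (MVNETA_MH_SIZE) and a 4 KiB page:

#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }
static int max_int(int a, int b) { return a > b ? a : b; }

int main(void)
{
        int rx_copybreak = 256, rx_header_size = 128;   /* module defaults */
        int mh_size = 2;                                /* MVNETA_MH_SIZE */
        int page_size = 4096;
        int rx_bytes = 3000;                            /* example frame */

        int skb_size = max_int(rx_copybreak, rx_header_size);
        int copy_size = min_int(skb_size, rx_bytes);
        int left_size = rx_bytes - copy_size;

        /* First fragment starts right after the copied headers. */
        int frag_offset = copy_size + mh_size;
        int frag_size = min_int(left_size, page_size - frag_offset);

        printf("copy %d bytes, frag %d bytes at page offset %d, left %d\n",
               copy_size, frag_size, frag_offset, left_size - frag_size);
        return 0;
}

This prints a 256-byte header copy plus one 2744-byte fragment, which is the common case; only frames larger than a page minus the header copy spill into the middle/last descriptor handling.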
- */ - dma_unmap_single(dev->dev.parent, phys_addr, - MVNETA_RX_BUF_SIZE(pp->pkt_size), - DMA_FROM_DEVICE); - - if (!skb) - goto err_drop_frame; - rcvd_pkts++; - rcvd_bytes += rx_bytes; + rcvd_bytes += rxq->skb->len; /* Linux processing */ - skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD); - skb_put(skb, rx_bytes); - - skb->protocol = eth_type_trans(skb, dev); + rxq->skb->protocol = eth_type_trans(rxq->skb, dev); - mvneta_rx_csum(pp, rx_status, skb); + if (dev->features & NETIF_F_GRO) + napi_gro_receive(napi, rxq->skb); + else + netif_receive_skb(rxq->skb); - napi_gro_receive(&port->napi, skb); + /* clean uncomplete skb pointer in queue */ + rxq->skb = NULL; + rxq->left_size = 0; } if (rcvd_pkts) { @@ -2013,17 +2084,20 @@ err_drop_frame: u64_stats_update_end(&stats->syncp); } + /* return some buffers to hardware queue, one at a time is too slow */ + refill = mvneta_rx_refill_queue(pp, rxq); + /* Update rxq management counters */ - mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); + mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill); - return rx_done; + return rcvd_pkts; } /* Main rx processing when using hardware buffer management */ -static int mvneta_rx_hwbm(struct mvneta_port *pp, int rx_todo, +static int mvneta_rx_hwbm(struct napi_struct *napi, + struct mvneta_port *pp, int rx_todo, struct mvneta_rx_queue *rxq) { - struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports); struct net_device *dev = pp->dev; int rx_done; u32 rcvd_pkts = 0; @@ -2085,7 +2159,7 @@ err_drop_frame: skb->protocol = eth_type_trans(skb, dev); mvneta_rx_csum(pp, rx_status, skb); - napi_gro_receive(&port->napi, skb); + napi_gro_receive(napi, skb); rcvd_pkts++; rcvd_bytes += rx_bytes; @@ -2102,7 +2176,7 @@ err_drop_frame: err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC); if (err) { netdev_err(dev, "Linux processing - Can't refill\n"); - rxq->missed++; + rxq->refill_err++; goto err_drop_frame_ret_pool; } @@ -2129,7 +2203,7 @@ err_drop_frame: mvneta_rx_csum(pp, rx_status, skb); - napi_gro_receive(&port->napi, skb); + napi_gro_receive(napi, skb); } if (rcvd_pkts) { @@ -2722,9 +2796,11 @@ static int mvneta_poll(struct napi_struct *napi, int budget) if (rx_queue) { rx_queue = rx_queue - 1; if (pp->bm_priv) - rx_done = mvneta_rx_hwbm(pp, budget, &pp->rxqs[rx_queue]); + rx_done = mvneta_rx_hwbm(napi, pp, budget, + &pp->rxqs[rx_queue]); else - rx_done = mvneta_rx_swbm(pp, budget, &pp->rxqs[rx_queue]); + rx_done = mvneta_rx_swbm(napi, pp, budget, + &pp->rxqs[rx_queue]); } if (rx_done < budget) { @@ -2761,9 +2837,11 @@ static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, for (i = 0; i < num; i++) { memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc)); - if (mvneta_rx_refill(pp, rxq->descs + i, rxq) != 0) { - netdev_err(pp->dev, "%s:rxq %d, %d of %d buffs filled\n", - __func__, rxq->id, i, num); + if (mvneta_rx_refill(pp, rxq->descs + i, rxq, + GFP_KERNEL) != 0) { + netdev_err(pp->dev, + "%s:rxq %d, %d of %d buffs filled\n", + __func__, rxq->id, i, num); break; } } @@ -2821,21 +2899,23 @@ static void mvneta_rxq_hw_init(struct mvneta_port *pp, mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys); mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size); - /* Set Offset */ - mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD - pp->rx_offset_correction); - /* Set coalescing pkts and time */ mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); if (!pp->bm_priv) { - /* Fill RXQ with buffers from RX pool */ - mvneta_rxq_buf_size_set(pp, 
rxq, - MVNETA_RX_BUF_SIZE(pp->pkt_size)); + /* Set Offset */ + mvneta_rxq_offset_set(pp, rxq, 0); + mvneta_rxq_buf_size_set(pp, rxq, pp->frag_size); mvneta_rxq_bm_disable(pp, rxq); mvneta_rxq_fill(pp, rxq, rxq->size); } else { + /* Set Offset */ + mvneta_rxq_offset_set(pp, rxq, + NET_SKB_PAD - pp->rx_offset_correction); + mvneta_rxq_bm_enable(pp, rxq); + /* Fill RXQ with buffers from RX pool */ mvneta_rxq_long_pool_set(pp, rxq); mvneta_rxq_short_pool_set(pp, rxq); mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size); @@ -2864,6 +2944,9 @@ static void mvneta_rxq_deinit(struct mvneta_port *pp, { mvneta_rxq_drop_pkts(pp, rxq); + if (rxq->skb) + dev_kfree_skb_any(rxq->skb); + if (rxq->descs) dma_free_coherent(pp->dev->dev.parent, rxq->size * MVNETA_DESC_ALIGNED_SIZE, @@ -2874,6 +2957,10 @@ static void mvneta_rxq_deinit(struct mvneta_port *pp, rxq->last_desc = 0; rxq->next_desc_to_proc = 0; rxq->descs_phys = 0; + rxq->first_to_refill = 0; + rxq->refill_num = 0; + rxq->skb = NULL; + rxq->left_size = 0; } static int mvneta_txq_sw_init(struct mvneta_port *pp, @@ -3177,8 +3264,6 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu) mvneta_bm_update_mtu(pp, mtu); pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu); - pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) + - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); ret = mvneta_setup_rxqs(pp); if (ret) { @@ -3194,7 +3279,6 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu) on_each_cpu(mvneta_percpu_enable, pp, true); mvneta_start_dev(pp); - mvneta_port_up(pp); netdev_update_features(dev); @@ -3666,8 +3750,7 @@ static int mvneta_open(struct net_device *dev) int ret; pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu); - pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) + - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + pp->frag_size = PAGE_SIZE; ret = mvneta_setup_rxqs(pp); if (ret) @@ -3962,6 +4045,12 @@ static void mvneta_ethtool_update_stats(struct mvneta_port *pp) case ETHTOOL_STAT_EEE_WAKEUP: val = phylink_get_eee_err(pp->phylink); break; + case ETHTOOL_STAT_SKB_ALLOC_ERR: + val = pp->rxqs[0].skb_alloc_err; + break; + case ETHTOOL_STAT_REFILL_ERR: + val = pp->rxqs[0].refill_err; + break; } break; } @@ -4362,14 +4451,6 @@ static int mvneta_probe(struct platform_device *pdev) pp->dn = dn; pp->rxq_def = rxq_def; - - /* Set RX packet offset correction for platforms, whose - * NET_SKB_PAD, exceeds 64B. It should be 64B for 64-bit - * platforms and 0B for 32-bit ones. - */ - pp->rx_offset_correction = - max(0, NET_SKB_PAD - MVNETA_RX_PKT_OFFSET_CORRECTION); - pp->indir[0] = rxq_def; /* Get special SoC configurations */ @@ -4457,16 +4538,28 @@ static int mvneta_probe(struct platform_device *pdev) SET_NETDEV_DEV(dev, &pdev->dev); pp->id = global_port_id++; + pp->rx_offset_correction = 0; /* not relevant for SW BM */ /* Obtain access to BM resources if enabled and already initialized */ bm_node = of_parse_phandle(dn, "buffer-manager", 0); - if (bm_node && bm_node->data) { - pp->bm_priv = bm_node->data; - err = mvneta_bm_port_init(pdev, pp); - if (err < 0) { - dev_info(&pdev->dev, "use SW buffer management\n"); - pp->bm_priv = NULL; + if (bm_node) { + pp->bm_priv = mvneta_bm_get(bm_node); + if (pp->bm_priv) { + err = mvneta_bm_port_init(pdev, pp); + if (err < 0) { + dev_info(&pdev->dev, + "use SW buffer management\n"); + mvneta_bm_put(pp->bm_priv); + pp->bm_priv = NULL; + } } + /* Set RX packet offset correction for platforms, whose + * NET_SKB_PAD, exceeds 64B. 
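The offset-correction comment being moved into the BM-only branch here is concrete arithmetic: the driver clamps max(0, NET_SKB_PAD - MVNETA_RX_PKT_OFFSET_CORRECTION), and NET_SKB_PAD is itself max(32, L1_CACHE_BYTES). A short table of the outcomes, assuming the correction constant is 64 as in this driver:

#include <stdio.h>

int main(void)
{
        int correction = 64;    /* MVNETA_RX_PKT_OFFSET_CORRECTION (assumed) */
        /* NET_SKB_PAD = max(32, L1_CACHE_BYTES); typical cache line sizes. */
        int pads[] = { 32, 64, 128 };

        for (int i = 0; i < 3; i++) {
                int pad = pads[i];
                int corr = pad > correction ? pad - correction : 0;
                printf("NET_SKB_PAD=%3d -> rx_offset_correction=%d\n",
                       pad, corr);
        }
        return 0;
}

The 128-byte row models the 64-bit platforms the comment mentions (64 B of correction), while 32- and 64-byte pads give the 0 B expected on 32-bit ones.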
It should be 64B for 64-bit + * platforms and 0B for 32-bit ones. + */ + pp->rx_offset_correction = max(0, + NET_SKB_PAD - + MVNETA_RX_PKT_OFFSET_CORRECTION); } of_node_put(bm_node); @@ -4526,6 +4619,7 @@ err_netdev: mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id); + mvneta_bm_put(pp->bm_priv); } err_free_stats: free_percpu(pp->stats); @@ -4563,6 +4657,7 @@ static int mvneta_remove(struct platform_device *pdev) mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id); + mvneta_bm_put(pp->bm_priv); } return 0; diff --git a/drivers/net/ethernet/marvell/mvneta_bm.c b/drivers/net/ethernet/marvell/mvneta_bm.c index 466939f8f0cf..de468e1bdba9 100644 --- a/drivers/net/ethernet/marvell/mvneta_bm.c +++ b/drivers/net/ethernet/marvell/mvneta_bm.c @@ -18,6 +18,7 @@ #include <linux/module.h> #include <linux/netdevice.h> #include <linux/of.h> +#include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/skbuff.h> #include <net/hwbm.h> @@ -392,6 +393,20 @@ static void mvneta_bm_put_sram(struct mvneta_bm *priv) MVNETA_BM_BPPI_SIZE); } +struct mvneta_bm *mvneta_bm_get(struct device_node *node) +{ + struct platform_device *pdev = of_find_device_by_node(node); + + return pdev ? platform_get_drvdata(pdev) : NULL; +} +EXPORT_SYMBOL_GPL(mvneta_bm_get); + +void mvneta_bm_put(struct mvneta_bm *priv) +{ + platform_device_put(priv->pdev); +} +EXPORT_SYMBOL_GPL(mvneta_bm_put); + static int mvneta_bm_probe(struct platform_device *pdev) { struct device_node *dn = pdev->dev.of_node; diff --git a/drivers/net/ethernet/marvell/mvneta_bm.h b/drivers/net/ethernet/marvell/mvneta_bm.h index a32de432800c..c8425d35c049 100644 --- a/drivers/net/ethernet/marvell/mvneta_bm.h +++ b/drivers/net/ethernet/marvell/mvneta_bm.h @@ -130,10 +130,10 @@ struct mvneta_bm_pool { }; /* Declarations and definitions */ -void *mvneta_frag_alloc(unsigned int frag_size); -void mvneta_frag_free(unsigned int frag_size, void *data); - #if IS_ENABLED(CONFIG_MVNETA_BM) +struct mvneta_bm *mvneta_bm_get(struct device_node *node); +void mvneta_bm_put(struct mvneta_bm *priv); + void mvneta_bm_pool_destroy(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool, u8 port_map); void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool, @@ -178,5 +178,7 @@ static inline void mvneta_bm_pool_put_bp(struct mvneta_bm *priv, static inline u32 mvneta_bm_pool_get_bp(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool) { return 0; } +struct mvneta_bm *mvneta_bm_get(struct device_node *node) { return NULL; } +void mvneta_bm_put(struct mvneta_bm *priv) {} #endif /* CONFIG_MVNETA_BM */ #endif diff --git a/drivers/net/ethernet/marvell/mvpp2/Makefile b/drivers/net/ethernet/marvell/mvpp2/Makefile index 4d11dd9e3246..51f65a202c6e 100644 --- a/drivers/net/ethernet/marvell/mvpp2/Makefile +++ b/drivers/net/ethernet/marvell/mvpp2/Makefile @@ -4,4 +4,4 @@ # obj-$(CONFIG_MVPP2) := mvpp2.o -mvpp2-objs := mvpp2_main.o mvpp2_prs.o mvpp2_cls.o +mvpp2-objs := mvpp2_main.o mvpp2_prs.o mvpp2_cls.o mvpp2_debugfs.o diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index def00dc3eb4e..67b9e81b7c02 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -1,17 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Definitions for Marvell PPv2 network controller for Armada 375 SoC. 
* * Copyright (C) 2014 Marvell * * Marcin Wojtas <mw@semihalf.com> - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. */ #ifndef _MVPP2_H_ #define _MVPP2_H_ +#include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/phy.h> @@ -66,15 +64,18 @@ #define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4) #define MVPP2_PRS_TCAM_CTRL_REG 0x1230 #define MVPP2_PRS_TCAM_EN_MASK BIT(0) +#define MVPP2_PRS_TCAM_HIT_IDX_REG 0x1240 +#define MVPP2_PRS_TCAM_HIT_CNT_REG 0x1244 +#define MVPP2_PRS_TCAM_HIT_CNT_MASK GENMASK(15, 0) /* RSS Registers */ #define MVPP22_RSS_INDEX 0x1500 #define MVPP22_RSS_INDEX_TABLE_ENTRY(idx) (idx) #define MVPP22_RSS_INDEX_TABLE(idx) ((idx) << 8) #define MVPP22_RSS_INDEX_QUEUE(idx) ((idx) << 16) -#define MVPP22_RSS_TABLE_ENTRY 0x1508 -#define MVPP22_RSS_TABLE 0x1510 +#define MVPP22_RXQ2RSS_TABLE 0x1504 #define MVPP22_RSS_TABLE_POINTER(p) (p) +#define MVPP22_RSS_TABLE_ENTRY 0x1508 #define MVPP22_RSS_WIDTH 0x150c /* Classifier Registers */ @@ -86,11 +87,28 @@ #define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6 #define MVPP2_CLS_LKP_TBL_REG 0x1818 #define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff +#define MVPP2_CLS_LKP_FLOW_PTR(flow) ((flow) << 16) #define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25) #define MVPP2_CLS_FLOW_INDEX_REG 0x1820 #define MVPP2_CLS_FLOW_TBL0_REG 0x1824 +#define MVPP2_CLS_FLOW_TBL0_LAST BIT(0) +#define MVPP2_CLS_FLOW_TBL0_ENG_MASK 0x7 +#define MVPP2_CLS_FLOW_TBL0_OFFS 1 +#define MVPP2_CLS_FLOW_TBL0_ENG(x) ((x) << 1) +#define MVPP2_CLS_FLOW_TBL0_PORT_ID_MASK 0xff +#define MVPP2_CLS_FLOW_TBL0_PORT_ID(port) ((port) << 4) +#define MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL BIT(23) #define MVPP2_CLS_FLOW_TBL1_REG 0x1828 +#define MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK 0x7 +#define MVPP2_CLS_FLOW_TBL1_N_FIELDS(x) (x) +#define MVPP2_CLS_FLOW_TBL1_PRIO_MASK 0x3f +#define MVPP2_CLS_FLOW_TBL1_PRIO(x) ((x) << 9) +#define MVPP2_CLS_FLOW_TBL1_SEQ_MASK 0x7 +#define MVPP2_CLS_FLOW_TBL1_SEQ(x) ((x) << 15) #define MVPP2_CLS_FLOW_TBL2_REG 0x182c +#define MVPP2_CLS_FLOW_TBL2_FLD_MASK 0x3f +#define MVPP2_CLS_FLOW_TBL2_FLD_OFFS(n) ((n) * 6) +#define MVPP2_CLS_FLOW_TBL2_FLD(n, x) ((x) << ((n) * 6)) #define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4)) #define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3 #define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7 @@ -98,6 +116,32 @@ #define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0 #define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port)) +/* Classifier C2 engine Registers */ +#define MVPP22_CLS_C2_TCAM_IDX 0x1b00 +#define MVPP22_CLS_C2_TCAM_DATA0 0x1b10 +#define MVPP22_CLS_C2_TCAM_DATA1 0x1b14 +#define MVPP22_CLS_C2_TCAM_DATA2 0x1b18 +#define MVPP22_CLS_C2_TCAM_DATA3 0x1b1c +#define MVPP22_CLS_C2_TCAM_DATA4 0x1b20 +#define MVPP22_CLS_C2_PORT_ID(port) ((port) << 8) +#define MVPP22_CLS_C2_HIT_CTR 0x1b50 +#define MVPP22_CLS_C2_ACT 0x1b60 +#define MVPP22_CLS_C2_ACT_RSS_EN(act) (((act) & 0x3) << 19) +#define MVPP22_CLS_C2_ACT_FWD(act) (((act) & 0x7) << 13) +#define MVPP22_CLS_C2_ACT_QHIGH(act) (((act) & 0x3) << 11) +#define MVPP22_CLS_C2_ACT_QLOW(act) (((act) & 0x3) << 9) +#define MVPP22_CLS_C2_ATTR0 0x1b64 +#define MVPP22_CLS_C2_ATTR0_QHIGH(qh) (((qh) & 0x1f) << 24) +#define MVPP22_CLS_C2_ATTR0_QHIGH_MASK 0x1f +#define MVPP22_CLS_C2_ATTR0_QHIGH_OFFS 24 +#define MVPP22_CLS_C2_ATTR0_QLOW(ql) (((ql) & 0x7) << 21) +#define MVPP22_CLS_C2_ATTR0_QLOW_MASK 0x7 +#define MVPP22_CLS_C2_ATTR0_QLOW_OFFS 21 +#define 
MVPP22_CLS_C2_ATTR1 0x1b68 +#define MVPP22_CLS_C2_ATTR2 0x1b6c +#define MVPP22_CLS_C2_ATTR2_RSS_EN BIT(30) +#define MVPP22_CLS_C2_ATTR3 0x1b70 + /* Descriptor Manager Top Registers */ #define MVPP2_RXQ_NUM_REG 0x2040 #define MVPP2_RXQ_DESC_ADDR_REG 0x2044 @@ -275,6 +319,11 @@ #define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK 0xff00 #define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT 8 +/* Hit counters registers */ +#define MVPP2_CTRS_IDX 0x7040 +#define MVPP2_CLS_DEC_TBL_HIT_CTR 0x7700 +#define MVPP2_CLS_FLOW_TBL_HIT_CTR 0x7704 + /* TX Scheduler registers */ #define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000 #define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004 @@ -499,7 +548,7 @@ #define MVPP2_MAX_SKB_DESCS (MVPP2_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS) /* Dfault number of RXQs in use */ -#define MVPP2_DEFAULT_RXQ 4 +#define MVPP2_DEFAULT_RXQ 1 /* Max number of Rx descriptors */ #define MVPP2_MAX_RXD_MAX 1024 @@ -553,6 +602,11 @@ ((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE) #define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8) +#define MVPP2_BIT_TO_WORD(bit) ((bit) / 32) +#define MVPP2_BIT_IN_WORD(bit) ((bit) % 32) + +/* RSS constants */ +#define MVPP22_RSS_TABLE_ENTRIES 32 /* IPv6 max L3 address size */ #define MVPP2_MAX_L3_ADDR_SIZE 16 @@ -703,6 +757,9 @@ struct mvpp2 { /* Workqueue to gather hardware statistics */ char queue_name[30]; struct workqueue_struct *stats_queue; + + /* Debugfs root entry */ + struct dentry *dbgfs_dir; }; struct mvpp2_pcpu_stats { @@ -795,6 +852,9 @@ struct mvpp2_port { bool has_tx_irqs; u32 tx_time_coal; + + /* RSS indirection table */ + u32 indir[MVPP22_RSS_TABLE_ENTRIES]; }; /* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the @@ -831,52 +891,52 @@ struct mvpp2_port { /* HW TX descriptor for PPv2.1 */ struct mvpp21_tx_desc { - u32 command; /* Options used by HW for packet transmitting.*/ + __le32 command; /* Options used by HW for packet transmitting.*/ u8 packet_offset; /* the offset from the buffer beginning */ u8 phys_txq; /* destination queue ID */ - u16 data_size; /* data size of transmitted packet in bytes */ - u32 buf_dma_addr; /* physical addr of transmitted buffer */ - u32 buf_cookie; /* cookie for access to TX buffer in tx path */ - u32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */ - u32 reserved2; /* reserved (for future use) */ + __le16 data_size; /* data size of transmitted packet in bytes */ + __le32 buf_dma_addr; /* physical addr of transmitted buffer */ + __le32 buf_cookie; /* cookie for access to TX buffer in tx path */ + __le32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */ + __le32 reserved2; /* reserved (for future use) */ }; /* HW RX descriptor for PPv2.1 */ struct mvpp21_rx_desc { - u32 status; /* info about received packet */ - u16 reserved1; /* parser_info (for future use, PnC) */ - u16 data_size; /* size of received packet in bytes */ - u32 buf_dma_addr; /* physical address of the buffer */ - u32 buf_cookie; /* cookie for access to RX buffer in rx path */ - u16 reserved2; /* gem_port_id (for future use, PON) */ - u16 reserved3; /* csum_l4 (for future use, PnC) */ + __le32 status; /* info about received packet */ + __le16 reserved1; /* parser_info (for future use, PnC) */ + __le16 data_size; /* size of received packet in bytes */ + __le32 buf_dma_addr; /* physical address of the buffer */ + __le32 buf_cookie; /* cookie for access to RX buffer in rx path */ + __le16 reserved2; /* gem_port_id (for future use, PON) */ + __le16 reserved3; /* csum_l4 (for future use, PnC) */ u8 reserved4; /* bm_qset (for future use, BM) */ u8 reserved5; - u16 
reserved6; /* classify_info (for future use, PnC) */ - u32 reserved7; /* flow_id (for future use, PnC) */ - u32 reserved8; + __le16 reserved6; /* classify_info (for future use, PnC) */ + __le32 reserved7; /* flow_id (for future use, PnC) */ + __le32 reserved8; }; /* HW TX descriptor for PPv2.2 */ struct mvpp22_tx_desc { - u32 command; + __le32 command; u8 packet_offset; u8 phys_txq; - u16 data_size; - u64 reserved1; - u64 buf_dma_addr_ptp; - u64 buf_cookie_misc; + __le16 data_size; + __le64 reserved1; + __le64 buf_dma_addr_ptp; + __le64 buf_cookie_misc; }; /* HW RX descriptor for PPv2.2 */ struct mvpp22_rx_desc { - u32 status; - u16 reserved1; - u16 data_size; - u32 reserved2; - u32 reserved3; - u64 buf_dma_addr_key_hash; - u64 buf_cookie_misc; + __le32 status; + __le16 reserved1; + __le16 data_size; + __le32 reserved2; + __le32 reserved3; + __le64 buf_dma_addr_key_hash; + __le64 buf_cookie_misc; }; /* Opaque type used by the driver to manipulate the HW TX and RX @@ -1043,4 +1103,8 @@ u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu, u32 offset); void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu, u32 offset, u32 data); +void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name); + +void mvpp2_dbgfs_cleanup(struct mvpp2 *priv); + #endif diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c index 8581d5b17dd5..efdb7a656835 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c @@ -1,17 +1,343 @@ +// SPDX-License-Identifier: GPL-2.0 /* * RSS and Classifier helpers for Marvell PPv2 Network Controller * * Copyright (C) 2014 Marvell * * Marcin Wojtas <mw@semihalf.com> - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. 
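The descriptor fields above are re-typed from plain integers to __le16/__le32/__le64 because the hardware always writes them little-endian; with the annotations, sparse can flag any access that skips a byte-swap helper. A minimal sketch of the accessor pattern this implies, with hypothetical example_* helpers (the driver's real mvpp2_txdesc_*()/mvpp2_rxdesc_*() accessors also handle packed offset bits, ignored here for brevity):

    /* Illustrative only: fill and read a PPv2.2 descriptor with the
     * proper endianness conversions. A direct assignment of a
     * CPU-endian value would now produce a sparse warning.
     */
    static void example_txdesc_fill(struct mvpp22_tx_desc *desc,
                                    dma_addr_t dma, unsigned int len)
    {
        desc->data_size = cpu_to_le16(len);
        desc->buf_dma_addr_ptp = cpu_to_le64(dma);
    }

    static unsigned int example_rxdesc_size(const struct mvpp22_rx_desc *desc)
    {
        return le16_to_cpu(desc->data_size);
    }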
*/ #include "mvpp2.h" #include "mvpp2_cls.h" +#include "mvpp2_prs.h" + +#define MVPP2_DEF_FLOW(_type, _id, _opts, _ri, _ri_mask) \ +{ \ + .flow_type = _type, \ + .flow_id = _id, \ + .supported_hash_opts = _opts, \ + .prs_ri = { \ + .ri = _ri, \ + .ri_mask = _ri_mask \ + } \ +} + +static struct mvpp2_cls_flow cls_flows[MVPP2_N_FLOWS] = { + /* TCP over IPv4 flows, Not fragmented, no vlan tag */ + MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG, + MVPP22_CLS_HEK_IP4_5T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 | + MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG, + MVPP22_CLS_HEK_IP4_5T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT | + MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG, + MVPP22_CLS_HEK_IP4_5T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER | + MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + /* TCP over IPv4 flows, Not fragmented, with vlan tag */ + MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG, + MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG, + MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG, + MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK), + + /* TCP over IPv4 flows, fragmented, no vlan tag */ + MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG, + MVPP22_CLS_HEK_IP4_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 | + MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG, + MVPP22_CLS_HEK_IP4_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT | + MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG, + MVPP22_CLS_HEK_IP4_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER | + MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + /* TCP over IPv4 flows, fragmented, with vlan tag */ + MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG, + MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG, + MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG, + MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK), + + /* UDP over IPv4 flows, Not fragmented, no vlan tag */ + MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG, + MVPP22_CLS_HEK_IP4_5T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 | + MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG, + MVPP22_CLS_HEK_IP4_5T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT | + MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG, + MVPP22_CLS_HEK_IP4_5T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER | + MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK | 
MVPP2_PRS_RI_VLAN_MASK), + + /* UDP over IPv4 flows, Not fragmented, with vlan tag */ + MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG, + MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG, + MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG, + MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK), + + /* UDP over IPv4 flows, fragmented, no vlan tag */ + MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG, + MVPP22_CLS_HEK_IP4_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 | + MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG, + MVPP22_CLS_HEK_IP4_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT | + MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG, + MVPP22_CLS_HEK_IP4_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER | + MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + /* UDP over IPv4 flows, fragmented, with vlan tag */ + MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG, + MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG, + MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG, + MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK), + + /* TCP over IPv6 flows, not fragmented, no vlan tag */ + MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_UNTAG, + MVPP22_CLS_HEK_IP6_5T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 | + MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_UNTAG, + MVPP22_CLS_HEK_IP6_5T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT | + MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + /* TCP over IPv6 flows, not fragmented, with vlan tag */ + MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_TAG, + MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_TAG, + MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK), + + /* TCP over IPv6 flows, fragmented, no vlan tag */ + MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_UNTAG, + MVPP22_CLS_HEK_IP6_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 | + MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_UNTAG, + MVPP22_CLS_HEK_IP6_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT | + MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + /* TCP over IPv6 flows, fragmented, with vlan tag */ + MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_TAG, + MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE | + MVPP2_PRS_RI_L4_TCP, + 
MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_TAG, + MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE | + MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_IP_MASK), + + /* UDP over IPv6 flows, not fragmented, no vlan tag */ + MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_UNTAG, + MVPP22_CLS_HEK_IP6_5T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 | + MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_UNTAG, + MVPP22_CLS_HEK_IP6_5T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT | + MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + /* UDP over IPv6 flows, not fragmented, with vlan tag */ + MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_TAG, + MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_TAG, + MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK), + + /* UDP over IPv6 flows, fragmented, no vlan tag */ + MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_UNTAG, + MVPP22_CLS_HEK_IP6_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 | + MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_UNTAG, + MVPP22_CLS_HEK_IP6_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT | + MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), + + /* UDP over IPv6 flows, fragmented, with vlan tag */ + MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_TAG, + MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE | + MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK), + + MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_TAG, + MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE | + MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_IP_MASK), + + /* IPv4 flows, no vlan tag */ + MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG, + MVPP22_CLS_HEK_IP4_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4, + MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK), + MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG, + MVPP22_CLS_HEK_IP4_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT, + MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK), + MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG, + MVPP22_CLS_HEK_IP4_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER, + MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK), + + /* IPv4 flows, with vlan tag */ + MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG, + MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP4, + MVPP2_PRS_RI_L3_PROTO_MASK), + MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG, + MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP4_OPT, + MVPP2_PRS_RI_L3_PROTO_MASK), + MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG, + MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP4_OTHER, + MVPP2_PRS_RI_L3_PROTO_MASK), + + /* IPv6 flows, no vlan tag */ + MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_UNTAG, + MVPP22_CLS_HEK_IP6_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6, + MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK), + MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_UNTAG, + MVPP22_CLS_HEK_IP6_2T, + MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6, + MVPP2_PRS_RI_VLAN_MASK | 
MVPP2_PRS_RI_L3_PROTO_MASK), + + /* IPv6 flows, with vlan tag */ + MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_TAG, + MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP6, + MVPP2_PRS_RI_L3_PROTO_MASK), + MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_TAG, + MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN, + MVPP2_PRS_RI_L3_IP6, + MVPP2_PRS_RI_L3_PROTO_MASK), + + /* Non IP flow, no vlan tag */ + MVPP2_DEF_FLOW(ETHER_FLOW, MVPP2_FL_NON_IP_UNTAG, + 0, + MVPP2_PRS_RI_VLAN_NONE, + MVPP2_PRS_RI_VLAN_MASK), + /* Non IP flow, with vlan tag */ + MVPP2_DEF_FLOW(ETHER_FLOW, MVPP2_FL_NON_IP_TAG, + MVPP22_CLS_HEK_OPT_VLAN, + 0, 0), +}; + +u32 mvpp2_cls_flow_hits(struct mvpp2 *priv, int index) +{ + mvpp2_write(priv, MVPP2_CTRS_IDX, index); + + return mvpp2_read(priv, MVPP2_CLS_FLOW_TBL_HIT_CTR); +} + +void mvpp2_cls_flow_read(struct mvpp2 *priv, int index, + struct mvpp2_cls_flow_entry *fe) +{ + fe->index = index; + mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, index); + fe->data[0] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL0_REG); + fe->data[1] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL1_REG); + fe->data[2] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL2_REG); +} /* Update classification flow table registers */ static void mvpp2_cls_flow_write(struct mvpp2 *priv, @@ -23,6 +349,25 @@ static void mvpp2_cls_flow_write(struct mvpp2 *priv, mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]); } +u32 mvpp2_cls_lookup_hits(struct mvpp2 *priv, int index) +{ + mvpp2_write(priv, MVPP2_CTRS_IDX, index); + + return mvpp2_read(priv, MVPP2_CLS_DEC_TBL_HIT_CTR); +} + +void mvpp2_cls_lookup_read(struct mvpp2 *priv, int lkpid, int way, + struct mvpp2_cls_lookup_entry *le) +{ + u32 val; + + val = (way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | lkpid; + mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val); + le->way = way; + le->lkpid = lkpid; + le->data = mvpp2_read(priv, MVPP2_CLS_LKP_TBL_REG); +} + /* Update classification lookup table register */ static void mvpp2_cls_lookup_write(struct mvpp2 *priv, struct mvpp2_cls_lookup_entry *le) @@ -34,6 +379,439 @@ static void mvpp2_cls_lookup_write(struct mvpp2 *priv, mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data); } +/* Operations on flow entry */ +static int mvpp2_cls_flow_hek_num_get(struct mvpp2_cls_flow_entry *fe) +{ + return fe->data[1] & MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK; +} + +static void mvpp2_cls_flow_hek_num_set(struct mvpp2_cls_flow_entry *fe, + int num_of_fields) +{ + fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK; + fe->data[1] |= MVPP2_CLS_FLOW_TBL1_N_FIELDS(num_of_fields); +} + +static int mvpp2_cls_flow_hek_get(struct mvpp2_cls_flow_entry *fe, + int field_index) +{ + return (fe->data[2] >> MVPP2_CLS_FLOW_TBL2_FLD_OFFS(field_index)) & + MVPP2_CLS_FLOW_TBL2_FLD_MASK; +} + +static void mvpp2_cls_flow_hek_set(struct mvpp2_cls_flow_entry *fe, + int field_index, int field_id) +{ + fe->data[2] &= ~MVPP2_CLS_FLOW_TBL2_FLD(field_index, + MVPP2_CLS_FLOW_TBL2_FLD_MASK); + fe->data[2] |= MVPP2_CLS_FLOW_TBL2_FLD(field_index, field_id); +} + +static void mvpp2_cls_flow_eng_set(struct mvpp2_cls_flow_entry *fe, + int engine) +{ + fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_ENG(MVPP2_CLS_FLOW_TBL0_ENG_MASK); + fe->data[0] |= MVPP2_CLS_FLOW_TBL0_ENG(engine); +} + +int mvpp2_cls_flow_eng_get(struct mvpp2_cls_flow_entry *fe) +{ + return (fe->data[0] >> MVPP2_CLS_FLOW_TBL0_OFFS) & + MVPP2_CLS_FLOW_TBL0_ENG_MASK; +} + +static void mvpp2_cls_flow_port_id_sel(struct mvpp2_cls_flow_entry *fe, + bool from_packet) +{ + if (from_packet) + fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL; + else + fe->data[0] &= 
~MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL; +} + +static void mvpp2_cls_flow_seq_set(struct mvpp2_cls_flow_entry *fe, u32 seq) +{ + fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_SEQ(MVPP2_CLS_FLOW_TBL1_SEQ_MASK); + fe->data[1] |= MVPP2_CLS_FLOW_TBL1_SEQ(seq); +} + +static void mvpp2_cls_flow_last_set(struct mvpp2_cls_flow_entry *fe, + bool is_last) +{ + fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_LAST; + fe->data[0] |= !!is_last; +} + +static void mvpp2_cls_flow_pri_set(struct mvpp2_cls_flow_entry *fe, int prio) +{ + fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_PRIO(MVPP2_CLS_FLOW_TBL1_PRIO_MASK); + fe->data[1] |= MVPP2_CLS_FLOW_TBL1_PRIO(prio); +} + +static void mvpp2_cls_flow_port_add(struct mvpp2_cls_flow_entry *fe, + u32 port) +{ + fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID(port); +} + +/* Initialize the parser entry for the given flow */ +static void mvpp2_cls_flow_prs_init(struct mvpp2 *priv, + struct mvpp2_cls_flow *flow) +{ + mvpp2_prs_add_flow(priv, flow->flow_id, flow->prs_ri.ri, + flow->prs_ri.ri_mask); +} + +/* Initialize the Lookup Id table entry for the given flow */ +static void mvpp2_cls_flow_lkp_init(struct mvpp2 *priv, + struct mvpp2_cls_flow *flow) +{ + struct mvpp2_cls_lookup_entry le; + + le.way = 0; + le.lkpid = flow->flow_id; + + /* The default RxQ for this port is set in the C2 lookup */ + le.data = 0; + + /* We point on the first lookup in the sequence for the flow, that is + * the C2 lookup. + */ + le.data |= MVPP2_CLS_LKP_FLOW_PTR(MVPP2_FLOW_C2_ENTRY(flow->flow_id)); + + /* CLS is always enabled, RSS is enabled/disabled in C2 lookup */ + le.data |= MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK; + + mvpp2_cls_lookup_write(priv, &le); +} + +/* Initialize the flow table entries for the given flow */ +static void mvpp2_cls_flow_init(struct mvpp2 *priv, struct mvpp2_cls_flow *flow) +{ + struct mvpp2_cls_flow_entry fe; + int i; + + /* C2 lookup */ + memset(&fe, 0, sizeof(fe)); + fe.index = MVPP2_FLOW_C2_ENTRY(flow->flow_id); + + mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C2); + mvpp2_cls_flow_port_id_sel(&fe, true); + mvpp2_cls_flow_last_set(&fe, 0); + mvpp2_cls_flow_pri_set(&fe, 0); + mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_FIRST1); + + /* Add all ports */ + for (i = 0; i < MVPP2_MAX_PORTS; i++) + mvpp2_cls_flow_port_add(&fe, BIT(i)); + + mvpp2_cls_flow_write(priv, &fe); + + /* C3Hx lookups */ + for (i = 0; i < MVPP2_MAX_PORTS; i++) { + memset(&fe, 0, sizeof(fe)); + fe.index = MVPP2_PORT_FLOW_HASH_ENTRY(i, flow->flow_id); + + mvpp2_cls_flow_port_id_sel(&fe, true); + mvpp2_cls_flow_pri_set(&fe, i + 1); + mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_MIDDLE); + mvpp2_cls_flow_port_add(&fe, BIT(i)); + + mvpp2_cls_flow_write(priv, &fe); + } + + /* Update the last entry */ + mvpp2_cls_flow_last_set(&fe, 1); + mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_LAST); + + mvpp2_cls_flow_write(priv, &fe); +} + +/* Adds a field to the Header Extracted Key generation parameters*/ +static int mvpp2_flow_add_hek_field(struct mvpp2_cls_flow_entry *fe, + u32 field_id) +{ + int nb_fields = mvpp2_cls_flow_hek_num_get(fe); + + if (nb_fields == MVPP2_FLOW_N_FIELDS) + return -EINVAL; + + mvpp2_cls_flow_hek_set(fe, nb_fields, field_id); + + mvpp2_cls_flow_hek_num_set(fe, nb_fields + 1); + + return 0; +} + +static int mvpp2_flow_set_hek_fields(struct mvpp2_cls_flow_entry *fe, + unsigned long hash_opts) +{ + u32 field_id; + int i; + + /* Clear old fields */ + mvpp2_cls_flow_hek_num_set(fe, 0); + fe->data[2] = 0; + + for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) { + switch (BIT(i)) { + case MVPP22_CLS_HEK_OPT_VLAN: + 
field_id = MVPP22_CLS_FIELD_VLAN; + break; + case MVPP22_CLS_HEK_OPT_IP4SA: + field_id = MVPP22_CLS_FIELD_IP4SA; + break; + case MVPP22_CLS_HEK_OPT_IP4DA: + field_id = MVPP22_CLS_FIELD_IP4DA; + break; + case MVPP22_CLS_HEK_OPT_IP6SA: + field_id = MVPP22_CLS_FIELD_IP6SA; + break; + case MVPP22_CLS_HEK_OPT_IP6DA: + field_id = MVPP22_CLS_FIELD_IP6DA; + break; + case MVPP22_CLS_HEK_OPT_L4SIP: + field_id = MVPP22_CLS_FIELD_L4SIP; + break; + case MVPP22_CLS_HEK_OPT_L4DIP: + field_id = MVPP22_CLS_FIELD_L4DIP; + break; + default: + return -EINVAL; + } + if (mvpp2_flow_add_hek_field(fe, field_id)) + return -EINVAL; + } + + return 0; +} + +struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow) +{ + if (flow >= MVPP2_N_FLOWS) + return NULL; + + return &cls_flows[flow]; +} + +/* Set the hash generation options for the given traffic flow. + * One traffic flow (in the ethtool sense) has multiple classification flows, + * to handle specific cases such as fragmentation, or the presence of a + * VLAN / DSA Tag. + * + * Each of these individual flows has different constraints, for example we + * can't hash fragmented packets on L4 data (else we would risk having packet + * re-ordering), so each classification flows masks the options with their + * supported ones. + * + */ +static int mvpp2_port_rss_hash_opts_set(struct mvpp2_port *port, int flow_type, + u16 requested_opts) +{ + struct mvpp2_cls_flow_entry fe; + struct mvpp2_cls_flow *flow; + int i, engine, flow_index; + u16 hash_opts; + + for (i = 0; i < MVPP2_N_FLOWS; i++) { + flow = mvpp2_cls_flow_get(i); + if (!flow) + return -EINVAL; + + if (flow->flow_type != flow_type) + continue; + + flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(port->id, + flow->flow_id); + + mvpp2_cls_flow_read(port->priv, flow_index, &fe); + + hash_opts = flow->supported_hash_opts & requested_opts; + + /* Use C3HB engine to access L4 infos. This adds L4 infos to the + * hash parameters + */ + if (hash_opts & MVPP22_CLS_HEK_L4_OPTS) + engine = MVPP22_CLS_ENGINE_C3HB; + else + engine = MVPP22_CLS_ENGINE_C3HA; + + if (mvpp2_flow_set_hek_fields(&fe, hash_opts)) + return -EINVAL; + + mvpp2_cls_flow_eng_set(&fe, engine); + + mvpp2_cls_flow_write(port->priv, &fe); + } + + return 0; +} + +u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe) +{ + u16 hash_opts = 0; + int n_fields, i, field; + + n_fields = mvpp2_cls_flow_hek_num_get(fe); + + for (i = 0; i < n_fields; i++) { + field = mvpp2_cls_flow_hek_get(fe, i); + + switch (field) { + case MVPP22_CLS_FIELD_MAC_DA: + hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA; + break; + case MVPP22_CLS_FIELD_VLAN: + hash_opts |= MVPP22_CLS_HEK_OPT_VLAN; + break; + case MVPP22_CLS_FIELD_L3_PROTO: + hash_opts |= MVPP22_CLS_HEK_OPT_L3_PROTO; + break; + case MVPP22_CLS_FIELD_IP4SA: + hash_opts |= MVPP22_CLS_HEK_OPT_IP4SA; + break; + case MVPP22_CLS_FIELD_IP4DA: + hash_opts |= MVPP22_CLS_HEK_OPT_IP4DA; + break; + case MVPP22_CLS_FIELD_IP6SA: + hash_opts |= MVPP22_CLS_HEK_OPT_IP6SA; + break; + case MVPP22_CLS_FIELD_IP6DA: + hash_opts |= MVPP22_CLS_HEK_OPT_IP6DA; + break; + case MVPP22_CLS_FIELD_L4SIP: + hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP; + break; + case MVPP22_CLS_FIELD_L4DIP: + hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP; + break; + default: + break; + } + } + return hash_opts; +} + +/* Returns the hash opts for this flow. There are several classifier flows + * for one traffic flow, this returns an aggregation of all configurations. 
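A worked fragment showing the masking described in the comment above, using the HEK definitions added to mvpp2_cls.h: a 5-tuple request on TCP_V4_FLOW is honoured by the non-fragmented classifier flows but degraded to a 2-tuple on the fragmented ones, whose supported_hash_opts lack the L4 bits:

    u16 requested = MVPP22_CLS_HEK_IP4_5T;  /* IP4SA | IP4DA | L4SIP | L4DIP */

    /* Non-fragmented flow: the full 5-tuple is supported and kept */
    u16 hash_nf   = MVPP22_CLS_HEK_IP4_5T & requested;  /* == IP4_5T */

    /* Fragmented flow: only the 2-tuple is supported, so the L4 port
     * fields are masked out and fragment reordering is avoided.
     */
    u16 hash_frag = MVPP22_CLS_HEK_IP4_2T & requested;  /* == IP4_2T */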
+ */ +static u16 mvpp2_port_rss_hash_opts_get(struct mvpp2_port *port, int flow_type) +{ + struct mvpp2_cls_flow_entry fe; + struct mvpp2_cls_flow *flow; + int i, flow_index; + u16 hash_opts = 0; + + for (i = 0; i < MVPP2_N_FLOWS; i++) { + flow = mvpp2_cls_flow_get(i); + if (!flow) + return 0; + + if (flow->flow_type != flow_type) + continue; + + flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(port->id, + flow->flow_id); + + mvpp2_cls_flow_read(port->priv, flow_index, &fe); + + hash_opts |= mvpp2_flow_get_hek_fields(&fe); + } + + return hash_opts; +} + +static void mvpp2_cls_port_init_flows(struct mvpp2 *priv) +{ + struct mvpp2_cls_flow *flow; + int i; + + for (i = 0; i < MVPP2_N_FLOWS; i++) { + flow = mvpp2_cls_flow_get(i); + if (!flow) + break; + + mvpp2_cls_flow_prs_init(priv, flow); + mvpp2_cls_flow_lkp_init(priv, flow); + mvpp2_cls_flow_init(priv, flow); + } +} + +static void mvpp2_cls_c2_write(struct mvpp2 *priv, + struct mvpp2_cls_c2_entry *c2) +{ + mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2->index); + + /* Write TCAM */ + mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA0, c2->tcam[0]); + mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA1, c2->tcam[1]); + mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA2, c2->tcam[2]); + mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA3, c2->tcam[3]); + mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA4, c2->tcam[4]); + + mvpp2_write(priv, MVPP22_CLS_C2_ACT, c2->act); + + mvpp2_write(priv, MVPP22_CLS_C2_ATTR0, c2->attr[0]); + mvpp2_write(priv, MVPP22_CLS_C2_ATTR1, c2->attr[1]); + mvpp2_write(priv, MVPP22_CLS_C2_ATTR2, c2->attr[2]); + mvpp2_write(priv, MVPP22_CLS_C2_ATTR3, c2->attr[3]); +} + +void mvpp2_cls_c2_read(struct mvpp2 *priv, int index, + struct mvpp2_cls_c2_entry *c2) +{ + mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, index); + + c2->index = index; + + c2->tcam[0] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA0); + c2->tcam[1] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA1); + c2->tcam[2] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA2); + c2->tcam[3] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA3); + c2->tcam[4] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA4); + + c2->act = mvpp2_read(priv, MVPP22_CLS_C2_ACT); + + c2->attr[0] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR0); + c2->attr[1] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR1); + c2->attr[2] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR2); + c2->attr[3] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR3); +} + +static void mvpp2_port_c2_cls_init(struct mvpp2_port *port) +{ + struct mvpp2_cls_c2_entry c2; + u8 qh, ql, pmap; + + memset(&c2, 0, sizeof(c2)); + + c2.index = MVPP22_CLS_C2_RSS_ENTRY(port->id); + + pmap = BIT(port->id); + c2.tcam[4] = MVPP22_CLS_C2_PORT_ID(pmap); + c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_PORT_ID(pmap)); + + /* Update RSS status after matching this entry */ + c2.act = MVPP22_CLS_C2_ACT_RSS_EN(MVPP22_C2_UPD_LOCK); + + /* Mark packet as "forwarded to software", needed for RSS */ + c2.act |= MVPP22_CLS_C2_ACT_FWD(MVPP22_C2_FWD_SW_LOCK); + + /* Configure the default rx queue : Update Queue Low and Queue High, but + * don't lock, since the rx queue selection might be overridden by RSS + */ + c2.act |= MVPP22_CLS_C2_ACT_QHIGH(MVPP22_C2_UPD) | + MVPP22_CLS_C2_ACT_QLOW(MVPP22_C2_UPD); + + qh = (port->first_rxq >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK; + ql = port->first_rxq & MVPP22_CLS_C2_ATTR0_QLOW_MASK; + + c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) | + MVPP22_CLS_C2_ATTR0_QLOW(ql); + + mvpp2_cls_c2_write(port->priv, &c2); +} + /* Classifier default initialization */ void mvpp2_cls_init(struct mvpp2 *priv) { @@ -61,6 +839,8 @@ void 
mvpp2_cls_init(struct mvpp2 *priv) le.way = 1; mvpp2_cls_lookup_write(priv, &le); } + + mvpp2_cls_port_init_flows(priv); } void mvpp2_cls_port_config(struct mvpp2_port *port) @@ -89,6 +869,47 @@ void mvpp2_cls_port_config(struct mvpp2_port *port) /* Update lookup ID table entry */ mvpp2_cls_lookup_write(port->priv, &le); + + mvpp2_port_c2_cls_init(port); +} + +u32 mvpp2_cls_c2_hit_count(struct mvpp2 *priv, int c2_index) +{ + mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2_index); + + return mvpp2_read(priv, MVPP22_CLS_C2_HIT_CTR); +} + +static void mvpp2_rss_port_c2_enable(struct mvpp2_port *port) +{ + struct mvpp2_cls_c2_entry c2; + + mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2); + + c2.attr[2] |= MVPP22_CLS_C2_ATTR2_RSS_EN; + + mvpp2_cls_c2_write(port->priv, &c2); +} + +static void mvpp2_rss_port_c2_disable(struct mvpp2_port *port) +{ + struct mvpp2_cls_c2_entry c2; + + mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2); + + c2.attr[2] &= ~MVPP22_CLS_C2_ATTR2_RSS_EN; + + mvpp2_cls_c2_write(port->priv, &c2); +} + +void mvpp22_rss_enable(struct mvpp2_port *port) +{ + mvpp2_rss_port_c2_enable(port); +} + +void mvpp22_rss_disable(struct mvpp2_port *port) +{ + mvpp2_rss_port_c2_disable(port); } /* Set CPU queue number for oversize packets */ @@ -107,7 +928,116 @@ void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port) mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val); } -void mvpp22_init_rss(struct mvpp2_port *port) +static inline u32 mvpp22_rxfh_indir(struct mvpp2_port *port, u32 rxq) +{ + int nrxqs, cpu, cpus = num_possible_cpus(); + + /* Number of RXQs per CPU */ + nrxqs = port->nrxqs / cpus; + + /* CPU that will handle this rx queue */ + cpu = rxq / nrxqs; + + if (!cpu_online(cpu)) + return port->first_rxq; + + /* Indirection to better distribute the paquets on the CPUs when + * configuring the RSS queues. 
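mvpp22_rxfh_indir() above does more than a plain modulo: it remaps each indirection-table entry so that consecutive entries land on RXQs served by different CPUs. A standalone model with illustrative numbers (4 online CPUs, 16 RXQs, first_rxq = 0; the cpu_online() fallback is omitted):

    #include <stdio.h>

    /* Model of the remapping formula from mvpp22_rxfh_indir() */
    static unsigned int rxfh_indir(unsigned int rxq, unsigned int nrxqs,
                                   unsigned int cpus, unsigned int first_rxq)
    {
        unsigned int per_cpu = nrxqs / cpus;    /* RXQs per CPU */

        return first_rxq + ((rxq * per_cpu + rxq / cpus) % nrxqs);
    }

    int main(void)
    {
        for (unsigned int i = 0; i < 8; i++)
            printf("entry %u -> rxq %u\n", i, rxfh_indir(i, 16, 4, 0));
        /* Prints 0, 4, 8, 12, 1, 5, 9, 13: successive table entries
         * rotate across the per-CPU queue groups instead of filling
         * one CPU's queues first.
         */
        return 0;
    }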
+ */ + return port->first_rxq + ((rxq * nrxqs + rxq / cpus) % port->nrxqs); +} + +void mvpp22_rss_fill_table(struct mvpp2_port *port, u32 table) +{ + struct mvpp2 *priv = port->priv; + int i; + + for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) { + u32 sel = MVPP22_RSS_INDEX_TABLE(table) | + MVPP22_RSS_INDEX_TABLE_ENTRY(i); + mvpp2_write(priv, MVPP22_RSS_INDEX, sel); + + mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY, + mvpp22_rxfh_indir(port, port->indir[i])); + } +} + +int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info) +{ + u16 hash_opts = 0; + + switch (info->flow_type) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + case TCP_V6_FLOW: + case UDP_V6_FLOW: + if (info->data & RXH_L4_B_0_1) + hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP; + if (info->data & RXH_L4_B_2_3) + hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP; + /* Fallthrough */ + case IPV4_FLOW: + case IPV6_FLOW: + if (info->data & RXH_L2DA) + hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA; + if (info->data & RXH_VLAN) + hash_opts |= MVPP22_CLS_HEK_OPT_VLAN; + if (info->data & RXH_L3_PROTO) + hash_opts |= MVPP22_CLS_HEK_OPT_L3_PROTO; + if (info->data & RXH_IP_SRC) + hash_opts |= (MVPP22_CLS_HEK_OPT_IP4SA | + MVPP22_CLS_HEK_OPT_IP6SA); + if (info->data & RXH_IP_DST) + hash_opts |= (MVPP22_CLS_HEK_OPT_IP4DA | + MVPP22_CLS_HEK_OPT_IP6DA); + break; + default: return -EOPNOTSUPP; + } + + return mvpp2_port_rss_hash_opts_set(port, info->flow_type, hash_opts); +} + +int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info) +{ + unsigned long hash_opts; + int i; + + hash_opts = mvpp2_port_rss_hash_opts_get(port, info->flow_type); + info->data = 0; + + for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) { + switch (BIT(i)) { + case MVPP22_CLS_HEK_OPT_MAC_DA: + info->data |= RXH_L2DA; + break; + case MVPP22_CLS_HEK_OPT_VLAN: + info->data |= RXH_VLAN; + break; + case MVPP22_CLS_HEK_OPT_L3_PROTO: + info->data |= RXH_L3_PROTO; + break; + case MVPP22_CLS_HEK_OPT_IP4SA: + case MVPP22_CLS_HEK_OPT_IP6SA: + info->data |= RXH_IP_SRC; + break; + case MVPP22_CLS_HEK_OPT_IP4DA: + case MVPP22_CLS_HEK_OPT_IP6DA: + info->data |= RXH_IP_DST; + break; + case MVPP22_CLS_HEK_OPT_L4SIP: + info->data |= RXH_L4_B_0_1; + break; + case MVPP22_CLS_HEK_OPT_L4DIP: + info->data |= RXH_L4_B_2_3; + break; + default: + return -EINVAL; + } + } + return 0; +} + +void mvpp22_rss_port_init(struct mvpp2_port *port) { struct mvpp2 *priv = port->priv; int i; @@ -115,27 +1045,30 @@ void mvpp22_init_rss(struct mvpp2_port *port) /* Set the table width: replace the whole classifier Rx queue number * with the ones configured in RSS table entries. */ - mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(0)); + mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(port->id)); mvpp2_write(priv, MVPP22_RSS_WIDTH, 8); - /* Loop through the classifier Rx Queues and map them to a RSS table. - * Map them all to the first table (0) by default. + /* The default RxQ is used as a key to select the RSS table to use. + * We use one RSS table per port. */ - for (i = 0; i < MVPP2_CLS_RX_QUEUES; i++) { - mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_QUEUE(i)); - mvpp2_write(priv, MVPP22_RSS_TABLE, - MVPP22_RSS_TABLE_POINTER(0)); - } + mvpp2_write(priv, MVPP22_RSS_INDEX, + MVPP22_RSS_INDEX_QUEUE(port->first_rxq)); + mvpp2_write(priv, MVPP22_RXQ2RSS_TABLE, + MVPP22_RSS_TABLE_POINTER(port->id)); /* Configure the first table to evenly distribute the packets across - * real Rx Queues. The table entries map a hash to an port Rx Queue. + * real Rx Queues. 
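mvpp22_rss_port_init() below seeds port->indir[] with ethtool_rxfh_indir_default(), the stock helper from <linux/ethtool.h>, so each per-port table starts as a plain round-robin over the port's RXQs before mvpp22_rxfh_indir() spreads it across CPUs. For reference, the helper reduces to a modulo:

    /* From <linux/ethtool.h>; shown here for reference */
    static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
    {
        return index % n_rx_rings;
    }

The default hash fields installed by the mvpp2_port_rss_hash_opts_set() calls can later be inspected or changed from userspace, e.g. "ethtool -n ethX rx-flow-hash tcp4" and "ethtool -N ethX rx-flow-hash tcp4 sdfn" (src/dst IP plus both L4 port halves), which ultimately land in the mvpp2_ethtool_rxfh_get()/_set() handlers above.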
The table entries map a hash to a port Rx Queue. */ - for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) { - u32 sel = MVPP22_RSS_INDEX_TABLE(0) | - MVPP22_RSS_INDEX_TABLE_ENTRY(i); - mvpp2_write(priv, MVPP22_RSS_INDEX, sel); + for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) + port->indir[i] = ethtool_rxfh_indir_default(i, port->nrxqs); - mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY, i % port->nrxqs); - } + mvpp22_rss_fill_table(port, port->id); + /* Configure default flows */ + mvpp2_port_rss_hash_opts_set(port, IPV4_FLOW, MVPP22_CLS_HEK_IP4_2T); + mvpp2_port_rss_hash_opts_set(port, IPV6_FLOW, MVPP22_CLS_HEK_IP6_2T); + mvpp2_port_rss_hash_opts_set(port, TCP_V4_FLOW, MVPP22_CLS_HEK_IP4_5T); + mvpp2_port_rss_hash_opts_set(port, TCP_V6_FLOW, MVPP22_CLS_HEK_IP6_5T); + mvpp2_port_rss_hash_opts_set(port, UDP_V4_FLOW, MVPP22_CLS_HEK_IP4_5T); + mvpp2_port_rss_hash_opts_set(port, UDP_V6_FLOW, MVPP22_CLS_HEK_IP6_5T); } diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h index 8e1d7f9ffa0b..089f05f29891 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h @@ -1,27 +1,187 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * RSS and Classifier definitions for Marvell PPv2 Network Controller * * Copyright (C) 2014 Marvell * * Marcin Wojtas <mw@semihalf.com> - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. */ #ifndef _MVPP2_CLS_H_ #define _MVPP2_CLS_H_ +#include "mvpp2.h" +#include "mvpp2_prs.h" + /* Classifier constants */ #define MVPP2_CLS_FLOWS_TBL_SIZE 512 #define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3 #define MVPP2_CLS_LKP_TBL_SIZE 64 #define MVPP2_CLS_RX_QUEUES 256 -/* RSS constants */ -#define MVPP22_RSS_TABLE_ENTRIES 32 +/* Classifier flow constants */ + +#define MVPP2_FLOW_N_FIELDS 4 + +enum mvpp2_cls_engine { + MVPP22_CLS_ENGINE_C2 = 1, + MVPP22_CLS_ENGINE_C3A, + MVPP22_CLS_ENGINE_C3B, + MVPP22_CLS_ENGINE_C4, + MVPP22_CLS_ENGINE_C3HA = 6, + MVPP22_CLS_ENGINE_C3HB = 7, +}; + +#define MVPP22_CLS_HEK_OPT_MAC_DA BIT(0) +#define MVPP22_CLS_HEK_OPT_VLAN BIT(1) +#define MVPP22_CLS_HEK_OPT_L3_PROTO BIT(2) +#define MVPP22_CLS_HEK_OPT_IP4SA BIT(3) +#define MVPP22_CLS_HEK_OPT_IP4DA BIT(4) +#define MVPP22_CLS_HEK_OPT_IP6SA BIT(5) +#define MVPP22_CLS_HEK_OPT_IP6DA BIT(6) +#define MVPP22_CLS_HEK_OPT_L4SIP BIT(7) +#define MVPP22_CLS_HEK_OPT_L4DIP BIT(8) +#define MVPP22_CLS_HEK_N_FIELDS 9 + +#define MVPP22_CLS_HEK_L4_OPTS (MVPP22_CLS_HEK_OPT_L4SIP | \ + MVPP22_CLS_HEK_OPT_L4DIP) + +#define MVPP22_CLS_HEK_IP4_2T (MVPP22_CLS_HEK_OPT_IP4SA | \ + MVPP22_CLS_HEK_OPT_IP4DA) + +#define MVPP22_CLS_HEK_IP6_2T (MVPP22_CLS_HEK_OPT_IP6SA | \ + MVPP22_CLS_HEK_OPT_IP6DA) + +/* The fifth tuple in "5T" is the L4_Info field */ +#define MVPP22_CLS_HEK_IP4_5T (MVPP22_CLS_HEK_IP4_2T | \ + MVPP22_CLS_HEK_L4_OPTS) + +#define MVPP22_CLS_HEK_IP6_5T (MVPP22_CLS_HEK_IP6_2T | \ + MVPP22_CLS_HEK_L4_OPTS) + +enum mvpp2_cls_field_id { + MVPP22_CLS_FIELD_MAC_DA = 0x03, + MVPP22_CLS_FIELD_VLAN = 0x06, + MVPP22_CLS_FIELD_L3_PROTO = 0x0f, + MVPP22_CLS_FIELD_IP4SA = 0x10, + MVPP22_CLS_FIELD_IP4DA = 0x11, + MVPP22_CLS_FIELD_IP6SA = 0x17, + MVPP22_CLS_FIELD_IP6DA = 0x1a, + MVPP22_CLS_FIELD_L4SIP = 0x1d, + MVPP22_CLS_FIELD_L4DIP = 0x1e, +}; + +enum mvpp2_cls_flow_seq { + MVPP2_CLS_FLOW_SEQ_NORMAL = 0, + MVPP2_CLS_FLOW_SEQ_FIRST1, + MVPP2_CLS_FLOW_SEQ_FIRST2, + MVPP2_CLS_FLOW_SEQ_LAST, + 
MVPP2_CLS_FLOW_SEQ_MIDDLE +}; + +/* Classifier C2 engine constants */ +#define MVPP22_CLS_C2_TCAM_EN(data) ((data) << 16) + +enum mvpp22_cls_c2_action { + MVPP22_C2_NO_UPD = 0, + MVPP22_C2_NO_UPD_LOCK, + MVPP22_C2_UPD, + MVPP22_C2_UPD_LOCK, +}; + +enum mvpp22_cls_c2_fwd_action { + MVPP22_C2_FWD_NO_UPD = 0, + MVPP22_C2_FWD_NO_UPD_LOCK, + MVPP22_C2_FWD_SW, + MVPP22_C2_FWD_SW_LOCK, + MVPP22_C2_FWD_HW, + MVPP22_C2_FWD_HW_LOCK, + MVPP22_C2_FWD_HW_LOW_LAT, + MVPP22_C2_FWD_HW_LOW_LAT_LOCK, +}; + +#define MVPP2_CLS_C2_TCAM_WORDS 5 +#define MVPP2_CLS_C2_ATTR_WORDS 5 + +struct mvpp2_cls_c2_entry { + u32 index; + u32 tcam[MVPP2_CLS_C2_TCAM_WORDS]; + u32 act; + u32 attr[MVPP2_CLS_C2_ATTR_WORDS]; +}; + +/* Classifier C2 engine entries */ +#define MVPP22_CLS_C2_RSS_ENTRY(port) (port) +#define MVPP22_CLS_C2_N_ENTRIES MVPP2_MAX_PORTS +/* RSS flow entries in the flow table. We have 2 entries per port for RSS. + * + * The first performs a lookup using the C2 TCAM engine, to tag the + * packet for software forwarding (needed for RSS), enable or disable RSS, and + * assign the default rx queue. + * + * The second configures the hash generation, by specifying which fields of the + * packet header are used to generate the hash, and specifies the relevant hash + * engine to use. + */ +#define MVPP22_RSS_FLOW_C2_OFFS 0 +#define MVPP22_RSS_FLOW_HASH_OFFS 1 +#define MVPP22_RSS_FLOW_SIZE (MVPP22_RSS_FLOW_HASH_OFFS + 1) + +#define MVPP22_RSS_FLOW_C2(port) ((port) * MVPP22_RSS_FLOW_SIZE + \ + MVPP22_RSS_FLOW_C2_OFFS) +#define MVPP22_RSS_FLOW_HASH(port) ((port) * MVPP22_RSS_FLOW_SIZE + \ + MVPP22_RSS_FLOW_HASH_OFFS) +#define MVPP22_RSS_FLOW_FIRST(port) MVPP22_RSS_FLOW_C2(port) + +/* Packet flow ID */ +enum mvpp2_prs_flow { + MVPP2_FL_START = 8, + MVPP2_FL_IP4_TCP_NF_UNTAG = MVPP2_FL_START, + MVPP2_FL_IP4_UDP_NF_UNTAG, + MVPP2_FL_IP4_TCP_NF_TAG, + MVPP2_FL_IP4_UDP_NF_TAG, + MVPP2_FL_IP6_TCP_NF_UNTAG, + MVPP2_FL_IP6_UDP_NF_UNTAG, + MVPP2_FL_IP6_TCP_NF_TAG, + MVPP2_FL_IP6_UDP_NF_TAG, + MVPP2_FL_IP4_TCP_FRAG_UNTAG, + MVPP2_FL_IP4_UDP_FRAG_UNTAG, + MVPP2_FL_IP4_TCP_FRAG_TAG, + MVPP2_FL_IP4_UDP_FRAG_TAG, + MVPP2_FL_IP6_TCP_FRAG_UNTAG, + MVPP2_FL_IP6_UDP_FRAG_UNTAG, + MVPP2_FL_IP6_TCP_FRAG_TAG, + MVPP2_FL_IP6_UDP_FRAG_TAG, + MVPP2_FL_IP4_UNTAG, /* non-TCP, non-UDP, same for below */ + MVPP2_FL_IP4_TAG, + MVPP2_FL_IP6_UNTAG, + MVPP2_FL_IP6_TAG, + MVPP2_FL_NON_IP_UNTAG, + MVPP2_FL_NON_IP_TAG, + MVPP2_FL_LAST, +}; + +struct mvpp2_cls_flow { + /* The L2-L4 traffic flow type */ + int flow_type; + + /* The first id in the flow table for this flow */ + u16 flow_id; + + /* The supported HEK fields for this flow */ + u16 supported_hash_opts; + + /* The Header Parser result_info that matches this flow */ + struct mvpp2_prs_result_info prs_ri; +}; + +#define MVPP2_N_FLOWS 52 + +#define MVPP2_ENTRIES_PER_FLOW (MVPP2_MAX_PORTS + 1) +#define MVPP2_FLOW_C2_ENTRY(id) ((id) * MVPP2_ENTRIES_PER_FLOW) +#define MVPP2_PORT_FLOW_HASH_ENTRY(port, id) ((id) * MVPP2_ENTRIES_PER_FLOW + \ + (port) + 1) struct mvpp2_cls_flow_entry { u32 index; u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS]; @@ -33,7 +193,15 @@ struct mvpp2_cls_lookup_entry { u32 data; }; -void mvpp22_init_rss(struct mvpp2_port *port); +void mvpp22_rss_fill_table(struct mvpp2_port *port, u32 table); + +void mvpp22_rss_port_init(struct mvpp2_port *port); + +void mvpp22_rss_enable(struct mvpp2_port *port); +void mvpp22_rss_disable(struct mvpp2_port *port); + +int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info); +int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, 
struct ethtool_rxnfc *info); void mvpp2_cls_init(struct mvpp2 *priv); @@ -41,4 +209,25 @@ void mvpp2_cls_port_config(struct mvpp2_port *port); void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port); +int mvpp2_cls_flow_eng_get(struct mvpp2_cls_flow_entry *fe); + +u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe); + +struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow); + +u32 mvpp2_cls_flow_hits(struct mvpp2 *priv, int index); + +void mvpp2_cls_flow_read(struct mvpp2 *priv, int index, + struct mvpp2_cls_flow_entry *fe); + +u32 mvpp2_cls_lookup_hits(struct mvpp2 *priv, int index); + +void mvpp2_cls_lookup_read(struct mvpp2 *priv, int lkpid, int way, + struct mvpp2_cls_lookup_entry *le); + +u32 mvpp2_cls_c2_hit_count(struct mvpp2 *priv, int c2_index); + +void mvpp2_cls_c2_read(struct mvpp2 *priv, int index, + struct mvpp2_cls_c2_entry *c2); + #endif diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c new file mode 100644 index 000000000000..f9744a61e5dd --- /dev/null +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c @@ -0,0 +1,703 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for Marvell PPv2 network controller for Armada 375 SoC. + * + * Copyright (C) 2018 Marvell + */ + +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/debugfs.h> + +#include "mvpp2.h" +#include "mvpp2_prs.h" +#include "mvpp2_cls.h" + +struct mvpp2_dbgfs_prs_entry { + int tid; + struct mvpp2 *priv; +}; + +struct mvpp2_dbgfs_flow_entry { + int flow; + struct mvpp2 *priv; +}; + +struct mvpp2_dbgfs_port_flow_entry { + struct mvpp2_port *port; + struct mvpp2_dbgfs_flow_entry *dbg_fe; +}; + +static int mvpp2_dbgfs_flow_flt_hits_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_flow_entry *entry = s->private; + int id = MVPP2_FLOW_C2_ENTRY(entry->flow); + + u32 hits = mvpp2_cls_flow_hits(entry->priv, id); + + seq_printf(s, "%u\n", hits); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_flt_hits); + +static int mvpp2_dbgfs_flow_dec_hits_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_flow_entry *entry = s->private; + + u32 hits = mvpp2_cls_lookup_hits(entry->priv, entry->flow); + + seq_printf(s, "%u\n", hits); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_dec_hits); + +static int mvpp2_dbgfs_flow_type_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_flow_entry *entry = s->private; + struct mvpp2_cls_flow *f; + const char *flow_name; + + f = mvpp2_cls_flow_get(entry->flow); + if (!f) + return -EINVAL; + + switch (f->flow_type) { + case IPV4_FLOW: + flow_name = "ipv4"; + break; + case IPV6_FLOW: + flow_name = "ipv6"; + break; + case TCP_V4_FLOW: + flow_name = "tcp4"; + break; + case TCP_V6_FLOW: + flow_name = "tcp6"; + break; + case UDP_V4_FLOW: + flow_name = "udp4"; + break; + case UDP_V6_FLOW: + flow_name = "udp6"; + break; + default: + flow_name = "other"; + } + + seq_printf(s, "%s\n", flow_name); + + return 0; +} + +static int mvpp2_dbgfs_flow_type_open(struct inode *inode, struct file *file) +{ + return single_open(file, mvpp2_dbgfs_flow_type_show, inode->i_private); +} + +static int mvpp2_dbgfs_flow_type_release(struct inode *inode, struct file *file) +{ + struct seq_file *seq = file->private_data; + struct mvpp2_dbgfs_flow_entry *flow_entry = seq->private; + + kfree(flow_entry); + return single_release(inode, file); +} + +static const struct file_operations mvpp2_dbgfs_flow_type_fops = { + .open = mvpp2_dbgfs_flow_type_open, + .read = seq_read, + 
.release = mvpp2_dbgfs_flow_type_release, +}; + +static int mvpp2_dbgfs_flow_id_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_flow_entry *entry = s->private; + struct mvpp2_cls_flow *f; + + f = mvpp2_cls_flow_get(entry->flow); + if (!f) + return -EINVAL; + + seq_printf(s, "%d\n", f->flow_id); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_id); + +static int mvpp2_dbgfs_port_flow_hash_opt_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_port_flow_entry *entry = s->private; + struct mvpp2_port *port = entry->port; + struct mvpp2_cls_flow_entry fe; + struct mvpp2_cls_flow *f; + int flow_index; + u16 hash_opts; + + f = mvpp2_cls_flow_get(entry->dbg_fe->flow); + if (!f) + return -EINVAL; + + flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(entry->port->id, f->flow_id); + + mvpp2_cls_flow_read(port->priv, flow_index, &fe); + + hash_opts = mvpp2_flow_get_hek_fields(&fe); + + seq_printf(s, "0x%04x\n", hash_opts); + + return 0; +} + +static int mvpp2_dbgfs_port_flow_hash_opt_open(struct inode *inode, + struct file *file) +{ + return single_open(file, mvpp2_dbgfs_port_flow_hash_opt_show, + inode->i_private); +} + +static int mvpp2_dbgfs_port_flow_hash_opt_release(struct inode *inode, + struct file *file) +{ + struct seq_file *seq = file->private_data; + struct mvpp2_dbgfs_port_flow_entry *flow_entry = seq->private; + + kfree(flow_entry); + return single_release(inode, file); +} + +static const struct file_operations mvpp2_dbgfs_port_flow_hash_opt_fops = { + .open = mvpp2_dbgfs_port_flow_hash_opt_open, + .read = seq_read, + .release = mvpp2_dbgfs_port_flow_hash_opt_release, +}; + +static int mvpp2_dbgfs_port_flow_engine_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_port_flow_entry *entry = s->private; + struct mvpp2_port *port = entry->port; + struct mvpp2_cls_flow_entry fe; + struct mvpp2_cls_flow *f; + int flow_index, engine; + + f = mvpp2_cls_flow_get(entry->dbg_fe->flow); + if (!f) + return -EINVAL; + + flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(entry->port->id, f->flow_id); + + mvpp2_cls_flow_read(port->priv, flow_index, &fe); + + engine = mvpp2_cls_flow_eng_get(&fe); + + seq_printf(s, "%d\n", engine); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_flow_engine); + +static int mvpp2_dbgfs_flow_c2_hits_show(struct seq_file *s, void *unused) +{ + struct mvpp2_port *port = s->private; + u32 hits; + + hits = mvpp2_cls_c2_hit_count(port->priv, + MVPP22_CLS_C2_RSS_ENTRY(port->id)); + + seq_printf(s, "%u\n", hits); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_c2_hits); + +static int mvpp2_dbgfs_flow_c2_rxq_show(struct seq_file *s, void *unused) +{ + struct mvpp2_port *port = s->private; + struct mvpp2_cls_c2_entry c2; + u8 qh, ql; + + mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2); + + qh = (c2.attr[0] >> MVPP22_CLS_C2_ATTR0_QHIGH_OFFS) & + MVPP22_CLS_C2_ATTR0_QHIGH_MASK; + + ql = (c2.attr[0] >> MVPP22_CLS_C2_ATTR0_QLOW_OFFS) & + MVPP22_CLS_C2_ATTR0_QLOW_MASK; + + seq_printf(s, "%d\n", (qh << 3 | ql)); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_c2_rxq); + +static int mvpp2_dbgfs_flow_c2_enable_show(struct seq_file *s, void *unused) +{ + struct mvpp2_port *port = s->private; + struct mvpp2_cls_c2_entry c2; + int enabled; + + mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2); + + enabled = !!(c2.attr[2] & MVPP22_CLS_C2_ATTR2_RSS_EN); + + seq_printf(s, "%d\n", enabled); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_c2_enable); + +static int 
mvpp2_dbgfs_port_vid_show(struct seq_file *s, void *unused) +{ + struct mvpp2_port *port = s->private; + unsigned char byte[2], enable[2]; + struct mvpp2 *priv = port->priv; + struct mvpp2_prs_entry pe; + unsigned long pmap; + u16 rvid; + int tid; + + for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id); + tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) { + mvpp2_prs_init_from_hw(priv, &pe, tid); + + pmap = mvpp2_prs_tcam_port_map_get(&pe); + + if (!priv->prs_shadow[tid].valid) + continue; + + if (!test_bit(port->id, &pmap)) + continue; + + mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]); + mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]); + + rvid = ((byte[0] & 0xf) << 8) + byte[1]; + + seq_printf(s, "%u\n", rvid); + } + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_vid); + +static int mvpp2_dbgfs_port_parser_show(struct seq_file *s, void *unused) +{ + struct mvpp2_port *port = s->private; + struct mvpp2 *priv = port->priv; + struct mvpp2_prs_entry pe; + unsigned long pmap; + int i; + + for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++) { + mvpp2_prs_init_from_hw(port->priv, &pe, i); + + pmap = mvpp2_prs_tcam_port_map_get(&pe); + if (priv->prs_shadow[i].valid && test_bit(port->id, &pmap)) + seq_printf(s, "%03d\n", i); + } + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_parser); + +static int mvpp2_dbgfs_filter_show(struct seq_file *s, void *unused) +{ + struct mvpp2_port *port = s->private; + struct mvpp2 *priv = port->priv; + struct mvpp2_prs_entry pe; + unsigned long pmap; + int index, tid; + + for (tid = MVPP2_PE_MAC_RANGE_START; + tid <= MVPP2_PE_MAC_RANGE_END; tid++) { + unsigned char da[ETH_ALEN], da_mask[ETH_ALEN]; + + if (!priv->prs_shadow[tid].valid || + priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC || + priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF) + continue; + + mvpp2_prs_init_from_hw(priv, &pe, tid); + + pmap = mvpp2_prs_tcam_port_map_get(&pe); + + /* We only want entries active on this port */ + if (!test_bit(port->id, &pmap)) + continue; + + /* Read mac addr from entry */ + for (index = 0; index < ETH_ALEN; index++) + mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index], + &da_mask[index]); + + seq_printf(s, "%pM\n", da); + } + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_filter); + +static int mvpp2_dbgfs_prs_lu_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_prs_entry *entry = s->private; + struct mvpp2 *priv = entry->priv; + + seq_printf(s, "%x\n", priv->prs_shadow[entry->tid].lu); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_lu); + +static int mvpp2_dbgfs_prs_pmap_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_prs_entry *entry = s->private; + struct mvpp2_prs_entry pe; + unsigned int pmap; + + mvpp2_prs_init_from_hw(entry->priv, &pe, entry->tid); + + pmap = mvpp2_prs_tcam_port_map_get(&pe); + pmap &= MVPP2_PRS_PORT_MASK; + + seq_printf(s, "%02x\n", pmap); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_pmap); + +static int mvpp2_dbgfs_prs_ai_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_prs_entry *entry = s->private; + struct mvpp2_prs_entry pe; + unsigned char ai, ai_mask; + + mvpp2_prs_init_from_hw(entry->priv, &pe, entry->tid); + + ai = pe.tcam[MVPP2_PRS_TCAM_AI_WORD] & MVPP2_PRS_AI_MASK; + ai_mask = (pe.tcam[MVPP2_PRS_TCAM_AI_WORD] >> 16) & MVPP2_PRS_AI_MASK; + + seq_printf(s, "%02x %02x\n", ai, ai_mask); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_ai); + +static int mvpp2_dbgfs_prs_hdata_show(struct seq_file *s, void *unused) +{ + struct 
mvpp2_dbgfs_prs_entry *entry = s->private; + struct mvpp2_prs_entry pe; + unsigned char data[8], mask[8]; + int i; + + mvpp2_prs_init_from_hw(entry->priv, &pe, entry->tid); + + for (i = 0; i < 8; i++) + mvpp2_prs_tcam_data_byte_get(&pe, i, &data[i], &mask[i]); + + seq_printf(s, "%*phN %*phN\n", 8, data, 8, mask); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_hdata); + +static int mvpp2_dbgfs_prs_sram_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_prs_entry *entry = s->private; + struct mvpp2_prs_entry pe; + + mvpp2_prs_init_from_hw(entry->priv, &pe, entry->tid); + + seq_printf(s, "%*phN\n", 14, pe.sram); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_sram); + +static int mvpp2_dbgfs_prs_hits_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_prs_entry *entry = s->private; + int val; + + val = mvpp2_prs_hits(entry->priv, entry->tid); + if (val < 0) + return val; + + seq_printf(s, "%d\n", val); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_hits); + +static int mvpp2_dbgfs_prs_valid_show(struct seq_file *s, void *unused) +{ + struct mvpp2_dbgfs_prs_entry *entry = s->private; + struct mvpp2 *priv = entry->priv; + int tid = entry->tid; + + seq_printf(s, "%d\n", priv->prs_shadow[tid].valid ? 1 : 0); + + return 0; +} + +static int mvpp2_dbgfs_prs_valid_open(struct inode *inode, struct file *file) +{ + return single_open(file, mvpp2_dbgfs_prs_valid_show, inode->i_private); +} + +static int mvpp2_dbgfs_prs_valid_release(struct inode *inode, struct file *file) +{ + struct seq_file *seq = file->private_data; + struct mvpp2_dbgfs_prs_entry *entry = seq->private; + + kfree(entry); + return single_release(inode, file); +} + +static const struct file_operations mvpp2_dbgfs_prs_valid_fops = { + .open = mvpp2_dbgfs_prs_valid_open, + .read = seq_read, + .release = mvpp2_dbgfs_prs_valid_release, +}; + +static int mvpp2_dbgfs_flow_port_init(struct dentry *parent, + struct mvpp2_port *port, + struct mvpp2_dbgfs_flow_entry *entry) +{ + struct mvpp2_dbgfs_port_flow_entry *port_entry; + struct dentry *port_dir; + + port_dir = debugfs_create_dir(port->dev->name, parent); + if (IS_ERR(port_dir)) + return PTR_ERR(port_dir); + + /* This will be freed by 'hash_opts' release op */ + port_entry = kmalloc(sizeof(*port_entry), GFP_KERNEL); + if (!port_entry) + return -ENOMEM; + + port_entry->port = port; + port_entry->dbg_fe = entry; + + debugfs_create_file("hash_opts", 0444, port_dir, port_entry, + &mvpp2_dbgfs_port_flow_hash_opt_fops); + + debugfs_create_file("engine", 0444, port_dir, port_entry, + &mvpp2_dbgfs_port_flow_engine_fops); + + return 0; +} + +static int mvpp2_dbgfs_flow_entry_init(struct dentry *parent, + struct mvpp2 *priv, int flow) +{ + struct mvpp2_dbgfs_flow_entry *entry; + struct dentry *flow_entry_dir; + char flow_entry_name[10]; + int i, ret; + + sprintf(flow_entry_name, "%02d", flow); + + flow_entry_dir = debugfs_create_dir(flow_entry_name, parent); + if (!flow_entry_dir) + return -ENOMEM; + + /* This will be freed by 'type' release op */ + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + entry->flow = flow; + entry->priv = priv; + + debugfs_create_file("flow_hits", 0444, flow_entry_dir, entry, + &mvpp2_dbgfs_flow_flt_hits_fops); + + debugfs_create_file("dec_hits", 0444, flow_entry_dir, entry, + &mvpp2_dbgfs_flow_dec_hits_fops); + + debugfs_create_file("type", 0444, flow_entry_dir, entry, + &mvpp2_dbgfs_flow_type_fops); + + debugfs_create_file("id", 0444, flow_entry_dir, entry, + 
&mvpp2_dbgfs_flow_id_fops); + + /* Create entry for each port */ + for (i = 0; i < priv->port_count; i++) { + ret = mvpp2_dbgfs_flow_port_init(flow_entry_dir, + priv->port_list[i], entry); + if (ret) + return ret; + } + return 0; +} + +static int mvpp2_dbgfs_flow_init(struct dentry *parent, struct mvpp2 *priv) +{ + struct dentry *flow_dir; + int i, ret; + + flow_dir = debugfs_create_dir("flows", parent); + if (!flow_dir) + return -ENOMEM; + + for (i = 0; i < MVPP2_N_FLOWS; i++) { + ret = mvpp2_dbgfs_flow_entry_init(flow_dir, priv, i); + if (ret) + return ret; + } + + return 0; +} + +static int mvpp2_dbgfs_prs_entry_init(struct dentry *parent, + struct mvpp2 *priv, int tid) +{ + struct mvpp2_dbgfs_prs_entry *entry; + struct dentry *prs_entry_dir; + char prs_entry_name[10]; + + if (tid >= MVPP2_PRS_TCAM_SRAM_SIZE) + return -EINVAL; + + sprintf(prs_entry_name, "%03d", tid); + + prs_entry_dir = debugfs_create_dir(prs_entry_name, parent); + if (!prs_entry_dir) + return -ENOMEM; + + /* The 'valid' entry's ops will free that */ + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + entry->tid = tid; + entry->priv = priv; + + /* Create each attr */ + debugfs_create_file("sram", 0444, prs_entry_dir, entry, + &mvpp2_dbgfs_prs_sram_fops); + + debugfs_create_file("valid", 0644, prs_entry_dir, entry, + &mvpp2_dbgfs_prs_valid_fops); + + debugfs_create_file("lookup_id", 0644, prs_entry_dir, entry, + &mvpp2_dbgfs_prs_lu_fops); + + debugfs_create_file("ai", 0644, prs_entry_dir, entry, + &mvpp2_dbgfs_prs_ai_fops); + + debugfs_create_file("header_data", 0644, prs_entry_dir, entry, + &mvpp2_dbgfs_prs_hdata_fops); + + debugfs_create_file("hits", 0444, prs_entry_dir, entry, + &mvpp2_dbgfs_prs_hits_fops); + + return 0; +} + +static int mvpp2_dbgfs_prs_init(struct dentry *parent, struct mvpp2 *priv) +{ + struct dentry *prs_dir; + int i, ret; + + prs_dir = debugfs_create_dir("parser", parent); + if (!prs_dir) + return -ENOMEM; + + for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++) { + ret = mvpp2_dbgfs_prs_entry_init(prs_dir, priv, i); + if (ret) + return ret; + } + + return 0; +} + +static int mvpp2_dbgfs_port_init(struct dentry *parent, + struct mvpp2_port *port) +{ + struct dentry *port_dir; + + port_dir = debugfs_create_dir(port->dev->name, parent); + if (IS_ERR(port_dir)) + return PTR_ERR(port_dir); + + debugfs_create_file("parser_entries", 0444, port_dir, port, + &mvpp2_dbgfs_port_parser_fops); + + debugfs_create_file("mac_filter", 0444, port_dir, port, + &mvpp2_dbgfs_filter_fops); + + debugfs_create_file("vid_filter", 0444, port_dir, port, + &mvpp2_dbgfs_port_vid_fops); + + debugfs_create_file("c2_hits", 0444, port_dir, port, + &mvpp2_dbgfs_flow_c2_hits_fops); + + debugfs_create_file("default_rxq", 0444, port_dir, port, + &mvpp2_dbgfs_flow_c2_rxq_fops); + + debugfs_create_file("rss_enable", 0444, port_dir, port, + &mvpp2_dbgfs_flow_c2_enable_fops); + + return 0; +} + +void mvpp2_dbgfs_cleanup(struct mvpp2 *priv) +{ + debugfs_remove_recursive(priv->dbgfs_dir); +} + +void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name) +{ + struct dentry *mvpp2_dir, *mvpp2_root; + int ret, i; + + mvpp2_root = debugfs_lookup(MVPP2_DRIVER_NAME, NULL); + if (!mvpp2_root) { + mvpp2_root = debugfs_create_dir(MVPP2_DRIVER_NAME, NULL); + if (IS_ERR(mvpp2_root)) + return; + } + + mvpp2_dir = debugfs_create_dir(name, mvpp2_root); + if (IS_ERR(mvpp2_dir)) + return; + + priv->dbgfs_dir = mvpp2_dir; + + ret = mvpp2_dbgfs_prs_init(mvpp2_dir, priv); + if (ret) + goto err; + + for (i = 0; i < priv->port_count; 
i++) { + ret = mvpp2_dbgfs_port_init(mvpp2_dir, priv->port_list[i]); + if (ret) + goto err; + } + + ret = mvpp2_dbgfs_flow_init(mvpp2_dir, priv); + if (ret) + goto err; + + return; +err: + mvpp2_dbgfs_cleanup(priv); +} diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 0319ed9ef8b8..32d785b616e1 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -1,13 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Driver for Marvell PPv2 network controller for Armada 375 SoC. * * Copyright (C) 2014 Marvell * * Marcin Wojtas <mw@semihalf.com> - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. */ #include <linux/acpi.h> @@ -66,7 +63,7 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode, #define MVPP2_QDIST_SINGLE_MODE 0 #define MVPP2_QDIST_MULTI_MODE 1 -static int queue_mode = MVPP2_QDIST_SINGLE_MODE; +static int queue_mode = MVPP2_QDIST_MULTI_MODE; module_param(queue_mode, int, 0444); MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)"); @@ -151,9 +148,10 @@ static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port, struct mvpp2_tx_desc *tx_desc) { if (port->priv->hw_version == MVPP21) - return tx_desc->pp21.buf_dma_addr; + return le32_to_cpu(tx_desc->pp21.buf_dma_addr); else - return tx_desc->pp22.buf_dma_addr_ptp & MVPP2_DESC_DMA_MASK; + return le64_to_cpu(tx_desc->pp22.buf_dma_addr_ptp) & + MVPP2_DESC_DMA_MASK; } static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port, @@ -166,12 +164,12 @@ static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port, offset = dma_addr & MVPP2_TX_DESC_ALIGN; if (port->priv->hw_version == MVPP21) { - tx_desc->pp21.buf_dma_addr = addr; + tx_desc->pp21.buf_dma_addr = cpu_to_le32(addr); tx_desc->pp21.packet_offset = offset; } else { - u64 val = (u64)addr; + __le64 val = cpu_to_le64(addr); - tx_desc->pp22.buf_dma_addr_ptp &= ~MVPP2_DESC_DMA_MASK; + tx_desc->pp22.buf_dma_addr_ptp &= ~cpu_to_le64(MVPP2_DESC_DMA_MASK); tx_desc->pp22.buf_dma_addr_ptp |= val; tx_desc->pp22.packet_offset = offset; } @@ -181,9 +179,9 @@ static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port, struct mvpp2_tx_desc *tx_desc) { if (port->priv->hw_version == MVPP21) - return tx_desc->pp21.data_size; + return le16_to_cpu(tx_desc->pp21.data_size); else - return tx_desc->pp22.data_size; + return le16_to_cpu(tx_desc->pp22.data_size); } static void mvpp2_txdesc_size_set(struct mvpp2_port *port, @@ -191,9 +189,9 @@ static void mvpp2_txdesc_size_set(struct mvpp2_port *port, size_t size) { if (port->priv->hw_version == MVPP21) - tx_desc->pp21.data_size = size; + tx_desc->pp21.data_size = cpu_to_le16(size); else - tx_desc->pp22.data_size = size; + tx_desc->pp22.data_size = cpu_to_le16(size); } static void mvpp2_txdesc_txq_set(struct mvpp2_port *port, @@ -211,9 +209,9 @@ static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port, unsigned int command) { if (port->priv->hw_version == MVPP21) - tx_desc->pp21.command = command; + tx_desc->pp21.command = cpu_to_le32(command); else - tx_desc->pp22.command = command; + tx_desc->pp22.command = cpu_to_le32(command); } static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port, @@ -229,36 +227,38 @@ static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc) { if (port->priv->hw_version == 
MVPP21) - return rx_desc->pp21.buf_dma_addr; + return le32_to_cpu(rx_desc->pp21.buf_dma_addr); else - return rx_desc->pp22.buf_dma_addr_key_hash & MVPP2_DESC_DMA_MASK; + return le64_to_cpu(rx_desc->pp22.buf_dma_addr_key_hash) & + MVPP2_DESC_DMA_MASK; } static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc) { if (port->priv->hw_version == MVPP21) - return rx_desc->pp21.buf_cookie; + return le32_to_cpu(rx_desc->pp21.buf_cookie); else - return rx_desc->pp22.buf_cookie_misc & MVPP2_DESC_DMA_MASK; + return le64_to_cpu(rx_desc->pp22.buf_cookie_misc) & + MVPP2_DESC_DMA_MASK; } static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc) { if (port->priv->hw_version == MVPP21) - return rx_desc->pp21.data_size; + return le16_to_cpu(rx_desc->pp21.data_size); else - return rx_desc->pp22.data_size; + return le16_to_cpu(rx_desc->pp22.data_size); } static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc) { if (port->priv->hw_version == MVPP21) - return rx_desc->pp21.status; + return le32_to_cpu(rx_desc->pp21.status); else - return rx_desc->pp22.status; + return le32_to_cpu(rx_desc->pp22.status); } static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu) @@ -1735,7 +1735,7 @@ static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto, command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT); command |= MVPP2_TXD_IP_CSUM_DISABLE; - if (l3_proto == swab16(ETH_P_IP)) { + if (l3_proto == htons(ETH_P_IP)) { command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */ command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */ } else { @@ -3273,6 +3273,11 @@ static void mvpp2_irqs_deinit(struct mvpp2_port *port) } } +static bool mvpp22_rss_is_supported(void) +{ + return queue_mode == MVPP2_QDIST_MULTI_MODE; +} + static int mvpp2_open(struct net_device *dev) { struct mvpp2_port *port = netdev_priv(dev); @@ -3365,9 +3370,6 @@ static int mvpp2_open(struct net_device *dev) mvpp2_start_dev(port); - if (priv->hw_version == MVPP22) - mvpp22_init_rss(port); - /* Start hardware statistics gathering */ queue_delayed_work(priv->stats_queue, &port->stats_work, MVPP2_MIB_COUNTERS_STATS_DELAY); @@ -3626,6 +3628,13 @@ static int mvpp2_set_features(struct net_device *dev, } } + if (changed & NETIF_F_RXHASH) { + if (features & NETIF_F_RXHASH) + mvpp22_rss_enable(port); + else + mvpp22_rss_disable(port); + } + return 0; } @@ -3813,6 +3822,94 @@ static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev, return phylink_ethtool_ksettings_set(port->phylink, cmd); } +static int mvpp2_ethtool_get_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *info, u32 *rules) +{ + struct mvpp2_port *port = netdev_priv(dev); + int ret = 0; + + if (!mvpp22_rss_is_supported()) + return -EOPNOTSUPP; + + switch (info->cmd) { + case ETHTOOL_GRXFH: + ret = mvpp2_ethtool_rxfh_get(port, info); + break; + case ETHTOOL_GRXRINGS: + info->data = port->nrxqs; + break; + default: + return -ENOTSUPP; + } + + return ret; +} + +static int mvpp2_ethtool_set_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *info) +{ + struct mvpp2_port *port = netdev_priv(dev); + int ret = 0; + + if (!mvpp22_rss_is_supported()) + return -EOPNOTSUPP; + + switch (info->cmd) { + case ETHTOOL_SRXFH: + ret = mvpp2_ethtool_rxfh_set(port, info); + break; + default: + return -EOPNOTSUPP; + } + return ret; +} + +static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev) +{ + return mvpp22_rss_is_supported() ? 
MVPP22_RSS_TABLE_ENTRIES : 0; +} + +static int mvpp2_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct mvpp2_port *port = netdev_priv(dev); + + if (!mvpp22_rss_is_supported()) + return -EOPNOTSUPP; + + if (indir) + memcpy(indir, port->indir, + ARRAY_SIZE(port->indir) * sizeof(port->indir[0])); + + if (hfunc) + *hfunc = ETH_RSS_HASH_CRC32; + + return 0; +} + +static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct mvpp2_port *port = netdev_priv(dev); + + if (!mvpp22_rss_is_supported()) + return -EOPNOTSUPP; + + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32) + return -EOPNOTSUPP; + + if (key) + return -EOPNOTSUPP; + + if (indir) { + memcpy(port->indir, indir, + ARRAY_SIZE(port->indir) * sizeof(port->indir[0])); + mvpp22_rss_fill_table(port, port->id); + } + + return 0; +} + /* Device ops */ static const struct net_device_ops mvpp2_netdev_ops = { @@ -3844,6 +3941,12 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = { .set_pauseparam = mvpp2_ethtool_set_pause_param, .get_link_ksettings = mvpp2_ethtool_get_link_ksettings, .set_link_ksettings = mvpp2_ethtool_set_link_ksettings, + .get_rxnfc = mvpp2_ethtool_get_rxnfc, + .set_rxnfc = mvpp2_ethtool_set_rxnfc, + .get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size, + .get_rxfh = mvpp2_ethtool_get_rxfh, + .set_rxfh = mvpp2_ethtool_set_rxfh, + }; /* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that @@ -3985,8 +4088,8 @@ static int mvpp2_port_init(struct mvpp2_port *port) MVPP2_MAX_PORTS * priv->max_port_rxqs) return -EINVAL; - if (port->nrxqs % 4 || (port->nrxqs > priv->max_port_rxqs) || - (port->ntxqs > MVPP2_MAX_TXQ)) + if (port->nrxqs % MVPP2_DEFAULT_RXQ || + port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ) return -EINVAL; /* Disable port */ @@ -4075,6 +4178,9 @@ static int mvpp2_port_init(struct mvpp2_port *port) mvpp2_cls_oversize_rxq_set(port); mvpp2_cls_port_config(port); + if (mvpp22_rss_is_supported()) + mvpp22_rss_port_init(port); + /* Provide an initial Rx packet size */ port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu); @@ -4681,6 +4787,9 @@ static int mvpp2_port_probe(struct platform_device *pdev, dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO | NETIF_F_HW_VLAN_CTAG_FILTER; + if (mvpp22_rss_is_supported()) + dev->hw_features |= NETIF_F_RXHASH; + if (port->pool_long->id == MVPP2_BM_JUMBO && port->id != 0) { dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); dev->hw_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); @@ -5011,6 +5120,12 @@ static int mvpp2_probe(struct platform_device *pdev) (unsigned long)of_device_get_match_data(&pdev->dev); } + /* multi queue mode isn't supported on PPV2.1, fallback to single + * mode + */ + if (priv->hw_version == MVPP21) + queue_mode = MVPP2_QDIST_SINGLE_MODE; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(base)) @@ -5174,6 +5289,8 @@ static int mvpp2_probe(struct platform_device *pdev) goto err_port_probe; } + mvpp2_dbgfs_init(priv, pdev->name); + platform_set_drvdata(pdev, priv); return 0; @@ -5207,6 +5324,8 @@ static int mvpp2_remove(struct platform_device *pdev) struct fwnode_handle *port_fwnode; int i = 0; + mvpp2_dbgfs_cleanup(priv); + flush_workqueue(priv->stats_queue); destroy_workqueue(priv->stats_queue); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c index 6bb69f086794..392fd895f278 
100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c @@ -1,13 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Header Parser helpers for Marvell PPv2 Network Controller * * Copyright (C) 2014 Marvell * * Marcin Wojtas <mw@semihalf.com> - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. */ #include <linux/kernel.h> @@ -30,24 +27,24 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) return -EINVAL; /* Clear entry invalidation bit */ - pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK; + pe->tcam[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK; /* Write tcam index - indirect access */ mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) - mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]); + mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]); /* Write sram index - indirect access */ mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index); for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) - mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]); + mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram[i]); return 0; } /* Initialize tcam entry from hw */ -static int mvpp2_prs_init_from_hw(struct mvpp2 *priv, - struct mvpp2_prs_entry *pe, int tid) +int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe, + int tid) { int i; @@ -60,18 +57,18 @@ static int mvpp2_prs_init_from_hw(struct mvpp2 *priv, /* Write tcam index - indirect access */ mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); - pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv, + pe->tcam[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD)); - if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK) + if (pe->tcam[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK) return MVPP2_PRS_TCAM_ENTRY_INVALID; for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) - pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i)); + pe->tcam[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i)); /* Write sram index - indirect access */ mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index); for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) - pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i)); + pe->sram[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i)); return 0; } @@ -103,42 +100,35 @@ static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index, /* Update lookup field in tcam sw entry */ static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu) { - int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE); - - pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu; - pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK; + pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU(MVPP2_PRS_LU_MASK); + pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK); + pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU(lu & MVPP2_PRS_LU_MASK); + pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK); } /* Update mask for single port in tcam sw entry */ static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe, unsigned int port, bool add) { - int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE); - if (add) - pe->tcam.byte[enable_off] &= ~(1 << port); + pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= 
~MVPP2_PRS_TCAM_PORT_EN(BIT(port)); else - pe->tcam.byte[enable_off] |= 1 << port; + pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(BIT(port)); } /* Update port map in tcam sw entry */ static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe, unsigned int ports) { - unsigned char port_mask = MVPP2_PRS_PORT_MASK; - int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE); - - pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0; - pe->tcam.byte[enable_off] &= ~port_mask; - pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK; + pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT(MVPP2_PRS_PORT_MASK); + pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(MVPP2_PRS_PORT_MASK); + pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(~ports & MVPP2_PRS_PORT_MASK); } /* Obtain port map from tcam sw entry */ -static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe) +unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe) { - int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE); - - return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK; + return (~pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] >> 24) & MVPP2_PRS_PORT_MASK; } /* Set byte of data and its enable bits in tcam sw entry */ @@ -146,55 +136,58 @@ static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe, unsigned int offs, unsigned char byte, unsigned char enable) { - pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte; - pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable; + int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE; + + pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(0xff << pos); + pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(MVPP2_PRS_TCAM_EN(0xff) << pos); + pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= byte << pos; + pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= MVPP2_PRS_TCAM_EN(enable << pos); } /* Get byte of data and its enable bits from tcam sw entry */ -static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe, - unsigned int offs, unsigned char *byte, - unsigned char *enable) +void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe, + unsigned int offs, unsigned char *byte, + unsigned char *enable) { - *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)]; - *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)]; + int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE; + + *byte = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> pos) & 0xff; + *enable = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> (pos + 16)) & 0xff; } /* Compare tcam data bytes with a pattern */ static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs, u16 data) { - int off = MVPP2_PRS_TCAM_DATA_BYTE(offs); u16 tcam_data; - tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off]; - if (tcam_data != data) - return false; - return true; + tcam_data = pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] & 0xffff; + return tcam_data == data; } /* Update ai bits in tcam sw entry */ static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe, unsigned int bits, unsigned int enable) { - int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE; + int i; for (i = 0; i < MVPP2_PRS_AI_BITS; i++) { if (!(enable & BIT(i))) continue; if (bits & BIT(i)) - pe->tcam.byte[ai_idx] |= 1 << i; + pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= BIT(i); else - pe->tcam.byte[ai_idx] &= ~(1 << i); + pe->tcam[MVPP2_PRS_TCAM_AI_WORD] &= ~BIT(i); } - pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable; + pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= MVPP2_PRS_TCAM_AI_EN(enable); } /* Get ai 
bits from tcam sw entry */ static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe) { - return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE]; + return pe->tcam[MVPP2_PRS_TCAM_AI_WORD] & MVPP2_PRS_AI_MASK; } /* Set ethertype in tcam sw entry */ @@ -215,16 +208,16 @@ static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset, /* Set bits in sram sw entry */ static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num, - int val) + u32 val) { - pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8)); + pe->sram[MVPP2_BIT_TO_WORD(bit_num)] |= (val << (MVPP2_BIT_IN_WORD(bit_num))); } /* Clear bits in sram sw entry */ static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num, - int val) + u32 val) { - pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8)); + pe->sram[MVPP2_BIT_TO_WORD(bit_num)] &= ~(val << (MVPP2_BIT_IN_WORD(bit_num))); } /* Update ri bits in sram sw entry */ @@ -234,15 +227,16 @@ static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe, unsigned int i; for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) { - int ri_off = MVPP2_PRS_SRAM_RI_OFFS; - if (!(mask & BIT(i))) continue; if (bits & BIT(i)) - mvpp2_prs_sram_bits_set(pe, ri_off + i, 1); + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_OFFS + i, + 1); else - mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1); + mvpp2_prs_sram_bits_clear(pe, + MVPP2_PRS_SRAM_RI_OFFS + i, + 1); mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1); } @@ -251,7 +245,7 @@ static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe, /* Obtain ri bits from sram sw entry */ static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe) { - return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD]; + return pe->sram[MVPP2_PRS_SRAM_RI_WORD]; } /* Update ai bits in sram sw entry */ @@ -259,16 +253,18 @@ static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe, unsigned int bits, unsigned int mask) { unsigned int i; - int ai_off = MVPP2_PRS_SRAM_AI_OFFS; for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) { if (!(mask & BIT(i))) continue; if (bits & BIT(i)) - mvpp2_prs_sram_bits_set(pe, ai_off + i, 1); + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_OFFS + i, + 1); else - mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1); + mvpp2_prs_sram_bits_clear(pe, + MVPP2_PRS_SRAM_AI_OFFS + i, + 1); mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1); } @@ -278,12 +274,12 @@ static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe, static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe) { u8 bits; - int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS); - int ai_en_off = ai_off + 1; - int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8; + /* ai is stored on bits 90->97; so it spreads across two u32 */ + int ai_off = MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_AI_OFFS); + int ai_shift = MVPP2_BIT_IN_WORD(MVPP2_PRS_SRAM_AI_OFFS); - bits = (pe->sram.byte[ai_off] >> ai_shift) | - (pe->sram.byte[ai_en_off] << (8 - ai_shift)); + bits = (pe->sram[ai_off] >> ai_shift) | + (pe->sram[ai_off + 1] << (32 - ai_shift)); return bits; } @@ -316,8 +312,7 @@ static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift, } /* Set value */ - pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] = - (unsigned char)shift; + pe->sram[MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_SHIFT_OFFS)] = shift & MVPP2_PRS_SRAM_SHIFT_MASK; /* Reset and set operation */ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, @@ -346,13 +341,8 @@ static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe, /* 
Set value */ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS, MVPP2_PRS_SRAM_UDF_MASK); - mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset); - pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS + - MVPP2_PRS_SRAM_UDF_BITS)] &= - ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8))); - pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS + - MVPP2_PRS_SRAM_UDF_BITS)] |= - (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8))); + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, + offset & MVPP2_PRS_SRAM_UDF_MASK); /* Set offset type */ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, @@ -362,16 +352,8 @@ static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe, /* Set offset operation */ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, MVPP2_PRS_SRAM_OP_SEL_UDF_MASK); - mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op); - - pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS + - MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &= - ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >> - (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8))); - - pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS + - MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |= - (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8))); + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, + op & MVPP2_PRS_SRAM_OP_SEL_UDF_MASK); /* Set base offset as current */ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1); @@ -662,7 +644,7 @@ static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai) continue; mvpp2_prs_init_from_hw(priv, &pe, tid); - match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid)); + match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid); if (!match) continue; @@ -790,8 +772,8 @@ static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1, mvpp2_prs_init_from_hw(priv, &pe, tid); - match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid1)) && - mvpp2_prs_tcam_data_cmp(&pe, 4, swab16(tpid2)); + match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid1) && + mvpp2_prs_tcam_data_cmp(&pe, 4, tpid2); if (!match) continue; @@ -932,8 +914,8 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto, pe.index = tid; /* Clear ri before updating */ - pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; - pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; + pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0; + pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; mvpp2_prs_sram_ri_update(&pe, ri, ri_mask); mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE, @@ -1433,17 +1415,13 @@ static int mvpp2_prs_etype_init(struct mvpp2 *priv) pe.index = tid; - /* Clear tcam data before updating */ - pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0; - pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0; - mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, MVPP2_PRS_IPV4_HEAD, MVPP2_PRS_IPV4_HEAD_MASK); /* Clear ri before updating */ - pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; - pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; + pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0; + pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT, MVPP2_PRS_RI_L3_PROTO_MASK); @@ -1644,8 +1622,8 @@ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv) MVPP2_PRS_IPV4_IHL_MASK); /* Clear ri before updating */ - pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; - pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; + pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0; + pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; 
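/* A minimal sketch of the word/bit arithmetic the new u32-array
 * accessors rely on. The real macros live in mvpp2.h and are not part
 * of this hunk; the /32 and %32 forms below are an assumption, though
 * one consistent with the "bits 90->97 ... spreads across two u32"
 * comment in mvpp2_prs_sram_ai_get() above:
 *
 *   #define MVPP2_BIT_TO_WORD(bit)  ((bit) / 32)
 *   #define MVPP2_BIT_IN_WORD(bit)  ((bit) % 32)
 *
 * e.g. SRAM bit 90 (start of the ai field) maps to sram[2] bit 26, so
 * an 8-bit field starting there spills into sram[3], which is why
 * mvpp2_prs_sram_ai_get() combines two adjacent words.
 */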
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4, MVPP2_PRS_RI_L3_PROTO_MASK); @@ -2428,6 +2406,41 @@ int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type) return 0; } +int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask) +{ + struct mvpp2_prs_entry pe; + u8 *ri_byte, *ri_byte_mask; + int tid, i; + + memset(&pe, 0, sizeof(pe)); + + tid = mvpp2_prs_tcam_first_free(priv, + MVPP2_PE_LAST_FREE_TID, + MVPP2_PE_FIRST_FREE_TID); + if (tid < 0) + return tid; + + pe.index = tid; + + ri_byte = (u8 *)&ri; + ri_byte_mask = (u8 *)&ri_mask; + + mvpp2_prs_sram_ai_update(&pe, flow, MVPP2_PRS_FLOW_ID_MASK); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1); + + for (i = 0; i < 4; i++) { + mvpp2_prs_tcam_data_byte_set(&pe, i, ri_byte[i], + ri_byte_mask[i]); + } + + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + /* Set prs flow for the port */ int mvpp2_prs_def_flow(struct mvpp2_port *port) { @@ -2465,3 +2478,19 @@ int mvpp2_prs_def_flow(struct mvpp2_port *port) return 0; } + +int mvpp2_prs_hits(struct mvpp2 *priv, int index) +{ + u32 val; + + if (index > MVPP2_PRS_TCAM_SRAM_SIZE) + return -EINVAL; + + mvpp2_write(priv, MVPP2_PRS_TCAM_HIT_IDX_REG, index); + + val = mvpp2_read(priv, MVPP2_PRS_TCAM_HIT_CNT_REG); + + val &= MVPP2_PRS_TCAM_HIT_CNT_MASK; + + return val; +} diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h index 22fbbc4c8b28..e22f6c85d380 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h @@ -1,22 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Header Parser definitions for Marvell PPv2 Network Controller * * Copyright (C) 2014 Marvell * * Marcin Wojtas <mw@semihalf.com> - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. */ +#ifndef _MVPP2_PRS_H_ +#define _MVPP2_PRS_H_ + #include <linux/kernel.h> #include <linux/netdevice.h> +#include <linux/platform_device.h> #include "mvpp2.h" -#ifndef _MVPP2_PRS_H_ -#define _MVPP2_PRS_H_ - /* Parser constants */ #define MVPP2_PRS_TCAM_SRAM_SIZE 256 #define MVPP2_PRS_TCAM_WORDS 6 @@ -50,17 +48,25 @@ * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0). 
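 * As a worked example of the new word-based layout: header-data byte 5
 * lands in tcam[MVPP2_PRS_BYTE_TO_WORD(5)] = tcam[2], at bit position
 * MVPP2_PRS_BYTE_IN_WORD(5) * 8 = 8, with its enable bit 16 bits
 * higher; this is exactly what mvpp2_prs_tcam_data_byte_get() reads.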
*/ #define MVPP2_PRS_AI_BITS 8 +#define MVPP2_PRS_AI_MASK 0xff #define MVPP2_PRS_PORT_MASK 0xff #define MVPP2_PRS_LU_MASK 0xf -#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \ - (((offs) - ((offs) % 2)) * 2 + ((offs) % 2)) -#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \ - (((offs) * 2) - ((offs) % 2) + 2) -#define MVPP2_PRS_TCAM_AI_BYTE 16 -#define MVPP2_PRS_TCAM_PORT_BYTE 17 -#define MVPP2_PRS_TCAM_LU_BYTE 20 -#define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2) -#define MVPP2_PRS_TCAM_INV_WORD 5 + +/* TCAM entries in registers are accessed using 16 data bits + 16 enable bits */ +#define MVPP2_PRS_BYTE_TO_WORD(byte) ((byte) / 2) +#define MVPP2_PRS_BYTE_IN_WORD(byte) ((byte) % 2) + +#define MVPP2_PRS_TCAM_EN(data) ((data) << 16) +#define MVPP2_PRS_TCAM_AI_WORD 4 +#define MVPP2_PRS_TCAM_AI(ai) (ai) +#define MVPP2_PRS_TCAM_AI_EN(ai) MVPP2_PRS_TCAM_EN(MVPP2_PRS_TCAM_AI(ai)) +#define MVPP2_PRS_TCAM_PORT_WORD 4 +#define MVPP2_PRS_TCAM_PORT(p) ((p) << 8) +#define MVPP2_PRS_TCAM_PORT_EN(p) MVPP2_PRS_TCAM_EN(MVPP2_PRS_TCAM_PORT(p)) +#define MVPP2_PRS_TCAM_LU_WORD 5 +#define MVPP2_PRS_TCAM_LU(lu) (lu) +#define MVPP2_PRS_TCAM_LU_EN(lu) MVPP2_PRS_TCAM_EN(MVPP2_PRS_TCAM_LU(lu)) +#define MVPP2_PRS_TCAM_INV_WORD 5 #define MVPP2_PRS_VID_TCAM_BYTE 2 @@ -146,6 +152,7 @@ #define MVPP2_PRS_SRAM_RI_CTRL_BITS 32 #define MVPP2_PRS_SRAM_SHIFT_OFFS 64 #define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72 +#define MVPP2_PRS_SRAM_SHIFT_MASK 0xff #define MVPP2_PRS_SRAM_UDF_OFFS 73 #define MVPP2_PRS_SRAM_UDF_BITS 8 #define MVPP2_PRS_SRAM_UDF_MASK 0xff @@ -214,6 +221,10 @@ #define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29) #define MVPP2_PRS_RI_DROP_MASK 0x80000000 +#define MVPP2_PRS_IP_MASK (MVPP2_PRS_RI_L3_PROTO_MASK | \ + MVPP2_PRS_RI_IP_FRAG_MASK | \ + MVPP2_PRS_RI_L4_PROTO_MASK) + /* Sram additional info bits assignment */ #define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0) #define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0) @@ -255,20 +266,15 @@ enum mvpp2_prs_lookup { MVPP2_PRS_LU_LAST, }; -union mvpp2_prs_tcam_entry { - u32 word[MVPP2_PRS_TCAM_WORDS]; - u8 byte[MVPP2_PRS_TCAM_WORDS * 4]; -}; - -union mvpp2_prs_sram_entry { - u32 word[MVPP2_PRS_SRAM_WORDS]; - u8 byte[MVPP2_PRS_SRAM_WORDS * 4]; -}; - struct mvpp2_prs_entry { u32 index; - union mvpp2_prs_tcam_entry tcam; - union mvpp2_prs_sram_entry sram; + u32 tcam[MVPP2_PRS_TCAM_WORDS]; + u32 sram[MVPP2_PRS_SRAM_WORDS]; +}; + +struct mvpp2_prs_result_info { + u32 ri; + u32 ri_mask; }; struct mvpp2_prs_shadow { @@ -288,10 +294,21 @@ struct mvpp2_prs_shadow { int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv); +int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe, + int tid); + +unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe); + +void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe, + unsigned int offs, unsigned char *byte, + unsigned char *enable); + int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add); int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type); +int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask); + int mvpp2_prs_def_flow(struct mvpp2_port *port); void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port); @@ -311,4 +328,6 @@ void mvpp2_prs_mac_del_all(struct mvpp2_port *port); int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da); +int mvpp2_prs_hits(struct mvpp2 *priv, int index); + #endif diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index d8ebf0a05e0c..6e6abdc399de 100644 --- 
a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -605,10 +605,10 @@ static int mtk_init_fq_dma(struct mtk_eth *eth) dma_addr_t dma_addr; int i; - eth->scratch_ring = dma_alloc_coherent(eth->dev, - cnt * sizeof(struct mtk_tx_dma), - ð->phy_scratch_ring, - GFP_ATOMIC | __GFP_ZERO); + eth->scratch_ring = dma_zalloc_coherent(eth->dev, + cnt * sizeof(struct mtk_tx_dma), + ð->phy_scratch_ring, + GFP_ATOMIC); if (unlikely(!eth->scratch_ring)) return -ENOMEM; @@ -623,7 +623,6 @@ static int mtk_init_fq_dma(struct mtk_eth *eth) if (unlikely(dma_mapping_error(eth->dev, dma_addr))) return -ENOMEM; - memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt); phy_ring_tail = eth->phy_scratch_ring + (sizeof(struct mtk_tx_dma) * (cnt - 1)); @@ -1221,14 +1220,11 @@ static int mtk_tx_alloc(struct mtk_eth *eth) if (!ring->buf) goto no_tx_mem; - ring->dma = dma_alloc_coherent(eth->dev, - MTK_DMA_SIZE * sz, - &ring->phys, - GFP_ATOMIC | __GFP_ZERO); + ring->dma = dma_zalloc_coherent(eth->dev, MTK_DMA_SIZE * sz, + &ring->phys, GFP_ATOMIC); if (!ring->dma) goto no_tx_mem; - memset(ring->dma, 0, MTK_DMA_SIZE * sz); for (i = 0; i < MTK_DMA_SIZE; i++) { int next = (i + 1) % MTK_DMA_SIZE; u32 next_ptr = ring->phys + next * sz; @@ -1321,10 +1317,9 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) return -ENOMEM; } - ring->dma = dma_alloc_coherent(eth->dev, - rx_dma_size * sizeof(*ring->dma), - &ring->phys, - GFP_ATOMIC | __GFP_ZERO); + ring->dma = dma_zalloc_coherent(eth->dev, + rx_dma_size * sizeof(*ring->dma), + &ring->phys, GFP_ATOMIC); if (!ring->dma) return -ENOMEM; @@ -2463,42 +2458,6 @@ free_netdev: return err; } -static int mtk_get_chip_id(struct mtk_eth *eth, u32 *chip_id) -{ - u32 val[2], id[4]; - - regmap_read(eth->ethsys, ETHSYS_CHIPID0_3, &val[0]); - regmap_read(eth->ethsys, ETHSYS_CHIPID4_7, &val[1]); - - id[3] = ((val[0] >> 16) & 0xff) - '0'; - id[2] = ((val[0] >> 24) & 0xff) - '0'; - id[1] = (val[1] & 0xff) - '0'; - id[0] = ((val[1] >> 8) & 0xff) - '0'; - - *chip_id = (id[3] * 1000) + (id[2] * 100) + - (id[1] * 10) + id[0]; - - if (!(*chip_id)) { - dev_err(eth->dev, "failed to get chip id\n"); - return -ENODEV; - } - - dev_info(eth->dev, "chip id = %d\n", *chip_id); - - return 0; -} - -static bool mtk_is_hwlro_supported(struct mtk_eth *eth) -{ - switch (eth->chip_id) { - case MT7622_ETH: - case MT7623_ETH: - return true; - } - - return false; -} - static int mtk_probe(struct platform_device *pdev) { struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -2577,11 +2536,7 @@ static int mtk_probe(struct platform_device *pdev) if (err) return err; - err = mtk_get_chip_id(eth, ð->chip_id); - if (err) - return err; - - eth->hwlro = mtk_is_hwlro_supported(eth); + eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO); for_each_child_of_node(pdev->dev.of_node, mac_np) { if (!of_device_is_compatible(mac_np, @@ -2670,19 +2625,19 @@ static int mtk_remove(struct platform_device *pdev) } static const struct mtk_soc_data mt2701_data = { - .caps = MTK_GMAC1_TRGMII, + .caps = MTK_GMAC1_TRGMII | MTK_HWLRO, .required_clks = MT7623_CLKS_BITMAP, .required_pctl = true, }; static const struct mtk_soc_data mt7622_data = { - .caps = MTK_DUAL_GMAC_SHARED_SGMII | MTK_GMAC1_ESW, + .caps = MTK_DUAL_GMAC_SHARED_SGMII | MTK_GMAC1_ESW | MTK_HWLRO, .required_clks = MT7622_CLKS_BITMAP, .required_pctl = false, }; static const struct mtk_soc_data mt7623_data = { - .caps = MTK_GMAC1_TRGMII, + .caps = MTK_GMAC1_TRGMII | MTK_HWLRO, 
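/* With HW LRO expressed as a per-SoC capability bit, the driver tests
 * a static caps word instead of probing the chip id at runtime; the
 * probe path above reduces to:
 *
 *   eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
 *
 * using the MTK_HAS_CAPS() helper from mtk_eth_soc.h shown below.
 */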
.required_clks = MT7623_CLKS_BITMAP, .required_pctl = true, }; diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index 672b8c353c47..46819297fc3e 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -566,6 +566,7 @@ struct mtk_rx_ring { #define MTK_GMAC2_SGMII (BIT(10) | MTK_SGMII) #define MTK_DUAL_GMAC_SHARED_SGMII (BIT(11) | MTK_GMAC1_SGMII | \ MTK_GMAC2_SGMII) +#define MTK_HWLRO BIT(12) #define MTK_HAS_CAPS(caps, _x) (((caps) & (_x)) == (_x)) /* struct mtk_eth_data - This is the structure holding all differences @@ -635,7 +636,6 @@ struct mtk_eth { struct regmap *ethsys; struct regmap *sgmiisys; struct regmap *pctl; - u32 chip_id; bool hwlro; refcount_t dma_refcnt; struct mtk_tx_ring tx_ring; diff --git a/drivers/net/ethernet/mellanox/mlx4/Makefile b/drivers/net/ethernet/mellanox/mlx4/Makefile index 16b10d01fcf4..3f400770fcd8 100644 --- a/drivers/net/ethernet/mellanox/mlx4/Makefile +++ b/drivers/net/ethernet/mellanox/mlx4/Makefile @@ -3,7 +3,7 @@ obj-$(CONFIG_MLX4_CORE) += mlx4_core.o mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o fw_qos.o icm.o intf.o \ main.o mcg.o mr.o pd.o port.o profile.o qp.o reset.o sense.o \ - srq.o resource_tracker.o + srq.o resource_tracker.o crdump.o obj-$(CONFIG_MLX4_EN) += mlx4_en.o diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c index e2b6b0cac1ac..c81d15bf259c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/catas.c +++ b/drivers/net/ethernet/mellanox/mlx4/catas.c @@ -178,10 +178,12 @@ void mlx4_enter_error_state(struct mlx4_dev_persistent *persist) dev = persist->dev; mlx4_err(dev, "device is going to be reset\n"); - if (mlx4_is_slave(dev)) + if (mlx4_is_slave(dev)) { err = mlx4_reset_slave(dev); - else + } else { + mlx4_crdump_collect(dev); err = mlx4_reset_master(dev); + } if (!err) { mlx4_err(dev, "device was reset successfully\n"); @@ -212,7 +214,7 @@ static void mlx4_handle_error_state(struct mlx4_dev_persistent *persist) mutex_lock(&persist->interface_state_mutex); if (persist->interface_state & MLX4_INTERFACE_STATE_UP && !(persist->interface_state & MLX4_INTERFACE_STATE_DELETION)) { - err = mlx4_restart_one(persist->pdev); + err = mlx4_restart_one(persist->pdev, false, NULL); mlx4_info(persist->dev, "mlx4_restart_one was ended, ret=%d\n", err); } diff --git a/drivers/net/ethernet/mellanox/mlx4/crdump.c b/drivers/net/ethernet/mellanox/mlx4/crdump.c new file mode 100644 index 000000000000..88316c743820 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/crdump.c @@ -0,0 +1,239 @@ +/* + * Copyright (c) 2018, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "mlx4.h" + +#define BAD_ACCESS 0xBADACCE5 +#define HEALTH_BUFFER_SIZE 0x40 +#define CR_ENABLE_BIT swab32(BIT(6)) +#define CR_ENABLE_BIT_OFFSET 0xF3F04 +#define MAX_NUM_OF_DUMPS_TO_STORE (8) + +static const char *region_cr_space_str = "cr-space"; +static const char *region_fw_health_str = "fw-health"; + +/* Set to true if the CR enable bit was set before crdump */ +static bool crdump_enable_bit_set; + +static void crdump_enable_crspace_access(struct mlx4_dev *dev, + u8 __iomem *cr_space) +{ + /* Get current enable bit value */ + crdump_enable_bit_set = + readl(cr_space + CR_ENABLE_BIT_OFFSET) & CR_ENABLE_BIT; + + /* Enable FW CR filter (set bit6 to 0) */ + if (crdump_enable_bit_set) + writel(readl(cr_space + CR_ENABLE_BIT_OFFSET) & ~CR_ENABLE_BIT, + cr_space + CR_ENABLE_BIT_OFFSET); + + /* Enable block volatile crspace accesses */ + writel(swab32(1), cr_space + dev->caps.health_buffer_addrs + + HEALTH_BUFFER_SIZE); +} + +static void crdump_disable_crspace_access(struct mlx4_dev *dev, + u8 __iomem *cr_space) +{ + /* Disable block volatile crspace accesses */ + writel(0, cr_space + dev->caps.health_buffer_addrs + + HEALTH_BUFFER_SIZE); + + /* Restore FW CR filter value (set bit6 to original value) */ + if (crdump_enable_bit_set) + writel(readl(cr_space + CR_ENABLE_BIT_OFFSET) | CR_ENABLE_BIT, + cr_space + CR_ENABLE_BIT_OFFSET); +} + +static void mlx4_crdump_collect_crspace(struct mlx4_dev *dev, + u8 __iomem *cr_space, + u32 id) +{ + struct mlx4_fw_crdump *crdump = &dev->persist->crdump; + struct pci_dev *pdev = dev->persist->pdev; + unsigned long cr_res_size; + u8 *crspace_data; + int offset; + int err; + + if (!crdump->region_crspace) { + mlx4_err(dev, "crdump: cr-space region is NULL\n"); + return; + } + + /* Try to collect CR space */ + cr_res_size = pci_resource_len(pdev, 0); + crspace_data = kvmalloc(cr_res_size, GFP_KERNEL); + if (crspace_data) { + for (offset = 0; offset < cr_res_size; offset += 4) + *(u32 *)(crspace_data + offset) = + readl(cr_space + offset); + + err = devlink_region_snapshot_create(crdump->region_crspace, + cr_res_size, crspace_data, + id, &kvfree); + if (err) { + kvfree(crspace_data); + mlx4_warn(dev, "crdump: devlink create %s snapshot id %d err %d\n", + region_cr_space_str, id, err); + } else { + mlx4_info(dev, "crdump: added snapshot %d to devlink region %s\n", + id, region_cr_space_str); + } + } else { + mlx4_err(dev, "crdump: Failed to allocate crspace buffer\n"); + } +} + +static void mlx4_crdump_collect_fw_health(struct mlx4_dev *dev, + u8 __iomem *cr_space, + u32 id) +{ + struct mlx4_fw_crdump *crdump = &dev->persist->crdump; + u8 *health_data; + int offset; + int err; + + if (!crdump->region_fw_health) { + mlx4_err(dev, "crdump: fw-health region is NULL\n"); + return; + } + + /* Try to collect health buffer */ + health_data = kvmalloc(HEALTH_BUFFER_SIZE, GFP_KERNEL); + if (health_data) { + u8 __iomem *health_buf_start = + cr_space + dev->caps.health_buffer_addrs; + + for (offset = 0; offset < HEALTH_BUFFER_SIZE; offset += 4) + *(u32
*)(health_data + offset) = + readl(health_buf_start + offset); + + err = devlink_region_snapshot_create(crdump->region_fw_health, + HEALTH_BUFFER_SIZE, + health_data, + id, &kvfree); + if (err) { + kvfree(health_data); + mlx4_warn(dev, "crdump: devlink create %s snapshot id %d err %d\n", + region_fw_health_str, id, err); + } else { + mlx4_info(dev, "crdump: added snapshot %d to devlink region %s\n", + id, region_fw_health_str); + } + } else { + mlx4_err(dev, "crdump: Failed to allocate health buffer\n"); + } +} + +int mlx4_crdump_collect(struct mlx4_dev *dev) +{ + struct devlink *devlink = priv_to_devlink(mlx4_priv(dev)); + struct mlx4_fw_crdump *crdump = &dev->persist->crdump; + struct pci_dev *pdev = dev->persist->pdev; + unsigned long cr_res_size; + u8 __iomem *cr_space; + u32 id; + + if (!dev->caps.health_buffer_addrs) { + mlx4_info(dev, "crdump: FW doesn't support health buffer access, skipping\n"); + return 0; + } + + if (!crdump->snapshot_enable) { + mlx4_info(dev, "crdump: devlink snapshot disabled, skipping\n"); + return 0; + } + + cr_res_size = pci_resource_len(pdev, 0); + + cr_space = ioremap(pci_resource_start(pdev, 0), cr_res_size); + if (!cr_space) { + mlx4_err(dev, "crdump: Failed to map pci cr region\n"); + return -ENODEV; + } + + crdump_enable_crspace_access(dev, cr_space); + + /* Get the available snapshot ID for the dumps */ + id = devlink_region_shapshot_id_get(devlink); + + /* Try to capture dumps */ + mlx4_crdump_collect_crspace(dev, cr_space, id); + mlx4_crdump_collect_fw_health(dev, cr_space, id); + + crdump_disable_crspace_access(dev, cr_space); + + iounmap(cr_space); + return 0; +} + +int mlx4_crdump_init(struct mlx4_dev *dev) +{ + struct devlink *devlink = priv_to_devlink(mlx4_priv(dev)); + struct mlx4_fw_crdump *crdump = &dev->persist->crdump; + struct pci_dev *pdev = dev->persist->pdev; + + crdump->snapshot_enable = false; + + /* Create cr-space region */ + crdump->region_crspace = + devlink_region_create(devlink, + region_cr_space_str, + MAX_NUM_OF_DUMPS_TO_STORE, + pci_resource_len(pdev, 0)); + if (IS_ERR(crdump->region_crspace)) + mlx4_warn(dev, "crdump: create devlink region %s err %ld\n", + region_cr_space_str, + PTR_ERR(crdump->region_crspace)); + + /* Create fw-health region */ + crdump->region_fw_health = + devlink_region_create(devlink, + region_fw_health_str, + MAX_NUM_OF_DUMPS_TO_STORE, + HEALTH_BUFFER_SIZE); + if (IS_ERR(crdump->region_fw_health)) + mlx4_warn(dev, "crdump: create devlink region %s err %ld\n", + region_fw_health_str, + PTR_ERR(crdump->region_fw_health)); + + return 0; +} + +void mlx4_crdump_end(struct mlx4_dev *dev) +{ + struct mlx4_fw_crdump *crdump = &dev->persist->crdump; + + devlink_region_destroy(crdump->region_fw_health); + devlink_region_destroy(crdump->region_crspace); +} diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 65eb06e017e4..6785661d1a72 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2926,7 +2926,6 @@ static int mlx4_xdp(struct net_device *dev, struct netdev_bpf *xdp) return mlx4_xdp_set(dev, xdp->prog); case XDP_QUERY_PROG: xdp->prog_id = mlx4_xdp_query(dev); - xdp->prog_attached = !!xdp->prog_id; return 0; default: return -EINVAL; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 0227786308af..1857ee0f0871 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -688,15 
+688,16 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, } u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { struct mlx4_en_priv *priv = netdev_priv(dev); u16 rings_p_up = priv->num_tx_rings_p_up; if (netdev_get_num_tc(dev)) - return fallback(dev, skb); + return fallback(dev, skb, NULL); - return fallback(dev, skb) % rings_p_up; + return fallback(dev, skb, NULL) % rings_p_up; } static void mlx4_bf_copy(void __iomem *dst, const void *src, diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 46dcbfbe4c5e..babcfd9c0571 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -825,7 +825,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) #define QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET 0xcc #define QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET 0xd0 #define QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET 0xd2 - +#define QUERY_DEV_CAP_HEALTH_BUFFER_ADDRESS_OFFSET 0xe4 dev_cap->flags2 = 0; mailbox = mlx4_alloc_cmd_mailbox(dev); @@ -1082,6 +1082,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev_cap->rl_caps.min_unit = size >> 14; } + MLX4_GET(dev_cap->health_buffer_addrs, outbox, + QUERY_DEV_CAP_HEALTH_BUFFER_ADDRESS_OFFSET); + MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET); if (field32 & (1 << 16)) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP; diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h index cd6399c76bfd..650ae08c71de 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.h +++ b/drivers/net/ethernet/mellanox/mlx4/fw.h @@ -128,6 +128,7 @@ struct mlx4_dev_cap { u32 dmfs_high_rate_qpn_base; u32 dmfs_high_rate_qpn_range; struct mlx4_rate_limit_caps rl_caps; + u32 health_buffer_addrs; struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1]; bool wol_port[MLX4_MAX_PORTS + 1]; }; diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 872014702fc1..d2d59444f562 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -159,9 +159,10 @@ static bool use_prio; module_param_named(use_prio, use_prio, bool, 0444); MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)"); -int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG); +int log_mtts_per_seg = ilog2(1); module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444); -MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)"); +MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment " + "(0-7) (default: 0)"); static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE}; static int arr_argc = 2; @@ -177,6 +178,131 @@ struct mlx4_port_config { static atomic_t pf_loading = ATOMIC_INIT(0); +static int mlx4_devlink_ierr_reset_get(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + ctx->val.vbool = !!mlx4_internal_err_reset; + return 0; +} + +static int mlx4_devlink_ierr_reset_set(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + mlx4_internal_err_reset = ctx->val.vbool; + return 0; +} + +static int mlx4_devlink_crdump_snapshot_get(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct mlx4_priv *priv = devlink_priv(devlink); + struct mlx4_dev *dev = 
&priv->dev; + + ctx->val.vbool = dev->persist->crdump.snapshot_enable; + return 0; +} + +static int mlx4_devlink_crdump_snapshot_set(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct mlx4_priv *priv = devlink_priv(devlink); + struct mlx4_dev *dev = &priv->dev; + + dev->persist->crdump.snapshot_enable = ctx->val.vbool; + return 0; +} + +static int +mlx4_devlink_max_macs_validate(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) +{ + u32 value = val.vu32; + + if (value < 1 || value > 128) + return -ERANGE; + + if (!is_power_of_2(value)) { + NL_SET_ERR_MSG_MOD(extack, "max_macs supported must be power of 2"); + return -EINVAL; + } + + return 0; +} + +enum mlx4_devlink_param_id { + MLX4_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, + MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE, + MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR, +}; + +static const struct devlink_param mlx4_devlink_params[] = { + DEVLINK_PARAM_GENERIC(INT_ERR_RESET, + BIT(DEVLINK_PARAM_CMODE_RUNTIME) | + BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), + mlx4_devlink_ierr_reset_get, + mlx4_devlink_ierr_reset_set, NULL), + DEVLINK_PARAM_GENERIC(MAX_MACS, + BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), + NULL, NULL, mlx4_devlink_max_macs_validate), + DEVLINK_PARAM_GENERIC(REGION_SNAPSHOT, + BIT(DEVLINK_PARAM_CMODE_RUNTIME) | + BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), + mlx4_devlink_crdump_snapshot_get, + mlx4_devlink_crdump_snapshot_set, NULL), + DEVLINK_PARAM_DRIVER(MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE, + "enable_64b_cqe_eqe", DEVLINK_PARAM_TYPE_BOOL, + BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), + NULL, NULL, NULL), + DEVLINK_PARAM_DRIVER(MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR, + "enable_4k_uar", DEVLINK_PARAM_TYPE_BOOL, + BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), + NULL, NULL, NULL), +}; + +static void mlx4_devlink_set_init_value(struct devlink *devlink, u32 param_id, + union devlink_param_value init_val) +{ + struct mlx4_priv *priv = devlink_priv(devlink); + struct mlx4_dev *dev = &priv->dev; + int err; + + err = devlink_param_driverinit_value_set(devlink, param_id, init_val); + if (err) + mlx4_warn(dev, + "devlink set parameter %u value failed (err = %d)", + param_id, err); +} + +static void mlx4_devlink_set_params_init_values(struct devlink *devlink) +{ + union devlink_param_value value; + + value.vbool = !!mlx4_internal_err_reset; + mlx4_devlink_set_init_value(devlink, + DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET, + value); + + value.vu32 = 1UL << log_num_mac; + mlx4_devlink_set_init_value(devlink, + DEVLINK_PARAM_GENERIC_ID_MAX_MACS, value); + + value.vbool = enable_64b_cqe_eqe; + mlx4_devlink_set_init_value(devlink, + MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE, + value); + + value.vbool = enable_4k_uar; + mlx4_devlink_set_init_value(devlink, + MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR, + value); + + value.vbool = false; + mlx4_devlink_set_init_value(devlink, + DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT, + value); +} + static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) { @@ -428,6 +554,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz; dev->caps.wol_port[1] = dev_cap->wol_port[1]; dev->caps.wol_port[2] = dev_cap->wol_port[2]; + dev->caps.health_buffer_addrs = dev_cap->health_buffer_addrs; /* Save uar page shift */ if (!mlx4_is_slave(dev)) { @@ -3711,10 +3838,14 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data, } } - err = mlx4_catas_init(&priv->dev); 
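/* Ordering note: mlx4_crdump_init() runs before mlx4_catas_init() and,
 * per mlx4_remove_one() below, mlx4_crdump_end() runs after
 * mlx4_catas_end(), so the devlink regions exist whenever
 * mlx4_enter_error_state() calls mlx4_crdump_collect().
 */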
+ err = mlx4_crdump_init(&priv->dev); if (err) goto err_release_regions; + err = mlx4_catas_init(&priv->dev); + if (err) + goto err_crdump; + err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 0); if (err) goto err_catas; @@ -3724,6 +3855,9 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data, err_catas: mlx4_catas_end(&priv->dev); +err_crdump: + mlx4_crdump_end(&priv->dev); + err_release_regions: pci_release_regions(pdev); @@ -3757,8 +3891,68 @@ static int mlx4_devlink_port_type_set(struct devlink_port *devlink_port, return __set_port_type(info, mlx4_port_type); } +static void mlx4_devlink_param_load_driverinit_values(struct devlink *devlink) +{ + struct mlx4_priv *priv = devlink_priv(devlink); + struct mlx4_dev *dev = &priv->dev; + struct mlx4_fw_crdump *crdump = &dev->persist->crdump; + union devlink_param_value saved_value; + int err; + + err = devlink_param_driverinit_value_get(devlink, + DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET, + &saved_value); + if (!err && mlx4_internal_err_reset != saved_value.vbool) { + mlx4_internal_err_reset = saved_value.vbool; + /* Notify on value changed on runtime configuration mode */ + devlink_param_value_changed(devlink, + DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET); + } + err = devlink_param_driverinit_value_get(devlink, + DEVLINK_PARAM_GENERIC_ID_MAX_MACS, + &saved_value); + if (!err) + log_num_mac = order_base_2(saved_value.vu32); + err = devlink_param_driverinit_value_get(devlink, + MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE, + &saved_value); + if (!err) + enable_64b_cqe_eqe = saved_value.vbool; + err = devlink_param_driverinit_value_get(devlink, + MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR, + &saved_value); + if (!err) + enable_4k_uar = saved_value.vbool; + err = devlink_param_driverinit_value_get(devlink, + DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT, + &saved_value); + if (!err && crdump->snapshot_enable != saved_value.vbool) { + crdump->snapshot_enable = saved_value.vbool; + devlink_param_value_changed(devlink, + DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT); + } +} + +static int mlx4_devlink_reload(struct devlink *devlink, + struct netlink_ext_ack *extack) +{ + struct mlx4_priv *priv = devlink_priv(devlink); + struct mlx4_dev *dev = &priv->dev; + struct mlx4_dev_persistent *persist = dev->persist; + int err; + + if (persist->num_vfs) + mlx4_warn(persist->dev, "Reload performed on PF, will cause reset on operating Virtual Functions\n"); + err = mlx4_restart_one(persist->pdev, true, devlink); + if (err) + mlx4_err(persist->dev, "mlx4_restart_one failed, ret=%d\n", err); + + return err; +} + static const struct devlink_ops mlx4_devlink_ops = { .port_type_set = mlx4_devlink_port_type_set, + .reload = mlx4_devlink_reload, }; static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) @@ -3792,14 +3986,21 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) ret = devlink_register(devlink, &pdev->dev); if (ret) goto err_persist_free; - - ret = __mlx4_init_one(pdev, id->driver_data, priv); + ret = devlink_params_register(devlink, mlx4_devlink_params, + ARRAY_SIZE(mlx4_devlink_params)); if (ret) goto err_devlink_unregister; + mlx4_devlink_set_params_init_values(devlink); + ret = __mlx4_init_one(pdev, id->driver_data, priv); + if (ret) + goto err_params_unregister; pci_save_state(pdev); return 0; +err_params_unregister: + devlink_params_unregister(devlink, mlx4_devlink_params, + ARRAY_SIZE(mlx4_devlink_params)); err_devlink_unregister: devlink_unregister(devlink); err_persist_free: @@ -3929,6 +4130,7 
@@ static void mlx4_remove_one(struct pci_dev *pdev) else mlx4_info(dev, "%s: interface is down\n", __func__); mlx4_catas_end(dev); + mlx4_crdump_end(dev); if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) { mlx4_warn(dev, "Disabling SR-IOV\n"); pci_disable_sriov(pdev); @@ -3936,6 +4138,8 @@ static void mlx4_remove_one(struct pci_dev *pdev) pci_release_regions(pdev); mlx4_pci_disable_device(dev); + devlink_params_unregister(devlink, mlx4_devlink_params, + ARRAY_SIZE(mlx4_devlink_params)); devlink_unregister(devlink); kfree(dev->persist); devlink_free(devlink); @@ -3960,7 +4164,7 @@ static int restore_current_port_types(struct mlx4_dev *dev, return err; } -int mlx4_restart_one(struct pci_dev *pdev) +int mlx4_restart_one(struct pci_dev *pdev, bool reload, struct devlink *devlink) { struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); struct mlx4_dev *dev = persist->dev; @@ -3973,6 +4177,8 @@ int mlx4_restart_one(struct pci_dev *pdev) memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs)); mlx4_unload_one(pdev); + if (reload) + mlx4_devlink_param_load_driverinit_values(devlink); err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 1); if (err) { mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n", @@ -4205,7 +4411,7 @@ static int __init mlx4_verify_params(void) if (use_prio != 0) pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n"); - if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) { + if ((log_mtts_per_seg < 0) || (log_mtts_per_seg > 7)) { pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg); return -1; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index cb9e923e8399..ebcd2778eeb3 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -84,7 +84,6 @@ enum { MLX4_MIN_MGM_LOG_ENTRY_SIZE = 7, MLX4_MAX_MGM_LOG_ENTRY_SIZE = 12, MLX4_MAX_QP_PER_MGM = 4 * ((1 << MLX4_MAX_MGM_LOG_ENTRY_SIZE) / 16 - 2), - MLX4_MTT_ENTRY_PER_SEG = 8, }; enum { @@ -1042,7 +1041,10 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev); void mlx4_stop_catas_poll(struct mlx4_dev *dev); int mlx4_catas_init(struct mlx4_dev *dev); void mlx4_catas_end(struct mlx4_dev *dev); -int mlx4_restart_one(struct pci_dev *pdev); +int mlx4_crdump_init(struct mlx4_dev *dev); +void mlx4_crdump_end(struct mlx4_dev *dev); +int mlx4_restart_one(struct pci_dev *pdev, bool reload, + struct devlink *devlink); int mlx4_register_device(struct mlx4_dev *dev); void mlx4_unregister_device(struct mlx4_dev *dev); void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, @@ -1227,6 +1229,8 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type); void mlx4_enter_error_state(struct mlx4_dev_persistent *persist); int mlx4_comm_internal_err(u32 slave_read); +int mlx4_crdump_collect(struct mlx4_dev *dev); + int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port, enum mlx4_port_type *type); void mlx4_do_sense_ports(struct mlx4_dev *dev, diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index ace6545f82e6..c3228b89df46 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -699,7 +699,8 @@ void mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); void mlx4_en_tx_irq(struct mlx4_cq *mcq); u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback); + struct net_device *sb_dev, 
+				  select_queue_fallback_t fallback); netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring, struct mlx4_en_rx_alloc *frame, diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c index bae8b22edbb7..ba361c5fbda3 100644 --- a/drivers/net/ethernet/mellanox/mlx4/profile.c +++ b/drivers/net/ethernet/mellanox/mlx4/profile.c @@ -105,7 +105,8 @@ u64 mlx4_make_profile(struct mlx4_dev *dev, request->num_mtt = roundup_pow_of_two(max_t(unsigned, request->num_mtt, min(1UL << (31 - log_mtts_per_seg), - si.totalram >> (log_mtts_per_seg - 1)))); + (si.totalram << 1) >> log_mtts_per_seg))); + profile[MLX4_RES_QP].size = dev_cap->qpc_entry_sz; profile[MLX4_RES_RDMARC].size = dev_cap->rdmarc_entry_sz; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 2545296a0c08..7a84dd07ced2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -3,7 +3,7 @@ # config MLX5_CORE - tristate "Mellanox Technologies ConnectX-4 and Connect-IB core driver" + tristate "Mellanox 5th generation network adapters (ConnectX series) core driver" depends on MAY_USE_DEVLINK depends on PCI imply PTP_1588_CLOCK @@ -27,7 +27,7 @@ config MLX5_FPGA sandbox-specific client drivers. config MLX5_CORE_EN - bool "Mellanox Technologies ConnectX-4 Ethernet support" + bool "Mellanox 5th generation network adapters (ConnectX series) Ethernet support" depends on NETDEVICES && ETHERNET && INET && PCI && MLX5_CORE depends on IPV6=y || IPV6=n || MLX5_CORE=m select PAGE_POOL @@ -69,7 +69,7 @@ config MLX5_CORE_EN_DCB If unsure, set to Y config MLX5_CORE_IPOIB - bool "Mellanox Technologies ConnectX-4 IPoIB offloads support" + bool "Mellanox 5th generation network adapters (ConnectX series) IPoIB offloads support" depends on MLX5_CORE_EN default n ---help--- diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 9efbf193ad5a..f20fda1ced4f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -6,7 +6,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \ mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \ fs_counters.o rl.o lag.o dev.o wq.o lib/gid.o lib/clock.o \ - diag/fs_tracepoint.o + diag/fs_tracepoint.o diag/fw_tracer.o mlx5_core-$(CONFIG_MLX5_ACCEL) += accel/ipsec.o accel/tls.o @@ -14,8 +14,8 @@ mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \ fpga/ipsec.o fpga/tls.o mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \ - en_tx.o en_rx.o en_dim.o en_txrx.o en_stats.o vxlan.o \ - en_arfs.o en_fs_ethtool.o en_selftest.o en/port.o + en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \ + en_arfs.o en_fs_ethtool.o en_selftest.o en/port.o lib/vxlan.o mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h new file mode 100644 index 000000000000..c13260467750 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h @@ -0,0 +1,37 @@ +#ifndef __MLX5E_ACCEL_H__ +#define __MLX5E_ACCEL_H__ + +#ifdef CONFIG_MLX5_ACCEL + +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include "en.h" + +static inline bool
is_metadata_hdr_valid(struct sk_buff *skb) +{ + __be16 *ethtype; + + if (unlikely(skb->len < ETH_HLEN + MLX5E_METADATA_ETHER_LEN)) + return false; + ethtype = (__be16 *)(skb->data + ETH_ALEN * 2); + if (*ethtype != cpu_to_be16(MLX5E_METADATA_ETHER_TYPE)) + return false; + return true; +} + +static inline void remove_metadata_hdr(struct sk_buff *skb) +{ + struct ethhdr *old_eth; + struct ethhdr *new_eth; + + /* Remove the metadata from the buffer */ + old_eth = (struct ethhdr *)skb->data; + new_eth = (struct ethhdr *)(skb->data + MLX5E_METADATA_ETHER_LEN); + memmove(new_eth, old_eth, 2 * ETH_ALEN); + /* Ethertype is already in its new place */ + skb_pull_inline(skb, MLX5E_METADATA_ETHER_LEN); +} + +#endif /* CONFIG_MLX5_ACCEL */ + +#endif /* __MLX5E_ACCEL_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c index 77ac19f38cbe..da7bd26368f9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c @@ -37,17 +37,26 @@ #include "mlx5_core.h" #include "fpga/tls.h" -int mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow, - struct tls_crypto_info *crypto_info, - u32 start_offload_tcp_sn, u32 *p_swid) +int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, + struct tls_crypto_info *crypto_info, + u32 start_offload_tcp_sn, u32 *p_swid, + bool direction_sx) { - return mlx5_fpga_tls_add_tx_flow(mdev, flow, crypto_info, - start_offload_tcp_sn, p_swid); + return mlx5_fpga_tls_add_flow(mdev, flow, crypto_info, + start_offload_tcp_sn, p_swid, + direction_sx); } -void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid) +void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, + bool direction_sx) { - mlx5_fpga_tls_del_tx_flow(mdev, swid, GFP_KERNEL); + mlx5_fpga_tls_del_flow(mdev, swid, GFP_KERNEL, direction_sx); +} + +int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq, + u64 rcd_sn) +{ + return mlx5_fpga_tls_resync_rx(mdev, handle, seq, rcd_sn); } bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h index 6f9c9f446ecc..def4093ebfae 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h @@ -60,10 +60,14 @@ struct mlx5_ifc_tls_flow_bits { u8 reserved_at_2[0x1e]; }; -int mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow, - struct tls_crypto_info *crypto_info, - u32 start_offload_tcp_sn, u32 *p_swid); -void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid); +int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, + struct tls_crypto_info *crypto_info, + u32 start_offload_tcp_sn, u32 *p_swid, + bool direction_sx); +void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, + bool direction_sx); +int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq, + u64 rcd_sn); bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev); u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev); int mlx5_accel_tls_init(struct mlx5_core_dev *mdev); @@ -72,10 +76,14 @@ void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev); #else static inline int -mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow, - struct tls_crypto_info *crypto_info, - u32 start_offload_tcp_sn, u32 *p_swid) { return 0; } -static inline void
mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid) { } +mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, + struct tls_crypto_info *crypto_info, + u32 start_offload_tcp_sn, u32 *p_swid, + bool direction_sx) { return -ENOTSUPP; } +static inline void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, + bool direction_sx) { } +static inline int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, + u32 seq, u64 rcd_sn) { return 0; } static inline bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev) { return false; } static inline u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev) { return 0; } static inline int mlx5_accel_tls_init(struct mlx5_core_dev *mdev) { return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 384c1fa49081..f498c7730c5b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -278,6 +278,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_DESTROY_PSV: case MLX5_CMD_OP_DESTROY_SRQ: case MLX5_CMD_OP_DESTROY_XRC_SRQ: + case MLX5_CMD_OP_DESTROY_XRQ: case MLX5_CMD_OP_DESTROY_DCT: case MLX5_CMD_OP_DEALLOC_Q_COUNTER: case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT: @@ -310,6 +311,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER: case MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT: case MLX5_CMD_OP_FPGA_DESTROY_QP: + case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT: return MLX5_CMD_STAT_OK; case MLX5_CMD_OP_QUERY_HCA_CAP: @@ -346,6 +348,9 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_CREATE_XRC_SRQ: case MLX5_CMD_OP_QUERY_XRC_SRQ: case MLX5_CMD_OP_ARM_XRC_SRQ: + case MLX5_CMD_OP_CREATE_XRQ: + case MLX5_CMD_OP_QUERY_XRQ: + case MLX5_CMD_OP_ARM_XRQ: case MLX5_CMD_OP_CREATE_DCT: case MLX5_CMD_OP_DRAIN_DCT: case MLX5_CMD_OP_QUERY_DCT: @@ -427,6 +432,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_FPGA_MODIFY_QP: case MLX5_CMD_OP_FPGA_QUERY_QP: case MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS: + case MLX5_CMD_OP_CREATE_GENERAL_OBJECT: *status = MLX5_DRIVER_STATUS_ABORTED; *synd = MLX5_DRIVER_SYND; return -EIO; @@ -452,6 +458,7 @@ const char *mlx5_command_str(int command) MLX5_COMMAND_STR_CASE(SET_HCA_CAP); MLX5_COMMAND_STR_CASE(QUERY_ISSI); MLX5_COMMAND_STR_CASE(SET_ISSI); + MLX5_COMMAND_STR_CASE(SET_DRIVER_VERSION); MLX5_COMMAND_STR_CASE(CREATE_MKEY); MLX5_COMMAND_STR_CASE(QUERY_MKEY); MLX5_COMMAND_STR_CASE(DESTROY_MKEY); @@ -599,6 +606,12 @@ const char *mlx5_command_str(int command) MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP); MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP_COUNTERS); MLX5_COMMAND_STR_CASE(FPGA_DESTROY_QP); + MLX5_COMMAND_STR_CASE(CREATE_XRQ); + MLX5_COMMAND_STR_CASE(DESTROY_XRQ); + MLX5_COMMAND_STR_CASE(QUERY_XRQ); + MLX5_COMMAND_STR_CASE(ARM_XRQ); + MLX5_COMMAND_STR_CASE(CREATE_GENERAL_OBJECT); + MLX5_COMMAND_STR_CASE(DESTROY_GENERAL_OBJECT); default: return "unknown command opcode"; } } @@ -677,7 +690,7 @@ struct mlx5_ifc_mbox_out_bits { struct mlx5_ifc_mbox_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -697,6 +710,7 @@ static int mlx5_cmd_check(struct mlx5_core_dev *dev, void *in, void *out) u8 status; u16 opcode; u16 op_mod; + u16 uid; mlx5_cmd_mbox_status(out, &status, &syndrome); if (!status) @@ -704,8 +718,15 @@ static int mlx5_cmd_check(struct 
mlx5_core_dev *dev, void *in, void *out) opcode = MLX5_GET(mbox_in, in, opcode); op_mod = MLX5_GET(mbox_in, in, op_mod); + uid = MLX5_GET(mbox_in, in, uid); - mlx5_core_err(dev, + if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY) + mlx5_core_err_rl(dev, + "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n", + mlx5_command_str(opcode), opcode, op_mod, + cmd_status_str(status), status, syndrome); + else + mlx5_core_dbg(dev, "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n", mlx5_command_str(opcode), opcode, op_mod, @@ -1022,7 +1043,10 @@ static ssize_t dbg_write(struct file *filp, const char __user *buf, if (!dbg->in_msg || !dbg->out_msg) return -ENOMEM; - if (copy_from_user(lbuf, buf, sizeof(lbuf))) + if (count < sizeof(lbuf) - 1) + return -EINVAL; + + if (copy_from_user(lbuf, buf, sizeof(lbuf) - 1)) return -EFAULT; lbuf[sizeof(lbuf) - 1] = 0; @@ -1226,21 +1250,12 @@ static ssize_t data_read(struct file *filp, char __user *buf, size_t count, { struct mlx5_core_dev *dev = filp->private_data; struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; - int copy; - - if (*pos) - return 0; if (!dbg->out_msg) return -ENOMEM; - copy = min_t(int, count, dbg->outlen); - if (copy_to_user(buf, dbg->out_msg, copy)) - return -EFAULT; - - *pos += copy; - - return copy; + return simple_read_from_buffer(buf, count, pos, dbg->out_msg, + dbg->outlen); } static const struct file_operations dfops = { @@ -1258,19 +1273,11 @@ static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count, char outlen[8]; int err; - if (*pos) - return 0; - err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen); if (err < 0) return err; - if (copy_to_user(buf, &outlen, err)) - return -EFAULT; - - *pos += err; - - return err; + return simple_read_from_buffer(buf, count, pos, outlen, err); } static ssize_t outlen_write(struct file *filp, const char __user *buf, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c index 413080a312a7..90fabd612b6c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c @@ -150,22 +150,13 @@ static ssize_t average_read(struct file *filp, char __user *buf, size_t count, int ret; char tbuf[22]; - if (*pos) - return 0; - stats = filp->private_data; spin_lock_irq(&stats->lock); if (stats->n) field = div64_u64(stats->sum, stats->n); spin_unlock_irq(&stats->lock); ret = snprintf(tbuf, sizeof(tbuf), "%llu\n", field); - if (ret > 0) { - if (copy_to_user(buf, tbuf, ret)) - return -EFAULT; - } - - *pos += ret; - return ret; + return simple_read_from_buffer(buf, count, pos, tbuf, ret); } static ssize_t average_write(struct file *filp, const char __user *buf, @@ -442,9 +433,6 @@ static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count, u64 field; int ret; - if (*pos) - return 0; - desc = filp->private_data; d = (void *)(desc - desc->i) - sizeof(*d); switch (d->type) { @@ -470,13 +458,7 @@ static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count, else ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field); - if (ret > 0) { - if (copy_to_user(buf, tbuf, ret)) - return -EFAULT; - } - - *pos += ret; - return ret; + return simple_read_from_buffer(buf, count, pos, tbuf, ret); } static const struct file_operations fops = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h index 09f178a3fcab..0240aee9189e 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h @@ -138,6 +138,8 @@ TRACE_EVENT(mlx5_fs_del_fg, {MLX5_FLOW_CONTEXT_ACTION_MOD_HDR, "MOD_HDR"},\ {MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH, "VLAN_PUSH"},\ {MLX5_FLOW_CONTEXT_ACTION_VLAN_POP, "VLAN_POP"},\ + {MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2, "VLAN_PUSH_2"},\ + {MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2, "VLAN_POP_2"},\ {MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO, "NEXT_PRIO"} TRACE_EVENT(mlx5_fs_set_fte, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c new file mode 100644 index 000000000000..d4ec93bde4de --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@ -0,0 +1,947 @@ +/* + * Copyright (c) 2018, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#define CREATE_TRACE_POINTS +#include "fw_tracer.h" +#include "fw_tracer_tracepoint.h" + +static int mlx5_query_mtrc_caps(struct mlx5_fw_tracer *tracer) +{ + u32 *string_db_base_address_out = tracer->str_db.base_address_out; + u32 *string_db_size_out = tracer->str_db.size_out; + struct mlx5_core_dev *dev = tracer->dev; + u32 out[MLX5_ST_SZ_DW(mtrc_cap)] = {0}; + u32 in[MLX5_ST_SZ_DW(mtrc_cap)] = {0}; + void *mtrc_cap_sp; + int err, i; + + err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), + MLX5_REG_MTRC_CAP, 0, 0); + if (err) { + mlx5_core_warn(dev, "FWTracer: Error reading tracer caps %d\n", + err); + return err; + } + + if (!MLX5_GET(mtrc_cap, out, trace_to_memory)) { + mlx5_core_dbg(dev, "FWTracer: Device does not support logging traces to memory\n"); + return -ENOTSUPP; + } + + tracer->trc_ver = MLX5_GET(mtrc_cap, out, trc_ver); + tracer->str_db.first_string_trace = + MLX5_GET(mtrc_cap, out, first_string_trace); + tracer->str_db.num_string_trace = + MLX5_GET(mtrc_cap, out, num_string_trace); + tracer->str_db.num_string_db = MLX5_GET(mtrc_cap, out, num_string_db); + tracer->owner = !!MLX5_GET(mtrc_cap, out, trace_owner); + + for (i = 0; i < tracer->str_db.num_string_db; i++) { + mtrc_cap_sp = MLX5_ADDR_OF(mtrc_cap, out, string_db_param[i]); + string_db_base_address_out[i] = MLX5_GET(mtrc_string_db_param, + mtrc_cap_sp, + string_db_base_address); + string_db_size_out[i] = MLX5_GET(mtrc_string_db_param, + mtrc_cap_sp, string_db_size); + } + + return err; +} + +static int mlx5_set_mtrc_caps_trace_owner(struct mlx5_fw_tracer *tracer, + u32 *out, u32 out_size, + u8 trace_owner) +{ + struct mlx5_core_dev *dev = tracer->dev; + u32 in[MLX5_ST_SZ_DW(mtrc_cap)] = {0}; + + MLX5_SET(mtrc_cap, in, trace_owner, trace_owner); + + return mlx5_core_access_reg(dev, in, sizeof(in), out, out_size, + MLX5_REG_MTRC_CAP, 0, 1); +} + +static int mlx5_fw_tracer_ownership_acquire(struct mlx5_fw_tracer *tracer) +{ + struct mlx5_core_dev *dev = tracer->dev; + u32 out[MLX5_ST_SZ_DW(mtrc_cap)] = {0}; + int err; + + err = mlx5_set_mtrc_caps_trace_owner(tracer, out, sizeof(out), + MLX5_FW_TRACER_ACQUIRE_OWNERSHIP); + if (err) { + mlx5_core_warn(dev, "FWTracer: Acquire tracer ownership failed %d\n", + err); + return err; + } + + tracer->owner = !!MLX5_GET(mtrc_cap, out, trace_owner); + + if (!tracer->owner) + return -EBUSY; + + return 0; +} + +static void mlx5_fw_tracer_ownership_release(struct mlx5_fw_tracer *tracer) +{ + u32 out[MLX5_ST_SZ_DW(mtrc_cap)] = {0}; + + mlx5_set_mtrc_caps_trace_owner(tracer, out, sizeof(out), + MLX5_FW_TRACER_RELEASE_OWNERSHIP); + tracer->owner = false; +} + +static int mlx5_fw_tracer_create_log_buf(struct mlx5_fw_tracer *tracer) +{ + struct mlx5_core_dev *dev = tracer->dev; + struct device *ddev = &dev->pdev->dev; + dma_addr_t dma; + void *buff; + gfp_t gfp; + int err; + + tracer->buff.size = TRACE_BUFFER_SIZE_BYTE; + + gfp = GFP_KERNEL | __GFP_ZERO; + buff = (void *)__get_free_pages(gfp, + get_order(tracer->buff.size)); + if (!buff) { + err = -ENOMEM; + mlx5_core_warn(dev, "FWTracer: Failed to allocate pages, %d\n", err); + return err; + } + tracer->buff.log_buf = buff; + + dma = dma_map_single(ddev, buff, tracer->buff.size, DMA_FROM_DEVICE); + if (dma_mapping_error(ddev, dma)) { + mlx5_core_warn(dev, "FWTracer: Unable to map DMA: %d\n", + dma_mapping_error(ddev, dma)); + err = -ENOMEM; + goto free_pages; + } + tracer->buff.dma = dma; + + return 0; + +free_pages: + free_pages((unsigned long)tracer->buff.log_buf, get_order(tracer->buff.size)); + + return err; +} + 
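[Aside, not part of the patch: a minimal, self-contained C sketch of the trace-buffer geometry the tracer functions below rely on. The 64-page, 4 KB-chunk log buffer is carved into 256-byte blocks of 32 eight-byte events, and because the block count is a power of two the consumer index wraps with a mask rather than a modulo, the same arithmetic mlx5_fw_tracer_handle_traces() uses further down. The constants mirror fw_tracer.h from this patch; the standalone main() is illustrative only.]

#include <stdio.h>

#define TRACER_BUFFER_PAGE_NUM	64
#define TRACER_BUFFER_CHUNK	4096
#define TRACE_BUFFER_SIZE_BYTE	(TRACER_BUFFER_PAGE_NUM * TRACER_BUFFER_CHUNK)
#define TRACER_BLOCK_SIZE_BYTE	256
#define TRACES_PER_BLOCK	32

int main(void)
{
	unsigned int block_count = TRACE_BUFFER_SIZE_BYTE / TRACER_BLOCK_SIZE_BYTE;
	unsigned int ci = block_count - 1;	/* consumer sitting on the last block */

	/* 64 * 4096 = 256 KB total, 1024 blocks, 8 bytes per trace event */
	printf("buffer=%d B, blocks=%u, event=%d B\n",
	       TRACE_BUFFER_SIZE_BYTE, block_count,
	       TRACER_BLOCK_SIZE_BYTE / TRACES_PER_BLOCK);

	/* block_count is a power of two, so advancing the consumer index is
	 * a mask, not a modulo: (ci + 1) & (block_count - 1)
	 */
	ci = (ci + 1) & (block_count - 1);
	printf("wrapped consumer index=%u\n", ci);	/* prints 0 */
	return 0;
}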
+static void mlx5_fw_tracer_destroy_log_buf(struct mlx5_fw_tracer *tracer) +{ + struct mlx5_core_dev *dev = tracer->dev; + struct device *ddev = &dev->pdev->dev; + + if (!tracer->buff.log_buf) + return; + + dma_unmap_single(ddev, tracer->buff.dma, tracer->buff.size, DMA_FROM_DEVICE); + free_pages((unsigned long)tracer->buff.log_buf, get_order(tracer->buff.size)); +} + +static int mlx5_fw_tracer_create_mkey(struct mlx5_fw_tracer *tracer) +{ + struct mlx5_core_dev *dev = tracer->dev; + int err, inlen, i; + __be64 *mtt; + void *mkc; + u32 *in; + + inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + + sizeof(*mtt) * round_up(TRACER_BUFFER_PAGE_NUM, 2); + + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + + MLX5_SET(create_mkey_in, in, translations_octword_actual_size, + DIV_ROUND_UP(TRACER_BUFFER_PAGE_NUM, 2)); + mtt = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt); + for (i = 0 ; i < TRACER_BUFFER_PAGE_NUM ; i++) + mtt[i] = cpu_to_be64(tracer->buff.dma + i * PAGE_SIZE); + + mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); + MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT); + MLX5_SET(mkc, mkc, lr, 1); + MLX5_SET(mkc, mkc, lw, 1); + MLX5_SET(mkc, mkc, pd, tracer->buff.pdn); + MLX5_SET(mkc, mkc, bsf_octword_size, 0); + MLX5_SET(mkc, mkc, qpn, 0xffffff); + MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT); + MLX5_SET(mkc, mkc, translations_octword_size, + DIV_ROUND_UP(TRACER_BUFFER_PAGE_NUM, 2)); + MLX5_SET64(mkc, mkc, start_addr, tracer->buff.dma); + MLX5_SET64(mkc, mkc, len, tracer->buff.size); + err = mlx5_core_create_mkey(dev, &tracer->buff.mkey, in, inlen); + if (err) + mlx5_core_warn(dev, "FWTracer: Failed to create mkey, %d\n", err); + + kvfree(in); + + return err; +} + +static void mlx5_fw_tracer_free_strings_db(struct mlx5_fw_tracer *tracer) +{ + u32 num_string_db = tracer->str_db.num_string_db; + int i; + + for (i = 0; i < num_string_db; i++) { + kfree(tracer->str_db.buffer[i]); + tracer->str_db.buffer[i] = NULL; + } +} + +static int mlx5_fw_tracer_allocate_strings_db(struct mlx5_fw_tracer *tracer) +{ + u32 *string_db_size_out = tracer->str_db.size_out; + u32 num_string_db = tracer->str_db.num_string_db; + int i; + + for (i = 0; i < num_string_db; i++) { + tracer->str_db.buffer[i] = kzalloc(string_db_size_out[i], GFP_KERNEL); + if (!tracer->str_db.buffer[i]) + goto free_strings_db; + } + + return 0; + +free_strings_db: + mlx5_fw_tracer_free_strings_db(tracer); + return -ENOMEM; +} + +static void mlx5_tracer_read_strings_db(struct work_struct *work) +{ + struct mlx5_fw_tracer *tracer = container_of(work, struct mlx5_fw_tracer, + read_fw_strings_work); + u32 num_of_reads, num_string_db = tracer->str_db.num_string_db; + struct mlx5_core_dev *dev = tracer->dev; + u32 in[MLX5_ST_SZ_DW(mtrc_cap)] = {0}; + u32 leftovers, offset; + int err = 0, i, j; + u32 *out, outlen; + void *out_value; + + outlen = MLX5_ST_SZ_BYTES(mtrc_stdb) + STRINGS_DB_READ_SIZE_BYTES; + out = kzalloc(outlen, GFP_KERNEL); + if (!out) { + err = -ENOMEM; + goto out; + } + + for (i = 0; i < num_string_db; i++) { + offset = 0; + MLX5_SET(mtrc_stdb, in, string_db_index, i); + num_of_reads = tracer->str_db.size_out[i] / + STRINGS_DB_READ_SIZE_BYTES; + leftovers = (tracer->str_db.size_out[i] % + STRINGS_DB_READ_SIZE_BYTES) / + STRINGS_DB_LEFTOVER_SIZE_BYTES; + + MLX5_SET(mtrc_stdb, in, read_size, STRINGS_DB_READ_SIZE_BYTES); + for (j = 0; j < num_of_reads; j++) { + MLX5_SET(mtrc_stdb, in, start_offset, offset); + + err = mlx5_core_access_reg(dev, in, sizeof(in), out, + outlen,
MLX5_REG_MTRC_STDB, + 0, 1); + if (err) { + mlx5_core_dbg(dev, "FWTracer: Failed to read strings DB %d\n", + err); + goto out_free; + } + + out_value = MLX5_ADDR_OF(mtrc_stdb, out, string_db_data); + memcpy(tracer->str_db.buffer[i] + offset, out_value, + STRINGS_DB_READ_SIZE_BYTES); + offset += STRINGS_DB_READ_SIZE_BYTES; + } + + /* Strings database is aligned to 64, need to read leftovers*/ + MLX5_SET(mtrc_stdb, in, read_size, + STRINGS_DB_LEFTOVER_SIZE_BYTES); + for (j = 0; j < leftovers; j++) { + MLX5_SET(mtrc_stdb, in, start_offset, offset); + + err = mlx5_core_access_reg(dev, in, sizeof(in), out, + outlen, MLX5_REG_MTRC_STDB, + 0, 1); + if (err) { + mlx5_core_dbg(dev, "FWTracer: Failed to read strings DB %d\n", + err); + goto out_free; + } + + out_value = MLX5_ADDR_OF(mtrc_stdb, out, string_db_data); + memcpy(tracer->str_db.buffer[i] + offset, out_value, + STRINGS_DB_LEFTOVER_SIZE_BYTES); + offset += STRINGS_DB_LEFTOVER_SIZE_BYTES; + } + } + + tracer->str_db.loaded = true; + +out_free: + kfree(out); +out: + return; +} + +static void mlx5_fw_tracer_arm(struct mlx5_core_dev *dev) +{ + u32 out[MLX5_ST_SZ_DW(mtrc_ctrl)] = {0}; + u32 in[MLX5_ST_SZ_DW(mtrc_ctrl)] = {0}; + int err; + + MLX5_SET(mtrc_ctrl, in, arm_event, 1); + + err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), + MLX5_REG_MTRC_CTRL, 0, 1); + if (err) + mlx5_core_warn(dev, "FWTracer: Failed to arm tracer event %d\n", err); +} + +static const char *VAL_PARM = "%llx"; +static const char *REPLACE_64_VAL_PARM = "%x%x"; +static const char *PARAM_CHAR = "%"; + +static int mlx5_tracer_message_hash(u32 message_id) +{ + return jhash_1word(message_id, 0) & (MESSAGE_HASH_SIZE - 1); +} + +static struct tracer_string_format *mlx5_tracer_message_insert(struct mlx5_fw_tracer *tracer, + struct tracer_event *tracer_event) +{ + struct hlist_head *head = + &tracer->hash[mlx5_tracer_message_hash(tracer_event->string_event.tmsn)]; + struct tracer_string_format *cur_string; + + cur_string = kzalloc(sizeof(*cur_string), GFP_KERNEL); + if (!cur_string) + return NULL; + + hlist_add_head(&cur_string->hlist, head); + + return cur_string; +} + +static struct tracer_string_format *mlx5_tracer_get_string(struct mlx5_fw_tracer *tracer, + struct tracer_event *tracer_event) +{ + struct tracer_string_format *cur_string; + u32 str_ptr, offset; + int i; + + str_ptr = tracer_event->string_event.string_param; + + for (i = 0; i < tracer->str_db.num_string_db; i++) { + if (str_ptr > tracer->str_db.base_address_out[i] && + str_ptr < tracer->str_db.base_address_out[i] + + tracer->str_db.size_out[i]) { + offset = str_ptr - tracer->str_db.base_address_out[i]; + /* add it to the hash */ + cur_string = mlx5_tracer_message_insert(tracer, tracer_event); + if (!cur_string) + return NULL; + cur_string->string = (char *)(tracer->str_db.buffer[i] + + offset); + return cur_string; + } + } + + return NULL; +} + +static void mlx5_tracer_clean_message(struct tracer_string_format *str_frmt) +{ + hlist_del(&str_frmt->hlist); + kfree(str_frmt); +} + +static int mlx5_tracer_get_num_of_params(char *str) +{ + char *substr, *pstr = str; + int num_of_params = 0; + + /* replace %llx with %x%x */ + substr = strstr(pstr, VAL_PARM); + while (substr) { + memcpy(substr, REPLACE_64_VAL_PARM, 4); + pstr = substr; + substr = strstr(pstr, VAL_PARM); + } + + /* count all the % characters */ + substr = strstr(str, PARAM_CHAR); + while (substr) { + num_of_params += 1; + str = substr + 1; + substr = strstr(str, PARAM_CHAR); + } + + return num_of_params; +} + +static struct 
tracer_string_format *mlx5_tracer_message_find(struct hlist_head *head, + u8 event_id, u32 tmsn) +{ + struct tracer_string_format *message; + + hlist_for_each_entry(message, head, hlist) + if (message->event_id == event_id && message->tmsn == tmsn) + return message; + + return NULL; +} + +static struct tracer_string_format *mlx5_tracer_message_get(struct mlx5_fw_tracer *tracer, + struct tracer_event *tracer_event) +{ + struct hlist_head *head = + &tracer->hash[mlx5_tracer_message_hash(tracer_event->string_event.tmsn)]; + + return mlx5_tracer_message_find(head, tracer_event->event_id, tracer_event->string_event.tmsn); +} + +static void poll_trace(struct mlx5_fw_tracer *tracer, + struct tracer_event *tracer_event, u64 *trace) +{ + u32 timestamp_low, timestamp_mid, timestamp_high, urts; + + tracer_event->event_id = MLX5_GET(tracer_event, trace, event_id); + tracer_event->lost_event = MLX5_GET(tracer_event, trace, lost); + + switch (tracer_event->event_id) { + case TRACER_EVENT_TYPE_TIMESTAMP: + tracer_event->type = TRACER_EVENT_TYPE_TIMESTAMP; + urts = MLX5_GET(tracer_timestamp_event, trace, urts); + if (tracer->trc_ver == 0) + tracer_event->timestamp_event.unreliable = !!(urts >> 2); + else + tracer_event->timestamp_event.unreliable = !!(urts & 1); + + timestamp_low = MLX5_GET(tracer_timestamp_event, + trace, timestamp7_0); + timestamp_mid = MLX5_GET(tracer_timestamp_event, + trace, timestamp39_8); + timestamp_high = MLX5_GET(tracer_timestamp_event, + trace, timestamp52_40); + + tracer_event->timestamp_event.timestamp = + ((u64)timestamp_high << 40) | + ((u64)timestamp_mid << 8) | + (u64)timestamp_low; + break; + default: + if (tracer_event->event_id >= tracer->str_db.first_string_trace || + tracer_event->event_id <= tracer->str_db.first_string_trace + + tracer->str_db.num_string_trace) { + tracer_event->type = TRACER_EVENT_TYPE_STRING; + tracer_event->string_event.timestamp = + MLX5_GET(tracer_string_event, trace, timestamp); + tracer_event->string_event.string_param = + MLX5_GET(tracer_string_event, trace, string_param); + tracer_event->string_event.tmsn = + MLX5_GET(tracer_string_event, trace, tmsn); + tracer_event->string_event.tdsn = + MLX5_GET(tracer_string_event, trace, tdsn); + } else { + tracer_event->type = TRACER_EVENT_TYPE_UNRECOGNIZED; + } + break; + } +} + +static u64 get_block_timestamp(struct mlx5_fw_tracer *tracer, u64 *ts_event) +{ + struct tracer_event tracer_event; + u8 event_id; + + event_id = MLX5_GET(tracer_event, ts_event, event_id); + + if (event_id == TRACER_EVENT_TYPE_TIMESTAMP) + poll_trace(tracer, &tracer_event, ts_event); + else + tracer_event.timestamp_event.timestamp = 0; + + return tracer_event.timestamp_event.timestamp; +} + +static void mlx5_fw_tracer_clean_print_hash(struct mlx5_fw_tracer *tracer) +{ + struct tracer_string_format *str_frmt; + struct hlist_node *n; + int i; + + for (i = 0; i < MESSAGE_HASH_SIZE; i++) { + hlist_for_each_entry_safe(str_frmt, n, &tracer->hash[i], hlist) + mlx5_tracer_clean_message(str_frmt); + } +} + +static void mlx5_fw_tracer_clean_ready_list(struct mlx5_fw_tracer *tracer) +{ + struct tracer_string_format *str_frmt, *tmp_str; + + list_for_each_entry_safe(str_frmt, tmp_str, &tracer->ready_strings_list, + list) + list_del(&str_frmt->list); +} + +static void mlx5_tracer_print_trace(struct tracer_string_format *str_frmt, + struct mlx5_core_dev *dev, + u64 trace_timestamp) +{ + char tmp[512]; + + snprintf(tmp, sizeof(tmp), str_frmt->string, + str_frmt->params[0], + str_frmt->params[1], + str_frmt->params[2], + 
str_frmt->params[3], + str_frmt->params[4], + str_frmt->params[5], + str_frmt->params[6]); + + trace_mlx5_fw(dev->tracer, trace_timestamp, str_frmt->lost, + str_frmt->event_id, tmp); + + /* remove it from hash */ + mlx5_tracer_clean_message(str_frmt); +} + +static int mlx5_tracer_handle_string_trace(struct mlx5_fw_tracer *tracer, + struct tracer_event *tracer_event) +{ + struct tracer_string_format *cur_string; + + if (tracer_event->string_event.tdsn == 0) { + cur_string = mlx5_tracer_get_string(tracer, tracer_event); + if (!cur_string) + return -1; + + cur_string->num_of_params = mlx5_tracer_get_num_of_params(cur_string->string); + cur_string->last_param_num = 0; + cur_string->event_id = tracer_event->event_id; + cur_string->tmsn = tracer_event->string_event.tmsn; + cur_string->timestamp = tracer_event->string_event.timestamp; + cur_string->lost = tracer_event->lost_event; + if (cur_string->num_of_params == 0) /* trace with no params */ + list_add_tail(&cur_string->list, &tracer->ready_strings_list); + } else { + cur_string = mlx5_tracer_message_get(tracer, tracer_event); + if (!cur_string) { + pr_debug("%s Got string event for unknown string tmsn: %d\n", + __func__, tracer_event->string_event.tmsn); + return -1; + } + cur_string->last_param_num += 1; + if (cur_string->last_param_num > TRACER_MAX_PARAMS) { + pr_debug("%s Number of params exceeds the max (%d)\n", + __func__, TRACER_MAX_PARAMS); + list_add_tail(&cur_string->list, &tracer->ready_strings_list); + return 0; + } + /* keep the new parameter */ + cur_string->params[cur_string->last_param_num - 1] = + tracer_event->string_event.string_param; + if (cur_string->last_param_num == cur_string->num_of_params) + list_add_tail(&cur_string->list, &tracer->ready_strings_list); + } + + return 0; +} + +static void mlx5_tracer_handle_timestamp_trace(struct mlx5_fw_tracer *tracer, + struct tracer_event *tracer_event) +{ + struct tracer_timestamp_event timestamp_event = + tracer_event->timestamp_event; + struct tracer_string_format *str_frmt, *tmp_str; + struct mlx5_core_dev *dev = tracer->dev; + u64 trace_timestamp; + + list_for_each_entry_safe(str_frmt, tmp_str, &tracer->ready_strings_list, list) { + list_del(&str_frmt->list); + if (str_frmt->timestamp < (timestamp_event.timestamp & MASK_6_0)) + trace_timestamp = (timestamp_event.timestamp & MASK_52_7) | + (str_frmt->timestamp & MASK_6_0); + else + trace_timestamp = ((timestamp_event.timestamp & MASK_52_7) - 1) | + (str_frmt->timestamp & MASK_6_0); + + mlx5_tracer_print_trace(str_frmt, dev, trace_timestamp); + } +} + +static int mlx5_tracer_handle_trace(struct mlx5_fw_tracer *tracer, + struct tracer_event *tracer_event) +{ + if (tracer_event->type == TRACER_EVENT_TYPE_STRING) { + mlx5_tracer_handle_string_trace(tracer, tracer_event); + } else if (tracer_event->type == TRACER_EVENT_TYPE_TIMESTAMP) { + if (!tracer_event->timestamp_event.unreliable) + mlx5_tracer_handle_timestamp_trace(tracer, tracer_event); + } else { + pr_debug("%s Got unrecognised type %d for parsing, exiting..\n", + __func__, tracer_event->type); + } + return 0; +} + +static void mlx5_fw_tracer_handle_traces(struct work_struct *work) +{ + struct mlx5_fw_tracer *tracer = + container_of(work, struct mlx5_fw_tracer, handle_traces_work); + u64 block_timestamp, last_block_timestamp, tmp_trace_block[TRACES_PER_BLOCK]; + u32 block_count, start_offset, prev_start_offset, prev_consumer_index; + u32 trace_event_size = MLX5_ST_SZ_BYTES(tracer_event); + struct mlx5_core_dev *dev = tracer->dev; + struct tracer_event tracer_event; + int i;
+ + mlx5_core_dbg(dev, "FWTracer: Handle Trace event, owner=(%d)\n", tracer->owner); + if (!tracer->owner) + return; + + block_count = tracer->buff.size / TRACER_BLOCK_SIZE_BYTE; + start_offset = tracer->buff.consumer_index * TRACER_BLOCK_SIZE_BYTE; + + /* Copy the block to a local buffer to avoid HW override while being processed */ + memcpy(tmp_trace_block, tracer->buff.log_buf + start_offset, + TRACER_BLOCK_SIZE_BYTE); + + block_timestamp = + get_block_timestamp(tracer, &tmp_trace_block[TRACES_PER_BLOCK - 1]); + + while (block_timestamp > tracer->last_timestamp) { + /* Check block override if it's not the first block */ + if (!tracer->last_timestamp) { + u64 *ts_event; + /* To avoid a block override by the HW in case of buffer + * wraparound, the timestamp of the previous block + * should be compared to the last timestamp handled + * by the driver. + */ + prev_consumer_index = + (tracer->buff.consumer_index - 1) & (block_count - 1); + prev_start_offset = prev_consumer_index * TRACER_BLOCK_SIZE_BYTE; + + ts_event = tracer->buff.log_buf + prev_start_offset + + (TRACES_PER_BLOCK - 1) * trace_event_size; + last_block_timestamp = get_block_timestamp(tracer, ts_event); + /* If the previous timestamp differs from the last stored + * timestamp, then there is a good chance that the + * current buffer was overwritten and therefore should + * not be parsed. + */ + if (tracer->last_timestamp != last_block_timestamp) { + mlx5_core_warn(dev, "FWTracer: Events were lost\n"); + tracer->last_timestamp = block_timestamp; + tracer->buff.consumer_index = + (tracer->buff.consumer_index + 1) & (block_count - 1); + break; + } + } + + /* Parse events */ + for (i = 0; i < TRACES_PER_BLOCK ; i++) { + poll_trace(tracer, &tracer_event, &tmp_trace_block[i]); + mlx5_tracer_handle_trace(tracer, &tracer_event); + } + + tracer->buff.consumer_index = + (tracer->buff.consumer_index + 1) & (block_count - 1); + + tracer->last_timestamp = block_timestamp; + start_offset = tracer->buff.consumer_index * TRACER_BLOCK_SIZE_BYTE; + memcpy(tmp_trace_block, tracer->buff.log_buf + start_offset, + TRACER_BLOCK_SIZE_BYTE); + block_timestamp = get_block_timestamp(tracer, + &tmp_trace_block[TRACES_PER_BLOCK - 1]); + } + + mlx5_fw_tracer_arm(dev); +} + +static int mlx5_fw_tracer_set_mtrc_conf(struct mlx5_fw_tracer *tracer) +{ + struct mlx5_core_dev *dev = tracer->dev; + u32 out[MLX5_ST_SZ_DW(mtrc_conf)] = {0}; + u32 in[MLX5_ST_SZ_DW(mtrc_conf)] = {0}; + int err; + + MLX5_SET(mtrc_conf, in, trace_mode, TRACE_TO_MEMORY); + MLX5_SET(mtrc_conf, in, log_trace_buffer_size, + ilog2(TRACER_BUFFER_PAGE_NUM)); + MLX5_SET(mtrc_conf, in, trace_mkey, tracer->buff.mkey.key); + + err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), + MLX5_REG_MTRC_CONF, 0, 1); + if (err) + mlx5_core_warn(dev, "FWTracer: Failed to set tracer configurations %d\n", err); + + return err; +} + +static int mlx5_fw_tracer_set_mtrc_ctrl(struct mlx5_fw_tracer *tracer, u8 status, u8 arm) +{ + struct mlx5_core_dev *dev = tracer->dev; + u32 out[MLX5_ST_SZ_DW(mtrc_ctrl)] = {0}; + u32 in[MLX5_ST_SZ_DW(mtrc_ctrl)] = {0}; + int err; + + MLX5_SET(mtrc_ctrl, in, modify_field_select, TRACE_STATUS); + MLX5_SET(mtrc_ctrl, in, trace_status, status); + MLX5_SET(mtrc_ctrl, in, arm_event, arm); + + err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), + MLX5_REG_MTRC_CTRL, 0, 1); + + if (!err && status) + tracer->last_timestamp = 0; + + return err; +} + +static int mlx5_fw_tracer_start(struct mlx5_fw_tracer *tracer) +{ + struct mlx5_core_dev *dev = tracer->dev; + int err; + + err
= mlx5_fw_tracer_ownership_acquire(tracer); + if (err) { + mlx5_core_dbg(dev, "FWTracer: Ownership was not granted %d\n", err); + /* Don't fail since ownership can be acquired on a later FW event */ + return 0; + } + + err = mlx5_fw_tracer_set_mtrc_conf(tracer); + if (err) { + mlx5_core_warn(dev, "FWTracer: Failed to set tracer configuration %d\n", err); + goto release_ownership; + } + + /* enable tracer & trace events */ + err = mlx5_fw_tracer_set_mtrc_ctrl(tracer, 1, 1); + if (err) { + mlx5_core_warn(dev, "FWTracer: Failed to enable tracer %d\n", err); + goto release_ownership; + } + + mlx5_core_dbg(dev, "FWTracer: Ownership granted and active\n"); + return 0; + +release_ownership: + mlx5_fw_tracer_ownership_release(tracer); + return err; +} + +static void mlx5_fw_tracer_ownership_change(struct work_struct *work) +{ + struct mlx5_fw_tracer *tracer = + container_of(work, struct mlx5_fw_tracer, ownership_change_work); + + mlx5_core_dbg(tracer->dev, "FWTracer: ownership changed, current=(%d)\n", tracer->owner); + if (tracer->owner) { + tracer->owner = false; + tracer->buff.consumer_index = 0; + return; + } + + mlx5_fw_tracer_start(tracer); +} + +/* Create software resources (Buffers, etc ..) */ +struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev) +{ + struct mlx5_fw_tracer *tracer = NULL; + int err; + + if (!MLX5_CAP_MCAM_REG(dev, tracer_registers)) { + mlx5_core_dbg(dev, "FWTracer: Tracer capability not present\n"); + return NULL; + } + + tracer = kzalloc(sizeof(*tracer), GFP_KERNEL); + if (!tracer) + return ERR_PTR(-ENOMEM); + + tracer->work_queue = create_singlethread_workqueue("mlx5_fw_tracer"); + if (!tracer->work_queue) { + err = -ENOMEM; + goto free_tracer; + } + + tracer->dev = dev; + + INIT_LIST_HEAD(&tracer->ready_strings_list); + INIT_WORK(&tracer->ownership_change_work, mlx5_fw_tracer_ownership_change); + INIT_WORK(&tracer->read_fw_strings_work, mlx5_tracer_read_strings_db); + INIT_WORK(&tracer->handle_traces_work, mlx5_fw_tracer_handle_traces); + + + err = mlx5_query_mtrc_caps(tracer); + if (err) { + mlx5_core_dbg(dev, "FWTracer: Failed to query capabilities %d\n", err); + goto destroy_workqueue; + } + + err = mlx5_fw_tracer_create_log_buf(tracer); + if (err) { + mlx5_core_warn(dev, "FWTracer: Create log buffer failed %d\n", err); + goto destroy_workqueue; + } + + err = mlx5_fw_tracer_allocate_strings_db(tracer); + if (err) { + mlx5_core_warn(dev, "FWTracer: Allocate strings database failed %d\n", err); + goto free_log_buf; + } + + mlx5_core_dbg(dev, "FWTracer: Tracer created\n"); + + return tracer; + +free_log_buf: + mlx5_fw_tracer_destroy_log_buf(tracer); +destroy_workqueue: + tracer->dev = NULL; + destroy_workqueue(tracer->work_queue); +free_tracer: + kfree(tracer); + return ERR_PTR(err); +} + +/* Create HW resources + start tracer + * must be called before Async EQ is created + */ +int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer) +{ + struct mlx5_core_dev *dev; + int err; + + if (IS_ERR_OR_NULL(tracer)) + return 0; + + dev = tracer->dev; + + if (!tracer->str_db.loaded) + queue_work(tracer->work_queue, &tracer->read_fw_strings_work); + + err = mlx5_core_alloc_pd(dev, &tracer->buff.pdn); + if (err) { + mlx5_core_warn(dev, "FWTracer: Failed to allocate PD %d\n", err); + return err; + } + + err = mlx5_fw_tracer_create_mkey(tracer); + if (err) { + mlx5_core_warn(dev, "FWTracer: Failed to create mkey %d\n", err); + goto err_dealloc_pd; + } + + mlx5_fw_tracer_start(tracer); + + return 0; + +err_dealloc_pd: + mlx5_core_dealloc_pd(dev, tracer->buff.pdn); + 
return err; +} + +/* Stop tracer + Cleanup HW resources + * must be called after Async EQ is destroyed + */ +void mlx5_fw_tracer_cleanup(struct mlx5_fw_tracer *tracer) +{ + if (IS_ERR_OR_NULL(tracer)) + return; + + mlx5_core_dbg(tracer->dev, "FWTracer: Cleanup, is owner ? (%d)\n", + tracer->owner); + + cancel_work_sync(&tracer->ownership_change_work); + cancel_work_sync(&tracer->handle_traces_work); + + if (tracer->owner) + mlx5_fw_tracer_ownership_release(tracer); + + mlx5_core_destroy_mkey(tracer->dev, &tracer->buff.mkey); + mlx5_core_dealloc_pd(tracer->dev, tracer->buff.pdn); +} + +/* Free software resources (Buffers, etc ..) */ +void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer) +{ + if (IS_ERR_OR_NULL(tracer)) + return; + + mlx5_core_dbg(tracer->dev, "FWTracer: Destroy\n"); + + cancel_work_sync(&tracer->read_fw_strings_work); + mlx5_fw_tracer_clean_ready_list(tracer); + mlx5_fw_tracer_clean_print_hash(tracer); + mlx5_fw_tracer_free_strings_db(tracer); + mlx5_fw_tracer_destroy_log_buf(tracer); + flush_workqueue(tracer->work_queue); + destroy_workqueue(tracer->work_queue); + kfree(tracer); +} + +void mlx5_fw_tracer_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe) +{ + struct mlx5_fw_tracer *tracer = dev->tracer; + + if (!tracer) + return; + + switch (eqe->sub_type) { + case MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE: + if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) + queue_work(tracer->work_queue, &tracer->ownership_change_work); + break; + case MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE: + if (likely(tracer->str_db.loaded)) + queue_work(tracer->work_queue, &tracer->handle_traces_work); + break; + default: + mlx5_core_dbg(dev, "FWTracer: Event with unrecognized subtype: sub_type %d\n", + eqe->sub_type); + } +} + +EXPORT_TRACEPOINT_SYMBOL(mlx5_fw); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h new file mode 100644 index 000000000000..0347f2dd5cee --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h @@ -0,0 +1,175 @@ +/* + * Copyright (c) 2018, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __LIB_TRACER_H__ +#define __LIB_TRACER_H__ + +#include <linux/mlx5/driver.h> +#include "mlx5_core.h" + +#define STRINGS_DB_SECTIONS_NUM 8 +#define STRINGS_DB_READ_SIZE_BYTES 256 +#define STRINGS_DB_LEFTOVER_SIZE_BYTES 64 +#define TRACER_BUFFER_PAGE_NUM 64 +#define TRACER_BUFFER_CHUNK 4096 +#define TRACE_BUFFER_SIZE_BYTE (TRACER_BUFFER_PAGE_NUM * TRACER_BUFFER_CHUNK) + +#define TRACER_BLOCK_SIZE_BYTE 256 +#define TRACES_PER_BLOCK 32 + +#define TRACER_MAX_PARAMS 7 +#define MESSAGE_HASH_BITS 6 +#define MESSAGE_HASH_SIZE BIT(MESSAGE_HASH_BITS) + +#define MASK_52_7 (0x1FFFFFFFFFFF80) +#define MASK_6_0 (0x7F) + +struct mlx5_fw_tracer { + struct mlx5_core_dev *dev; + bool owner; + u8 trc_ver; + struct workqueue_struct *work_queue; + struct work_struct ownership_change_work; + struct work_struct read_fw_strings_work; + + /* Strings DB */ + struct { + u8 first_string_trace; + u8 num_string_trace; + u32 num_string_db; + u32 base_address_out[STRINGS_DB_SECTIONS_NUM]; + u32 size_out[STRINGS_DB_SECTIONS_NUM]; + void *buffer[STRINGS_DB_SECTIONS_NUM]; + bool loaded; + } str_db; + + /* Log Buffer */ + struct { + u32 pdn; + void *log_buf; + dma_addr_t dma; + u32 size; + struct mlx5_core_mkey mkey; + u32 consumer_index; + } buff; + + u64 last_timestamp; + struct work_struct handle_traces_work; + struct hlist_head hash[MESSAGE_HASH_SIZE]; + struct list_head ready_strings_list; +}; + +struct tracer_string_format { + char *string; + int params[TRACER_MAX_PARAMS]; + int num_of_params; + int last_param_num; + u8 event_id; + u32 tmsn; + struct hlist_node hlist; + struct list_head list; + u32 timestamp; + bool lost; +}; + +enum mlx5_fw_tracer_ownership_state { + MLX5_FW_TRACER_RELEASE_OWNERSHIP, + MLX5_FW_TRACER_ACQUIRE_OWNERSHIP, +}; + +enum tracer_ctrl_fields_select { + TRACE_STATUS = 1 << 0, +}; + +enum tracer_event_type { + TRACER_EVENT_TYPE_STRING, + TRACER_EVENT_TYPE_TIMESTAMP = 0xFF, + TRACER_EVENT_TYPE_UNRECOGNIZED, +}; + +enum tracing_mode { + TRACE_TO_MEMORY = 1 << 0, +}; + +struct tracer_timestamp_event { + u64 timestamp; + u8 unreliable; +}; + +struct tracer_string_event { + u32 timestamp; + u32 tmsn; + u32 tdsn; + u32 string_param; +}; + +struct tracer_event { + bool lost_event; + u32 type; + u8 event_id; + union { + struct tracer_string_event string_event; + struct tracer_timestamp_event timestamp_event; + }; +}; + +struct mlx5_ifc_tracer_event_bits { + u8 lost[0x1]; + u8 timestamp[0x7]; + u8 event_id[0x8]; + u8 event_data[0x30]; +}; + +struct mlx5_ifc_tracer_string_event_bits { + u8 lost[0x1]; + u8 timestamp[0x7]; + u8 event_id[0x8]; + u8 tmsn[0xd]; + u8 tdsn[0x3]; + u8 string_param[0x20]; +}; + +struct mlx5_ifc_tracer_timestamp_event_bits { + u8 timestamp7_0[0x8]; + u8 event_id[0x8]; + u8 urts[0x3]; + u8 timestamp52_40[0xd]; + u8 timestamp39_8[0x20]; +}; + +struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev); +int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer); +void mlx5_fw_tracer_cleanup(struct mlx5_fw_tracer *tracer); +void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer); +void mlx5_fw_tracer_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe); + +#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h new file mode 100644 index 000000000000..83f90e9aff45 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2018, Mellanox Technologies. All rights reserved. 
+ * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#if !defined(__LIB_TRACER_TRACEPOINT_H__) || defined(TRACE_HEADER_MULTI_READ) +#define __LIB_TRACER_TRACEPOINT_H__ + +#include <linux/tracepoint.h> +#include "fw_tracer.h" + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM mlx5 + +/* Tracepoint for FWTracer messages: */ +TRACE_EVENT(mlx5_fw, + TP_PROTO(const struct mlx5_fw_tracer *tracer, u64 trace_timestamp, + bool lost, u8 event_id, const char *msg), + + TP_ARGS(tracer, trace_timestamp, lost, event_id, msg), + + TP_STRUCT__entry( + __string(dev_name, dev_name(&tracer->dev->pdev->dev)) + __field(u64, trace_timestamp) + __field(bool, lost) + __field(u8, event_id) + __string(msg, msg) + ), + + TP_fast_assign( + __assign_str(dev_name, dev_name(&tracer->dev->pdev->dev)); + __entry->trace_timestamp = trace_timestamp; + __entry->lost = lost; + __entry->event_id = event_id; + __assign_str(msg, msg); + ), + + TP_printk("%s [0x%llx] %d [0x%x] %s", + __get_str(dev_name), + __entry->trace_timestamp, + __entry->lost, __entry->event_id, + __get_str(msg)) +); + +#endif + +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_PATH ./diag +#define TRACE_INCLUDE_FILE fw_tracer_tracepoint +#include <trace/define_trace.h> diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index eb9eb7aa953a..c7ed3d20fd54 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -137,7 +137,6 @@ struct page_pool; #define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE >> 1) #define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC) #define MLX5E_TX_CQ_POLL_BUDGET 128 -#define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */ #define MLX5E_SQ_RECOVER_MIN_INTERVAL 500 /* msecs */ #define MLX5E_UMR_WQE_INLINE_SZ \ @@ -148,10 +147,6 @@ struct page_pool; (DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_BB)) #define MLX5E_ICOSQ_MAX_WQEBBS MLX5E_UMR_WQEBBS -#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN) -#define MLX5E_XDP_TX_DS_COUNT \ - ((sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */) - #define MLX5E_NUM_MAIN_GROUPS 9 #define MLX5E_MSG_LEVEL NETIF_MSG_LINK @@ -349,6 +344,7 @@ enum { MLX5E_SQ_STATE_IPSEC, 
MLX5E_SQ_STATE_AM, MLX5E_SQ_STATE_TLS, + MLX5E_SQ_STATE_REDIRECT, }; struct mlx5e_sq_wqe_info { @@ -369,16 +365,14 @@ struct mlx5e_txqsq { struct mlx5e_cq cq; - /* write@xmit, read@completion */ - struct { - struct mlx5e_sq_dma *dma_fifo; - struct mlx5e_tx_wqe_info *wqe_info; - } db; - /* read only */ struct mlx5_wq_cyc wq; u32 dma_fifo_mask; struct mlx5e_sq_stats *stats; + struct { + struct mlx5e_sq_dma *dma_fifo; + struct mlx5e_tx_wqe_info *wqe_info; + } db; void __iomem *uar_map; struct netdev_queue *txq; u32 sqn; @@ -400,30 +394,43 @@ struct mlx5e_txqsq { } recover; } ____cacheline_aligned_in_smp; +struct mlx5e_dma_info { + struct page *page; + dma_addr_t addr; +}; + +struct mlx5e_xdp_info { + struct xdp_frame *xdpf; + dma_addr_t dma_addr; + struct mlx5e_dma_info di; +}; + struct mlx5e_xdpsq { /* data path */ - /* dirtied @rx completion */ + /* dirtied @completion */ u16 cc; - u16 pc; + bool redirect_flush; - struct mlx5e_cq cq; + /* dirtied @xmit */ + u16 pc ____cacheline_aligned_in_smp; + bool doorbell; - /* write@xmit, read@completion */ - struct { - struct mlx5e_dma_info *di; - bool doorbell; - bool redirect_flush; - } db; + struct mlx5e_cq cq; /* read only */ struct mlx5_wq_cyc wq; + struct mlx5e_xdpsq_stats *stats; + struct { + struct mlx5e_xdp_info *xdpi; + } db; void __iomem *uar_map; u32 sqn; struct device *pdev; __be32 mkey_be; u8 min_inline_mode; unsigned long state; + unsigned int hw_mtu; /* control path */ struct mlx5_wq_ctrl wq_ctrl; @@ -460,11 +467,6 @@ mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n) return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc); } -struct mlx5e_dma_info { - struct page *page; - dma_addr_t addr; -}; - struct mlx5e_wqe_frag_info { struct mlx5e_dma_info *di; u32 offset; @@ -567,7 +569,6 @@ struct mlx5e_rq { /* XDP */ struct bpf_prog *xdp_prog; - unsigned int hw_mtu; struct mlx5e_xdpsq xdpsq; DECLARE_BITMAP(flags, 8); struct page_pool *page_pool; @@ -596,6 +597,9 @@ struct mlx5e_channel { __be32 mkey_be; u8 num_tc; + /* XDP_REDIRECT */ + struct mlx5e_xdpsq xdpsq; + /* data path - accessed per napi poll */ struct irq_desc *irq_desc; struct mlx5e_ch_stats *stats; @@ -618,6 +622,8 @@ struct mlx5e_channel_stats { struct mlx5e_ch_stats ch; struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC]; struct mlx5e_rq_stats rq; + struct mlx5e_xdpsq_stats rq_xdpsq; + struct mlx5e_xdpsq_stats xdpsq; } ____cacheline_aligned_in_smp; enum mlx5e_traffic_types { @@ -648,11 +654,6 @@ enum { MLX5E_STATE_DESTROYING, }; -struct mlx5e_vxlan_db { - spinlock_t lock; /* protect vxlan table */ - struct radix_tree_root tree; -}; - struct mlx5e_l2_rule { u8 addr[ETH_ALEN + 2]; struct mlx5_flow_handle *rule; @@ -810,7 +811,6 @@ struct mlx5e_priv { u32 tx_rates[MLX5E_MAX_NUM_SQS]; struct mlx5e_flow_steering fs; - struct mlx5e_vxlan_db vxlan; struct workqueue_struct *wq; struct work_struct update_carrier_work; @@ -866,7 +866,8 @@ struct mlx5e_profile { void mlx5e_build_ptys2ethtool_map(void); u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback); + struct net_device *sb_dev, + select_queue_fallback_t fallback); netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev); netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5e_tx_wqe *wqe, u16 pi); @@ -876,14 +877,13 @@ void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event); int mlx5e_napi_poll(struct napi_struct *napi, int budget); bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget); int 
mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget); -bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq); void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq); -void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq); bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev); bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev, struct mlx5e_params *params); +void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info); void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info, bool recycle); void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); @@ -892,7 +892,6 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq); bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq); void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix); void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix); -void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi); struct sk_buff * mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, u16 cqe_bcnt, u32 head_offset, u32 page_idx); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c new file mode 100644 index 000000000000..1881468dbcfa --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -0,0 +1,305 @@ +/* + * Copyright (c) 2018, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/bpf_trace.h> +#include "en/xdp.h" + +static inline bool +mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_dma_info *di, + struct xdp_buff *xdp) +{ + struct mlx5e_xdp_info xdpi; + + xdpi.xdpf = convert_to_xdp_frame(xdp); + if (unlikely(!xdpi.xdpf)) + return false; + xdpi.dma_addr = di->addr + (xdpi.xdpf->data - (void *)xdpi.xdpf); + dma_sync_single_for_device(sq->pdev, xdpi.dma_addr, + xdpi.xdpf->len, PCI_DMA_TODEVICE); + xdpi.di = *di; + + return mlx5e_xmit_xdp_frame(sq, &xdpi); +} + +/* returns true if packet was consumed by xdp */ +bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di, + void *va, u16 *rx_headroom, u32 *len) +{ + struct bpf_prog *prog = READ_ONCE(rq->xdp_prog); + struct xdp_buff xdp; + u32 act; + int err; + + if (!prog) + return false; + + xdp.data = va + *rx_headroom; + xdp_set_data_meta_invalid(&xdp); + xdp.data_end = xdp.data + *len; + xdp.data_hard_start = va; + xdp.rxq = &rq->xdp_rxq; + + act = bpf_prog_run_xdp(prog, &xdp); + switch (act) { + case XDP_PASS: + *rx_headroom = xdp.data - xdp.data_hard_start; + *len = xdp.data_end - xdp.data; + return false; + case XDP_TX: + if (unlikely(!mlx5e_xmit_xdp_buff(&rq->xdpsq, di, &xdp))) + goto xdp_abort; + __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */ + return true; + case XDP_REDIRECT: + /* When XDP enabled then page-refcnt==1 here */ + err = xdp_do_redirect(rq->netdev, &xdp, prog); + if (unlikely(err)) + goto xdp_abort; + __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); + rq->xdpsq.redirect_flush = true; + mlx5e_page_dma_unmap(rq, di); + rq->stats->xdp_redirect++; + return true; + default: + bpf_warn_invalid_xdp_action(act); + case XDP_ABORTED: +xdp_abort: + trace_xdp_exception(rq->netdev, prog, act); + case XDP_DROP: + rq->stats->xdp_drop++; + return true; + } +} + +bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi) +{ + struct mlx5_wq_cyc *wq = &sq->wq; + u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); + struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi); + + struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; + struct mlx5_wqe_eth_seg *eseg = &wqe->eth; + struct mlx5_wqe_data_seg *dseg = wqe->data; + + struct xdp_frame *xdpf = xdpi->xdpf; + dma_addr_t dma_addr = xdpi->dma_addr; + unsigned int dma_len = xdpf->len; + + struct mlx5e_xdpsq_stats *stats = sq->stats; + + prefetchw(wqe); + + if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || sq->hw_mtu < dma_len)) { + stats->err++; + return false; + } + + if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1))) { + if (sq->doorbell) { + /* SQ is full, ring doorbell */ + mlx5e_xmit_xdp_doorbell(sq); + sq->doorbell = false; + } + stats->full++; + return false; + } + + cseg->fm_ce_se = 0; + + /* copy the inline part if required */ + if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) { + memcpy(eseg->inline_hdr.start, xdpf->data, MLX5E_XDP_MIN_INLINE); + eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE); + dma_len -= MLX5E_XDP_MIN_INLINE; + dma_addr += MLX5E_XDP_MIN_INLINE; + dseg++; + } + + /* write the dma part */ + dseg->addr = cpu_to_be64(dma_addr); + dseg->byte_count = cpu_to_be32(dma_len); + + cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND); + + /* move page to reference to sq responsibility, + * and mark so it's not put back in page-cache. 
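
In mlx5e_xmit_xdp_buff() above, convert_to_xdp_frame() recycles the packet's own headroom to hold the struct xdp_frame itself, so (xdpf->data - (void *)xdpf) is the payload's byte offset from the start of the buffer that di->addr maps. A hedged restatement of that address math (toy_* naming is illustrative):

#include <linux/dma-mapping.h>
#include <net/xdp.h>

/* Sketch: the frame header sits at the start of the DMA-mapped
 * buffer, so the payload's device address is the mapping base
 * plus data's distance from the frame header.
 */
static dma_addr_t toy_xdp_payload_dma(dma_addr_t buf_dma,
				      const struct xdp_frame *xdpf)
{
	return buf_dma + (xdpf->data - (const void *)xdpf);
}
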
+ */ + sq->db.xdpi[pi] = *xdpi; + sq->pc++; + + sq->doorbell = true; + + stats->xmit++; + return true; +} + +bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq) +{ + struct mlx5e_xdpsq *sq; + struct mlx5_cqe64 *cqe; + struct mlx5e_rq *rq; + bool is_redirect; + u16 sqcc; + int i; + + sq = container_of(cq, struct mlx5e_xdpsq, cq); + + if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))) + return false; + + cqe = mlx5_cqwq_get_cqe(&cq->wq); + if (!cqe) + return false; + + is_redirect = test_bit(MLX5E_SQ_STATE_REDIRECT, &sq->state); + rq = container_of(sq, struct mlx5e_rq, xdpsq); + + /* sq->cc must be updated only after mlx5_cqwq_update_db_record(), + * otherwise a cq overrun may occur + */ + sqcc = sq->cc; + + i = 0; + do { + u16 wqe_counter; + bool last_wqe; + + mlx5_cqwq_pop(&cq->wq); + + wqe_counter = be16_to_cpu(cqe->wqe_counter); + + do { + u16 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc); + struct mlx5e_xdp_info *xdpi = &sq->db.xdpi[ci]; + + last_wqe = (sqcc == wqe_counter); + sqcc++; + + if (is_redirect) { + xdp_return_frame(xdpi->xdpf); + dma_unmap_single(sq->pdev, xdpi->dma_addr, + xdpi->xdpf->len, DMA_TO_DEVICE); + } else { + /* Recycle RX page */ + mlx5e_page_release(rq, &xdpi->di, true); + } + } while (!last_wqe); + } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); + + sq->stats->cqes += i; + + mlx5_cqwq_update_db_record(&cq->wq); + + /* ensure cq space is freed before enabling more cqes */ + wmb(); + + sq->cc = sqcc; + return (i == MLX5E_TX_CQ_POLL_BUDGET); +} + +void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq) +{ + struct mlx5e_rq *rq; + bool is_redirect; + + is_redirect = test_bit(MLX5E_SQ_STATE_REDIRECT, &sq->state); + rq = is_redirect ? NULL : container_of(sq, struct mlx5e_rq, xdpsq); + + while (sq->cc != sq->pc) { + u16 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc); + struct mlx5e_xdp_info *xdpi = &sq->db.xdpi[ci]; + + sq->cc++; + + if (is_redirect) { + xdp_return_frame(xdpi->xdpf); + dma_unmap_single(sq->pdev, xdpi->dma_addr, + xdpi->xdpf->len, DMA_TO_DEVICE); + } else { + /* Recycle RX page */ + mlx5e_page_release(rq, &xdpi->di, false); + } + } +} + +int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, + u32 flags) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5e_xdpsq *sq; + int drops = 0; + int sq_num; + int i; + + if (unlikely(!test_bit(MLX5E_STATE_OPENED, &priv->state))) + return -ENETDOWN; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + sq_num = smp_processor_id(); + + if (unlikely(sq_num >= priv->channels.num)) + return -ENXIO; + + sq = &priv->channels.c[sq_num]->xdpsq; + + if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))) + return -ENETDOWN; + + for (i = 0; i < n; i++) { + struct xdp_frame *xdpf = frames[i]; + struct mlx5e_xdp_info xdpi; + + xdpi.dma_addr = dma_map_single(sq->pdev, xdpf->data, xdpf->len, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(sq->pdev, xdpi.dma_addr))) { + xdp_return_frame_rx_napi(xdpf); + drops++; + continue; + } + + xdpi.xdpf = xdpf; + + if (unlikely(!mlx5e_xmit_xdp_frame(sq, &xdpi))) { + dma_unmap_single(sq->pdev, xdpi.dma_addr, + xdpf->len, DMA_TO_DEVICE); + xdp_return_frame_rx_napi(xdpf); + drops++; + } + } + + if (flags & XDP_XMIT_FLUSH) + mlx5e_xmit_xdp_doorbell(sq); + + return n - drops; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h new file mode 100644 index 000000000000..6dfab045925f --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h @@ -0,0 
+1,63 @@ +/* + * Copyright (c) 2018, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef __MLX5_EN_XDP_H__ +#define __MLX5_EN_XDP_H__ + +#include "en.h" + +#define MLX5E_XDP_MAX_MTU ((int)(PAGE_SIZE - \ + MLX5_SKB_FRAG_SZ(XDP_PACKET_HEADROOM))) +#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN) +#define MLX5E_XDP_TX_DS_COUNT \ + ((sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */) + +bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di, + void *va, u16 *rx_headroom, u32 *len); +bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq); +void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq); + +bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi); +int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, + u32 flags); + +static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq) +{ + struct mlx5_wq_cyc *wq = &sq->wq; + struct mlx5e_tx_wqe *wqe; + u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc - 1); /* last pi */ + + wqe = mlx5_wq_cyc_get_wqe(wq, pi); + + mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &wqe->ctrl); +} + +#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h index f20074dbef32..1dd225380a66 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h @@ -34,19 +34,26 @@ #ifndef __MLX5E_EN_ACCEL_H__ #define __MLX5E_EN_ACCEL_H__ -#ifdef CONFIG_MLX5_ACCEL - #include <linux/skbuff.h> #include <linux/netdevice.h> #include "en_accel/ipsec_rxtx.h" #include "en_accel/tls_rxtx.h" #include "en.h" -static inline struct sk_buff *mlx5e_accel_handle_tx(struct sk_buff *skb, - struct mlx5e_txqsq *sq, - struct net_device *dev, - struct mlx5e_tx_wqe **wqe, - u16 *pi) +static inline void +mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb) +{ + int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr); + + udp_hdr(skb)->len = htons(payload_len); +} + +static inline struct sk_buff * +mlx5e_accel_handle_tx(struct sk_buff *skb, + struct mlx5e_txqsq *sq, + struct net_device *dev, + struct mlx5e_tx_wqe **wqe, + u16 *pi) { #ifdef CONFIG_MLX5_EN_TLS if 
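
mlx5e_udp_gso_handle_tx_skb() above exists because with NETIF_F_GSO_UDP_L4 the stack hands the driver one large skb whose UDP header still carries the aggregate length; every segment the hardware emits carries gso_size bytes of payload, so the header must be rewritten to the per-segment datagram length. A sketch with illustrative numbers:

#include <linux/skbuff.h>
#include <linux/udp.h>

/* Sketch: e.g. a 7000-byte super-skb with gso_size 1400 becomes
 * five wire datagrams, each with udp->len = 1400 + 8 = 1408.
 */
static void toy_udp_gso_fix_len(struct sk_buff *skb)
{
	u16 seg_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);

	udp_hdr(skb)->len = htons(seg_len);
}
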
(test_bit(MLX5E_SQ_STATE_TLS, &sq->state)) { @@ -64,9 +71,10 @@ static inline struct sk_buff *mlx5e_accel_handle_tx(struct sk_buff *skb, } #endif + if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) + mlx5e_udp_gso_handle_tx_skb(skb); + return skb; } -#endif /* CONFIG_MLX5_ACCEL */ - #endif /* __MLX5E_EN_ACCEL_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c index c245d8e78509..128a82b1dbfc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c @@ -37,6 +37,7 @@ #include "en_accel/ipsec_rxtx.h" #include "en_accel/ipsec.h" +#include "accel/accel.h" #include "en.h" enum { @@ -346,19 +347,12 @@ mlx5e_ipsec_build_sp(struct net_device *netdev, struct sk_buff *skb, } struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev, - struct sk_buff *skb) + struct sk_buff *skb, u32 *cqe_bcnt) { struct mlx5e_ipsec_metadata *mdata; - struct ethhdr *old_eth; - struct ethhdr *new_eth; struct xfrm_state *xs; - __be16 *ethtype; - /* Detect inline metadata */ - if (skb->len < ETH_HLEN + MLX5E_METADATA_ETHER_LEN) - return skb; - ethtype = (__be16 *)(skb->data + ETH_ALEN * 2); - if (*ethtype != cpu_to_be16(MLX5E_METADATA_ETHER_TYPE)) + if (!is_metadata_hdr_valid(skb)) return skb; /* Use the metadata */ @@ -369,12 +363,8 @@ struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev, return NULL; } - /* Remove the metadata from the buffer */ - old_eth = (struct ethhdr *)skb->data; - new_eth = (struct ethhdr *)(skb->data + MLX5E_METADATA_ETHER_LEN); - memmove(new_eth, old_eth, 2 * ETH_ALEN); - /* Ethertype is already in its new place */ - skb_pull_inline(skb, MLX5E_METADATA_ETHER_LEN); + remove_metadata_hdr(skb); + *cqe_bcnt -= MLX5E_METADATA_ETHER_LEN; return skb; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h index 2bfbbef1b054..ca47c0540904 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h @@ -41,7 +41,7 @@ #include "en.h" struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev, - struct sk_buff *skb); + struct sk_buff *skb, u32 *cqe_bcnt); void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); void mlx5e_ipsec_inverse_table_init(void); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c index d167845271c3..eddd7702680b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c @@ -110,9 +110,7 @@ static int mlx5e_tls_add(struct net_device *netdev, struct sock *sk, u32 caps = mlx5_accel_tls_device_caps(mdev); int ret = -ENOMEM; void *flow; - - if (direction != TLS_OFFLOAD_CTX_DIR_TX) - return -EINVAL; + u32 swid; flow = kzalloc(MLX5_ST_SZ_BYTES(tls_flow), GFP_KERNEL); if (!flow) @@ -122,18 +120,23 @@ static int mlx5e_tls_add(struct net_device *netdev, struct sock *sk, if (ret) goto free_flow; + ret = mlx5_accel_tls_add_flow(mdev, flow, crypto_info, + start_offload_tcp_sn, &swid, + direction == TLS_OFFLOAD_CTX_DIR_TX); + if (ret < 0) + goto free_flow; + if (direction == TLS_OFFLOAD_CTX_DIR_TX) { - struct mlx5e_tls_offload_context *tx_ctx = + struct mlx5e_tls_offload_context_tx *tx_ctx = mlx5e_get_tls_tx_context(tls_ctx); - u32 swid; - - ret 
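
The ipsec_rxtx.c hunk above swaps the open-coded detection for the shared is_metadata_hdr_valid()/remove_metadata_hdr() helpers and, notably, now shrinks *cqe_bcnt by MLX5E_METADATA_ETHER_LEN so RX byte counters describe the frame as delivered, not as received from the device. A minimal sketch of the strip itself, assuming (as the driver does) that the metadata sits between the MAC addresses and the real ethertype; META_LEN is an illustrative stand-in:

#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/string.h>

#define META_LEN 8	/* device-inserted pseudo-header length */

static void toy_strip_metadata(struct sk_buff *skb, u32 *cqe_bcnt)
{
	/* slide both MAC addresses forward over the metadata ... */
	memmove(skb->data + META_LEN, skb->data, 2 * ETH_ALEN);
	/* ... then advance data so the frame starts at the MACs again */
	skb_pull(skb, META_LEN);
	*cqe_bcnt -= META_LEN;	/* keep byte accounting honest */
}
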
= mlx5_accel_tls_add_tx_flow(mdev, flow, crypto_info, - start_offload_tcp_sn, &swid); - if (ret < 0) - goto free_flow; tx_ctx->swid = htonl(swid); tx_ctx->expected_seq = start_offload_tcp_sn; + } else { + struct mlx5e_tls_offload_context_rx *rx_ctx = + mlx5e_get_tls_rx_context(tls_ctx); + + rx_ctx->handle = htonl(swid); } return 0; @@ -147,30 +150,60 @@ static void mlx5e_tls_del(struct net_device *netdev, enum tls_offload_ctx_dir direction) { struct mlx5e_priv *priv = netdev_priv(netdev); + unsigned int handle; - if (direction == TLS_OFFLOAD_CTX_DIR_TX) { - u32 swid = ntohl(mlx5e_get_tls_tx_context(tls_ctx)->swid); + handle = ntohl((direction == TLS_OFFLOAD_CTX_DIR_TX) ? + mlx5e_get_tls_tx_context(tls_ctx)->swid : + mlx5e_get_tls_rx_context(tls_ctx)->handle); - mlx5_accel_tls_del_tx_flow(priv->mdev, swid); - } else { - netdev_err(netdev, "unsupported direction %d\n", direction); - } + mlx5_accel_tls_del_flow(priv->mdev, handle, + direction == TLS_OFFLOAD_CTX_DIR_TX); +} + +static void mlx5e_tls_resync_rx(struct net_device *netdev, struct sock *sk, + u32 seq, u64 rcd_sn) +{ + struct tls_context *tls_ctx = tls_get_ctx(sk); + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5e_tls_offload_context_rx *rx_ctx; + + rx_ctx = mlx5e_get_tls_rx_context(tls_ctx); + + netdev_info(netdev, "resyncing seq %d rcd %lld\n", seq, + be64_to_cpu(rcd_sn)); + mlx5_accel_tls_resync_rx(priv->mdev, rx_ctx->handle, seq, rcd_sn); + atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_reply); } static const struct tlsdev_ops mlx5e_tls_ops = { .tls_dev_add = mlx5e_tls_add, .tls_dev_del = mlx5e_tls_del, + .tls_dev_resync_rx = mlx5e_tls_resync_rx, }; void mlx5e_tls_build_netdev(struct mlx5e_priv *priv) { + u32 caps = mlx5_accel_tls_device_caps(priv->mdev); struct net_device *netdev = priv->netdev; if (!mlx5_accel_is_tls_device(priv->mdev)) return; - netdev->features |= NETIF_F_HW_TLS_TX; - netdev->hw_features |= NETIF_F_HW_TLS_TX; + if (caps & MLX5_ACCEL_TLS_TX) { + netdev->features |= NETIF_F_HW_TLS_TX; + netdev->hw_features |= NETIF_F_HW_TLS_TX; + } + + if (caps & MLX5_ACCEL_TLS_RX) { + netdev->features |= NETIF_F_HW_TLS_RX; + netdev->hw_features |= NETIF_F_HW_TLS_RX; + } + + if (!(caps & MLX5_ACCEL_TLS_LRO)) { + netdev->features &= ~NETIF_F_LRO; + netdev->hw_features &= ~NETIF_F_LRO; + } + netdev->tlsdev_ops = &mlx5e_tls_ops; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h index b6162178f621..3f5d72163b56 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h @@ -43,25 +43,44 @@ struct mlx5e_tls_sw_stats { atomic64_t tx_tls_drop_resync_alloc; atomic64_t tx_tls_drop_no_sync_data; atomic64_t tx_tls_drop_bypass_required; + atomic64_t rx_tls_drop_resync_request; + atomic64_t rx_tls_resync_request; + atomic64_t rx_tls_resync_reply; + atomic64_t rx_tls_auth_fail; }; struct mlx5e_tls { struct mlx5e_tls_sw_stats sw_stats; }; -struct mlx5e_tls_offload_context { - struct tls_offload_context base; +struct mlx5e_tls_offload_context_tx { + struct tls_offload_context_tx base; u32 expected_seq; __be32 swid; }; -static inline struct mlx5e_tls_offload_context * +static inline struct mlx5e_tls_offload_context_tx * mlx5e_get_tls_tx_context(struct tls_context *tls_ctx) { - BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context) > - TLS_OFFLOAD_CONTEXT_SIZE); - return container_of(tls_offload_ctx(tls_ctx), - struct mlx5e_tls_offload_context, + BUILD_BUG_ON(sizeof(struct 
mlx5e_tls_offload_context_tx) > + TLS_OFFLOAD_CONTEXT_SIZE_TX); + return container_of(tls_offload_ctx_tx(tls_ctx), + struct mlx5e_tls_offload_context_tx, + base); +} + +struct mlx5e_tls_offload_context_rx { + struct tls_offload_context_rx base; + __be32 handle; +}; + +static inline struct mlx5e_tls_offload_context_rx * +mlx5e_get_tls_rx_context(struct tls_context *tls_ctx) +{ + BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context_rx) > + TLS_OFFLOAD_CONTEXT_SIZE_RX); + return container_of(tls_offload_ctx_rx(tls_ctx), + struct mlx5e_tls_offload_context_rx, base); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c index 15aef71d1957..92d37459850e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c @@ -33,6 +33,14 @@ #include "en_accel/tls.h" #include "en_accel/tls_rxtx.h" +#include "accel/accel.h" + +#include <net/inet6_hashtables.h> +#include <linux/ipv6.h> + +#define SYNDROM_DECRYPTED 0x30 +#define SYNDROM_RESYNC_REQUEST 0x31 +#define SYNDROM_AUTH_FAILED 0x32 #define SYNDROME_OFFLOAD_REQUIRED 32 #define SYNDROME_SYNC 33 @@ -44,10 +52,26 @@ struct sync_info { skb_frag_t frags[MAX_SKB_FRAGS]; }; -struct mlx5e_tls_metadata { +struct recv_metadata_content { + u8 syndrome; + u8 reserved; + __be32 sync_seq; +} __packed; + +struct send_metadata_content { /* One byte of syndrome followed by 3 bytes of swid */ __be32 syndrome_swid; __be16 first_seq; +} __packed; + +struct mlx5e_tls_metadata { + union { + /* from fpga to host */ + struct recv_metadata_content recv; + /* from host to fpga */ + struct send_metadata_content send; + unsigned char raw[6]; + } __packed content; /* packet type ID field */ __be16 ethertype; } __packed; @@ -68,12 +92,13 @@ static int mlx5e_tls_add_metadata(struct sk_buff *skb, __be32 swid) 2 * ETH_ALEN); eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE); - pet->syndrome_swid = htonl(SYNDROME_OFFLOAD_REQUIRED << 24) | swid; + pet->content.send.syndrome_swid = + htonl(SYNDROME_OFFLOAD_REQUIRED << 24) | swid; return 0; } -static int mlx5e_tls_get_sync_data(struct mlx5e_tls_offload_context *context, +static int mlx5e_tls_get_sync_data(struct mlx5e_tls_offload_context_tx *context, u32 tcp_seq, struct sync_info *info) { int remaining, i = 0, ret = -EINVAL; @@ -149,7 +174,7 @@ static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb, pet = (struct mlx5e_tls_metadata *)(nskb->data + sizeof(struct ethhdr)); memcpy(pet, &syndrome, sizeof(syndrome)); - pet->first_seq = htons(tcp_seq); + pet->content.send.first_seq = htons(tcp_seq); /* MLX5 devices don't care about the checksum partial start, offset * and pseudo header @@ -161,7 +186,7 @@ static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb, } static struct sk_buff * -mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context *context, +mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context, struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5e_tx_wqe **wqe, u16 *pi, @@ -239,7 +264,7 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev, u16 *pi) { struct mlx5e_priv *priv = netdev_priv(netdev); - struct mlx5e_tls_offload_context *context; + struct mlx5e_tls_offload_context_tx *context; struct tls_context *tls_ctx; u32 expected_seq; int datalen; @@ -276,3 +301,83 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev, out: return skb; } + +static int tls_update_resync_sn(struct net_device *netdev, + struct sk_buff 
*skb, + struct mlx5e_tls_metadata *mdata) +{ + struct sock *sk = NULL; + struct iphdr *iph; + struct tcphdr *th; + __be32 seq; + + if (mdata->ethertype != htons(ETH_P_IP)) + return -EINVAL; + + iph = (struct iphdr *)(mdata + 1); + + th = ((void *)iph) + iph->ihl * 4; + + if (iph->version == 4) { + sk = inet_lookup_established(dev_net(netdev), &tcp_hashinfo, + iph->saddr, th->source, iph->daddr, + th->dest, netdev->ifindex); +#if IS_ENABLED(CONFIG_IPV6) + } else { + struct ipv6hdr *ipv6h = (struct ipv6hdr *)iph; + + sk = __inet6_lookup_established(dev_net(netdev), &tcp_hashinfo, + &ipv6h->saddr, th->source, + &ipv6h->daddr, th->dest, + netdev->ifindex, 0); +#endif + } + if (!sk || sk->sk_state == TCP_TIME_WAIT) { + struct mlx5e_priv *priv = netdev_priv(netdev); + + atomic64_inc(&priv->tls->sw_stats.rx_tls_drop_resync_request); + goto out; + } + + skb->sk = sk; + skb->destructor = sock_edemux; + + memcpy(&seq, &mdata->content.recv.sync_seq, sizeof(seq)); + tls_offload_rx_resync_request(sk, seq); +out: + return 0; +} + +void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb, + u32 *cqe_bcnt) +{ + struct mlx5e_tls_metadata *mdata; + struct mlx5e_priv *priv; + + if (!is_metadata_hdr_valid(skb)) + return; + + /* Use the metadata */ + mdata = (struct mlx5e_tls_metadata *)(skb->data + ETH_HLEN); + switch (mdata->content.recv.syndrome) { + case SYNDROM_DECRYPTED: + skb->decrypted = 1; + break; + case SYNDROM_RESYNC_REQUEST: + tls_update_resync_sn(netdev, skb, mdata); + priv = netdev_priv(netdev); + atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_request); + break; + case SYNDROM_AUTH_FAILED: + /* Authentication failure will be observed and verified by kTLS */ + priv = netdev_priv(netdev); + atomic64_inc(&priv->tls->sw_stats.rx_tls_auth_fail); + break; + default: + /* Bypass the metadata header to others */ + return; + } + + remove_metadata_hdr(skb); + *cqe_bcnt -= MLX5E_METADATA_ETHER_LEN; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h index 405dfd302225..311667ec71b8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h @@ -45,6 +45,9 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_tx_wqe **wqe, u16 *pi); +void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb, + u32 *cqe_bcnt); + #endif /* CONFIG_MLX5_EN_TLS */ #endif /* __MLX5E_TLS_RXTX_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index c592678ab5f1..a2fb21ca5767 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -45,8 +45,9 @@ #include "en_accel/tls.h" #include "accel/ipsec.h" #include "accel/tls.h" -#include "vxlan.h" +#include "lib/vxlan.h" #include "en/port.h" +#include "en/xdp.h" struct mlx5e_rq_param { u32 rqc[MLX5_ST_SZ_DW(rqc)]; @@ -96,14 +97,19 @@ bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev) static u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params) { - if (!params->xdp_prog) { - u16 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); - u16 rq_headroom = MLX5_RX_HEADROOM + NET_IP_ALIGN; + u16 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); + u16 linear_rq_headroom = params->xdp_prog ? 
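
tls_update_resync_sn() above is the RX-side resync hand-off: the device reports the TCP sequence number where it believes a TLS record starts, the driver maps the frame back to its established socket, and tls_offload_rx_resync_request() lets the TLS stack confirm or reject that guess. A condensed sketch of the flow (IPv4 only; TIME_WAIT and drop accounting elided):

#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/tls.h>

static void toy_tls_rx_resync(struct net_device *dev, struct sk_buff *skb,
			      __be32 resync_seq)
{
	struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);
	struct sock *sk;

	sk = inet_lookup_established(dev_net(dev), &tcp_hashinfo,
				     iph->saddr, th->source,
				     iph->daddr, th->dest, dev->ifindex);
	if (!sk)
		return;

	skb->sk = sk;			/* socket ref travels with the skb */
	skb->destructor = sock_edemux;	/* and is dropped when it is freed */
	tls_offload_rx_resync_request(sk, resync_seq);
}
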
+ XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM; + u32 frag_sz; - return MLX5_SKB_FRAG_SZ(rq_headroom + hw_mtu); - } + linear_rq_headroom += NET_IP_ALIGN; + + frag_sz = MLX5_SKB_FRAG_SZ(linear_rq_headroom + hw_mtu); - return PAGE_SIZE; + if (params->xdp_prog && frag_sz < PAGE_SIZE) + frag_sz = PAGE_SIZE; + + return frag_sz; } static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params) @@ -270,12 +276,9 @@ void mlx5e_update_stats_work(struct work_struct *work) struct delayed_work *dwork = to_delayed_work(work); struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv, update_stats_work); + mutex_lock(&priv->state_lock); - if (test_bit(MLX5E_STATE_OPENED, &priv->state)) { - priv->profile->update_stats(priv); - queue_delayed_work(priv->wq, dwork, - msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL)); - } + priv->profile->update_stats(priv); mutex_unlock(&priv->state_lock); } @@ -352,8 +355,9 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, { int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq); - rq->mpwqe.info = kcalloc_node(wq_sz, sizeof(*rq->mpwqe.info), - GFP_KERNEL, cpu_to_node(c->cpu)); + rq->mpwqe.info = kvzalloc_node(array_size(wq_sz, + sizeof(*rq->mpwqe.info)), + GFP_KERNEL, cpu_to_node(c->cpu)); if (!rq->mpwqe.info) return -ENOMEM; @@ -487,7 +491,6 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, rq->channel = c; rq->ix = c->ix; rq->mdev = mdev; - rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); rq->stats = &c->priv->channel_stats[c->ix].rq; rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL; @@ -670,7 +673,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, err_free: switch (rq->wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - kfree(rq->mpwqe.info); + kvfree(rq->mpwqe.info); mlx5_core_destroy_mkey(mdev, &rq->umr_mkey); break; default: /* MLX5_WQ_TYPE_CYCLIC */ @@ -702,7 +705,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq) switch (rq->wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - kfree(rq->mpwqe.info); + kvfree(rq->mpwqe.info); mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey); break; default: /* MLX5_WQ_TYPE_CYCLIC */ @@ -879,7 +882,7 @@ static void mlx5e_free_rx_descs(struct mlx5e_rq *rq) /* UMR WQE (if in progress) is always at wq->head */ if (rq->mpwqe.umr_in_progress) - mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]); + rq->dealloc_wqe(rq, wq->head); while (!mlx5_wq_ll_is_empty(wq)) { struct mlx5e_rx_wqe_ll *wqe; @@ -965,16 +968,16 @@ static void mlx5e_close_rq(struct mlx5e_rq *rq) static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq) { - kfree(sq->db.di); + kvfree(sq->db.xdpi); } static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa) { int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); - sq->db.di = kcalloc_node(wq_sz, sizeof(*sq->db.di), - GFP_KERNEL, numa); - if (!sq->db.di) { + sq->db.xdpi = kvzalloc_node(array_size(wq_sz, sizeof(*sq->db.xdpi)), + GFP_KERNEL, numa); + if (!sq->db.xdpi) { mlx5e_free_xdpsq_db(sq); return -ENOMEM; } @@ -985,7 +988,8 @@ static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa) static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params, struct mlx5e_sq_param *param, - struct mlx5e_xdpsq *sq) + struct mlx5e_xdpsq *sq, + bool is_redirect) { void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq); struct mlx5_core_dev *mdev = c->mdev; @@ -997,6 +1001,10 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c, sq->channel = c; sq->uar_map = mdev->mlx5e_res.bfreg.map; sq->min_inline_mode = params->tx_min_inline_mode; + sq->hw_mtu = MLX5E_SW2HW_MTU(params, 
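
The mlx5e_rx_get_linear_frag_sz() rework above folds the XDP and non-XDP cases into one computation. Roughly, assuming 4 KiB pages and a 1500-byte software MTU:

/* headroom = (XDP_PACKET_HEADROOM = 256 with a prog installed,
 *             else MLX5_RX_HEADROOM) + NET_IP_ALIGN
 * frag_sz  = MLX5_SKB_FRAG_SZ(headroom + hw_mtu)
 *          ~ headroom + ~1522 + aligned struct skb_shared_info
 *
 * With an XDP program the result is then rounded up to PAGE_SIZE,
 * giving page-per-packet semantics so a frame can be flipped to
 * XDP_TX/XDP_REDIRECT without another packet sharing its page.
 */
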
params->sw_mtu); + sq->stats = is_redirect ? + &c->priv->channel_stats[c->ix].xdpsq : + &c->priv->channel_stats[c->ix].rq_xdpsq; param->wq.db_numa_node = cpu_to_node(c->cpu); err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl); @@ -1024,15 +1032,16 @@ static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq) static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq) { - kfree(sq->db.ico_wqe); + kvfree(sq->db.ico_wqe); } static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa) { u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq); - sq->db.ico_wqe = kcalloc_node(wq_sz, sizeof(*sq->db.ico_wqe), - GFP_KERNEL, numa); + sq->db.ico_wqe = kvzalloc_node(array_size(wq_sz, + sizeof(*sq->db.ico_wqe)), + GFP_KERNEL, numa); if (!sq->db.ico_wqe) return -ENOMEM; @@ -1077,8 +1086,8 @@ static void mlx5e_free_icosq(struct mlx5e_icosq *sq) static void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq) { - kfree(sq->db.wqe_info); - kfree(sq->db.dma_fifo); + kvfree(sq->db.wqe_info); + kvfree(sq->db.dma_fifo); } static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa) @@ -1086,10 +1095,12 @@ static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa) int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS; - sq->db.dma_fifo = kcalloc_node(df_sz, sizeof(*sq->db.dma_fifo), - GFP_KERNEL, numa); - sq->db.wqe_info = kcalloc_node(wq_sz, sizeof(*sq->db.wqe_info), - GFP_KERNEL, numa); + sq->db.dma_fifo = kvzalloc_node(array_size(df_sz, + sizeof(*sq->db.dma_fifo)), + GFP_KERNEL, numa); + sq->db.wqe_info = kvzalloc_node(array_size(wq_sz, + sizeof(*sq->db.wqe_info)), + GFP_KERNEL, numa); if (!sq->db.dma_fifo || !sq->db.wqe_info) { mlx5e_free_txqsq_db(sq); return -ENOMEM; @@ -1523,7 +1534,8 @@ static void mlx5e_close_icosq(struct mlx5e_icosq *sq) static int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params, struct mlx5e_sq_param *param, - struct mlx5e_xdpsq *sq) + struct mlx5e_xdpsq *sq, + bool is_redirect) { unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT; struct mlx5e_create_sq_param csp = {}; int err; int i; - err = mlx5e_alloc_xdpsq(c, params, param, sq); + err = mlx5e_alloc_xdpsq(c, params, param, sq, is_redirect); if (err) return err; @@ -1540,6 +1552,8 @@ static int mlx5e_open_xdpsq(struct mlx5e_channel *c, csp.cqn = sq->cq.mcq.cqn; csp.wq_ctrl = &sq->wq_ctrl; csp.min_inline_mode = sq->min_inline_mode; + if (is_redirect) + set_bit(MLX5E_SQ_STATE_REDIRECT, &sq->state); set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn); if (err) @@ -1893,7 +1907,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, int err; int eqn; - c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu)); + c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu)); if (!c) return -ENOMEM; @@ -1922,10 +1936,14 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, if (err) goto err_close_icosq_cq; - err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->rq.cq); + err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->tx_cq, &c->xdpsq.cq); if (err) goto err_close_tx_cqs; + err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->rq.cq); + if (err) + goto err_close_xdp_tx_cqs; + /* XDP SQ CQ params are same as normal TXQ sq CQ params */ err = c->xdp ?
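
The kcalloc_node() to kvzalloc_node(array_size(...)) conversions in this hunk make the per-ring metadata arrays robust on two axes: array_size() saturates to SIZE_MAX on multiplication overflow (so the allocation fails cleanly instead of under-allocating), and the kv* family falls back to vmalloc when a large physically contiguous allocation is unavailable. The pattern, as a self-contained sketch:

#include <linux/mm.h>
#include <linux/overflow.h>
#include <linux/slab.h>

static void *toy_alloc_ring_db(size_t entries, size_t entry_sz, int node)
{
	/* zeroed, node-local, vmalloc fallback, overflow-safe size */
	return kvzalloc_node(array_size(entries, entry_sz),
			     GFP_KERNEL, node);
}

/* The matching release is kvfree(), which copes with either backing. */
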
mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->tx_cq, &c->rq.xdpsq.cq) : 0; @@ -1942,7 +1960,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, if (err) goto err_close_icosq; - err = c->xdp ? mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->rq.xdpsq) : 0; + err = c->xdp ? mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->rq.xdpsq, false) : 0; if (err) goto err_close_sqs; @@ -1950,9 +1968,17 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, if (err) goto err_close_xdp_sq; + err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->xdpsq, true); + if (err) + goto err_close_rq; + *cp = c; return 0; + +err_close_rq: + mlx5e_close_rq(&c->rq); + err_close_xdp_sq: if (c->xdp) mlx5e_close_xdpsq(&c->rq.xdpsq); @@ -1971,6 +1997,9 @@ err_disable_napi: err_close_rx_cq: mlx5e_close_cq(&c->rq.cq); +err_close_xdp_tx_cqs: + mlx5e_close_cq(&c->xdpsq.cq); + err_close_tx_cqs: mlx5e_close_tx_cqs(c); @@ -1979,7 +2008,7 @@ err_close_icosq_cq: err_napi_del: netif_napi_del(&c->napi); - kfree(c); + kvfree(c); return err; } @@ -2005,6 +2034,7 @@ static void mlx5e_deactivate_channel(struct mlx5e_channel *c) static void mlx5e_close_channel(struct mlx5e_channel *c) { + mlx5e_close_xdpsq(&c->xdpsq); mlx5e_close_rq(&c->rq); if (c->xdp) mlx5e_close_xdpsq(&c->rq.xdpsq); @@ -2014,11 +2044,12 @@ static void mlx5e_close_channel(struct mlx5e_channel *c) if (c->xdp) mlx5e_close_cq(&c->rq.xdpsq.cq); mlx5e_close_cq(&c->rq.cq); + mlx5e_close_cq(&c->xdpsq.cq); mlx5e_close_tx_cqs(c); mlx5e_close_cq(&c->icosq.cq); netif_napi_del(&c->napi); - kfree(c); + kvfree(c); } #define DEFAULT_FRAG_SIZE (2048) @@ -2276,7 +2307,7 @@ int mlx5e_open_channels(struct mlx5e_priv *priv, chs->num = chs->params.num_channels; chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL); - cparam = kzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL); + cparam = kvzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL); if (!chs->c || !cparam) goto err_free; @@ -2287,7 +2318,7 @@ int mlx5e_open_channels(struct mlx5e_priv *priv, goto err_close_channels; } - kfree(cparam); + kvfree(cparam); return 0; err_close_channels: @@ -2296,7 +2327,7 @@ err_close_channels: err_free: kfree(chs->c); - kfree(cparam); + kvfree(cparam); chs->num = 0; return err; } @@ -2943,7 +2974,7 @@ int mlx5e_open(struct net_device *netdev) mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP); mutex_unlock(&priv->state_lock); - if (mlx5e_vxlan_allowed(priv->mdev)) + if (mlx5_vxlan_allowed(priv->mdev->vxlan)) udp_tunnel_get_rx_info(netdev); return err; @@ -3371,7 +3402,7 @@ static int mlx5e_setup_tc_block(struct net_device *dev, switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, mlx5e_setup_tc_block_cb, - priv, priv); + priv, priv, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, mlx5e_setup_tc_block_cb, priv); @@ -3405,6 +3436,9 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) struct mlx5e_vport_stats *vstats = &priv->stats.vport; struct mlx5e_pport_stats *pstats = &priv->stats.pport; + /* update HW stats in background for next time */ + queue_delayed_work(priv->wq, &priv->update_stats_work, 0); + if (mlx5e_is_uplink_rep(priv)) { stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok); stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok); @@ -3703,6 +3737,14 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu, new_channels.params = *params; new_channels.params.sw_mtu = new_mtu; + if (params->xdp_prog && + 
!mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) { + netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP enabled\n", + new_mtu, MLX5E_XDP_MAX_MTU); + err = -EINVAL; + goto out; + } + if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params); u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params); @@ -3928,6 +3970,57 @@ static int mlx5e_get_vf_stats(struct net_device *dev, } #endif +struct mlx5e_vxlan_work { + struct work_struct work; + struct mlx5e_priv *priv; + u16 port; +}; + +static void mlx5e_vxlan_add_work(struct work_struct *work) +{ + struct mlx5e_vxlan_work *vxlan_work = + container_of(work, struct mlx5e_vxlan_work, work); + struct mlx5e_priv *priv = vxlan_work->priv; + u16 port = vxlan_work->port; + + mutex_lock(&priv->state_lock); + mlx5_vxlan_add_port(priv->mdev->vxlan, port); + mutex_unlock(&priv->state_lock); + + kfree(vxlan_work); +} + +static void mlx5e_vxlan_del_work(struct work_struct *work) +{ + struct mlx5e_vxlan_work *vxlan_work = + container_of(work, struct mlx5e_vxlan_work, work); + struct mlx5e_priv *priv = vxlan_work->priv; + u16 port = vxlan_work->port; + + mutex_lock(&priv->state_lock); + mlx5_vxlan_del_port(priv->mdev->vxlan, port); + mutex_unlock(&priv->state_lock); + kfree(vxlan_work); +} + +static void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, u16 port, int add) +{ + struct mlx5e_vxlan_work *vxlan_work; + + vxlan_work = kmalloc(sizeof(*vxlan_work), GFP_ATOMIC); + if (!vxlan_work) + return; + + if (add) + INIT_WORK(&vxlan_work->work, mlx5e_vxlan_add_work); + else + INIT_WORK(&vxlan_work->work, mlx5e_vxlan_del_work); + + vxlan_work->priv = priv; + vxlan_work->port = port; + queue_work(priv->wq, &vxlan_work->work); +} + static void mlx5e_add_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti) { @@ -3936,10 +4029,10 @@ static void mlx5e_add_vxlan_port(struct net_device *netdev, if (ti->type != UDP_TUNNEL_TYPE_VXLAN) return; - if (!mlx5e_vxlan_allowed(priv->mdev)) + if (!mlx5_vxlan_allowed(priv->mdev->vxlan)) return; - mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1); + mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 1); } static void mlx5e_del_vxlan_port(struct net_device *netdev, @@ -3950,10 +4043,10 @@ static void mlx5e_del_vxlan_port(struct net_device *netdev, if (ti->type != UDP_TUNNEL_TYPE_VXLAN) return; - if (!mlx5e_vxlan_allowed(priv->mdev)) + if (!mlx5_vxlan_allowed(priv->mdev->vxlan)) return; - mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 0); + mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 0); } static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv, @@ -3984,7 +4077,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv, port = be16_to_cpu(udph->dest); /* Verify if UDP port is being offloaded by HW */ - if (mlx5e_vxlan_lookup_port(priv, port)) + if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, port)) return features; } @@ -4091,26 +4184,47 @@ static void mlx5e_tx_timeout(struct net_device *dev) queue_work(priv->wq, &priv->tx_timeout_work); } +static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog) +{ + struct net_device *netdev = priv->netdev; + struct mlx5e_channels new_channels = {}; + + if (priv->channels.params.lro_en) { + netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n"); + return -EINVAL; + } + + if (MLX5_IPSEC_DEV(priv->mdev)) { + netdev_warn(netdev, "can't set XDP with IPSec offload\n"); + return 
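
The two checks added above enforce the same invariant from both directions: installing a program validates the current MTU (mlx5e_xdp_allowed), and changing the MTU validates the installed program. The bound itself comes from requiring one packet per page, approximately (assuming 4 KiB pages):

/* MLX5E_XDP_MAX_MTU = PAGE_SIZE - MLX5_SKB_FRAG_SZ(XDP_PACKET_HEADROOM)
 *
 *   4096  page
 *  - 256  XDP_PACKET_HEADROOM
 *  - ~320 aligned struct skb_shared_info at the tail
 *  ------
 *  ~3.5K  of frame, i.e. the largest MTU XDP will accept here
 */
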
-EINVAL; + } + + new_channels.params = priv->channels.params; + new_channels.params.xdp_prog = prog; + + if (!mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) { + netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n", + new_channels.params.sw_mtu, MLX5E_XDP_MAX_MTU); + return -EINVAL; + } + + return 0; +} + static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog) { struct mlx5e_priv *priv = netdev_priv(netdev); struct bpf_prog *old_prog; - int err = 0; bool reset, was_opened; + int err = 0; int i; mutex_lock(&priv->state_lock); - if ((netdev->features & NETIF_F_LRO) && prog) { - netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n"); - err = -EINVAL; - goto unlock; - } - - if ((netdev->features & NETIF_F_HW_ESP) && prog) { - netdev_warn(netdev, "can't set XDP with IPSec offload\n"); - err = -EINVAL; - goto unlock; + if (prog) { + err = mlx5e_xdp_allowed(priv, prog); + if (err) + goto unlock; } was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); @@ -4193,7 +4307,6 @@ static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp) return mlx5e_xdp_set(dev, xdp->prog); case XDP_QUERY_PROG: xdp->prog_id = mlx5e_xdp_query(dev); - xdp->prog_attached = !!xdp->prog_id; return 0; default: return -EINVAL; @@ -4240,6 +4353,7 @@ static const struct net_device_ops mlx5e_netdev_ops = { #endif .ndo_tx_timeout = mlx5e_tx_timeout, .ndo_bpf = mlx5e_xdp, + .ndo_xdp_xmit = mlx5e_xdp_xmit, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = mlx5e_netpoll, #endif @@ -4535,8 +4649,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX; - if (mlx5e_vxlan_allowed(mdev) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) { - netdev->hw_features |= NETIF_F_GSO_PARTIAL; + if (mlx5_vxlan_allowed(mdev->vxlan) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) { netdev->hw_enc_features |= NETIF_F_IP_CSUM; netdev->hw_enc_features |= NETIF_F_IPV6_CSUM; netdev->hw_enc_features |= NETIF_F_TSO; @@ -4544,7 +4657,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL; } - if (mlx5e_vxlan_allowed(mdev)) { + if (mlx5_vxlan_allowed(mdev->vxlan)) { netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM; netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL | @@ -4561,6 +4674,11 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) NETIF_F_GSO_GRE_CSUM; } + netdev->hw_features |= NETIF_F_GSO_PARTIAL; + netdev->gso_partial_features |= NETIF_F_GSO_UDP_L4; + netdev->hw_features |= NETIF_F_GSO_UDP_L4; + netdev->features |= NETIF_F_GSO_UDP_L4; + mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled); if (fcs_supported) @@ -4650,14 +4768,12 @@ static void mlx5e_nic_init(struct mlx5_core_dev *mdev, mlx5_core_err(mdev, "TLS initialization failed, %d\n", err); mlx5e_build_nic_netdev(netdev); mlx5e_build_tc2txq_maps(priv); - mlx5e_vxlan_init(priv); } static void mlx5e_nic_cleanup(struct mlx5e_priv *priv) { mlx5e_tls_cleanup(priv); mlx5e_ipsec_cleanup(priv); - mlx5e_vxlan_cleanup(priv); } static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 2b8040a3cdbd..8e3c5b4b90ab 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -797,7 +797,7 @@ static int mlx5e_rep_setup_tc_block(struct net_device *dev, switch 
(f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, mlx5e_rep_setup_tc_cb, - priv, priv); + priv, priv, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, mlx5e_rep_setup_tc_cb, priv); return 0; @@ -897,6 +897,9 @@ mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct mlx5e_priv *priv = netdev_priv(dev); + /* update HW stats in background for next time */ + queue_delayed_work(priv->wq, &priv->update_stats_work, 0); + memcpy(stats, &priv->stats.vf_vport, sizeof(*stats)); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index d3a1dd20e41d..15d8ae28c040 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -34,7 +34,6 @@ #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/tcp.h> -#include <linux/bpf_trace.h> #include <net/busy_poll.h> #include <net/ip6_checksum.h> #include <net/page_pool.h> @@ -44,7 +43,9 @@ #include "en_rep.h" #include "ipoib/ipoib.h" #include "en_accel/ipsec_rxtx.h" +#include "en_accel/tls_rxtx.h" #include "lib/clock.h" +#include "en/xdp.h" static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config) { @@ -238,8 +239,7 @@ static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq, return 0; } -static void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, - struct mlx5e_dma_info *dma_info) +void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info) { dma_unmap_page(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir); } @@ -276,10 +276,11 @@ static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq, } static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq, - struct mlx5e_wqe_frag_info *frag) + struct mlx5e_wqe_frag_info *frag, + bool recycle) { if (frag->last_in_page) - mlx5e_page_release(rq, frag->di, true); + mlx5e_page_release(rq, frag->di, recycle); } static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix) @@ -307,25 +308,26 @@ static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe, free_frags: while (--i >= 0) - mlx5e_put_rx_frag(rq, --frag); + mlx5e_put_rx_frag(rq, --frag, true); return err; } static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq, - struct mlx5e_wqe_frag_info *wi) + struct mlx5e_wqe_frag_info *wi, + bool recycle) { int i; for (i = 0; i < rq->wqe.info.num_frags; i++, wi++) - mlx5e_put_rx_frag(rq, wi); + mlx5e_put_rx_frag(rq, wi, recycle); } void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix) { struct mlx5e_wqe_frag_info *wi = get_frag(rq, ix); - mlx5e_free_rx_wqe(rq, wi); + mlx5e_free_rx_wqe(rq, wi, false); } static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, u8 wqe_bulk) @@ -395,7 +397,8 @@ mlx5e_copy_skb_header_mpwqe(struct device *pdev, } } -void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi) +static void +mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, bool recycle) { const bool no_xdp_xmit = bitmap_empty(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE); @@ -404,7 +407,7 @@ void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi) for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap)) - mlx5e_page_release(rq, &dma_info[i], true); + mlx5e_page_release(rq, &dma_info[i], recycle); } static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq) @@ -487,7 +490,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR; 
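
Both mlx5e_get_stats() and mlx5e_rep_get_stats() above now queue update_stats_work with zero delay instead of querying firmware inline: .ndo_get_stats64 may run in contexts where a slow, sleeping device query is unwelcome, so the read returns cached counters and merely schedules a refresh for the next reader. A sketch of the pattern (toy_priv and cached_stats are illustrative):

#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct toy_priv {
	struct workqueue_struct *wq;
	struct delayed_work update_stats_work;
	struct rtnl_link_stats64 cached_stats;
};

static void toy_get_stats(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct toy_priv *priv = netdev_priv(dev);

	/* refresh in the background for the *next* call */
	queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
	*stats = priv->cached_stats;	/* serve the last snapshot now */
}
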
sq->pc += MLX5E_UMR_WQEBBS; - mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &umr_wqe->ctrl); + mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &umr_wqe->ctrl); return 0; @@ -504,8 +507,8 @@ err_unmap: void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) { struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix]; - - mlx5e_free_rx_mpwqe(rq, wi); + /* Don't recycle, this function is called on rq/netdev close */ + mlx5e_free_rx_mpwqe(rq, wi, false); } bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq) @@ -601,6 +604,8 @@ bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq) if (!rq->mpwqe.umr_in_progress) mlx5e_alloc_rx_mpwqe(rq, wq->head); + else + rq->stats->congst_umr += mlx5_wq_ll_missing(wq) > 2; return false; } @@ -795,6 +800,11 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, struct net_device *netdev = rq->netdev; skb->mac_len = ETH_HLEN; + +#ifdef CONFIG_MLX5_EN_TLS + mlx5e_tls_handle_rx_skb(netdev, skb, &cqe_bcnt); +#endif + if (lro_num_seg > 1) { mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt); skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg); @@ -839,135 +849,6 @@ static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq, mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb); } -static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq) -{ - struct mlx5_wq_cyc *wq = &sq->wq; - struct mlx5e_tx_wqe *wqe; - u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc - 1); /* last pi */ - - wqe = mlx5_wq_cyc_get_wqe(wq, pi); - - mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &wqe->ctrl); -} - -static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, - struct mlx5e_dma_info *di, - const struct xdp_buff *xdp) -{ - struct mlx5e_xdpsq *sq = &rq->xdpsq; - struct mlx5_wq_cyc *wq = &sq->wq; - u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); - struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi); - - struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; - struct mlx5_wqe_eth_seg *eseg = &wqe->eth; - struct mlx5_wqe_data_seg *dseg; - - ptrdiff_t data_offset = xdp->data - xdp->data_hard_start; - dma_addr_t dma_addr = di->addr + data_offset; - unsigned int dma_len = xdp->data_end - xdp->data; - - struct mlx5e_rq_stats *stats = rq->stats; - - prefetchw(wqe); - - if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || rq->hw_mtu < dma_len)) { - stats->xdp_drop++; - return false; - } - - if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1))) { - if (sq->db.doorbell) { - /* SQ is full, ring doorbell */ - mlx5e_xmit_xdp_doorbell(sq); - sq->db.doorbell = false; - } - stats->xdp_tx_full++; - return false; - } - - dma_sync_single_for_device(sq->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE); - - cseg->fm_ce_se = 0; - - dseg = (struct mlx5_wqe_data_seg *)eseg + 1; - - /* copy the inline part if required */ - if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) { - memcpy(eseg->inline_hdr.start, xdp->data, MLX5E_XDP_MIN_INLINE); - eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE); - dma_len -= MLX5E_XDP_MIN_INLINE; - dma_addr += MLX5E_XDP_MIN_INLINE; - dseg++; - } - - /* write the dma part */ - dseg->addr = cpu_to_be64(dma_addr); - dseg->byte_count = cpu_to_be32(dma_len); - - cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND); - - /* move page to reference to sq responsibility, - * and mark so it's not put back in page-cache. 
- */ - __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */ - sq->db.di[pi] = *di; - sq->pc++; - - sq->db.doorbell = true; - - stats->xdp_tx++; - return true; -} - -/* returns true if packet was consumed by xdp */ -static inline bool mlx5e_xdp_handle(struct mlx5e_rq *rq, - struct mlx5e_dma_info *di, - void *va, u16 *rx_headroom, u32 *len) -{ - struct bpf_prog *prog = READ_ONCE(rq->xdp_prog); - struct xdp_buff xdp; - u32 act; - int err; - - if (!prog) - return false; - - xdp.data = va + *rx_headroom; - xdp_set_data_meta_invalid(&xdp); - xdp.data_end = xdp.data + *len; - xdp.data_hard_start = va; - xdp.rxq = &rq->xdp_rxq; - - act = bpf_prog_run_xdp(prog, &xdp); - switch (act) { - case XDP_PASS: - *rx_headroom = xdp.data - xdp.data_hard_start; - *len = xdp.data_end - xdp.data; - return false; - case XDP_TX: - if (unlikely(!mlx5e_xmit_xdp_frame(rq, di, &xdp))) - trace_xdp_exception(rq->netdev, prog, act); - return true; - case XDP_REDIRECT: - /* When XDP enabled then page-refcnt==1 here */ - err = xdp_do_redirect(rq->netdev, &xdp, prog); - if (!err) { - __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); - rq->xdpsq.db.redirect_flush = true; - mlx5e_page_dma_unmap(rq, di); - } - return true; - default: - bpf_warn_invalid_xdp_action(act); - case XDP_ABORTED: - trace_xdp_exception(rq->netdev, prog, act); - case XDP_DROP: - rq->stats->xdp_drop++; - return true; - } -} - static inline struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va, u32 frag_size, u16 headroom, @@ -1105,7 +986,7 @@ void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) napi_gro_receive(rq->cq.napi, skb); free_wqe: - mlx5e_free_rx_wqe(rq, wi); + mlx5e_free_rx_wqe(rq, wi, true); wq_cyc_pop: mlx5_wq_cyc_pop(wq); } @@ -1147,7 +1028,7 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) napi_gro_receive(rq->cq.napi, skb); free_wqe: - mlx5e_free_rx_wqe(rq, wi); + mlx5e_free_rx_wqe(rq, wi, true); wq_cyc_pop: mlx5_wq_cyc_pop(wq); } @@ -1218,6 +1099,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, dma_sync_single_range_for_cpu(rq->pdev, di->addr, head_offset, frag_size, DMA_FROM_DEVICE); + prefetchw(va); /* xdp_frame data area */ prefetch(data); rcu_read_lock(); @@ -1261,7 +1143,10 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) } if (unlikely(mpwrq_is_filler_cqe(cqe))) { - rq->stats->mpwqe_filler++; + struct mlx5e_rq_stats *stats = rq->stats; + + stats->mpwqe_filler_cqes++; + stats->mpwqe_filler_strides += cstrides; goto mpwrq_cqe_out; } @@ -1281,7 +1166,7 @@ mpwrq_cqe_out: wq = &rq->mpwqe.wq; wqe = mlx5_wq_ll_get_wqe(wq, wqe_id); - mlx5e_free_rx_mpwqe(rq, wi); + mlx5e_free_rx_mpwqe(rq, wi, true); mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index); } @@ -1317,14 +1202,14 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) rq->handle_rx_cqe(rq, cqe); } while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); - if (xdpsq->db.doorbell) { + if (xdpsq->doorbell) { mlx5e_xmit_xdp_doorbell(xdpsq); - xdpsq->db.doorbell = false; + xdpsq->doorbell = false; } - if (xdpsq->db.redirect_flush) { + if (xdpsq->redirect_flush) { xdp_do_flush_map(); - xdpsq->db.redirect_flush = false; + xdpsq->redirect_flush = false; } mlx5_cqwq_update_db_record(&cq->wq); @@ -1335,78 +1220,6 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) return work_done; } -bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq) -{ - struct mlx5e_xdpsq *sq; - struct mlx5_cqe64 *cqe; - struct mlx5e_rq *rq; - u16 sqcc; - int i; - - sq = 
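
The en_rx.c deletions here and below are the other half of the move into en/xdp.c: the XDP doorbell, xmit, and CQ-poll routines were lifted out, with mlx5e_poll_xdpsq_cq() keeping its bounded-budget shape. The skeleton of that loop, reduced to its ordering-sensitive parts (toy_* names and the availability test are placeholders for the real CQE machinery):

#include <asm/barrier.h>
#include <linux/types.h>

struct toy_sq {
	u16 cc;		/* consumer counter, read by the xmit path */
	u16 pc;		/* producer counter */
};

static bool toy_poll_sq_completions(struct toy_sq *sq, int budget)
{
	u16 sqcc = sq->cc;	/* advance a local copy only */
	int i = 0;

	while (i < budget && sqcc != sq->pc) {	/* stand-in for "CQE ready" */
		/* release the WQE's page or xdp_frame here */
		sqcc++;
		i++;
	}

	wmb();		/* free CQ space before publishing the new cc */
	sq->cc = sqcc;

	return i == budget;	/* true: budget exhausted, poll again */
}
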
container_of(cq, struct mlx5e_xdpsq, cq); - - if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))) - return false; - - cqe = mlx5_cqwq_get_cqe(&cq->wq); - if (!cqe) - return false; - - rq = container_of(sq, struct mlx5e_rq, xdpsq); - - /* sq->cc must be updated only after mlx5_cqwq_update_db_record(), - * otherwise a cq overrun may occur - */ - sqcc = sq->cc; - - i = 0; - do { - u16 wqe_counter; - bool last_wqe; - - mlx5_cqwq_pop(&cq->wq); - - wqe_counter = be16_to_cpu(cqe->wqe_counter); - - do { - struct mlx5e_dma_info *di; - u16 ci; - - last_wqe = (sqcc == wqe_counter); - - ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc); - di = &sq->db.di[ci]; - - sqcc++; - /* Recycle RX page */ - mlx5e_page_release(rq, di, true); - } while (!last_wqe); - } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); - - mlx5_cqwq_update_db_record(&cq->wq); - - /* ensure cq space is freed before enabling more cqes */ - wmb(); - - sq->cc = sqcc; - return (i == MLX5E_TX_CQ_POLL_BUDGET); -} - -void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq) -{ - struct mlx5e_rq *rq = container_of(sq, struct mlx5e_rq, xdpsq); - struct mlx5e_dma_info *di; - u16 ci; - - while (sq->cc != sq->pc) { - ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc); - di = &sq->db.di[ci]; - sq->cc++; - - mlx5e_page_release(rq, di, false); - } -} - #ifdef CONFIG_MLX5_CORE_IPOIB #define MLX5_IB_GRH_DGID_OFFSET 24 @@ -1508,7 +1321,7 @@ void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) napi_gro_receive(rq->cq.napi, skb); wq_free_wqe: - mlx5e_free_rx_wqe(rq, wi); + mlx5e_free_rx_wqe(rq, wi, true); mlx5_wq_cyc_pop(wq); } @@ -1531,19 +1344,19 @@ void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt); if (unlikely(!skb)) { /* a DROP, save the page-reuse checks */ - mlx5e_free_rx_wqe(rq, wi); + mlx5e_free_rx_wqe(rq, wi, true); goto wq_cyc_pop; } - skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb); + skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb, &cqe_bcnt); if (unlikely(!skb)) { - mlx5e_free_rx_wqe(rq, wi); + mlx5e_free_rx_wqe(rq, wi, true); goto wq_cyc_pop; } mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); napi_gro_receive(rq->cq.napi, skb); - mlx5e_free_rx_wqe(rq, wi); + mlx5e_free_rx_wqe(rq, wi, true); wq_cyc_pop: mlx5_wq_cyc_pop(wq); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 1646859974ce..12fdf5c92b67 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -44,6 +44,7 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) }, #ifdef CONFIG_MLX5_EN_TLS { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) }, @@ -58,8 +59,11 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) }, + { 
MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) }, @@ -67,10 +71,17 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_udp_seg_rem) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) }, @@ -80,6 +91,11 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_aff_change) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) }, }; @@ -118,6 +134,8 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) for (i = 0; i < priv->profile->max_nch(priv->mdev); i++) { struct mlx5e_channel_stats *channel_stats = &priv->channel_stats[i]; + struct mlx5e_xdpsq_stats *xdpsq_red_stats = &channel_stats->xdpsq; + struct mlx5e_xdpsq_stats *xdpsq_stats = &channel_stats->rq_xdpsq; struct mlx5e_rq_stats *rq_stats = &channel_stats->rq; struct mlx5e_ch_stats *ch_stats = &channel_stats->ch; int j; @@ -131,11 +149,15 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->rx_csum_complete += rq_stats->csum_complete; s->rx_csum_unnecessary += rq_stats->csum_unnecessary; s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner; - s->rx_xdp_drop += rq_stats->xdp_drop; - s->rx_xdp_tx += rq_stats->xdp_tx; - s->rx_xdp_tx_full += rq_stats->xdp_tx_full; + s->rx_xdp_drop += rq_stats->xdp_drop; + s->rx_xdp_redirect += rq_stats->xdp_redirect; + s->rx_xdp_tx_xmit += xdpsq_stats->xmit; + s->rx_xdp_tx_full += xdpsq_stats->full; + s->rx_xdp_tx_err += xdpsq_stats->err; + s->rx_xdp_tx_cqe += xdpsq_stats->cqes; s->rx_wqe_err += rq_stats->wqe_err; - s->rx_mpwqe_filler += rq_stats->mpwqe_filler; + s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes; + s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides; s->rx_buff_alloc_err += rq_stats->buff_alloc_err; s->rx_cqe_compress_blks += 
rq_stats->cqe_compress_blks; s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts; @@ -145,7 +167,17 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->rx_cache_empty += rq_stats->cache_empty; s->rx_cache_busy += rq_stats->cache_busy; s->rx_cache_waive += rq_stats->cache_waive; - s->ch_eq_rearm += ch_stats->eq_rearm; + s->rx_congst_umr += rq_stats->congst_umr; + s->ch_events += ch_stats->events; + s->ch_poll += ch_stats->poll; + s->ch_arm += ch_stats->arm; + s->ch_aff_change += ch_stats->aff_change; + s->ch_eq_rearm += ch_stats->eq_rearm; + /* xdp redirect */ + s->tx_xdp_xmit += xdpsq_red_stats->xmit; + s->tx_xdp_full += xdpsq_red_stats->full; + s->tx_xdp_err += xdpsq_red_stats->err; + s->tx_xdp_cqes += xdpsq_red_stats->cqes; for (j = 0; j < priv->max_opened_tc; j++) { struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j]; @@ -157,8 +189,10 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->tx_tso_inner_packets += sq_stats->tso_inner_packets; s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes; s->tx_added_vlan_packets += sq_stats->added_vlan_packets; + s->tx_nop += sq_stats->nop; s->tx_queue_stopped += sq_stats->stopped; s->tx_queue_wake += sq_stats->wake; + s->tx_udp_seg_rem += sq_stats->udp_seg_rem; s->tx_queue_dropped += sq_stats->dropped; s->tx_cqe_err += sq_stats->cqe_err; s->tx_recover += sq_stats->recover; @@ -170,6 +204,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->tx_tls_ooo += sq_stats->tls_ooo; s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes; #endif + s->tx_cqes += sq_stats->cqes; } } @@ -1106,13 +1141,13 @@ static const struct counter_desc rq_stats_desc[] = { { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) }, - { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx) }, - { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_full) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) }, - { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) }, @@ -1122,6 +1157,7 @@ static const struct counter_desc rq_stats_desc[] = { { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) }, }; static const struct counter_desc sq_stats_desc[] = { @@ -1140,16 +1176,37 @@ static const struct counter_desc sq_stats_desc[] = { { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) }, }; +static const struct counter_desc 
rq_xdpsq_stats_desc[] = { + { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) }, + { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) }, + { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) }, + { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) }, +}; + +static const struct counter_desc xdpsq_stats_desc[] = { + { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) }, + { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) }, + { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) }, + { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) }, +}; + static const struct counter_desc ch_stats_desc[] = { + { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, events) }, + { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) }, + { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, arm) }, + { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, aff_change) }, { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) }, }; #define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc) #define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc) +#define NUM_XDPSQ_STATS ARRAY_SIZE(xdpsq_stats_desc) +#define NUM_RQ_XDPSQ_STATS ARRAY_SIZE(rq_xdpsq_stats_desc) #define NUM_CH_STATS ARRAY_SIZE(ch_stats_desc) static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv) @@ -1158,7 +1215,9 @@ static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv) return (NUM_RQ_STATS * max_nch) + (NUM_CH_STATS * max_nch) + - (NUM_SQ_STATS * max_nch * priv->max_opened_tc); + (NUM_SQ_STATS * max_nch * priv->max_opened_tc) + + (NUM_RQ_XDPSQ_STATS * max_nch) + + (NUM_XDPSQ_STATS * max_nch); } static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data, @@ -1172,9 +1231,14 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data, sprintf(data + (idx++) * ETH_GSTRING_LEN, ch_stats_desc[j].format, i); - for (i = 0; i < max_nch; i++) + for (i = 0; i < max_nch; i++) { for (j = 0; j < NUM_RQ_STATS; j++) - sprintf(data + (idx++) * ETH_GSTRING_LEN, rq_stats_desc[j].format, i); + sprintf(data + (idx++) * ETH_GSTRING_LEN, + rq_stats_desc[j].format, i); + for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++) + sprintf(data + (idx++) * ETH_GSTRING_LEN, + rq_xdpsq_stats_desc[j].format, i); + } for (tc = 0; tc < priv->max_opened_tc; tc++) for (i = 0; i < max_nch; i++) @@ -1183,6 +1247,11 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data, sq_stats_desc[j].format, priv->channel_tc2txq[i][tc]); + for (i = 0; i < max_nch; i++) + for (j = 0; j < NUM_XDPSQ_STATS; j++) + sprintf(data + (idx++) * ETH_GSTRING_LEN, + xdpsq_stats_desc[j].format, i); + return idx; } @@ -1198,11 +1267,16 @@ static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data, MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].ch, ch_stats_desc, j); - for (i = 0; i < max_nch; i++) + for (i = 0; i < max_nch; i++) { for (j = 0; j < NUM_RQ_STATS; j++) data[idx++] = MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq, rq_stats_desc, j); + for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++) + data[idx++] = + MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq_xdpsq, + rq_xdpsq_stats_desc, j); + } for (tc = 0; tc < priv->max_opened_tc; tc++) for (i = 0; i < max_nch; i++) @@ -1211,6 +1285,12 @@ static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data, MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].sq[tc], sq_stats_desc, j); + for (i = 0; i < max_nch; i++) + for (j = 0; j < NUM_XDPSQ_STATS; j++) + data[idx++] = + MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xdpsq, + xdpsq_stats_desc, j); + 
return idx; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 643153bb3607..a4c035aedd46 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -44,6 +44,8 @@ #define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld) #define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld) #define MLX5E_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld) +#define MLX5E_DECLARE_XDPSQ_STAT(type, fld) "tx%d_xdp_"#fld, offsetof(type, fld) +#define MLX5E_DECLARE_RQ_XDPSQ_STAT(type, fld) "rx%d_xdp_tx_"#fld, offsetof(type, fld) #define MLX5E_DECLARE_CH_STAT(type, fld) "ch%d_"#fld, offsetof(type, fld) struct counter_desc { @@ -61,6 +63,7 @@ struct mlx5e_sw_stats { u64 tx_tso_inner_packets; u64 tx_tso_inner_bytes; u64 tx_added_vlan_packets; + u64 tx_nop; u64 rx_lro_packets; u64 rx_lro_bytes; u64 rx_removed_vlan_packets; @@ -69,8 +72,11 @@ struct mlx5e_sw_stats { u64 rx_csum_complete; u64 rx_csum_unnecessary_inner; u64 rx_xdp_drop; - u64 rx_xdp_tx; + u64 rx_xdp_redirect; + u64 rx_xdp_tx_xmit; u64 rx_xdp_tx_full; + u64 rx_xdp_tx_err; + u64 rx_xdp_tx_cqe; u64 tx_csum_none; u64 tx_csum_partial; u64 tx_csum_partial_inner; @@ -78,10 +84,17 @@ struct mlx5e_sw_stats { u64 tx_queue_dropped; u64 tx_xmit_more; u64 tx_recover; + u64 tx_cqes; u64 tx_queue_wake; + u64 tx_udp_seg_rem; u64 tx_cqe_err; + u64 tx_xdp_xmit; + u64 tx_xdp_full; + u64 tx_xdp_err; + u64 tx_xdp_cqes; u64 rx_wqe_err; - u64 rx_mpwqe_filler; + u64 rx_mpwqe_filler_cqes; + u64 rx_mpwqe_filler_strides; u64 rx_buff_alloc_err; u64 rx_cqe_compress_blks; u64 rx_cqe_compress_pkts; @@ -91,6 +104,11 @@ struct mlx5e_sw_stats { u64 rx_cache_empty; u64 rx_cache_busy; u64 rx_cache_waive; + u64 rx_congst_umr; + u64 ch_events; + u64 ch_poll; + u64 ch_arm; + u64 ch_aff_change; u64 ch_eq_rearm; #ifdef CONFIG_MLX5_EN_TLS @@ -168,10 +186,10 @@ struct mlx5e_rq_stats { u64 lro_bytes; u64 removed_vlan_packets; u64 xdp_drop; - u64 xdp_tx; - u64 xdp_tx_full; + u64 xdp_redirect; u64 wqe_err; - u64 mpwqe_filler; + u64 mpwqe_filler_cqes; + u64 mpwqe_filler_strides; u64 buff_alloc_err; u64 cqe_compress_blks; u64 cqe_compress_pkts; @@ -181,6 +199,7 @@ struct mlx5e_rq_stats { u64 cache_empty; u64 cache_busy; u64 cache_waive; + u64 congst_umr; }; struct mlx5e_sq_stats { @@ -196,6 +215,7 @@ struct mlx5e_sq_stats { u64 csum_partial_inner; u64 added_vlan_packets; u64 nop; + u64 udp_seg_rem; #ifdef CONFIG_MLX5_EN_TLS u64 tls_ooo; u64 tls_resync_bytes; @@ -206,11 +226,24 @@ struct mlx5e_sq_stats { u64 dropped; u64 recover; /* dirtied @completion */ - u64 wake ____cacheline_aligned_in_smp; + u64 cqes ____cacheline_aligned_in_smp; + u64 wake; u64 cqe_err; }; +struct mlx5e_xdpsq_stats { + u64 xmit; + u64 full; + u64 err; + /* dirtied @completion */ + u64 cqes ____cacheline_aligned_in_smp; +}; + struct mlx5e_ch_stats { + u64 events; + u64 poll; + u64 arm; + u64 aff_change; u64 eq_rearm; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 3a2c4e548226..c28fe469b04a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -50,7 +50,7 @@ #include "en_rep.h" #include "en_tc.h" #include "eswitch.h" -#include "vxlan.h" +#include "lib/vxlan.h" #include "fs_core.h" #include "en/port.h" @@ -1032,10 +1032,8 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe) * dst ip pair */ n = 
neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev); - if (!n) { - WARN(1, "The neighbour already freed\n"); + if (!n) return; - } neigh_event_send(n, NULL); neigh_release(n); @@ -1126,16 +1124,12 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv, skb_flow_dissector_target(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS, f->mask); - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); - struct net_device *up_dev = uplink_rpriv->netdev; - struct mlx5e_priv *up_priv = netdev_priv(up_dev); /* Full udp dst port must be given */ if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst))) goto vxlan_match_offload_err; - if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->dst)) && + if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->dst)) && MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) parse_vxlan_attr(spec, f); else { @@ -1213,6 +1207,26 @@ vxlan_match_offload_err: MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6); } + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_IP)) { + struct flow_dissector_key_ip *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ENC_IP, + f->key); + struct flow_dissector_key_ip *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ENC_IP, + f->mask); + + MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, mask->tos & 0x3); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, key->tos & 0x3); + + MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, mask->tos >> 2); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, key->tos >> 2); + + MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl); + } + /* Enforce DMAC when offloading incoming tunneled flows. * Flow counters require a match on the DMAC. 
*/ @@ -1237,6 +1251,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, outer_headers); void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers); + void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, + misc_parameters); + void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, + misc_parameters); u16 addr_type = 0; u8 ip_proto = 0; @@ -1247,6 +1265,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, BIT(FLOW_DISSECTOR_KEY_BASIC) | BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | BIT(FLOW_DISSECTOR_KEY_VLAN) | + BIT(FLOW_DISSECTOR_KEY_CVLAN) | BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | BIT(FLOW_DISSECTOR_KEY_PORTS) | @@ -1256,7 +1275,8 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | BIT(FLOW_DISSECTOR_KEY_TCP) | - BIT(FLOW_DISSECTOR_KEY_IP))) { + BIT(FLOW_DISSECTOR_KEY_IP) | + BIT(FLOW_DISSECTOR_KEY_ENC_IP))) { netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n", f->dissector->used_keys); return -EOPNOTSUPP; @@ -1327,9 +1347,18 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, skb_flow_dissector_target(f->dissector, FLOW_DISSECTOR_KEY_VLAN, f->mask); - if (mask->vlan_id || mask->vlan_priority) { - MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1); + if (mask->vlan_id || mask->vlan_priority || mask->vlan_tpid) { + if (key->vlan_tpid == htons(ETH_P_8021AD)) { + MLX5_SET(fte_match_set_lyr_2_4, headers_c, + svlan_tag, 1); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, + svlan_tag, 1); + } else { + MLX5_SET(fte_match_set_lyr_2_4, headers_c, + cvlan_tag, 1); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, + cvlan_tag, 1); + } MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id); MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id); @@ -1341,6 +1370,41 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, } } + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) { + struct flow_dissector_key_vlan *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_CVLAN, + f->key); + struct flow_dissector_key_vlan *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_CVLAN, + f->mask); + if (mask->vlan_id || mask->vlan_priority || mask->vlan_tpid) { + if (key->vlan_tpid == htons(ETH_P_8021AD)) { + MLX5_SET(fte_match_set_misc, misc_c, + outer_second_svlan_tag, 1); + MLX5_SET(fte_match_set_misc, misc_v, + outer_second_svlan_tag, 1); + } else { + MLX5_SET(fte_match_set_misc, misc_c, + outer_second_cvlan_tag, 1); + MLX5_SET(fte_match_set_misc, misc_v, + outer_second_cvlan_tag, 1); + } + + MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid, + mask->vlan_id); + MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid, + key->vlan_id); + MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio, + mask->vlan_priority); + MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio, + key->vlan_priority); + + *match_level = MLX5_MATCH_L2; + } + } + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { struct flow_dissector_key_basic *key = skb_flow_dissector_target(f->dissector, @@ -2082,7 +2146,7 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv, struct net_device **out_dev, struct flowi4 *fl4, struct neighbour **out_n, - int *out_ttl) + u8 *out_ttl) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5e_rep_priv *uplink_rpriv; @@ -2106,7 +2170,8 @@ static int 
mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv, else *out_dev = rt->dst.dev; - *out_ttl = ip4_dst_hoplimit(&rt->dst); + if (!(*out_ttl)) + *out_ttl = ip4_dst_hoplimit(&rt->dst); n = dst_neigh_lookup(&rt->dst, &fl4->daddr); ip_rt_put(rt); if (!n) @@ -2135,7 +2200,7 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv, struct net_device **out_dev, struct flowi6 *fl6, struct neighbour **out_n, - int *out_ttl) + u8 *out_ttl) { struct neighbour *n = NULL; struct dst_entry *dst; @@ -2150,7 +2215,8 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv, if (ret < 0) return ret; - *out_ttl = ip6_dst_hoplimit(dst); + if (!(*out_ttl)) + *out_ttl = ip6_dst_hoplimit(dst); uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); /* if the egress device isn't on the same HW e-switch, we use the uplink */ @@ -2174,7 +2240,7 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv, static void gen_vxlan_header_ipv4(struct net_device *out_dev, char buf[], int encap_size, unsigned char h_dest[ETH_ALEN], - int ttl, + u8 tos, u8 ttl, __be32 daddr, __be32 saddr, __be16 udp_dst_port, @@ -2194,6 +2260,7 @@ static void gen_vxlan_header_ipv4(struct net_device *out_dev, ip->daddr = daddr; ip->saddr = saddr; + ip->tos = tos; ip->ttl = ttl; ip->protocol = IPPROTO_UDP; ip->version = 0x4; @@ -2207,7 +2274,7 @@ static void gen_vxlan_header_ipv4(struct net_device *out_dev, static void gen_vxlan_header_ipv6(struct net_device *out_dev, char buf[], int encap_size, unsigned char h_dest[ETH_ALEN], - int ttl, + u8 tos, u8 ttl, struct in6_addr *daddr, struct in6_addr *saddr, __be16 udp_dst_port, @@ -2224,7 +2291,7 @@ static void gen_vxlan_header_ipv6(struct net_device *out_dev, ether_addr_copy(eth->h_source, out_dev->dev_addr); eth->h_proto = htons(ETH_P_IPV6); - ip6_flow_hdr(ip6h, 0, 0); + ip6_flow_hdr(ip6h, tos, 0); /* the HW fills up ipv6 payload len */ ip6h->nexthdr = IPPROTO_UDP; ip6h->hop_limit = ttl; @@ -2246,9 +2313,9 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, struct net_device *out_dev; struct neighbour *n = NULL; struct flowi4 fl4 = {}; + u8 nud_state, tos, ttl; char *encap_header; - int ttl, err; - u8 nud_state; + int err; if (max_encap_size < ipv4_encap_size) { mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n", @@ -2269,6 +2336,10 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, err = -EOPNOTSUPP; goto free_encap; } + + tos = tun_key->tos; + ttl = tun_key->ttl; + fl4.flowi4_tos = tun_key->tos; fl4.daddr = tun_key->u.ipv4.dst; fl4.saddr = tun_key->u.ipv4.src; @@ -2303,7 +2374,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, switch (e->tunnel_type) { case MLX5_HEADER_TYPE_VXLAN: gen_vxlan_header_ipv4(out_dev, encap_header, - ipv4_encap_size, e->h_dest, ttl, + ipv4_encap_size, e->h_dest, tos, ttl, fl4.daddr, fl4.saddr, tun_key->tp_dst, tunnel_id_to_key32(tun_key->tun_id)); @@ -2351,9 +2422,9 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv, struct net_device *out_dev; struct neighbour *n = NULL; struct flowi6 fl6 = {}; + u8 nud_state, tos, ttl; char *encap_header; - int err, ttl = 0; - u8 nud_state; + int err; if (max_encap_size < ipv6_encap_size) { mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n", @@ -2375,6 +2446,9 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv, goto free_encap; } + tos = tun_key->tos; + ttl = tun_key->ttl; + fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label); fl6.daddr = tun_key->u.ipv6.dst; 
fl6.saddr = tun_key->u.ipv6.src; @@ -2409,7 +2483,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv, switch (e->tunnel_type) { case MLX5_HEADER_TYPE_VXLAN: gen_vxlan_header_ipv6(out_dev, encap_header, - ipv6_encap_size, e->h_dest, ttl, + ipv6_encap_size, e->h_dest, tos, ttl, &fl6.daddr, &fl6.saddr, tun_key->tp_dst, tunnel_id_to_key32(tun_key->tun_id)); @@ -2455,11 +2529,7 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, - REP_ETH); - struct net_device *up_dev = uplink_rpriv->netdev; unsigned short family = ip_tunnel_info_af(tun_info); - struct mlx5e_priv *up_priv = netdev_priv(up_dev); struct mlx5_esw_flow_attr *attr = flow->esw_attr; struct ip_tunnel_key *key = &tun_info->key; struct mlx5e_encap_entry *e; @@ -2479,7 +2549,7 @@ vxlan_encap_offload_err: return -EOPNOTSUPP; } - if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->tp_dst)) && + if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->tp_dst)) && MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) { tunnel_type = MLX5_HEADER_TYPE_VXLAN; } else { @@ -2535,6 +2605,56 @@ out_err: return err; } +static int parse_tc_vlan_action(struct mlx5e_priv *priv, + const struct tc_action *a, + struct mlx5_esw_flow_attr *attr, + u32 *action) +{ + u8 vlan_idx = attr->total_vlan; + + if (vlan_idx >= MLX5_FS_VLAN_DEPTH) + return -EOPNOTSUPP; + + if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) { + if (vlan_idx) { + if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, + MLX5_FS_VLAN_DEPTH)) + return -EOPNOTSUPP; + + *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2; + } else { + *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; + } + } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) { + attr->vlan_vid[vlan_idx] = tcf_vlan_push_vid(a); + attr->vlan_prio[vlan_idx] = tcf_vlan_push_prio(a); + attr->vlan_proto[vlan_idx] = tcf_vlan_push_proto(a); + if (!attr->vlan_proto[vlan_idx]) + attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q); + + if (vlan_idx) { + if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, + MLX5_FS_VLAN_DEPTH)) + return -EOPNOTSUPP; + + *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2; + } else { + if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) && + (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) || + tcf_vlan_push_prio(a))) + return -EOPNOTSUPP; + + *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH; + } + } else { /* action is TCA_VLAN_ACT_MODIFY */ + return -EOPNOTSUPP; + } + + attr->total_vlan = vlan_idx + 1; + + return 0; +} + static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, struct mlx5e_tc_flow_parse_attr *parse_attr, struct mlx5e_tc_flow *flow) @@ -2546,6 +2666,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, LIST_HEAD(actions); bool encap = false; u32 action = 0; + int err; if (!tcf_exts_has_actions(exts)) return -EINVAL; @@ -2562,8 +2683,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, } if (is_tcf_pedit(a)) { - int err; - err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB, parse_attr); if (err) @@ -2630,23 +2749,11 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, } if (is_tcf_vlan(a)) { - if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) { - action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; - } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) { - action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH; - attr->vlan_vid = 
tcf_vlan_push_vid(a); - if (mlx5_eswitch_vlan_actions_supported(priv->mdev)) { - attr->vlan_prio = tcf_vlan_push_prio(a); - attr->vlan_proto = tcf_vlan_push_proto(a); - if (!attr->vlan_proto) - attr->vlan_proto = htons(ETH_P_8021Q); - } else if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) || - tcf_vlan_push_prio(a)) { - return -EOPNOTSUPP; - } - } else { /* action is TCA_VLAN_ACT_MODIFY */ - return -EOPNOTSUPP; - } + err = parse_tc_vlan_action(priv, a, attr, &action); + + if (err) + return err; + attr->mirror_count = attr->out_count; continue; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index f29deb44bf3b..ae73ea992845 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -66,22 +66,21 @@ static inline void mlx5e_tx_dma_unmap(struct device *pdev, } } +static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i) +{ + return &sq->db.dma_fifo[i & sq->dma_fifo_mask]; +} + static inline void mlx5e_dma_push(struct mlx5e_txqsq *sq, dma_addr_t addr, u32 size, enum mlx5e_dma_map_type map_type) { - u32 i = sq->dma_fifo_pc & sq->dma_fifo_mask; - - sq->db.dma_fifo[i].addr = addr; - sq->db.dma_fifo[i].size = size; - sq->db.dma_fifo[i].type = map_type; - sq->dma_fifo_pc++; -} + struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, sq->dma_fifo_pc++); -static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i) -{ - return &sq->db.dma_fifo[i & sq->dma_fifo_mask]; + dma->addr = addr; + dma->size = size; + dma->type = map_type; } static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma) @@ -111,10 +110,11 @@ static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb #endif u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { struct mlx5e_priv *priv = netdev_priv(dev); - int channel_ix = fallback(dev, skb); + int channel_ix = fallback(dev, skb, NULL); u16 num_channels; int up = 0; @@ -228,7 +228,10 @@ mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb) stats->tso_inner_packets++; stats->tso_inner_bytes += skb->len - ihs; } else { - ihs = skb_transport_offset(skb) + tcp_hdrlen(skb); + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) + ihs = skb_transport_offset(skb) + sizeof(struct udphdr); + else + ihs = skb_transport_offset(skb) + tcp_hdrlen(skb); stats->tso_packets++; stats->tso_bytes += skb->len - ihs; } @@ -443,12 +446,11 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev) sq = priv->txq2sq[skb_get_queue_mapping(skb)]; mlx5e_sq_fetch_wqe(sq, &wqe, &pi); -#ifdef CONFIG_MLX5_ACCEL /* might send skbs and update wqe and pi */ skb = mlx5e_accel_handle_tx(skb, sq, dev, &wqe, &pi); if (unlikely(!skb)) return NETDEV_TX_OK; -#endif + return mlx5e_sq_xmit(sq, skb, wqe, pi); } @@ -466,6 +468,7 @@ static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq, bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) { + struct mlx5e_sq_stats *stats; struct mlx5e_txqsq *sq; struct mlx5_cqe64 *cqe; u32 dma_fifo_cc; @@ -483,6 +486,8 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) if (!cqe) return false; + stats = sq->stats; + npkts = 0; nbytes = 0; @@ -511,7 +516,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) queue_work(cq->channel->priv->wq, &sq->recover.recover_work); } - sq->stats->cqe_err++; + stats->cqe_err++; } do { @@ 
-556,6 +561,8 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); + stats->cqes += i; + mlx5_cqwq_update_db_record(&cq->wq); /* ensure cq space is freed before enabling more cqes */ @@ -571,7 +578,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) MLX5E_SQ_STOP_ROOM) && !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) { netif_tx_wake_queue(sq->txq); - sq->stats->wake++; + stats->wake++; } return (i == MLX5E_TX_CQ_POLL_BUDGET); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index 1b17f682693b..85d517360157 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -32,6 +32,7 @@ #include <linux/irq.h> #include "en.h" +#include "en/xdp.h" static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c) { @@ -74,13 +75,18 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) { struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel, napi); + struct mlx5e_ch_stats *ch_stats = c->stats; bool busy = false; int work_done = 0; int i; + ch_stats->poll++; + for (i = 0; i < c->num_tc; i++) busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget); + busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq); + if (c->xdp) busy |= mlx5e_poll_xdpsq_cq(&c->rq.xdpsq.cq); @@ -94,6 +100,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) if (busy) { if (likely(mlx5e_channel_no_affinity_change(c))) return budget; + ch_stats->aff_change++; if (budget && work_done == budget) work_done--; } @@ -101,6 +108,8 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) if (unlikely(!napi_complete_done(napi, work_done))) return work_done; + ch_stats->arm++; + for (i = 0; i < c->num_tc; i++) { mlx5e_handle_tx_dim(&c->sq[i]); mlx5e_cq_arm(&c->sq[i].cq); @@ -110,6 +119,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) mlx5e_cq_arm(&c->rq.cq); mlx5e_cq_arm(&c->icosq.cq); + mlx5e_cq_arm(&c->xdpsq.cq); return work_done; } @@ -118,8 +128,9 @@ void mlx5e_completion_event(struct mlx5_core_cq *mcq) { struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq); - cq->event_ctr++; napi_schedule(cq->napi); + cq->event_ctr++; + cq->channel->stats->events++; } void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 406c23862f5f..7669b4380779 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -40,6 +40,7 @@ #include "mlx5_core.h" #include "fpga/core.h" #include "eswitch.h" +#include "diag/fw_tracer.h" enum { MLX5_EQE_SIZE = sizeof(struct mlx5_eqe), @@ -168,6 +169,8 @@ static const char *eqe_type_str(u8 type) return "MLX5_EVENT_TYPE_FPGA_QP_ERROR"; case MLX5_EVENT_TYPE_GENERAL_EVENT: return "MLX5_EVENT_TYPE_GENERAL_EVENT"; + case MLX5_EVENT_TYPE_DEVICE_TRACER: + return "MLX5_EVENT_TYPE_DEVICE_TRACER"; default: return "Unrecognized event"; } @@ -576,6 +579,11 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr) case MLX5_EVENT_TYPE_GENERAL_EVENT: general_event_handler(dev, eqe); break; + + case MLX5_EVENT_TYPE_DEVICE_TRACER: + mlx5_fw_tracer_event(dev, eqe); + break; + default: mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n", eqe->type, eq->eqn); @@ -853,6 +861,9 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) if (MLX5_CAP_GEN(dev, temp_warn_event)) async_event_mask |= 
(1ull << MLX5_EVENT_TYPE_TEMP_WARN_EVENT); + if (MLX5_CAP_MCAM_REG(dev, tracer_registers)) + async_event_mask |= (1ull << MLX5_EVENT_TYPE_DEVICE_TRACER); + err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD, MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD, "mlx5_cmd_eq", MLX5_EQ_TYPE_ASYNC); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index b174da2884c5..c17bfcab517c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -38,6 +38,7 @@ #include <net/devlink.h> #include <linux/mlx5/device.h> #include <linux/mlx5/eswitch.h> +#include <linux/mlx5/fs.h> #include "lib/mpfs.h" #ifdef CONFIG_MLX5_ESWITCH @@ -256,9 +257,10 @@ struct mlx5_esw_flow_attr { int out_count; int action; - __be16 vlan_proto; - u16 vlan_vid; - u8 vlan_prio; + __be16 vlan_proto[MLX5_FS_VLAN_DEPTH]; + u16 vlan_vid[MLX5_FS_VLAN_DEPTH]; + u8 vlan_prio[MLX5_FS_VLAN_DEPTH]; + u8 total_vlan; bool vlan_handled; u32 encap_id; u32 mod_hdr_id; @@ -282,10 +284,17 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw, int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, int vport, u16 vlan, u8 qos, u8 set_flags); -static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev) +static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev, + u8 vlan_depth) { - return MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan) && - MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan); + bool ret = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan) && + MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan); + + if (vlan_depth == 1) + return ret; + + return ret && MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan_2) && + MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan_2); } #define MLX5_DEBUG_ESWITCH_MASK BIT(3) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 91f1209886ff..f72b5c9dcfe9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -66,13 +66,18 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, flow_act.action = attr->action; /* if per flow vlan pop/push is emulated, don't set that into the firmware */ - if (!mlx5_eswitch_vlan_actions_supported(esw->dev)) + if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1)) flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | MLX5_FLOW_CONTEXT_ACTION_VLAN_POP); else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) { - flow_act.vlan.ethtype = ntohs(attr->vlan_proto); - flow_act.vlan.vid = attr->vlan_vid; - flow_act.vlan.prio = attr->vlan_prio; + flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]); + flow_act.vlan[0].vid = attr->vlan_vid[0]; + flow_act.vlan[0].prio = attr->vlan_prio[0]; + if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) { + flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]); + flow_act.vlan[1].vid = attr->vlan_vid[1]; + flow_act.vlan[1].prio = attr->vlan_prio[1]; + } } if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { @@ -266,7 +271,7 @@ static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr, /* protects against (1) setting rules with different vlans to push and * (2) setting rules w.o vlans (attr->vlan = 0) && w. 
vlans to push (!= 0) */ - if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid)) + if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0])) goto out_notsupp; return 0; @@ -284,7 +289,7 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw, int err = 0; /* nop if we're on the vlan push/pop non emulation mode */ - if (mlx5_eswitch_vlan_actions_supported(esw->dev)) + if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1)) return 0; push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH); @@ -324,11 +329,11 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw, if (vport->vlan_refcount) goto skip_set_push; - err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid, 0, + err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0, SET_VLAN_INSERT | SET_VLAN_STRIP); if (err) goto out; - vport->vlan = attr->vlan_vid; + vport->vlan = attr->vlan_vid[0]; skip_set_push: vport->vlan_refcount++; } @@ -347,7 +352,7 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw, int err = 0; /* nop if we're on the vlan push/pop non emulation mode */ - if (mlx5_eswitch_vlan_actions_supported(esw->dev)) + if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1)) return 0; if (!attr->vlan_handled) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c index c9736238604a..5cf5f2a9d51f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c @@ -129,6 +129,7 @@ static void mlx5_fpga_tls_cmd_send(struct mlx5_fpga_device *fdev, static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock, void *ptr) { + unsigned long flags; int ret; /* TLS metadata format is 1 byte for syndrome followed @@ -139,9 +140,9 @@ static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock, BUILD_BUG_ON((SWID_END - 1) & 0xFF000000); idr_preload(GFP_KERNEL); - spin_lock_irq(idr_spinlock); + spin_lock_irqsave(idr_spinlock, flags); ret = idr_alloc(idr, ptr, SWID_START, SWID_END, GFP_ATOMIC); - spin_unlock_irq(idr_spinlock); + spin_unlock_irqrestore(idr_spinlock, flags); idr_preload_end(); return ret; @@ -157,6 +158,13 @@ static void mlx5_fpga_tls_release_swid(struct idr *idr, spin_unlock_irqrestore(idr_spinlock, flags); } +static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn, + struct mlx5_fpga_device *fdev, + struct mlx5_fpga_dma_buf *buf, u8 status) +{ + kfree(buf); +} + struct mlx5_teardown_stream_context { struct mlx5_fpga_tls_command_context cmd; u32 swid; @@ -178,9 +186,13 @@ mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn, mlx5_fpga_err(fdev, "Teardown stream failed with syndrome = %d", syndrome); - else + else if (MLX5_GET(tls_cmd, cmd->buf.sg[0].data, direction_sx)) mlx5_fpga_tls_release_swid(&fdev->tls->tx_idr, - &fdev->tls->idr_spinlock, + &fdev->tls->tx_idr_spinlock, + ctx->swid); + else + mlx5_fpga_tls_release_swid(&fdev->tls->rx_idr, + &fdev->tls->rx_idr_spinlock, ctx->swid); } mlx5_fpga_tls_put_command_ctx(cmd); @@ -196,6 +208,40 @@ static void mlx5_fpga_tls_flow_to_cmd(void *flow, void *cmd) MLX5_GET(tls_flow, flow, direction_sx)); } +int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq, + u64 rcd_sn) +{ + struct mlx5_fpga_dma_buf *buf; + int size = sizeof(*buf) + MLX5_TLS_COMMAND_SIZE; + void *flow; + void *cmd; + int ret; + + buf = kzalloc(size, GFP_ATOMIC); + if (!buf) + return -ENOMEM; + + cmd = (buf + 1); + + rcu_read_lock(); + flow = 
idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle)); + rcu_read_unlock(); + mlx5_fpga_tls_flow_to_cmd(flow, cmd); + + MLX5_SET(tls_cmd, cmd, swid, ntohl(handle)); + MLX5_SET64(tls_cmd, cmd, tls_rcd_sn, be64_to_cpu(rcd_sn)); + MLX5_SET(tls_cmd, cmd, tcp_sn, seq); + MLX5_SET(tls_cmd, cmd, command_type, CMD_RESYNC_RX); + + buf->sg[0].data = cmd; + buf->sg[0].size = MLX5_TLS_COMMAND_SIZE; + buf->complete = mlx_tls_kfree_complete; + + ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf); + + return ret; +} + static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev, void *flow, u32 swid, gfp_t flags) { @@ -223,14 +269,18 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev, mlx5_fpga_tls_teardown_completion); } -void mlx5_fpga_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid, - gfp_t flags) +void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, + gfp_t flags, bool direction_sx) { struct mlx5_fpga_tls *tls = mdev->fpga->tls; void *flow; rcu_read_lock(); - flow = idr_find(&tls->tx_idr, swid); + if (direction_sx) + flow = idr_find(&tls->tx_idr, swid); + else + flow = idr_find(&tls->rx_idr, swid); + rcu_read_unlock(); if (!flow) { @@ -289,9 +339,11 @@ mlx5_fpga_tls_setup_completion(struct mlx5_fpga_conn *conn, * the command context because we might not have received * the tx completion yet. */ - mlx5_fpga_tls_del_tx_flow(fdev->mdev, - MLX5_GET(tls_cmd, tls_cmd, swid), - GFP_ATOMIC); + mlx5_fpga_tls_del_flow(fdev->mdev, + MLX5_GET(tls_cmd, tls_cmd, swid), + GFP_ATOMIC, + MLX5_GET(tls_cmd, tls_cmd, + direction_sx)); } mlx5_fpga_tls_put_command_ctx(cmd); @@ -415,8 +467,7 @@ int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev) if (err) goto error; - if (!(tls->caps & (MLX5_ACCEL_TLS_TX | MLX5_ACCEL_TLS_V12 | - MLX5_ACCEL_TLS_AES_GCM128))) { + if (!(tls->caps & (MLX5_ACCEL_TLS_V12 | MLX5_ACCEL_TLS_AES_GCM128))) { err = -ENOTSUPP; goto error; } @@ -438,7 +489,9 @@ int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev) INIT_LIST_HEAD(&tls->pending_cmds); idr_init(&tls->tx_idr); - spin_lock_init(&tls->idr_spinlock); + idr_init(&tls->rx_idr); + spin_lock_init(&tls->tx_idr_spinlock); + spin_lock_init(&tls->rx_idr_spinlock); fdev->tls = tls; return 0; @@ -500,9 +553,9 @@ static int mlx5_fpga_tls_set_key_material(void *cmd, u32 caps, return 0; } -static int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, - struct tls_crypto_info *crypto_info, u32 swid, - u32 tcp_sn) +static int _mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, + struct tls_crypto_info *crypto_info, + u32 swid, u32 tcp_sn) { u32 caps = mlx5_fpga_tls_device_caps(mdev); struct mlx5_setup_stream_context *ctx; @@ -533,30 +586,42 @@ out: return ret; } -int mlx5_fpga_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow, - struct tls_crypto_info *crypto_info, - u32 start_offload_tcp_sn, u32 *p_swid) +int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, + struct tls_crypto_info *crypto_info, + u32 start_offload_tcp_sn, u32 *p_swid, + bool direction_sx) { struct mlx5_fpga_tls *tls = mdev->fpga->tls; int ret = -ENOMEM; u32 swid; - ret = mlx5_fpga_tls_alloc_swid(&tls->tx_idr, &tls->idr_spinlock, flow); + if (direction_sx) + ret = mlx5_fpga_tls_alloc_swid(&tls->tx_idr, + &tls->tx_idr_spinlock, flow); + else + ret = mlx5_fpga_tls_alloc_swid(&tls->rx_idr, + &tls->rx_idr_spinlock, flow); + if (ret < 0) return ret; swid = ret; - MLX5_SET(tls_flow, flow, direction_sx, 1); + MLX5_SET(tls_flow, flow, direction_sx, direction_sx ? 
1 : 0); - ret = mlx5_fpga_tls_add_flow(mdev, flow, crypto_info, swid, - start_offload_tcp_sn); + ret = _mlx5_fpga_tls_add_flow(mdev, flow, crypto_info, swid, + start_offload_tcp_sn); if (ret && ret != -EINTR) goto free_swid; *p_swid = swid; return 0; free_swid: - mlx5_fpga_tls_release_swid(&tls->tx_idr, &tls->idr_spinlock, swid); + if (direction_sx) + mlx5_fpga_tls_release_swid(&tls->tx_idr, + &tls->tx_idr_spinlock, swid); + else + mlx5_fpga_tls_release_swid(&tls->rx_idr, + &tls->rx_idr_spinlock, swid); return ret; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h index 800a214e4e49..3b2e37bf76fe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h @@ -46,15 +46,18 @@ struct mlx5_fpga_tls { struct mlx5_fpga_conn *conn; struct idr tx_idr; - spinlock_t idr_spinlock; /* protects the IDR */ + struct idr rx_idr; + spinlock_t tx_idr_spinlock; /* protects the IDR */ + spinlock_t rx_idr_spinlock; /* protects the IDR */ }; -int mlx5_fpga_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow, - struct tls_crypto_info *crypto_info, - u32 start_offload_tcp_sn, u32 *p_swid); +int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, + struct tls_crypto_info *crypto_info, + u32 start_offload_tcp_sn, u32 *p_swid, + bool direction_sx); -void mlx5_fpga_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid, - gfp_t flags); +void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, + gfp_t flags, bool direction_sx); bool mlx5_fpga_is_tls_device(struct mlx5_core_dev *mdev); int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev); @@ -65,4 +68,7 @@ static inline u32 mlx5_fpga_tls_device_caps(struct mlx5_core_dev *mdev) return mdev->fpga->tls->caps; } +int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq, + u64 rcd_sn); + #endif /* __MLX5_FPGA_TLS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index 5a00deff5457..6a62b84e57f4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -349,9 +349,15 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan); - MLX5_SET(vlan, vlan, ethtype, fte->action.vlan.ethtype); - MLX5_SET(vlan, vlan, vid, fte->action.vlan.vid); - MLX5_SET(vlan, vlan, prio, fte->action.vlan.prio); + MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype); + MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid); + MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio); + + vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2); + + MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype); + MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid); + MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio); in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context, match_value); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 6ddb2565884d..a21df24b695e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1465,7 +1465,9 @@ static bool check_conflicting_actions(u32 action1, u32 action2) MLX5_FLOW_CONTEXT_ACTION_DECAP | MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_VLAN_POP | - MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)) + MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | + 
MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 | + MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2)) return true; return false; @@ -1824,7 +1826,7 @@ search_again_locked: g = alloc_auto_flow_group(ft, spec); if (IS_ERR(g)) { - rule = (void *)g; + rule = ERR_CAST(g); up_write_ref_node(&ft->node); return rule; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c new file mode 100644 index 000000000000..9a8fd762167b --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c @@ -0,0 +1,230 @@ +/* + * Copyright (c) 2016, Mellanox Technologies, Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mlx5/driver.h> +#include "mlx5_core.h" +#include "vxlan.h" + +struct mlx5_vxlan { + struct mlx5_core_dev *mdev; + spinlock_t lock; /* protect vxlan table */ + /* max_num_ports is usually 4, 16 buckets is more than enough */ + DECLARE_HASHTABLE(htable, 4); + int num_ports; + struct mutex sync_lock; /* sync add/del port HW operations */ +}; + +struct mlx5_vxlan_port { + struct hlist_node hlist; + atomic_t refcount; + u16 udp_port; +}; + +static inline u8 mlx5_vxlan_max_udp_ports(struct mlx5_core_dev *mdev) +{ + return MLX5_CAP_ETH(mdev, max_vxlan_udp_ports) ?: 4; +} + +static int mlx5_vxlan_core_add_port_cmd(struct mlx5_core_dev *mdev, u16 port) +{ + u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(add_vxlan_udp_dport_out)] = {0}; + + MLX5_SET(add_vxlan_udp_dport_in, in, opcode, + MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT); + MLX5_SET(add_vxlan_udp_dport_in, in, vxlan_udp_port, port); + return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); +} + +static int mlx5_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port) +{ + u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)] = {0}; + + MLX5_SET(delete_vxlan_udp_dport_in, in, opcode, + MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT); + MLX5_SET(delete_vxlan_udp_dport_in, in, vxlan_udp_port, port); + return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); +} + +static struct mlx5_vxlan_port* +mlx5_vxlan_lookup_port_locked(struct mlx5_vxlan *vxlan, u16 port) +{ + struct mlx5_vxlan_port *vxlanp; + + hash_for_each_possible(vxlan->htable, vxlanp, hlist, port) { + if (vxlanp->udp_port == port) + return vxlanp; + } + + return NULL; +} + +struct mlx5_vxlan_port *mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port) +{ + struct mlx5_vxlan_port *vxlanp; + + if (!mlx5_vxlan_allowed(vxlan)) + return NULL; + + spin_lock_bh(&vxlan->lock); + vxlanp = mlx5_vxlan_lookup_port_locked(vxlan, port); + spin_unlock_bh(&vxlan->lock); + + return vxlanp; +} + +int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port) +{ + struct mlx5_vxlan_port *vxlanp; + int ret = -ENOSPC; + + vxlanp = mlx5_vxlan_lookup_port(vxlan, port); + if (vxlanp) { + atomic_inc(&vxlanp->refcount); + return 0; + } + + mutex_lock(&vxlan->sync_lock); + if (vxlan->num_ports >= mlx5_vxlan_max_udp_ports(vxlan->mdev)) { + mlx5_core_info(vxlan->mdev, + "UDP port (%d) not offloaded, max number of UDP ports (%d) are already offloaded\n", + port, mlx5_vxlan_max_udp_ports(vxlan->mdev)); + ret = -ENOSPC; + goto unlock; + } + + ret = mlx5_vxlan_core_add_port_cmd(vxlan->mdev, port); + if (ret) + goto unlock; + + vxlanp = kzalloc(sizeof(*vxlanp), GFP_KERNEL); + if (!vxlanp) { + ret = -ENOMEM; + goto err_delete_port; + } + + vxlanp->udp_port = port; + atomic_set(&vxlanp->refcount, 1); + + spin_lock_bh(&vxlan->lock); + hash_add(vxlan->htable, &vxlanp->hlist, port); + spin_unlock_bh(&vxlan->lock); + + vxlan->num_ports++; + mutex_unlock(&vxlan->sync_lock); + return 0; + +err_delete_port: + mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port); + +unlock: + mutex_unlock(&vxlan->sync_lock); + return ret; +} + +int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port) +{ + struct mlx5_vxlan_port *vxlanp; + bool remove = false; + int ret = 0; + + mutex_lock(&vxlan->sync_lock); + + spin_lock_bh(&vxlan->lock); + vxlanp = mlx5_vxlan_lookup_port_locked(vxlan, port); + if (!vxlanp) { + ret = -ENOENT; + goto out_unlock; + } + + if 
(atomic_dec_and_test(&vxlanp->refcount)) { + hash_del(&vxlanp->hlist); + remove = true; + } + +out_unlock: + spin_unlock_bh(&vxlan->lock); + + if (remove) { + mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port); + kfree(vxlanp); + vxlan->num_ports--; + } + + mutex_unlock(&vxlan->sync_lock); + + return ret; +} + +struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev) +{ + struct mlx5_vxlan *vxlan; + + if (!MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) || !mlx5_core_is_pf(mdev)) + return ERR_PTR(-ENOTSUPP); + + vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL); + if (!vxlan) + return ERR_PTR(-ENOMEM); + + vxlan->mdev = mdev; + mutex_init(&vxlan->sync_lock); + spin_lock_init(&vxlan->lock); + hash_init(vxlan->htable); + + /* Hardware adds 4789 by default */ + mlx5_vxlan_add_port(vxlan, 4789); + + return vxlan; +} + +void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan) +{ + struct mlx5_vxlan_port *vxlanp; + struct hlist_node *tmp; + int bkt; + + if (!mlx5_vxlan_allowed(vxlan)) + return; + + /* Lockless since we are the only hash table consumers */ + hash_for_each_safe(vxlan->htable, bkt, tmp, vxlanp, hlist) { + hash_del(&vxlanp->hlist); + mlx5_vxlan_core_del_port_cmd(vxlan->mdev, vxlanp->udp_port); + kfree(vxlanp); + } + + kfree(vxlan); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h index 5ef6ae7d568a..fd874a30c4d0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h @@ -33,31 +33,32 @@ #define __MLX5_VXLAN_H__ #include <linux/mlx5/driver.h> -#include "en.h" -struct mlx5e_vxlan { - atomic_t refcount; - u16 udp_port; -}; +struct mlx5_vxlan; +struct mlx5_vxlan_port; -struct mlx5e_vxlan_work { - struct work_struct work; - struct mlx5e_priv *priv; - sa_family_t sa_family; - u16 port; -}; +#ifdef CONFIG_MLX5_CORE_EN -static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev) +static inline bool mlx5_vxlan_allowed(struct mlx5_vxlan *vxlan) { - return (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) && - mlx5_core_is_pf(mdev)); + /* not allowed reason is encoded in vxlan pointer as error, + * on mlx5_vxlan_create + */ + return !IS_ERR_OR_NULL(vxlan); } -void mlx5e_vxlan_init(struct mlx5e_priv *priv); -void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv); +struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev); +void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan); +int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port); +int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port); +struct mlx5_vxlan_port *mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port); -void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family, - u16 port, int add); -struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port); +#else + +static inline struct mlx5_vxlan* +mlx5_vxlan_create(struct mlx5_core_dev *mdev) { return ERR_PTR(-ENOTSUPP); } +static inline void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan) { return; } + +#endif #endif /* __MLX5_VXLAN_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 615005e63819..03b9c6733eed 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -62,9 +62,11 @@ #include "accel/ipsec.h" #include "accel/tls.h" #include "lib/clock.h" +#include "lib/vxlan.h" +#include "diag/fw_tracer.h" MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>"); -MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 
core driver"); +MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION(DRIVER_VERSION); @@ -960,6 +962,8 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv) mlx5_init_clock(dev); + dev->vxlan = mlx5_vxlan_create(dev); + err = mlx5_init_rl_table(dev); if (err) { dev_err(&pdev->dev, "Failed to init rate limiting\n"); @@ -990,6 +994,8 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv) goto err_sriov_cleanup; } + dev->tracer = mlx5_fw_tracer_create(dev); + return 0; err_sriov_cleanup: @@ -1001,6 +1007,7 @@ err_mpfs_cleanup: err_rl_cleanup: mlx5_cleanup_rl_table(dev); err_tables_cleanup: + mlx5_vxlan_destroy(dev->vxlan); mlx5_cleanup_mkey_table(dev); mlx5_cleanup_srq_table(dev); mlx5_cleanup_qp_table(dev); @@ -1015,11 +1022,13 @@ out: static void mlx5_cleanup_once(struct mlx5_core_dev *dev) { + mlx5_fw_tracer_destroy(dev->tracer); mlx5_fpga_cleanup(dev); mlx5_sriov_cleanup(dev); mlx5_eswitch_cleanup(dev->priv.eswitch); mlx5_mpfs_cleanup(dev); mlx5_cleanup_rl_table(dev); + mlx5_vxlan_destroy(dev->vxlan); mlx5_cleanup_clock(dev); mlx5_cleanup_reserved_gids(dev); mlx5_cleanup_mkey_table(dev); @@ -1167,10 +1176,16 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, goto err_put_uars; } + err = mlx5_fw_tracer_init(dev->tracer); + if (err) { + dev_err(&pdev->dev, "Failed to init FW tracer\n"); + goto err_fw_tracer; + } + err = alloc_comp_eqs(dev); if (err) { dev_err(&pdev->dev, "Failed to alloc completion EQs\n"); - goto err_stop_eqs; + goto err_comp_eqs; } err = mlx5_irq_set_affinity_hints(dev); @@ -1252,7 +1267,10 @@ err_fpga_start: err_affinity_hints: free_comp_eqs(dev); -err_stop_eqs: +err_comp_eqs: + mlx5_fw_tracer_cleanup(dev->tracer); + +err_fw_tracer: mlx5_stop_eqs(dev); err_put_uars: @@ -1320,6 +1338,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, mlx5_fpga_device_stop(dev); mlx5_irq_clear_affinity_hints(dev); free_comp_eqs(dev); + mlx5_fw_tracer_cleanup(dev->tracer); mlx5_stop_eqs(dev); mlx5_put_uars_page(dev, priv->uar); mlx5_free_irq_vectors(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 023882d9a22e..49955117ae36 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -66,6 +66,12 @@ do { \ __func__, __LINE__, current->pid, \ ##__VA_ARGS__) +#define mlx5_core_err_rl(__dev, format, ...) \ + dev_err_ratelimited(&(__dev)->pdev->dev, \ + "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, \ + ##__VA_ARGS__) + #define mlx5_core_warn(__dev, format, ...) 
\ dev_warn(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \ __func__, __LINE__, current->pid, \ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c index f4f02f775c93..0670165afd5f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c @@ -146,23 +146,6 @@ int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey, } EXPORT_SYMBOL(mlx5_core_query_mkey); -int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey, - u32 *mkey) -{ - u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {0}; - u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {0}; - int err; - - MLX5_SET(query_special_contexts_in, in, opcode, - MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS); - err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); - if (!err) - *mkey = MLX5_GET(query_special_contexts_out, out, - dump_fill_mkey); - return err; -} -EXPORT_SYMBOL(mlx5_core_dump_fill_mkey); - static inline u32 mlx5_get_psv(u32 *out, int psv_index) { switch (psv_index) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c deleted file mode 100644 index 2f74953e4561..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c +++ /dev/null @@ -1,190 +0,0 @@ -/* - * Copyright (c) 2016, Mellanox Technologies, Ltd. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/mlx5/driver.h> -#include "mlx5_core.h" -#include "vxlan.h" - -void mlx5e_vxlan_init(struct mlx5e_priv *priv) -{ - struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; - - spin_lock_init(&vxlan_db->lock); - INIT_RADIX_TREE(&vxlan_db->tree, GFP_ATOMIC); -} - -static int mlx5e_vxlan_core_add_port_cmd(struct mlx5_core_dev *mdev, u16 port) -{ - u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(add_vxlan_udp_dport_out)] = {0}; - - MLX5_SET(add_vxlan_udp_dport_in, in, opcode, - MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT); - MLX5_SET(add_vxlan_udp_dport_in, in, vxlan_udp_port, port); - return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); -} - -static int mlx5e_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port) -{ - u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)] = {0}; - - MLX5_SET(delete_vxlan_udp_dport_in, in, opcode, - MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT); - MLX5_SET(delete_vxlan_udp_dport_in, in, vxlan_udp_port, port); - return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); -} - -struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port) -{ - struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; - struct mlx5e_vxlan *vxlan; - - spin_lock_bh(&vxlan_db->lock); - vxlan = radix_tree_lookup(&vxlan_db->tree, port); - spin_unlock_bh(&vxlan_db->lock); - - return vxlan; -} - -static void mlx5e_vxlan_add_port(struct work_struct *work) -{ - struct mlx5e_vxlan_work *vxlan_work = - container_of(work, struct mlx5e_vxlan_work, work); - struct mlx5e_priv *priv = vxlan_work->priv; - struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; - u16 port = vxlan_work->port; - struct mlx5e_vxlan *vxlan; - int err; - - mutex_lock(&priv->state_lock); - vxlan = mlx5e_vxlan_lookup_port(priv, port); - if (vxlan) { - atomic_inc(&vxlan->refcount); - goto free_work; - } - - if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port)) - goto free_work; - - vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL); - if (!vxlan) - goto err_delete_port; - - vxlan->udp_port = port; - atomic_set(&vxlan->refcount, 1); - - spin_lock_bh(&vxlan_db->lock); - err = radix_tree_insert(&vxlan_db->tree, vxlan->udp_port, vxlan); - spin_unlock_bh(&vxlan_db->lock); - if (err) - goto err_free; - - goto free_work; - -err_free: - kfree(vxlan); -err_delete_port: - mlx5e_vxlan_core_del_port_cmd(priv->mdev, port); -free_work: - mutex_unlock(&priv->state_lock); - kfree(vxlan_work); -} - -static void mlx5e_vxlan_del_port(struct work_struct *work) -{ - struct mlx5e_vxlan_work *vxlan_work = - container_of(work, struct mlx5e_vxlan_work, work); - struct mlx5e_priv *priv = vxlan_work->priv; - struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; - u16 port = vxlan_work->port; - struct mlx5e_vxlan *vxlan; - bool remove = false; - - mutex_lock(&priv->state_lock); - spin_lock_bh(&vxlan_db->lock); - vxlan = radix_tree_lookup(&vxlan_db->tree, port); - if (!vxlan) - goto out_unlock; - - if (atomic_dec_and_test(&vxlan->refcount)) { - radix_tree_delete(&vxlan_db->tree, port); - remove = true; - } - -out_unlock: - spin_unlock_bh(&vxlan_db->lock); - - if (remove) { - mlx5e_vxlan_core_del_port_cmd(priv->mdev, port); - kfree(vxlan); - } - mutex_unlock(&priv->state_lock); - kfree(vxlan_work); -} - -void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family, - u16 port, int add) -{ - struct mlx5e_vxlan_work *vxlan_work; - - vxlan_work = kmalloc(sizeof(*vxlan_work), GFP_ATOMIC); - if (!vxlan_work) - 
return; - - if (add) - INIT_WORK(&vxlan_work->work, mlx5e_vxlan_add_port); - else - INIT_WORK(&vxlan_work->work, mlx5e_vxlan_del_port); - - vxlan_work->priv = priv; - vxlan_work->port = port; - vxlan_work->sa_family = sa_family; - queue_work(priv->wq, &vxlan_work->work); -} - -void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv) -{ - struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; - struct mlx5e_vxlan *vxlan; - unsigned int port = 0; - - /* Lockless since we are the only radix-tree consumers, wq is disabled */ - while (radix_tree_gang_lookup(&vxlan_db->tree, (void **)&vxlan, port, 1)) { - port = vxlan->udp_port; - radix_tree_delete(&vxlan_db->tree, port); - mlx5e_vxlan_core_del_port_cmd(priv->mdev, port); - kfree(vxlan); - } -} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h index 0b47126815b6..2bd4c3184eba 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h @@ -229,6 +229,11 @@ static inline int mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq) return !wq->cur_sz; } +static inline int mlx5_wq_ll_missing(struct mlx5_wq_ll *wq) +{ + return wq->fbc.sz_m1 - wq->cur_sz; +} + static inline void *mlx5_wq_ll_get_wqe(struct mlx5_wq_ll *wq, u16 ix) { return mlx5_frag_buf_get_wqe(&wq->fbc, ix); diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig index 82827a8d3d67..8a291eb36c64 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig +++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig @@ -78,6 +78,7 @@ config MLXSW_SPECTRUM depends on IPV6 || IPV6=n depends on NET_IPGRE || NET_IPGRE=n depends on IPV6_GRE || IPV6_GRE=n + select GENERIC_ALLOCATOR select PARMAN select MLXFW default m diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile index 0cadcabfe86f..68fa44a41485 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -15,11 +15,18 @@ mlxsw_switchx2-objs := switchx2.o obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \ spectrum_switchdev.o spectrum_router.o \ - spectrum_kvdl.o spectrum_acl_tcam.o \ - spectrum_acl.o spectrum_flower.o \ - spectrum_cnt.o spectrum_fid.o \ - spectrum_ipip.o spectrum_acl_flex_actions.o \ - spectrum_mr.o spectrum_mr_tcam.o \ + spectrum1_kvdl.o spectrum2_kvdl.o \ + spectrum_kvdl.o \ + spectrum_acl_tcam.o spectrum_acl_ctcam.o \ + spectrum_acl_atcam.o spectrum_acl_erp.o \ + spectrum1_acl_tcam.o spectrum2_acl_tcam.o \ + spectrum_acl.o \ + spectrum_flower.o spectrum_cnt.o \ + spectrum_fid.o spectrum_ipip.o \ + spectrum_acl_flex_actions.o \ + spectrum_acl_flex_keys.o \ + spectrum1_mr_tcam.o spectrum2_mr_tcam.o \ + spectrum_mr_tcam.o spectrum_mr.o \ spectrum_qdisc.o spectrum_span.o mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o mlxsw_spectrum-$(CONFIG_NET_DEVLINK) += spectrum_dpipe.o diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c index 3c0d882ba183..a4669e79fdf9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c @@ -351,9 +351,24 @@ struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa) block->first_set = mlxsw_afa_set_create(true); if (!block->first_set) goto err_first_set_create; - block->cur_set = block->first_set; + + /* In case user instructs to have dummy 
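mlx5_wq_ll_missing() above reports how many descriptors the linked-list WQ is short of full (fbc.sz_m1 - cur_sz). A hedged sketch of the kind of RX refill loop such a helper enables; example_alloc_and_post_wqe() is a hypothetical placeholder, not patch code:

static int example_alloc_and_post_wqe(struct mlx5_wq_ll *wq); /* hypothetical */

static void example_rx_refill(struct mlx5_wq_ll *wq)
{
        int missing = mlx5_wq_ll_missing(wq);

        while (missing-- > 0) {
                if (example_alloc_and_post_wqe(wq))
                        break; /* out of buffers; retry on next NAPI poll */
        }
}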
first set, we leave it + * empty here and create another, real, set right away. + */ + if (mlxsw_afa->ops->dummy_first_set) { + block->cur_set = mlxsw_afa_set_create(false); + if (!block->cur_set) + goto err_second_set_create; + block->cur_set->prev = block->first_set; + block->first_set->next = block->cur_set; + } else { + block->cur_set = block->first_set; + } + return block; +err_second_set_create: + mlxsw_afa_set_destroy(block->first_set); err_first_set_create: kfree(block); return NULL; @@ -415,11 +430,31 @@ char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block) } EXPORT_SYMBOL(mlxsw_afa_block_first_set); -u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block) +char *mlxsw_afa_block_cur_set(struct mlxsw_afa_block *block) { - return block->first_set->kvdl_index; + return block->cur_set->ht_key.enc_actions; } -EXPORT_SYMBOL(mlxsw_afa_block_first_set_kvdl_index); +EXPORT_SYMBOL(mlxsw_afa_block_cur_set); + +u32 mlxsw_afa_block_first_kvdl_index(struct mlxsw_afa_block *block) +{ + /* First set is never in KVD linear. So the first set + * with valid KVD linear index is always the second one. + */ + if (WARN_ON(!block->first_set->next)) + return 0; + return block->first_set->next->kvdl_index; +} +EXPORT_SYMBOL(mlxsw_afa_block_first_kvdl_index); + +int mlxsw_afa_block_activity_get(struct mlxsw_afa_block *block, bool *activity) +{ + u32 kvdl_index = mlxsw_afa_block_first_kvdl_index(block); + + return block->afa->ops->kvdl_set_activity_get(block->afa->ops_priv, + kvdl_index, activity); +} +EXPORT_SYMBOL(mlxsw_afa_block_activity_get); int mlxsw_afa_block_continue(struct mlxsw_afa_block *block) { @@ -718,14 +753,17 @@ mlxsw_afa_vlan_pack(char *payload, } int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block, - u16 vid, u8 pcp, u8 et) + u16 vid, u8 pcp, u8 et, + struct netlink_ext_ack *extack) { char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_VLAN_CODE, MLXSW_AFA_VLAN_SIZE); - if (!act) + if (!act) { + NL_SET_ERR_MSG_MOD(extack, "Cannot append vlan_modify action"); return -ENOBUFS; + } mlxsw_afa_vlan_pack(act, MLXSW_AFA_VLAN_VLAN_TAG_CMD_NOP, MLXSW_AFA_VLAN_CMD_SET_OUTER, vid, MLXSW_AFA_VLAN_CMD_SET_OUTER, pcp, @@ -918,19 +956,23 @@ mlxsw_afa_block_append_allocated_mirror(struct mlxsw_afa_block *block, int mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block, u8 local_in_port, - const struct net_device *out_dev, bool ingress) + const struct net_device *out_dev, bool ingress, + struct netlink_ext_ack *extack) { struct mlxsw_afa_mirror *mirror; int err; mirror = mlxsw_afa_mirror_create(block, local_in_port, out_dev, ingress); - if (IS_ERR(mirror)) + if (IS_ERR(mirror)) { + NL_SET_ERR_MSG_MOD(extack, "Cannot create mirror action"); return PTR_ERR(mirror); - + } err = mlxsw_afa_block_append_allocated_mirror(block, mirror->span_id); - if (err) + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Cannot append mirror action"); goto err_append_allocated_mirror; + } return 0; @@ -980,24 +1022,30 @@ mlxsw_afa_forward_pack(char *payload, enum mlxsw_afa_forward_type type, } int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block, - u8 local_port, bool in_port) + u8 local_port, bool in_port, + struct netlink_ext_ack *extack) { struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref; u32 kvdl_index; char *act; int err; - if (in_port) + if (in_port) { + NL_SET_ERR_MSG_MOD(extack, "Forwarding to ingress port is not supported"); return -EOPNOTSUPP; + } fwd_entry_ref = mlxsw_afa_fwd_entry_ref_create(block, local_port); - if (IS_ERR(fwd_entry_ref)) + if 
(IS_ERR(fwd_entry_ref)) { + NL_SET_ERR_MSG_MOD(extack, "Cannot create forward action"); return PTR_ERR(fwd_entry_ref); + } kvdl_index = fwd_entry_ref->fwd_entry->kvdl_index; act = mlxsw_afa_block_append_action(block, MLXSW_AFA_FORWARD_CODE, MLXSW_AFA_FORWARD_SIZE); if (!act) { err = -ENOBUFS; + NL_SET_ERR_MSG_MOD(extack, "Cannot append forward action"); goto err_append_action; } mlxsw_afa_forward_pack(act, MLXSW_AFA_FORWARD_TYPE_PBS, @@ -1061,21 +1109,25 @@ int mlxsw_afa_block_append_allocated_counter(struct mlxsw_afa_block *block, EXPORT_SYMBOL(mlxsw_afa_block_append_allocated_counter); int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block, - u32 *p_counter_index) + u32 *p_counter_index, + struct netlink_ext_ack *extack) { struct mlxsw_afa_counter *counter; u32 counter_index; int err; counter = mlxsw_afa_counter_create(block); - if (IS_ERR(counter)) + if (IS_ERR(counter)) { + NL_SET_ERR_MSG_MOD(extack, "Cannot create count action"); return PTR_ERR(counter); + } counter_index = counter->counter_index; err = mlxsw_afa_block_append_allocated_counter(block, counter_index); - if (err) + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Cannot append count action"); goto err_append_allocated_counter; - + } if (p_counter_index) *p_counter_index = counter_index; return 0; @@ -1118,13 +1170,16 @@ static inline void mlxsw_afa_virfwd_pack(char *payload, mlxsw_afa_virfwd_fid_set(payload, fid); } -int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid) +int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid, + struct netlink_ext_ack *extack) { char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_VIRFWD_CODE, MLXSW_AFA_VIRFWD_SIZE); - if (!act) + if (!act) { + NL_SET_ERR_MSG_MOD(extack, "Cannot append fid_set action"); return -ENOBUFS; + } mlxsw_afa_virfwd_pack(act, MLXSW_AFA_VIRFWD_FID_CMD_SET, fid); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h index 3a155d104384..a6ffadd30807 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h @@ -45,6 +45,8 @@ struct mlxsw_afa_ops { int (*kvdl_set_add)(void *priv, u32 *p_kvdl_index, char *enc_actions, bool is_first); void (*kvdl_set_del)(void *priv, u32 kvdl_index, bool is_first); + int (*kvdl_set_activity_get)(void *priv, u32 kvdl_index, + bool *activity); int (*kvdl_fwd_entry_add)(void *priv, u32 *p_kvdl_index, u8 local_port); void (*kvdl_fwd_entry_del)(void *priv, u32 kvdl_index); int (*counter_index_get)(void *priv, unsigned int *p_counter_index); @@ -54,6 +56,7 @@ struct mlxsw_afa_ops { bool ingress, int *p_span_id); void (*mirror_del)(void *priv, u8 local_in_port, int span_id, bool ingress); + bool dummy_first_set; }; struct mlxsw_afa *mlxsw_afa_create(unsigned int max_acts_per_set, @@ -64,7 +67,9 @@ struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa); void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block); int mlxsw_afa_block_commit(struct mlxsw_afa_block *block); char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block); -u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block); +char *mlxsw_afa_block_cur_set(struct mlxsw_afa_block *block); +u32 mlxsw_afa_block_first_kvdl_index(struct mlxsw_afa_block *block); +int mlxsw_afa_block_activity_get(struct mlxsw_afa_block *block, bool *activity); int mlxsw_afa_block_continue(struct mlxsw_afa_block *block); int 
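The repeated change through core_acl_flex_actions.c is mechanical: each append helper gains a struct netlink_ext_ack *extack and attaches a human-readable reason at the point of failure, which tc then surfaces to the user. The shape of the pattern, reduced to a hypothetical helper reusing the MLXSW_AFA_VLAN_* constants seen above:

static int example_append_action(struct mlxsw_afa_block *block,
                                 struct netlink_ext_ack *extack)
{
        char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_VLAN_CODE,
                                                  MLXSW_AFA_VLAN_SIZE);

        if (!act) {
                NL_SET_ERR_MSG_MOD(extack, "Cannot append example action");
                return -ENOBUFS;
        }
        /* ...pack the action payload into 'act' here... */
        return 0;
}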
mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id); int mlxsw_afa_block_terminate(struct mlxsw_afa_block *block); @@ -75,16 +80,21 @@ int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block, int mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block, u8 local_in_port, const struct net_device *out_dev, - bool ingress); + bool ingress, + struct netlink_ext_ack *extack); int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block, - u8 local_port, bool in_port); + u8 local_port, bool in_port, + struct netlink_ext_ack *extack); int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block, - u16 vid, u8 pcp, u8 et); + u16 vid, u8 pcp, u8 et, + struct netlink_ext_ack *extack); int mlxsw_afa_block_append_allocated_counter(struct mlxsw_afa_block *block, u32 counter_index); int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block, - u32 *p_counter_index); -int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid); + u32 *p_counter_index, + struct netlink_ext_ack *extack); +int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid, + struct netlink_ext_ack *extack); int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block, u16 expected_irif, u16 min_mtu, bool rmid_valid, u32 kvdl_index); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c index b32a00972e83..9649b4d9349a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c @@ -43,6 +43,7 @@ struct mlxsw_afk { struct list_head key_info_list; unsigned int max_blocks; + const struct mlxsw_afk_ops *ops; const struct mlxsw_afk_block *blocks; unsigned int blocks_count; }; @@ -69,8 +70,7 @@ static bool mlxsw_afk_blocks_check(struct mlxsw_afk *mlxsw_afk) } struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks, - const struct mlxsw_afk_block *blocks, - unsigned int blocks_count) + const struct mlxsw_afk_ops *ops) { struct mlxsw_afk *mlxsw_afk; @@ -79,8 +79,9 @@ struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks, return NULL; INIT_LIST_HEAD(&mlxsw_afk->key_info_list); mlxsw_afk->max_blocks = max_blocks; - mlxsw_afk->blocks = blocks; - mlxsw_afk->blocks_count = blocks_count; + mlxsw_afk->ops = ops; + mlxsw_afk->blocks = ops->blocks; + mlxsw_afk->blocks_count = ops->blocks_count; WARN_ON(!mlxsw_afk_blocks_check(mlxsw_afk)); return mlxsw_afk; } @@ -415,61 +416,76 @@ void mlxsw_afk_values_add_buf(struct mlxsw_afk_element_values *values, } EXPORT_SYMBOL(mlxsw_afk_values_add_buf); -static void mlxsw_afk_encode_u32(const struct mlxsw_item *storage_item, - const struct mlxsw_item *output_item, - char *storage, char *output_indexed) +static void mlxsw_sp_afk_encode_u32(const struct mlxsw_item *storage_item, + const struct mlxsw_item *output_item, + char *storage, char *output) { u32 value; value = __mlxsw_item_get32(storage, storage_item, 0); - __mlxsw_item_set32(output_indexed, output_item, 0, value); + __mlxsw_item_set32(output, output_item, 0, value); } -static void mlxsw_afk_encode_buf(const struct mlxsw_item *storage_item, - const struct mlxsw_item *output_item, - char *storage, char *output_indexed) +static void mlxsw_sp_afk_encode_buf(const struct mlxsw_item *storage_item, + const struct mlxsw_item *output_item, + char *storage, char *output) { char *storage_data = __mlxsw_item_data(storage, storage_item, 0); - char *output_data = __mlxsw_item_data(output_indexed, output_item, 0); + char 
*output_data = __mlxsw_item_data(output, output_item, 0); size_t len = output_item->size.bytes; memcpy(output_data, storage_data, len); } -#define MLXSW_AFK_KEY_BLOCK_SIZE 16 - -static void mlxsw_afk_encode_one(const struct mlxsw_afk_element_inst *elinst, - int block_index, char *storage, char *output) +static void +mlxsw_sp_afk_encode_one(const struct mlxsw_afk_element_inst *elinst, + char *output, char *storage) { - char *output_indexed = output + block_index * MLXSW_AFK_KEY_BLOCK_SIZE; const struct mlxsw_item *storage_item = &elinst->info->item; const struct mlxsw_item *output_item = &elinst->item; if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_U32) - mlxsw_afk_encode_u32(storage_item, output_item, - storage, output_indexed); + mlxsw_sp_afk_encode_u32(storage_item, output_item, + storage, output); else if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_BUF) - mlxsw_afk_encode_buf(storage_item, output_item, - storage, output_indexed); + mlxsw_sp_afk_encode_buf(storage_item, output_item, + storage, output); } -void mlxsw_afk_encode(struct mlxsw_afk_key_info *key_info, +#define MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE 16 + +void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk, + struct mlxsw_afk_key_info *key_info, struct mlxsw_afk_element_values *values, - char *key, char *mask) + char *key, char *mask, int block_start, int block_end) { + char block_mask[MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE]; + char block_key[MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE]; const struct mlxsw_afk_element_inst *elinst; enum mlxsw_afk_element element; - int block_index; + int block_index, i; + + for (i = block_start; i <= block_end; i++) { + memset(block_key, 0, MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE); + memset(block_mask, 0, MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE); + + mlxsw_afk_element_usage_for_each(element, &values->elusage) { + elinst = mlxsw_afk_key_info_elinst_get(key_info, + element, + &block_index); + if (!elinst || block_index != i) + continue; + + mlxsw_sp_afk_encode_one(elinst, block_key, + values->storage.key); + mlxsw_sp_afk_encode_one(elinst, block_mask, + values->storage.mask); + } - mlxsw_afk_element_usage_for_each(element, &values->elusage) { - elinst = mlxsw_afk_key_info_elinst_get(key_info, element, - &block_index); - if (!elinst) - continue; - mlxsw_afk_encode_one(elinst, block_index, - values->storage.key, key); - mlxsw_afk_encode_one(elinst, block_index, - values->storage.mask, mask); + if (key) + mlxsw_afk->ops->encode_block(block_key, i, key); + if (mask) + mlxsw_afk->ops->encode_block(block_mask, i, mask); } } EXPORT_SYMBOL(mlxsw_afk_encode); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h index 122506daa586..18d9bfed6001 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h @@ -42,16 +42,20 @@ enum mlxsw_afk_element { MLXSW_AFK_ELEMENT_SRC_SYS_PORT, - MLXSW_AFK_ELEMENT_DMAC, - MLXSW_AFK_ELEMENT_SMAC, + MLXSW_AFK_ELEMENT_DMAC_32_47, + MLXSW_AFK_ELEMENT_DMAC_0_31, + MLXSW_AFK_ELEMENT_SMAC_32_47, + MLXSW_AFK_ELEMENT_SMAC_0_31, MLXSW_AFK_ELEMENT_ETHERTYPE, MLXSW_AFK_ELEMENT_IP_PROTO, - MLXSW_AFK_ELEMENT_SRC_IP4, - MLXSW_AFK_ELEMENT_DST_IP4, - MLXSW_AFK_ELEMENT_SRC_IP6_HI, - MLXSW_AFK_ELEMENT_SRC_IP6_LO, - MLXSW_AFK_ELEMENT_DST_IP6_HI, - MLXSW_AFK_ELEMENT_DST_IP6_LO, + MLXSW_AFK_ELEMENT_SRC_IP_96_127, + MLXSW_AFK_ELEMENT_SRC_IP_64_95, + MLXSW_AFK_ELEMENT_SRC_IP_32_63, + MLXSW_AFK_ELEMENT_SRC_IP_0_31, + MLXSW_AFK_ELEMENT_DST_IP_96_127, + MLXSW_AFK_ELEMENT_DST_IP_64_95, + 
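mlxsw_afk_encode() now builds every 16-byte key block in a scratch buffer and delegates its placement to the per-ASIC ops->encode_block callback, since Spectrum-1 and Spectrum-2 lay blocks out differently. A hedged sketch of what such a callback could look like for a simple contiguous layout; the block array is a placeholder, not a real ASIC definition:

static const struct mlxsw_afk_block example_afk_blocks[] = {
        /* per-ASIC key block definitions would go here */
};

static void example_afk_encode_block(char *block, int block_index,
                                     char *output)
{
        /* fixed 16-byte stride; real ASICs may reorder or bit-pack */
        memcpy(output + block_index * 16, block, 16);
}

static const struct mlxsw_afk_ops example_afk_ops = {
        .blocks         = example_afk_blocks,
        .blocks_count   = ARRAY_SIZE(example_afk_blocks),
        .encode_block   = example_afk_encode_block,
};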
MLXSW_AFK_ELEMENT_DST_IP_32_63, + MLXSW_AFK_ELEMENT_DST_IP_0_31, MLXSW_AFK_ELEMENT_DST_L4_PORT, MLXSW_AFK_ELEMENT_SRC_L4_PORT, MLXSW_AFK_ELEMENT_VID, @@ -99,9 +103,11 @@ struct mlxsw_afk_element_info { * define an internal storage geometry. */ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = { - MLXSW_AFK_ELEMENT_INFO_U32(SRC_SYS_PORT, 0x00, 16, 16), - MLXSW_AFK_ELEMENT_INFO_BUF(DMAC, 0x04, 6), - MLXSW_AFK_ELEMENT_INFO_BUF(SMAC, 0x0A, 6), + MLXSW_AFK_ELEMENT_INFO_U32(SRC_SYS_PORT, 0x00, 16, 8), + MLXSW_AFK_ELEMENT_INFO_BUF(DMAC_32_47, 0x04, 2), + MLXSW_AFK_ELEMENT_INFO_BUF(DMAC_0_31, 0x06, 4), + MLXSW_AFK_ELEMENT_INFO_BUF(SMAC_32_47, 0x0A, 2), + MLXSW_AFK_ELEMENT_INFO_BUF(SMAC_0_31, 0x0C, 4), MLXSW_AFK_ELEMENT_INFO_U32(ETHERTYPE, 0x00, 0, 16), MLXSW_AFK_ELEMENT_INFO_U32(IP_PROTO, 0x10, 0, 8), MLXSW_AFK_ELEMENT_INFO_U32(VID, 0x10, 8, 12), @@ -112,12 +118,14 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = { MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x18, 0, 8), MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x18, 9, 2), MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x18, 11, 6), - MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x20, 0, 32), - MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x24, 0, 32), - MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x20, 8), - MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_LO, 0x28, 8), - MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_HI, 0x30, 8), - MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_LO, 0x38, 8), + MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_96_127, 0x20, 4), + MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_64_95, 0x24, 4), + MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_32_63, 0x28, 4), + MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_0_31, 0x2C, 4), + MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_96_127, 0x30, 4), + MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_64_95, 0x34, 4), + MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_32_63, 0x38, 4), + MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_0_31, 0x3C, 4), }; #define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x40 @@ -208,9 +216,14 @@ mlxsw_afk_element_usage_subset(struct mlxsw_afk_element_usage *elusage_small, struct mlxsw_afk; +struct mlxsw_afk_ops { + const struct mlxsw_afk_block *blocks; + unsigned int blocks_count; + void (*encode_block)(char *block, int block_index, char *output); +}; + struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks, - const struct mlxsw_afk_block *blocks, - unsigned int blocks_count); + const struct mlxsw_afk_ops *ops); void mlxsw_afk_destroy(struct mlxsw_afk *mlxsw_afk); struct mlxsw_afk_key_info; @@ -243,8 +256,9 @@ void mlxsw_afk_values_add_buf(struct mlxsw_afk_element_values *values, enum mlxsw_afk_element element, const char *key_value, const char *mask_value, unsigned int len); -void mlxsw_afk_encode(struct mlxsw_afk_key_info *key_info, +void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk, + struct mlxsw_afk_key_info *key_info, struct mlxsw_afk_element_values *values, - char *key, char *mask); + char *key, char *mask, int block_start, int block_end); #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.h b/drivers/net/ethernet/mellanox/mlxsw/pci.h index d65582325cd5..7461f8fe1133 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.h +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.h @@ -39,6 +39,7 @@ #define PCI_DEVICE_ID_MELLANOX_SWITCHX2 0xc738 #define PCI_DEVICE_ID_MELLANOX_SPECTRUM 0xcb84 +#define PCI_DEVICE_ID_MELLANOX_SPECTRUM2 0xcf6c #define PCI_DEVICE_ID_MELLANOX_SWITCHIB 0xcb20 #define PCI_DEVICE_ID_MELLANOX_SWITCHIB2 0xcf08 diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 1877d9f8a11a..e52841627966 100644 --- 
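Splitting DMAC/SMAC and the IP addresses into 32-bit (and 16-bit) elements lets each chunk land in a different key block. Filling a 128-bit IPv6 source address therefore takes four add_buf calls; a hedged sketch in which the wrapper function is illustrative while the element names and mlxsw_afk_values_add_buf() come from the diff:

static void example_add_src_ip6(struct mlxsw_afk_element_values *values,
                                const struct in6_addr *addr,
                                const struct in6_addr *mask)
{
        mlxsw_afk_values_add_buf(values, MLXSW_AFK_ELEMENT_SRC_IP_96_127,
                                 (const char *)&addr->s6_addr[0],
                                 (const char *)&mask->s6_addr[0], 4);
        mlxsw_afk_values_add_buf(values, MLXSW_AFK_ELEMENT_SRC_IP_64_95,
                                 (const char *)&addr->s6_addr[4],
                                 (const char *)&mask->s6_addr[4], 4);
        mlxsw_afk_values_add_buf(values, MLXSW_AFK_ELEMENT_SRC_IP_32_63,
                                 (const char *)&addr->s6_addr[8],
                                 (const char *)&mask->s6_addr[8], 4);
        mlxsw_afk_values_add_buf(values, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
                                 (const char *)&addr->s6_addr[12],
                                 (const char *)&mask->s6_addr[12], 4);
}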
a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -39,6 +39,7 @@ #ifndef _MLXSW_REG_H #define _MLXSW_REG_H +#include <linux/kernel.h> #include <linux/string.h> #include <linux/bitops.h> #include <linux/if_vlan.h> @@ -1943,6 +1944,28 @@ static inline void mlxsw_reg_cwtpm_pack(char *payload, u8 local_port, mlxsw_reg_cwtpm_ntcp_r_set(payload, profile); } +/* PGCR - Policy-Engine General Configuration Register + * --------------------------------------------------- + * This register configures general Policy-Engine settings. + */ +#define MLXSW_REG_PGCR_ID 0x3001 +#define MLXSW_REG_PGCR_LEN 0x20 + +MLXSW_REG_DEFINE(pgcr, MLXSW_REG_PGCR_ID, MLXSW_REG_PGCR_LEN); + +/* reg_pgcr_default_action_pointer_base + * Default action pointer base. Each region has a default action pointer + * which is equal to default_action_pointer_base + region_id. + * Access: RW + */ +MLXSW_ITEM32(reg, pgcr, default_action_pointer_base, 0x1C, 0, 24); + +static inline void mlxsw_reg_pgcr_pack(char *payload, u32 pointer_base) +{ + MLXSW_REG_ZERO(pgcr, payload); + mlxsw_reg_pgcr_default_action_pointer_base_set(payload, pointer_base); +} + /* PPBT - Policy-Engine Port Binding Table * --------------------------------------- * This register is used for configuration of the Port Binding Table. @@ -2132,14 +2155,18 @@ MLXSW_ITEM32(reg, ptar, op, 0x00, 28, 4); /* reg_ptar_action_set_type * Type of action set to be used on this region. - * For Spectrum, this is always type 2 - "flexible" + * For Spectrum and Spectrum-2, this is always type 2 - "flexible" * Access: WO */ MLXSW_ITEM32(reg, ptar, action_set_type, 0x00, 16, 8); +enum mlxsw_reg_ptar_key_type { + MLXSW_REG_PTAR_KEY_TYPE_FLEX = 0x50, /* Spetrum */ + MLXSW_REG_PTAR_KEY_TYPE_FLEX2 = 0x51, /* Spectrum-2 */ +}; + /* reg_ptar_key_type * TCAM key type for the region. - * For Spectrum, this is always type 0x50 - "FLEX_KEY" * Access: WO */ MLXSW_ITEM32(reg, ptar, key_type, 0x00, 0, 8); @@ -2182,13 +2209,14 @@ MLXSW_ITEM8_INDEXED(reg, ptar, flexible_key_id, 0x20, 0, 8, MLXSW_REG_PTAR_KEY_ID_LEN, 0x00, false); static inline void mlxsw_reg_ptar_pack(char *payload, enum mlxsw_reg_ptar_op op, + enum mlxsw_reg_ptar_key_type key_type, u16 region_size, u16 region_id, const char *tcam_region_info) { MLXSW_REG_ZERO(ptar, payload); mlxsw_reg_ptar_op_set(payload, op); mlxsw_reg_ptar_action_set_type_set(payload, 2); /* "flexible" */ - mlxsw_reg_ptar_key_type_set(payload, 0x50); /* "FLEX_KEY" */ + mlxsw_reg_ptar_key_type_set(payload, key_type); mlxsw_reg_ptar_region_size_set(payload, region_size); mlxsw_reg_ptar_region_id_set(payload, region_id); mlxsw_reg_ptar_tcam_region_info_memcpy_to(payload, tcam_region_info); @@ -2327,6 +2355,23 @@ MLXSW_REG_DEFINE(pefa, MLXSW_REG_PEFA_ID, MLXSW_REG_PEFA_LEN); */ MLXSW_ITEM32(reg, pefa, index, 0x00, 0, 24); +/* reg_pefa_a + * Index in the KVD Linear Centralized Database. 
+ * Activity + * For a new entry: set if ca=0, clear if ca=1 + * Set if a packet lookup has hit on the specific entry + * Access: RO + */ +MLXSW_ITEM32(reg, pefa, a, 0x04, 29, 1); + +/* reg_pefa_ca + * Clear activity + * When write: activity is according to this field + * When read: after reading the activity is cleared according to ca + * Access: OP + */ +MLXSW_ITEM32(reg, pefa, ca, 0x04, 24, 1); + #define MLXSW_REG_FLEX_ACTION_SET_LEN 0xA8 /* reg_pefa_flex_action_set @@ -2336,12 +2381,20 @@ MLXSW_ITEM32(reg, pefa, index, 0x00, 0, 24); */ MLXSW_ITEM_BUF(reg, pefa, flex_action_set, 0x08, MLXSW_REG_FLEX_ACTION_SET_LEN); -static inline void mlxsw_reg_pefa_pack(char *payload, u32 index, +static inline void mlxsw_reg_pefa_pack(char *payload, u32 index, bool ca, const char *flex_action_set) { MLXSW_REG_ZERO(pefa, payload); mlxsw_reg_pefa_index_set(payload, index); - mlxsw_reg_pefa_flex_action_set_memcpy_to(payload, flex_action_set); + mlxsw_reg_pefa_ca_set(payload, ca); + if (flex_action_set) + mlxsw_reg_pefa_flex_action_set_memcpy_to(payload, + flex_action_set); +} + +static inline void mlxsw_reg_pefa_unpack(char *payload, bool *p_a) +{ + *p_a = mlxsw_reg_pefa_a_get(payload); } /* PTCE-V2 - Policy-Engine TCAM Entry Register Version 2 @@ -2397,6 +2450,15 @@ MLXSW_ITEM32(reg, ptce2, op, 0x00, 20, 3); */ MLXSW_ITEM32(reg, ptce2, offset, 0x00, 0, 16); +/* reg_ptce2_priority + * Priority of the rule, higher values win. The range is 1..cap_kvd_size-1. + * Note: priority does not have to be unique per rule. + * Within a region, higher priority should have lower offset (no limitation + * between regions in a multi-region). + * Access: RW + */ +MLXSW_ITEM32(reg, ptce2, priority, 0x04, 0, 24); + /* reg_ptce2_tcam_region_info * Opaque object that represents the TCAM region. * Access: Index @@ -2404,14 +2466,14 @@ MLXSW_ITEM32(reg, ptce2, offset, 0x00, 0, 16); MLXSW_ITEM_BUF(reg, ptce2, tcam_region_info, 0x10, MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN); -#define MLXSW_REG_PTCE2_FLEX_KEY_BLOCKS_LEN 96 +#define MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN 96 /* reg_ptce2_flex_key_blocks * ACL Key. * Access: RW */ MLXSW_ITEM_BUF(reg, ptce2, flex_key_blocks, 0x20, - MLXSW_REG_PTCE2_FLEX_KEY_BLOCKS_LEN); + MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN); /* reg_ptce2_mask * mask- in the same size as key. A bit that is set directs the TCAM @@ -2420,7 +2482,7 @@ MLXSW_ITEM_BUF(reg, ptce2, flex_key_blocks, 0x20, * Access: RW */ MLXSW_ITEM_BUF(reg, ptce2, mask, 0x80, - MLXSW_REG_PTCE2_FLEX_KEY_BLOCKS_LEN); + MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN); /* reg_ptce2_flex_action_set * ACL action set. @@ -2432,15 +2494,567 @@ MLXSW_ITEM_BUF(reg, ptce2, flex_action_set, 0xE0, static inline void mlxsw_reg_ptce2_pack(char *payload, bool valid, enum mlxsw_reg_ptce2_op op, const char *tcam_region_info, - u16 offset) + u16 offset, u32 priority) { MLXSW_REG_ZERO(ptce2, payload); mlxsw_reg_ptce2_v_set(payload, valid); mlxsw_reg_ptce2_op_set(payload, op); mlxsw_reg_ptce2_offset_set(payload, offset); + mlxsw_reg_ptce2_priority_set(payload, priority); mlxsw_reg_ptce2_tcam_region_info_memcpy_to(payload, tcam_region_info); } +/* PERPT - Policy-Engine ERP Table Register + * ---------------------------------------- + * This register adds and removes eRPs from the eRP table. + */ +#define MLXSW_REG_PERPT_ID 0x3021 +#define MLXSW_REG_PERPT_LEN 0x80 + +MLXSW_REG_DEFINE(perpt, MLXSW_REG_PERPT_ID, MLXSW_REG_PERPT_LEN); + +/* reg_perpt_erpt_bank + * eRP table bank. + * Range 0 .. 
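With the new PEFA 'a' (activity) and 'ca' (clear activity) bits, the activity of a KVD linear action set can be polled read-and-clear: pack with ca=1 and no action set, query, unpack. A hedged sketch matching the kvdl_set_activity_get op added to mlxsw_afa_ops; mlxsw_reg_query() is the driver's standard register access helper:

static int example_kvdl_activity_get(struct mlxsw_core *mlxsw_core,
                                     u32 kvdl_index, bool *activity)
{
        char pefa_pl[MLXSW_REG_PEFA_LEN];
        int err;

        mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, true, NULL); /* ca=1 */
        err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pefa), pefa_pl);
        if (err)
                return err;
        mlxsw_reg_pefa_unpack(pefa_pl, activity);
        return 0;
}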
cap_max_erp_table_banks - 1 + * Access: Index + */ +MLXSW_ITEM32(reg, perpt, erpt_bank, 0x00, 16, 4); + +/* reg_perpt_erpt_index + * Index to eRP table within the eRP bank. + * Range is 0 .. cap_max_erp_table_bank_size - 1 + * Access: Index + */ +MLXSW_ITEM32(reg, perpt, erpt_index, 0x00, 0, 8); + +enum mlxsw_reg_perpt_key_size { + MLXSW_REG_PERPT_KEY_SIZE_2KB, + MLXSW_REG_PERPT_KEY_SIZE_4KB, + MLXSW_REG_PERPT_KEY_SIZE_8KB, + MLXSW_REG_PERPT_KEY_SIZE_12KB, +}; + +/* reg_perpt_key_size + * Access: OP + */ +MLXSW_ITEM32(reg, perpt, key_size, 0x04, 0, 4); + +/* reg_perpt_bf_bypass + * 0 - The eRP is used only if bloom filter state is set for the given + * rule. + * 1 - The eRP is used regardless of bloom filter state. + * The bypass is an OR condition of region_id or eRP. See PERCR.bf_bypass + * Access: RW + */ +MLXSW_ITEM32(reg, perpt, bf_bypass, 0x08, 8, 1); + +/* reg_perpt_erp_id + * eRP ID for use by the rules. + * Access: RW + */ +MLXSW_ITEM32(reg, perpt, erp_id, 0x08, 0, 4); + +/* reg_perpt_erpt_base_bank + * Base eRP table bank, points to head of erp_vector + * Range is 0 .. cap_max_erp_table_banks - 1 + * Access: OP + */ +MLXSW_ITEM32(reg, perpt, erpt_base_bank, 0x0C, 16, 4); + +/* reg_perpt_erpt_base_index + * Base index to eRP table within the eRP bank + * Range is 0 .. cap_max_erp_table_bank_size - 1 + * Access: OP + */ +MLXSW_ITEM32(reg, perpt, erpt_base_index, 0x0C, 0, 8); + +/* reg_perpt_erp_index_in_vector + * eRP index in the vector. + * Access: OP + */ +MLXSW_ITEM32(reg, perpt, erp_index_in_vector, 0x10, 0, 4); + +/* reg_perpt_erp_vector + * eRP vector. + * Access: OP + */ +MLXSW_ITEM_BIT_ARRAY(reg, perpt, erp_vector, 0x14, 4, 1); + +/* reg_perpt_mask + * Mask + * 0 - A-TCAM will ignore the bit in key + * 1 - A-TCAM will compare the bit in key + * Access: RW + */ +MLXSW_ITEM_BUF(reg, perpt, mask, 0x20, MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN); + +static inline void mlxsw_reg_perpt_erp_vector_pack(char *payload, + unsigned long *erp_vector, + unsigned long size) +{ + unsigned long bit; + + for_each_set_bit(bit, erp_vector, size) + mlxsw_reg_perpt_erp_vector_set(payload, bit, true); +} + +static inline void +mlxsw_reg_perpt_pack(char *payload, u8 erpt_bank, u8 erpt_index, + enum mlxsw_reg_perpt_key_size key_size, u8 erp_id, + u8 erpt_base_bank, u8 erpt_base_index, u8 erp_index, + char *mask) +{ + MLXSW_REG_ZERO(perpt, payload); + mlxsw_reg_perpt_erpt_bank_set(payload, erpt_bank); + mlxsw_reg_perpt_erpt_index_set(payload, erpt_index); + mlxsw_reg_perpt_key_size_set(payload, key_size); + mlxsw_reg_perpt_bf_bypass_set(payload, true); + mlxsw_reg_perpt_erp_id_set(payload, erp_id); + mlxsw_reg_perpt_erpt_base_bank_set(payload, erpt_base_bank); + mlxsw_reg_perpt_erpt_base_index_set(payload, erpt_base_index); + mlxsw_reg_perpt_erp_index_in_vector_set(payload, erp_index); + mlxsw_reg_perpt_mask_memcpy_to(payload, mask); +} + +/* PERAR - Policy-Engine Region Association Register + * ------------------------------------------------- + * This register associates a hw region for region_id's. Changing on the fly + * is supported by the device. + */ +#define MLXSW_REG_PERAR_ID 0x3026 +#define MLXSW_REG_PERAR_LEN 0x08 + +MLXSW_REG_DEFINE(perar, MLXSW_REG_PERAR_ID, MLXSW_REG_PERAR_LEN); + +/* reg_perar_region_id + * Region identifier + * Range 0 .. 
cap_max_regions-1 + * Access: Index + */ +MLXSW_ITEM32(reg, perar, region_id, 0x00, 0, 16); + +static inline unsigned int +mlxsw_reg_perar_hw_regions_needed(unsigned int block_num) +{ + return DIV_ROUND_UP(block_num, 4); +} + +/* reg_perar_hw_region + * HW Region + * Range 0 .. cap_max_regions-1 + * Default: hw_region = region_id + * For a 8 key block region, 2 consecutive regions are used + * For a 12 key block region, 3 consecutive regions are used + * Access: RW + */ +MLXSW_ITEM32(reg, perar, hw_region, 0x04, 0, 16); + +static inline void mlxsw_reg_perar_pack(char *payload, u16 region_id, + u16 hw_region) +{ + MLXSW_REG_ZERO(perar, payload); + mlxsw_reg_perar_region_id_set(payload, region_id); + mlxsw_reg_perar_hw_region_set(payload, hw_region); +} + +/* PTCE-V3 - Policy-Engine TCAM Entry Register Version 3 + * ----------------------------------------------------- + * This register is a new version of PTCE-V2 in order to support the + * A-TCAM. This register is not supported by SwitchX/-2 and Spectrum. + */ +#define MLXSW_REG_PTCE3_ID 0x3027 +#define MLXSW_REG_PTCE3_LEN 0xF0 + +MLXSW_REG_DEFINE(ptce3, MLXSW_REG_PTCE3_ID, MLXSW_REG_PTCE3_LEN); + +/* reg_ptce3_v + * Valid. + * Access: RW + */ +MLXSW_ITEM32(reg, ptce3, v, 0x00, 31, 1); + +enum mlxsw_reg_ptce3_op { + /* Write operation. Used to write a new entry to the table. + * All R/W fields are relevant for new entry. Activity bit is set + * for new entries. Write with v = 0 will delete the entry. Must + * not be used if an entry exists. + */ + MLXSW_REG_PTCE3_OP_WRITE_WRITE = 0, + /* Update operation */ + MLXSW_REG_PTCE3_OP_WRITE_UPDATE = 1, + /* Read operation */ + MLXSW_REG_PTCE3_OP_QUERY_READ = 0, +}; + +/* reg_ptce3_op + * Access: OP + */ +MLXSW_ITEM32(reg, ptce3, op, 0x00, 20, 3); + +/* reg_ptce3_priority + * Priority of the rule. Higher values win. + * For Spectrum-2 range is 1..cap_kvd_size - 1 + * Note: Priority does not have to be unique per rule. + * Access: RW + */ +MLXSW_ITEM32(reg, ptce3, priority, 0x04, 0, 24); + +/* reg_ptce3_tcam_region_info + * Opaque object that represents the TCAM region. + * Access: Index + */ +MLXSW_ITEM_BUF(reg, ptce3, tcam_region_info, 0x10, + MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN); + +/* reg_ptce3_flex2_key_blocks + * ACL key. The key must be masked according to eRP (if exists) or + * according to master mask. + * Access: Index + */ +MLXSW_ITEM_BUF(reg, ptce3, flex2_key_blocks, 0x20, + MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN); + +/* reg_ptce3_erp_id + * eRP ID. + * Access: Index + */ +MLXSW_ITEM32(reg, ptce3, erp_id, 0x80, 0, 4); + +/* reg_ptce3_delta_start + * Start point of delta_value and delta_mask, in bits. Must not exceed + * num_key_blocks * 36 - 8. Reserved when delta_mask = 0. + * Access: Index + */ +MLXSW_ITEM32(reg, ptce3, delta_start, 0x84, 0, 10); + +/* reg_ptce3_delta_mask + * Delta mask. + * 0 - Ignore relevant bit in delta_value + * 1 - Compare relevant bit in delta_value + * Delta mask must not be set for reserved fields in the key blocks. + * Note: No delta when no eRPs. Thus, for regions with + * PERERP.erpt_pointer_valid = 0 the delta mask must be 0. + * Access: Index + */ +MLXSW_ITEM32(reg, ptce3, delta_mask, 0x88, 16, 8); + +/* reg_ptce3_delta_value + * Delta value. + * Bits which are masked by delta_mask must be 0. + * Access: Index + */ +MLXSW_ITEM32(reg, ptce3, delta_value, 0x88, 0, 8); + +/* reg_ptce3_prune_vector + * Pruning vector relative to the PERPT.erp_id. + * Used for reducing lookups. + * 0 - NEED: Do a lookup using the eRP. 
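To make the PERAR sizing rule concrete: mlxsw_reg_perar_hw_regions_needed() is DIV_ROUND_UP(block_num, 4), so a 4-key-block region consumes one HW region, 8 blocks consume two, and 12 blocks consume three consecutive ones starting at hw_region. A minimal, illustrative binding helper:

static int example_perar_bind(struct mlxsw_core *mlxsw_core,
                              u16 region_id, u16 hw_region)
{
        char perar_pl[MLXSW_REG_PERAR_LEN];

        mlxsw_reg_perar_pack(perar_pl, region_id, hw_region);
        return mlxsw_reg_write(mlxsw_core, MLXSW_REG(perar), perar_pl);
}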
+ * 1 - PRUNE: Do not perform a lookup using the eRP. + * Maybe be modified by PEAPBL and PEAPBM. + * Note: In Spectrum-2, a region of 8 key blocks must be set to either + * all 1's or all 0's. + * Access: RW + */ +MLXSW_ITEM_BIT_ARRAY(reg, ptce3, prune_vector, 0x90, 4, 1); + +/* reg_ptce3_prune_ctcam + * Pruning on C-TCAM. Used for reducing lookups. + * 0 - NEED: Do a lookup in the C-TCAM. + * 1 - PRUNE: Do not perform a lookup in the C-TCAM. + * Access: RW + */ +MLXSW_ITEM32(reg, ptce3, prune_ctcam, 0x94, 31, 1); + +/* reg_ptce3_large_exists + * Large entry key ID exists. + * Within the region: + * 0 - SINGLE: The large_entry_key_id is not currently in use. + * For rule insert: The MSB of the key (blocks 6..11) will be added. + * For rule delete: The MSB of the key will be removed. + * 1 - NON_SINGLE: The large_entry_key_id is currently in use. + * For rule insert: The MSB of the key (blocks 6..11) will not be added. + * For rule delete: The MSB of the key will not be removed. + * Access: WO + */ +MLXSW_ITEM32(reg, ptce3, large_exists, 0x98, 31, 1); + +/* reg_ptce3_large_entry_key_id + * Large entry key ID. + * A key for 12 key blocks rules. Reserved when region has less than 12 key + * blocks. Must be different for different keys which have the same common + * 6 key blocks (MSB, blocks 6..11) key within a region. + * Range is 0..cap_max_pe_large_key_id - 1 + * Access: RW + */ +MLXSW_ITEM32(reg, ptce3, large_entry_key_id, 0x98, 0, 24); + +/* reg_ptce3_action_pointer + * Pointer to action. + * Range is 0..cap_max_kvd_action_sets - 1 + * Access: RW + */ +MLXSW_ITEM32(reg, ptce3, action_pointer, 0xA0, 0, 24); + +static inline void mlxsw_reg_ptce3_pack(char *payload, bool valid, + enum mlxsw_reg_ptce3_op op, + u32 priority, + const char *tcam_region_info, + const char *key, u8 erp_id, + bool large_exists, u32 lkey_id, + u32 action_pointer) +{ + MLXSW_REG_ZERO(ptce3, payload); + mlxsw_reg_ptce3_v_set(payload, valid); + mlxsw_reg_ptce3_op_set(payload, op); + mlxsw_reg_ptce3_priority_set(payload, priority); + mlxsw_reg_ptce3_tcam_region_info_memcpy_to(payload, tcam_region_info); + mlxsw_reg_ptce3_flex2_key_blocks_memcpy_to(payload, key); + mlxsw_reg_ptce3_erp_id_set(payload, erp_id); + mlxsw_reg_ptce3_large_exists_set(payload, large_exists); + mlxsw_reg_ptce3_large_entry_key_id_set(payload, lkey_id); + mlxsw_reg_ptce3_action_pointer_set(payload, action_pointer); +} + +/* PERCR - Policy-Engine Region Configuration Register + * --------------------------------------------------- + * This register configures the region parameters. The region_id must be + * allocated. + */ +#define MLXSW_REG_PERCR_ID 0x302A +#define MLXSW_REG_PERCR_LEN 0x80 + +MLXSW_REG_DEFINE(percr, MLXSW_REG_PERCR_ID, MLXSW_REG_PERCR_LEN); + +/* reg_percr_region_id + * Region identifier. + * Range 0..cap_max_regions-1 + * Access: Index + */ +MLXSW_ITEM32(reg, percr, region_id, 0x00, 0, 16); + +/* reg_percr_atcam_ignore_prune + * Ignore prune_vector by other A-TCAM rules. Used e.g., for a new rule. + * Access: RW + */ +MLXSW_ITEM32(reg, percr, atcam_ignore_prune, 0x04, 25, 1); + +/* reg_percr_ctcam_ignore_prune + * Ignore prune_ctcam by other A-TCAM rules. Used e.g., for a new rule. + * Access: RW + */ +MLXSW_ITEM32(reg, percr, ctcam_ignore_prune, 0x04, 24, 1); + +/* reg_percr_bf_bypass + * Bloom filter bypass. + * 0 - Bloom filter is used (default) + * 1 - Bloom filter is bypassed. The bypass is an OR condition of + * region_id or eRP. 
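Putting the PTCE-V3 fields together, writing one A-TCAM rule reduces to a single pack-and-write. A hedged sketch with the delta fields left at their zero defaults and a small key, so large_exists/lkey_id stay unused; the wrapper itself is illustrative:

static int example_ptce3_rule_write(struct mlxsw_core *mlxsw_core,
                                    const char *tcam_region_info,
                                    const char *key, u8 erp_id,
                                    u32 priority, u32 action_pointer)
{
        char ptce3_pl[MLXSW_REG_PTCE3_LEN];

        mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_WRITE,
                             priority, tcam_region_info, key, erp_id,
                             false /* large_exists */, 0 /* lkey_id */,
                             action_pointer);
        return mlxsw_reg_write(mlxsw_core, MLXSW_REG(ptce3), ptce3_pl);
}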
See PERPT.bf_bypass + * Access: RW + */ +MLXSW_ITEM32(reg, percr, bf_bypass, 0x04, 16, 1); + +/* reg_percr_master_mask + * Master mask. Logical OR mask of all masks of all rules of a region + * (both A-TCAM and C-TCAM). When there are no eRPs + * (erpt_pointer_valid = 0), then this provides the mask. + * Access: RW + */ +MLXSW_ITEM_BUF(reg, percr, master_mask, 0x20, 96); + +static inline void mlxsw_reg_percr_pack(char *payload, u16 region_id) +{ + MLXSW_REG_ZERO(percr, payload); + mlxsw_reg_percr_region_id_set(payload, region_id); + mlxsw_reg_percr_atcam_ignore_prune_set(payload, false); + mlxsw_reg_percr_ctcam_ignore_prune_set(payload, false); + mlxsw_reg_percr_bf_bypass_set(payload, true); +} + +/* PERERP - Policy-Engine Region eRP Register + * ------------------------------------------ + * This register configures the region eRP. The region_id must be + * allocated. + */ +#define MLXSW_REG_PERERP_ID 0x302B +#define MLXSW_REG_PERERP_LEN 0x1C + +MLXSW_REG_DEFINE(pererp, MLXSW_REG_PERERP_ID, MLXSW_REG_PERERP_LEN); + +/* reg_pererp_region_id + * Region identifier. + * Range 0..cap_max_regions-1 + * Access: Index + */ +MLXSW_ITEM32(reg, pererp, region_id, 0x00, 0, 16); + +/* reg_pererp_ctcam_le + * C-TCAM lookup enable. Reserved when erpt_pointer_valid = 0. + * Access: RW + */ +MLXSW_ITEM32(reg, pererp, ctcam_le, 0x04, 28, 1); + +/* reg_pererp_erpt_pointer_valid + * erpt_pointer is valid. + * Access: RW + */ +MLXSW_ITEM32(reg, pererp, erpt_pointer_valid, 0x10, 31, 1); + +/* reg_pererp_erpt_bank_pointer + * Pointer to eRP table bank. May be modified at any time. + * Range 0..cap_max_erp_table_banks-1 + * Reserved when erpt_pointer_valid = 0 + */ +MLXSW_ITEM32(reg, pererp, erpt_bank_pointer, 0x10, 16, 4); + +/* reg_pererp_erpt_pointer + * Pointer to eRP table within the eRP bank. Can be changed for an + * existing region. + * Range 0..cap_max_erp_table_size-1 + * Reserved when erpt_pointer_valid = 0 + * Access: RW + */ +MLXSW_ITEM32(reg, pererp, erpt_pointer, 0x10, 0, 8); + +/* reg_pererp_erpt_vector + * Vector of allowed eRP indexes starting from erpt_pointer within the + * erpt_bank_pointer. Next entries will be in next bank. + * Note that eRP index is used and not eRP ID. + * Reserved when erpt_pointer_valid = 0 + * Access: RW + */ +MLXSW_ITEM_BIT_ARRAY(reg, pererp, erpt_vector, 0x14, 4, 1); + +/* reg_pererp_master_rp_id + * Master RP ID. When there are no eRPs, then this provides the eRP ID + * for the lookup. Can be changed for an existing region. 
+ * Reserved when erpt_pointer_valid = 1 + * Access: RW + */ +MLXSW_ITEM32(reg, pererp, master_rp_id, 0x18, 0, 4); + +static inline void mlxsw_reg_pererp_erp_vector_pack(char *payload, + unsigned long *erp_vector, + unsigned long size) +{ + unsigned long bit; + + for_each_set_bit(bit, erp_vector, size) + mlxsw_reg_pererp_erpt_vector_set(payload, bit, true); +} + +static inline void mlxsw_reg_pererp_pack(char *payload, u16 region_id, + bool ctcam_le, bool erpt_pointer_valid, + u8 erpt_bank_pointer, u8 erpt_pointer, + u8 master_rp_id) +{ + MLXSW_REG_ZERO(pererp, payload); + mlxsw_reg_pererp_region_id_set(payload, region_id); + mlxsw_reg_pererp_ctcam_le_set(payload, ctcam_le); + mlxsw_reg_pererp_erpt_pointer_valid_set(payload, erpt_pointer_valid); + mlxsw_reg_pererp_erpt_bank_pointer_set(payload, erpt_bank_pointer); + mlxsw_reg_pererp_erpt_pointer_set(payload, erpt_pointer); + mlxsw_reg_pererp_master_rp_id_set(payload, master_rp_id); +} + +/* IEDR - Infrastructure Entry Delete Register + * ---------------------------------------------------- + * This register is used for deleting entries from the entry tables. + * It is legitimate to attempt to delete a nonexisting entry (the device will + * respond as a good flow). + */ +#define MLXSW_REG_IEDR_ID 0x3804 +#define MLXSW_REG_IEDR_BASE_LEN 0x10 /* base length, without records */ +#define MLXSW_REG_IEDR_REC_LEN 0x8 /* record length */ +#define MLXSW_REG_IEDR_REC_MAX_COUNT 64 +#define MLXSW_REG_IEDR_LEN (MLXSW_REG_IEDR_BASE_LEN + \ + MLXSW_REG_IEDR_REC_LEN * \ + MLXSW_REG_IEDR_REC_MAX_COUNT) + +MLXSW_REG_DEFINE(iedr, MLXSW_REG_IEDR_ID, MLXSW_REG_IEDR_LEN); + +/* reg_iedr_num_rec + * Number of records. + * Access: OP + */ +MLXSW_ITEM32(reg, iedr, num_rec, 0x00, 0, 8); + +/* reg_iedr_rec_type + * Resource type. + * Access: OP + */ +MLXSW_ITEM32_INDEXED(reg, iedr, rec_type, MLXSW_REG_IEDR_BASE_LEN, 24, 8, + MLXSW_REG_IEDR_REC_LEN, 0x00, false); + +/* reg_iedr_rec_size + * Size of entries do be deleted. The unit is 1 entry, regardless of entry type. + * Access: OP + */ +MLXSW_ITEM32_INDEXED(reg, iedr, rec_size, MLXSW_REG_IEDR_BASE_LEN, 0, 11, + MLXSW_REG_IEDR_REC_LEN, 0x00, false); + +/* reg_iedr_rec_index_start + * Resource index start. + * Access: OP + */ +MLXSW_ITEM32_INDEXED(reg, iedr, rec_index_start, MLXSW_REG_IEDR_BASE_LEN, 0, 24, + MLXSW_REG_IEDR_REC_LEN, 0x04, false); + +static inline void mlxsw_reg_iedr_pack(char *payload) +{ + MLXSW_REG_ZERO(iedr, payload); +} + +static inline void mlxsw_reg_iedr_rec_pack(char *payload, int rec_index, + u8 rec_type, u16 rec_size, + u32 rec_index_start) +{ + u8 num_rec = mlxsw_reg_iedr_num_rec_get(payload); + + if (rec_index >= num_rec) + mlxsw_reg_iedr_num_rec_set(payload, rec_index + 1); + mlxsw_reg_iedr_rec_type_set(payload, rec_index, rec_type); + mlxsw_reg_iedr_rec_size_set(payload, rec_index, rec_size); + mlxsw_reg_iedr_rec_index_start_set(payload, rec_index, rec_index_start); +} + +/* QPTS - QoS Priority Trust State Register + * ---------------------------------------- + * This register controls the port policy to calculate the switch priority and + * packet color based on incoming packet fields. + */ +#define MLXSW_REG_QPTS_ID 0x4002 +#define MLXSW_REG_QPTS_LEN 0x8 + +MLXSW_REG_DEFINE(qpts, MLXSW_REG_QPTS_ID, MLXSW_REG_QPTS_LEN); + +/* reg_qpts_local_port + * Local port number. + * Access: Index + * + * Note: CPU port is supported. 
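IEDR batches up to 64 delete records per transaction, and mlxsw_reg_iedr_rec_pack() grows num_rec as records are appended. A hedged sketch deleting two ranges; the rec_type value 0x23 and the start indexes are placeholders, not taken from the patch:

static int example_iedr_delete(struct mlxsw_core *mlxsw_core)
{
        char iedr_pl[MLXSW_REG_IEDR_LEN];

        mlxsw_reg_iedr_pack(iedr_pl);
        /* rec_index, rec_type, rec_size (entries), rec_index_start */
        mlxsw_reg_iedr_rec_pack(iedr_pl, 0, 0x23, 1, 0x100);
        mlxsw_reg_iedr_rec_pack(iedr_pl, 1, 0x23, 4, 0x200);
        return mlxsw_reg_write(mlxsw_core, MLXSW_REG(iedr), iedr_pl);
}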
+ */ +MLXSW_ITEM32(reg, qpts, local_port, 0x00, 16, 8); + +enum mlxsw_reg_qpts_trust_state { + MLXSW_REG_QPTS_TRUST_STATE_PCP = 1, + MLXSW_REG_QPTS_TRUST_STATE_DSCP = 2, /* For MPLS, trust EXP. */ +}; + +/* reg_qpts_trust_state + * Trust state for a given port. + * Access: RW + */ +MLXSW_ITEM32(reg, qpts, trust_state, 0x04, 0, 3); + +static inline void mlxsw_reg_qpts_pack(char *payload, u8 local_port, + enum mlxsw_reg_qpts_trust_state ts) +{ + MLXSW_REG_ZERO(qpts, payload); + + mlxsw_reg_qpts_local_port_set(payload, local_port); + mlxsw_reg_qpts_trust_state_set(payload, ts); +} + /* QPCR - QoS Policer Configuration Register * ----------------------------------------- * The QPCR register is used to create policers - that limit @@ -2753,6 +3367,183 @@ static inline void mlxsw_reg_qeec_pack(char *payload, u8 local_port, mlxsw_reg_qeec_next_element_index_set(payload, next_index); } +/* QRWE - QoS ReWrite Enable + * ------------------------- + * This register configures the rewrite enable per receive port. + */ +#define MLXSW_REG_QRWE_ID 0x400F +#define MLXSW_REG_QRWE_LEN 0x08 + +MLXSW_REG_DEFINE(qrwe, MLXSW_REG_QRWE_ID, MLXSW_REG_QRWE_LEN); + +/* reg_qrwe_local_port + * Local port number. + * Access: Index + * + * Note: CPU port is supported. No support for router port. + */ +MLXSW_ITEM32(reg, qrwe, local_port, 0x00, 16, 8); + +/* reg_qrwe_dscp + * Whether to enable DSCP rewrite (default is 0, don't rewrite). + * Access: RW + */ +MLXSW_ITEM32(reg, qrwe, dscp, 0x04, 1, 1); + +/* reg_qrwe_pcp + * Whether to enable PCP and DEI rewrite (default is 0, don't rewrite). + * Access: RW + */ +MLXSW_ITEM32(reg, qrwe, pcp, 0x04, 0, 1); + +static inline void mlxsw_reg_qrwe_pack(char *payload, u8 local_port, + bool rewrite_pcp, bool rewrite_dscp) +{ + MLXSW_REG_ZERO(qrwe, payload); + mlxsw_reg_qrwe_local_port_set(payload, local_port); + mlxsw_reg_qrwe_pcp_set(payload, rewrite_pcp); + mlxsw_reg_qrwe_dscp_set(payload, rewrite_dscp); +} + +/* QPDSM - QoS Priority to DSCP Mapping + * ------------------------------------ + * QoS Priority to DSCP Mapping Register + */ +#define MLXSW_REG_QPDSM_ID 0x4011 +#define MLXSW_REG_QPDSM_BASE_LEN 0x04 /* base length, without records */ +#define MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN 0x4 /* record length */ +#define MLXSW_REG_QPDSM_PRIO_ENTRY_REC_MAX_COUNT 16 +#define MLXSW_REG_QPDSM_LEN (MLXSW_REG_QPDSM_BASE_LEN + \ + MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN * \ + MLXSW_REG_QPDSM_PRIO_ENTRY_REC_MAX_COUNT) + +MLXSW_REG_DEFINE(qpdsm, MLXSW_REG_QPDSM_ID, MLXSW_REG_QPDSM_LEN); + +/* reg_qpdsm_local_port + * Local Port. Supported for data packets from CPU port. + * Access: Index + */ +MLXSW_ITEM32(reg, qpdsm, local_port, 0x00, 16, 8); + +/* reg_qpdsm_prio_entry_color0_e + * Enable update of the entry for color 0 and a given port. + * Access: WO + */ +MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color0_e, + MLXSW_REG_QPDSM_BASE_LEN, 31, 1, + MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false); + +/* reg_qpdsm_prio_entry_color0_dscp + * DSCP field in the outer label of the packet for color 0 and a given port. + * Reserved when e=0. + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color0_dscp, + MLXSW_REG_QPDSM_BASE_LEN, 24, 6, + MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false); + +/* reg_qpdsm_prio_entry_color1_e + * Enable update of the entry for color 1 and a given port. 
+ * Access: WO + */ +MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color1_e, + MLXSW_REG_QPDSM_BASE_LEN, 23, 1, + MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false); + +/* reg_qpdsm_prio_entry_color1_dscp + * DSCP field in the outer label of the packet for color 1 and a given port. + * Reserved when e=0. + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color1_dscp, + MLXSW_REG_QPDSM_BASE_LEN, 16, 6, + MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false); + +/* reg_qpdsm_prio_entry_color2_e + * Enable update of the entry for color 2 and a given port. + * Access: WO + */ +MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color2_e, + MLXSW_REG_QPDSM_BASE_LEN, 15, 1, + MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false); + +/* reg_qpdsm_prio_entry_color2_dscp + * DSCP field in the outer label of the packet for color 2 and a given port. + * Reserved when e=0. + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color2_dscp, + MLXSW_REG_QPDSM_BASE_LEN, 8, 6, + MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false); + +static inline void mlxsw_reg_qpdsm_pack(char *payload, u8 local_port) +{ + MLXSW_REG_ZERO(qpdsm, payload); + mlxsw_reg_qpdsm_local_port_set(payload, local_port); +} + +static inline void +mlxsw_reg_qpdsm_prio_pack(char *payload, unsigned short prio, u8 dscp) +{ + mlxsw_reg_qpdsm_prio_entry_color0_e_set(payload, prio, 1); + mlxsw_reg_qpdsm_prio_entry_color0_dscp_set(payload, prio, dscp); + mlxsw_reg_qpdsm_prio_entry_color1_e_set(payload, prio, 1); + mlxsw_reg_qpdsm_prio_entry_color1_dscp_set(payload, prio, dscp); + mlxsw_reg_qpdsm_prio_entry_color2_e_set(payload, prio, 1); + mlxsw_reg_qpdsm_prio_entry_color2_dscp_set(payload, prio, dscp); +} + +/* QPDPM - QoS Port DSCP to Priority Mapping Register + * -------------------------------------------------- + * This register controls the mapping from DSCP field to + * Switch Priority for IP packets. + */ +#define MLXSW_REG_QPDPM_ID 0x4013 +#define MLXSW_REG_QPDPM_BASE_LEN 0x4 /* base length, without records */ +#define MLXSW_REG_QPDPM_DSCP_ENTRY_REC_LEN 0x2 /* record length */ +#define MLXSW_REG_QPDPM_DSCP_ENTRY_REC_MAX_COUNT 64 +#define MLXSW_REG_QPDPM_LEN (MLXSW_REG_QPDPM_BASE_LEN + \ + MLXSW_REG_QPDPM_DSCP_ENTRY_REC_LEN * \ + MLXSW_REG_QPDPM_DSCP_ENTRY_REC_MAX_COUNT) + +MLXSW_REG_DEFINE(qpdpm, MLXSW_REG_QPDPM_ID, MLXSW_REG_QPDPM_LEN); + +/* reg_qpdpm_local_port + * Local Port. Supported for data packets from CPU port. + * Access: Index + */ +MLXSW_ITEM32(reg, qpdpm, local_port, 0x00, 16, 8); + +/* reg_qpdpm_dscp_e + * Enable update of the specific entry. When cleared, the switch_prio and color + * fields are ignored and the previous switch_prio and color values are + * preserved. + * Access: WO + */ +MLXSW_ITEM16_INDEXED(reg, qpdpm, dscp_entry_e, MLXSW_REG_QPDPM_BASE_LEN, 15, 1, + MLXSW_REG_QPDPM_DSCP_ENTRY_REC_LEN, 0x00, false); + +/* reg_qpdpm_dscp_prio + * The new Switch Priority value for the relevant DSCP value. 
+ * Access: RW + */ +MLXSW_ITEM16_INDEXED(reg, qpdpm, dscp_entry_prio, + MLXSW_REG_QPDPM_BASE_LEN, 0, 4, + MLXSW_REG_QPDPM_DSCP_ENTRY_REC_LEN, 0x00, false); + +static inline void mlxsw_reg_qpdpm_pack(char *payload, u8 local_port) +{ + MLXSW_REG_ZERO(qpdpm, payload); + mlxsw_reg_qpdpm_local_port_set(payload, local_port); +} + +static inline void +mlxsw_reg_qpdpm_dscp_pack(char *payload, unsigned short dscp, u8 prio) +{ + mlxsw_reg_qpdpm_dscp_entry_e_set(payload, dscp, 1); + mlxsw_reg_qpdpm_dscp_entry_prio_set(payload, dscp, prio); +} + /* PMLP - Ports Module to Local Port Register * ------------------------------------------ * Configures the assignment of modules to local ports. @@ -3350,6 +4141,7 @@ MLXSW_ITEM32(reg, ppcnt, pnat, 0x00, 14, 2); enum mlxsw_reg_ppcnt_grp { MLXSW_REG_PPCNT_IEEE_8023_CNT = 0x0, + MLXSW_REG_PPCNT_RFC_2819_CNT = 0x2, MLXSW_REG_PPCNT_EXT_CNT = 0x5, MLXSW_REG_PPCNT_PRIO_CNT = 0x10, MLXSW_REG_PPCNT_TC_CNT = 0x11, @@ -3508,6 +4300,68 @@ MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_received, MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_transmitted, MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x90, 0, 64); +/* Ethernet RFC 2819 Counter Group */ + +/* reg_ppcnt_ether_stats_pkts64octets + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts64octets, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x58, 0, 64); + +/* reg_ppcnt_ether_stats_pkts65to127octets + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts65to127octets, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x60, 0, 64); + +/* reg_ppcnt_ether_stats_pkts128to255octets + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts128to255octets, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x68, 0, 64); + +/* reg_ppcnt_ether_stats_pkts256to511octets + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts256to511octets, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x70, 0, 64); + +/* reg_ppcnt_ether_stats_pkts512to1023octets + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts512to1023octets, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x78, 0, 64); + +/* reg_ppcnt_ether_stats_pkts1024to1518octets + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts1024to1518octets, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x80, 0, 64); + +/* reg_ppcnt_ether_stats_pkts1519to2047octets + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts1519to2047octets, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x88, 0, 64); + +/* reg_ppcnt_ether_stats_pkts2048to4095octets + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts2048to4095octets, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x90, 0, 64); + +/* reg_ppcnt_ether_stats_pkts4096to8191octets + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts4096to8191octets, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x98, 0, 64); + +/* reg_ppcnt_ether_stats_pkts8192to10239octets + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts8192to10239octets, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0xA0, 0, 64); + /* Ethernet Extended Counter Group Counters */ /* reg_ppcnt_ecn_marked @@ -4338,6 +5192,20 @@ MLXSW_ITEM32(reg, ritr, if_swid, 0x08, 24, 8); */ MLXSW_ITEM_BUF(reg, ritr, if_mac, 0x12, 6); +/* reg_ritr_if_vrrp_id_ipv6 + * VRRP ID for IPv6 + * Note: Reserved for RIF types other than VLAN, FID and Sub-port. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, if_vrrp_id_ipv6, 0x1C, 8, 8); + +/* reg_ritr_if_vrrp_id_ipv4 + * VRRP ID for IPv4 + * Note: Reserved for RIF types other than VLAN, FID and Sub-port. 
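The four new QoS registers compose into one DSCP-trust pipeline: QPTS selects the port trust state, QPDPM maps ingress DSCP to switch priority, QPDSM maps priority back to egress DSCP, and QRWE arms the rewrite. A hedged end-to-end sketch for one port, with intermediate error handling trimmed and DSCP 46/priority 6 chosen only for illustration:

static int example_port_trust_dscp(struct mlxsw_core *mlxsw_core,
                                   u8 local_port)
{
        char qpts_pl[MLXSW_REG_QPTS_LEN];
        char qpdpm_pl[MLXSW_REG_QPDPM_LEN];
        char qpdsm_pl[MLXSW_REG_QPDSM_LEN];
        char qrwe_pl[MLXSW_REG_QRWE_LEN];

        mlxsw_reg_qpts_pack(qpts_pl, local_port,
                            MLXSW_REG_QPTS_TRUST_STATE_DSCP);
        mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpts), qpts_pl);

        mlxsw_reg_qpdpm_pack(qpdpm_pl, local_port);
        mlxsw_reg_qpdpm_dscp_pack(qpdpm_pl, 46, 6); /* EF -> prio 6 */
        mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpdpm), qpdpm_pl);

        mlxsw_reg_qpdsm_pack(qpdsm_pl, local_port);
        mlxsw_reg_qpdsm_prio_pack(qpdsm_pl, 6, 46); /* prio 6 -> EF */
        mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpdsm), qpdsm_pl);

        mlxsw_reg_qrwe_pack(qrwe_pl, local_port, false, true); /* DSCP only */
        return mlxsw_reg_write(mlxsw_core, MLXSW_REG(qrwe), qrwe_pl);
}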
+ * Access: RW + */ +MLXSW_ITEM32(reg, ritr, if_vrrp_id_ipv4, 0x1C, 0, 8); + /* VLAN Interface */ /* reg_ritr_vlan_if_vid @@ -7871,6 +8739,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(spvmlr), MLXSW_REG(cwtp), MLXSW_REG(cwtpm), + MLXSW_REG(pgcr), MLXSW_REG(ppbt), MLXSW_REG(pacl), MLXSW_REG(pagt), @@ -7879,9 +8748,19 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(prcr), MLXSW_REG(pefa), MLXSW_REG(ptce2), + MLXSW_REG(perpt), + MLXSW_REG(perar), + MLXSW_REG(ptce3), + MLXSW_REG(percr), + MLXSW_REG(pererp), + MLXSW_REG(iedr), + MLXSW_REG(qpts), MLXSW_REG(qpcr), MLXSW_REG(qtct), MLXSW_REG(qeec), + MLXSW_REG(qrwe), + MLXSW_REG(qpdsm), + MLXSW_REG(qpdpm), MLXSW_REG(pmlp), MLXSW_REG(pmtu), MLXSW_REG(ptys), diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h index fd9299ccec72..bf650f2cd5af 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/resources.h +++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h @@ -42,6 +42,8 @@ enum mlxsw_res_id { MLXSW_RES_ID_KVD_SIZE, MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE, MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE, + MLXSW_RES_ID_MAX_KVD_LINEAR_RANGE, + MLXSW_RES_ID_MAX_KVD_ACTION_SETS, MLXSW_RES_ID_MAX_TRAP_GROUPS, MLXSW_RES_ID_CQE_V0, MLXSW_RES_ID_CQE_V1, @@ -63,6 +65,13 @@ enum mlxsw_res_id { MLXSW_RES_ID_ACL_FLEX_KEYS, MLXSW_RES_ID_ACL_MAX_ACTION_PER_RULE, MLXSW_RES_ID_ACL_ACTIONS_PER_SET, + MLXSW_RES_ID_ACL_MAX_ERPT_BANKS, + MLXSW_RES_ID_ACL_MAX_ERPT_BANK_SIZE, + MLXSW_RES_ID_ACL_MAX_LARGE_KEY_ID, + MLXSW_RES_ID_ACL_ERPT_ENTRIES_2KB, + MLXSW_RES_ID_ACL_ERPT_ENTRIES_4KB, + MLXSW_RES_ID_ACL_ERPT_ENTRIES_8KB, + MLXSW_RES_ID_ACL_ERPT_ENTRIES_12KB, MLXSW_RES_ID_MAX_CPU_POLICERS, MLXSW_RES_ID_MAX_VRS, MLXSW_RES_ID_MAX_RIFS, @@ -83,6 +92,8 @@ static u16 mlxsw_res_ids[] = { [MLXSW_RES_ID_KVD_SIZE] = 0x1001, [MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE] = 0x1002, [MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE] = 0x1003, + [MLXSW_RES_ID_MAX_KVD_LINEAR_RANGE] = 0x1005, + [MLXSW_RES_ID_MAX_KVD_ACTION_SETS] = 0x1007, [MLXSW_RES_ID_MAX_TRAP_GROUPS] = 0x2201, [MLXSW_RES_ID_CQE_V0] = 0x2210, [MLXSW_RES_ID_CQE_V1] = 0x2211, @@ -104,6 +115,13 @@ static u16 mlxsw_res_ids[] = { [MLXSW_RES_ID_ACL_FLEX_KEYS] = 0x2910, [MLXSW_RES_ID_ACL_MAX_ACTION_PER_RULE] = 0x2911, [MLXSW_RES_ID_ACL_ACTIONS_PER_SET] = 0x2912, + [MLXSW_RES_ID_ACL_MAX_ERPT_BANKS] = 0x2940, + [MLXSW_RES_ID_ACL_MAX_ERPT_BANK_SIZE] = 0x2941, + [MLXSW_RES_ID_ACL_MAX_LARGE_KEY_ID] = 0x2942, + [MLXSW_RES_ID_ACL_ERPT_ENTRIES_2KB] = 0x2950, + [MLXSW_RES_ID_ACL_ERPT_ENTRIES_4KB] = 0x2951, + [MLXSW_RES_ID_ACL_ERPT_ENTRIES_8KB] = 0x2952, + [MLXSW_RES_ID_ACL_ERPT_ENTRIES_12KB] = 0x2953, [MLXSW_RES_ID_MAX_CPU_POLICERS] = 0x2A13, [MLXSW_RES_ID_MAX_VRS] = 0x2C01, [MLXSW_RES_ID_MAX_RIFS] = 0x2C02, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 968b88af2ef5..039228525fb1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -74,17 +74,25 @@ #include "spectrum_span.h" #include "../mlxfw/mlxfw.h" -#define MLXSW_FWREV_MAJOR 13 -#define MLXSW_FWREV_MINOR 1620 -#define MLXSW_FWREV_SUBMINOR 192 -#define MLXSW_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100) +#define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100) -#define MLXSW_SP_FW_FILENAME \ - "mellanox/mlxsw_spectrum-" __stringify(MLXSW_FWREV_MAJOR) \ - "." __stringify(MLXSW_FWREV_MINOR) \ - "." 
__stringify(MLXSW_FWREV_SUBMINOR) ".mfa2" +#define MLXSW_SP1_FWREV_MAJOR 13 +#define MLXSW_SP1_FWREV_MINOR 1620 +#define MLXSW_SP1_FWREV_SUBMINOR 192 -static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum"; +static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { + .major = MLXSW_SP1_FWREV_MAJOR, + .minor = MLXSW_SP1_FWREV_MINOR, + .subminor = MLXSW_SP1_FWREV_SUBMINOR, +}; + +#define MLXSW_SP1_FW_FILENAME \ + "mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \ + "." __stringify(MLXSW_SP1_FWREV_MINOR) \ + "." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2" + +static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum"; +static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2"; static const char mlxsw_sp_driver_version[] = "1.0"; /* tx_hdr_version @@ -338,29 +346,35 @@ static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp, static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp) { const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev; + const struct mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev; + const char *fw_filename = mlxsw_sp->fw_filename; const struct firmware *firmware; int err; + /* Don't check if driver does not require it */ + if (!req_rev || !fw_filename) + return 0; + /* Validate driver & FW are compatible */ - if (rev->major != MLXSW_FWREV_MAJOR) { + if (rev->major != req_rev->major) { WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n", - rev->major, MLXSW_FWREV_MAJOR); + rev->major, req_rev->major); return -EINVAL; } - if (MLXSW_FWREV_MINOR_TO_BRANCH(rev->minor) == - MLXSW_FWREV_MINOR_TO_BRANCH(MLXSW_FWREV_MINOR)) + if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev->minor) == + MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor)) return 0; dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n", rev->major, rev->minor, rev->subminor); dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n", - MLXSW_SP_FW_FILENAME); + fw_filename); - err = request_firmware_direct(&firmware, MLXSW_SP_FW_FILENAME, + err = request_firmware_direct(&firmware, fw_filename, mlxsw_sp->bus_info->dev); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n", - MLXSW_SP_FW_FILENAME); + fw_filename); return err; } @@ -1441,6 +1455,11 @@ mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block, return 0; case TC_CLSFLOWER_STATS: return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f); + case TC_CLSFLOWER_TMPLT_CREATE: + return mlxsw_sp_flower_tmplt_create(mlxsw_sp, acl_block, f); + case TC_CLSFLOWER_TMPLT_DESTROY: + mlxsw_sp_flower_tmplt_destroy(mlxsw_sp, acl_block, f); + return 0; default: return -EOPNOTSUPP; } @@ -1503,7 +1522,8 @@ static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type, static int mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port, - struct tcf_block *block, bool ingress) + struct tcf_block *block, bool ingress, + struct netlink_ext_ack *extack) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct mlxsw_sp_acl_block *acl_block; @@ -1518,7 +1538,7 @@ mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port, return -ENOMEM; block_cb = __tcf_block_cb_register(block, mlxsw_sp_setup_tc_block_cb_flower, - mlxsw_sp, acl_block); + mlxsw_sp, acl_block, extack); if (IS_ERR(block_cb)) { err = PTR_ERR(block_cb); goto err_cb_register; @@ -1541,7 +1561,7 @@ mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port, err_block_bind: if (!tcf_block_cb_decref(block_cb)) { - 
__tcf_block_cb_unregister(block_cb); + __tcf_block_cb_unregister(block, block_cb); err_cb_register: mlxsw_sp_acl_block_destroy(acl_block); } @@ -1571,7 +1591,7 @@ mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port, err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block, mlxsw_sp_port, ingress); if (!err && !tcf_block_cb_decref(block_cb)) { - __tcf_block_cb_unregister(block_cb); + __tcf_block_cb_unregister(block, block_cb); mlxsw_sp_acl_block_destroy(acl_block); } } @@ -1596,11 +1616,12 @@ static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port, switch (f->command) { case TC_BLOCK_BIND: err = tcf_block_cb_register(f->block, cb, mlxsw_sp_port, - mlxsw_sp_port); + mlxsw_sp_port, f->extack); if (err) return err; err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port, - f->block, ingress); + f->block, ingress, + f->extack); if (err) { tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port); return err; @@ -1712,7 +1733,8 @@ static void mlxsw_sp_port_get_drvinfo(struct net_device *dev, struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind, + sizeof(drvinfo->driver)); strlcpy(drvinfo->version, mlxsw_sp_driver_version, sizeof(drvinfo->version)); snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), @@ -1873,6 +1895,52 @@ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = { #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats) +static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = { + { + .str = "ether_pkts64octets", + .getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get, + }, + { + .str = "ether_pkts65to127octets", + .getter = mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get, + }, + { + .str = "ether_pkts128to255octets", + .getter = mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get, + }, + { + .str = "ether_pkts256to511octets", + .getter = mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get, + }, + { + .str = "ether_pkts512to1023octets", + .getter = mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get, + }, + { + .str = "ether_pkts1024to1518octets", + .getter = mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get, + }, + { + .str = "ether_pkts1519to2047octets", + .getter = mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get, + }, + { + .str = "ether_pkts2048to4095octets", + .getter = mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get, + }, + { + .str = "ether_pkts4096to8191octets", + .getter = mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get, + }, + { + .str = "ether_pkts8192to10239octets", + .getter = mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get, + }, +}; + +#define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \ + ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats) + static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = { { .str = "rx_octets_prio", @@ -1964,6 +2032,11 @@ static void mlxsw_sp_port_get_strings(struct net_device *dev, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } + for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) { + memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) mlxsw_sp_port_get_prio_strings(&p, i); @@ -2003,10 +2076,14 @@ mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats, int *p_len, enum mlxsw_reg_ppcnt_grp grp) { switch (grp) { - case MLXSW_REG_PPCNT_IEEE_8023_CNT: + case 
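/* Ordering note: mlxsw_sp_port_get_strings() above emits the IEEE 802.3
 * names first, then the new RFC 2819 histogram names, then the
 * per-priority and per-TC names; mlxsw_sp_port_get_stats() below walks the
 * counter groups in the same order and advances data_index by each group's
 * length, so strings and values stay aligned.
 */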
MLXSW_REG_PPCNT_IEEE_8023_CNT: *p_hw_stats = mlxsw_sp_port_hw_stats; *p_len = MLXSW_SP_PORT_HW_STATS_LEN; break; + case MLXSW_REG_PPCNT_RFC_2819_CNT: + *p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats; + *p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; + break; case MLXSW_REG_PPCNT_PRIO_CNT: *p_hw_stats = mlxsw_sp_port_hw_prio_stats; *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN; @@ -2056,6 +2133,11 @@ static void mlxsw_sp_port_get_stats(struct net_device *dev, data, data_index); data_index = MLXSW_SP_PORT_HW_STATS_LEN; + /* RFC 2819 Counters */ + __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0, + data, data_index); + data_index += MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; + /* Per-Priority Counters */ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i, @@ -3371,6 +3453,8 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = { MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false), MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false), MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false), + MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false), + MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false), /* PKT Sample trap */ MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU, false, SP_IP2ME, DISCARD), @@ -3757,6 +3841,36 @@ err_fids_init: return err; } +static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core, + const struct mlxsw_bus_info *mlxsw_bus_info) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + + mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev; + mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME; + mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops; + mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops; + mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops; + mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops; + mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops; + + return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info); +} + +static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core, + const struct mlxsw_bus_info *mlxsw_bus_info) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + + mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; + mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; + mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; + mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; + mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; + + return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info); +} + static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); @@ -3777,7 +3891,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) mlxsw_sp_kvdl_fini(mlxsw_sp); } -static const struct mlxsw_config_profile mlxsw_sp_config_profile = { +static const struct mlxsw_config_profile mlxsw_sp1_config_profile = { .used_max_mid = 1, .max_mid = MLXSW_SP_MID_MAX, .used_flood_tables = 1, @@ -3803,6 +3917,28 @@ static const struct mlxsw_config_profile mlxsw_sp_config_profile = { }, }; +static const struct mlxsw_config_profile mlxsw_sp2_config_profile = { + .used_max_mid = 1, + .max_mid = MLXSW_SP_MID_MAX, + .used_flood_tables = 1, + .used_flood_mode = 1, + .flood_mode = 3, + .max_fid_offset_flood_tables = 3, + .fid_offset_flood_table_size = VLAN_N_VID - 1, + .max_fid_flood_tables = 3, + .fid_flood_table_size = MLXSW_SP_FID_8021D_MAX, + .used_max_ib_mc = 1, + .max_ib_mc = 0, + .used_max_pkey = 1, + .max_pkey = 0, + .swid_config = { + { + .used_type = 1, + .type = MLXSW_PORT_SWID_TYPE_ETH, + } + }, +}; + static void 
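/* A sketch, not from this patch, of how the per-ASIC ops chosen in
 * mlxsw_sp1_init()/mlxsw_sp2_init() above are consumed by the common code;
 * the kvdl_priv() accessor is hypothetical, the real dispatcher lives in
 * spectrum_kvdl.c:
 *
 *	int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp,
 *				enum mlxsw_sp_kvdl_entry_type type,
 *				unsigned int entry_count, u32 *p_entry_index)
 *	{
 *		return mlxsw_sp->kvdl_ops->alloc(mlxsw_sp,
 *						 kvdl_priv(mlxsw_sp->kvdl),
 *						 type, entry_count,
 *						 p_entry_index);
 *	}
 */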
mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core, struct devlink_resource_size_params *kvd_size_params, @@ -3839,7 +3975,7 @@ mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core, DEVLINK_RESOURCE_UNIT_ENTRY); } -static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core) +static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core) { struct devlink *devlink = priv_to_devlink(mlxsw_core); struct devlink_resource_size_params hash_single_size_params; @@ -3850,7 +3986,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core) const struct mlxsw_config_profile *profile; int err; - profile = &mlxsw_sp_config_profile; + profile = &mlxsw_sp1_config_profile; if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE)) return -EIO; @@ -3876,7 +4012,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core) if (err) return err; - err = mlxsw_sp_kvdl_resources_register(mlxsw_core); + err = mlxsw_sp1_kvdl_resources_register(mlxsw_core); if (err) return err; @@ -3905,6 +4041,16 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core) return 0; } +static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core) +{ + return mlxsw_sp1_resources_kvd_register(mlxsw_core); +} + +static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core) +{ + return 0; +} + static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core, const struct mlxsw_config_profile *profile, u64 *p_single_size, u64 *p_double_size, @@ -3960,10 +4106,10 @@ static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core, return 0; } -static struct mlxsw_driver mlxsw_sp_driver = { - .kind = mlxsw_sp_driver_name, +static struct mlxsw_driver mlxsw_sp1_driver = { + .kind = mlxsw_sp1_driver_name, .priv_size = sizeof(struct mlxsw_sp), - .init = mlxsw_sp_init, + .init = mlxsw_sp1_init, .fini = mlxsw_sp_fini, .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, .port_split = mlxsw_sp_port_split, @@ -3979,10 +4125,35 @@ static struct mlxsw_driver mlxsw_sp_driver = { .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, .txhdr_construct = mlxsw_sp_txhdr_construct, - .resources_register = mlxsw_sp_resources_register, + .resources_register = mlxsw_sp1_resources_register, .kvd_sizes_get = mlxsw_sp_kvd_sizes_get, .txhdr_len = MLXSW_TXHDR_LEN, - .profile = &mlxsw_sp_config_profile, + .profile = &mlxsw_sp1_config_profile, + .res_query_enabled = true, +}; + +static struct mlxsw_driver mlxsw_sp2_driver = { + .kind = mlxsw_sp2_driver_name, + .priv_size = sizeof(struct mlxsw_sp), + .init = mlxsw_sp2_init, + .fini = mlxsw_sp_fini, + .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set, + .port_split = mlxsw_sp_port_split, + .port_unsplit = mlxsw_sp_port_unsplit, + .sb_pool_get = mlxsw_sp_sb_pool_get, + .sb_pool_set = mlxsw_sp_sb_pool_set, + .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, + .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, + .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, + .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, + .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, + .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, + .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, + .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, + .txhdr_construct = mlxsw_sp_txhdr_construct, + .resources_register = mlxsw_sp2_resources_register, + .txhdr_len = MLXSW_TXHDR_LEN, + .profile = &mlxsw_sp2_config_profile, .res_query_enabled = true, }; @@ -4397,7 +4568,8 @@ static 
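/* The two driver structs above diverge where the ASICs do: mlxsw_sp2_driver
 * has no .kvd_sizes_get and its .resources_register callback is a stub,
 * which is consistent with Spectrum-2 not using the Spectrum-1 KVD linear
 * partitioning registered earlier in this file.
 */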
int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, if (!is_vlan_dev(upper_dev) && !netif_is_lag_master(upper_dev) && !netif_is_bridge_master(upper_dev) && - !netif_is_ovs_master(upper_dev)) { + !netif_is_ovs_master(upper_dev) && + !netif_is_macvlan(upper_dev)) { NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); return -EINVAL; } @@ -4423,6 +4595,11 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port"); return -EINVAL; } + if (netif_is_macvlan(upper_dev) && + !mlxsw_sp_rif_find_by_dev(mlxsw_sp, lower_dev)) { + NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); + return -EOPNOTSUPP; + } if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) { NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN"); return -EINVAL; @@ -4461,6 +4638,9 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, err = mlxsw_sp_port_ovs_join(mlxsw_sp_port); else mlxsw_sp_port_ovs_leave(mlxsw_sp_port); + } else if (netif_is_macvlan(upper_dev)) { + if (!info->linking) + mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); } break; } @@ -4545,8 +4725,9 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, switch (event) { case NETDEV_PRECHANGEUPPER: upper_dev = info->upper_dev; - if (!netif_is_bridge_master(upper_dev)) { - NL_SET_ERR_MSG_MOD(extack, "VLAN devices only support bridge and VRF uppers"); + if (!netif_is_bridge_master(upper_dev) && + !netif_is_macvlan(upper_dev)) { + NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); return -EINVAL; } if (!info->linking) @@ -4558,6 +4739,11 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported"); return -EINVAL; } + if (netif_is_macvlan(upper_dev) && + !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) { + NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); + return -EOPNOTSUPP; + } break; case NETDEV_CHANGEUPPER: upper_dev = info->upper_dev; @@ -4571,6 +4757,9 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, mlxsw_sp_port_bridge_leave(mlxsw_sp_port, vlan_dev, upper_dev); + } else if (netif_is_macvlan(upper_dev)) { + if (!info->linking) + mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); } else { err = -EINVAL; WARN_ON(1); @@ -4620,6 +4809,64 @@ static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev, return 0; } +static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev, + unsigned long event, void *ptr) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev); + struct netdev_notifier_changeupper_info *info = ptr; + struct netlink_ext_ack *extack; + struct net_device *upper_dev; + + if (!mlxsw_sp) + return 0; + + extack = netdev_notifier_info_to_extack(&info->info); + + switch (event) { + case NETDEV_PRECHANGEUPPER: + upper_dev = info->upper_dev; + if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) { + NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); + return -EOPNOTSUPP; + } + if (!info->linking) + break; + if (netif_is_macvlan(upper_dev) && + !mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev)) { + NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); + return -EOPNOTSUPP; + } + break; + case NETDEV_CHANGEUPPER: + upper_dev = info->upper_dev; + if (info->linking) + break; + if 
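/* A pattern worth making explicit across these notifier handlers:
 * NETDEV_PRECHANGEUPPER validates the requested topology and may veto it
 * with an extack message, while NETDEV_CHANGEUPPER only reacts to a change
 * that already happened (for example, tearing down macvlan router state on
 * unlink just below), since at that point the operation can no longer fail.
 */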
(netif_is_macvlan(upper_dev)) + mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); + break; + } + + return 0; +} + +static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev, + unsigned long event, void *ptr) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev); + struct netdev_notifier_changeupper_info *info = ptr; + struct netlink_ext_ack *extack; + + if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER) + return 0; + + extack = netdev_notifier_info_to_extack(&info->info); + + /* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */ + NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); + + return -EOPNOTSUPP; +} + static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr) { struct netdev_notifier_changeupper_info *info = ptr; @@ -4661,6 +4908,10 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *nb, err = mlxsw_sp_netdevice_lag_event(dev, event, ptr); else if (is_vlan_dev(dev)) err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr); + else if (netif_is_bridge_master(dev)) + err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr); + else if (netif_is_macvlan(dev)) + err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr); return notifier_from_errno(err); } @@ -4681,14 +4932,24 @@ static struct notifier_block mlxsw_sp_inet6addr_nb __read_mostly = { .notifier_call = mlxsw_sp_inet6addr_event, }; -static const struct pci_device_id mlxsw_sp_pci_id_table[] = { +static const struct pci_device_id mlxsw_sp1_pci_id_table[] = { {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0}, {0, }, }; -static struct pci_driver mlxsw_sp_pci_driver = { - .name = mlxsw_sp_driver_name, - .id_table = mlxsw_sp_pci_id_table, +static struct pci_driver mlxsw_sp1_pci_driver = { + .name = mlxsw_sp1_driver_name, + .id_table = mlxsw_sp1_pci_id_table, +}; + +static const struct pci_device_id mlxsw_sp2_pci_id_table[] = { + {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0}, + {0, }, +}; + +static struct pci_driver mlxsw_sp2_pci_driver = { + .name = mlxsw_sp2_driver_name, + .id_table = mlxsw_sp2_pci_id_table, }; static int __init mlxsw_sp_module_init(void) @@ -4700,19 +4961,31 @@ static int __init mlxsw_sp_module_init(void) register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); register_inet6addr_notifier(&mlxsw_sp_inet6addr_nb); - err = mlxsw_core_driver_register(&mlxsw_sp_driver); + err = mlxsw_core_driver_register(&mlxsw_sp1_driver); + if (err) + goto err_sp1_core_driver_register; + + err = mlxsw_core_driver_register(&mlxsw_sp2_driver); + if (err) + goto err_sp2_core_driver_register; + + err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver); if (err) - goto err_core_driver_register; + goto err_sp1_pci_driver_register; - err = mlxsw_pci_driver_register(&mlxsw_sp_pci_driver); + err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver); if (err) - goto err_pci_driver_register; + goto err_sp2_pci_driver_register; return 0; -err_pci_driver_register: - mlxsw_core_driver_unregister(&mlxsw_sp_driver); -err_core_driver_register: +err_sp2_pci_driver_register: + mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); +err_sp1_pci_driver_register: + mlxsw_core_driver_unregister(&mlxsw_sp2_driver); +err_sp2_core_driver_register: + mlxsw_core_driver_unregister(&mlxsw_sp1_driver); +err_sp1_core_driver_register: unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb); unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); @@ -4722,8 +4995,10 @@ err_core_driver_register: static void __exit 
mlxsw_sp_module_exit(void) { - mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver); - mlxsw_core_driver_unregister(&mlxsw_sp_driver); + mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); + mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); + mlxsw_core_driver_unregister(&mlxsw_sp2_driver); + mlxsw_core_driver_unregister(&mlxsw_sp1_driver); unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb); unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); @@ -4736,5 +5011,6 @@ module_exit(mlxsw_sp_module_exit); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>"); MODULE_DESCRIPTION("Mellanox Spectrum driver"); -MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table); -MODULE_FIRMWARE(MLXSW_SP_FW_FILENAME); +MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table); +MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table); +MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 4a519d8edec8..13eca1a79d52 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -1,6 +1,6 @@ /* * drivers/net/ethernet/mellanox/mlxsw/spectrum.h - * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved. * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com> * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com> * Copyright (c) 2015 Elad Raz <eladr@mellanox.com> @@ -54,6 +54,7 @@ #include "core.h" #include "core_acl_flex_keys.h" #include "core_acl_flex_actions.h" +#include "reg.h" #define MLXSW_SP_FID_8021D_MAX 1024 @@ -145,6 +146,9 @@ struct mlxsw_sp_acl; struct mlxsw_sp_counter_pool; struct mlxsw_sp_fid_core; struct mlxsw_sp_kvdl; +struct mlxsw_sp_kvdl_ops; +struct mlxsw_sp_mr_tcam_ops; +struct mlxsw_sp_acl_tcam_ops; struct mlxsw_sp { struct mlxsw_sp_port **ports; @@ -168,6 +172,13 @@ struct mlxsw_sp { struct mlxsw_sp_span_entry *entries; int entries_count; } span; + const struct mlxsw_fw_rev *req_rev; + const char *fw_filename; + const struct mlxsw_sp_kvdl_ops *kvdl_ops; + const struct mlxsw_afa_ops *afa_ops; + const struct mlxsw_afk_ops *afk_ops; + const struct mlxsw_sp_mr_tcam_ops *mr_tcam_ops; + const struct mlxsw_sp_acl_tcam_ops *acl_tcam_ops; }; static inline struct mlxsw_sp_upper * @@ -233,6 +244,7 @@ struct mlxsw_sp_port { struct ieee_ets *ets; struct ieee_maxrate *maxrate; struct ieee_pfc *pfc; + enum mlxsw_reg_qpts_trust_state trust_state; } dcb; struct { u8 module; @@ -407,6 +419,8 @@ static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port) int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp); void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp); int mlxsw_sp_netdevice_router_port_event(struct net_device *dev); +void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp, + const struct net_device *macvlan_dev); int mlxsw_sp_inetaddr_event(struct notifier_block *unused, unsigned long event, void *ptr); int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused, @@ -435,15 +449,62 @@ mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan); void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif); /* spectrum_kvdl.c */ +enum mlxsw_sp_kvdl_entry_type { + MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, + MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET, + MLXSW_SP_KVDL_ENTRY_TYPE_PBS, + MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR, +}; + +static inline unsigned int +mlxsw_sp_kvdl_entry_size(enum 
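/* The switch below currently returns a one-slot entry size for every type;
 * the enum apparently exists so the per-ASIC kvdl_ops can give each entry
 * type its own backing store on Spectrum-2, an inference drawn from the
 * mlxsw_sp1_kvdl_ops/mlxsw_sp2_kvdl_ops split declared further down.
 */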
mlxsw_sp_kvdl_entry_type type) +{ + switch (type) { + case MLXSW_SP_KVDL_ENTRY_TYPE_ADJ: /* fall through */ + case MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET: /* fall through */ + case MLXSW_SP_KVDL_ENTRY_TYPE_PBS: /* fall through */ + case MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR: /* fall through */ + default: + return 1; + } +} + +struct mlxsw_sp_kvdl_ops { + size_t priv_size; + int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv); + void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv); + int (*alloc)(struct mlxsw_sp *mlxsw_sp, void *priv, + enum mlxsw_sp_kvdl_entry_type type, + unsigned int entry_count, u32 *p_entry_index); + void (*free)(struct mlxsw_sp *mlxsw_sp, void *priv, + enum mlxsw_sp_kvdl_entry_type type, + unsigned int entry_count, int entry_index); + int (*alloc_size_query)(struct mlxsw_sp *mlxsw_sp, void *priv, + enum mlxsw_sp_kvdl_entry_type type, + unsigned int entry_count, + unsigned int *p_alloc_count); + int (*resources_register)(struct mlxsw_sp *mlxsw_sp, void *priv); +}; + int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp); void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp); -int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count, - u32 *p_entry_index); -void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index); -int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp, - unsigned int entry_count, - unsigned int *p_alloc_size); -int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core); +int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_kvdl_entry_type type, + unsigned int entry_count, u32 *p_entry_index); +void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_kvdl_entry_type type, + unsigned int entry_count, int entry_index); +int mlxsw_sp_kvdl_alloc_count_query(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_kvdl_entry_type type, + unsigned int entry_count, + unsigned int *p_alloc_count); + +/* spectrum1_kvdl.c */ +extern const struct mlxsw_sp_kvdl_ops mlxsw_sp1_kvdl_ops; +int mlxsw_sp1_kvdl_resources_register(struct mlxsw_core *mlxsw_core); + +/* spectrum2_kvdl.c */ +extern const struct mlxsw_sp_kvdl_ops mlxsw_sp2_kvdl_ops; struct mlxsw_sp_acl_rule_info { unsigned int priority; @@ -452,44 +513,14 @@ struct mlxsw_sp_acl_rule_info { unsigned int counter_index; }; -enum mlxsw_sp_acl_profile { - MLXSW_SP_ACL_PROFILE_FLOWER, -}; - -struct mlxsw_sp_acl_profile_ops { - size_t ruleset_priv_size; - int (*ruleset_add)(struct mlxsw_sp *mlxsw_sp, - void *priv, void *ruleset_priv); - void (*ruleset_del)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv); - int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv, - struct mlxsw_sp_port *mlxsw_sp_port, - bool ingress); - void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv, - struct mlxsw_sp_port *mlxsw_sp_port, - bool ingress); - u16 (*ruleset_group_id)(void *ruleset_priv); - size_t rule_priv_size; - int (*rule_add)(struct mlxsw_sp *mlxsw_sp, - void *ruleset_priv, void *rule_priv, - struct mlxsw_sp_acl_rule_info *rulei); - void (*rule_del)(struct mlxsw_sp *mlxsw_sp, void *rule_priv); - int (*rule_activity_get)(struct mlxsw_sp *mlxsw_sp, void *rule_priv, - bool *activity); -}; - -struct mlxsw_sp_acl_ops { - size_t priv_size; - int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv); - void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv); - const struct mlxsw_sp_acl_profile_ops * - (*profile_ops)(struct mlxsw_sp *mlxsw_sp, - enum mlxsw_sp_acl_profile profile); -}; - struct mlxsw_sp_acl_block; struct mlxsw_sp_acl_ruleset; /* spectrum_acl.c */ +enum mlxsw_sp_acl_profile { 
+ MLXSW_SP_ACL_PROFILE_FLOWER, +}; + struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl); struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block); unsigned int mlxsw_sp_acl_block_rule_count(struct mlxsw_sp_acl_block *block); @@ -514,7 +545,8 @@ mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_ruleset * mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_block *block, u32 chain_index, - enum mlxsw_sp_acl_profile profile); + enum mlxsw_sp_acl_profile profile, + struct mlxsw_afk_element_usage *tmplt_elusage); void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_ruleset *ruleset); u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset); @@ -541,25 +573,30 @@ int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei); int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule_info *rulei, struct mlxsw_sp_acl_block *block, - struct net_device *out_dev); + struct net_device *out_dev, + struct netlink_ext_ack *extack); int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule_info *rulei, - struct net_device *out_dev); + struct net_device *out_dev, + struct netlink_ext_ack *extack); int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule_info *rulei, - u32 action, u16 vid, u16 proto, u8 prio); + u32 action, u16 vid, u16 proto, u8 prio, + struct netlink_ext_ack *extack); int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_rule_info *rulei); + struct mlxsw_sp_acl_rule_info *rulei, + struct netlink_ext_ack *extack); int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule_info *rulei, - u16 fid); + u16 fid, struct netlink_ext_ack *extack); struct mlxsw_sp_acl_rule; struct mlxsw_sp_acl_rule * mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_ruleset *ruleset, - unsigned long cookie); + unsigned long cookie, + struct netlink_ext_ack *extack); void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule *rule); int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp, @@ -582,7 +619,52 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp); void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp); /* spectrum_acl_tcam.c */ -extern const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops; +struct mlxsw_sp_acl_tcam; +struct mlxsw_sp_acl_tcam_region; + +struct mlxsw_sp_acl_tcam_ops { + enum mlxsw_reg_ptar_key_type key_type; + size_t priv_size; + int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv, + struct mlxsw_sp_acl_tcam *tcam); + void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv); + size_t region_priv_size; + int (*region_init)(struct mlxsw_sp *mlxsw_sp, void *region_priv, + void *tcam_priv, + struct mlxsw_sp_acl_tcam_region *region); + void (*region_fini)(struct mlxsw_sp *mlxsw_sp, void *region_priv); + int (*region_associate)(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region); + size_t chunk_priv_size; + void (*chunk_init)(void *region_priv, void *chunk_priv, + unsigned int priority); + void (*chunk_fini)(void *chunk_priv); + size_t entry_priv_size; + int (*entry_add)(struct mlxsw_sp *mlxsw_sp, + void *region_priv, void *chunk_priv, + void *entry_priv, + struct mlxsw_sp_acl_rule_info *rulei); + void (*entry_del)(struct mlxsw_sp *mlxsw_sp, + void *region_priv, void *chunk_priv, + void *entry_priv); + int (*entry_activity_get)(struct mlxsw_sp *mlxsw_sp, + void *region_priv, void *entry_priv, + 
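/* Convention implied by the priv_size fields in this ops struct: the common
 * ACL code presumably allocates priv_size, region_priv_size, chunk_priv_size
 * and entry_priv_size bytes per object and hands the blobs back through
 * these callbacks; the Spectrum-1 implementation below casts region_priv,
 * chunk_priv and entry_priv to its own types on that assumption.
 */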
bool *activity); +}; + +/* spectrum1_acl_tcam.c */ +extern const struct mlxsw_sp_acl_tcam_ops mlxsw_sp1_acl_tcam_ops; + +/* spectrum2_acl_tcam.c */ +extern const struct mlxsw_sp_acl_tcam_ops mlxsw_sp2_acl_tcam_ops; + +/* spectrum_acl_flex_actions.c */ +extern const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops; +extern const struct mlxsw_afa_ops mlxsw_sp2_act_afa_ops; + +/* spectrum_acl_flex_keys.c */ +extern const struct mlxsw_afk_ops mlxsw_sp1_afk_ops; +extern const struct mlxsw_afk_ops mlxsw_sp2_afk_ops; /* spectrum_flower.c */ int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp, @@ -594,6 +676,12 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp, int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_block *block, struct tc_cls_flower_offload *f); +int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_block *block, + struct tc_cls_flower_offload *f); +void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_block *block, + struct tc_cls_flower_offload *f); /* spectrum_qdisc.c */ int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port); @@ -631,4 +719,40 @@ void mlxsw_sp_port_fids_fini(struct mlxsw_sp_port *mlxsw_sp_port); int mlxsw_sp_fids_init(struct mlxsw_sp *mlxsw_sp); void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp); +/* spectrum_mr.c */ +enum mlxsw_sp_mr_route_prio { + MLXSW_SP_MR_ROUTE_PRIO_SG, + MLXSW_SP_MR_ROUTE_PRIO_STARG, + MLXSW_SP_MR_ROUTE_PRIO_CATCHALL, + __MLXSW_SP_MR_ROUTE_PRIO_MAX +}; + +#define MLXSW_SP_MR_ROUTE_PRIO_MAX (__MLXSW_SP_MR_ROUTE_PRIO_MAX - 1) + +struct mlxsw_sp_mr_route_key; + +struct mlxsw_sp_mr_tcam_ops { + size_t priv_size; + int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv); + void (*fini)(void *priv); + size_t route_priv_size; + int (*route_create)(struct mlxsw_sp *mlxsw_sp, void *priv, + void *route_priv, + struct mlxsw_sp_mr_route_key *key, + struct mlxsw_afa_block *afa_block, + enum mlxsw_sp_mr_route_prio prio); + void (*route_destroy)(struct mlxsw_sp *mlxsw_sp, void *priv, + void *route_priv, + struct mlxsw_sp_mr_route_key *key); + int (*route_update)(struct mlxsw_sp *mlxsw_sp, void *route_priv, + struct mlxsw_sp_mr_route_key *key, + struct mlxsw_afa_block *afa_block); +}; + +/* spectrum1_mr_tcam.c */ +extern const struct mlxsw_sp_mr_tcam_ops mlxsw_sp1_mr_tcam_ops; + +/* spectrum2_mr_tcam.c */ +extern const struct mlxsw_sp_mr_tcam_ops mlxsw_sp2_mr_tcam_ops; + #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c new file mode 100644 index 000000000000..5c8956573632 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c @@ -0,0 +1,275 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c + * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017-2018 Jiri Pirko <jiri@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <linux/kernel.h> +#include <linux/slab.h> + +#include "reg.h" +#include "core.h" +#include "spectrum.h" +#include "spectrum_acl_tcam.h" + +struct mlxsw_sp1_acl_tcam_region { + struct mlxsw_sp_acl_ctcam_region cregion; + struct mlxsw_sp_acl_tcam_region *region; + struct { + struct mlxsw_sp_acl_ctcam_chunk cchunk; + struct mlxsw_sp_acl_ctcam_entry centry; + struct mlxsw_sp_acl_rule_info *rulei; + } catchall; +}; + +struct mlxsw_sp1_acl_tcam_chunk { + struct mlxsw_sp_acl_ctcam_chunk cchunk; +}; + +struct mlxsw_sp1_acl_tcam_entry { + struct mlxsw_sp_acl_ctcam_entry centry; +}; + +static int +mlxsw_sp1_acl_ctcam_region_entry_insert(struct mlxsw_sp_acl_ctcam_region *cregion, + struct mlxsw_sp_acl_ctcam_entry *centry, + const char *mask) +{ + return 0; +} + +static void +mlxsw_sp1_acl_ctcam_region_entry_remove(struct mlxsw_sp_acl_ctcam_region *cregion, + struct mlxsw_sp_acl_ctcam_entry *centry) +{ +} + +static const struct mlxsw_sp_acl_ctcam_region_ops +mlxsw_sp1_acl_ctcam_region_ops = { + .entry_insert = mlxsw_sp1_acl_ctcam_region_entry_insert, + .entry_remove = mlxsw_sp1_acl_ctcam_region_entry_remove, +}; + +static int mlxsw_sp1_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv, + struct mlxsw_sp_acl_tcam *tcam) +{ + return 0; +} + +static void mlxsw_sp1_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv) +{ +} + +static int +mlxsw_sp1_acl_ctcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp1_acl_tcam_region *region) +{ + struct mlxsw_sp_acl_rule_info *rulei; + int err; + + mlxsw_sp_acl_ctcam_chunk_init(®ion->cregion, + ®ion->catchall.cchunk, + MLXSW_SP_ACL_TCAM_CATCHALL_PRIO); + rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl); + if (IS_ERR(rulei)) { + err = PTR_ERR(rulei); + goto err_rulei_create; + } + err = mlxsw_sp_acl_rulei_act_continue(rulei); + if (WARN_ON(err)) + goto err_rulei_act_continue; + err = mlxsw_sp_acl_rulei_commit(rulei); + if (err) + goto err_rulei_commit; + err = mlxsw_sp_acl_ctcam_entry_add(mlxsw_sp, ®ion->cregion, + ®ion->catchall.cchunk, + ®ion->catchall.centry, + rulei, false); + if (err) + goto err_entry_add; + region->catchall.rulei = rulei; + return 0; + +err_entry_add: +err_rulei_commit: +err_rulei_act_continue: + mlxsw_sp_acl_rulei_destroy(rulei); +err_rulei_create: + mlxsw_sp_acl_ctcam_chunk_fini(®ion->catchall.cchunk); + return err; +} + +static void 
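/* The catch-all added above installs a single entry at
 * MLXSW_SP_ACL_TCAM_CATCHALL_PRIO whose only action is "continue", so a
 * packet matching no real rule in the region still resolves to a
 * well-defined default; this reading is inferred from the
 * mlxsw_sp_acl_rulei_act_continue() call rather than stated by the patch.
 */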
+mlxsw_sp1_acl_ctcam_region_catchall_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp1_acl_tcam_region *region) +{ + struct mlxsw_sp_acl_rule_info *rulei = region->catchall.rulei; + + mlxsw_sp_acl_ctcam_entry_del(mlxsw_sp, ®ion->cregion, + ®ion->catchall.cchunk, + ®ion->catchall.centry); + mlxsw_sp_acl_rulei_destroy(rulei); + mlxsw_sp_acl_ctcam_chunk_fini(®ion->catchall.cchunk); +} + +static int +mlxsw_sp1_acl_tcam_region_init(struct mlxsw_sp *mlxsw_sp, void *region_priv, + void *tcam_priv, + struct mlxsw_sp_acl_tcam_region *_region) +{ + struct mlxsw_sp1_acl_tcam_region *region = region_priv; + int err; + + err = mlxsw_sp_acl_ctcam_region_init(mlxsw_sp, ®ion->cregion, + _region, + &mlxsw_sp1_acl_ctcam_region_ops); + if (err) + return err; + err = mlxsw_sp1_acl_ctcam_region_catchall_add(mlxsw_sp, region); + if (err) + goto err_catchall_add; + region->region = _region; + return 0; + +err_catchall_add: + mlxsw_sp_acl_ctcam_region_fini(®ion->cregion); + return err; +} + +static void +mlxsw_sp1_acl_tcam_region_fini(struct mlxsw_sp *mlxsw_sp, void *region_priv) +{ + struct mlxsw_sp1_acl_tcam_region *region = region_priv; + + mlxsw_sp1_acl_ctcam_region_catchall_del(mlxsw_sp, region); + mlxsw_sp_acl_ctcam_region_fini(®ion->cregion); +} + +static int +mlxsw_sp1_acl_tcam_region_associate(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region) +{ + return 0; +} + +static void mlxsw_sp1_acl_tcam_chunk_init(void *region_priv, void *chunk_priv, + unsigned int priority) +{ + struct mlxsw_sp1_acl_tcam_region *region = region_priv; + struct mlxsw_sp1_acl_tcam_chunk *chunk = chunk_priv; + + mlxsw_sp_acl_ctcam_chunk_init(®ion->cregion, &chunk->cchunk, + priority); +} + +static void mlxsw_sp1_acl_tcam_chunk_fini(void *chunk_priv) +{ + struct mlxsw_sp1_acl_tcam_chunk *chunk = chunk_priv; + + mlxsw_sp_acl_ctcam_chunk_fini(&chunk->cchunk); +} + +static int mlxsw_sp1_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp, + void *region_priv, void *chunk_priv, + void *entry_priv, + struct mlxsw_sp_acl_rule_info *rulei) +{ + struct mlxsw_sp1_acl_tcam_region *region = region_priv; + struct mlxsw_sp1_acl_tcam_chunk *chunk = chunk_priv; + struct mlxsw_sp1_acl_tcam_entry *entry = entry_priv; + + return mlxsw_sp_acl_ctcam_entry_add(mlxsw_sp, ®ion->cregion, + &chunk->cchunk, &entry->centry, + rulei, false); +} + +static void mlxsw_sp1_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp, + void *region_priv, void *chunk_priv, + void *entry_priv) +{ + struct mlxsw_sp1_acl_tcam_region *region = region_priv; + struct mlxsw_sp1_acl_tcam_chunk *chunk = chunk_priv; + struct mlxsw_sp1_acl_tcam_entry *entry = entry_priv; + + mlxsw_sp_acl_ctcam_entry_del(mlxsw_sp, ®ion->cregion, + &chunk->cchunk, &entry->centry); +} + +static int +mlxsw_sp1_acl_tcam_region_entry_activity_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *_region, + unsigned int offset, + bool *activity) +{ + char ptce2_pl[MLXSW_REG_PTCE2_LEN]; + int err; + + mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_QUERY_CLEAR_ON_READ, + _region->tcam_region_info, offset, 0); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl); + if (err) + return err; + *activity = mlxsw_reg_ptce2_a_get(ptce2_pl); + return 0; +} + +static int +mlxsw_sp1_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp, + void *region_priv, void *entry_priv, + bool *activity) +{ + struct mlxsw_sp1_acl_tcam_region *region = region_priv; + struct mlxsw_sp1_acl_tcam_entry *entry = entry_priv; + unsigned int offset; + + offset = 
mlxsw_sp_acl_ctcam_entry_offset(&entry->centry); + return mlxsw_sp1_acl_tcam_region_entry_activity_get(mlxsw_sp, + region->region, + offset, activity); +} + +const struct mlxsw_sp_acl_tcam_ops mlxsw_sp1_acl_tcam_ops = { + .key_type = MLXSW_REG_PTAR_KEY_TYPE_FLEX, + .priv_size = 0, + .init = mlxsw_sp1_acl_tcam_init, + .fini = mlxsw_sp1_acl_tcam_fini, + .region_priv_size = sizeof(struct mlxsw_sp1_acl_tcam_region), + .region_init = mlxsw_sp1_acl_tcam_region_init, + .region_fini = mlxsw_sp1_acl_tcam_region_fini, + .region_associate = mlxsw_sp1_acl_tcam_region_associate, + .chunk_priv_size = sizeof(struct mlxsw_sp1_acl_tcam_chunk), + .chunk_init = mlxsw_sp1_acl_tcam_chunk_init, + .chunk_fini = mlxsw_sp1_acl_tcam_chunk_fini, + .entry_priv_size = sizeof(struct mlxsw_sp1_acl_tcam_entry), + .entry_add = mlxsw_sp1_acl_tcam_entry_add, + .entry_del = mlxsw_sp1_acl_tcam_entry_del, + .entry_activity_get = mlxsw_sp1_acl_tcam_entry_activity_get, +}; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c new file mode 100644 index 000000000000..0d4583894a97 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c @@ -0,0 +1,459 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + * Copyright (c) 2018 Jiri Pirko <jiri@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <linux/kernel.h> +#include <linux/bitops.h> + +#include "spectrum.h" + +#define MLXSW_SP1_KVDL_SINGLE_BASE 0 +#define MLXSW_SP1_KVDL_SINGLE_SIZE 16384 +#define MLXSW_SP1_KVDL_SINGLE_END \ + (MLXSW_SP1_KVDL_SINGLE_SIZE + MLXSW_SP1_KVDL_SINGLE_BASE - 1) + +#define MLXSW_SP1_KVDL_CHUNKS_BASE \ + (MLXSW_SP1_KVDL_SINGLE_BASE + MLXSW_SP1_KVDL_SINGLE_SIZE) +#define MLXSW_SP1_KVDL_CHUNKS_SIZE 49152 +#define MLXSW_SP1_KVDL_CHUNKS_END \ + (MLXSW_SP1_KVDL_CHUNKS_SIZE + MLXSW_SP1_KVDL_CHUNKS_BASE - 1) + +#define MLXSW_SP1_KVDL_LARGE_CHUNKS_BASE \ + (MLXSW_SP1_KVDL_CHUNKS_BASE + MLXSW_SP1_KVDL_CHUNKS_SIZE) +#define MLXSW_SP1_KVDL_LARGE_CHUNKS_SIZE \ + (MLXSW_SP_KVD_LINEAR_SIZE - MLXSW_SP1_KVDL_LARGE_CHUNKS_BASE) +#define MLXSW_SP1_KVDL_LARGE_CHUNKS_END \ + (MLXSW_SP1_KVDL_LARGE_CHUNKS_SIZE + MLXSW_SP1_KVDL_LARGE_CHUNKS_BASE - 1) + +#define MLXSW_SP1_KVDL_SINGLE_ALLOC_SIZE 1 +#define MLXSW_SP1_KVDL_CHUNKS_ALLOC_SIZE 32 +#define MLXSW_SP1_KVDL_LARGE_CHUNKS_ALLOC_SIZE 512 + +struct mlxsw_sp1_kvdl_part_info { + unsigned int part_index; + unsigned int start_index; + unsigned int end_index; + unsigned int alloc_size; + enum mlxsw_sp_resource_id resource_id; +}; + +enum mlxsw_sp1_kvdl_part_id { + MLXSW_SP1_KVDL_PART_ID_SINGLE, + MLXSW_SP1_KVDL_PART_ID_CHUNKS, + MLXSW_SP1_KVDL_PART_ID_LARGE_CHUNKS, +}; + +#define MLXSW_SP1_KVDL_PART_INFO(id) \ +[MLXSW_SP1_KVDL_PART_ID_##id] = { \ + .start_index = MLXSW_SP1_KVDL_##id##_BASE, \ + .end_index = MLXSW_SP1_KVDL_##id##_END, \ + .alloc_size = MLXSW_SP1_KVDL_##id##_ALLOC_SIZE, \ + .resource_id = MLXSW_SP_RESOURCE_KVD_LINEAR_##id, \ +} + +static const struct mlxsw_sp1_kvdl_part_info mlxsw_sp1_kvdl_parts_info[] = { + MLXSW_SP1_KVDL_PART_INFO(SINGLE), + MLXSW_SP1_KVDL_PART_INFO(CHUNKS), + MLXSW_SP1_KVDL_PART_INFO(LARGE_CHUNKS), +}; + +#define MLXSW_SP1_KVDL_PARTS_INFO_LEN ARRAY_SIZE(mlxsw_sp1_kvdl_parts_info) + +struct mlxsw_sp1_kvdl_part { + struct mlxsw_sp1_kvdl_part_info info; + unsigned long usage[0]; /* Entries */ +}; + +struct mlxsw_sp1_kvdl { + struct mlxsw_sp1_kvdl_part *parts[MLXSW_SP1_KVDL_PARTS_INFO_LEN]; +}; + +static struct mlxsw_sp1_kvdl_part * +mlxsw_sp1_kvdl_alloc_size_part(struct mlxsw_sp1_kvdl *kvdl, + unsigned int alloc_size) +{ + struct mlxsw_sp1_kvdl_part *part, *min_part = NULL; + int i; + + for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++) { + part = kvdl->parts[i]; + if (alloc_size <= part->info.alloc_size && + (!min_part || + part->info.alloc_size <= min_part->info.alloc_size)) + min_part = part; + } + + return min_part ?: ERR_PTR(-ENOBUFS); +} + +static struct mlxsw_sp1_kvdl_part * +mlxsw_sp1_kvdl_index_part(struct mlxsw_sp1_kvdl *kvdl, u32 kvdl_index) +{ + struct mlxsw_sp1_kvdl_part *part; + int i; + + for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++) { + part = kvdl->parts[i]; + if (kvdl_index >= part->info.start_index && + kvdl_index <= part->info.end_index) + return part; + } + + return ERR_PTR(-EINVAL); +} + +static u32 +mlxsw_sp1_kvdl_to_kvdl_index(const struct mlxsw_sp1_kvdl_part_info *info, + unsigned int entry_index) +{ + return info->start_index + entry_index * info->alloc_size; +} + +static unsigned int +mlxsw_sp1_kvdl_to_entry_index(const struct mlxsw_sp1_kvdl_part_info *info, + u32 kvdl_index) +{ + return (kvdl_index - info->start_index) / info->alloc_size; +} + +static int mlxsw_sp1_kvdl_part_alloc(struct mlxsw_sp1_kvdl_part *part, + u32 *p_kvdl_index) +{ + const struct mlxsw_sp1_kvdl_part_info *info = &part->info; + unsigned int entry_index, nr_entries; + + nr_entries = (info->end_index - info->start_index + 1) 
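/* Worked example of the index math in this part allocator, using the
 * default layout above: the CHUNKS part spans indexes 16384..65535 with
 * alloc_size 32, so this expression yields (65535 - 16384 + 1) / 32 = 1536
 * allocatable chunks, and entry_index 5 maps back to kvdl_index
 * 16384 + 5 * 32 = 16544 via mlxsw_sp1_kvdl_to_kvdl_index().
 */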
/ + info->alloc_size; + entry_index = find_first_zero_bit(part->usage, nr_entries); + if (entry_index == nr_entries) + return -ENOBUFS; + __set_bit(entry_index, part->usage); + + *p_kvdl_index = mlxsw_sp1_kvdl_to_kvdl_index(info, entry_index); + + return 0; +} + +static void mlxsw_sp1_kvdl_part_free(struct mlxsw_sp1_kvdl_part *part, + u32 kvdl_index) +{ + const struct mlxsw_sp1_kvdl_part_info *info = &part->info; + unsigned int entry_index; + + entry_index = mlxsw_sp1_kvdl_to_entry_index(info, kvdl_index); + __clear_bit(entry_index, part->usage); +} + +static int mlxsw_sp1_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, void *priv, + enum mlxsw_sp_kvdl_entry_type type, + unsigned int entry_count, + u32 *p_entry_index) +{ + struct mlxsw_sp1_kvdl *kvdl = priv; + struct mlxsw_sp1_kvdl_part *part; + + /* Find partition with smallest allocation size satisfying the + * requested size. + */ + part = mlxsw_sp1_kvdl_alloc_size_part(kvdl, entry_count); + if (IS_ERR(part)) + return PTR_ERR(part); + + return mlxsw_sp1_kvdl_part_alloc(part, p_entry_index); +} + +static void mlxsw_sp1_kvdl_free(struct mlxsw_sp *mlxsw_sp, void *priv, + enum mlxsw_sp_kvdl_entry_type type, + unsigned int entry_count, int entry_index) +{ + struct mlxsw_sp1_kvdl *kvdl = priv; + struct mlxsw_sp1_kvdl_part *part; + + part = mlxsw_sp1_kvdl_index_part(kvdl, entry_index); + if (IS_ERR(part)) + return; + mlxsw_sp1_kvdl_part_free(part, entry_index); +} + +static int mlxsw_sp1_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp, + void *priv, + enum mlxsw_sp_kvdl_entry_type type, + unsigned int entry_count, + unsigned int *p_alloc_size) +{ + struct mlxsw_sp1_kvdl *kvdl = priv; + struct mlxsw_sp1_kvdl_part *part; + + part = mlxsw_sp1_kvdl_alloc_size_part(kvdl, entry_count); + if (IS_ERR(part)) + return PTR_ERR(part); + + *p_alloc_size = part->info.alloc_size; + + return 0; +} + +static void mlxsw_sp1_kvdl_part_update(struct mlxsw_sp1_kvdl_part *part, + struct mlxsw_sp1_kvdl_part *part_prev, + unsigned int size) +{ + if (!part_prev) { + part->info.end_index = size - 1; + } else { + part->info.start_index = part_prev->info.end_index + 1; + part->info.end_index = part->info.start_index + size - 1; + } +} + +static struct mlxsw_sp1_kvdl_part * +mlxsw_sp1_kvdl_part_init(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp1_kvdl_part_info *info, + struct mlxsw_sp1_kvdl_part *part_prev) +{ + struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); + struct mlxsw_sp1_kvdl_part *part; + bool need_update = true; + unsigned int nr_entries; + size_t usage_size; + u64 resource_size; + int err; + + err = devlink_resource_size_get(devlink, info->resource_id, + &resource_size); + if (err) { + need_update = false; + resource_size = info->end_index - info->start_index + 1; + } + + nr_entries = div_u64(resource_size, info->alloc_size); + usage_size = BITS_TO_LONGS(nr_entries) * sizeof(unsigned long); + part = kzalloc(sizeof(*part) + usage_size, GFP_KERNEL); + if (!part) + return ERR_PTR(-ENOMEM); + + memcpy(&part->info, info, sizeof(part->info)); + + if (need_update) + mlxsw_sp1_kvdl_part_update(part, part_prev, resource_size); + return part; +} + +static void mlxsw_sp1_kvdl_part_fini(struct mlxsw_sp1_kvdl_part *part) +{ + kfree(part); +} + +static int mlxsw_sp1_kvdl_parts_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp1_kvdl *kvdl) +{ + const struct mlxsw_sp1_kvdl_part_info *info; + struct mlxsw_sp1_kvdl_part *part_prev = NULL; + int err, i; + + for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++) { + info = &mlxsw_sp1_kvdl_parts_info[i]; + kvdl->parts[i] = 
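/* mlxsw_sp1_kvdl_part_init() above prefers the devlink resource size when
 * the user has resized a part and falls back to the static defaults
 * otherwise; mlxsw_sp1_kvdl_part_update() then re-bases each part directly
 * after its predecessor so the KVD linear space stays contiguous.
 */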
mlxsw_sp1_kvdl_part_init(mlxsw_sp, info, + part_prev); + if (IS_ERR(kvdl->parts[i])) { + err = PTR_ERR(kvdl->parts[i]); + goto err_kvdl_part_init; + } + part_prev = kvdl->parts[i]; + } + return 0; + +err_kvdl_part_init: + for (i--; i >= 0; i--) + mlxsw_sp1_kvdl_part_fini(kvdl->parts[i]); + return err; +} + +static void mlxsw_sp1_kvdl_parts_fini(struct mlxsw_sp1_kvdl *kvdl) +{ + int i; + + for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++) + mlxsw_sp1_kvdl_part_fini(kvdl->parts[i]); +} + +static u64 mlxsw_sp1_kvdl_part_occ(struct mlxsw_sp1_kvdl_part *part) +{ + const struct mlxsw_sp1_kvdl_part_info *info = &part->info; + unsigned int nr_entries; + int bit = -1; + u64 occ = 0; + + nr_entries = (info->end_index - + info->start_index + 1) / + info->alloc_size; + while ((bit = find_next_bit(part->usage, nr_entries, bit + 1)) + < nr_entries) + occ += info->alloc_size; + return occ; +} + +static u64 mlxsw_sp1_kvdl_occ_get(void *priv) +{ + const struct mlxsw_sp1_kvdl *kvdl = priv; + u64 occ = 0; + int i; + + for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++) + occ += mlxsw_sp1_kvdl_part_occ(kvdl->parts[i]); + + return occ; +} + +static u64 mlxsw_sp1_kvdl_single_occ_get(void *priv) +{ + const struct mlxsw_sp1_kvdl *kvdl = priv; + struct mlxsw_sp1_kvdl_part *part; + + part = kvdl->parts[MLXSW_SP1_KVDL_PART_ID_SINGLE]; + return mlxsw_sp1_kvdl_part_occ(part); +} + +static u64 mlxsw_sp1_kvdl_chunks_occ_get(void *priv) +{ + const struct mlxsw_sp1_kvdl *kvdl = priv; + struct mlxsw_sp1_kvdl_part *part; + + part = kvdl->parts[MLXSW_SP1_KVDL_PART_ID_CHUNKS]; + return mlxsw_sp1_kvdl_part_occ(part); +} + +static u64 mlxsw_sp1_kvdl_large_chunks_occ_get(void *priv) +{ + const struct mlxsw_sp1_kvdl *kvdl = priv; + struct mlxsw_sp1_kvdl_part *part; + + part = kvdl->parts[MLXSW_SP1_KVDL_PART_ID_LARGE_CHUNKS]; + return mlxsw_sp1_kvdl_part_occ(part); +} + +static int mlxsw_sp1_kvdl_init(struct mlxsw_sp *mlxsw_sp, void *priv) +{ + struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); + struct mlxsw_sp1_kvdl *kvdl = priv; + int err; + + err = mlxsw_sp1_kvdl_parts_init(mlxsw_sp, kvdl); + if (err) + return err; + devlink_resource_occ_get_register(devlink, + MLXSW_SP_RESOURCE_KVD_LINEAR, + mlxsw_sp1_kvdl_occ_get, + kvdl); + devlink_resource_occ_get_register(devlink, + MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE, + mlxsw_sp1_kvdl_single_occ_get, + kvdl); + devlink_resource_occ_get_register(devlink, + MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS, + mlxsw_sp1_kvdl_chunks_occ_get, + kvdl); + devlink_resource_occ_get_register(devlink, + MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS, + mlxsw_sp1_kvdl_large_chunks_occ_get, + kvdl); + return 0; +} + +static void mlxsw_sp1_kvdl_fini(struct mlxsw_sp *mlxsw_sp, void *priv) +{ + struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); + struct mlxsw_sp1_kvdl *kvdl = priv; + + devlink_resource_occ_get_unregister(devlink, + MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS); + devlink_resource_occ_get_unregister(devlink, + MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS); + devlink_resource_occ_get_unregister(devlink, + MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE); + devlink_resource_occ_get_unregister(devlink, + MLXSW_SP_RESOURCE_KVD_LINEAR); + mlxsw_sp1_kvdl_parts_fini(kvdl); +} + +const struct mlxsw_sp_kvdl_ops mlxsw_sp1_kvdl_ops = { + .priv_size = sizeof(struct mlxsw_sp1_kvdl), + .init = mlxsw_sp1_kvdl_init, + .fini = mlxsw_sp1_kvdl_fini, + .alloc = mlxsw_sp1_kvdl_alloc, + .free = mlxsw_sp1_kvdl_free, + .alloc_size_query = mlxsw_sp1_kvdl_alloc_size_query, +}; + +int mlxsw_sp1_kvdl_resources_register(struct mlxsw_core 
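/* The resources registered below nest under the KVD linear devlink
 * resource; a usage sketch (devlink syntax assumed, device name
 * hypothetical):
 *
 *	devlink resource show pci/0000:03:00.0
 *	devlink resource set pci/0000:03:00.0 path /kvd/linear/singles size 32768
 *
 * followed by a devlink reload so the new partitioning takes effect.
 */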
*mlxsw_core) +{ + struct devlink *devlink = priv_to_devlink(mlxsw_core); + static struct devlink_resource_size_params size_params; + u32 kvdl_max_size; + int err; + + kvdl_max_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) - + MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) - + MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE); + + devlink_resource_size_params_init(&size_params, 0, kvdl_max_size, + MLXSW_SP1_KVDL_SINGLE_ALLOC_SIZE, + DEVLINK_RESOURCE_UNIT_ENTRY); + err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_SINGLES, + MLXSW_SP1_KVDL_SINGLE_SIZE, + MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE, + MLXSW_SP_RESOURCE_KVD_LINEAR, + &size_params); + if (err) + return err; + + devlink_resource_size_params_init(&size_params, 0, kvdl_max_size, + MLXSW_SP1_KVDL_CHUNKS_ALLOC_SIZE, + DEVLINK_RESOURCE_UNIT_ENTRY); + err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS, + MLXSW_SP1_KVDL_CHUNKS_SIZE, + MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS, + MLXSW_SP_RESOURCE_KVD_LINEAR, + &size_params); + if (err) + return err; + + devlink_resource_size_params_init(&size_params, 0, kvdl_max_size, + MLXSW_SP1_KVDL_LARGE_CHUNKS_ALLOC_SIZE, + DEVLINK_RESOURCE_UNIT_ENTRY); + err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS, + MLXSW_SP1_KVDL_LARGE_CHUNKS_SIZE, + MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS, + MLXSW_SP_RESOURCE_KVD_LINEAR, + &size_params); + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c new file mode 100644 index 000000000000..fc649fe7134e --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c @@ -0,0 +1,374 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c + * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com> + * Copyright (c) 2018 Jiri Pirko <jiri@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
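The Spectrum-1 KVDL code above manages each partition with a bitmap in which one bit covers alloc_size consecutive KVD linear indexes: allocation is a find_first_zero_bit() first-fit, freeing clears the bit, and occupancy is counted by walking the set bits. A minimal, self-contained userspace sketch of that scheme follows; the demo_* names are invented for illustration and are not driver API.

#include <stdio.h>

#define SLOTS 8

struct demo_part {
	unsigned int start_index;	/* first KVDL index of the partition */
	unsigned int alloc_size;	/* indexes covered by one usage bit */
	unsigned char usage[SLOTS];	/* 0 = free, 1 = taken */
};

static int demo_part_alloc(struct demo_part *part, unsigned int *kvdl_index)
{
	unsigned int i;

	/* first-fit scan, like find_first_zero_bit() in the driver */
	for (i = 0; i < SLOTS; i++) {
		if (!part->usage[i]) {
			part->usage[i] = 1;
			*kvdl_index = part->start_index +
				      i * part->alloc_size;
			return 0;
		}
	}
	return -1;	/* the driver returns -ENOBUFS here */
}

static void demo_part_free(struct demo_part *part, unsigned int kvdl_index)
{
	unsigned int i = (kvdl_index - part->start_index) / part->alloc_size;

	part->usage[i] = 0;
}

int main(void)
{
	struct demo_part part = { .start_index = 0x100, .alloc_size = 32 };
	unsigned int index;

	if (!demo_part_alloc(&part, &index))
		printf("allocated KVDL index 0x%x\n", index);	/* 0x100 */
	demo_part_free(&part, index);
	return 0;
}

Note how the driver layers partition selection on top of this: mlxsw_sp1_kvdl_alloc_size_part() picks the partition with the smallest alloc_size that still satisfies the requested entry count, which bounds the internal fragmentation of each allocation.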
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <linux/kernel.h> +#include <linux/parman.h> + +#include "reg.h" +#include "spectrum.h" +#include "core_acl_flex_actions.h" +#include "spectrum_mr.h" + +struct mlxsw_sp1_mr_tcam_region { + struct mlxsw_sp *mlxsw_sp; + enum mlxsw_reg_rtar_key_type rtar_key_type; + struct parman *parman; + struct parman_prio *parman_prios; +}; + +struct mlxsw_sp1_mr_tcam { + struct mlxsw_sp1_mr_tcam_region tcam_regions[MLXSW_SP_L3_PROTO_MAX]; +}; + +struct mlxsw_sp1_mr_tcam_route { + struct parman_item parman_item; + struct parman_prio *parman_prio; +}; + +static int mlxsw_sp1_mr_tcam_route_replace(struct mlxsw_sp *mlxsw_sp, + struct parman_item *parman_item, + struct mlxsw_sp_mr_route_key *key, + struct mlxsw_afa_block *afa_block) +{ + char rmft2_pl[MLXSW_REG_RMFT2_LEN]; + + switch (key->proto) { + case MLXSW_SP_L3_PROTO_IPV4: + mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, true, parman_item->index, + key->vrid, + MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0, + ntohl(key->group.addr4), + ntohl(key->group_mask.addr4), + ntohl(key->source.addr4), + ntohl(key->source_mask.addr4), + mlxsw_afa_block_first_set(afa_block)); + break; + case MLXSW_SP_L3_PROTO_IPV6: + mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, true, parman_item->index, + key->vrid, + MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0, + key->group.addr6, + key->group_mask.addr6, + key->source.addr6, + key->source_mask.addr6, + mlxsw_afa_block_first_set(afa_block)); + } + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl); +} + +static int mlxsw_sp1_mr_tcam_route_remove(struct mlxsw_sp *mlxsw_sp, + struct parman_item *parman_item, + struct mlxsw_sp_mr_route_key *key) +{ + struct in6_addr zero_addr = IN6ADDR_ANY_INIT; + char rmft2_pl[MLXSW_REG_RMFT2_LEN]; + + switch (key->proto) { + case MLXSW_SP_L3_PROTO_IPV4: + mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, false, parman_item->index, + key->vrid, 0, 0, 0, 0, 0, 0, NULL); + break; + case MLXSW_SP_L3_PROTO_IPV6: + mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, false, parman_item->index, + key->vrid, 0, 0, zero_addr, zero_addr, + zero_addr, zero_addr, NULL); + break; + } + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl); +} + +static struct mlxsw_sp1_mr_tcam_region * +mlxsw_sp1_mr_tcam_protocol_region(struct mlxsw_sp1_mr_tcam *mr_tcam, + enum mlxsw_sp_l3proto proto) +{ + return &mr_tcam->tcam_regions[proto]; +} + +static int +mlxsw_sp1_mr_tcam_route_parman_item_add(struct mlxsw_sp1_mr_tcam *mr_tcam, + struct mlxsw_sp1_mr_tcam_route *route, + struct mlxsw_sp_mr_route_key *key, + enum mlxsw_sp_mr_route_prio prio) +{ + struct mlxsw_sp1_mr_tcam_region *tcam_region; + int err; + + tcam_region = mlxsw_sp1_mr_tcam_protocol_region(mr_tcam, key->proto); + err = parman_item_add(tcam_region->parman, + &tcam_region->parman_prios[prio], + &route->parman_item); + if (err) + return err; + + route->parman_prio = &tcam_region->parman_prios[prio]; + return 0; +} + +static void +mlxsw_sp1_mr_tcam_route_parman_item_remove(struct mlxsw_sp1_mr_tcam *mr_tcam, + struct mlxsw_sp1_mr_tcam_route *route, + 
struct mlxsw_sp_mr_route_key *key) +{ + struct mlxsw_sp1_mr_tcam_region *tcam_region; + + tcam_region = mlxsw_sp1_mr_tcam_protocol_region(mr_tcam, key->proto); + parman_item_remove(tcam_region->parman, + route->parman_prio, &route->parman_item); +} + +static int +mlxsw_sp1_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv, + void *route_priv, + struct mlxsw_sp_mr_route_key *key, + struct mlxsw_afa_block *afa_block, + enum mlxsw_sp_mr_route_prio prio) +{ + struct mlxsw_sp1_mr_tcam_route *route = route_priv; + struct mlxsw_sp1_mr_tcam *mr_tcam = priv; + int err; + + err = mlxsw_sp1_mr_tcam_route_parman_item_add(mr_tcam, route, + key, prio); + if (err) + return err; + + err = mlxsw_sp1_mr_tcam_route_replace(mlxsw_sp, &route->parman_item, + key, afa_block); + if (err) + goto err_route_replace; + return 0; + +err_route_replace: + mlxsw_sp1_mr_tcam_route_parman_item_remove(mr_tcam, route, key); + return err; +} + +static void +mlxsw_sp1_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp, void *priv, + void *route_priv, + struct mlxsw_sp_mr_route_key *key) +{ + struct mlxsw_sp1_mr_tcam_route *route = route_priv; + struct mlxsw_sp1_mr_tcam *mr_tcam = priv; + + mlxsw_sp1_mr_tcam_route_remove(mlxsw_sp, &route->parman_item, key); + mlxsw_sp1_mr_tcam_route_parman_item_remove(mr_tcam, route, key); +} + +static int +mlxsw_sp1_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp, + void *route_priv, + struct mlxsw_sp_mr_route_key *key, + struct mlxsw_afa_block *afa_block) +{ + struct mlxsw_sp1_mr_tcam_route *route = route_priv; + + return mlxsw_sp1_mr_tcam_route_replace(mlxsw_sp, &route->parman_item, + key, afa_block); +} + +#define MLXSW_SP1_MR_TCAM_REGION_BASE_COUNT 16 +#define MLXSW_SP1_MR_TCAM_REGION_RESIZE_STEP 16 + +static int +mlxsw_sp1_mr_tcam_region_alloc(struct mlxsw_sp1_mr_tcam_region *mr_tcam_region) +{ + struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp; + char rtar_pl[MLXSW_REG_RTAR_LEN]; + + mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_ALLOCATE, + mr_tcam_region->rtar_key_type, + MLXSW_SP1_MR_TCAM_REGION_BASE_COUNT); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl); +} + +static void +mlxsw_sp1_mr_tcam_region_free(struct mlxsw_sp1_mr_tcam_region *mr_tcam_region) +{ + struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp; + char rtar_pl[MLXSW_REG_RTAR_LEN]; + + mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_DEALLOCATE, + mr_tcam_region->rtar_key_type, 0); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl); +} + +static int mlxsw_sp1_mr_tcam_region_parman_resize(void *priv, + unsigned long new_count) +{ + struct mlxsw_sp1_mr_tcam_region *mr_tcam_region = priv; + struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp; + char rtar_pl[MLXSW_REG_RTAR_LEN]; + u64 max_tcam_rules; + + max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES); + if (new_count > max_tcam_rules) + return -EINVAL; + mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_RESIZE, + mr_tcam_region->rtar_key_type, new_count); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl); +} + +static void mlxsw_sp1_mr_tcam_region_parman_move(void *priv, + unsigned long from_index, + unsigned long to_index, + unsigned long count) +{ + struct mlxsw_sp1_mr_tcam_region *mr_tcam_region = priv; + struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp; + char rrcr_pl[MLXSW_REG_RRCR_LEN]; + + mlxsw_reg_rrcr_pack(rrcr_pl, MLXSW_REG_RRCR_OP_MOVE, + from_index, count, + mr_tcam_region->rtar_key_type, to_index); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rrcr), rrcr_pl); +} + +static const struct 
parman_ops mlxsw_sp1_mr_tcam_region_parman_ops = { + .base_count = MLXSW_SP1_MR_TCAM_REGION_BASE_COUNT, + .resize_step = MLXSW_SP1_MR_TCAM_REGION_RESIZE_STEP, + .resize = mlxsw_sp1_mr_tcam_region_parman_resize, + .move = mlxsw_sp1_mr_tcam_region_parman_move, + .algo = PARMAN_ALGO_TYPE_LSORT, +}; + +static int +mlxsw_sp1_mr_tcam_region_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp1_mr_tcam_region *mr_tcam_region, + enum mlxsw_reg_rtar_key_type rtar_key_type) +{ + struct parman_prio *parman_prios; + struct parman *parman; + int err; + int i; + + mr_tcam_region->rtar_key_type = rtar_key_type; + mr_tcam_region->mlxsw_sp = mlxsw_sp; + + err = mlxsw_sp1_mr_tcam_region_alloc(mr_tcam_region); + if (err) + return err; + + parman = parman_create(&mlxsw_sp1_mr_tcam_region_parman_ops, + mr_tcam_region); + if (!parman) { + err = -ENOMEM; + goto err_parman_create; + } + mr_tcam_region->parman = parman; + + parman_prios = kmalloc_array(MLXSW_SP_MR_ROUTE_PRIO_MAX + 1, + sizeof(*parman_prios), GFP_KERNEL); + if (!parman_prios) { + err = -ENOMEM; + goto err_parman_prios_alloc; + } + mr_tcam_region->parman_prios = parman_prios; + + for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++) + parman_prio_init(mr_tcam_region->parman, + &mr_tcam_region->parman_prios[i], i); + return 0; + +err_parman_prios_alloc: + parman_destroy(parman); +err_parman_create: + mlxsw_sp1_mr_tcam_region_free(mr_tcam_region); + return err; +} + +static void +mlxsw_sp1_mr_tcam_region_fini(struct mlxsw_sp1_mr_tcam_region *mr_tcam_region) +{ + int i; + + for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++) + parman_prio_fini(&mr_tcam_region->parman_prios[i]); + kfree(mr_tcam_region->parman_prios); + parman_destroy(mr_tcam_region->parman); + mlxsw_sp1_mr_tcam_region_free(mr_tcam_region); +} + +static int mlxsw_sp1_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv) +{ + struct mlxsw_sp1_mr_tcam *mr_tcam = priv; + struct mlxsw_sp1_mr_tcam_region *region = &mr_tcam->tcam_regions[0]; + u32 rtar_key; + int err; + + if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_TCAM_RULES)) + return -EIO; + + rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST; + err = mlxsw_sp1_mr_tcam_region_init(mlxsw_sp, + ®ion[MLXSW_SP_L3_PROTO_IPV4], + rtar_key); + if (err) + return err; + + rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV6_MULTICAST; + err = mlxsw_sp1_mr_tcam_region_init(mlxsw_sp, + ®ion[MLXSW_SP_L3_PROTO_IPV6], + rtar_key); + if (err) + goto err_ipv6_region_init; + + return 0; + +err_ipv6_region_init: + mlxsw_sp1_mr_tcam_region_fini(®ion[MLXSW_SP_L3_PROTO_IPV4]); + return err; +} + +static void mlxsw_sp1_mr_tcam_fini(void *priv) +{ + struct mlxsw_sp1_mr_tcam *mr_tcam = priv; + struct mlxsw_sp1_mr_tcam_region *region = &mr_tcam->tcam_regions[0]; + + mlxsw_sp1_mr_tcam_region_fini(®ion[MLXSW_SP_L3_PROTO_IPV6]); + mlxsw_sp1_mr_tcam_region_fini(®ion[MLXSW_SP_L3_PROTO_IPV4]); +} + +const struct mlxsw_sp_mr_tcam_ops mlxsw_sp1_mr_tcam_ops = { + .priv_size = sizeof(struct mlxsw_sp1_mr_tcam), + .init = mlxsw_sp1_mr_tcam_init, + .fini = mlxsw_sp1_mr_tcam_fini, + .route_priv_size = sizeof(struct mlxsw_sp1_mr_tcam_route), + .route_create = mlxsw_sp1_mr_tcam_route_create, + .route_destroy = mlxsw_sp1_mr_tcam_route_destroy, + .route_update = mlxsw_sp1_mr_tcam_route_update, +}; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c new file mode 100644 index 000000000000..22c876496379 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c @@ -0,0 +1,270 @@ +/* + * 
drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + * Copyright (c) 2018 Jiri Pirko <jiri@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
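mlxsw_sp1_mr_tcam_region_init() and mlxsw_sp1_mr_tcam_init() above both use the kernel's goto-based unwind idiom: every acquired resource gets a matching error label, and a failure releases everything acquired so far in reverse order, while the fini path mirrors init exactly. A stripped-down, self-contained sketch of that shape; the demo_* names and resources are placeholders, not the driver's.

#include <errno.h>
#include <stdlib.h>

struct demo_region {
	void *parman;		/* stands in for parman_create() */
	void *parman_prios;	/* stands in for the kmalloc_array() */
};

static int demo_region_init(struct demo_region *region)
{
	int err;

	region->parman = malloc(64);
	if (!region->parman) {
		err = -ENOMEM;
		goto err_parman_create;
	}

	region->parman_prios = malloc(64);
	if (!region->parman_prios) {
		err = -ENOMEM;
		goto err_parman_prios_alloc;
	}
	return 0;

err_parman_prios_alloc:	/* undo in reverse order of acquisition */
	free(region->parman);
err_parman_create:
	return err;
}

static void demo_region_fini(struct demo_region *region)
{
	/* fini mirrors init in exact reverse order */
	free(region->parman_prios);
	free(region->parman);
}

int main(void)
{
	struct demo_region region;

	if (demo_region_init(&region))
		return 1;
	demo_region_fini(&region);
	return 0;
}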
+ */ + +#include <linux/kernel.h> + +#include "spectrum.h" +#include "spectrum_acl_tcam.h" +#include "core_acl_flex_actions.h" + +struct mlxsw_sp2_acl_tcam { + struct mlxsw_sp_acl_atcam atcam; + u32 kvdl_index; + unsigned int kvdl_count; +}; + +struct mlxsw_sp2_acl_tcam_region { + struct mlxsw_sp_acl_atcam_region aregion; + struct mlxsw_sp_acl_tcam_region *region; +}; + +struct mlxsw_sp2_acl_tcam_chunk { + struct mlxsw_sp_acl_atcam_chunk achunk; +}; + +struct mlxsw_sp2_acl_tcam_entry { + struct mlxsw_sp_acl_atcam_entry aentry; + struct mlxsw_afa_block *act_block; +}; + +static int +mlxsw_sp2_acl_ctcam_region_entry_insert(struct mlxsw_sp_acl_ctcam_region *cregion, + struct mlxsw_sp_acl_ctcam_entry *centry, + const char *mask) +{ + struct mlxsw_sp_acl_atcam_region *aregion; + struct mlxsw_sp_acl_atcam_entry *aentry; + struct mlxsw_sp_acl_erp *erp; + + aregion = mlxsw_sp_acl_tcam_cregion_aregion(cregion); + aentry = mlxsw_sp_acl_tcam_centry_aentry(centry); + + erp = mlxsw_sp_acl_erp_get(aregion, mask, true); + if (IS_ERR(erp)) + return PTR_ERR(erp); + aentry->erp = erp; + + return 0; +} + +static void +mlxsw_sp2_acl_ctcam_region_entry_remove(struct mlxsw_sp_acl_ctcam_region *cregion, + struct mlxsw_sp_acl_ctcam_entry *centry) +{ + struct mlxsw_sp_acl_atcam_region *aregion; + struct mlxsw_sp_acl_atcam_entry *aentry; + + aregion = mlxsw_sp_acl_tcam_cregion_aregion(cregion); + aentry = mlxsw_sp_acl_tcam_centry_aentry(centry); + + mlxsw_sp_acl_erp_put(aregion, aentry->erp); +} + +static const struct mlxsw_sp_acl_ctcam_region_ops +mlxsw_sp2_acl_ctcam_region_ops = { + .entry_insert = mlxsw_sp2_acl_ctcam_region_entry_insert, + .entry_remove = mlxsw_sp2_acl_ctcam_region_entry_remove, +}; + +static int mlxsw_sp2_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv, + struct mlxsw_sp_acl_tcam *_tcam) +{ + struct mlxsw_sp2_acl_tcam *tcam = priv; + struct mlxsw_afa_block *afa_block; + char pefa_pl[MLXSW_REG_PEFA_LEN]; + char pgcr_pl[MLXSW_REG_PGCR_LEN]; + char *enc_actions; + int i; + int err; + + tcam->kvdl_count = _tcam->max_regions; + err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET, + tcam->kvdl_count, &tcam->kvdl_index); + if (err) + return err; + + /* Create flex action block, set default action (continue) + * but don't commit. We need just the current set encoding + * to be written using PEFA register to all indexes for all regions. 
+ */ + afa_block = mlxsw_afa_block_create(mlxsw_sp->afa); + if (!afa_block) { + err = -ENOMEM; + goto err_afa_block; + } + err = mlxsw_afa_block_continue(afa_block); + if (WARN_ON(err)) + goto err_afa_block_continue; + enc_actions = mlxsw_afa_block_cur_set(afa_block); + + for (i = 0; i < tcam->kvdl_count; i++) { + mlxsw_reg_pefa_pack(pefa_pl, tcam->kvdl_index + i, + true, enc_actions); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl); + if (err) + goto err_pefa_write; + } + mlxsw_reg_pgcr_pack(pgcr_pl, tcam->kvdl_index); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pgcr), pgcr_pl); + if (err) + goto err_pgcr_write; + + err = mlxsw_sp_acl_atcam_init(mlxsw_sp, &tcam->atcam); + if (err) + goto err_atcam_init; + + mlxsw_afa_block_destroy(afa_block); + return 0; + +err_atcam_init: +err_pgcr_write: +err_pefa_write: +err_afa_block_continue: + mlxsw_afa_block_destroy(afa_block); +err_afa_block: + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET, + tcam->kvdl_count, tcam->kvdl_index); + return err; +} + +static void mlxsw_sp2_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv) +{ + struct mlxsw_sp2_acl_tcam *tcam = priv; + + mlxsw_sp_acl_atcam_fini(mlxsw_sp, &tcam->atcam); + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET, + tcam->kvdl_count, tcam->kvdl_index); +} + +static int +mlxsw_sp2_acl_tcam_region_init(struct mlxsw_sp *mlxsw_sp, void *region_priv, + void *tcam_priv, + struct mlxsw_sp_acl_tcam_region *_region) +{ + struct mlxsw_sp2_acl_tcam_region *region = region_priv; + struct mlxsw_sp2_acl_tcam *tcam = tcam_priv; + + region->region = _region; + + return mlxsw_sp_acl_atcam_region_init(mlxsw_sp, &tcam->atcam, + ®ion->aregion, _region, + &mlxsw_sp2_acl_ctcam_region_ops); +} + +static void +mlxsw_sp2_acl_tcam_region_fini(struct mlxsw_sp *mlxsw_sp, void *region_priv) +{ + struct mlxsw_sp2_acl_tcam_region *region = region_priv; + + mlxsw_sp_acl_atcam_region_fini(®ion->aregion); +} + +static int +mlxsw_sp2_acl_tcam_region_associate(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region) +{ + return mlxsw_sp_acl_atcam_region_associate(mlxsw_sp, region->id); +} + +static void mlxsw_sp2_acl_tcam_chunk_init(void *region_priv, void *chunk_priv, + unsigned int priority) +{ + struct mlxsw_sp2_acl_tcam_region *region = region_priv; + struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv; + + mlxsw_sp_acl_atcam_chunk_init(®ion->aregion, &chunk->achunk, + priority); +} + +static void mlxsw_sp2_acl_tcam_chunk_fini(void *chunk_priv) +{ + struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv; + + mlxsw_sp_acl_atcam_chunk_fini(&chunk->achunk); +} + +static int mlxsw_sp2_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp, + void *region_priv, void *chunk_priv, + void *entry_priv, + struct mlxsw_sp_acl_rule_info *rulei) +{ + struct mlxsw_sp2_acl_tcam_region *region = region_priv; + struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv; + struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv; + + entry->act_block = rulei->act_block; + return mlxsw_sp_acl_atcam_entry_add(mlxsw_sp, ®ion->aregion, + &chunk->achunk, &entry->aentry, + rulei); +} + +static void mlxsw_sp2_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp, + void *region_priv, void *chunk_priv, + void *entry_priv) +{ + struct mlxsw_sp2_acl_tcam_region *region = region_priv; + struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv; + struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv; + + mlxsw_sp_acl_atcam_entry_del(mlxsw_sp, ®ion->aregion, &chunk->achunk, + &entry->aentry); +} + +static int 
+mlxsw_sp2_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp, + void *region_priv, void *entry_priv, + bool *activity) +{ + struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv; + + return mlxsw_afa_block_activity_get(entry->act_block, activity); +} + +const struct mlxsw_sp_acl_tcam_ops mlxsw_sp2_acl_tcam_ops = { + .key_type = MLXSW_REG_PTAR_KEY_TYPE_FLEX2, + .priv_size = sizeof(struct mlxsw_sp2_acl_tcam), + .init = mlxsw_sp2_acl_tcam_init, + .fini = mlxsw_sp2_acl_tcam_fini, + .region_priv_size = sizeof(struct mlxsw_sp2_acl_tcam_region), + .region_init = mlxsw_sp2_acl_tcam_region_init, + .region_fini = mlxsw_sp2_acl_tcam_region_fini, + .region_associate = mlxsw_sp2_acl_tcam_region_associate, + .chunk_priv_size = sizeof(struct mlxsw_sp2_acl_tcam_chunk), + .chunk_init = mlxsw_sp2_acl_tcam_chunk_init, + .chunk_fini = mlxsw_sp2_acl_tcam_chunk_fini, + .entry_priv_size = sizeof(struct mlxsw_sp2_acl_tcam_entry), + .entry_add = mlxsw_sp2_acl_tcam_entry_add, + .entry_del = mlxsw_sp2_acl_tcam_entry_del, + .entry_activity_get = mlxsw_sp2_acl_tcam_entry_activity_get, +}; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c new file mode 100644 index 000000000000..bacf7483c3fb --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c @@ -0,0 +1,302 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + * Copyright (c) 2018 Jiri Pirko <jiri@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
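spectrum2_acl_tcam.c above layers its objects by embedding: mlxsw_sp2_acl_tcam_entry embeds an A-TCAM entry, which in turn embeds a C-TCAM entry, and helpers such as mlxsw_sp_acl_tcam_centry_aentry() recover the outer object from a pointer to the embedded member. That is the container_of() pattern; a self-contained illustration follows, with demo_* names invented for the example.

#include <stddef.h>
#include <stdio.h>

/* same trick as the kernel's container_of() */
#define demo_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_centry {
	int slot;		/* filler member */
};

struct demo_aentry {
	struct demo_centry centry;	/* embedded C-TCAM part */
	int erp_id;
};

static struct demo_aentry *demo_centry_aentry(struct demo_centry *centry)
{
	return demo_container_of(centry, struct demo_aentry, centry);
}

int main(void)
{
	struct demo_aentry aentry = { .erp_id = 3 };

	/* recover the outer aentry from the inner centry pointer */
	printf("%d\n", demo_centry_aentry(&aentry.centry)->erp_id);	/* 3 */
	return 0;
}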
+ */ + +#include <linux/kernel.h> +#include <linux/bitops.h> + +#include "spectrum.h" +#include "core.h" +#include "reg.h" +#include "resources.h" + +struct mlxsw_sp2_kvdl_part_info { + u8 res_type; + /* For each defined partition we need to know how many + * usage bits we need and how many indexes there are + * represented by a single bit. This could be obtained from FW + * querying appropriate resources. So have the resource + * ids for this purpose in partition definition. + */ + enum mlxsw_res_id usage_bit_count_res_id; + enum mlxsw_res_id index_range_res_id; +}; + +#define MLXSW_SP2_KVDL_PART_INFO(_entry_type, _res_type, \ + _usage_bit_count_res_id, _index_range_res_id) \ +[MLXSW_SP_KVDL_ENTRY_TYPE_##_entry_type] = { \ + .res_type = _res_type, \ + .usage_bit_count_res_id = MLXSW_RES_ID_##_usage_bit_count_res_id, \ + .index_range_res_id = MLXSW_RES_ID_##_index_range_res_id, \ +} + +static const struct mlxsw_sp2_kvdl_part_info mlxsw_sp2_kvdl_parts_info[] = { + MLXSW_SP2_KVDL_PART_INFO(ADJ, 0x21, KVD_SIZE, MAX_KVD_LINEAR_RANGE), + MLXSW_SP2_KVDL_PART_INFO(ACTSET, 0x23, MAX_KVD_ACTION_SETS, + MAX_KVD_ACTION_SETS), + MLXSW_SP2_KVDL_PART_INFO(PBS, 0x24, KVD_SIZE, KVD_SIZE), + MLXSW_SP2_KVDL_PART_INFO(MCRIGR, 0x26, KVD_SIZE, KVD_SIZE), +}; + +#define MLXSW_SP2_KVDL_PARTS_INFO_LEN ARRAY_SIZE(mlxsw_sp2_kvdl_parts_info) + +struct mlxsw_sp2_kvdl_part { + const struct mlxsw_sp2_kvdl_part_info *info; + unsigned int usage_bit_count; + unsigned int indexes_per_usage_bit; + unsigned int last_allocated_bit; + unsigned long usage[0]; /* Usage bits */ +}; + +struct mlxsw_sp2_kvdl { + struct mlxsw_sp2_kvdl_part *parts[MLXSW_SP2_KVDL_PARTS_INFO_LEN]; +}; + +static int mlxsw_sp2_kvdl_part_find_zero_bits(struct mlxsw_sp2_kvdl_part *part, + unsigned int bit_count, + unsigned int *p_bit) +{ + unsigned int start_bit; + unsigned int bit; + unsigned int i; + bool wrap = false; + + start_bit = part->last_allocated_bit + 1; + if (start_bit == part->usage_bit_count) + start_bit = 0; + bit = start_bit; +again: + bit = find_next_zero_bit(part->usage, part->usage_bit_count, bit); + if (!wrap && bit + bit_count >= part->usage_bit_count) { + wrap = true; + bit = 0; + goto again; + } + if (wrap && bit + bit_count >= start_bit) + return -ENOBUFS; + for (i = 0; i < bit_count; i++) { + if (test_bit(bit + i, part->usage)) { + bit += bit_count; + goto again; + } + } + *p_bit = bit; + return 0; +} + +static int mlxsw_sp2_kvdl_part_alloc(struct mlxsw_sp2_kvdl_part *part, + unsigned int size, + u32 *p_kvdl_index) +{ + unsigned int bit_count; + unsigned int bit; + unsigned int i; + int err; + + bit_count = DIV_ROUND_UP(size, part->indexes_per_usage_bit); + err = mlxsw_sp2_kvdl_part_find_zero_bits(part, bit_count, &bit); + if (err) + return err; + for (i = 0; i < bit_count; i++) + __set_bit(bit + i, part->usage); + *p_kvdl_index = bit * part->indexes_per_usage_bit; + return 0; +} + +static int mlxsw_sp2_kvdl_rec_del(struct mlxsw_sp *mlxsw_sp, u8 res_type, + u16 size, u32 kvdl_index) +{ + char *iedr_pl; + int err; + + iedr_pl = kmalloc(MLXSW_REG_IEDR_LEN, GFP_KERNEL); + if (!iedr_pl) + return -ENOMEM; + + mlxsw_reg_iedr_pack(iedr_pl); + mlxsw_reg_iedr_rec_pack(iedr_pl, 0, res_type, size, kvdl_index); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(iedr), iedr_pl); + kfree(iedr_pl); + return err; +} + +static void mlxsw_sp2_kvdl_part_free(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp2_kvdl_part *part, + unsigned int size, u32 kvdl_index) +{ + unsigned int bit_count; + unsigned int bit; + unsigned int i; + int err; + + /* We need to
ask FW to delete previously used KVD linear index */ + err = mlxsw_sp2_kvdl_rec_del(mlxsw_sp, part->info->res_type, + size, kvdl_index); + if (err) + return; + + bit_count = DIV_ROUND_UP(size, part->indexes_per_usage_bit); + bit = kvdl_index / part->indexes_per_usage_bit; + for (i = 0; i < bit_count; i++) + __clear_bit(bit + i, part->usage); +} + +static int mlxsw_sp2_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, void *priv, + enum mlxsw_sp_kvdl_entry_type type, + unsigned int entry_count, + u32 *p_entry_index) +{ + unsigned int size = entry_count * mlxsw_sp_kvdl_entry_size(type); + struct mlxsw_sp2_kvdl *kvdl = priv; + struct mlxsw_sp2_kvdl_part *part = kvdl->parts[type]; + + return mlxsw_sp2_kvdl_part_alloc(part, size, p_entry_index); +} + +static void mlxsw_sp2_kvdl_free(struct mlxsw_sp *mlxsw_sp, void *priv, + enum mlxsw_sp_kvdl_entry_type type, + unsigned int entry_count, + int entry_index) +{ + unsigned int size = entry_count * mlxsw_sp_kvdl_entry_size(type); + struct mlxsw_sp2_kvdl *kvdl = priv; + struct mlxsw_sp2_kvdl_part *part = kvdl->parts[type]; + + return mlxsw_sp2_kvdl_part_free(mlxsw_sp, part, size, entry_index); +} + +static int mlxsw_sp2_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp, + void *priv, + enum mlxsw_sp_kvdl_entry_type type, + unsigned int entry_count, + unsigned int *p_alloc_count) +{ + *p_alloc_count = entry_count; + return 0; +} + +static struct mlxsw_sp2_kvdl_part * +mlxsw_sp2_kvdl_part_init(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp2_kvdl_part_info *info) +{ + unsigned int indexes_per_usage_bit; + struct mlxsw_sp2_kvdl_part *part; + unsigned int index_range; + unsigned int usage_bit_count; + size_t usage_size; + + if (!mlxsw_core_res_valid(mlxsw_sp->core, + info->usage_bit_count_res_id) || + !mlxsw_core_res_valid(mlxsw_sp->core, + info->index_range_res_id)) + return ERR_PTR(-EIO); + usage_bit_count = mlxsw_core_res_get(mlxsw_sp->core, + info->usage_bit_count_res_id); + index_range = mlxsw_core_res_get(mlxsw_sp->core, + info->index_range_res_id); + + /* For some partitions, one usage bit represents a group of indexes. + * That's why we compute the number of indexes per usage bit here, + * according to queried resources. 
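mlxsw_sp2_kvdl_part_find_zero_bits() above implements a rotating first-fit: the search resumes one bit past the previous allocation, wraps around at most once, and abandons a candidate run as soon as any bit inside it is set. A self-contained userspace sketch of the same control flow, using an array of bool instead of the kernel bitmap helpers; all names are illustrative.

#include <stdbool.h>
#include <stdio.h>

#define DEMO_BITS 16U

static bool usage[DEMO_BITS];
static unsigned int last_allocated_bit = DEMO_BITS - 1;

static int demo_find_zero_bits(unsigned int bit_count, unsigned int *p_bit)
{
	unsigned int start_bit = (last_allocated_bit + 1) % DEMO_BITS;
	unsigned int bit = start_bit;
	bool wrap = false;
	unsigned int i;

again:
	while (bit < DEMO_BITS && usage[bit])	/* find_next_zero_bit() */
		bit++;
	if (!wrap && bit + bit_count >= DEMO_BITS) {
		wrap = true;			/* wrap around, but only once */
		bit = 0;
		goto again;
	}
	if (wrap && bit + bit_count >= start_bit)
		return -1;			/* -ENOBUFS in the driver */
	for (i = 0; i < bit_count; i++) {
		if (usage[bit + i]) {		/* run is interrupted, skip it */
			bit += bit_count;
			goto again;
		}
	}
	*p_bit = bit;
	return 0;
}

int main(void)
{
	unsigned int bit;

	if (!demo_find_zero_bits(4, &bit)) {
		unsigned int i;

		for (i = 0; i < 4; i++)
			usage[bit + i] = true;
		last_allocated_bit = bit + 3;
		printf("allocated bits %u..%u\n", bit, bit + 3);	/* 0..3 */
	}
	return 0;
}

Resuming the scan after last_allocated_bit spreads allocations across the partition rather than always reusing the lowest free run, presumably so that just-freed indexes are not recycled immediately while the delete issued through IEDR takes effect.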
+ */ + indexes_per_usage_bit = index_range / usage_bit_count; + + usage_size = BITS_TO_LONGS(usage_bit_count) * sizeof(unsigned long); + part = kzalloc(sizeof(*part) + usage_size, GFP_KERNEL); + if (!part) + return ERR_PTR(-ENOMEM); + part->info = info; + part->usage_bit_count = usage_bit_count; + part->indexes_per_usage_bit = indexes_per_usage_bit; + part->last_allocated_bit = usage_bit_count - 1; + return part; +} + +static void mlxsw_sp2_kvdl_part_fini(struct mlxsw_sp2_kvdl_part *part) +{ + kfree(part); +} + +static int mlxsw_sp2_kvdl_parts_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp2_kvdl *kvdl) +{ + const struct mlxsw_sp2_kvdl_part_info *info; + int i; + int err; + + for (i = 0; i < MLXSW_SP2_KVDL_PARTS_INFO_LEN; i++) { + info = &mlxsw_sp2_kvdl_parts_info[i]; + kvdl->parts[i] = mlxsw_sp2_kvdl_part_init(mlxsw_sp, info); + if (IS_ERR(kvdl->parts[i])) { + err = PTR_ERR(kvdl->parts[i]); + goto err_kvdl_part_init; + } + } + return 0; + +err_kvdl_part_init: + for (i--; i >= 0; i--) + mlxsw_sp2_kvdl_part_fini(kvdl->parts[i]); + return err; +} + +static void mlxsw_sp2_kvdl_parts_fini(struct mlxsw_sp2_kvdl *kvdl) +{ + int i; + + for (i = 0; i < MLXSW_SP2_KVDL_PARTS_INFO_LEN; i++) + mlxsw_sp2_kvdl_part_fini(kvdl->parts[i]); +} + +static int mlxsw_sp2_kvdl_init(struct mlxsw_sp *mlxsw_sp, void *priv) +{ + struct mlxsw_sp2_kvdl *kvdl = priv; + + return mlxsw_sp2_kvdl_parts_init(mlxsw_sp, kvdl); +} + +static void mlxsw_sp2_kvdl_fini(struct mlxsw_sp *mlxsw_sp, void *priv) +{ + struct mlxsw_sp2_kvdl *kvdl = priv; + + mlxsw_sp2_kvdl_parts_fini(kvdl); +} + +const struct mlxsw_sp_kvdl_ops mlxsw_sp2_kvdl_ops = { + .priv_size = sizeof(struct mlxsw_sp2_kvdl), + .init = mlxsw_sp2_kvdl_init, + .fini = mlxsw_sp2_kvdl_fini, + .alloc = mlxsw_sp2_kvdl_alloc, + .free = mlxsw_sp2_kvdl_free, + .alloc_size_query = mlxsw_sp2_kvdl_alloc_size_query, +}; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c new file mode 100644 index 000000000000..53d4ab70da95 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c @@ -0,0 +1,82 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + * Copyright (c) 2018 Jiri Pirko <jiri@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
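Both KVDL flavors are exposed through the same mlxsw_sp_kvdl_ops vtable: the common front end allocates ops->priv_size bytes of per-implementation state and hands that back into every callback. A compact sketch of this dispatch pattern, with demo_* names invented for the example.

#include <stdlib.h>

struct demo_kvdl_ops {
	size_t priv_size;
	int (*init)(void *priv);
	void (*fini)(void *priv);
};

struct demo_kvdl {
	const struct demo_kvdl_ops *ops;
	/* priv_size bytes of implementation state follow */
};

static struct demo_kvdl *demo_kvdl_create(const struct demo_kvdl_ops *ops)
{
	struct demo_kvdl *kvdl;

	kvdl = calloc(1, sizeof(*kvdl) + ops->priv_size);
	if (!kvdl)
		return NULL;
	kvdl->ops = ops;
	if (ops->init(kvdl + 1)) {	/* priv starts right after the header */
		free(kvdl);
		return NULL;
	}
	return kvdl;
}

static int demo_sp_init(void *priv) { return 0; }	/* per-ASIC hooks */
static void demo_sp_fini(void *priv) { }

static const struct demo_kvdl_ops demo_sp_ops = {
	.priv_size = 64,
	.init = demo_sp_init,
	.fini = demo_sp_fini,
};

int main(void)
{
	struct demo_kvdl *kvdl = demo_kvdl_create(&demo_sp_ops);

	if (!kvdl)
		return 1;
	kvdl->ops->fini(kvdl + 1);
	free(kvdl);
	return 0;
}

mlxsw_sp1_kvdl_ops and mlxsw_sp2_kvdl_ops above slot into exactly this kind of front end, selected per ASIC generation.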
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <linux/kernel.h> + +#include "core_acl_flex_actions.h" +#include "spectrum.h" +#include "spectrum_mr.h" + +static int +mlxsw_sp2_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv, + void *route_priv, + struct mlxsw_sp_mr_route_key *key, + struct mlxsw_afa_block *afa_block, + enum mlxsw_sp_mr_route_prio prio) +{ + return 0; +} + +static void +mlxsw_sp2_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp, void *priv, + void *route_priv, + struct mlxsw_sp_mr_route_key *key) +{ +} + +static int +mlxsw_sp2_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp, + void *route_priv, + struct mlxsw_sp_mr_route_key *key, + struct mlxsw_afa_block *afa_block) +{ + return 0; +} + +static int mlxsw_sp2_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv) +{ + return 0; +} + +static void mlxsw_sp2_mr_tcam_fini(void *priv) +{ +} + +const struct mlxsw_sp_mr_tcam_ops mlxsw_sp2_mr_tcam_ops = { + .init = mlxsw_sp2_mr_tcam_init, + .fini = mlxsw_sp2_mr_tcam_fini, + .route_create = mlxsw_sp2_mr_tcam_route_create, + .route_destroy = mlxsw_sp2_mr_tcam_route_destroy, + .route_update = mlxsw_sp2_mr_tcam_route_update, +}; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index 79b1fa27a9a4..6a38763ad261 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -48,13 +48,12 @@ #include "spectrum.h" #include "core_acl_flex_keys.h" #include "core_acl_flex_actions.h" -#include "spectrum_acl_flex_keys.h" +#include "spectrum_acl_tcam.h" struct mlxsw_sp_acl { struct mlxsw_sp *mlxsw_sp; struct mlxsw_afk *afk; struct mlxsw_sp_fid *dummy_fid; - const struct mlxsw_sp_acl_ops *ops; struct rhashtable ruleset_ht; struct list_head rules; struct { @@ -62,8 +61,7 @@ struct mlxsw_sp_acl { unsigned long interval; /* ms */ #define MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS 1000 } rule_activity_update; - unsigned long priv[0]; - /* priv has to be always the last item */ + struct mlxsw_sp_acl_tcam tcam; }; struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl) @@ -319,7 +317,8 @@ int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp, static struct mlxsw_sp_acl_ruleset * mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_block *block, u32 chain_index, - const struct mlxsw_sp_acl_profile_ops *ops) + const struct mlxsw_sp_acl_profile_ops *ops, + struct mlxsw_afk_element_usage *tmplt_elusage) { struct mlxsw_sp_acl *acl = mlxsw_sp->acl; struct mlxsw_sp_acl_ruleset *ruleset; @@ -339,7 +338,8 @@ mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp, if (err) goto err_rhashtable_init; - err = ops->ruleset_add(mlxsw_sp, acl->priv, ruleset->priv); + err = ops->ruleset_add(mlxsw_sp, &acl->tcam, ruleset->priv, + tmplt_elusage); if (err) goto err_ops_ruleset_add; @@ -409,7 +409,7 @@ mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl *acl = mlxsw_sp->acl; struct mlxsw_sp_acl_ruleset *ruleset; - ops = 
acl->ops->profile_ops(mlxsw_sp, profile); + ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp, profile); if (!ops) return ERR_PTR(-EINVAL); ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops); @@ -421,13 +421,14 @@ mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_ruleset * mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_block *block, u32 chain_index, - enum mlxsw_sp_acl_profile profile) + enum mlxsw_sp_acl_profile profile, + struct mlxsw_afk_element_usage *tmplt_elusage) { const struct mlxsw_sp_acl_profile_ops *ops; struct mlxsw_sp_acl *acl = mlxsw_sp->acl; struct mlxsw_sp_acl_ruleset *ruleset; - ops = acl->ops->profile_ops(mlxsw_sp, profile); + ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp, profile); if (!ops) return ERR_PTR(-EINVAL); @@ -436,7 +437,8 @@ mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_acl_ruleset_ref_inc(ruleset); return ruleset; } - return mlxsw_sp_acl_ruleset_create(mlxsw_sp, block, chain_index, ops); + return mlxsw_sp_acl_ruleset_create(mlxsw_sp, block, chain_index, ops, + tmplt_elusage); } void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp, @@ -487,7 +489,7 @@ int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei) void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei, unsigned int priority) { - rulei->priority = priority; + rulei->priority = priority >> 16; } void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei, @@ -536,18 +538,23 @@ int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei) int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule_info *rulei, - struct net_device *out_dev) + struct net_device *out_dev, + struct netlink_ext_ack *extack) { struct mlxsw_sp_port *mlxsw_sp_port; u8 local_port; bool in_port; if (out_dev) { - if (!mlxsw_sp_port_dev_check(out_dev)) + if (!mlxsw_sp_port_dev_check(out_dev)) { + NL_SET_ERR_MSG_MOD(extack, "Invalid output device"); return -EINVAL; + } mlxsw_sp_port = netdev_priv(out_dev); - if (mlxsw_sp_port->mlxsw_sp != mlxsw_sp) + if (mlxsw_sp_port->mlxsw_sp != mlxsw_sp) { + NL_SET_ERR_MSG_MOD(extack, "Invalid output device"); return -EINVAL; + } local_port = mlxsw_sp_port->local_port; in_port = false; } else { @@ -558,20 +565,22 @@ int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp, in_port = true; } return mlxsw_afa_block_append_fwd(rulei->act_block, - local_port, in_port); + local_port, in_port, extack); } int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule_info *rulei, struct mlxsw_sp_acl_block *block, - struct net_device *out_dev) + struct net_device *out_dev, + struct netlink_ext_ack *extack) { struct mlxsw_sp_acl_block_binding *binding; struct mlxsw_sp_port *in_port; - if (!list_is_singular(&block->binding_list)) + if (!list_is_singular(&block->binding_list)) { + NL_SET_ERR_MSG_MOD(extack, "Only a single mirror source is allowed"); return -EOPNOTSUPP; - + } binding = list_first_entry(&block->binding_list, struct mlxsw_sp_acl_block_binding, list); in_port = binding->mlxsw_sp_port; @@ -579,12 +588,14 @@ int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp, return mlxsw_afa_block_append_mirror(rulei->act_block, in_port->local_port, out_dev, - binding->ingress); + binding->ingress, + extack); } int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule_info *rulei, - u32 action, u16 vid, u16 proto, u8 prio) + u32 action, u16 vid, u16 proto, u8 prio, + struct netlink_ext_ack *extack) { u8 
ethertype; @@ -597,44 +608,50 @@ int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp, ethertype = 1; break; default: + NL_SET_ERR_MSG_MOD(extack, "Unsupported VLAN protocol"); dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN protocol %#04x\n", proto); return -EINVAL; } return mlxsw_afa_block_append_vlan_modify(rulei->act_block, - vid, prio, ethertype); + vid, prio, ethertype, + extack); } else { + NL_SET_ERR_MSG_MOD(extack, "Unsupported VLAN action"); dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN action\n"); return -EINVAL; } } int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_rule_info *rulei) + struct mlxsw_sp_acl_rule_info *rulei, + struct netlink_ext_ack *extack) { return mlxsw_afa_block_append_counter(rulei->act_block, - &rulei->counter_index); + &rulei->counter_index, extack); } int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule_info *rulei, - u16 fid) + u16 fid, struct netlink_ext_ack *extack) { - return mlxsw_afa_block_append_fid_set(rulei->act_block, fid); + return mlxsw_afa_block_append_fid_set(rulei->act_block, fid, extack); } struct mlxsw_sp_acl_rule * mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_ruleset *ruleset, - unsigned long cookie) + unsigned long cookie, + struct netlink_ext_ack *extack) { const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; struct mlxsw_sp_acl_rule *rule; int err; mlxsw_sp_acl_ruleset_ref_inc(ruleset); - rule = kzalloc(sizeof(*rule) + ops->rule_priv_size, GFP_KERNEL); + rule = kzalloc(sizeof(*rule) + ops->rule_priv_size(mlxsw_sp), + GFP_KERNEL); if (!rule) { err = -ENOMEM; goto err_alloc; @@ -825,20 +842,20 @@ int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp, int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp) { - const struct mlxsw_sp_acl_ops *acl_ops = &mlxsw_sp_acl_tcam_ops; struct mlxsw_sp_fid *fid; struct mlxsw_sp_acl *acl; + size_t alloc_size; int err; - acl = kzalloc(sizeof(*acl) + acl_ops->priv_size, GFP_KERNEL); + alloc_size = sizeof(*acl) + mlxsw_sp_acl_tcam_priv_size(mlxsw_sp); + acl = kzalloc(alloc_size, GFP_KERNEL); if (!acl) return -ENOMEM; mlxsw_sp->acl = acl; acl->mlxsw_sp = mlxsw_sp; acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_FLEX_KEYS), - mlxsw_sp_afk_blocks, - MLXSW_SP_AFK_BLOCKS_COUNT); + mlxsw_sp->afk_ops); if (!acl->afk) { err = -ENOMEM; goto err_afk_create; @@ -857,12 +874,10 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp) acl->dummy_fid = fid; INIT_LIST_HEAD(&acl->rules); - err = acl_ops->init(mlxsw_sp, acl->priv); + err = mlxsw_sp_acl_tcam_init(mlxsw_sp, &acl->tcam); if (err) goto err_acl_ops_init; - acl->ops = acl_ops; - /* Create the delayed work for the rule activity_update */ INIT_DELAYED_WORK(&acl->rule_activity_update.dw, mlxsw_sp_acl_rul_activity_update_work); @@ -884,10 +899,9 @@ err_afk_create: void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp) { struct mlxsw_sp_acl *acl = mlxsw_sp->acl; - const struct mlxsw_sp_acl_ops *acl_ops = acl->ops; cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw); - acl_ops->fini(mlxsw_sp, acl->priv); + mlxsw_sp_acl_tcam_fini(mlxsw_sp, &acl->tcam); WARN_ON(!list_empty(&acl->rules)); mlxsw_sp_fid_put(acl->dummy_fid); rhashtable_destroy(&acl->ruleset_ht); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c new file mode 100644 index 000000000000..3a05e0b3f730 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c @@ -0,0 +1,568 @@ 
+/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + * Copyright (c) 2018 Jiri Pirko <jiri@mellanox.com> + * Copyright (c) 2018 Ido Schimmel <idosch@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
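One behavioral change in spectrum_acl.c above is that mlxsw_sp_acl_rulei_priority() now stores priority >> 16. The tc layer hands drivers a priority value whose upper 16 bits carry the user-assigned number, with the lower bits used internally, so the shift recovers the user's priority; the toy round-trip below rests on that assumption, and both helper names are invented.

#include <assert.h>
#include <stdint.h>

static uint32_t tc_encode_prio(uint16_t user_prio)
{
	/* user-visible priority lives in the upper 16 bits */
	return (uint32_t)user_prio << 16;
}

static uint16_t driver_decode_prio(uint32_t tc_prio)
{
	return tc_prio >> 16;	/* matches rulei->priority = priority >> 16 */
}

int main(void)
{
	assert(driver_decode_prio(tc_encode_prio(10)) == 10);
	return 0;
}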
+ */ + +#include <linux/kernel.h> +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/gfp.h> +#include <linux/refcount.h> +#include <linux/rhashtable.h> + +#include "reg.h" +#include "core.h" +#include "spectrum.h" +#include "spectrum_acl_tcam.h" +#include "core_acl_flex_keys.h" + +#define MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_START 6 +#define MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_END 11 + +struct mlxsw_sp_acl_atcam_lkey_id_ht_key { + char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* MSB blocks */ + u8 erp_id; +}; + +struct mlxsw_sp_acl_atcam_lkey_id { + struct rhash_head ht_node; + struct mlxsw_sp_acl_atcam_lkey_id_ht_key ht_key; + refcount_t refcnt; + u32 id; +}; + +struct mlxsw_sp_acl_atcam_region_ops { + int (*init)(struct mlxsw_sp_acl_atcam_region *aregion); + void (*fini)(struct mlxsw_sp_acl_atcam_region *aregion); + struct mlxsw_sp_acl_atcam_lkey_id * + (*lkey_id_get)(struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_rule_info *rulei, u8 erp_id); + void (*lkey_id_put)(struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_atcam_lkey_id *lkey_id); +}; + +struct mlxsw_sp_acl_atcam_region_generic { + struct mlxsw_sp_acl_atcam_lkey_id dummy_lkey_id; +}; + +struct mlxsw_sp_acl_atcam_region_12kb { + struct rhashtable lkey_ht; + unsigned int max_lkey_id; + unsigned long *used_lkey_id; +}; + +static const struct rhashtable_params mlxsw_sp_acl_atcam_lkey_id_ht_params = { + .key_len = sizeof(struct mlxsw_sp_acl_atcam_lkey_id_ht_key), + .key_offset = offsetof(struct mlxsw_sp_acl_atcam_lkey_id, ht_key), + .head_offset = offsetof(struct mlxsw_sp_acl_atcam_lkey_id, ht_node), +}; + +static const struct rhashtable_params mlxsw_sp_acl_atcam_entries_ht_params = { + .key_len = sizeof(struct mlxsw_sp_acl_atcam_entry_ht_key), + .key_offset = offsetof(struct mlxsw_sp_acl_atcam_entry, ht_key), + .head_offset = offsetof(struct mlxsw_sp_acl_atcam_entry, ht_node), +}; + +static bool +mlxsw_sp_acl_atcam_is_centry(const struct mlxsw_sp_acl_atcam_entry *aentry) +{ + return mlxsw_sp_acl_erp_is_ctcam_erp(aentry->erp); +} + +static int +mlxsw_sp_acl_atcam_region_generic_init(struct mlxsw_sp_acl_atcam_region *aregion) +{ + struct mlxsw_sp_acl_atcam_region_generic *region_generic; + + region_generic = kzalloc(sizeof(*region_generic), GFP_KERNEL); + if (!region_generic) + return -ENOMEM; + + refcount_set(®ion_generic->dummy_lkey_id.refcnt, 1); + aregion->priv = region_generic; + + return 0; +} + +static void +mlxsw_sp_acl_atcam_region_generic_fini(struct mlxsw_sp_acl_atcam_region *aregion) +{ + kfree(aregion->priv); +} + +static struct mlxsw_sp_acl_atcam_lkey_id * +mlxsw_sp_acl_atcam_generic_lkey_id_get(struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_rule_info *rulei, + u8 erp_id) +{ + struct mlxsw_sp_acl_atcam_region_generic *region_generic; + + region_generic = aregion->priv; + return ®ion_generic->dummy_lkey_id; +} + +static void +mlxsw_sp_acl_atcam_generic_lkey_id_put(struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_atcam_lkey_id *lkey_id) +{ +} + +static const struct mlxsw_sp_acl_atcam_region_ops +mlxsw_sp_acl_atcam_region_generic_ops = { + .init = mlxsw_sp_acl_atcam_region_generic_init, + .fini = mlxsw_sp_acl_atcam_region_generic_fini, + .lkey_id_get = mlxsw_sp_acl_atcam_generic_lkey_id_get, + .lkey_id_put = mlxsw_sp_acl_atcam_generic_lkey_id_put, +}; + +static int +mlxsw_sp_acl_atcam_region_12kb_init(struct mlxsw_sp_acl_atcam_region *aregion) +{ + struct mlxsw_sp *mlxsw_sp = aregion->region->mlxsw_sp; + struct mlxsw_sp_acl_atcam_region_12kb 
*region_12kb; + size_t alloc_size; + u64 max_lkey_id; + int err; + + if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_LARGE_KEY_ID)) + return -EIO; + + max_lkey_id = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_LARGE_KEY_ID); + region_12kb = kzalloc(sizeof(*region_12kb), GFP_KERNEL); + if (!region_12kb) + return -ENOMEM; + + alloc_size = BITS_TO_LONGS(max_lkey_id) * sizeof(unsigned long); + region_12kb->used_lkey_id = kzalloc(alloc_size, GFP_KERNEL); + if (!region_12kb->used_lkey_id) { + err = -ENOMEM; + goto err_used_lkey_id_alloc; + } + + err = rhashtable_init(®ion_12kb->lkey_ht, + &mlxsw_sp_acl_atcam_lkey_id_ht_params); + if (err) + goto err_rhashtable_init; + + region_12kb->max_lkey_id = max_lkey_id; + aregion->priv = region_12kb; + + return 0; + +err_rhashtable_init: + kfree(region_12kb->used_lkey_id); +err_used_lkey_id_alloc: + kfree(region_12kb); + return err; +} + +static void +mlxsw_sp_acl_atcam_region_12kb_fini(struct mlxsw_sp_acl_atcam_region *aregion) +{ + struct mlxsw_sp_acl_atcam_region_12kb *region_12kb = aregion->priv; + + rhashtable_destroy(®ion_12kb->lkey_ht); + kfree(region_12kb->used_lkey_id); + kfree(region_12kb); +} + +static struct mlxsw_sp_acl_atcam_lkey_id * +mlxsw_sp_acl_atcam_lkey_id_create(struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_atcam_lkey_id_ht_key *ht_key) +{ + struct mlxsw_sp_acl_atcam_region_12kb *region_12kb = aregion->priv; + struct mlxsw_sp_acl_atcam_lkey_id *lkey_id; + u32 id; + int err; + + id = find_first_zero_bit(region_12kb->used_lkey_id, + region_12kb->max_lkey_id); + if (id < region_12kb->max_lkey_id) + __set_bit(id, region_12kb->used_lkey_id); + else + return ERR_PTR(-ENOBUFS); + + lkey_id = kzalloc(sizeof(*lkey_id), GFP_KERNEL); + if (!lkey_id) { + err = -ENOMEM; + goto err_lkey_id_alloc; + } + + lkey_id->id = id; + memcpy(&lkey_id->ht_key, ht_key, sizeof(*ht_key)); + refcount_set(&lkey_id->refcnt, 1); + + err = rhashtable_insert_fast(®ion_12kb->lkey_ht, + &lkey_id->ht_node, + mlxsw_sp_acl_atcam_lkey_id_ht_params); + if (err) + goto err_rhashtable_insert; + + return lkey_id; + +err_rhashtable_insert: + kfree(lkey_id); +err_lkey_id_alloc: + __clear_bit(id, region_12kb->used_lkey_id); + return ERR_PTR(err); +} + +static void +mlxsw_sp_acl_atcam_lkey_id_destroy(struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_atcam_lkey_id *lkey_id) +{ + struct mlxsw_sp_acl_atcam_region_12kb *region_12kb = aregion->priv; + u32 id = lkey_id->id; + + rhashtable_remove_fast(®ion_12kb->lkey_ht, &lkey_id->ht_node, + mlxsw_sp_acl_atcam_lkey_id_ht_params); + kfree(lkey_id); + __clear_bit(id, region_12kb->used_lkey_id); +} + +static struct mlxsw_sp_acl_atcam_lkey_id * +mlxsw_sp_acl_atcam_12kb_lkey_id_get(struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_rule_info *rulei, + u8 erp_id) +{ + struct mlxsw_sp_acl_atcam_region_12kb *region_12kb = aregion->priv; + struct mlxsw_sp_acl_tcam_region *region = aregion->region; + struct mlxsw_sp_acl_atcam_lkey_id_ht_key ht_key = {{ 0 } }; + struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp; + struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl); + struct mlxsw_sp_acl_atcam_lkey_id *lkey_id; + + mlxsw_afk_encode(afk, region->key_info, &rulei->values, ht_key.enc_key, + NULL, MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_START, + MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_END); + ht_key.erp_id = erp_id; + lkey_id = rhashtable_lookup_fast(®ion_12kb->lkey_ht, &ht_key, + mlxsw_sp_acl_atcam_lkey_id_ht_params); + if (lkey_id) { + refcount_inc(&lkey_id->refcnt); + return lkey_id; + } + + return 
mlxsw_sp_acl_atcam_lkey_id_create(aregion, &ht_key); +} + +static void +mlxsw_sp_acl_atcam_12kb_lkey_id_put(struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_atcam_lkey_id *lkey_id) +{ + if (refcount_dec_and_test(&lkey_id->refcnt)) + mlxsw_sp_acl_atcam_lkey_id_destroy(aregion, lkey_id); +} + +static const struct mlxsw_sp_acl_atcam_region_ops +mlxsw_sp_acl_atcam_region_12kb_ops = { + .init = mlxsw_sp_acl_atcam_region_12kb_init, + .fini = mlxsw_sp_acl_atcam_region_12kb_fini, + .lkey_id_get = mlxsw_sp_acl_atcam_12kb_lkey_id_get, + .lkey_id_put = mlxsw_sp_acl_atcam_12kb_lkey_id_put, +}; + +static const struct mlxsw_sp_acl_atcam_region_ops * +mlxsw_sp_acl_atcam_region_ops_arr[] = { + [MLXSW_SP_ACL_ATCAM_REGION_TYPE_2KB] = + &mlxsw_sp_acl_atcam_region_generic_ops, + [MLXSW_SP_ACL_ATCAM_REGION_TYPE_4KB] = + &mlxsw_sp_acl_atcam_region_generic_ops, + [MLXSW_SP_ACL_ATCAM_REGION_TYPE_8KB] = + &mlxsw_sp_acl_atcam_region_generic_ops, + [MLXSW_SP_ACL_ATCAM_REGION_TYPE_12KB] = + &mlxsw_sp_acl_atcam_region_12kb_ops, +}; + +int mlxsw_sp_acl_atcam_region_associate(struct mlxsw_sp *mlxsw_sp, + u16 region_id) +{ + char perar_pl[MLXSW_REG_PERAR_LEN]; + /* For now, just assume that every region has 12 key blocks */ + u16 hw_region = region_id * 3; + u64 max_regions; + + max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS); + if (hw_region >= max_regions) + return -ENOBUFS; + + mlxsw_reg_perar_pack(perar_pl, region_id, hw_region); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(perar), perar_pl); +} + +static void +mlxsw_sp_acl_atcam_region_type_init(struct mlxsw_sp_acl_atcam_region *aregion) +{ + struct mlxsw_sp_acl_tcam_region *region = aregion->region; + enum mlxsw_sp_acl_atcam_region_type region_type; + unsigned int blocks_count; + + /* We already know the blocks count can not exceed the maximum + * blocks count. 
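The 12KB-region lkey_id handling above is a classic refcounted lookup-or-create: look the key up in a hash table, take a reference on a hit, otherwise allocate, insert and start the count at one; the last put tears the object down. A simplified sketch with a fixed array standing in for the rhashtable; the demo_* names are invented.

#include <string.h>

struct demo_lkey_id {
	char key[8];
	unsigned int refcnt;	/* refcount_t in the driver */
	int in_use;
};

static struct demo_lkey_id table[4];	/* stands in for the rhashtable */

static struct demo_lkey_id *demo_lkey_id_get(const char *key)
{
	struct demo_lkey_id *free_slot = NULL;
	unsigned int i;

	for (i = 0; i < 4; i++) {
		if (table[i].in_use && !strcmp(table[i].key, key)) {
			table[i].refcnt++;	/* refcount_inc() on a hit */
			return &table[i];
		}
		if (!table[i].in_use && !free_slot)
			free_slot = &table[i];
	}
	if (!free_slot)
		return NULL;		/* driver returns ERR_PTR(-ENOBUFS) */
	strncpy(free_slot->key, key, sizeof(free_slot->key) - 1);
	free_slot->refcnt = 1;		/* refcount_set(..., 1) */
	free_slot->in_use = 1;
	return free_slot;
}

static void demo_lkey_id_put(struct demo_lkey_id *lkey_id)
{
	if (--lkey_id->refcnt == 0)	/* refcount_dec_and_test() */
		lkey_id->in_use = 0;
}

int main(void)
{
	struct demo_lkey_id *a = demo_lkey_id_get("key1");
	struct demo_lkey_id *b = demo_lkey_id_get("key1");

	/* same object: the second get only bumped the count */
	demo_lkey_id_put(b);
	demo_lkey_id_put(a);	/* last put releases the slot */
	return a == b ? 0 : 1;
}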
+ */ + blocks_count = mlxsw_afk_key_info_blocks_count_get(region->key_info); + if (blocks_count <= 2) + region_type = MLXSW_SP_ACL_ATCAM_REGION_TYPE_2KB; + else if (blocks_count <= 4) + region_type = MLXSW_SP_ACL_ATCAM_REGION_TYPE_4KB; + else if (blocks_count <= 8) + region_type = MLXSW_SP_ACL_ATCAM_REGION_TYPE_8KB; + else + region_type = MLXSW_SP_ACL_ATCAM_REGION_TYPE_12KB; + + aregion->type = region_type; + aregion->ops = mlxsw_sp_acl_atcam_region_ops_arr[region_type]; +} + +int +mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam *atcam, + struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_tcam_region *region, + const struct mlxsw_sp_acl_ctcam_region_ops *ops) +{ + int err; + + aregion->region = region; + aregion->atcam = atcam; + mlxsw_sp_acl_atcam_region_type_init(aregion); + + err = rhashtable_init(&aregion->entries_ht, + &mlxsw_sp_acl_atcam_entries_ht_params); + if (err) + return err; + err = aregion->ops->init(aregion); + if (err) + goto err_ops_init; + err = mlxsw_sp_acl_erp_region_init(aregion); + if (err) + goto err_erp_region_init; + err = mlxsw_sp_acl_ctcam_region_init(mlxsw_sp, &aregion->cregion, + region, ops); + if (err) + goto err_ctcam_region_init; + + return 0; + +err_ctcam_region_init: + mlxsw_sp_acl_erp_region_fini(aregion); +err_erp_region_init: + aregion->ops->fini(aregion); +err_ops_init: + rhashtable_destroy(&aregion->entries_ht); + return err; +} + +void mlxsw_sp_acl_atcam_region_fini(struct mlxsw_sp_acl_atcam_region *aregion) +{ + mlxsw_sp_acl_ctcam_region_fini(&aregion->cregion); + mlxsw_sp_acl_erp_region_fini(aregion); + aregion->ops->fini(aregion); + rhashtable_destroy(&aregion->entries_ht); +} + +void mlxsw_sp_acl_atcam_chunk_init(struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_atcam_chunk *achunk, + unsigned int priority) +{ + mlxsw_sp_acl_ctcam_chunk_init(&aregion->cregion, &achunk->cchunk, + priority); +} + +void mlxsw_sp_acl_atcam_chunk_fini(struct mlxsw_sp_acl_atcam_chunk *achunk) +{ + mlxsw_sp_acl_ctcam_chunk_fini(&achunk->cchunk); +} + +static int +mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_atcam_entry *aentry, + struct mlxsw_sp_acl_rule_info *rulei) +{ + struct mlxsw_sp_acl_tcam_region *region = aregion->region; + u8 erp_id = mlxsw_sp_acl_erp_id(aentry->erp); + struct mlxsw_sp_acl_atcam_lkey_id *lkey_id; + char ptce3_pl[MLXSW_REG_PTCE3_LEN]; + u32 kvdl_index, priority; + int err; + + err = mlxsw_sp_acl_tcam_priority_get(mlxsw_sp, rulei, &priority, true); + if (err) + return err; + + lkey_id = aregion->ops->lkey_id_get(aregion, rulei, erp_id); + if (IS_ERR(lkey_id)) + return PTR_ERR(lkey_id); + aentry->lkey_id = lkey_id; + + kvdl_index = mlxsw_afa_block_first_kvdl_index(rulei->act_block); + mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_WRITE, + priority, region->tcam_region_info, + aentry->ht_key.enc_key, erp_id, + refcount_read(&lkey_id->refcnt) != 1, lkey_id->id, + kvdl_index); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce3), ptce3_pl); + if (err) + goto err_ptce3_write; + + return 0; + +err_ptce3_write: + aregion->ops->lkey_id_put(aregion, lkey_id); + return err; +} + +static void +mlxsw_sp_acl_atcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_atcam_entry *aentry) +{ + struct mlxsw_sp_acl_atcam_lkey_id *lkey_id = aentry->lkey_id; + struct mlxsw_sp_acl_tcam_region *region = aregion->region; + u8 erp_id = 
mlxsw_sp_acl_erp_id(aentry->erp); + char ptce3_pl[MLXSW_REG_PTCE3_LEN]; + + mlxsw_reg_ptce3_pack(ptce3_pl, false, MLXSW_REG_PTCE3_OP_WRITE_WRITE, 0, + region->tcam_region_info, aentry->ht_key.enc_key, + erp_id, refcount_read(&lkey_id->refcnt) != 1, + lkey_id->id, 0); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce3), ptce3_pl); + aregion->ops->lkey_id_put(aregion, lkey_id); +} + +static int +__mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_atcam_entry *aentry, + struct mlxsw_sp_acl_rule_info *rulei) +{ + struct mlxsw_sp_acl_tcam_region *region = aregion->region; + char mask[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN] = { 0 }; + struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl); + struct mlxsw_sp_acl_erp *erp; + unsigned int blocks_count; + int err; + + blocks_count = mlxsw_afk_key_info_blocks_count_get(region->key_info); + mlxsw_afk_encode(afk, region->key_info, &rulei->values, + aentry->ht_key.enc_key, mask, 0, blocks_count - 1); + + erp = mlxsw_sp_acl_erp_get(aregion, mask, false); + if (IS_ERR(erp)) + return PTR_ERR(erp); + aentry->erp = erp; + aentry->ht_key.erp_id = mlxsw_sp_acl_erp_id(erp); + + /* We can't insert identical rules into the A-TCAM, so fail and + * let the rule spill into C-TCAM + */ + err = rhashtable_lookup_insert_fast(&aregion->entries_ht, + &aentry->ht_node, + mlxsw_sp_acl_atcam_entries_ht_params); + if (err) + goto err_rhashtable_insert; + + err = mlxsw_sp_acl_atcam_region_entry_insert(mlxsw_sp, aregion, aentry, + rulei); + if (err) + goto err_rule_insert; + + return 0; + +err_rule_insert: + rhashtable_remove_fast(&aregion->entries_ht, &aentry->ht_node, + mlxsw_sp_acl_atcam_entries_ht_params); +err_rhashtable_insert: + mlxsw_sp_acl_erp_put(aregion, erp); + return err; +} + +static void +__mlxsw_sp_acl_atcam_entry_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_atcam_entry *aentry) +{ + mlxsw_sp_acl_atcam_region_entry_remove(mlxsw_sp, aregion, aentry); + rhashtable_remove_fast(&aregion->entries_ht, &aentry->ht_node, + mlxsw_sp_acl_atcam_entries_ht_params); + mlxsw_sp_acl_erp_put(aregion, aentry->erp); +} + +int mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_atcam_chunk *achunk, + struct mlxsw_sp_acl_atcam_entry *aentry, + struct mlxsw_sp_acl_rule_info *rulei) +{ + int err; + + err = __mlxsw_sp_acl_atcam_entry_add(mlxsw_sp, aregion, aentry, rulei); + if (!err) + return 0; + + /* It is possible we failed to add the rule to the A-TCAM because the + * number of masks was exceeded. Try to spill into C-TCAM. 
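+ * The C-TCAM region is an ordinary TCAM and is not subject to the eRP mask limit, so it can absorb the rules the A-TCAM cannot hold - those with duplicate keys or those exceeding the mask budget.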
+ */ + err = mlxsw_sp_acl_ctcam_entry_add(mlxsw_sp, &aregion->cregion, + &achunk->cchunk, &aentry->centry, + rulei, true); + if (!err) + return 0; + + return err; +} + +void mlxsw_sp_acl_atcam_entry_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_atcam_chunk *achunk, + struct mlxsw_sp_acl_atcam_entry *aentry) +{ + if (mlxsw_sp_acl_atcam_is_centry(aentry)) + mlxsw_sp_acl_ctcam_entry_del(mlxsw_sp, &aregion->cregion, + &achunk->cchunk, &aentry->centry); + else + __mlxsw_sp_acl_atcam_entry_del(mlxsw_sp, aregion, aentry); +} + +int mlxsw_sp_acl_atcam_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam *atcam) +{ + return mlxsw_sp_acl_erps_init(mlxsw_sp, atcam); +} + +void mlxsw_sp_acl_atcam_fini(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam *atcam) +{ + mlxsw_sp_acl_erps_fini(mlxsw_sp, atcam); +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c new file mode 100644 index 000000000000..7440a1189250 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c @@ -0,0 +1,227 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c + * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017-2018 Jiri Pirko <jiri@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/parman.h> + +#include "reg.h" +#include "core.h" +#include "spectrum.h" +#include "spectrum_acl_tcam.h" + +static int +mlxsw_sp_acl_ctcam_region_resize(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region, + u16 new_size) +{ + char ptar_pl[MLXSW_REG_PTAR_LEN]; + + mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_RESIZE, + region->key_type, new_size, region->id, + region->tcam_region_info); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl); +} + +static void +mlxsw_sp_acl_ctcam_region_move(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region, + u16 src_offset, u16 dst_offset, u16 size) +{ + char prcr_pl[MLXSW_REG_PRCR_LEN]; + + mlxsw_reg_prcr_pack(prcr_pl, MLXSW_REG_PRCR_OP_MOVE, + region->tcam_region_info, src_offset, + region->tcam_region_info, dst_offset, size); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(prcr), prcr_pl); +} + +static int +mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ctcam_region *cregion, + struct mlxsw_sp_acl_ctcam_entry *centry, + struct mlxsw_sp_acl_rule_info *rulei, + bool fillup_priority) +{ + struct mlxsw_sp_acl_tcam_region *region = cregion->region; + struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl); + char ptce2_pl[MLXSW_REG_PTCE2_LEN]; + unsigned int blocks_count; + char *act_set; + u32 priority; + char *mask; + char *key; + int err; + + err = mlxsw_sp_acl_tcam_priority_get(mlxsw_sp, rulei, &priority, + fillup_priority); + if (err) + return err; + + mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_WRITE_WRITE, + region->tcam_region_info, + centry->parman_item.index, priority); + key = mlxsw_reg_ptce2_flex_key_blocks_data(ptce2_pl); + mask = mlxsw_reg_ptce2_mask_data(ptce2_pl); + blocks_count = mlxsw_afk_key_info_blocks_count_get(region->key_info); + mlxsw_afk_encode(afk, region->key_info, &rulei->values, key, mask, 0, + blocks_count - 1); + + err = cregion->ops->entry_insert(cregion, centry, mask); + if (err) + return err; + + /* Only the first action set belongs here, the rest is in KVD */ + act_set = mlxsw_afa_block_first_set(rulei->act_block); + mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl); +} + +static void +mlxsw_sp_acl_ctcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ctcam_region *cregion, + struct mlxsw_sp_acl_ctcam_entry *centry) +{ + char ptce2_pl[MLXSW_REG_PTCE2_LEN]; + + mlxsw_reg_ptce2_pack(ptce2_pl, false, MLXSW_REG_PTCE2_OP_WRITE_WRITE, + cregion->region->tcam_region_info, + centry->parman_item.index, 0); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl); + cregion->ops->entry_remove(cregion, centry); +} + +static int mlxsw_sp_acl_ctcam_region_parman_resize(void *priv, + unsigned long new_count) +{ + struct mlxsw_sp_acl_ctcam_region *cregion = priv; + struct mlxsw_sp_acl_tcam_region *region = cregion->region; + struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp; + u64 max_tcam_rules; + + max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES); + if (new_count > max_tcam_rules) + return -EINVAL; + return mlxsw_sp_acl_ctcam_region_resize(mlxsw_sp, region, new_count); +} + +static void mlxsw_sp_acl_ctcam_region_parman_move(void *priv, + unsigned long from_index, + unsigned long to_index, + unsigned long count) +{ + struct mlxsw_sp_acl_ctcam_region *cregion = priv; + struct mlxsw_sp_acl_tcam_region *region = cregion->region; + struct 
mlxsw_sp *mlxsw_sp = region->mlxsw_sp; + + mlxsw_sp_acl_ctcam_region_move(mlxsw_sp, region, + from_index, to_index, count); +} + +static const struct parman_ops mlxsw_sp_acl_ctcam_region_parman_ops = { + .base_count = MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT, + .resize_step = MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP, + .resize = mlxsw_sp_acl_ctcam_region_parman_resize, + .move = mlxsw_sp_acl_ctcam_region_parman_move, + .algo = PARMAN_ALGO_TYPE_LSORT, +}; + +int +mlxsw_sp_acl_ctcam_region_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ctcam_region *cregion, + struct mlxsw_sp_acl_tcam_region *region, + const struct mlxsw_sp_acl_ctcam_region_ops *ops) +{ + cregion->region = region; + cregion->ops = ops; + cregion->parman = parman_create(&mlxsw_sp_acl_ctcam_region_parman_ops, + cregion); + if (!cregion->parman) + return -ENOMEM; + return 0; +} + +void mlxsw_sp_acl_ctcam_region_fini(struct mlxsw_sp_acl_ctcam_region *cregion) +{ + parman_destroy(cregion->parman); +} + +void mlxsw_sp_acl_ctcam_chunk_init(struct mlxsw_sp_acl_ctcam_region *cregion, + struct mlxsw_sp_acl_ctcam_chunk *cchunk, + unsigned int priority) +{ + parman_prio_init(cregion->parman, &cchunk->parman_prio, priority); +} + +void mlxsw_sp_acl_ctcam_chunk_fini(struct mlxsw_sp_acl_ctcam_chunk *cchunk) +{ + parman_prio_fini(&cchunk->parman_prio); +} + +int mlxsw_sp_acl_ctcam_entry_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ctcam_region *cregion, + struct mlxsw_sp_acl_ctcam_chunk *cchunk, + struct mlxsw_sp_acl_ctcam_entry *centry, + struct mlxsw_sp_acl_rule_info *rulei, + bool fillup_priority) +{ + int err; + + err = parman_item_add(cregion->parman, &cchunk->parman_prio, + ¢ry->parman_item); + if (err) + return err; + + err = mlxsw_sp_acl_ctcam_region_entry_insert(mlxsw_sp, cregion, centry, + rulei, fillup_priority); + if (err) + goto err_rule_insert; + return 0; + +err_rule_insert: + parman_item_remove(cregion->parman, &cchunk->parman_prio, + ¢ry->parman_item); + return err; +} + +void mlxsw_sp_acl_ctcam_entry_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ctcam_region *cregion, + struct mlxsw_sp_acl_ctcam_chunk *cchunk, + struct mlxsw_sp_acl_ctcam_entry *centry) +{ + mlxsw_sp_acl_ctcam_region_entry_remove(mlxsw_sp, cregion, centry); + parman_item_remove(cregion->parman, &cchunk->parman_prio, + ¢ry->parman_item); +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c new file mode 100644 index 000000000000..463590bbb348 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c @@ -0,0 +1,1199 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + * Copyright (c) 2018 Ido Schimmel <idosch@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <linux/bitmap.h> +#include <linux/errno.h> +#include <linux/genalloc.h> +#include <linux/gfp.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/rhashtable.h> +#include <linux/rtnetlink.h> +#include <linux/slab.h> + +#include "core.h" +#include "reg.h" +#include "spectrum.h" +#include "spectrum_acl_tcam.h" + +/* gen_pool_alloc() returns 0 when allocation fails, so use an offset */ +#define MLXSW_SP_ACL_ERP_GENALLOC_OFFSET 0x100 +#define MLXSW_SP_ACL_ERP_MAX_PER_REGION 16 + +struct mlxsw_sp_acl_erp_core { + unsigned int erpt_entries_size[MLXSW_SP_ACL_ATCAM_REGION_TYPE_MAX + 1]; + struct gen_pool *erp_tables; + struct mlxsw_sp *mlxsw_sp; + unsigned int num_erp_banks; +}; + +struct mlxsw_sp_acl_erp_key { + char mask[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; + bool ctcam; +}; + +struct mlxsw_sp_acl_erp { + struct mlxsw_sp_acl_erp_key key; + u8 id; + u8 index; + refcount_t refcnt; + DECLARE_BITMAP(mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN); + struct list_head list; + struct rhash_head ht_node; + struct mlxsw_sp_acl_erp_table *erp_table; +}; + +struct mlxsw_sp_acl_erp_master_mask { + DECLARE_BITMAP(bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN); + unsigned int count[MLXSW_SP_ACL_TCAM_MASK_LEN]; +}; + +struct mlxsw_sp_acl_erp_table { + struct mlxsw_sp_acl_erp_master_mask master_mask; + DECLARE_BITMAP(erp_id_bitmap, MLXSW_SP_ACL_ERP_MAX_PER_REGION); + DECLARE_BITMAP(erp_index_bitmap, MLXSW_SP_ACL_ERP_MAX_PER_REGION); + struct list_head atcam_erps_list; + struct rhashtable erp_ht; + struct mlxsw_sp_acl_erp_core *erp_core; + struct mlxsw_sp_acl_atcam_region *aregion; + const struct mlxsw_sp_acl_erp_table_ops *ops; + unsigned long base_index; + unsigned int num_atcam_erps; + unsigned int num_max_atcam_erps; + unsigned int num_ctcam_erps; +}; + +static const struct rhashtable_params mlxsw_sp_acl_erp_ht_params = { + .key_len = sizeof(struct mlxsw_sp_acl_erp_key), + .key_offset = offsetof(struct mlxsw_sp_acl_erp, key), + .head_offset = offsetof(struct mlxsw_sp_acl_erp, ht_node), +}; + +struct mlxsw_sp_acl_erp_table_ops { + struct mlxsw_sp_acl_erp * + (*erp_create)(struct mlxsw_sp_acl_erp_table *erp_table, + struct mlxsw_sp_acl_erp_key *key); + void (*erp_destroy)(struct mlxsw_sp_acl_erp_table *erp_table, + struct mlxsw_sp_acl_erp *erp); +}; + +static struct mlxsw_sp_acl_erp * +mlxsw_sp_acl_erp_mask_create(struct mlxsw_sp_acl_erp_table *erp_table, + struct mlxsw_sp_acl_erp_key *key); +static void +mlxsw_sp_acl_erp_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table, + struct mlxsw_sp_acl_erp *erp); +static struct 
mlxsw_sp_acl_erp * +mlxsw_sp_acl_erp_second_mask_create(struct mlxsw_sp_acl_erp_table *erp_table, + struct mlxsw_sp_acl_erp_key *key); +static void +mlxsw_sp_acl_erp_second_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table, + struct mlxsw_sp_acl_erp *erp); +static struct mlxsw_sp_acl_erp * +mlxsw_sp_acl_erp_first_mask_create(struct mlxsw_sp_acl_erp_table *erp_table, + struct mlxsw_sp_acl_erp_key *key); +static void +mlxsw_sp_acl_erp_first_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table, + struct mlxsw_sp_acl_erp *erp); +static void +mlxsw_sp_acl_erp_no_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table, + struct mlxsw_sp_acl_erp *erp); + +static const struct mlxsw_sp_acl_erp_table_ops erp_multiple_masks_ops = { + .erp_create = mlxsw_sp_acl_erp_mask_create, + .erp_destroy = mlxsw_sp_acl_erp_mask_destroy, +}; + +static const struct mlxsw_sp_acl_erp_table_ops erp_two_masks_ops = { + .erp_create = mlxsw_sp_acl_erp_mask_create, + .erp_destroy = mlxsw_sp_acl_erp_second_mask_destroy, +}; + +static const struct mlxsw_sp_acl_erp_table_ops erp_single_mask_ops = { + .erp_create = mlxsw_sp_acl_erp_second_mask_create, + .erp_destroy = mlxsw_sp_acl_erp_first_mask_destroy, +}; + +static const struct mlxsw_sp_acl_erp_table_ops erp_no_mask_ops = { + .erp_create = mlxsw_sp_acl_erp_first_mask_create, + .erp_destroy = mlxsw_sp_acl_erp_no_mask_destroy, +}; + +bool mlxsw_sp_acl_erp_is_ctcam_erp(const struct mlxsw_sp_acl_erp *erp) +{ + return erp->key.ctcam; +} + +u8 mlxsw_sp_acl_erp_id(const struct mlxsw_sp_acl_erp *erp) +{ + return erp->id; +} + +static unsigned int +mlxsw_sp_acl_erp_table_entry_size(const struct mlxsw_sp_acl_erp_table *erp_table) +{ + struct mlxsw_sp_acl_atcam_region *aregion = erp_table->aregion; + struct mlxsw_sp_acl_erp_core *erp_core = erp_table->erp_core; + + return erp_core->erpt_entries_size[aregion->type]; +} + +static int mlxsw_sp_acl_erp_id_get(struct mlxsw_sp_acl_erp_table *erp_table, + u8 *p_id) +{ + u8 id; + + id = find_first_zero_bit(erp_table->erp_id_bitmap, + MLXSW_SP_ACL_ERP_MAX_PER_REGION); + if (id < MLXSW_SP_ACL_ERP_MAX_PER_REGION) { + __set_bit(id, erp_table->erp_id_bitmap); + *p_id = id; + return 0; + } + + return -ENOBUFS; +} + +static void mlxsw_sp_acl_erp_id_put(struct mlxsw_sp_acl_erp_table *erp_table, + u8 id) +{ + __clear_bit(id, erp_table->erp_id_bitmap); +} + +static void +mlxsw_sp_acl_erp_master_mask_bit_set(unsigned long bit, + struct mlxsw_sp_acl_erp_master_mask *mask) +{ + if (mask->count[bit]++ == 0) + __set_bit(bit, mask->bitmap); +} + +static void +mlxsw_sp_acl_erp_master_mask_bit_clear(unsigned long bit, + struct mlxsw_sp_acl_erp_master_mask *mask) +{ + if (--mask->count[bit] == 0) + __clear_bit(bit, mask->bitmap); +} + +static int +mlxsw_sp_acl_erp_master_mask_update(struct mlxsw_sp_acl_erp_table *erp_table) +{ + struct mlxsw_sp_acl_tcam_region *region = erp_table->aregion->region; + struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp; + char percr_pl[MLXSW_REG_PERCR_LEN]; + char *master_mask; + + mlxsw_reg_percr_pack(percr_pl, region->id); + master_mask = mlxsw_reg_percr_master_mask_data(percr_pl); + bitmap_to_arr32((u32 *) master_mask, erp_table->master_mask.bitmap, + MLXSW_SP_ACL_TCAM_MASK_LEN); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(percr), percr_pl); +} + +static int +mlxsw_sp_acl_erp_master_mask_set(struct mlxsw_sp_acl_erp_table *erp_table, + const struct mlxsw_sp_acl_erp *erp) +{ + unsigned long bit; + int err; + + for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN) + mlxsw_sp_acl_erp_master_mask_bit_set(bit, + 
&erp_table->master_mask); + + err = mlxsw_sp_acl_erp_master_mask_update(erp_table); + if (err) + goto err_master_mask_update; + + return 0; + +err_master_mask_update: + for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN) + mlxsw_sp_acl_erp_master_mask_bit_clear(bit, + &erp_table->master_mask); + return err; +} + +static int +mlxsw_sp_acl_erp_master_mask_clear(struct mlxsw_sp_acl_erp_table *erp_table, + const struct mlxsw_sp_acl_erp *erp) +{ + unsigned long bit; + int err; + + for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN) + mlxsw_sp_acl_erp_master_mask_bit_clear(bit, + &erp_table->master_mask); + + err = mlxsw_sp_acl_erp_master_mask_update(erp_table); + if (err) + goto err_master_mask_update; + + return 0; + +err_master_mask_update: + for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN) + mlxsw_sp_acl_erp_master_mask_bit_set(bit, + &erp_table->master_mask); + return err; +} + +static struct mlxsw_sp_acl_erp * +mlxsw_sp_acl_erp_generic_create(struct mlxsw_sp_acl_erp_table *erp_table, + struct mlxsw_sp_acl_erp_key *key) +{ + struct mlxsw_sp_acl_erp *erp; + int err; + + erp = kzalloc(sizeof(*erp), GFP_KERNEL); + if (!erp) + return ERR_PTR(-ENOMEM); + + err = mlxsw_sp_acl_erp_id_get(erp_table, &erp->id); + if (err) + goto err_erp_id_get; + + memcpy(&erp->key, key, sizeof(*key)); + bitmap_from_arr32(erp->mask_bitmap, (u32 *) key->mask, + MLXSW_SP_ACL_TCAM_MASK_LEN); + list_add(&erp->list, &erp_table->atcam_erps_list); + refcount_set(&erp->refcnt, 1); + erp_table->num_atcam_erps++; + erp->erp_table = erp_table; + + err = mlxsw_sp_acl_erp_master_mask_set(erp_table, erp); + if (err) + goto err_master_mask_set; + + err = rhashtable_insert_fast(&erp_table->erp_ht, &erp->ht_node, + mlxsw_sp_acl_erp_ht_params); + if (err) + goto err_rhashtable_insert; + + return erp; + +err_rhashtable_insert: + mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp); +err_master_mask_set: + erp_table->num_atcam_erps--; + list_del(&erp->list); + mlxsw_sp_acl_erp_id_put(erp_table, erp->id); +err_erp_id_get: + kfree(erp); + return ERR_PTR(err); +} + +static void +mlxsw_sp_acl_erp_generic_destroy(struct mlxsw_sp_acl_erp *erp) +{ + struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table; + + rhashtable_remove_fast(&erp_table->erp_ht, &erp->ht_node, + mlxsw_sp_acl_erp_ht_params); + mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp); + erp_table->num_atcam_erps--; + list_del(&erp->list); + mlxsw_sp_acl_erp_id_put(erp_table, erp->id); + kfree(erp); +} + +static int +mlxsw_sp_acl_erp_table_alloc(struct mlxsw_sp_acl_erp_core *erp_core, + unsigned int num_erps, + enum mlxsw_sp_acl_atcam_region_type region_type, + unsigned long *p_index) +{ + unsigned int num_rows, entry_size; + + /* We only allow allocations of entire rows */ + if (num_erps % erp_core->num_erp_banks != 0) + return -EINVAL; + + entry_size = erp_core->erpt_entries_size[region_type]; + num_rows = num_erps / erp_core->num_erp_banks; + + *p_index = gen_pool_alloc(erp_core->erp_tables, num_rows * entry_size); + if (*p_index == 0) + return -ENOBUFS; + *p_index -= MLXSW_SP_ACL_ERP_GENALLOC_OFFSET; + + return 0; +} + +static void +mlxsw_sp_acl_erp_table_free(struct mlxsw_sp_acl_erp_core *erp_core, + unsigned int num_erps, + enum mlxsw_sp_acl_atcam_region_type region_type, + unsigned long index) +{ + unsigned long base_index; + unsigned int entry_size; + size_t size; + + entry_size = erp_core->erpt_entries_size[region_type]; + base_index = index + MLXSW_SP_ACL_ERP_GENALLOC_OFFSET; + size = num_erps / erp_core->num_erp_banks 
* entry_size; + gen_pool_free(erp_core->erp_tables, base_index, size); +} + +static struct mlxsw_sp_acl_erp * +mlxsw_sp_acl_erp_table_master_rp(struct mlxsw_sp_acl_erp_table *erp_table) +{ + if (!list_is_singular(&erp_table->atcam_erps_list)) + return NULL; + + return list_first_entry(&erp_table->atcam_erps_list, + struct mlxsw_sp_acl_erp, list); +} + +static int mlxsw_sp_acl_erp_index_get(struct mlxsw_sp_acl_erp_table *erp_table, + u8 *p_index) +{ + u8 index; + + index = find_first_zero_bit(erp_table->erp_index_bitmap, + erp_table->num_max_atcam_erps); + if (index < erp_table->num_max_atcam_erps) { + __set_bit(index, erp_table->erp_index_bitmap); + *p_index = index; + return 0; + } + + return -ENOBUFS; +} + +static void mlxsw_sp_acl_erp_index_put(struct mlxsw_sp_acl_erp_table *erp_table, + u8 index) +{ + __clear_bit(index, erp_table->erp_index_bitmap); +} + +static void +mlxsw_sp_acl_erp_table_locate(const struct mlxsw_sp_acl_erp_table *erp_table, + const struct mlxsw_sp_acl_erp *erp, + u8 *p_erpt_bank, u8 *p_erpt_index) +{ + unsigned int entry_size = mlxsw_sp_acl_erp_table_entry_size(erp_table); + struct mlxsw_sp_acl_erp_core *erp_core = erp_table->erp_core; + unsigned int row; + + *p_erpt_bank = erp->index % erp_core->num_erp_banks; + row = erp->index / erp_core->num_erp_banks; + *p_erpt_index = erp_table->base_index + row * entry_size; +} + +static int +mlxsw_sp_acl_erp_table_erp_add(struct mlxsw_sp_acl_erp_table *erp_table, + struct mlxsw_sp_acl_erp *erp) +{ + struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp; + enum mlxsw_reg_perpt_key_size key_size; + char perpt_pl[MLXSW_REG_PERPT_LEN]; + u8 erpt_bank, erpt_index; + + mlxsw_sp_acl_erp_table_locate(erp_table, erp, &erpt_bank, &erpt_index); + key_size = (enum mlxsw_reg_perpt_key_size) erp_table->aregion->type; + mlxsw_reg_perpt_pack(perpt_pl, erpt_bank, erpt_index, key_size, erp->id, + 0, erp_table->base_index, erp->index, + erp->key.mask); + mlxsw_reg_perpt_erp_vector_pack(perpt_pl, erp_table->erp_index_bitmap, + MLXSW_SP_ACL_ERP_MAX_PER_REGION); + mlxsw_reg_perpt_erp_vector_set(perpt_pl, erp->index, true); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(perpt), perpt_pl); +} + +static void mlxsw_sp_acl_erp_table_erp_del(struct mlxsw_sp_acl_erp *erp) +{ + char empty_mask[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN] = { 0 }; + struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table; + struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp; + enum mlxsw_reg_perpt_key_size key_size; + char perpt_pl[MLXSW_REG_PERPT_LEN]; + u8 erpt_bank, erpt_index; + + mlxsw_sp_acl_erp_table_locate(erp_table, erp, &erpt_bank, &erpt_index); + key_size = (enum mlxsw_reg_perpt_key_size) erp_table->aregion->type; + mlxsw_reg_perpt_pack(perpt_pl, erpt_bank, erpt_index, key_size, erp->id, + 0, erp_table->base_index, erp->index, empty_mask); + mlxsw_reg_perpt_erp_vector_pack(perpt_pl, erp_table->erp_index_bitmap, + MLXSW_SP_ACL_ERP_MAX_PER_REGION); + mlxsw_reg_perpt_erp_vector_set(perpt_pl, erp->index, false); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(perpt), perpt_pl); +} + +static int +mlxsw_sp_acl_erp_table_enable(struct mlxsw_sp_acl_erp_table *erp_table, + bool ctcam_le) +{ + struct mlxsw_sp_acl_tcam_region *region = erp_table->aregion->region; + struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp; + char pererp_pl[MLXSW_REG_PERERP_LEN]; + + mlxsw_reg_pererp_pack(pererp_pl, region->id, ctcam_le, true, 0, + erp_table->base_index, 0); + mlxsw_reg_pererp_erp_vector_pack(pererp_pl, erp_table->erp_index_bitmap, + MLXSW_SP_ACL_ERP_MAX_PER_REGION); 
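+ /* ctcam_le also enables a C-TCAM lookup for the region; it is set whenever C-TCAM eRPs are in use */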
+ + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl); +} + +static void +mlxsw_sp_acl_erp_table_disable(struct mlxsw_sp_acl_erp_table *erp_table) +{ + struct mlxsw_sp_acl_tcam_region *region = erp_table->aregion->region; + struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp; + char pererp_pl[MLXSW_REG_PERERP_LEN]; + struct mlxsw_sp_acl_erp *master_rp; + + master_rp = mlxsw_sp_acl_erp_table_master_rp(erp_table); + /* It is possible we do not have a master RP when we disable the + * table; this happens when there are no rules in the A-TCAM and the + * last C-TCAM rule is deleted + */ + mlxsw_reg_pererp_pack(pererp_pl, region->id, false, false, 0, 0, + master_rp ? master_rp->id : 0); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl); +} + +static int +mlxsw_sp_acl_erp_table_relocate(struct mlxsw_sp_acl_erp_table *erp_table) +{ + struct mlxsw_sp_acl_erp *erp; + int err; + + list_for_each_entry(erp, &erp_table->atcam_erps_list, list) { + err = mlxsw_sp_acl_erp_table_erp_add(erp_table, erp); + if (err) + goto err_table_erp_add; + } + + return 0; + +err_table_erp_add: + list_for_each_entry_continue_reverse(erp, &erp_table->atcam_erps_list, + list) + mlxsw_sp_acl_erp_table_erp_del(erp); + return err; +} + +static int +mlxsw_sp_acl_erp_table_expand(struct mlxsw_sp_acl_erp_table *erp_table) +{ + unsigned int num_erps, old_num_erps = erp_table->num_max_atcam_erps; + struct mlxsw_sp_acl_erp_core *erp_core = erp_table->erp_core; + unsigned long old_base_index = erp_table->base_index; + bool ctcam_le = erp_table->num_ctcam_erps > 0; + int err; + + if (erp_table->num_atcam_erps < erp_table->num_max_atcam_erps) + return 0; + + if (erp_table->num_max_atcam_erps == MLXSW_SP_ACL_ERP_MAX_PER_REGION) + return -ENOBUFS; + + num_erps = old_num_erps + erp_core->num_erp_banks; + err = mlxsw_sp_acl_erp_table_alloc(erp_core, num_erps, + erp_table->aregion->type, + &erp_table->base_index); + if (err) + return err; + erp_table->num_max_atcam_erps = num_erps; + + err = mlxsw_sp_acl_erp_table_relocate(erp_table); + if (err) + goto err_table_relocate; + + err = mlxsw_sp_acl_erp_table_enable(erp_table, ctcam_le); + if (err) + goto err_table_enable; + + mlxsw_sp_acl_erp_table_free(erp_core, old_num_erps, + erp_table->aregion->type, old_base_index); + + return 0; + +err_table_enable: +err_table_relocate: + erp_table->num_max_atcam_erps = old_num_erps; + mlxsw_sp_acl_erp_table_free(erp_core, num_erps, + erp_table->aregion->type, + erp_table->base_index); + erp_table->base_index = old_base_index; + return err; +} + +static int +mlxsw_sp_acl_erp_region_table_trans(struct mlxsw_sp_acl_erp_table *erp_table) +{ + struct mlxsw_sp_acl_erp_core *erp_core = erp_table->erp_core; + struct mlxsw_sp_acl_erp *master_rp; + int err; + + /* Initially, allocate a single eRP row. 
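A row consists of one entry in each eRP bank. 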
Expand later as needed */ + err = mlxsw_sp_acl_erp_table_alloc(erp_core, erp_core->num_erp_banks, + erp_table->aregion->type, + &erp_table->base_index); + if (err) + return err; + erp_table->num_max_atcam_erps = erp_core->num_erp_banks; + + /* Transition the sole RP currently configured (the master RP) + * to the eRP table + */ + master_rp = mlxsw_sp_acl_erp_table_master_rp(erp_table); + if (!master_rp) { + err = -EINVAL; + goto err_table_master_rp; + } + + /* Maintain the same eRP bank for the master RP, so that we + * wouldn't need to update the bloom filter + */ + master_rp->index = master_rp->index % erp_core->num_erp_banks; + __set_bit(master_rp->index, erp_table->erp_index_bitmap); + + err = mlxsw_sp_acl_erp_table_erp_add(erp_table, master_rp); + if (err) + goto err_table_master_rp_add; + + err = mlxsw_sp_acl_erp_table_enable(erp_table, false); + if (err) + goto err_table_enable; + + return 0; + +err_table_enable: + mlxsw_sp_acl_erp_table_erp_del(master_rp); +err_table_master_rp_add: + __clear_bit(master_rp->index, erp_table->erp_index_bitmap); +err_table_master_rp: + mlxsw_sp_acl_erp_table_free(erp_core, erp_table->num_max_atcam_erps, + erp_table->aregion->type, + erp_table->base_index); + return err; +} + +static void +mlxsw_sp_acl_erp_region_master_mask_trans(struct mlxsw_sp_acl_erp_table *erp_table) +{ + struct mlxsw_sp_acl_erp_core *erp_core = erp_table->erp_core; + struct mlxsw_sp_acl_erp *master_rp; + + mlxsw_sp_acl_erp_table_disable(erp_table); + master_rp = mlxsw_sp_acl_erp_table_master_rp(erp_table); + if (!master_rp) + return; + mlxsw_sp_acl_erp_table_erp_del(master_rp); + __clear_bit(master_rp->index, erp_table->erp_index_bitmap); + mlxsw_sp_acl_erp_table_free(erp_core, erp_table->num_max_atcam_erps, + erp_table->aregion->type, + erp_table->base_index); +} + +static int +mlxsw_sp_acl_erp_region_erp_add(struct mlxsw_sp_acl_erp_table *erp_table, + struct mlxsw_sp_acl_erp *erp) +{ + struct mlxsw_sp_acl_tcam_region *region = erp_table->aregion->region; + struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp; + bool ctcam_le = erp_table->num_ctcam_erps > 0; + char pererp_pl[MLXSW_REG_PERERP_LEN]; + + mlxsw_reg_pererp_pack(pererp_pl, region->id, ctcam_le, true, 0, + erp_table->base_index, 0); + mlxsw_reg_pererp_erp_vector_pack(pererp_pl, erp_table->erp_index_bitmap, + MLXSW_SP_ACL_ERP_MAX_PER_REGION); + mlxsw_reg_pererp_erpt_vector_set(pererp_pl, erp->index, true); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl); +} + +static void mlxsw_sp_acl_erp_region_erp_del(struct mlxsw_sp_acl_erp *erp) +{ + struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table; + struct mlxsw_sp_acl_tcam_region *region = erp_table->aregion->region; + struct mlxsw_sp *mlxsw_sp = erp_table->erp_core->mlxsw_sp; + bool ctcam_le = erp_table->num_ctcam_erps > 0; + char pererp_pl[MLXSW_REG_PERERP_LEN]; + + mlxsw_reg_pererp_pack(pererp_pl, region->id, ctcam_le, true, 0, + erp_table->base_index, 0); + mlxsw_reg_pererp_erp_vector_pack(pererp_pl, erp_table->erp_index_bitmap, + MLXSW_SP_ACL_ERP_MAX_PER_REGION); + mlxsw_reg_pererp_erpt_vector_set(pererp_pl, erp->index, false); + + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl); +} + +static int +mlxsw_sp_acl_erp_region_ctcam_enable(struct mlxsw_sp_acl_erp_table *erp_table) +{ + /* No need to re-enable lookup in the C-TCAM */ + if (erp_table->num_ctcam_erps > 1) + return 0; + + return mlxsw_sp_acl_erp_table_enable(erp_table, true); +} + +static void +mlxsw_sp_acl_erp_region_ctcam_disable(struct mlxsw_sp_acl_erp_table 
*erp_table) +{ + /* Only disable C-TCAM lookup when the last C-TCAM eRP is deleted */ + if (erp_table->num_ctcam_erps > 1) + return; + + mlxsw_sp_acl_erp_table_enable(erp_table, false); +} + +static void +mlxsw_sp_acl_erp_ctcam_table_ops_set(struct mlxsw_sp_acl_erp_table *erp_table) +{ + switch (erp_table->num_atcam_erps) { + case 2: + /* Keep using the eRP table, but correctly set the + * operations pointer so that when an A-TCAM eRP is + * deleted we will transition to use the master mask + */ + erp_table->ops = &erp_two_masks_ops; + break; + case 1: + /* We only kept the eRP table because we had C-TCAM + * eRPs in use. Now that the last C-TCAM eRP is gone we + * can stop using the table and transition to use the + * master mask + */ + mlxsw_sp_acl_erp_region_master_mask_trans(erp_table); + erp_table->ops = &erp_single_mask_ops; + break; + case 0: + /* There are no more eRPs of any kind used by the region + * so free its eRP table and transition to the initial state + */ + mlxsw_sp_acl_erp_table_disable(erp_table); + mlxsw_sp_acl_erp_table_free(erp_table->erp_core, + erp_table->num_max_atcam_erps, + erp_table->aregion->type, + erp_table->base_index); + erp_table->ops = &erp_no_mask_ops; + break; + default: + break; + } +} + +static struct mlxsw_sp_acl_erp * +__mlxsw_sp_acl_erp_ctcam_mask_create(struct mlxsw_sp_acl_erp_table *erp_table, + struct mlxsw_sp_acl_erp_key *key) +{ + struct mlxsw_sp_acl_erp *erp; + int err; + + erp = kzalloc(sizeof(*erp), GFP_KERNEL); + if (!erp) + return ERR_PTR(-ENOMEM); + + memcpy(&erp->key, key, sizeof(*key)); + bitmap_from_arr32(erp->mask_bitmap, (u32 *) key->mask, + MLXSW_SP_ACL_TCAM_MASK_LEN); + refcount_set(&erp->refcnt, 1); + erp_table->num_ctcam_erps++; + erp->erp_table = erp_table; + + err = mlxsw_sp_acl_erp_master_mask_set(erp_table, erp); + if (err) + goto err_master_mask_set; + + err = rhashtable_insert_fast(&erp_table->erp_ht, &erp->ht_node, + mlxsw_sp_acl_erp_ht_params); + if (err) + goto err_rhashtable_insert; + + err = mlxsw_sp_acl_erp_region_ctcam_enable(erp_table); + if (err) + goto err_erp_region_ctcam_enable; + + /* When C-TCAM is used, the eRP table must be used */ + erp_table->ops = &erp_multiple_masks_ops; + + return erp; + +err_erp_region_ctcam_enable: + rhashtable_remove_fast(&erp_table->erp_ht, &erp->ht_node, + mlxsw_sp_acl_erp_ht_params); +err_rhashtable_insert: + mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp); +err_master_mask_set: + erp_table->num_ctcam_erps--; + kfree(erp); + return ERR_PTR(err); +} + +static struct mlxsw_sp_acl_erp * +mlxsw_sp_acl_erp_ctcam_mask_create(struct mlxsw_sp_acl_erp_table *erp_table, + struct mlxsw_sp_acl_erp_key *key) +{ + struct mlxsw_sp_acl_erp *erp; + int err; + + /* There is a special situation where we need to spill rules + * into the C-TCAM, yet the region is still using a master + * mask and thus not performing a lookup in the C-TCAM. This + * can happen when two rules that differ only in priority - and + * thus share the same key - are programmed. 
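Since both rules use the same mask, only one of them can be inserted into the A-TCAM's hash table. 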
In this case + * we transition the region to use an eRP table + */ + err = mlxsw_sp_acl_erp_region_table_trans(erp_table); + if (err) + return ERR_PTR(err); + + erp = __mlxsw_sp_acl_erp_ctcam_mask_create(erp_table, key); + if (IS_ERR(erp)) { + err = PTR_ERR(erp); + goto err_erp_create; + } + + return erp; + +err_erp_create: + mlxsw_sp_acl_erp_region_master_mask_trans(erp_table); + return ERR_PTR(err); +} + +static void +mlxsw_sp_acl_erp_ctcam_mask_destroy(struct mlxsw_sp_acl_erp *erp) +{ + struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table; + + mlxsw_sp_acl_erp_region_ctcam_disable(erp_table); + rhashtable_remove_fast(&erp_table->erp_ht, &erp->ht_node, + mlxsw_sp_acl_erp_ht_params); + mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp); + erp_table->num_ctcam_erps--; + kfree(erp); + + /* Once the last C-TCAM eRP was destroyed, the state we + * transition to depends on the number of A-TCAM eRPs currently + * in use + */ + if (erp_table->num_ctcam_erps > 0) + return; + mlxsw_sp_acl_erp_ctcam_table_ops_set(erp_table); +} + +static struct mlxsw_sp_acl_erp * +mlxsw_sp_acl_erp_mask_create(struct mlxsw_sp_acl_erp_table *erp_table, + struct mlxsw_sp_acl_erp_key *key) +{ + struct mlxsw_sp_acl_erp *erp; + int err; + + if (key->ctcam) + return __mlxsw_sp_acl_erp_ctcam_mask_create(erp_table, key); + + /* Expand the eRP table for the new eRP, if needed */ + err = mlxsw_sp_acl_erp_table_expand(erp_table); + if (err) + return ERR_PTR(err); + + erp = mlxsw_sp_acl_erp_generic_create(erp_table, key); + if (IS_ERR(erp)) + return erp; + + err = mlxsw_sp_acl_erp_index_get(erp_table, &erp->index); + if (err) + goto err_erp_index_get; + + err = mlxsw_sp_acl_erp_table_erp_add(erp_table, erp); + if (err) + goto err_table_erp_add; + + err = mlxsw_sp_acl_erp_region_erp_add(erp_table, erp); + if (err) + goto err_region_erp_add; + + erp_table->ops = &erp_multiple_masks_ops; + + return erp; + +err_region_erp_add: + mlxsw_sp_acl_erp_table_erp_del(erp); +err_table_erp_add: + mlxsw_sp_acl_erp_index_put(erp_table, erp->index); +err_erp_index_get: + mlxsw_sp_acl_erp_generic_destroy(erp); + return ERR_PTR(err); +} + +static void +mlxsw_sp_acl_erp_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table, + struct mlxsw_sp_acl_erp *erp) +{ + if (erp->key.ctcam) + return mlxsw_sp_acl_erp_ctcam_mask_destroy(erp); + + mlxsw_sp_acl_erp_region_erp_del(erp); + mlxsw_sp_acl_erp_table_erp_del(erp); + mlxsw_sp_acl_erp_index_put(erp_table, erp->index); + mlxsw_sp_acl_erp_generic_destroy(erp); + + if (erp_table->num_atcam_erps == 2 && erp_table->num_ctcam_erps == 0) + erp_table->ops = &erp_two_masks_ops; +} + +static struct mlxsw_sp_acl_erp * +mlxsw_sp_acl_erp_second_mask_create(struct mlxsw_sp_acl_erp_table *erp_table, + struct mlxsw_sp_acl_erp_key *key) +{ + struct mlxsw_sp_acl_erp *erp; + int err; + + if (key->ctcam) + return mlxsw_sp_acl_erp_ctcam_mask_create(erp_table, key); + + /* Transition to use eRP table instead of master mask */ + err = mlxsw_sp_acl_erp_region_table_trans(erp_table); + if (err) + return ERR_PTR(err); + + erp = mlxsw_sp_acl_erp_generic_create(erp_table, key); + if (IS_ERR(erp)) { + err = PTR_ERR(erp); + goto err_erp_create; + } + + err = mlxsw_sp_acl_erp_index_get(erp_table, &erp->index); + if (err) + goto err_erp_index_get; + + err = mlxsw_sp_acl_erp_table_erp_add(erp_table, erp); + if (err) + goto err_table_erp_add; + + err = mlxsw_sp_acl_erp_region_erp_add(erp_table, erp); + if (err) + goto err_region_erp_add; + + erp_table->ops = &erp_two_masks_ops; + + return erp; + +err_region_erp_add: + 
mlxsw_sp_acl_erp_table_erp_del(erp); +err_table_erp_add: + mlxsw_sp_acl_erp_index_put(erp_table, erp->index); +err_erp_index_get: + mlxsw_sp_acl_erp_generic_destroy(erp); +err_erp_create: + mlxsw_sp_acl_erp_region_master_mask_trans(erp_table); + return ERR_PTR(err); +} + +static void +mlxsw_sp_acl_erp_second_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table, + struct mlxsw_sp_acl_erp *erp) +{ + if (erp->key.ctcam) + return mlxsw_sp_acl_erp_ctcam_mask_destroy(erp); + + mlxsw_sp_acl_erp_region_erp_del(erp); + mlxsw_sp_acl_erp_table_erp_del(erp); + mlxsw_sp_acl_erp_index_put(erp_table, erp->index); + mlxsw_sp_acl_erp_generic_destroy(erp); + /* Transition to use master mask instead of eRP table */ + mlxsw_sp_acl_erp_region_master_mask_trans(erp_table); + + erp_table->ops = &erp_single_mask_ops; +} + +static struct mlxsw_sp_acl_erp * +mlxsw_sp_acl_erp_first_mask_create(struct mlxsw_sp_acl_erp_table *erp_table, + struct mlxsw_sp_acl_erp_key *key) +{ + struct mlxsw_sp_acl_erp *erp; + + if (key->ctcam) + return ERR_PTR(-EINVAL); + + erp = mlxsw_sp_acl_erp_generic_create(erp_table, key); + if (IS_ERR(erp)) + return erp; + + erp_table->ops = &erp_single_mask_ops; + + return erp; +} + +static void +mlxsw_sp_acl_erp_first_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table, + struct mlxsw_sp_acl_erp *erp) +{ + mlxsw_sp_acl_erp_generic_destroy(erp); + erp_table->ops = &erp_no_mask_ops; +} + +static void +mlxsw_sp_acl_erp_no_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table, + struct mlxsw_sp_acl_erp *erp) +{ + WARN_ON(1); +} + +struct mlxsw_sp_acl_erp * +mlxsw_sp_acl_erp_get(struct mlxsw_sp_acl_atcam_region *aregion, + const char *mask, bool ctcam) +{ + struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table; + struct mlxsw_sp_acl_erp_key key; + struct mlxsw_sp_acl_erp *erp; + + /* eRPs are allocated from a shared resource, but currently all + * allocations are done under RTNL. 
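+ * This means no dedicated lock is taken for the eRP hashtable or the shared gen_pool.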
+ */ + ASSERT_RTNL(); + + memcpy(key.mask, mask, MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN); + key.ctcam = ctcam; + erp = rhashtable_lookup_fast(&erp_table->erp_ht, &key, + mlxsw_sp_acl_erp_ht_params); + if (erp) { + refcount_inc(&erp->refcnt); + return erp; + } + + return erp_table->ops->erp_create(erp_table, &key); +} + +void mlxsw_sp_acl_erp_put(struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_erp *erp) +{ + struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table; + + ASSERT_RTNL(); + + if (!refcount_dec_and_test(&erp->refcnt)) + return; + + erp_table->ops->erp_destroy(erp_table, erp); +} + +static struct mlxsw_sp_acl_erp_table * +mlxsw_sp_acl_erp_table_create(struct mlxsw_sp_acl_atcam_region *aregion) +{ + struct mlxsw_sp_acl_erp_table *erp_table; + int err; + + erp_table = kzalloc(sizeof(*erp_table), GFP_KERNEL); + if (!erp_table) + return ERR_PTR(-ENOMEM); + + err = rhashtable_init(&erp_table->erp_ht, &mlxsw_sp_acl_erp_ht_params); + if (err) + goto err_rhashtable_init; + + erp_table->erp_core = aregion->atcam->erp_core; + erp_table->ops = &erp_no_mask_ops; + INIT_LIST_HEAD(&erp_table->atcam_erps_list); + erp_table->aregion = aregion; + + return erp_table; + +err_rhashtable_init: + kfree(erp_table); + return ERR_PTR(err); +} + +static void +mlxsw_sp_acl_erp_table_destroy(struct mlxsw_sp_acl_erp_table *erp_table) +{ + WARN_ON(!list_empty(&erp_table->atcam_erps_list)); + rhashtable_destroy(&erp_table->erp_ht); + kfree(erp_table); +} + +static int +mlxsw_sp_acl_erp_master_mask_init(struct mlxsw_sp_acl_atcam_region *aregion) +{ + struct mlxsw_sp *mlxsw_sp = aregion->region->mlxsw_sp; + char percr_pl[MLXSW_REG_PERCR_LEN]; + + mlxsw_reg_percr_pack(percr_pl, aregion->region->id); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(percr), percr_pl); +} + +static int +mlxsw_sp_acl_erp_region_param_init(struct mlxsw_sp_acl_atcam_region *aregion) +{ + struct mlxsw_sp *mlxsw_sp = aregion->region->mlxsw_sp; + char pererp_pl[MLXSW_REG_PERERP_LEN]; + + mlxsw_reg_pererp_pack(pererp_pl, aregion->region->id, false, false, 0, + 0, 0); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl); +} + +int mlxsw_sp_acl_erp_region_init(struct mlxsw_sp_acl_atcam_region *aregion) +{ + struct mlxsw_sp_acl_erp_table *erp_table; + int err; + + erp_table = mlxsw_sp_acl_erp_table_create(aregion); + if (IS_ERR(erp_table)) + return PTR_ERR(erp_table); + aregion->erp_table = erp_table; + + /* Initialize the region's master mask to all zeroes */ + err = mlxsw_sp_acl_erp_master_mask_init(aregion); + if (err) + goto err_erp_master_mask_init; + + /* Initialize the region to not use the eRP table */ + err = mlxsw_sp_acl_erp_region_param_init(aregion); + if (err) + goto err_erp_region_param_init; + + return 0; + +err_erp_region_param_init: +err_erp_master_mask_init: + mlxsw_sp_acl_erp_table_destroy(erp_table); + return err; +} + +void mlxsw_sp_acl_erp_region_fini(struct mlxsw_sp_acl_atcam_region *aregion) +{ + mlxsw_sp_acl_erp_table_destroy(aregion->erp_table); +} + +static int +mlxsw_sp_acl_erp_tables_sizes_query(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_erp_core *erp_core) +{ + unsigned int size; + + if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_ERPT_ENTRIES_2KB) || + !MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_ERPT_ENTRIES_4KB) || + !MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_ERPT_ENTRIES_8KB) || + !MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_ERPT_ENTRIES_12KB)) + return -EIO; + + size = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_ERPT_ENTRIES_2KB); + 
erp_core->erpt_entries_size[MLXSW_SP_ACL_ATCAM_REGION_TYPE_2KB] = size; + + size = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_ERPT_ENTRIES_4KB); + erp_core->erpt_entries_size[MLXSW_SP_ACL_ATCAM_REGION_TYPE_4KB] = size; + + size = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_ERPT_ENTRIES_8KB); + erp_core->erpt_entries_size[MLXSW_SP_ACL_ATCAM_REGION_TYPE_8KB] = size; + + size = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_ERPT_ENTRIES_12KB); + erp_core->erpt_entries_size[MLXSW_SP_ACL_ATCAM_REGION_TYPE_12KB] = size; + + return 0; +} + +static int mlxsw_sp_acl_erp_tables_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_erp_core *erp_core) +{ + unsigned int erpt_bank_size; + int err; + + if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_ERPT_BANK_SIZE) || + !MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_ERPT_BANKS)) + return -EIO; + erpt_bank_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, + ACL_MAX_ERPT_BANK_SIZE); + erp_core->num_erp_banks = MLXSW_CORE_RES_GET(mlxsw_sp->core, + ACL_MAX_ERPT_BANKS); + + erp_core->erp_tables = gen_pool_create(0, -1); + if (!erp_core->erp_tables) + return -ENOMEM; + gen_pool_set_algo(erp_core->erp_tables, gen_pool_best_fit, NULL); + + err = gen_pool_add(erp_core->erp_tables, + MLXSW_SP_ACL_ERP_GENALLOC_OFFSET, erpt_bank_size, + -1); + if (err) + goto err_gen_pool_add; + + /* Different regions require masks of different sizes */ + err = mlxsw_sp_acl_erp_tables_sizes_query(mlxsw_sp, erp_core); + if (err) + goto err_erp_tables_sizes_query; + + return 0; + +err_erp_tables_sizes_query: +err_gen_pool_add: + gen_pool_destroy(erp_core->erp_tables); + return err; +} + +static void mlxsw_sp_acl_erp_tables_fini(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_erp_core *erp_core) +{ + gen_pool_destroy(erp_core->erp_tables); +} + +int mlxsw_sp_acl_erps_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam *atcam) +{ + struct mlxsw_sp_acl_erp_core *erp_core; + int err; + + erp_core = kzalloc(sizeof(*erp_core), GFP_KERNEL); + if (!erp_core) + return -ENOMEM; + erp_core->mlxsw_sp = mlxsw_sp; + atcam->erp_core = erp_core; + + err = mlxsw_sp_acl_erp_tables_init(mlxsw_sp, erp_core); + if (err) + goto err_erp_tables_init; + + return 0; + +err_erp_tables_init: + kfree(erp_core); + return err; +} + +void mlxsw_sp_acl_erps_fini(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam *atcam) +{ + mlxsw_sp_acl_erp_tables_fini(mlxsw_sp, atcam->erp_core); + kfree(atcam->erp_core); +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c index 510ce48d87f7..bca0def756cd 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c @@ -37,10 +37,8 @@ #include "core_acl_flex_actions.h" #include "spectrum_span.h" -#define MLXSW_SP_KVDL_ACT_EXT_SIZE 1 - static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index, - char *enc_actions, bool is_first) + char *enc_actions, bool is_first, bool ca) { struct mlxsw_sp *mlxsw_sp = priv; char pefa_pl[MLXSW_REG_PEFA_LEN]; @@ -53,11 +51,11 @@ static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index, if (is_first) return 0; - err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ACT_EXT_SIZE, - &kvdl_index); + err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET, + 1, &kvdl_index); if (err) return err; - mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, enc_actions); + mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, ca, enc_actions); err = mlxsw_reg_write(mlxsw_sp->core, 
MLXSW_REG(pefa), pefa_pl); if (err) goto err_pefa_write; @@ -65,10 +63,25 @@ static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index, return 0; err_pefa_write: - mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index); + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET, + 1, kvdl_index); return err; } +static int mlxsw_sp1_act_kvdl_set_add(void *priv, u32 *p_kvdl_index, + char *enc_actions, bool is_first) +{ + return mlxsw_sp_act_kvdl_set_add(priv, p_kvdl_index, enc_actions, + is_first, false); +} + +static int mlxsw_sp2_act_kvdl_set_add(void *priv, u32 *p_kvdl_index, + char *enc_actions, bool is_first) +{ + return mlxsw_sp_act_kvdl_set_add(priv, p_kvdl_index, enc_actions, + is_first, true); +} + static void mlxsw_sp_act_kvdl_set_del(void *priv, u32 kvdl_index, bool is_first) { @@ -76,7 +89,29 @@ static void mlxsw_sp_act_kvdl_set_del(void *priv, u32 kvdl_index, if (is_first) return; - mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index); + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET, + 1, kvdl_index); +} + +static int mlxsw_sp1_act_kvdl_set_activity_get(void *priv, u32 kvdl_index, + bool *activity) +{ + return -EOPNOTSUPP; +} + +static int mlxsw_sp2_act_kvdl_set_activity_get(void *priv, u32 kvdl_index, + bool *activity) +{ + struct mlxsw_sp *mlxsw_sp = priv; + char pefa_pl[MLXSW_REG_PEFA_LEN]; + int err; + + mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, true, NULL); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl); + if (err) + return err; + mlxsw_reg_pefa_unpack(pefa_pl, activity); + return 0; } static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index, @@ -87,7 +122,8 @@ static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index, u32 kvdl_index; int err; - err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &kvdl_index); + err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_PBS, + 1, &kvdl_index); if (err) return err; mlxsw_reg_ppbs_pack(ppbs_pl, kvdl_index, local_port); @@ -98,7 +134,8 @@ static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index, return 0; err_ppbs_write: - mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index); + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_PBS, + 1, kvdl_index); return err; } @@ -106,7 +143,8 @@ static void mlxsw_sp_act_kvdl_fwd_entry_del(void *priv, u32 kvdl_index) { struct mlxsw_sp *mlxsw_sp = priv; - mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index); + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_PBS, + 1, kvdl_index); } static int @@ -154,22 +192,36 @@ mlxsw_sp_act_mirror_del(void *priv, u8 local_in_port, int span_id, bool ingress) mlxsw_sp_span_mirror_del(in_port, span_id, type, false); } -static const struct mlxsw_afa_ops mlxsw_sp_act_afa_ops = { - .kvdl_set_add = mlxsw_sp_act_kvdl_set_add, +const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops = { + .kvdl_set_add = mlxsw_sp1_act_kvdl_set_add, + .kvdl_set_del = mlxsw_sp_act_kvdl_set_del, + .kvdl_set_activity_get = mlxsw_sp1_act_kvdl_set_activity_get, + .kvdl_fwd_entry_add = mlxsw_sp_act_kvdl_fwd_entry_add, + .kvdl_fwd_entry_del = mlxsw_sp_act_kvdl_fwd_entry_del, + .counter_index_get = mlxsw_sp_act_counter_index_get, + .counter_index_put = mlxsw_sp_act_counter_index_put, + .mirror_add = mlxsw_sp_act_mirror_add, + .mirror_del = mlxsw_sp_act_mirror_del, +}; + +const struct mlxsw_afa_ops mlxsw_sp2_act_afa_ops = { + .kvdl_set_add = mlxsw_sp2_act_kvdl_set_add, .kvdl_set_del = mlxsw_sp_act_kvdl_set_del, + .kvdl_set_activity_get = mlxsw_sp2_act_kvdl_set_activity_get, .kvdl_fwd_entry_add = mlxsw_sp_act_kvdl_fwd_entry_add, .kvdl_fwd_entry_del = 
mlxsw_sp_act_kvdl_fwd_entry_del, .counter_index_get = mlxsw_sp_act_counter_index_get, .counter_index_put = mlxsw_sp_act_counter_index_put, .mirror_add = mlxsw_sp_act_mirror_add, .mirror_del = mlxsw_sp_act_mirror_del, + .dummy_first_set = true, }; int mlxsw_sp_afa_init(struct mlxsw_sp *mlxsw_sp) { mlxsw_sp->afa = mlxsw_afa_create(MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_ACTIONS_PER_SET), - &mlxsw_sp_act_afa_ops, mlxsw_sp); + mlxsw_sp->afa_ops, mlxsw_sp); return PTR_ERR_OR_ZERO(mlxsw_sp->afa); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c new file mode 100644 index 000000000000..aa8927cee376 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c @@ -0,0 +1,316 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c + * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017-2018 Jiri Pirko <jiri@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include "spectrum.h" +#include "item.h" +#include "core_acl_flex_keys.h" + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = { + MLXSW_AFK_ELEMENT_INST_BUF(DMAC_32_47, 0x00, 2), + MLXSW_AFK_ELEMENT_INST_BUF(DMAC_0_31, 0x02, 4), + MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3), + MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12), + MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 8), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = { + MLXSW_AFK_ELEMENT_INST_BUF(SMAC_32_47, 0x00, 2), + MLXSW_AFK_ELEMENT_INST_BUF(SMAC_0_31, 0x02, 4), + MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3), + MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12), + MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 8), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac_ex[] = { + MLXSW_AFK_ELEMENT_INST_BUF(SMAC_32_47, 0x02, 2), + MLXSW_AFK_ELEMENT_INST_BUF(SMAC_0_31, 0x04, 4), + MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x0C, 0, 16), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_sip[] = { + MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x00, 4), + MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8), + MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 8), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = { + MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x00, 4), + MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8), + MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 8), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4[] = { + MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x00, 4), + MLXSW_AFK_ELEMENT_INST_U32(IP_ECN, 0x04, 4, 2), + MLXSW_AFK_ELEMENT_INST_U32(IP_TTL_, 0x04, 24, 8), + MLXSW_AFK_ELEMENT_INST_U32(IP_DSCP, 0x08, 0, 6), + MLXSW_AFK_ELEMENT_INST_U32(TCP_FLAGS, 0x08, 8, 9), /* TCP_CONTROL+TCP_ECN */ +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_ex[] = { + MLXSW_AFK_ELEMENT_INST_U32(VID, 0x00, 0, 12), + MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 29, 3), + MLXSW_AFK_ELEMENT_INST_U32(SRC_L4_PORT, 0x08, 0, 16), + MLXSW_AFK_ELEMENT_INST_U32(DST_L4_PORT, 0x0C, 0, 16), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_dip[] = { + MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_32_63, 0x00, 4), + MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x04, 4), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_ex1[] = { + MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_96_127, 0x00, 4), + MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_64_95, 0x04, 4), + MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip[] = { + MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_32_63, 0x00, 4), + MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x04, 4), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip_ex[] = { + MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_96_127, 0x00, 4), + MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_64_95, 0x04, 4), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_packet_type[] = { + MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x00, 0, 16), +}; + +static const struct mlxsw_afk_block mlxsw_sp1_afk_blocks[] = { + MLXSW_AFK_BLOCK(0x10, mlxsw_sp_afk_element_info_l2_dmac), + MLXSW_AFK_BLOCK(0x11, mlxsw_sp_afk_element_info_l2_smac), + MLXSW_AFK_BLOCK(0x12, mlxsw_sp_afk_element_info_l2_smac_ex), + MLXSW_AFK_BLOCK(0x30, mlxsw_sp_afk_element_info_ipv4_sip), + MLXSW_AFK_BLOCK(0x31, mlxsw_sp_afk_element_info_ipv4_dip), + MLXSW_AFK_BLOCK(0x32, 
mlxsw_sp_afk_element_info_ipv4), + MLXSW_AFK_BLOCK(0x33, mlxsw_sp_afk_element_info_ipv4_ex), + MLXSW_AFK_BLOCK(0x60, mlxsw_sp_afk_element_info_ipv6_dip), + MLXSW_AFK_BLOCK(0x65, mlxsw_sp_afk_element_info_ipv6_ex1), + MLXSW_AFK_BLOCK(0x62, mlxsw_sp_afk_element_info_ipv6_sip), + MLXSW_AFK_BLOCK(0x63, mlxsw_sp_afk_element_info_ipv6_sip_ex), + MLXSW_AFK_BLOCK(0xB0, mlxsw_sp_afk_element_info_packet_type), +}; + +#define MLXSW_SP1_AFK_KEY_BLOCK_SIZE 16 + +static void mlxsw_sp1_afk_encode_block(char *block, int block_index, + char *output) +{ + unsigned int offset = block_index * MLXSW_SP1_AFK_KEY_BLOCK_SIZE; + char *output_indexed = output + offset; + + memcpy(output_indexed, block, MLXSW_SP1_AFK_KEY_BLOCK_SIZE); +} + +const struct mlxsw_afk_ops mlxsw_sp1_afk_ops = { + .blocks = mlxsw_sp1_afk_blocks, + .blocks_count = ARRAY_SIZE(mlxsw_sp1_afk_blocks), + .encode_block = mlxsw_sp1_afk_encode_block, +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_0[] = { + MLXSW_AFK_ELEMENT_INST_BUF(DMAC_0_31, 0x04, 4), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_1[] = { + MLXSW_AFK_ELEMENT_INST_BUF(SMAC_0_31, 0x04, 4), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_2[] = { + MLXSW_AFK_ELEMENT_INST_BUF(SMAC_32_47, 0x04, 2), + MLXSW_AFK_ELEMENT_INST_BUF(DMAC_32_47, 0x06, 2), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_3[] = { + MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x00, 0, 3), + MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 16, 12), + MLXSW_AFK_ELEMENT_INST_BUF(DMAC_32_47, 0x06, 2), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_4[] = { + MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x00, 0, 3), + MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 16, 12), + MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x04, 0, 16), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_5[] = { + MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 16, 12), + MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x04, 0, 8), /* RX_ACL_SYSTEM_PORT */ +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_0[] = { + MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x04, 4), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_1[] = { + MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x04, 4), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_2[] = { + MLXSW_AFK_ELEMENT_INST_U32(IP_DSCP, 0x04, 0, 6), + MLXSW_AFK_ELEMENT_INST_U32(IP_ECN, 0x04, 6, 2), + MLXSW_AFK_ELEMENT_INST_U32(IP_TTL_, 0x04, 8, 8), + MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x04, 16, 8), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_0[] = { + MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_32_63, 0x04, 4), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_1[] = { + MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_64_95, 0x04, 4), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2[] = { + MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_96_127, 0x04, 4), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_3[] = { + MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_32_63, 0x04, 4), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_4[] = { + MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_64_95, 0x04, 4), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_5[] = { + MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_96_127, 0x04, 4), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l4_0[] = { + MLXSW_AFK_ELEMENT_INST_U32(SRC_L4_PORT, 0x04, 16, 16), + 
MLXSW_AFK_ELEMENT_INST_U32(DST_L4_PORT, 0x04, 0, 16), +}; + +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l4_2[] = { + MLXSW_AFK_ELEMENT_INST_U32(TCP_FLAGS, 0x04, 16, 9), /* TCP_CONTROL + TCP_ECN */ +}; + +static const struct mlxsw_afk_block mlxsw_sp2_afk_blocks[] = { + MLXSW_AFK_BLOCK(0x10, mlxsw_sp_afk_element_info_mac_0), + MLXSW_AFK_BLOCK(0x11, mlxsw_sp_afk_element_info_mac_1), + MLXSW_AFK_BLOCK(0x12, mlxsw_sp_afk_element_info_mac_2), + MLXSW_AFK_BLOCK(0x13, mlxsw_sp_afk_element_info_mac_3), + MLXSW_AFK_BLOCK(0x14, mlxsw_sp_afk_element_info_mac_4), + MLXSW_AFK_BLOCK(0x15, mlxsw_sp_afk_element_info_mac_5), + MLXSW_AFK_BLOCK(0x38, mlxsw_sp_afk_element_info_ipv4_0), + MLXSW_AFK_BLOCK(0x39, mlxsw_sp_afk_element_info_ipv4_1), + MLXSW_AFK_BLOCK(0x3A, mlxsw_sp_afk_element_info_ipv4_2), + MLXSW_AFK_BLOCK(0x40, mlxsw_sp_afk_element_info_ipv6_0), + MLXSW_AFK_BLOCK(0x41, mlxsw_sp_afk_element_info_ipv6_1), + MLXSW_AFK_BLOCK(0x42, mlxsw_sp_afk_element_info_ipv6_2), + MLXSW_AFK_BLOCK(0x43, mlxsw_sp_afk_element_info_ipv6_3), + MLXSW_AFK_BLOCK(0x44, mlxsw_sp_afk_element_info_ipv6_4), + MLXSW_AFK_BLOCK(0x45, mlxsw_sp_afk_element_info_ipv6_5), + MLXSW_AFK_BLOCK(0x90, mlxsw_sp_afk_element_info_l4_0), + MLXSW_AFK_BLOCK(0x92, mlxsw_sp_afk_element_info_l4_2), +}; + +#define MLXSW_SP2_AFK_BITS_PER_BLOCK 36 + +/* A block in Spectrum-2 is of the following form: + * + * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ + * | | | | | | | | | | | | | | | | | | | | | | | | | | | | |35|34|33|32| + * +-----------------------------------------------------------------------------------------------+ + * |31|30|29|28|27|26|25|24|23|22|21|20|19|18|17|16|15|14|13|12|11|10| 9| 8| 7| 6| 5| 4| 3| 2| 1| 0| + * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ + */ +MLXSW_ITEM64(sp2_afk, block, value, 0x00, 0, MLXSW_SP2_AFK_BITS_PER_BLOCK); + +/* The key / mask block layout in Spectrum-2 is of the following form: + * + * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ + * | | | | | | | | | | | | | | | | | block11_high | + * +-----------------------------------------------------------------------------------------------+ + * | block11_low | block10_high | + * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ + * ... 
+ */ + +struct mlxsw_sp2_afk_block_layout { + unsigned short offset; + struct mlxsw_item item; +}; + +#define MLXSW_SP2_AFK_BLOCK_LAYOUT(_block, _offset, _shift) \ + { \ + .offset = _offset, \ + { \ + .shift = _shift, \ + .size = {.bits = MLXSW_SP2_AFK_BITS_PER_BLOCK}, \ + .name = #_block, \ + } \ + } \ + +static const struct mlxsw_sp2_afk_block_layout mlxsw_sp2_afk_blocks_layout[] = { + MLXSW_SP2_AFK_BLOCK_LAYOUT(block0, 0x30, 0), + MLXSW_SP2_AFK_BLOCK_LAYOUT(block1, 0x2C, 4), + MLXSW_SP2_AFK_BLOCK_LAYOUT(block2, 0x28, 8), + MLXSW_SP2_AFK_BLOCK_LAYOUT(block3, 0x24, 12), + MLXSW_SP2_AFK_BLOCK_LAYOUT(block4, 0x20, 16), + MLXSW_SP2_AFK_BLOCK_LAYOUT(block5, 0x1C, 20), + MLXSW_SP2_AFK_BLOCK_LAYOUT(block6, 0x18, 24), + MLXSW_SP2_AFK_BLOCK_LAYOUT(block7, 0x14, 28), + MLXSW_SP2_AFK_BLOCK_LAYOUT(block8, 0x0C, 0), + MLXSW_SP2_AFK_BLOCK_LAYOUT(block9, 0x08, 4), + MLXSW_SP2_AFK_BLOCK_LAYOUT(block10, 0x04, 8), + MLXSW_SP2_AFK_BLOCK_LAYOUT(block11, 0x00, 12), +}; + +static void mlxsw_sp2_afk_encode_block(char *block, int block_index, + char *output) +{ + u64 block_value = mlxsw_sp2_afk_block_value_get(block); + const struct mlxsw_sp2_afk_block_layout *block_layout; + + if (WARN_ON(block_index < 0 || + block_index >= ARRAY_SIZE(mlxsw_sp2_afk_blocks_layout))) + return; + + block_layout = &mlxsw_sp2_afk_blocks_layout[block_index]; + __mlxsw_item_set64(output + block_layout->offset, + &block_layout->item, 0, block_value); +} + +const struct mlxsw_afk_ops mlxsw_sp2_afk_ops = { + .blocks = mlxsw_sp2_afk_blocks, + .blocks_count = ARRAY_SIZE(mlxsw_sp2_afk_blocks), + .encode_block = mlxsw_sp2_afk_encode_block, +}; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h deleted file mode 100644 index fb8031828454..000000000000 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h +++ /dev/null @@ -1,124 +0,0 @@ -/* - * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. - * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com> - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef _MLXSW_SPECTRUM_ACL_FLEX_KEYS_H -#define _MLXSW_SPECTRUM_ACL_FLEX_KEYS_H - -#include "core_acl_flex_keys.h" - -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = { - MLXSW_AFK_ELEMENT_INST_BUF(DMAC, 0x00, 6), - MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3), - MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12), - MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16), -}; - -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = { - MLXSW_AFK_ELEMENT_INST_BUF(SMAC, 0x00, 6), - MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3), - MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12), - MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16), -}; - -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac_ex[] = { - MLXSW_AFK_ELEMENT_INST_BUF(SMAC, 0x02, 6), - MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x0C, 0, 16), -}; - -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_sip[] = { - MLXSW_AFK_ELEMENT_INST_U32(SRC_IP4, 0x00, 0, 32), - MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8), - MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16), -}; - -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = { - MLXSW_AFK_ELEMENT_INST_U32(DST_IP4, 0x00, 0, 32), - MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8), - MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16), -}; - -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4[] = { - MLXSW_AFK_ELEMENT_INST_U32(SRC_IP4, 0x00, 0, 32), - MLXSW_AFK_ELEMENT_INST_U32(IP_ECN, 0x04, 4, 2), - MLXSW_AFK_ELEMENT_INST_U32(IP_TTL_, 0x04, 24, 8), - MLXSW_AFK_ELEMENT_INST_U32(IP_DSCP, 0x08, 0, 6), - MLXSW_AFK_ELEMENT_INST_U32(TCP_FLAGS, 0x08, 8, 9), /* TCP_CONTROL+TCP_ECN */ -}; - -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_ex[] = { - MLXSW_AFK_ELEMENT_INST_U32(VID, 0x00, 0, 12), - MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 29, 3), - MLXSW_AFK_ELEMENT_INST_U32(SRC_L4_PORT, 0x08, 0, 16), - MLXSW_AFK_ELEMENT_INST_U32(DST_L4_PORT, 0x0C, 0, 16), -}; - -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_dip[] = { - MLXSW_AFK_ELEMENT_INST_BUF(DST_IP6_LO, 0x00, 8), -}; - -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_ex1[] = { - MLXSW_AFK_ELEMENT_INST_BUF(DST_IP6_HI, 0x00, 8), - MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8), -}; - -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip[] = { - MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP6_LO, 0x00, 8), -}; - -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip_ex[] = { - MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP6_HI, 0x00, 8), -}; - -static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_packet_type[] = { - MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x00, 0, 16), -}; - -static const struct mlxsw_afk_block mlxsw_sp_afk_blocks[] = { - MLXSW_AFK_BLOCK(0x10, mlxsw_sp_afk_element_info_l2_dmac), - MLXSW_AFK_BLOCK(0x11, mlxsw_sp_afk_element_info_l2_smac), - MLXSW_AFK_BLOCK(0x12, mlxsw_sp_afk_element_info_l2_smac_ex), 
- MLXSW_AFK_BLOCK(0x30, mlxsw_sp_afk_element_info_ipv4_sip), - MLXSW_AFK_BLOCK(0x31, mlxsw_sp_afk_element_info_ipv4_dip), - MLXSW_AFK_BLOCK(0x32, mlxsw_sp_afk_element_info_ipv4), - MLXSW_AFK_BLOCK(0x33, mlxsw_sp_afk_element_info_ipv4_ex), - MLXSW_AFK_BLOCK(0x60, mlxsw_sp_afk_element_info_ipv6_dip), - MLXSW_AFK_BLOCK(0x65, mlxsw_sp_afk_element_info_ipv6_ex1), - MLXSW_AFK_BLOCK(0x62, mlxsw_sp_afk_element_info_ipv6_sip), - MLXSW_AFK_BLOCK(0x63, mlxsw_sp_afk_element_info_ipv6_sip_ex), - MLXSW_AFK_BLOCK(0xB0, mlxsw_sp_afk_element_info_packet_type), -}; - -#define MLXSW_SP_AFK_BLOCKS_COUNT ARRAY_SIZE(mlxsw_sp_afk_blocks) - -#endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c index ad1b548e3cac..245e2f473c6f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c @@ -1,7 +1,7 @@ /* * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. - * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com> + * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017-2018 Jiri Pirko <jiri@mellanox.com> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -39,25 +39,25 @@ #include <linux/list.h> #include <linux/rhashtable.h> #include <linux/netdevice.h> -#include <linux/parman.h> #include "reg.h" #include "core.h" #include "resources.h" #include "spectrum.h" +#include "spectrum_acl_tcam.h" #include "core_acl_flex_keys.h" -struct mlxsw_sp_acl_tcam { - unsigned long *used_regions; /* bit array */ - unsigned int max_regions; - unsigned long *used_groups; /* bit array */ - unsigned int max_groups; - unsigned int max_group_size; -}; +size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp) +{ + const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; + + return ops->priv_size; +} -static int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv) +int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam *tcam) { - struct mlxsw_sp_acl_tcam *tcam = priv; + const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; u64 max_tcam_regions; u64 max_regions; u64 max_groups; @@ -88,21 +88,53 @@ static int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv) tcam->max_groups = max_groups; tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUP_SIZE); + + err = ops->init(mlxsw_sp, tcam->priv, tcam); + if (err) + goto err_tcam_init; + return 0; +err_tcam_init: + kfree(tcam->used_groups); err_alloc_used_groups: kfree(tcam->used_regions); return err; } -static void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv) +void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam *tcam) { - struct mlxsw_sp_acl_tcam *tcam = priv; + const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; + ops->fini(mlxsw_sp, tcam->priv); kfree(tcam->used_groups); kfree(tcam->used_regions); } +int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei, + u32 *priority, bool fillup_priority) +{ + u64 max_priority; + + if (!fillup_priority) { + *priority = 0; + return 0; + } + + if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, KVD_SIZE)) + return -EIO; + + max_priority = MLXSW_CORE_RES_GET(mlxsw_sp->core, KVD_SIZE); + if (rulei->priority > max_priority) + 
return -EINVAL; + + /* Unlike in TC, in HW, higher number means higher priority. */ + *priority = max_priority - rulei->priority; + return 0; +} + static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam, u16 *p_id) { @@ -157,37 +189,25 @@ struct mlxsw_sp_acl_tcam_group { struct mlxsw_sp_acl_tcam_group_ops *ops; const struct mlxsw_sp_acl_tcam_pattern *patterns; unsigned int patterns_count; -}; - -struct mlxsw_sp_acl_tcam_region { - struct list_head list; /* Member of a TCAM group */ - struct list_head chunk_list; /* List of chunks under this region */ - struct parman *parman; - struct mlxsw_sp *mlxsw_sp; - struct mlxsw_sp_acl_tcam_group *group; - u16 id; /* ACL ID and region ID - they are same */ - char tcam_region_info[MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN]; - struct mlxsw_afk_key_info *key_info; - struct { - struct parman_prio parman_prio; - struct parman_item parman_item; - struct mlxsw_sp_acl_rule_info *rulei; - } catchall; + bool tmplt_elusage_set; + struct mlxsw_afk_element_usage tmplt_elusage; }; struct mlxsw_sp_acl_tcam_chunk { struct list_head list; /* Member of a TCAM region */ struct rhash_head ht_node; /* Member of a chunk HT */ unsigned int priority; /* Priority within the region and group */ - struct parman_prio parman_prio; struct mlxsw_sp_acl_tcam_group *group; struct mlxsw_sp_acl_tcam_region *region; unsigned int ref_count; + unsigned long priv[0]; + /* priv has to be always the last item */ }; struct mlxsw_sp_acl_tcam_entry { - struct parman_item parman_item; struct mlxsw_sp_acl_tcam_chunk *chunk; + unsigned long priv[0]; + /* priv has to be always the last item */ }; static const struct rhashtable_params mlxsw_sp_acl_tcam_chunk_ht_params = { @@ -216,13 +236,19 @@ mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam *tcam, struct mlxsw_sp_acl_tcam_group *group, const struct mlxsw_sp_acl_tcam_pattern *patterns, - unsigned int patterns_count) + unsigned int patterns_count, + struct mlxsw_afk_element_usage *tmplt_elusage) { int err; group->tcam = tcam; group->patterns = patterns; group->patterns_count = patterns_count; + if (tmplt_elusage) { + group->tmplt_elusage_set = true; + memcpy(&group->tmplt_elusage, tmplt_elusage, + sizeof(group->tmplt_elusage)); + } INIT_LIST_HEAD(&group->region_list); err = mlxsw_sp_acl_tcam_group_id_get(tcam, &group->id); if (err) @@ -431,6 +457,15 @@ mlxsw_sp_acl_tcam_group_use_patterns(struct mlxsw_sp_acl_tcam_group *group, const struct mlxsw_sp_acl_tcam_pattern *pattern; int i; + /* In case the template is set, we don't have to look up the pattern + * and just use the template. 
+ */ + if (group->tmplt_elusage_set) { + memcpy(out, &group->tmplt_elusage, sizeof(*out)); + WARN_ON(!mlxsw_afk_element_usage_subset(elusage, out)); + return; + } + for (i = 0; i < group->patterns_count; i++) { pattern = &group->patterns[i]; mlxsw_afk_element_usage_fill(out, pattern->elements, @@ -441,9 +476,6 @@ mlxsw_sp_acl_tcam_group_use_patterns(struct mlxsw_sp_acl_tcam_group *group, memcpy(out, elusage, sizeof(*out)); } -#define MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT 16 -#define MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP 16 - static int mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_region *region) @@ -455,6 +487,7 @@ mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp, int err; mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_ALLOC, + region->key_type, MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT, region->id, region->tcam_region_info); encodings_count = mlxsw_afk_key_info_blocks_count_get(key_info); @@ -477,24 +510,13 @@ mlxsw_sp_acl_tcam_region_free(struct mlxsw_sp *mlxsw_sp, { char ptar_pl[MLXSW_REG_PTAR_LEN]; - mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE, 0, region->id, + mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE, + region->key_type, 0, region->id, region->tcam_region_info); mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl); } static int -mlxsw_sp_acl_tcam_region_resize(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam_region *region, - u16 new_size) -{ - char ptar_pl[MLXSW_REG_PTAR_LEN]; - - mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_RESIZE, - new_size, region->id, region->tcam_region_info); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl); -} - -static int mlxsw_sp_acl_tcam_region_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_region *region) { @@ -516,193 +538,22 @@ mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp, mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl); } -static int -mlxsw_sp_acl_tcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam_region *region, - unsigned int offset, - struct mlxsw_sp_acl_rule_info *rulei) -{ - char ptce2_pl[MLXSW_REG_PTCE2_LEN]; - char *act_set; - char *mask; - char *key; - - mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_WRITE_WRITE, - region->tcam_region_info, offset); - key = mlxsw_reg_ptce2_flex_key_blocks_data(ptce2_pl); - mask = mlxsw_reg_ptce2_mask_data(ptce2_pl); - mlxsw_afk_encode(region->key_info, &rulei->values, key, mask); - - /* Only the first action set belongs here, the rest is in KVD */ - act_set = mlxsw_afa_block_first_set(rulei->act_block); - mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set); - - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl); -} - -static void -mlxsw_sp_acl_tcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam_region *region, - unsigned int offset) -{ - char ptce2_pl[MLXSW_REG_PTCE2_LEN]; - - mlxsw_reg_ptce2_pack(ptce2_pl, false, MLXSW_REG_PTCE2_OP_WRITE_WRITE, - region->tcam_region_info, offset); - mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl); -} - -static int -mlxsw_sp_acl_tcam_region_entry_activity_get(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam_region *region, - unsigned int offset, - bool *activity) -{ - char ptce2_pl[MLXSW_REG_PTCE2_LEN]; - int err; - - mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_QUERY_CLEAR_ON_READ, - region->tcam_region_info, offset); - err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl); - if (err) - return err; - *activity = 
mlxsw_reg_ptce2_a_get(ptce2_pl); - return 0; -} - -#define MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (~0U) - -static int -mlxsw_sp_acl_tcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam_region *region) -{ - struct parman_prio *parman_prio = &region->catchall.parman_prio; - struct parman_item *parman_item = &region->catchall.parman_item; - struct mlxsw_sp_acl_rule_info *rulei; - int err; - - parman_prio_init(region->parman, parman_prio, - MLXSW_SP_ACL_TCAM_CATCHALL_PRIO); - err = parman_item_add(region->parman, parman_prio, parman_item); - if (err) - goto err_parman_item_add; - - rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl); - if (IS_ERR(rulei)) { - err = PTR_ERR(rulei); - goto err_rulei_create; - } - - err = mlxsw_sp_acl_rulei_act_continue(rulei); - if (WARN_ON(err)) - goto err_rulei_act_continue; - - err = mlxsw_sp_acl_rulei_commit(rulei); - if (err) - goto err_rulei_commit; - - err = mlxsw_sp_acl_tcam_region_entry_insert(mlxsw_sp, region, - parman_item->index, rulei); - region->catchall.rulei = rulei; - if (err) - goto err_rule_insert; - - return 0; - -err_rule_insert: -err_rulei_commit: -err_rulei_act_continue: - mlxsw_sp_acl_rulei_destroy(rulei); -err_rulei_create: - parman_item_remove(region->parman, parman_prio, parman_item); -err_parman_item_add: - parman_prio_fini(parman_prio); - return err; -} - -static void -mlxsw_sp_acl_tcam_region_catchall_del(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam_region *region) -{ - struct parman_prio *parman_prio = &region->catchall.parman_prio; - struct parman_item *parman_item = &region->catchall.parman_item; - struct mlxsw_sp_acl_rule_info *rulei = region->catchall.rulei; - - mlxsw_sp_acl_tcam_region_entry_remove(mlxsw_sp, region, - parman_item->index); - mlxsw_sp_acl_rulei_destroy(rulei); - parman_item_remove(region->parman, parman_prio, parman_item); - parman_prio_fini(parman_prio); -} - -static void -mlxsw_sp_acl_tcam_region_move(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam_region *region, - u16 src_offset, u16 dst_offset, u16 size) -{ - char prcr_pl[MLXSW_REG_PRCR_LEN]; - - mlxsw_reg_prcr_pack(prcr_pl, MLXSW_REG_PRCR_OP_MOVE, - region->tcam_region_info, src_offset, - region->tcam_region_info, dst_offset, size); - mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(prcr), prcr_pl); -} - -static int mlxsw_sp_acl_tcam_region_parman_resize(void *priv, - unsigned long new_count) -{ - struct mlxsw_sp_acl_tcam_region *region = priv; - struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp; - u64 max_tcam_rules; - - max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES); - if (new_count > max_tcam_rules) - return -EINVAL; - return mlxsw_sp_acl_tcam_region_resize(mlxsw_sp, region, new_count); -} - -static void mlxsw_sp_acl_tcam_region_parman_move(void *priv, - unsigned long from_index, - unsigned long to_index, - unsigned long count) -{ - struct mlxsw_sp_acl_tcam_region *region = priv; - struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp; - - mlxsw_sp_acl_tcam_region_move(mlxsw_sp, region, - from_index, to_index, count); -} - -static const struct parman_ops mlxsw_sp_acl_tcam_region_parman_ops = { - .base_count = MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT, - .resize_step = MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP, - .resize = mlxsw_sp_acl_tcam_region_parman_resize, - .move = mlxsw_sp_acl_tcam_region_parman_move, - .algo = PARMAN_ALGO_TYPE_LSORT, -}; - static struct mlxsw_sp_acl_tcam_region * mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam *tcam, struct mlxsw_afk_element_usage *elusage) { + const struct 
mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl); struct mlxsw_sp_acl_tcam_region *region; int err; - region = kzalloc(sizeof(*region), GFP_KERNEL); + region = kzalloc(sizeof(*region) + ops->region_priv_size, GFP_KERNEL); if (!region) return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&region->chunk_list); region->mlxsw_sp = mlxsw_sp; - region->parman = parman_create(&mlxsw_sp_acl_tcam_region_parman_ops, - region); - if (!region->parman) { - err = -ENOMEM; - goto err_parman_create; - } - region->key_info = mlxsw_afk_key_info_get(afk, elusage); if (IS_ERR(region->key_info)) { err = PTR_ERR(region->key_info); @@ -713,6 +564,11 @@ mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp, if (err) goto err_region_id_get; + err = ops->region_associate(mlxsw_sp, region); + if (err) + goto err_tcam_region_associate; + + region->key_type = ops->key_type; err = mlxsw_sp_acl_tcam_region_alloc(mlxsw_sp, region); if (err) goto err_tcam_region_alloc; @@ -721,23 +577,22 @@ mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp, if (err) goto err_tcam_region_enable; - err = mlxsw_sp_acl_tcam_region_catchall_add(mlxsw_sp, region); + err = ops->region_init(mlxsw_sp, region->priv, tcam->priv, region); if (err) - goto err_tcam_region_catchall_add; + goto err_tcam_region_init; return region; -err_tcam_region_catchall_add: +err_tcam_region_init: mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region); err_tcam_region_enable: mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region); err_tcam_region_alloc: +err_tcam_region_associate: mlxsw_sp_acl_tcam_region_id_put(tcam, region->id); err_region_id_get: mlxsw_afk_key_info_put(region->key_info); err_key_info_get: - parman_destroy(region->parman); -err_parman_create: kfree(region); return ERR_PTR(err); } @@ -746,12 +601,13 @@ static void mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_region *region) { - mlxsw_sp_acl_tcam_region_catchall_del(mlxsw_sp, region); + const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; + + ops->region_fini(mlxsw_sp, region->priv); mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region); mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region); mlxsw_sp_acl_tcam_region_id_put(region->group->tcam, region->id); mlxsw_afk_key_info_put(region->key_info); - parman_destroy(region->parman); kfree(region); } @@ -826,13 +682,14 @@ mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp, unsigned int priority, struct mlxsw_afk_element_usage *elusage) { + const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; struct mlxsw_sp_acl_tcam_chunk *chunk; int err; if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO) return ERR_PTR(-EINVAL); - chunk = kzalloc(sizeof(*chunk), GFP_KERNEL); + chunk = kzalloc(sizeof(*chunk) + ops->chunk_priv_size, GFP_KERNEL); if (!chunk) return ERR_PTR(-ENOMEM); chunk->priority = priority; @@ -844,7 +701,7 @@ mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp, if (err) goto err_chunk_assoc; - parman_prio_init(chunk->region->parman, &chunk->parman_prio, priority); + ops->chunk_init(chunk->region->priv, chunk->priv, priority); err = rhashtable_insert_fast(&group->chunk_ht, &chunk->ht_node, mlxsw_sp_acl_tcam_chunk_ht_params); @@ -854,7 +711,7 @@ mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp, return chunk; err_rhashtable_insert: - parman_prio_fini(&chunk->parman_prio); + ops->chunk_fini(chunk->priv); mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk); err_chunk_assoc: kfree(chunk); @@ -865,11 +722,12 @@ static void 
mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_chunk *chunk) { + const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; struct mlxsw_sp_acl_tcam_group *group = chunk->group; rhashtable_remove_fast(&group->chunk_ht, &chunk->ht_node, mlxsw_sp_acl_tcam_chunk_ht_params); - parman_prio_fini(&chunk->parman_prio); + ops->chunk_fini(chunk->priv); mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk); kfree(chunk); } @@ -903,11 +761,19 @@ static void mlxsw_sp_acl_tcam_chunk_put(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, chunk); } +static size_t mlxsw_sp_acl_tcam_entry_priv_size(struct mlxsw_sp *mlxsw_sp) +{ + const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; + + return ops->entry_priv_size; +} + static int mlxsw_sp_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_group *group, struct mlxsw_sp_acl_tcam_entry *entry, struct mlxsw_sp_acl_rule_info *rulei) { + const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; struct mlxsw_sp_acl_tcam_chunk *chunk; struct mlxsw_sp_acl_tcam_region *region; int err; @@ -918,24 +784,16 @@ static int mlxsw_sp_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp, return PTR_ERR(chunk); region = chunk->region; - err = parman_item_add(region->parman, &chunk->parman_prio, - &entry->parman_item); - if (err) - goto err_parman_item_add; - err = mlxsw_sp_acl_tcam_region_entry_insert(mlxsw_sp, region, - entry->parman_item.index, - rulei); + err = ops->entry_add(mlxsw_sp, region->priv, chunk->priv, + entry->priv, rulei); if (err) - goto err_rule_insert; + goto err_entry_add; entry->chunk = chunk; return 0; -err_rule_insert: - parman_item_remove(region->parman, &chunk->parman_prio, - &entry->parman_item); -err_parman_item_add: +err_entry_add: mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk); return err; } @@ -943,13 +801,11 @@ err_parman_item_add: static void mlxsw_sp_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_entry *entry) { + const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk; struct mlxsw_sp_acl_tcam_region *region = chunk->region; - mlxsw_sp_acl_tcam_region_entry_remove(mlxsw_sp, region, - entry->parman_item.index); - parman_item_remove(region->parman, &chunk->parman_prio, - &entry->parman_item); + ops->entry_del(mlxsw_sp, region->priv, chunk->priv, entry->priv); mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk); } @@ -958,22 +814,24 @@ mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_entry *entry, bool *activity) { + const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk; struct mlxsw_sp_acl_tcam_region *region = chunk->region; - return mlxsw_sp_acl_tcam_region_entry_activity_get(mlxsw_sp, region, - entry->parman_item.index, - activity); + return ops->entry_activity_get(mlxsw_sp, region->priv, + entry->priv, activity); } static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = { MLXSW_AFK_ELEMENT_SRC_SYS_PORT, - MLXSW_AFK_ELEMENT_DMAC, - MLXSW_AFK_ELEMENT_SMAC, + MLXSW_AFK_ELEMENT_DMAC_32_47, + MLXSW_AFK_ELEMENT_DMAC_0_31, + MLXSW_AFK_ELEMENT_SMAC_32_47, + MLXSW_AFK_ELEMENT_SMAC_0_31, MLXSW_AFK_ELEMENT_ETHERTYPE, MLXSW_AFK_ELEMENT_IP_PROTO, - MLXSW_AFK_ELEMENT_SRC_IP4, - MLXSW_AFK_ELEMENT_DST_IP4, + MLXSW_AFK_ELEMENT_SRC_IP_0_31, + MLXSW_AFK_ELEMENT_DST_IP_0_31, MLXSW_AFK_ELEMENT_DST_L4_PORT, MLXSW_AFK_ELEMENT_SRC_L4_PORT, MLXSW_AFK_ELEMENT_VID, @@ -987,10 +845,14 
@@ static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = { static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = { MLXSW_AFK_ELEMENT_ETHERTYPE, MLXSW_AFK_ELEMENT_IP_PROTO, - MLXSW_AFK_ELEMENT_SRC_IP6_HI, - MLXSW_AFK_ELEMENT_SRC_IP6_LO, - MLXSW_AFK_ELEMENT_DST_IP6_HI, - MLXSW_AFK_ELEMENT_DST_IP6_LO, + MLXSW_AFK_ELEMENT_SRC_IP_96_127, + MLXSW_AFK_ELEMENT_SRC_IP_64_95, + MLXSW_AFK_ELEMENT_SRC_IP_32_63, + MLXSW_AFK_ELEMENT_SRC_IP_0_31, + MLXSW_AFK_ELEMENT_DST_IP_96_127, + MLXSW_AFK_ELEMENT_DST_IP_64_95, + MLXSW_AFK_ELEMENT_DST_IP_32_63, + MLXSW_AFK_ELEMENT_DST_IP_0_31, MLXSW_AFK_ELEMENT_DST_L4_PORT, MLXSW_AFK_ELEMENT_SRC_L4_PORT, }; @@ -1019,14 +881,16 @@ struct mlxsw_sp_acl_tcam_flower_rule { static int mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp, - void *priv, void *ruleset_priv) + struct mlxsw_sp_acl_tcam *tcam, + void *ruleset_priv, + struct mlxsw_afk_element_usage *tmplt_elusage) { struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv; - struct mlxsw_sp_acl_tcam *tcam = priv; return mlxsw_sp_acl_tcam_group_add(mlxsw_sp, tcam, &ruleset->group, mlxsw_sp_acl_tcam_patterns, - MLXSW_SP_ACL_TCAM_PATTERNS_COUNT); + MLXSW_SP_ACL_TCAM_PATTERNS_COUNT, + tmplt_elusage); } static void @@ -1070,6 +934,12 @@ mlxsw_sp_acl_tcam_flower_ruleset_group_id(void *ruleset_priv) return mlxsw_sp_acl_tcam_group_id(&ruleset->group); } +static size_t mlxsw_sp_acl_tcam_flower_rule_priv_size(struct mlxsw_sp *mlxsw_sp) +{ + return sizeof(struct mlxsw_sp_acl_tcam_flower_rule) + + mlxsw_sp_acl_tcam_entry_priv_size(mlxsw_sp); +} + static int mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv, void *rule_priv, @@ -1107,7 +977,7 @@ static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = { .ruleset_bind = mlxsw_sp_acl_tcam_flower_ruleset_bind, .ruleset_unbind = mlxsw_sp_acl_tcam_flower_ruleset_unbind, .ruleset_group_id = mlxsw_sp_acl_tcam_flower_ruleset_group_id, - .rule_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_rule), + .rule_priv_size = mlxsw_sp_acl_tcam_flower_rule_priv_size, .rule_add = mlxsw_sp_acl_tcam_flower_rule_add, .rule_del = mlxsw_sp_acl_tcam_flower_rule_del, .rule_activity_get = mlxsw_sp_acl_tcam_flower_rule_activity_get, @@ -1118,7 +988,7 @@ mlxsw_sp_acl_tcam_profile_ops_arr[] = { [MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops, }; -static const struct mlxsw_sp_acl_profile_ops * +const struct mlxsw_sp_acl_profile_ops * mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp, enum mlxsw_sp_acl_profile profile) { @@ -1131,10 +1001,3 @@ mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp, return NULL; return ops; } - -const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops = { - .priv_size = sizeof(struct mlxsw_sp_acl_tcam), - .init = mlxsw_sp_acl_tcam_init, - .fini = mlxsw_sp_acl_tcam_fini, - .profile_ops = mlxsw_sp_acl_tcam_profile_ops, -}; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h new file mode 100644 index 000000000000..881ade760ace --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h @@ -0,0 +1,259 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h + * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017-2018 Jiri Pirko <jiri@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MLXSW_SPECTRUM_ACL_TCAM_H +#define _MLXSW_SPECTRUM_ACL_TCAM_H + +#include <linux/list.h> +#include <linux/parman.h> + +#include "reg.h" +#include "spectrum.h" +#include "core_acl_flex_keys.h" + +struct mlxsw_sp_acl_tcam { + unsigned long *used_regions; /* bit array */ + unsigned int max_regions; + unsigned long *used_groups; /* bit array */ + unsigned int max_groups; + unsigned int max_group_size; + unsigned long priv[0]; + /* priv has to be always the last item */ +}; + +size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp); +int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam *tcam); +void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam *tcam); +int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei, + u32 *priority, bool fillup_priority); + +struct mlxsw_sp_acl_profile_ops { + size_t ruleset_priv_size; + int (*ruleset_add)(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam *tcam, void *ruleset_priv, + struct mlxsw_afk_element_usage *tmplt_elusage); + void (*ruleset_del)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv); + int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv, + struct mlxsw_sp_port *mlxsw_sp_port, + bool ingress); + void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv, + struct mlxsw_sp_port *mlxsw_sp_port, + bool ingress); + u16 (*ruleset_group_id)(void *ruleset_priv); + size_t (*rule_priv_size)(struct mlxsw_sp *mlxsw_sp); + int (*rule_add)(struct mlxsw_sp *mlxsw_sp, + void *ruleset_priv, void *rule_priv, + struct mlxsw_sp_acl_rule_info *rulei); + void (*rule_del)(struct mlxsw_sp *mlxsw_sp, void *rule_priv); + int (*rule_activity_get)(struct mlxsw_sp *mlxsw_sp, void *rule_priv, + bool *activity); +}; + +const struct mlxsw_sp_acl_profile_ops * +mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_acl_profile profile); + +#define MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT 16 +#define 
MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP 16 + +#define MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (~0U) + +#define MLXSW_SP_ACL_TCAM_MASK_LEN \ + (MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN * BITS_PER_BYTE) + +struct mlxsw_sp_acl_tcam_group; + +struct mlxsw_sp_acl_tcam_region { + struct list_head list; /* Member of a TCAM group */ + struct list_head chunk_list; /* List of chunks under this region */ + struct mlxsw_sp_acl_tcam_group *group; + enum mlxsw_reg_ptar_key_type key_type; + u16 id; /* ACL ID and region ID - they are same */ + char tcam_region_info[MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN]; + struct mlxsw_afk_key_info *key_info; + struct mlxsw_sp *mlxsw_sp; + unsigned long priv[0]; + /* priv has to be always the last item */ +}; + +struct mlxsw_sp_acl_ctcam_region { + struct parman *parman; + const struct mlxsw_sp_acl_ctcam_region_ops *ops; + struct mlxsw_sp_acl_tcam_region *region; +}; + +struct mlxsw_sp_acl_ctcam_chunk { + struct parman_prio parman_prio; +}; + +struct mlxsw_sp_acl_ctcam_entry { + struct parman_item parman_item; +}; + +struct mlxsw_sp_acl_ctcam_region_ops { + int (*entry_insert)(struct mlxsw_sp_acl_ctcam_region *cregion, + struct mlxsw_sp_acl_ctcam_entry *centry, + const char *mask); + void (*entry_remove)(struct mlxsw_sp_acl_ctcam_region *cregion, + struct mlxsw_sp_acl_ctcam_entry *centry); +}; + +int +mlxsw_sp_acl_ctcam_region_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ctcam_region *cregion, + struct mlxsw_sp_acl_tcam_region *region, + const struct mlxsw_sp_acl_ctcam_region_ops *ops); +void mlxsw_sp_acl_ctcam_region_fini(struct mlxsw_sp_acl_ctcam_region *cregion); +void mlxsw_sp_acl_ctcam_chunk_init(struct mlxsw_sp_acl_ctcam_region *cregion, + struct mlxsw_sp_acl_ctcam_chunk *cchunk, + unsigned int priority); +void mlxsw_sp_acl_ctcam_chunk_fini(struct mlxsw_sp_acl_ctcam_chunk *cchunk); +int mlxsw_sp_acl_ctcam_entry_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ctcam_region *cregion, + struct mlxsw_sp_acl_ctcam_chunk *cchunk, + struct mlxsw_sp_acl_ctcam_entry *centry, + struct mlxsw_sp_acl_rule_info *rulei, + bool fillup_priority); +void mlxsw_sp_acl_ctcam_entry_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ctcam_region *cregion, + struct mlxsw_sp_acl_ctcam_chunk *cchunk, + struct mlxsw_sp_acl_ctcam_entry *centry); +static inline unsigned int +mlxsw_sp_acl_ctcam_entry_offset(struct mlxsw_sp_acl_ctcam_entry *centry) +{ + return centry->parman_item.index; +} + +enum mlxsw_sp_acl_atcam_region_type { + MLXSW_SP_ACL_ATCAM_REGION_TYPE_2KB, + MLXSW_SP_ACL_ATCAM_REGION_TYPE_4KB, + MLXSW_SP_ACL_ATCAM_REGION_TYPE_8KB, + MLXSW_SP_ACL_ATCAM_REGION_TYPE_12KB, + __MLXSW_SP_ACL_ATCAM_REGION_TYPE_MAX, +}; + +#define MLXSW_SP_ACL_ATCAM_REGION_TYPE_MAX \ + (__MLXSW_SP_ACL_ATCAM_REGION_TYPE_MAX - 1) + +struct mlxsw_sp_acl_atcam { + struct mlxsw_sp_acl_erp_core *erp_core; +}; + +struct mlxsw_sp_acl_atcam_region { + struct rhashtable entries_ht; /* A-TCAM only */ + struct mlxsw_sp_acl_ctcam_region cregion; + const struct mlxsw_sp_acl_atcam_region_ops *ops; + struct mlxsw_sp_acl_tcam_region *region; + struct mlxsw_sp_acl_atcam *atcam; + enum mlxsw_sp_acl_atcam_region_type type; + struct mlxsw_sp_acl_erp_table *erp_table; + void *priv; +}; + +struct mlxsw_sp_acl_atcam_entry_ht_key { + char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key */ + u8 erp_id; +}; + +struct mlxsw_sp_acl_atcam_chunk { + struct mlxsw_sp_acl_ctcam_chunk cchunk; +}; + +struct mlxsw_sp_acl_atcam_entry { + struct rhash_head ht_node; + struct mlxsw_sp_acl_atcam_entry_ht_key ht_key; + struct 
mlxsw_sp_acl_ctcam_entry centry; + struct mlxsw_sp_acl_atcam_lkey_id *lkey_id; + struct mlxsw_sp_acl_erp *erp; +}; + +static inline struct mlxsw_sp_acl_atcam_region * +mlxsw_sp_acl_tcam_cregion_aregion(struct mlxsw_sp_acl_ctcam_region *cregion) +{ + return container_of(cregion, struct mlxsw_sp_acl_atcam_region, cregion); +} + +static inline struct mlxsw_sp_acl_atcam_entry * +mlxsw_sp_acl_tcam_centry_aentry(struct mlxsw_sp_acl_ctcam_entry *centry) +{ + return container_of(centry, struct mlxsw_sp_acl_atcam_entry, centry); +} + +int mlxsw_sp_acl_atcam_region_associate(struct mlxsw_sp *mlxsw_sp, + u16 region_id); +int +mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam *atcam, + struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_tcam_region *region, + const struct mlxsw_sp_acl_ctcam_region_ops *ops); +void mlxsw_sp_acl_atcam_region_fini(struct mlxsw_sp_acl_atcam_region *aregion); +void mlxsw_sp_acl_atcam_chunk_init(struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_atcam_chunk *achunk, + unsigned int priority); +void mlxsw_sp_acl_atcam_chunk_fini(struct mlxsw_sp_acl_atcam_chunk *achunk); +int mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_atcam_chunk *achunk, + struct mlxsw_sp_acl_atcam_entry *aentry, + struct mlxsw_sp_acl_rule_info *rulei); +void mlxsw_sp_acl_atcam_entry_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_atcam_chunk *achunk, + struct mlxsw_sp_acl_atcam_entry *aentry); +int mlxsw_sp_acl_atcam_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam *atcam); +void mlxsw_sp_acl_atcam_fini(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam *atcam); + +struct mlxsw_sp_acl_erp; + +bool mlxsw_sp_acl_erp_is_ctcam_erp(const struct mlxsw_sp_acl_erp *erp); +u8 mlxsw_sp_acl_erp_id(const struct mlxsw_sp_acl_erp *erp); +struct mlxsw_sp_acl_erp * +mlxsw_sp_acl_erp_get(struct mlxsw_sp_acl_atcam_region *aregion, + const char *mask, bool ctcam); +void mlxsw_sp_acl_erp_put(struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_erp *erp); +int mlxsw_sp_acl_erp_region_init(struct mlxsw_sp_acl_atcam_region *aregion); +void mlxsw_sp_acl_erp_region_fini(struct mlxsw_sp_acl_atcam_region *aregion); +int mlxsw_sp_acl_erps_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam *atcam); +void mlxsw_sp_acl_erps_fini(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam *atcam); + +#endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c index b6ed7f7c531e..c31aeb25ab5a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c @@ -1,6 +1,6 @@ /* * drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c - * Copyright (c) 2016 Mellanox Technologies. All rights reserved. + * Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved. 
* Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com> * * Redistribution and use in source and binary forms, with or without @@ -255,6 +255,270 @@ static int mlxsw_sp_dcbnl_ieee_setets(struct net_device *dev, return 0; } +static int mlxsw_sp_dcbnl_app_validate(struct net_device *dev, + struct dcb_app *app) +{ + int prio; + + if (app->priority >= IEEE_8021QAZ_MAX_TCS) { + netdev_err(dev, "APP entry with priority value %u is invalid\n", + app->priority); + return -EINVAL; + } + + switch (app->selector) { + case IEEE_8021QAZ_APP_SEL_DSCP: + if (app->protocol >= 64) { + netdev_err(dev, "DSCP APP entry with protocol value %u is invalid\n", + app->protocol); + return -EINVAL; + } + + /* Warn about any DSCP APP entries with the same PID. */ + prio = fls(dcb_ieee_getapp_mask(dev, app)); + if (prio--) { + if (prio < app->priority) + netdev_warn(dev, "Choosing priority %d for DSCP %d in favor of previously-active value of %d\n", + app->priority, app->protocol, prio); + else if (prio > app->priority) + netdev_warn(dev, "Ignoring new priority %d for DSCP %d in favor of current value of %d\n", + app->priority, app->protocol, prio); + } + break; + + case IEEE_8021QAZ_APP_SEL_ETHERTYPE: + if (app->protocol) { + netdev_err(dev, "EtherType APP entries with protocol value != 0 not supported\n"); + return -EINVAL; + } + break; + + default: + netdev_err(dev, "APP entries with selector %u not supported\n", + app->selector); + return -EINVAL; + } + + return 0; +} + +static u8 +mlxsw_sp_port_dcb_app_default_prio(struct mlxsw_sp_port *mlxsw_sp_port) +{ + u8 prio_mask; + + prio_mask = dcb_ieee_getapp_default_prio_mask(mlxsw_sp_port->dev); + if (prio_mask) + /* Take the highest configured priority. */ + return fls(prio_mask) - 1; + + return 0; +} + +static void +mlxsw_sp_port_dcb_app_dscp_prio_map(struct mlxsw_sp_port *mlxsw_sp_port, + u8 default_prio, + struct dcb_ieee_app_dscp_map *map) +{ + int i; + + dcb_ieee_getapp_dscp_prio_mask_map(mlxsw_sp_port->dev, map); + for (i = 0; i < ARRAY_SIZE(map->map); ++i) { + if (map->map[i]) + map->map[i] = fls(map->map[i]) - 1; + else + map->map[i] = default_prio; + } +} + +static bool +mlxsw_sp_port_dcb_app_prio_dscp_map(struct mlxsw_sp_port *mlxsw_sp_port, + struct dcb_ieee_app_prio_map *map) +{ + bool have_dscp = false; + int i; + + dcb_ieee_getapp_prio_dscp_mask_map(mlxsw_sp_port->dev, map); + for (i = 0; i < ARRAY_SIZE(map->map); ++i) { + if (map->map[i]) { + map->map[i] = fls64(map->map[i]) - 1; + have_dscp = true; + } + } + + return have_dscp; +} + +static int +mlxsw_sp_port_dcb_app_update_qpts(struct mlxsw_sp_port *mlxsw_sp_port, + enum mlxsw_reg_qpts_trust_state ts) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char qpts_pl[MLXSW_REG_QPTS_LEN]; + + mlxsw_reg_qpts_pack(qpts_pl, mlxsw_sp_port->local_port, ts); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpts), qpts_pl); +} + +static int +mlxsw_sp_port_dcb_app_update_qrwe(struct mlxsw_sp_port *mlxsw_sp_port, + bool rewrite_dscp) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char qrwe_pl[MLXSW_REG_QRWE_LEN]; + + mlxsw_reg_qrwe_pack(qrwe_pl, mlxsw_sp_port->local_port, + false, rewrite_dscp); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qrwe), qrwe_pl); +} + +static int +mlxsw_sp_port_dcb_toggle_trust(struct mlxsw_sp_port *mlxsw_sp_port, + enum mlxsw_reg_qpts_trust_state ts) +{ + bool rewrite_dscp = ts == MLXSW_REG_QPTS_TRUST_STATE_DSCP; + int err; + + if (mlxsw_sp_port->dcb.trust_state == ts) + return 0; + + err = mlxsw_sp_port_dcb_app_update_qpts(mlxsw_sp_port, ts); + if (err) + 
return err; + + err = mlxsw_sp_port_dcb_app_update_qrwe(mlxsw_sp_port, rewrite_dscp); + if (err) + goto err_update_qrwe; + + mlxsw_sp_port->dcb.trust_state = ts; + return 0; + +err_update_qrwe: + mlxsw_sp_port_dcb_app_update_qpts(mlxsw_sp_port, + mlxsw_sp_port->dcb.trust_state); + return err; +} + +static int +mlxsw_sp_port_dcb_app_update_qpdpm(struct mlxsw_sp_port *mlxsw_sp_port, + struct dcb_ieee_app_dscp_map *map) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char qpdpm_pl[MLXSW_REG_QPDPM_LEN]; + short int i; + + mlxsw_reg_qpdpm_pack(qpdpm_pl, mlxsw_sp_port->local_port); + for (i = 0; i < ARRAY_SIZE(map->map); ++i) + mlxsw_reg_qpdpm_dscp_pack(qpdpm_pl, i, map->map[i]); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpdpm), qpdpm_pl); +} + +static int +mlxsw_sp_port_dcb_app_update_qpdsm(struct mlxsw_sp_port *mlxsw_sp_port, + struct dcb_ieee_app_prio_map *map) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char qpdsm_pl[MLXSW_REG_QPDSM_LEN]; + short int i; + + mlxsw_reg_qpdsm_pack(qpdsm_pl, mlxsw_sp_port->local_port); + for (i = 0; i < ARRAY_SIZE(map->map); ++i) + mlxsw_reg_qpdsm_prio_pack(qpdsm_pl, i, map->map[i]); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpdsm), qpdsm_pl); +} + +static int mlxsw_sp_port_dcb_app_update(struct mlxsw_sp_port *mlxsw_sp_port) +{ + struct dcb_ieee_app_prio_map prio_map; + struct dcb_ieee_app_dscp_map dscp_map; + u8 default_prio; + bool have_dscp; + int err; + + default_prio = mlxsw_sp_port_dcb_app_default_prio(mlxsw_sp_port); + have_dscp = mlxsw_sp_port_dcb_app_prio_dscp_map(mlxsw_sp_port, + &prio_map); + + if (!have_dscp) { + err = mlxsw_sp_port_dcb_toggle_trust(mlxsw_sp_port, + MLXSW_REG_QPTS_TRUST_STATE_PCP); + if (err) + netdev_err(mlxsw_sp_port->dev, "Couldn't switch to trust L2\n"); + return err; + } + + mlxsw_sp_port_dcb_app_dscp_prio_map(mlxsw_sp_port, default_prio, + &dscp_map); + err = mlxsw_sp_port_dcb_app_update_qpdpm(mlxsw_sp_port, + &dscp_map); + if (err) { + netdev_err(mlxsw_sp_port->dev, "Couldn't configure priority map\n"); + return err; + } + + err = mlxsw_sp_port_dcb_app_update_qpdsm(mlxsw_sp_port, + &prio_map); + if (err) { + netdev_err(mlxsw_sp_port->dev, "Couldn't configure DSCP rewrite map\n"); + return err; + } + + err = mlxsw_sp_port_dcb_toggle_trust(mlxsw_sp_port, + MLXSW_REG_QPTS_TRUST_STATE_DSCP); + if (err) { + /* A failure to set trust DSCP means that the QPDPM and QPDSM + * maps installed above are not in effect. And since we are here + * attempting to set trust DSCP, we couldn't have attempted to + * switch trust to PCP. Thus no cleanup is necessary. 
+ */ + netdev_err(mlxsw_sp_port->dev, "Couldn't switch to trust L3\n"); + return err; + } + + return 0; +} + +static int mlxsw_sp_dcbnl_ieee_setapp(struct net_device *dev, + struct dcb_app *app) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + int err; + + err = mlxsw_sp_dcbnl_app_validate(dev, app); + if (err) + return err; + + err = dcb_ieee_setapp(dev, app); + if (err) + return err; + + err = mlxsw_sp_port_dcb_app_update(mlxsw_sp_port); + if (err) + goto err_update; + + return 0; + +err_update: + dcb_ieee_delapp(dev, app); + return err; +} + +static int mlxsw_sp_dcbnl_ieee_delapp(struct net_device *dev, + struct dcb_app *app) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + int err; + + err = dcb_ieee_delapp(dev, app); + if (err) + return err; + + err = mlxsw_sp_port_dcb_app_update(mlxsw_sp_port); + if (err) + netdev_err(dev, "Failed to update DCB APP configuration\n"); + return 0; +} + static int mlxsw_sp_dcbnl_ieee_getmaxrate(struct net_device *dev, struct ieee_maxrate *maxrate) { @@ -394,6 +658,8 @@ static const struct dcbnl_rtnl_ops mlxsw_sp_dcbnl_ops = { .ieee_setmaxrate = mlxsw_sp_dcbnl_ieee_setmaxrate, .ieee_getpfc = mlxsw_sp_dcbnl_ieee_getpfc, .ieee_setpfc = mlxsw_sp_dcbnl_ieee_setpfc, + .ieee_setapp = mlxsw_sp_dcbnl_ieee_setapp, + .ieee_delapp = mlxsw_sp_dcbnl_ieee_delapp, .getdcbx = mlxsw_sp_dcbnl_getdcbx, .setdcbx = mlxsw_sp_dcbnl_setdcbx, @@ -467,6 +733,7 @@ int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port) if (err) goto err_port_pfc_init; + mlxsw_sp_port->dcb.trust_state = MLXSW_REG_QPTS_TRUST_STATE_PCP; mlxsw_sp_port->dev->dcbnl_ops = &mlxsw_sp_dcbnl_ops; return 0; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index 89dbf569dff5..8213cb7190fa 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -48,7 +48,8 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_block *block, struct mlxsw_sp_acl_rule_info *rulei, - struct tcf_exts *exts) + struct tcf_exts *exts, + struct netlink_ext_ack *extack) { const struct tc_action *a; LIST_HEAD(actions); @@ -58,7 +59,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, return 0; /* Count action is inserted first */ - err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei); + err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack); if (err) return err; @@ -66,16 +67,22 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, list_for_each_entry(a, &actions, list) { if (is_tcf_gact_ok(a)) { err = mlxsw_sp_acl_rulei_act_terminate(rulei); - if (err) + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Cannot append terminate action"); return err; + } } else if (is_tcf_gact_shot(a)) { err = mlxsw_sp_acl_rulei_act_drop(rulei); - if (err) + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Cannot append drop action"); return err; + } } else if (is_tcf_gact_trap(a)) { err = mlxsw_sp_acl_rulei_act_trap(rulei); - if (err) + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Cannot append trap action"); return err; + } } else if (is_tcf_gact_goto_chain(a)) { u32 chain_index = tcf_gact_goto_chain_index(a); struct mlxsw_sp_acl_ruleset *ruleset; @@ -89,8 +96,10 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, group_id = mlxsw_sp_acl_ruleset_group_id(ruleset); err = mlxsw_sp_acl_rulei_act_jump(rulei, group_id); - if (err) + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Cannot append jump action"); 
return err; + } } else if (is_tcf_mirred_egress_redirect(a)) { struct net_device *out_dev; struct mlxsw_sp_fid *fid; @@ -99,20 +108,21 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp); fid_index = mlxsw_sp_fid_index(fid); err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei, - fid_index); + fid_index, extack); if (err) return err; out_dev = tcf_mirred_dev(a); err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei, - out_dev); + out_dev, extack); if (err) return err; } else if (is_tcf_mirred_egress_mirror(a)) { struct net_device *out_dev = tcf_mirred_dev(a); err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei, - block, out_dev); + block, out_dev, + extack); if (err) return err; } else if (is_tcf_vlan(a)) { @@ -123,8 +133,9 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei, action, vid, - proto, prio); + proto, prio, extack); } else { + NL_SET_ERR_MSG_MOD(extack, "Unsupported action"); dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n"); return -EOPNOTSUPP; } @@ -144,10 +155,12 @@ static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei, FLOW_DISSECTOR_KEY_IPV4_ADDRS, f->mask); - mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_IP4, - ntohl(key->src), ntohl(mask->src)); - mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_IP4, - ntohl(key->dst), ntohl(mask->dst)); + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31, + (char *) &key->src, + (char *) &mask->src, 4); + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31, + (char *) &key->dst, + (char *) &mask->dst, 4); } static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei, @@ -161,24 +174,31 @@ static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei, skb_flow_dissector_target(f->dissector, FLOW_DISSECTOR_KEY_IPV6_ADDRS, f->mask); - size_t addr_half_size = sizeof(key->src) / 2; - - mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP6_HI, - &key->src.s6_addr[0], - &mask->src.s6_addr[0], - addr_half_size); - mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP6_LO, - &key->src.s6_addr[addr_half_size], - &mask->src.s6_addr[addr_half_size], - addr_half_size); - mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP6_HI, - &key->dst.s6_addr[0], - &mask->dst.s6_addr[0], - addr_half_size); - mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP6_LO, - &key->dst.s6_addr[addr_half_size], - &mask->dst.s6_addr[addr_half_size], - addr_half_size); + + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127, + &key->src.s6_addr[0x0], + &mask->src.s6_addr[0x0], 4); + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_64_95, + &key->src.s6_addr[0x4], + &mask->src.s6_addr[0x4], 4); + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_32_63, + &key->src.s6_addr[0x8], + &mask->src.s6_addr[0x8], 4); + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31, + &key->src.s6_addr[0xC], + &mask->src.s6_addr[0xC], 4); + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_96_127, + &key->dst.s6_addr[0x0], + &mask->dst.s6_addr[0x0], 4); + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_64_95, + &key->dst.s6_addr[0x4], + &mask->dst.s6_addr[0x4], 4); + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_32_63, + &key->dst.s6_addr[0x8], + &mask->dst.s6_addr[0x8], 4); + mlxsw_sp_acl_rulei_keymask_buf(rulei, 
MLXSW_AFK_ELEMENT_DST_IP_0_31, + &key->dst.s6_addr[0xC], + &mask->dst.s6_addr[0xC], 4); } static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp, @@ -192,6 +212,7 @@ static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp, return 0; if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) { + NL_SET_ERR_MSG_MOD(f->common.extack, "Only UDP and TCP keys are supported"); dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n"); return -EINVAL; } @@ -220,6 +241,7 @@ static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp, return 0; if (ip_proto != IPPROTO_TCP) { + NL_SET_ERR_MSG_MOD(f->common.extack, "TCP keys supported only for TCP"); dev_err(mlxsw_sp->bus_info->dev, "TCP keys supported only for TCP\n"); return -EINVAL; } @@ -246,6 +268,7 @@ static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp, return 0; if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) { + NL_SET_ERR_MSG_MOD(f->common.extack, "IP keys supported only for IPv4/6"); dev_err(mlxsw_sp->bus_info->dev, "IP keys supported only for IPv4/6\n"); return -EINVAL; } @@ -290,6 +313,7 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, BIT(FLOW_DISSECTOR_KEY_IP) | BIT(FLOW_DISSECTOR_KEY_VLAN))) { dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n"); + NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key"); return -EOPNOTSUPP; } @@ -340,13 +364,17 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, f->mask); mlxsw_sp_acl_rulei_keymask_buf(rulei, - MLXSW_AFK_ELEMENT_DMAC, - key->dst, mask->dst, - sizeof(key->dst)); + MLXSW_AFK_ELEMENT_DMAC_32_47, + key->dst, mask->dst, 2); mlxsw_sp_acl_rulei_keymask_buf(rulei, - MLXSW_AFK_ELEMENT_SMAC, - key->src, mask->src, - sizeof(key->src)); + MLXSW_AFK_ELEMENT_DMAC_0_31, + key->dst + 2, mask->dst + 2, 4); + mlxsw_sp_acl_rulei_keymask_buf(rulei, + MLXSW_AFK_ELEMENT_SMAC_32_47, + key->src, mask->src, 2); + mlxsw_sp_acl_rulei_keymask_buf(rulei, + MLXSW_AFK_ELEMENT_SMAC_0_31, + key->src + 2, mask->src + 2, 4); } if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) { @@ -387,7 +415,8 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, if (err) return err; - return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei, f->exts); + return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei, f->exts, + f->common.extack); } int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp, @@ -401,11 +430,12 @@ int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp, ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block, f->common.chain_index, - MLXSW_SP_ACL_PROFILE_FLOWER); + MLXSW_SP_ACL_PROFILE_FLOWER, NULL); if (IS_ERR(ruleset)) return PTR_ERR(ruleset); - rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie); + rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie, + f->common.extack); if (IS_ERR(rule)) { err = PTR_ERR(rule); goto err_rule_create; @@ -445,7 +475,7 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp, ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block, f->common.chain_index, - MLXSW_SP_ACL_PROFILE_FLOWER); + MLXSW_SP_ACL_PROFILE_FLOWER, NULL); if (IS_ERR(ruleset)) return; @@ -471,7 +501,7 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp, ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block, f->common.chain_index, - MLXSW_SP_ACL_PROFILE_FLOWER); + MLXSW_SP_ACL_PROFILE_FLOWER, NULL); if (WARN_ON(IS_ERR(ruleset))) return -EINVAL; @@ -493,3 +523,41 @@ err_rule_get_stats: mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset); return err; } + +int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp, + 
struct mlxsw_sp_acl_block *block, + struct tc_cls_flower_offload *f) +{ + struct mlxsw_sp_acl_ruleset *ruleset; + struct mlxsw_sp_acl_rule_info rulei; + int err; + + memset(&rulei, 0, sizeof(rulei)); + err = mlxsw_sp_flower_parse(mlxsw_sp, block, &rulei, f); + if (err) + return err; + ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block, + f->common.chain_index, + MLXSW_SP_ACL_PROFILE_FLOWER, + &rulei.values.elusage); + if (IS_ERR(ruleset)) + return PTR_ERR(ruleset); + /* keep the reference to the ruleset */ + return 0; +} + +void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_block *block, + struct tc_cls_flower_offload *f) +{ + struct mlxsw_sp_acl_ruleset *ruleset; + + ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block, + f->common.chain_index, + MLXSW_SP_ACL_PROFILE_FLOWER, NULL); + if (IS_ERR(ruleset)) + return; + /* put the reference to the ruleset kept in create */ + mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset); + mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset); +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c index fe4327f547d2..fd557585514d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c @@ -1,7 +1,7 @@ /* * drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c - * Copyright (c) 2016 Mellanox Technologies. All rights reserved. - * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com> + * Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved. + * Copyright (c) 2016-2018 Jiri Pirko <jiri@mellanox.com> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -33,422 +33,74 @@ */ #include <linux/kernel.h> -#include <linux/bitops.h> +#include <linux/slab.h> #include "spectrum.h" -#define MLXSW_SP_KVDL_SINGLE_BASE 0 -#define MLXSW_SP_KVDL_SINGLE_SIZE 16384 -#define MLXSW_SP_KVDL_SINGLE_END \ - (MLXSW_SP_KVDL_SINGLE_SIZE + MLXSW_SP_KVDL_SINGLE_BASE - 1) - -#define MLXSW_SP_KVDL_CHUNKS_BASE \ - (MLXSW_SP_KVDL_SINGLE_BASE + MLXSW_SP_KVDL_SINGLE_SIZE) -#define MLXSW_SP_KVDL_CHUNKS_SIZE 49152 -#define MLXSW_SP_KVDL_CHUNKS_END \ - (MLXSW_SP_KVDL_CHUNKS_SIZE + MLXSW_SP_KVDL_CHUNKS_BASE - 1) - -#define MLXSW_SP_KVDL_LARGE_CHUNKS_BASE \ - (MLXSW_SP_KVDL_CHUNKS_BASE + MLXSW_SP_KVDL_CHUNKS_SIZE) -#define MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE \ - (MLXSW_SP_KVD_LINEAR_SIZE - MLXSW_SP_KVDL_LARGE_CHUNKS_BASE) -#define MLXSW_SP_KVDL_LARGE_CHUNKS_END \ - (MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE + MLXSW_SP_KVDL_LARGE_CHUNKS_BASE - 1) - -#define MLXSW_SP_KVDL_SINGLE_ALLOC_SIZE 1 -#define MLXSW_SP_KVDL_CHUNKS_ALLOC_SIZE 32 -#define MLXSW_SP_KVDL_LARGE_CHUNKS_ALLOC_SIZE 512 - -struct mlxsw_sp_kvdl_part_info { - unsigned int part_index; - unsigned int start_index; - unsigned int end_index; - unsigned int alloc_size; - enum mlxsw_sp_resource_id resource_id; -}; - -enum mlxsw_sp_kvdl_part_id { - MLXSW_SP_KVDL_PART_ID_SINGLE, - MLXSW_SP_KVDL_PART_ID_CHUNKS, - MLXSW_SP_KVDL_PART_ID_LARGE_CHUNKS, -}; - -#define MLXSW_SP_KVDL_PART_INFO(id) \ -[MLXSW_SP_KVDL_PART_ID_##id] = { \ - .start_index = MLXSW_SP_KVDL_##id##_BASE, \ - .end_index = MLXSW_SP_KVDL_##id##_END, \ - .alloc_size = MLXSW_SP_KVDL_##id##_ALLOC_SIZE, \ - .resource_id = MLXSW_SP_RESOURCE_KVD_LINEAR_##id, \ -} - -static const struct mlxsw_sp_kvdl_part_info mlxsw_sp_kvdl_parts_info[] = { - MLXSW_SP_KVDL_PART_INFO(SINGLE), - MLXSW_SP_KVDL_PART_INFO(CHUNKS), - MLXSW_SP_KVDL_PART_INFO(LARGE_CHUNKS), -}; 
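/* Aside, not part of the patch: the spectrum_kvdl.c rewrite in this hunk
 * drops the fixed partition tables being deleted here and leaves only a
 * thin dispatcher: an ops pointer followed by variable-size private
 * storage obtained in a single allocation. A minimal sketch of that idiom
 * under hypothetical names (example_ops and example_obj are not from the
 * patch; the driver itself spells the flexible array as priv[0]):
 *
 *	struct example_ops {
 *		size_t priv_size;
 *		int (*init)(void *priv);
 *		void (*fini)(void *priv);
 *	};
 *
 *	struct example_obj {
 *		const struct example_ops *ops;
 *		unsigned long priv[];	// has to be the last member
 *	};
 *
 *	static struct example_obj *
 *	example_obj_create(const struct example_ops *ops)
 *	{
 *		struct example_obj *obj;
 *
 *		// one allocation covers the header and the ops-specific storage
 *		obj = kzalloc(sizeof(*obj) + ops->priv_size, GFP_KERNEL);
 *		if (!obj)
 *			return NULL;
 *		obj->ops = ops;
 *		if (ops->init(obj->priv)) {
 *			kfree(obj);
 *			return NULL;
 *		}
 *		return obj;
 *	}
 */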
- -#define MLXSW_SP_KVDL_PARTS_INFO_LEN ARRAY_SIZE(mlxsw_sp_kvdl_parts_info) - -struct mlxsw_sp_kvdl_part { - struct mlxsw_sp_kvdl_part_info info; - unsigned long usage[0]; /* Entries */ -}; - struct mlxsw_sp_kvdl { - struct mlxsw_sp_kvdl_part *parts[MLXSW_SP_KVDL_PARTS_INFO_LEN]; + const struct mlxsw_sp_kvdl_ops *kvdl_ops; + unsigned long priv[0]; + /* priv has to be always the last item */ }; -static struct mlxsw_sp_kvdl_part * -mlxsw_sp_kvdl_alloc_size_part(struct mlxsw_sp_kvdl *kvdl, - unsigned int alloc_size) -{ - struct mlxsw_sp_kvdl_part *part, *min_part = NULL; - int i; - - for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) { - part = kvdl->parts[i]; - if (alloc_size <= part->info.alloc_size && - (!min_part || - part->info.alloc_size <= min_part->info.alloc_size)) - min_part = part; - } - - return min_part ?: ERR_PTR(-ENOBUFS); -} - -static struct mlxsw_sp_kvdl_part * -mlxsw_sp_kvdl_index_part(struct mlxsw_sp_kvdl *kvdl, u32 kvdl_index) -{ - struct mlxsw_sp_kvdl_part *part; - int i; - - for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) { - part = kvdl->parts[i]; - if (kvdl_index >= part->info.start_index && - kvdl_index <= part->info.end_index) - return part; - } - - return ERR_PTR(-EINVAL); -} - -static u32 -mlxsw_sp_entry_index_kvdl_index(const struct mlxsw_sp_kvdl_part_info *info, - unsigned int entry_index) -{ - return info->start_index + entry_index * info->alloc_size; -} - -static unsigned int -mlxsw_sp_kvdl_index_entry_index(const struct mlxsw_sp_kvdl_part_info *info, - u32 kvdl_index) -{ - return (kvdl_index - info->start_index) / info->alloc_size; -} - -static int mlxsw_sp_kvdl_part_alloc(struct mlxsw_sp_kvdl_part *part, - u32 *p_kvdl_index) -{ - const struct mlxsw_sp_kvdl_part_info *info = &part->info; - unsigned int entry_index, nr_entries; - - nr_entries = (info->end_index - info->start_index + 1) / - info->alloc_size; - entry_index = find_first_zero_bit(part->usage, nr_entries); - if (entry_index == nr_entries) - return -ENOBUFS; - __set_bit(entry_index, part->usage); - - *p_kvdl_index = mlxsw_sp_entry_index_kvdl_index(info, entry_index); - - return 0; -} - -static void mlxsw_sp_kvdl_part_free(struct mlxsw_sp_kvdl_part *part, - u32 kvdl_index) -{ - const struct mlxsw_sp_kvdl_part_info *info = &part->info; - unsigned int entry_index; - - entry_index = mlxsw_sp_kvdl_index_entry_index(info, kvdl_index); - __clear_bit(entry_index, part->usage); -} - -int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count, - u32 *p_entry_index) -{ - struct mlxsw_sp_kvdl_part *part; - - /* Find partition with smallest allocation size satisfying the - * requested size. 
- */ - part = mlxsw_sp_kvdl_alloc_size_part(mlxsw_sp->kvdl, entry_count); - if (IS_ERR(part)) - return PTR_ERR(part); - - return mlxsw_sp_kvdl_part_alloc(part, p_entry_index); -} - -void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index) -{ - struct mlxsw_sp_kvdl_part *part; - - part = mlxsw_sp_kvdl_index_part(mlxsw_sp->kvdl, entry_index); - if (IS_ERR(part)) - return; - mlxsw_sp_kvdl_part_free(part, entry_index); -} - -int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp, - unsigned int entry_count, - unsigned int *p_alloc_size) -{ - struct mlxsw_sp_kvdl_part *part; - - part = mlxsw_sp_kvdl_alloc_size_part(mlxsw_sp->kvdl, entry_count); - if (IS_ERR(part)) - return PTR_ERR(part); - - *p_alloc_size = part->info.alloc_size; - - return 0; -} - -static void mlxsw_sp_kvdl_part_update(struct mlxsw_sp_kvdl_part *part, - struct mlxsw_sp_kvdl_part *part_prev, - unsigned int size) -{ - - if (!part_prev) { - part->info.end_index = size - 1; - } else { - part->info.start_index = part_prev->info.end_index + 1; - part->info.end_index = part->info.start_index + size - 1; - } -} - -static struct mlxsw_sp_kvdl_part * -mlxsw_sp_kvdl_part_init(struct mlxsw_sp *mlxsw_sp, - const struct mlxsw_sp_kvdl_part_info *info, - struct mlxsw_sp_kvdl_part *part_prev) +int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp) { - struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); - struct mlxsw_sp_kvdl_part *part; - bool need_update = true; - unsigned int nr_entries; - size_t usage_size; - u64 resource_size; + const struct mlxsw_sp_kvdl_ops *kvdl_ops = mlxsw_sp->kvdl_ops; + struct mlxsw_sp_kvdl *kvdl; int err; - err = devlink_resource_size_get(devlink, info->resource_id, - &resource_size); - if (err) { - need_update = false; - resource_size = info->end_index - info->start_index + 1; - } - - nr_entries = div_u64(resource_size, info->alloc_size); - usage_size = BITS_TO_LONGS(nr_entries) * sizeof(unsigned long); - part = kzalloc(sizeof(*part) + usage_size, GFP_KERNEL); - if (!part) - return ERR_PTR(-ENOMEM); - - memcpy(&part->info, info, sizeof(part->info)); - - if (need_update) - mlxsw_sp_kvdl_part_update(part, part_prev, resource_size); - return part; -} - -static void mlxsw_sp_kvdl_part_fini(struct mlxsw_sp_kvdl_part *part) -{ - kfree(part); -} - -static int mlxsw_sp_kvdl_parts_init(struct mlxsw_sp *mlxsw_sp) -{ - struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl; - const struct mlxsw_sp_kvdl_part_info *info; - struct mlxsw_sp_kvdl_part *part_prev = NULL; - int err, i; + kvdl = kzalloc(sizeof(*mlxsw_sp->kvdl) + kvdl_ops->priv_size, + GFP_KERNEL); + if (!kvdl) + return -ENOMEM; + kvdl->kvdl_ops = kvdl_ops; + mlxsw_sp->kvdl = kvdl; - for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) { - info = &mlxsw_sp_kvdl_parts_info[i]; - kvdl->parts[i] = mlxsw_sp_kvdl_part_init(mlxsw_sp, info, - part_prev); - if (IS_ERR(kvdl->parts[i])) { - err = PTR_ERR(kvdl->parts[i]); - goto err_kvdl_part_init; - } - part_prev = kvdl->parts[i]; - } + err = kvdl_ops->init(mlxsw_sp, kvdl->priv); + if (err) + goto err_init; return 0; -err_kvdl_part_init: - for (i--; i >= 0; i--) - mlxsw_sp_kvdl_part_fini(kvdl->parts[i]); +err_init: + kfree(kvdl); return err; } -static void mlxsw_sp_kvdl_parts_fini(struct mlxsw_sp *mlxsw_sp) +void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp) { struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl; - int i; - - for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) - mlxsw_sp_kvdl_part_fini(kvdl->parts[i]); -} - -static u64 mlxsw_sp_kvdl_part_occ(struct mlxsw_sp_kvdl_part *part) -{ - const struct mlxsw_sp_kvdl_part_info 
*info = &part->info; - unsigned int nr_entries; - int bit = -1; - u64 occ = 0; - - nr_entries = (info->end_index - - info->start_index + 1) / - info->alloc_size; - while ((bit = find_next_bit(part->usage, nr_entries, bit + 1)) - < nr_entries) - occ += info->alloc_size; - return occ; -} - -static u64 mlxsw_sp_kvdl_occ_get(void *priv) -{ - const struct mlxsw_sp *mlxsw_sp = priv; - u64 occ = 0; - int i; - - for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) - occ += mlxsw_sp_kvdl_part_occ(mlxsw_sp->kvdl->parts[i]); - - return occ; -} - -static u64 mlxsw_sp_kvdl_single_occ_get(void *priv) -{ - const struct mlxsw_sp *mlxsw_sp = priv; - struct mlxsw_sp_kvdl_part *part; - - part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_SINGLE]; - return mlxsw_sp_kvdl_part_occ(part); -} - -static u64 mlxsw_sp_kvdl_chunks_occ_get(void *priv) -{ - const struct mlxsw_sp *mlxsw_sp = priv; - struct mlxsw_sp_kvdl_part *part; - - part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_CHUNKS]; - return mlxsw_sp_kvdl_part_occ(part); -} - -static u64 mlxsw_sp_kvdl_large_chunks_occ_get(void *priv) -{ - const struct mlxsw_sp *mlxsw_sp = priv; - struct mlxsw_sp_kvdl_part *part; - part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_LARGE_CHUNKS]; - return mlxsw_sp_kvdl_part_occ(part); + kvdl->kvdl_ops->fini(mlxsw_sp, kvdl->priv); + kfree(kvdl); } -int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core) +int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_kvdl_entry_type type, + unsigned int entry_count, u32 *p_entry_index) { - struct devlink *devlink = priv_to_devlink(mlxsw_core); - static struct devlink_resource_size_params size_params; - u32 kvdl_max_size; - int err; - - kvdl_max_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) - - MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) - - MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE); - - devlink_resource_size_params_init(&size_params, 0, kvdl_max_size, - MLXSW_SP_KVDL_SINGLE_ALLOC_SIZE, - DEVLINK_RESOURCE_UNIT_ENTRY); - err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_SINGLES, - MLXSW_SP_KVDL_SINGLE_SIZE, - MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE, - MLXSW_SP_RESOURCE_KVD_LINEAR, - &size_params); - if (err) - return err; - - devlink_resource_size_params_init(&size_params, 0, kvdl_max_size, - MLXSW_SP_KVDL_CHUNKS_ALLOC_SIZE, - DEVLINK_RESOURCE_UNIT_ENTRY); - err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS, - MLXSW_SP_KVDL_CHUNKS_SIZE, - MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS, - MLXSW_SP_RESOURCE_KVD_LINEAR, - &size_params); - if (err) - return err; + struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl; - devlink_resource_size_params_init(&size_params, 0, kvdl_max_size, - MLXSW_SP_KVDL_LARGE_CHUNKS_ALLOC_SIZE, - DEVLINK_RESOURCE_UNIT_ENTRY); - err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS, - MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE, - MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS, - MLXSW_SP_RESOURCE_KVD_LINEAR, - &size_params); - return err; + return kvdl->kvdl_ops->alloc(mlxsw_sp, kvdl->priv, type, + entry_count, p_entry_index); } -int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp) +void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_kvdl_entry_type type, + unsigned int entry_count, int entry_index) { - struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); - struct mlxsw_sp_kvdl *kvdl; - int err; - - kvdl = kzalloc(sizeof(*mlxsw_sp->kvdl), GFP_KERNEL); - if (!kvdl) - return -ENOMEM; - mlxsw_sp->kvdl = kvdl; - - err = mlxsw_sp_kvdl_parts_init(mlxsw_sp); - 
if (err) - goto err_kvdl_parts_init; - - devlink_resource_occ_get_register(devlink, - MLXSW_SP_RESOURCE_KVD_LINEAR, - mlxsw_sp_kvdl_occ_get, - mlxsw_sp); - devlink_resource_occ_get_register(devlink, - MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE, - mlxsw_sp_kvdl_single_occ_get, - mlxsw_sp); - devlink_resource_occ_get_register(devlink, - MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS, - mlxsw_sp_kvdl_chunks_occ_get, - mlxsw_sp); - devlink_resource_occ_get_register(devlink, - MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS, - mlxsw_sp_kvdl_large_chunks_occ_get, - mlxsw_sp); - - return 0; + struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl; -err_kvdl_parts_init: - kfree(mlxsw_sp->kvdl); - return err; + kvdl->kvdl_ops->free(mlxsw_sp, kvdl->priv, type, + entry_count, entry_index); } -void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp) +int mlxsw_sp_kvdl_alloc_count_query(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_kvdl_entry_type type, + unsigned int entry_count, + unsigned int *p_alloc_count) { - struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); + struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl; - devlink_resource_occ_get_unregister(devlink, - MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS); - devlink_resource_occ_get_unregister(devlink, - MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS); - devlink_resource_occ_get_unregister(devlink, - MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE); - devlink_resource_occ_get_unregister(devlink, - MLXSW_SP_RESOURCE_KVD_LINEAR); - mlxsw_sp_kvdl_parts_fini(mlxsw_sp); - kfree(mlxsw_sp->kvdl); + return kvdl->kvdl_ops->alloc_size_query(mlxsw_sp, kvdl->priv, type, + entry_count, p_alloc_count); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c index a82539609d49..98dcaf78365c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c @@ -1075,6 +1075,6 @@ void mlxsw_sp_mr_fini(struct mlxsw_sp *mlxsw_sp) struct mlxsw_sp_mr *mr = mlxsw_sp->mr; cancel_delayed_work_sync(&mr->stats_update_dw); - mr->mr_ops->fini(mr->priv); + mr->mr_ops->fini(mlxsw_sp, mr->priv); kfree(mr); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h index 7c864a86811d..c92fa90dca31 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h @@ -46,15 +46,6 @@ enum mlxsw_sp_mr_route_action { MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD, }; -enum mlxsw_sp_mr_route_prio { - MLXSW_SP_MR_ROUTE_PRIO_SG, - MLXSW_SP_MR_ROUTE_PRIO_STARG, - MLXSW_SP_MR_ROUTE_PRIO_CATCHALL, - __MLXSW_SP_MR_ROUTE_PRIO_MAX -}; - -#define MLXSW_SP_MR_ROUTE_PRIO_MAX (__MLXSW_SP_MR_ROUTE_PRIO_MAX - 1) - struct mlxsw_sp_mr_route_key { int vrid; enum mlxsw_sp_l3proto proto; @@ -101,7 +92,7 @@ struct mlxsw_sp_mr_ops { u16 erif_index); void (*route_destroy)(struct mlxsw_sp *mlxsw_sp, void *priv, void *route_priv); - void (*fini)(void *priv); + void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv); }; struct mlxsw_sp_mr; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c index 4f4c0d311883..e9c9f1f45b9d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c @@ -1,7 +1,8 @@ /* * drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved. 
* Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com> + * Copyright (c) 2018 Jiri Pirko <jiri@mellanox.com> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -35,7 +36,6 @@ #include <linux/kernel.h> #include <linux/list.h> #include <linux/netdevice.h> -#include <linux/parman.h> #include "spectrum_mr_tcam.h" #include "reg.h" @@ -43,15 +43,8 @@ #include "core_acl_flex_actions.h" #include "spectrum_mr.h" -struct mlxsw_sp_mr_tcam_region { - struct mlxsw_sp *mlxsw_sp; - enum mlxsw_reg_rtar_key_type rtar_key_type; - struct parman *parman; - struct parman_prio *parman_prios; -}; - struct mlxsw_sp_mr_tcam { - struct mlxsw_sp_mr_tcam_region tcam_regions[MLXSW_SP_L3_PROTO_MAX]; + void *priv; }; /* This struct maps to one RIGR2 register entry */ @@ -84,8 +77,6 @@ mlxsw_sp_mr_erif_list_init(struct mlxsw_sp_mr_tcam_erif_list *erif_list) INIT_LIST_HEAD(&erif_list->erif_sublists); } -#define MLXSW_SP_KVDL_RIGR2_SIZE 1 - static struct mlxsw_sp_mr_erif_sublist * mlxsw_sp_mr_erif_sublist_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_mr_tcam_erif_list *erif_list) @@ -96,8 +87,8 @@ mlxsw_sp_mr_erif_sublist_create(struct mlxsw_sp *mlxsw_sp, erif_sublist = kzalloc(sizeof(*erif_sublist), GFP_KERNEL); if (!erif_sublist) return ERR_PTR(-ENOMEM); - err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_RIGR2_SIZE, - &erif_sublist->rigr2_kvdl_index); + err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR, + 1, &erif_sublist->rigr2_kvdl_index); if (err) { kfree(erif_sublist); return ERR_PTR(err); @@ -112,7 +103,8 @@ mlxsw_sp_mr_erif_sublist_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_mr_erif_sublist *erif_sublist) { list_del(&erif_sublist->list); - mlxsw_sp_kvdl_free(mlxsw_sp, erif_sublist->rigr2_kvdl_index); + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR, + 1, erif_sublist->rigr2_kvdl_index); kfree(erif_sublist); } @@ -221,12 +213,11 @@ struct mlxsw_sp_mr_tcam_route { struct mlxsw_sp_mr_tcam_erif_list erif_list; struct mlxsw_afa_block *afa_block; u32 counter_index; - struct parman_item parman_item; - struct parman_prio *parman_prio; enum mlxsw_sp_mr_route_action action; struct mlxsw_sp_mr_route_key key; u16 irif_index; u16 min_mtu; + void *priv; }; static struct mlxsw_afa_block * @@ -297,60 +288,6 @@ mlxsw_sp_mr_tcam_afa_block_destroy(struct mlxsw_afa_block *afa_block) mlxsw_afa_block_destroy(afa_block); } -static int mlxsw_sp_mr_tcam_route_replace(struct mlxsw_sp *mlxsw_sp, - struct parman_item *parman_item, - struct mlxsw_sp_mr_route_key *key, - struct mlxsw_afa_block *afa_block) -{ - char rmft2_pl[MLXSW_REG_RMFT2_LEN]; - - switch (key->proto) { - case MLXSW_SP_L3_PROTO_IPV4: - mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, true, parman_item->index, - key->vrid, - MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0, - ntohl(key->group.addr4), - ntohl(key->group_mask.addr4), - ntohl(key->source.addr4), - ntohl(key->source_mask.addr4), - mlxsw_afa_block_first_set(afa_block)); - break; - case MLXSW_SP_L3_PROTO_IPV6: - mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, true, parman_item->index, - key->vrid, - MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0, - key->group.addr6, - key->group_mask.addr6, - key->source.addr6, - key->source_mask.addr6, - mlxsw_afa_block_first_set(afa_block)); - } - - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl); -} - -static int mlxsw_sp_mr_tcam_route_remove(struct mlxsw_sp *mlxsw_sp, int vrid, - struct mlxsw_sp_mr_route_key *key, - struct parman_item *parman_item) -{ - struct 
in6_addr zero_addr = IN6ADDR_ANY_INIT; - char rmft2_pl[MLXSW_REG_RMFT2_LEN]; - - switch (key->proto) { - case MLXSW_SP_L3_PROTO_IPV4: - mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, false, parman_item->index, - vrid, 0, 0, 0, 0, 0, 0, NULL); - break; - case MLXSW_SP_L3_PROTO_IPV6: - mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, false, parman_item->index, - vrid, 0, 0, zero_addr, zero_addr, - zero_addr, zero_addr, NULL); - break; - } - - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl); -} - static int mlxsw_sp_mr_tcam_erif_populate(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_mr_tcam_erif_list *erif_list, @@ -370,51 +307,12 @@ mlxsw_sp_mr_tcam_erif_populate(struct mlxsw_sp *mlxsw_sp, return 0; } -static struct mlxsw_sp_mr_tcam_region * -mlxsw_sp_mr_tcam_protocol_region(struct mlxsw_sp_mr_tcam *mr_tcam, - enum mlxsw_sp_l3proto proto) -{ - return &mr_tcam->tcam_regions[proto]; -} - -static int -mlxsw_sp_mr_tcam_route_parman_item_add(struct mlxsw_sp_mr_tcam *mr_tcam, - struct mlxsw_sp_mr_tcam_route *route, - enum mlxsw_sp_mr_route_prio prio) -{ - struct mlxsw_sp_mr_tcam_region *tcam_region; - int err; - - tcam_region = mlxsw_sp_mr_tcam_protocol_region(mr_tcam, - route->key.proto); - err = parman_item_add(tcam_region->parman, - &tcam_region->parman_prios[prio], - &route->parman_item); - if (err) - return err; - - route->parman_prio = &tcam_region->parman_prios[prio]; - return 0; -} - -static void -mlxsw_sp_mr_tcam_route_parman_item_remove(struct mlxsw_sp_mr_tcam *mr_tcam, - struct mlxsw_sp_mr_tcam_route *route) -{ - struct mlxsw_sp_mr_tcam_region *tcam_region; - - tcam_region = mlxsw_sp_mr_tcam_protocol_region(mr_tcam, - route->key.proto); - - parman_item_remove(tcam_region->parman, - route->parman_prio, &route->parman_item); -} - static int mlxsw_sp_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv, void *route_priv, struct mlxsw_sp_mr_route_params *route_params) { + const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops; struct mlxsw_sp_mr_tcam_route *route = route_priv; struct mlxsw_sp_mr_tcam *mr_tcam = priv; int err; @@ -448,22 +346,23 @@ mlxsw_sp_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv, goto err_afa_block_create; } - /* Allocate place in the TCAM */ - err = mlxsw_sp_mr_tcam_route_parman_item_add(mr_tcam, route, - route_params->prio); - if (err) - goto err_parman_item_add; + route->priv = kzalloc(ops->route_priv_size, GFP_KERNEL); + if (!route->priv) { + err = -ENOMEM; + goto err_route_priv_alloc; + } /* Write the route to the TCAM */ - err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item, - &route->key, route->afa_block); + err = ops->route_create(mlxsw_sp, mr_tcam->priv, route->priv, + &route->key, route->afa_block, + route_params->prio); if (err) - goto err_route_replace; + goto err_route_create; return 0; -err_route_replace: - mlxsw_sp_mr_tcam_route_parman_item_remove(mr_tcam, route); -err_parman_item_add: +err_route_create: + kfree(route->priv); +err_route_priv_alloc: mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block); err_afa_block_create: mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index); @@ -476,12 +375,12 @@ err_counter_alloc: static void mlxsw_sp_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp, void *priv, void *route_priv) { + const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops; struct mlxsw_sp_mr_tcam_route *route = route_priv; struct mlxsw_sp_mr_tcam *mr_tcam = priv; - mlxsw_sp_mr_tcam_route_remove(mlxsw_sp, route->key.vrid, - &route->key, &route->parman_item); - 
mlxsw_sp_mr_tcam_route_parman_item_remove(mr_tcam, route); + ops->route_destroy(mlxsw_sp, mr_tcam->priv, route->priv, &route->key); + kfree(route->priv); mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block); mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index); mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list); @@ -502,6 +401,7 @@ mlxsw_sp_mr_tcam_route_action_update(struct mlxsw_sp *mlxsw_sp, void *route_priv, enum mlxsw_sp_mr_route_action route_action) { + const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops; struct mlxsw_sp_mr_tcam_route *route = route_priv; struct mlxsw_afa_block *afa_block; int err; @@ -516,8 +416,7 @@ mlxsw_sp_mr_tcam_route_action_update(struct mlxsw_sp *mlxsw_sp, return PTR_ERR(afa_block); /* Update the TCAM route entry */ - err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item, - &route->key, afa_block); + err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block); if (err) goto err; @@ -534,6 +433,7 @@ err: static int mlxsw_sp_mr_tcam_route_min_mtu_update(struct mlxsw_sp *mlxsw_sp, void *route_priv, u16 min_mtu) { + const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops; struct mlxsw_sp_mr_tcam_route *route = route_priv; struct mlxsw_afa_block *afa_block; int err; @@ -549,8 +449,7 @@ static int mlxsw_sp_mr_tcam_route_min_mtu_update(struct mlxsw_sp *mlxsw_sp, return PTR_ERR(afa_block); /* Update the TCAM route entry */ - err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item, - &route->key, afa_block); + err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block); if (err) goto err; @@ -596,6 +495,7 @@ static int mlxsw_sp_mr_tcam_route_erif_add(struct mlxsw_sp *mlxsw_sp, static int mlxsw_sp_mr_tcam_route_erif_del(struct mlxsw_sp *mlxsw_sp, void *route_priv, u16 erif_index) { + const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops; struct mlxsw_sp_mr_tcam_route *route = route_priv; struct mlxsw_sp_mr_erif_sublist *erif_sublist; struct mlxsw_sp_mr_tcam_erif_list erif_list; @@ -630,8 +530,7 @@ static int mlxsw_sp_mr_tcam_route_erif_del(struct mlxsw_sp *mlxsw_sp, } /* Update the TCAM route entry */ - err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item, - &route->key, afa_block); + err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block); if (err) goto err_route_write; @@ -653,6 +552,7 @@ static int mlxsw_sp_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp, void *route_priv, struct mlxsw_sp_mr_route_info *route_info) { + const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops; struct mlxsw_sp_mr_tcam_route *route = route_priv; struct mlxsw_sp_mr_tcam_erif_list erif_list; struct mlxsw_afa_block *afa_block; @@ -677,8 +577,7 @@ mlxsw_sp_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp, void *route_priv, } /* Update the TCAM route entry */ - err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item, - &route->key, afa_block); + err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block); if (err) goto err_route_write; @@ -699,167 +598,36 @@ err_erif_populate: return err; } -#define MLXSW_SP_MR_TCAM_REGION_BASE_COUNT 16 -#define MLXSW_SP_MR_TCAM_REGION_RESIZE_STEP 16 - -static int -mlxsw_sp_mr_tcam_region_alloc(struct mlxsw_sp_mr_tcam_region *mr_tcam_region) -{ - struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp; - char rtar_pl[MLXSW_REG_RTAR_LEN]; - - mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_ALLOCATE, - mr_tcam_region->rtar_key_type, - MLXSW_SP_MR_TCAM_REGION_BASE_COUNT); - return mlxsw_reg_write(mlxsw_sp->core, 
MLXSW_REG(rtar), rtar_pl); -} - -static void -mlxsw_sp_mr_tcam_region_free(struct mlxsw_sp_mr_tcam_region *mr_tcam_region) -{ - struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp; - char rtar_pl[MLXSW_REG_RTAR_LEN]; - - mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_DEALLOCATE, - mr_tcam_region->rtar_key_type, 0); - mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl); -} - -static int mlxsw_sp_mr_tcam_region_parman_resize(void *priv, - unsigned long new_count) -{ - struct mlxsw_sp_mr_tcam_region *mr_tcam_region = priv; - struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp; - char rtar_pl[MLXSW_REG_RTAR_LEN]; - u64 max_tcam_rules; - - max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES); - if (new_count > max_tcam_rules) - return -EINVAL; - mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_RESIZE, - mr_tcam_region->rtar_key_type, new_count); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl); -} - -static void mlxsw_sp_mr_tcam_region_parman_move(void *priv, - unsigned long from_index, - unsigned long to_index, - unsigned long count) -{ - struct mlxsw_sp_mr_tcam_region *mr_tcam_region = priv; - struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp; - char rrcr_pl[MLXSW_REG_RRCR_LEN]; - - mlxsw_reg_rrcr_pack(rrcr_pl, MLXSW_REG_RRCR_OP_MOVE, - from_index, count, - mr_tcam_region->rtar_key_type, to_index); - mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rrcr), rrcr_pl); -} - -static const struct parman_ops mlxsw_sp_mr_tcam_region_parman_ops = { - .base_count = MLXSW_SP_MR_TCAM_REGION_BASE_COUNT, - .resize_step = MLXSW_SP_MR_TCAM_REGION_RESIZE_STEP, - .resize = mlxsw_sp_mr_tcam_region_parman_resize, - .move = mlxsw_sp_mr_tcam_region_parman_move, - .algo = PARMAN_ALGO_TYPE_LSORT, -}; - -static int -mlxsw_sp_mr_tcam_region_init(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_mr_tcam_region *mr_tcam_region, - enum mlxsw_reg_rtar_key_type rtar_key_type) -{ - struct parman_prio *parman_prios; - struct parman *parman; - int err; - int i; - - mr_tcam_region->rtar_key_type = rtar_key_type; - mr_tcam_region->mlxsw_sp = mlxsw_sp; - - err = mlxsw_sp_mr_tcam_region_alloc(mr_tcam_region); - if (err) - return err; - - parman = parman_create(&mlxsw_sp_mr_tcam_region_parman_ops, - mr_tcam_region); - if (!parman) { - err = -ENOMEM; - goto err_parman_create; - } - mr_tcam_region->parman = parman; - - parman_prios = kmalloc_array(MLXSW_SP_MR_ROUTE_PRIO_MAX + 1, - sizeof(*parman_prios), GFP_KERNEL); - if (!parman_prios) { - err = -ENOMEM; - goto err_parman_prios_alloc; - } - mr_tcam_region->parman_prios = parman_prios; - - for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++) - parman_prio_init(mr_tcam_region->parman, - &mr_tcam_region->parman_prios[i], i); - return 0; - -err_parman_prios_alloc: - parman_destroy(parman); -err_parman_create: - mlxsw_sp_mr_tcam_region_free(mr_tcam_region); - return err; -} - -static void -mlxsw_sp_mr_tcam_region_fini(struct mlxsw_sp_mr_tcam_region *mr_tcam_region) -{ - int i; - - for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++) - parman_prio_fini(&mr_tcam_region->parman_prios[i]); - kfree(mr_tcam_region->parman_prios); - parman_destroy(mr_tcam_region->parman); - mlxsw_sp_mr_tcam_region_free(mr_tcam_region); -} - static int mlxsw_sp_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv) { + const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops; struct mlxsw_sp_mr_tcam *mr_tcam = priv; - struct mlxsw_sp_mr_tcam_region *region = &mr_tcam->tcam_regions[0]; - u32 rtar_key; int err; - if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MC_ERIF_LIST_ENTRIES) 
|| - !MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_TCAM_RULES)) + if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MC_ERIF_LIST_ENTRIES)) return -EIO; - rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST; - err = mlxsw_sp_mr_tcam_region_init(mlxsw_sp, - &region[MLXSW_SP_L3_PROTO_IPV4], - rtar_key); - if (err) - return err; + mr_tcam->priv = kzalloc(ops->priv_size, GFP_KERNEL); + if (!mr_tcam->priv) + return -ENOMEM; - rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV6_MULTICAST; - err = mlxsw_sp_mr_tcam_region_init(mlxsw_sp, - &region[MLXSW_SP_L3_PROTO_IPV6], - rtar_key); + err = ops->init(mlxsw_sp, mr_tcam->priv); if (err) - goto err_ipv6_region_init; - + goto err_init; return 0; -err_ipv6_region_init: - mlxsw_sp_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV4]); +err_init: + kfree(mr_tcam->priv); return err; } -static void mlxsw_sp_mr_tcam_fini(void *priv) +static void mlxsw_sp_mr_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv) { + const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops; struct mlxsw_sp_mr_tcam *mr_tcam = priv; - struct mlxsw_sp_mr_tcam_region *region = &mr_tcam->tcam_regions[0]; - mlxsw_sp_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV6]); - mlxsw_sp_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV4]); + ops->fini(mr_tcam->priv); + kfree(mr_tcam->priv); } const struct mlxsw_sp_mr_ops mlxsw_sp_mr_tcam_ops = { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 77b2adb29341..eec7166fad62 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -48,6 +48,7 @@ #include <linux/route.h> #include <linux/gcd.h> #include <linux/random.h> +#include <linux/if_macvlan.h> #include <net/netevent.h> #include <net/neighbour.h> #include <net/arp.h> @@ -60,6 +61,7 @@ #include <net/ndisc.h> #include <net/ipv6.h> #include <net/fib_notifier.h> +#include <net/switchdev.h> #include "spectrum.h" #include "core.h" @@ -163,7 +165,9 @@ struct mlxsw_sp_rif_ops { const struct mlxsw_sp_rif_params *params); int (*configure)(struct mlxsw_sp_rif *rif); void (*deconfigure)(struct mlxsw_sp_rif *rif); - struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif); + struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif, + struct netlink_ext_ack *extack); + void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac); }; static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree); @@ -342,10 +346,6 @@ static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif) mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS); } -static struct mlxsw_sp_rif * -mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp, - const struct net_device *dev); - #define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1) struct mlxsw_sp_prefix_usage { @@ -1109,7 +1109,8 @@ mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp, u32 tunnel_index; int err; - err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &tunnel_index); + err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, + 1, &tunnel_index); if (err) return err; @@ -1125,7 +1126,8 @@ static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp, /* Unlink this node from the IPIP entry that it's the decap entry of.
*/ fib_entry->decap.ipip_entry->decap_fib_entry = NULL; fib_entry->decap.ipip_entry = NULL; - mlxsw_sp_kvdl_free(mlxsw_sp, fib_entry->decap.tunnel_index); + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, + 1, fib_entry->decap.tunnel_index); } static struct mlxsw_sp_fib_node * @@ -2434,17 +2436,48 @@ static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work) kfree(net_work); } +static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp); + +static void mlxsw_sp_router_update_priority_work(struct work_struct *work) +{ + struct mlxsw_sp_netevent_work *net_work = + container_of(work, struct mlxsw_sp_netevent_work, work); + struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp; + + __mlxsw_sp_router_init(mlxsw_sp); + kfree(net_work); +} + +static int mlxsw_sp_router_schedule_work(struct net *net, + struct notifier_block *nb, + void (*cb)(struct work_struct *)) +{ + struct mlxsw_sp_netevent_work *net_work; + struct mlxsw_sp_router *router; + + if (!net_eq(net, &init_net)) + return NOTIFY_DONE; + + net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC); + if (!net_work) + return NOTIFY_BAD; + + router = container_of(nb, struct mlxsw_sp_router, netevent_nb); + INIT_WORK(&net_work->work, cb); + net_work->mlxsw_sp = router->mlxsw_sp; + mlxsw_core_schedule_work(&net_work->work); + return NOTIFY_DONE; +} + static int mlxsw_sp_router_netevent_event(struct notifier_block *nb, unsigned long event, void *ptr) { struct mlxsw_sp_netevent_work *net_work; struct mlxsw_sp_port *mlxsw_sp_port; - struct mlxsw_sp_router *router; struct mlxsw_sp *mlxsw_sp; unsigned long interval; struct neigh_parms *p; struct neighbour *n; - struct net *net; switch (event) { case NETEVENT_DELAY_PROBE_TIME_UPDATE: @@ -2498,20 +2531,12 @@ static int mlxsw_sp_router_netevent_event(struct notifier_block *nb, break; case NETEVENT_IPV4_MPATH_HASH_UPDATE: case NETEVENT_IPV6_MPATH_HASH_UPDATE: - net = ptr; - - if (!net_eq(net, &init_net)) - return NOTIFY_DONE; - - net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC); - if (!net_work) - return NOTIFY_BAD; + return mlxsw_sp_router_schedule_work(ptr, nb, + mlxsw_sp_router_mp_hash_event_work); - router = container_of(nb, struct mlxsw_sp_router, netevent_nb); - INIT_WORK(&net_work->work, mlxsw_sp_router_mp_hash_event_work); - net_work->mlxsw_sp = router->mlxsw_sp; - mlxsw_core_schedule_work(&net_work->work); - break; + case NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE: + return mlxsw_sp_router_schedule_work(ptr, nb, + mlxsw_sp_router_update_priority_work); } return NOTIFY_DONE; @@ -3165,8 +3190,9 @@ static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp, * by the device and make sure the request can be satisfied. */ mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size); - err = mlxsw_sp_kvdl_alloc_size_query(mlxsw_sp, *p_adj_grp_size, - &alloc_size); + err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp, + MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, + *p_adj_grp_size, &alloc_size); if (err) return err; /* It is possible the allocation results in more allocated @@ -3278,7 +3304,8 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, /* No valid allocation size available. */ goto set_trap; - err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index); + err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, + ecmp_size, &adj_index); if (err) { /* We ran out of KVD linear space, just set the * trap and let everything flow through kernel. 
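Aside, not part of the patch: a detail that repeats through these spectrum_router.c hunks is that mlxsw_sp_kvdl_free() now takes the same entry type and entry count that were passed to mlxsw_sp_kvdl_alloc(), since the pluggable allocator can no longer derive the partition from the index alone. A minimal sketch of the paired calls, using only identifiers from this patch (the wrapper functions themselves are illustrative, not from the driver):

	static int example_adj_group_get(struct mlxsw_sp *mlxsw_sp,
					 u16 ecmp_size, u32 *p_adj_index)
	{
		/* type and count must match between alloc and free */
		return mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
					   ecmp_size, p_adj_index);
	}

	static void example_adj_group_put(struct mlxsw_sp *mlxsw_sp,
					  u16 ecmp_size, u32 adj_index)
	{
		mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
				   ecmp_size, adj_index);
	}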
@@ -3313,7 +3340,8 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp, old_adj_index, old_ecmp_size); - mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index); + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, + old_ecmp_size, old_adj_index); if (err) { dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n"); goto set_trap; @@ -3335,7 +3363,8 @@ set_trap: if (err) dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n"); if (old_adj_index_valid) - mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index); + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, + nh_grp->ecmp_size, nh_grp->adj_index); } static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh, @@ -5967,7 +5996,7 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, return NOTIFY_DONE; } -static struct mlxsw_sp_rif * +struct mlxsw_sp_rif * mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp, const struct net_device *dev) { @@ -6024,6 +6053,12 @@ mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev, !list_empty(&inet6_dev->addr_list)) addr_list_empty = false; + /* macvlans do not have a RIF, but rather piggy back on the + * RIF of their lower device. + */ + if (netif_is_macvlan(dev) && addr_list_empty) + return true; + if (rif && addr_list_empty && !netif_is_l3_slave(rif->dev)) return true; @@ -6125,6 +6160,11 @@ const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif) return rif->dev; } +struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif) +{ + return rif->fid; +} + static struct mlxsw_sp_rif * mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, const struct mlxsw_sp_rif_params *params, @@ -6162,7 +6202,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, rif->ops = ops; if (ops->fid_get) { - fid = ops->fid_get(rif); + fid = ops->fid_get(rif, extack); if (IS_ERR(fid)) { err = PTR_ERR(fid); goto err_fid_get; @@ -6267,7 +6307,7 @@ mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, } /* FID was already created, just take a reference */ - fid = rif->ops->fid_get(rif); + fid = rif->ops->fid_get(rif, extack); err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid); if (err) goto err_fid_port_vid_map; @@ -6432,6 +6472,123 @@ static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev, return 0; } +static bool mlxsw_sp_rif_macvlan_is_vrrp4(const u8 *mac) +{ + u8 vrrp4[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x00 }; + u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 }; + + return ether_addr_equal_masked(mac, vrrp4, mask); +} + +static bool mlxsw_sp_rif_macvlan_is_vrrp6(const u8 *mac) +{ + u8 vrrp6[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x02, 0x00 }; + u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 }; + + return ether_addr_equal_masked(mac, vrrp6, mask); +} + +static int mlxsw_sp_rif_vrrp_op(struct mlxsw_sp *mlxsw_sp, u16 rif_index, + const u8 *mac, bool adding) +{ + char ritr_pl[MLXSW_REG_RITR_LEN]; + u8 vrrp_id = adding ? 
mac[5] : 0; + int err; + + if (!mlxsw_sp_rif_macvlan_is_vrrp4(mac) && + !mlxsw_sp_rif_macvlan_is_vrrp6(mac)) + return 0; + + mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); + if (err) + return err; + + if (mlxsw_sp_rif_macvlan_is_vrrp4(mac)) + mlxsw_reg_ritr_if_vrrp_id_ipv4_set(ritr_pl, vrrp_id); + else + mlxsw_reg_ritr_if_vrrp_id_ipv6_set(ritr_pl, vrrp_id); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); +} + +static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp, + const struct net_device *macvlan_dev, + struct netlink_ext_ack *extack) +{ + struct macvlan_dev *vlan = netdev_priv(macvlan_dev); + struct mlxsw_sp_rif *rif; + int err; + + rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev); + if (!rif) { + NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); + return -EOPNOTSUPP; + } + + err = mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr, + mlxsw_sp_fid_index(rif->fid), true); + if (err) + return err; + + err = mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, + macvlan_dev->dev_addr, true); + if (err) + goto err_rif_vrrp_add; + + /* Make sure the bridge driver does not have this MAC pointing at + * some other port. + */ + if (rif->ops->fdb_del) + rif->ops->fdb_del(rif, macvlan_dev->dev_addr); + + return 0; + +err_rif_vrrp_add: + mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr, + mlxsw_sp_fid_index(rif->fid), false); + return err; +} + +void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp, + const struct net_device *macvlan_dev) +{ + struct macvlan_dev *vlan = netdev_priv(macvlan_dev); + struct mlxsw_sp_rif *rif; + + rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev); + /* If we do not have a RIF, then we already took care of + * removing the macvlan's MAC during RIF deletion. + */ + if (!rif) + return; + mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, macvlan_dev->dev_addr, + false); + mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr, + mlxsw_sp_fid_index(rif->fid), false); +} + +static int mlxsw_sp_inetaddr_macvlan_event(struct net_device *macvlan_dev, + unsigned long event, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp *mlxsw_sp; + + mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev); + if (!mlxsw_sp) + return 0; + + switch (event) { + case NETDEV_UP: + return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack); + case NETDEV_DOWN: + mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev); + break; + } + + return 0; +} + static int __mlxsw_sp_inetaddr_event(struct net_device *dev, unsigned long event, struct netlink_ext_ack *extack) @@ -6444,6 +6601,8 @@ static int __mlxsw_sp_inetaddr_event(struct net_device *dev, return mlxsw_sp_inetaddr_bridge_event(dev, event, extack); else if (is_vlan_dev(dev)) return mlxsw_sp_inetaddr_vlan_event(dev, event, extack); + else if (netif_is_macvlan(dev)) + return mlxsw_sp_inetaddr_macvlan_event(dev, event, extack); else return 0; } @@ -6684,7 +6843,10 @@ int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event, struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev); int err = 0; - if (!mlxsw_sp) + /* We do not create a RIF for a macvlan, but only use it to + * direct more MAC addresses to the router. 
+ */ + if (!mlxsw_sp || netif_is_macvlan(l3_dev)) return 0; switch (event) { @@ -6705,6 +6867,27 @@ int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event, return err; } +static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev, void *data) +{ + struct mlxsw_sp_rif *rif = data; + + if (!netif_is_macvlan(dev)) + return 0; + + return mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr, + mlxsw_sp_fid_index(rif->fid), false); +} + +static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif) +{ + if (!netif_is_macvlan_port(rif->dev)) + return 0; + + netdev_warn(rif->dev, "Router interface is deleted. Upper macvlans will not work\n"); + return netdev_walk_all_upper_dev_rcu(rif->dev, + __mlxsw_sp_rif_macvlan_flush, rif); +} + static struct mlxsw_sp_rif_subport * mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif) { @@ -6771,11 +6954,13 @@ static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif) mlxsw_sp_fid_rif_set(fid, NULL); mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr, mlxsw_sp_fid_index(fid), false); + mlxsw_sp_rif_macvlan_flush(rif); mlxsw_sp_rif_subport_op(rif, false); } static struct mlxsw_sp_fid * -mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif) +mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif, + struct netlink_ext_ack *extack) { return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index); } @@ -6857,6 +7042,7 @@ static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif) mlxsw_sp_fid_rif_set(fid, NULL); mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr, mlxsw_sp_fid_index(fid), false); + mlxsw_sp_rif_macvlan_flush(rif); mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC, mlxsw_sp_router_port(mlxsw_sp), false); mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC, @@ -6865,19 +7051,49 @@ static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif) } static struct mlxsw_sp_fid * -mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif) +mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif, + struct netlink_ext_ack *extack) { - u16 vid = is_vlan_dev(rif->dev) ? vlan_dev_vlan_id(rif->dev) : 1; + u16 vid; + int err; + + if (is_vlan_dev(rif->dev)) { + vid = vlan_dev_vlan_id(rif->dev); + } else { + err = br_vlan_get_pvid(rif->dev, &vid); + if (err < 0 || !vid) { + NL_SET_ERR_MSG_MOD(extack, "Couldn't determine bridge PVID"); + return ERR_PTR(-EINVAL); + } + } return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid); } +static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac) +{ + u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid); + struct switchdev_notifier_fdb_info info; + struct net_device *br_dev; + struct net_device *dev; + + br_dev = is_vlan_dev(rif->dev) ? 
vlan_dev_real_dev(rif->dev) : rif->dev; + dev = br_fdb_find_port(br_dev, mac, vid); + if (!dev) + return; + + info.addr = mac; + info.vid = vid; + call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info); +} + static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = { .type = MLXSW_SP_RIF_TYPE_VLAN, .rif_size = sizeof(struct mlxsw_sp_rif), .configure = mlxsw_sp_rif_vlan_configure, .deconfigure = mlxsw_sp_rif_vlan_deconfigure, .fid_get = mlxsw_sp_rif_vlan_fid_get, + .fdb_del = mlxsw_sp_rif_vlan_fdb_del, }; static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif) @@ -6929,6 +7145,7 @@ static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif) mlxsw_sp_fid_rif_set(fid, NULL); mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr, mlxsw_sp_fid_index(fid), false); + mlxsw_sp_rif_macvlan_flush(rif); mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC, mlxsw_sp_router_port(mlxsw_sp), false); mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC, @@ -6937,17 +7154,33 @@ static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif) } static struct mlxsw_sp_fid * -mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif) +mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif, + struct netlink_ext_ack *extack) { return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex); } +static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac) +{ + struct switchdev_notifier_fdb_info info; + struct net_device *dev; + + dev = br_fdb_find_port(rif->dev, mac, 0); + if (!dev) + return; + + info.addr = mac; + info.vid = 0; + call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info); +} + static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = { .type = MLXSW_SP_RIF_TYPE_FID, .rif_size = sizeof(struct mlxsw_sp_rif), .configure = mlxsw_sp_rif_fid_configure, .deconfigure = mlxsw_sp_rif_fid_deconfigure, .fid_get = mlxsw_sp_rif_fid_fid_get, + .fdb_del = mlxsw_sp_rif_fid_fdb_del, }; static struct mlxsw_sp_rif_ipip_lb * @@ -7172,6 +7405,7 @@ static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp) static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) { + bool usp = init_net.ipv4.sysctl_ip_fwd_update_priority; char rgcr_pl[MLXSW_REG_RGCR_LEN]; u64 max_rifs; int err; @@ -7182,7 +7416,7 @@ static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) mlxsw_reg_rgcr_pack(rgcr_pl, true, true); mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs); - mlxsw_reg_rgcr_usp_set(rgcr_pl, true); + mlxsw_reg_rgcr_usp_set(rgcr_pl, usp); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h index a01edcf56797..52e25695625c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h @@ -66,6 +66,8 @@ struct mlxsw_sp_neigh_entry; struct mlxsw_sp_nexthop; struct mlxsw_sp_ipip_entry; +struct mlxsw_sp_rif *mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp, + const struct net_device *dev); struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp, u16 rif_index); u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif); @@ -75,6 +77,7 @@ u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev); int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif); u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp); const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif); +struct mlxsw_sp_fid 
*mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif); int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_rif *rif, enum mlxsw_sp_rif_counter_dir dir, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index 3d187d88cc7c..e42d640cddab 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -36,6 +36,7 @@ #include <linux/list.h> #include <net/arp.h> #include <net/gre.h> +#include <net/lag.h> #include <net/ndisc.h> #include <net/ip6_tunnel.h> @@ -254,7 +255,9 @@ mlxsw_sp_span_entry_lag(struct net_device *lag_dev) struct list_head *iter; netdev_for_each_lower_dev(lag_dev, dev, iter) - if ((dev->flags & IFF_UP) && mlxsw_sp_port_dev_check(dev)) + if (netif_carrier_ok(dev) && + net_lag_port_dev_txable(dev) && + mlxsw_sp_port_dev_check(dev)) return dev; return NULL; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index eea5666a86b2..da94e1eb9e16 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -1135,6 +1135,39 @@ err_port_vlan_set: return err; } +static int +mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp, + const struct net_device *br_dev, + const struct switchdev_obj_port_vlan *vlan) +{ + struct mlxsw_sp_rif *rif; + struct mlxsw_sp_fid *fid; + u16 pvid; + u16 vid; + + rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev); + if (!rif) + return 0; + fid = mlxsw_sp_rif_fid(rif); + pvid = mlxsw_sp_fid_8021q_vid(fid); + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { + if (vlan->flags & BRIDGE_VLAN_INFO_PVID) { + if (vid != pvid) { + netdev_err(br_dev, "Can't change PVID, it's used by router interface\n"); + return -EBUSY; + } + } else { + if (vid == pvid) { + netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n"); + return -EBUSY; + } + } + } + + return 0; +} + static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, const struct switchdev_obj_port_vlan *vlan, struct switchdev_trans *trans) @@ -1146,8 +1179,18 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_bridge_port *bridge_port; u16 vid; - if (netif_is_bridge_master(orig_dev)) - return -EOPNOTSUPP; + if (netif_is_bridge_master(orig_dev)) { + int err = 0; + + if ((vlan->flags & BRIDGE_VLAN_INFO_BRENTRY) && + br_vlan_enabled(orig_dev) && + switchdev_trans_ph_prepare(trans)) + err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp, + orig_dev, vlan); + if (!err) + err = -EOPNOTSUPP; + return err; + } if (switchdev_trans_ph_prepare(trans)) return 0; diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h index 399e9d6993f7..eb437f59640d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/trap.h +++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h @@ -63,6 +63,7 @@ enum { MLXSW_TRAP_ID_LBERROR = 0x54, MLXSW_TRAP_ID_IPV4_OSPF = 0x55, MLXSW_TRAP_ID_IPV4_PIM = 0x58, + MLXSW_TRAP_ID_IPV4_VRRP = 0x59, MLXSW_TRAP_ID_RPF = 0x5C, MLXSW_TRAP_ID_IP2ME = 0x5F, MLXSW_TRAP_ID_IPV6_UNSPECIFIED_ADDRESS = 0x60, @@ -78,6 +79,7 @@ enum { MLXSW_TRAP_ID_IPV6_ALL_ROUTERS_LINK = 0x6F, MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70, MLXSW_TRAP_ID_IPV6_PIM = 0x79, + MLXSW_TRAP_ID_IPV6_VRRP = 0x7A, MLXSW_TRAP_ID_IPV4_BGP = 0x88, MLXSW_TRAP_ID_IPV6_BGP = 0x89, MLXSW_TRAP_ID_L3_IPV6_ROUTER_SOLICITATION = 0x8A, diff --git 
a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c index b72d1bd11296..ebbdfb908745 100644 --- a/drivers/net/ethernet/micrel/ksz884x.c +++ b/drivers/net/ethernet/micrel/ksz884x.c @@ -3373,7 +3373,6 @@ static void port_get_link_speed(struct ksz_port *port) */ static void port_set_link_speed(struct ksz_port *port) { - struct ksz_port_info *info; struct ksz_hw *hw = port->hw; u16 data; u16 cfg; @@ -3382,8 +3381,6 @@ static void port_set_link_speed(struct ksz_port *port) int p; for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) { - info = &hw->port_info[p]; - port_r16(hw, p, KS884X_PORT_CTRL_4_OFFSET, &data); port_r8(hw, p, KS884X_PORT_STATUS_OFFSET, &status); diff --git a/drivers/net/ethernet/microchip/Makefile b/drivers/net/ethernet/microchip/Makefile index 2e982cc249fb..43f47cb45fe2 100644 --- a/drivers/net/ethernet/microchip/Makefile +++ b/drivers/net/ethernet/microchip/Makefile @@ -6,4 +6,4 @@ obj-$(CONFIG_ENC28J60) += enc28j60.o obj-$(CONFIG_ENCX24J600) += encx24j600.o encx24j600-regmap.o obj-$(CONFIG_LAN743X) += lan743x.o -lan743x-objs := lan743x_main.o +lan743x-objs := lan743x_main.o lan743x_ethtool.o diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c new file mode 100644 index 000000000000..c25b3e97ae26 --- /dev/null +++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c @@ -0,0 +1,696 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (C) 2018 Microchip Technology Inc. */ + +#include <linux/netdevice.h> +#include "lan743x_main.h" +#include "lan743x_ethtool.h" +#include <linux/pci.h> +#include <linux/phy.h> + +/* eeprom */ +#define LAN743X_EEPROM_MAGIC (0x74A5) +#define LAN743X_OTP_MAGIC (0x74F3) +#define EEPROM_INDICATOR_1 (0xA5) +#define EEPROM_INDICATOR_2 (0xAA) +#define EEPROM_MAC_OFFSET (0x01) +#define MAX_EEPROM_SIZE 512 +#define OTP_INDICATOR_1 (0xF3) +#define OTP_INDICATOR_2 (0xF7) + +static int lan743x_otp_write(struct lan743x_adapter *adapter, u32 offset, + u32 length, u8 *data) +{ + unsigned long timeout; + u32 buf; + int i; + + buf = lan743x_csr_read(adapter, OTP_PWR_DN); + + if (buf & OTP_PWR_DN_PWRDN_N_) { + /* clear it and wait to be cleared */ + lan743x_csr_write(adapter, OTP_PWR_DN, 0); + + timeout = jiffies + HZ; + do { + udelay(1); + buf = lan743x_csr_read(adapter, OTP_PWR_DN); + if (time_after(jiffies, timeout)) { + netif_warn(adapter, drv, adapter->netdev, + "timeout on OTP_PWR_DN completion\n"); + return -EIO; + } + } while (buf & OTP_PWR_DN_PWRDN_N_); + } + + /* set to BYTE program mode */ + lan743x_csr_write(adapter, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_); + + for (i = 0; i < length; i++) { + lan743x_csr_write(adapter, OTP_ADDR1, + ((offset + i) >> 8) & + OTP_ADDR1_15_11_MASK_); + lan743x_csr_write(adapter, OTP_ADDR2, + ((offset + i) & + OTP_ADDR2_10_3_MASK_)); + lan743x_csr_write(adapter, OTP_PRGM_DATA, data[i]); + lan743x_csr_write(adapter, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_); + lan743x_csr_write(adapter, OTP_CMD_GO, OTP_CMD_GO_GO_); + + timeout = jiffies + HZ; + do { + udelay(1); + buf = lan743x_csr_read(adapter, OTP_STATUS); + if (time_after(jiffies, timeout)) { + netif_warn(adapter, drv, adapter->netdev, + "Timeout on OTP_STATUS completion\n"); + return -EIO; + } + } while (buf & OTP_STATUS_BUSY_); + } + + return 0; +} + +static int lan743x_eeprom_wait(struct lan743x_adapter *adapter) +{ + unsigned long start_time = jiffies; + u32 val; + + do { + val = lan743x_csr_read(adapter, E2P_CMD); + + if (!(val & E2P_CMD_EPC_BUSY_) || + (val & 
E2P_CMD_EPC_TIMEOUT_)) + break; + usleep_range(40, 100); + } while (!time_after(jiffies, start_time + HZ)); + + if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) { + netif_warn(adapter, drv, adapter->netdev, + "EEPROM read operation timeout\n"); + return -EIO; + } + + return 0; +} + +static int lan743x_eeprom_confirm_not_busy(struct lan743x_adapter *adapter) +{ + unsigned long start_time = jiffies; + u32 val; + + do { + val = lan743x_csr_read(adapter, E2P_CMD); + + if (!(val & E2P_CMD_EPC_BUSY_)) + return 0; + + usleep_range(40, 100); + } while (!time_after(jiffies, start_time + HZ)); + + netif_warn(adapter, drv, adapter->netdev, "EEPROM is busy\n"); + return -EIO; +} + +static int lan743x_eeprom_read(struct lan743x_adapter *adapter, + u32 offset, u32 length, u8 *data) +{ + int retval; + u32 val; + int i; + + retval = lan743x_eeprom_confirm_not_busy(adapter); + if (retval) + return retval; + + for (i = 0; i < length; i++) { + val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_; + val |= (offset & E2P_CMD_EPC_ADDR_MASK_); + lan743x_csr_write(adapter, E2P_CMD, val); + + retval = lan743x_eeprom_wait(adapter); + if (retval < 0) + return retval; + + val = lan743x_csr_read(adapter, E2P_DATA); + data[i] = val & 0xFF; + offset++; + } + + return 0; +} + +static int lan743x_eeprom_write(struct lan743x_adapter *adapter, + u32 offset, u32 length, u8 *data) +{ + int retval; + u32 val; + int i; + + retval = lan743x_eeprom_confirm_not_busy(adapter); + if (retval) + return retval; + + /* Issue write/erase enable command */ + val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_; + lan743x_csr_write(adapter, E2P_CMD, val); + + retval = lan743x_eeprom_wait(adapter); + if (retval < 0) + return retval; + + for (i = 0; i < length; i++) { + /* Fill data register */ + val = data[i]; + lan743x_csr_write(adapter, E2P_DATA, val); + + /* Send "write" command */ + val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_; + val |= (offset & E2P_CMD_EPC_ADDR_MASK_); + lan743x_csr_write(adapter, E2P_CMD, val); + + retval = lan743x_eeprom_wait(adapter); + if (retval < 0) + return retval; + + offset++; + } + + return 0; +} + +static void lan743x_ethtool_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *info) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + + strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver)); + strlcpy(info->bus_info, + pci_name(adapter->pdev), sizeof(info->bus_info)); +} + +static u32 lan743x_ethtool_get_msglevel(struct net_device *netdev) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + + return adapter->msg_enable; +} + +static void lan743x_ethtool_set_msglevel(struct net_device *netdev, + u32 msglevel) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + + adapter->msg_enable = msglevel; +} + +static int lan743x_ethtool_get_eeprom_len(struct net_device *netdev) +{ + return MAX_EEPROM_SIZE; +} + +static int lan743x_ethtool_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *ee, u8 *data) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + + return lan743x_eeprom_read(adapter, ee->offset, ee->len, data); +} + +static int lan743x_ethtool_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *ee, u8 *data) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + int ret = -EINVAL; + + if (ee->magic == LAN743X_EEPROM_MAGIC) + ret = lan743x_eeprom_write(adapter, ee->offset, ee->len, + data); + /* Beware! OTP is One Time Programming ONLY! 
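+ * (each OTP bit can be programmed exactly once and never erased),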
+ * so perform strict sanity checks before writing + */ +else if ((ee->magic == LAN743X_OTP_MAGIC) && + (ee->offset == 0) && + (ee->len == MAX_EEPROM_SIZE) && + (data[0] == OTP_INDICATOR_1)) + ret = lan743x_otp_write(adapter, ee->offset, ee->len, data); + + return ret; +} + +static const char lan743x_set0_hw_cnt_strings[][ETH_GSTRING_LEN] = { + "RX FCS Errors", + "RX Alignment Errors", + "RX Fragment Errors", + "RX Jabber Errors", + "RX Undersize Frame Errors", + "RX Oversize Frame Errors", + "RX Dropped Frames", + "RX Unicast Byte Count", + "RX Broadcast Byte Count", + "RX Multicast Byte Count", + "RX Unicast Frames", + "RX Broadcast Frames", + "RX Multicast Frames", + "RX Pause Frames", + "RX 64 Byte Frames", + "RX 65 - 127 Byte Frames", + "RX 128 - 255 Byte Frames", + "RX 256 - 511 Bytes Frames", + "RX 512 - 1023 Byte Frames", + "RX 1024 - 1518 Byte Frames", + "RX Greater 1518 Byte Frames", +}; + +static const char lan743x_set1_sw_cnt_strings[][ETH_GSTRING_LEN] = { + "RX Queue 0 Frames", + "RX Queue 1 Frames", + "RX Queue 2 Frames", + "RX Queue 3 Frames", +}; + +static const char lan743x_set2_hw_cnt_strings[][ETH_GSTRING_LEN] = { + "RX Total Frames", + "EEE RX LPI Transitions", + "EEE RX LPI Time", + "RX Counter Rollover Status", + "TX FCS Errors", + "TX Excess Deferral Errors", + "TX Carrier Errors", + "TX Bad Byte Count", + "TX Single Collisions", + "TX Multiple Collisions", + "TX Excessive Collision", + "TX Late Collisions", + "TX Unicast Byte Count", + "TX Broadcast Byte Count", + "TX Multicast Byte Count", + "TX Unicast Frames", + "TX Broadcast Frames", + "TX Multicast Frames", + "TX Pause Frames", + "TX 64 Byte Frames", + "TX 65 - 127 Byte Frames", + "TX 128 - 255 Byte Frames", + "TX 256 - 511 Bytes Frames", + "TX 512 - 1023 Byte Frames", + "TX 1024 - 1518 Byte Frames", + "TX Greater 1518 Byte Frames", + "TX Total Frames", + "EEE TX LPI Transitions", + "EEE TX LPI Time", + "TX Counter Rollover Status", +}; + +static const u32 lan743x_set0_hw_cnt_addr[] = { + STAT_RX_FCS_ERRORS, + STAT_RX_ALIGNMENT_ERRORS, + STAT_RX_FRAGMENT_ERRORS, + STAT_RX_JABBER_ERRORS, + STAT_RX_UNDERSIZE_FRAME_ERRORS, + STAT_RX_OVERSIZE_FRAME_ERRORS, + STAT_RX_DROPPED_FRAMES, + STAT_RX_UNICAST_BYTE_COUNT, + STAT_RX_BROADCAST_BYTE_COUNT, + STAT_RX_MULTICAST_BYTE_COUNT, + STAT_RX_UNICAST_FRAMES, + STAT_RX_BROADCAST_FRAMES, + STAT_RX_MULTICAST_FRAMES, + STAT_RX_PAUSE_FRAMES, + STAT_RX_64_BYTE_FRAMES, + STAT_RX_65_127_BYTE_FRAMES, + STAT_RX_128_255_BYTE_FRAMES, + STAT_RX_256_511_BYTES_FRAMES, + STAT_RX_512_1023_BYTE_FRAMES, + STAT_RX_1024_1518_BYTE_FRAMES, + STAT_RX_GREATER_1518_BYTE_FRAMES, +}; + +static const u32 lan743x_set2_hw_cnt_addr[] = { + STAT_RX_TOTAL_FRAMES, + STAT_EEE_RX_LPI_TRANSITIONS, + STAT_EEE_RX_LPI_TIME, + STAT_RX_COUNTER_ROLLOVER_STATUS, + STAT_TX_FCS_ERRORS, + STAT_TX_EXCESS_DEFERRAL_ERRORS, + STAT_TX_CARRIER_ERRORS, + STAT_TX_BAD_BYTE_COUNT, + STAT_TX_SINGLE_COLLISIONS, + STAT_TX_MULTIPLE_COLLISIONS, + STAT_TX_EXCESSIVE_COLLISION, + STAT_TX_LATE_COLLISIONS, + STAT_TX_UNICAST_BYTE_COUNT, + STAT_TX_BROADCAST_BYTE_COUNT, + STAT_TX_MULTICAST_BYTE_COUNT, + STAT_TX_UNICAST_FRAMES, + STAT_TX_BROADCAST_FRAMES, + STAT_TX_MULTICAST_FRAMES, + STAT_TX_PAUSE_FRAMES, + STAT_TX_64_BYTE_FRAMES, + STAT_TX_65_127_BYTE_FRAMES, + STAT_TX_128_255_BYTE_FRAMES, + STAT_TX_256_511_BYTES_FRAMES, + STAT_TX_512_1023_BYTE_FRAMES, + STAT_TX_1024_1518_BYTE_FRAMES, + STAT_TX_GREATER_1518_BYTE_FRAMES, + STAT_TX_TOTAL_FRAMES, + STAT_EEE_TX_LPI_TRANSITIONS, + STAT_EEE_TX_LPI_TIME, + STAT_TX_COUNTER_ROLLOVER_STATUS +}; + 
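The three string tables and the two register tables above must stay index-aligned: lan743x_ethtool_get_ethtool_stats() below emits the set0 registers first, then the four per-queue software counters, then the set2 registers, in table order. A minimal compile-time guard for that pairing, assuming the kernel's ARRAY_SIZE() and BUILD_BUG_ON() helpers; the check itself is not part of this patch:

static inline void lan743x_stats_tables_check(void)
{
	/* each counter name must pair with exactly one counter register */
	BUILD_BUG_ON(ARRAY_SIZE(lan743x_set0_hw_cnt_strings) !=
		     ARRAY_SIZE(lan743x_set0_hw_cnt_addr));
	BUILD_BUG_ON(ARRAY_SIZE(lan743x_set2_hw_cnt_strings) !=
		     ARRAY_SIZE(lan743x_set2_hw_cnt_addr));
}

The ethtool core sizes its buffers from get_sset_count() before calling get_strings() and get_ethtool_stats(), so the count returned below must equal the sum of the three table sizes.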
+static void lan743x_ethtool_get_strings(struct net_device *netdev, + u32 stringset, u8 *data) +{ + switch (stringset) { + case ETH_SS_STATS: + memcpy(data, lan743x_set0_hw_cnt_strings, + sizeof(lan743x_set0_hw_cnt_strings)); + memcpy(&data[sizeof(lan743x_set0_hw_cnt_strings)], + lan743x_set1_sw_cnt_strings, + sizeof(lan743x_set1_sw_cnt_strings)); + memcpy(&data[sizeof(lan743x_set0_hw_cnt_strings) + + sizeof(lan743x_set1_sw_cnt_strings)], + lan743x_set2_hw_cnt_strings, + sizeof(lan743x_set2_hw_cnt_strings)); + break; + } +} + +static void lan743x_ethtool_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, + u64 *data) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + int data_index = 0; + u32 buf; + int i; + + for (i = 0; i < ARRAY_SIZE(lan743x_set0_hw_cnt_addr); i++) { + buf = lan743x_csr_read(adapter, lan743x_set0_hw_cnt_addr[i]); + data[data_index++] = (u64)buf; + } + for (i = 0; i < ARRAY_SIZE(adapter->rx); i++) + data[data_index++] = (u64)(adapter->rx[i].frame_count); + for (i = 0; i < ARRAY_SIZE(lan743x_set2_hw_cnt_addr); i++) { + buf = lan743x_csr_read(adapter, lan743x_set2_hw_cnt_addr[i]); + data[data_index++] = (u64)buf; + } +} + +static int lan743x_ethtool_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + { + int ret; + + ret = ARRAY_SIZE(lan743x_set0_hw_cnt_strings); + ret += ARRAY_SIZE(lan743x_set1_sw_cnt_strings); + ret += ARRAY_SIZE(lan743x_set2_hw_cnt_strings); + return ret; + } + default: + return -EOPNOTSUPP; + } +} + +static int lan743x_ethtool_get_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *rxnfc, + u32 *rule_locs) +{ + switch (rxnfc->cmd) { + case ETHTOOL_GRXFH: + rxnfc->data = 0; + switch (rxnfc->flow_type) { + case TCP_V4_FLOW:case UDP_V4_FLOW: + case TCP_V6_FLOW:case UDP_V6_FLOW: + rxnfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fall through */ + case IPV4_FLOW: case IPV6_FLOW: + rxnfc->data |= RXH_IP_SRC | RXH_IP_DST; + return 0; + } + break; + case ETHTOOL_GRXRINGS: + rxnfc->data = LAN743X_USED_RX_CHANNELS; + return 0; + } + return -EOPNOTSUPP; +} + +static u32 lan743x_ethtool_get_rxfh_key_size(struct net_device *netdev) +{ + return 40; +} + +static u32 lan743x_ethtool_get_rxfh_indir_size(struct net_device *netdev) +{ + return 128; +} + +static int lan743x_ethtool_get_rxfh(struct net_device *netdev, + u32 *indir, u8 *key, u8 *hfunc) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + + if (indir) { + int dw_index; + int byte_index = 0; + + for (dw_index = 0; dw_index < 32; dw_index++) { + u32 four_entries = + lan743x_csr_read(adapter, RFE_INDX(dw_index)); + + byte_index = dw_index << 2; + indir[byte_index + 0] = + ((four_entries >> 0) & 0x000000FF); + indir[byte_index + 1] = + ((four_entries >> 8) & 0x000000FF); + indir[byte_index + 2] = + ((four_entries >> 16) & 0x000000FF); + indir[byte_index + 3] = + ((four_entries >> 24) & 0x000000FF); + } + } + if (key) { + int dword_index; + int byte_index = 0; + + for (dword_index = 0; dword_index < 10; dword_index++) { + u32 four_entries = + lan743x_csr_read(adapter, + RFE_HASH_KEY(dword_index)); + + byte_index = dword_index << 2; + key[byte_index + 0] = + ((four_entries >> 0) & 0x000000FF); + key[byte_index + 1] = + ((four_entries >> 8) & 0x000000FF); + key[byte_index + 2] = + ((four_entries >> 16) & 0x000000FF); + key[byte_index + 3] = + ((four_entries >> 24) & 0x000000FF); + } + } + if (hfunc) + (*hfunc) = ETH_RSS_HASH_TOP; + return 0; +} + +static int lan743x_ethtool_set_rxfh(struct net_device *netdev, + const u32 
*indir, const u8 *key, + const u8 hfunc) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + if (indir) { + u32 indir_value = 0; + int dword_index = 0; + int byte_index = 0; + + for (dword_index = 0; dword_index < 32; dword_index++) { + byte_index = dword_index << 2; + indir_value = + (((indir[byte_index + 0] & 0x000000FF) << 0) | + ((indir[byte_index + 1] & 0x000000FF) << 8) | + ((indir[byte_index + 2] & 0x000000FF) << 16) | + ((indir[byte_index + 3] & 0x000000FF) << 24)); + lan743x_csr_write(adapter, RFE_INDX(dword_index), + indir_value); + } + } + if (key) { + int dword_index = 0; + int byte_index = 0; + u32 key_value = 0; + + for (dword_index = 0; dword_index < 10; dword_index++) { + byte_index = dword_index << 2; + key_value = + ((((u32)(key[byte_index + 0])) << 0) | + (((u32)(key[byte_index + 1])) << 8) | + (((u32)(key[byte_index + 2])) << 16) | + (((u32)(key[byte_index + 3])) << 24)); + lan743x_csr_write(adapter, RFE_HASH_KEY(dword_index), + key_value); + } + } + return 0; +} + +static int lan743x_ethtool_get_eee(struct net_device *netdev, + struct ethtool_eee *eee) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + struct phy_device *phydev = netdev->phydev; + u32 buf; + int ret; + + if (!phydev) + return -EIO; + if (!phydev->drv) { + netif_err(adapter, drv, adapter->netdev, + "Missing PHY Driver\n"); + return -EIO; + } + + ret = phy_ethtool_get_eee(phydev, eee); + if (ret < 0) + return ret; + + buf = lan743x_csr_read(adapter, MAC_CR); + if (buf & MAC_CR_EEE_EN_) { + eee->eee_enabled = true; + eee->eee_active = !!(eee->advertised & eee->lp_advertised); + eee->tx_lpi_enabled = true; + /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */ + buf = lan743x_csr_read(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT); + eee->tx_lpi_timer = buf; + } else { + eee->eee_enabled = false; + eee->eee_active = false; + eee->tx_lpi_enabled = false; + eee->tx_lpi_timer = 0; + } + + return 0; +} + +static int lan743x_ethtool_set_eee(struct net_device *netdev, + struct ethtool_eee *eee) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + struct phy_device *phydev = NULL; + u32 buf = 0; + int ret = 0; + + if (!netdev) + return -EINVAL; + adapter = netdev_priv(netdev); + if (!adapter) + return -EINVAL; + phydev = netdev->phydev; + if (!phydev) + return -EIO; + if (!phydev->drv) { + netif_err(adapter, drv, adapter->netdev, + "Missing PHY Driver\n"); + return -EIO; + } + + if (eee->eee_enabled) { + ret = phy_init_eee(phydev, 0); + if (ret) { + netif_err(adapter, drv, adapter->netdev, + "EEE initialization failed\n"); + return ret; + } + + buf = (u32)eee->tx_lpi_timer; + lan743x_csr_write(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT, buf); + + buf = lan743x_csr_read(adapter, MAC_CR); + buf |= MAC_CR_EEE_EN_; + lan743x_csr_write(adapter, MAC_CR, buf); + } else { + buf = lan743x_csr_read(adapter, MAC_CR); + buf &= ~MAC_CR_EEE_EN_; + lan743x_csr_write(adapter, MAC_CR, buf); + } + + return phy_ethtool_set_eee(phydev, eee); +} + +#ifdef CONFIG_PM +static void lan743x_ethtool_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + + wol->supported = 0; + wol->wolopts = 0; + phy_ethtool_get_wol(netdev->phydev, wol); + + wol->supported |= WAKE_BCAST | WAKE_UCAST | WAKE_MCAST | + WAKE_MAGIC | WAKE_PHY | WAKE_ARP; + + wol->wolopts |= adapter->wolopts; +} + +static int lan743x_ethtool_set_wol(struct net_device *netdev, + struct 
ethtool_wolinfo *wol) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + + adapter->wolopts = 0; + if (wol->wolopts & WAKE_UCAST) + adapter->wolopts |= WAKE_UCAST; + if (wol->wolopts & WAKE_MCAST) + adapter->wolopts |= WAKE_MCAST; + if (wol->wolopts & WAKE_BCAST) + adapter->wolopts |= WAKE_BCAST; + if (wol->wolopts & WAKE_MAGIC) + adapter->wolopts |= WAKE_MAGIC; + if (wol->wolopts & WAKE_PHY) + adapter->wolopts |= WAKE_PHY; + if (wol->wolopts & WAKE_ARP) + adapter->wolopts |= WAKE_ARP; + + device_set_wakeup_enable(&adapter->pdev->dev, (bool)wol->wolopts); + + phy_ethtool_set_wol(netdev->phydev, wol); + + return 0; +} +#endif /* CONFIG_PM */ + +const struct ethtool_ops lan743x_ethtool_ops = { + .get_drvinfo = lan743x_ethtool_get_drvinfo, + .get_msglevel = lan743x_ethtool_get_msglevel, + .set_msglevel = lan743x_ethtool_set_msglevel, + .get_link = ethtool_op_get_link, + + .get_eeprom_len = lan743x_ethtool_get_eeprom_len, + .get_eeprom = lan743x_ethtool_get_eeprom, + .set_eeprom = lan743x_ethtool_set_eeprom, + .get_strings = lan743x_ethtool_get_strings, + .get_ethtool_stats = lan743x_ethtool_get_ethtool_stats, + .get_sset_count = lan743x_ethtool_get_sset_count, + .get_rxnfc = lan743x_ethtool_get_rxnfc, + .get_rxfh_key_size = lan743x_ethtool_get_rxfh_key_size, + .get_rxfh_indir_size = lan743x_ethtool_get_rxfh_indir_size, + .get_rxfh = lan743x_ethtool_get_rxfh, + .set_rxfh = lan743x_ethtool_set_rxfh, + .get_eee = lan743x_ethtool_get_eee, + .set_eee = lan743x_ethtool_set_eee, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, +#ifdef CONFIG_PM + .get_wol = lan743x_ethtool_get_wol, + .set_wol = lan743x_ethtool_set_wol, +#endif +}; diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.h b/drivers/net/ethernet/microchip/lan743x_ethtool.h new file mode 100644 index 000000000000..d0d11a777a58 --- /dev/null +++ b/drivers/net/ethernet/microchip/lan743x_ethtool.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (C) 2018 Microchip Technology Inc. 
*/ + +#ifndef _LAN743X_ETHTOOL_H +#define _LAN743X_ETHTOOL_H + +#include "linux/ethtool.h" + +extern const struct ethtool_ops lan743x_ethtool_ops; + +#endif /* _LAN743X_ETHTOOL_H */ diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index dd947e4dd3ce..bb323f269839 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -11,7 +11,9 @@ #include <linux/phy.h> #include <linux/rtnetlink.h> #include <linux/iopoll.h> +#include <linux/crc16.h> #include "lan743x_main.h" +#include "lan743x_ethtool.h" static void lan743x_pci_cleanup(struct lan743x_adapter *adapter) { @@ -53,13 +55,13 @@ return_error: return ret; } -static u32 lan743x_csr_read(struct lan743x_adapter *adapter, int offset) +u32 lan743x_csr_read(struct lan743x_adapter *adapter, int offset) { return ioread32(&adapter->csr.csr_address[offset]); } -static void lan743x_csr_write(struct lan743x_adapter *adapter, int offset, - u32 data) +void lan743x_csr_write(struct lan743x_adapter *adapter, int offset, + u32 data) { iowrite32(data, &adapter->csr.csr_address[offset]); } @@ -828,7 +830,7 @@ static int lan743x_mac_init(struct lan743x_adapter *adapter) } if (!mac_address_valid) - random_ether_addr(adapter->mac_address); + eth_random_addr(adapter->mac_address); lan743x_mac_set_address(adapter, adapter->mac_address); ether_addr_copy(netdev->dev_addr, adapter->mac_address); return 0; @@ -1023,6 +1025,24 @@ return_error: return ret; } +static void lan743x_rfe_open(struct lan743x_adapter *adapter) +{ + lan743x_csr_write(adapter, RFE_RSS_CFG, + RFE_RSS_CFG_UDP_IPV6_EX_ | + RFE_RSS_CFG_TCP_IPV6_EX_ | + RFE_RSS_CFG_IPV6_EX_ | + RFE_RSS_CFG_UDP_IPV6_ | + RFE_RSS_CFG_TCP_IPV6_ | + RFE_RSS_CFG_IPV6_ | + RFE_RSS_CFG_UDP_IPV4_ | + RFE_RSS_CFG_TCP_IPV4_ | + RFE_RSS_CFG_IPV4_ | + RFE_RSS_CFG_VALID_HASH_BITS_ | + RFE_RSS_CFG_RSS_QUEUE_ENABLE_ | + RFE_RSS_CFG_RSS_HASH_STORE_ | + RFE_RSS_CFG_RSS_ENABLE_); +} + static void lan743x_rfe_update_mac_address(struct lan743x_adapter *adapter) { u8 *mac_addr; @@ -2417,6 +2437,8 @@ static int lan743x_netdev_open(struct net_device *netdev) if (ret) goto close_mac; + lan743x_rfe_open(adapter); + for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) { ret = lan743x_rx_open(&adapter->rx[index]); if (ret) @@ -2689,6 +2711,7 @@ static int lan743x_pcidev_probe(struct pci_dev *pdev, goto cleanup_hardware; adapter->netdev->netdev_ops = &lan743x_netdev_ops; + adapter->netdev->ethtool_ops = &lan743x_ethtool_ops; adapter->netdev->features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM; adapter->netdev->hw_features = adapter->netdev->features; @@ -2747,10 +2770,182 @@ static void lan743x_pcidev_shutdown(struct pci_dev *pdev) lan743x_netdev_close(netdev); rtnl_unlock(); +#ifdef CONFIG_PM + pci_save_state(pdev); +#endif + /* clean up lan743x portion */ lan743x_hardware_cleanup(adapter); } +#ifdef CONFIG_PM +static u16 lan743x_pm_wakeframe_crc16(const u8 *buf, int len) +{ + return bitrev16(crc16(0xFFFF, buf, len)); +} + +static void lan743x_pm_set_wol(struct lan743x_adapter *adapter) +{ + const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E }; + const u8 ipv6_multicast[3] = { 0x33, 0x33 }; + const u8 arp_type[2] = { 0x08, 0x06 }; + int mask_index; + u32 pmtctl; + u32 wucsr; + u32 macrx; + u16 crc; + + for (mask_index = 0; mask_index < MAC_NUM_OF_WUF_CFG; mask_index++) + lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index), 0); + + /* clear wake settings */ + pmtctl = lan743x_csr_read(adapter, PMT_CTL); + pmtctl |= 
PMT_CTL_WUPS_MASK_; + pmtctl &= ~(PMT_CTL_GPIO_WAKEUP_EN_ | PMT_CTL_EEE_WAKEUP_EN_ | + PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_ | + PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ | PMT_CTL_ETH_PHY_WAKE_EN_); + + macrx = lan743x_csr_read(adapter, MAC_RX); + + wucsr = 0; + mask_index = 0; + + pmtctl |= PMT_CTL_ETH_PHY_D3_COLD_OVR_ | PMT_CTL_ETH_PHY_D3_OVR_; + + if (adapter->wolopts & WAKE_PHY) { + pmtctl |= PMT_CTL_ETH_PHY_EDPD_PLL_CTL_; + pmtctl |= PMT_CTL_ETH_PHY_WAKE_EN_; + } + if (adapter->wolopts & WAKE_MAGIC) { + wucsr |= MAC_WUCSR_MPEN_; + macrx |= MAC_RX_RXEN_; + pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_; + } + if (adapter->wolopts & WAKE_UCAST) { + wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_PFDA_EN_; + macrx |= MAC_RX_RXEN_; + pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_; + pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_; + } + if (adapter->wolopts & WAKE_BCAST) { + wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_BCST_EN_; + macrx |= MAC_RX_RXEN_; + pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_; + pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_; + } + if (adapter->wolopts & WAKE_MCAST) { + /* IPv4 multicast */ + crc = lan743x_pm_wakeframe_crc16(ipv4_multicast, 3); + lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index), + MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_MCAST_ | + (0 << MAC_WUF_CFG_OFFSET_SHIFT_) | + (crc & MAC_WUF_CFG_CRC16_MASK_)); + lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 7); + lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0); + lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0); + lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0); + mask_index++; + + /* IPv6 multicast */ + crc = lan743x_pm_wakeframe_crc16(ipv6_multicast, 2); + lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index), + MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_MCAST_ | + (0 << MAC_WUF_CFG_OFFSET_SHIFT_) | + (crc & MAC_WUF_CFG_CRC16_MASK_)); + lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 3); + lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0); + lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0); + lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0); + mask_index++; + + wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_WAKE_EN_; + macrx |= MAC_RX_RXEN_; + pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_; + pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_; + } + if (adapter->wolopts & WAKE_ARP) { + /* set MAC_WUF_CFG & WUF_MASK + * for packet type (offset 12,13) = ARP (0x0806) + */ + crc = lan743x_pm_wakeframe_crc16(arp_type, 2); + lan743x_csr_write(adapter, MAC_WUF_CFG(mask_index), + MAC_WUF_CFG_EN_ | MAC_WUF_CFG_TYPE_ALL_ | + (0 << MAC_WUF_CFG_OFFSET_SHIFT_) | + (crc & MAC_WUF_CFG_CRC16_MASK_)); + lan743x_csr_write(adapter, MAC_WUF_MASK0(mask_index), 0x3000); + lan743x_csr_write(adapter, MAC_WUF_MASK1(mask_index), 0); + lan743x_csr_write(adapter, MAC_WUF_MASK2(mask_index), 0); + lan743x_csr_write(adapter, MAC_WUF_MASK3(mask_index), 0); + mask_index++; + + wucsr |= MAC_WUCSR_RFE_WAKE_EN_ | MAC_WUCSR_WAKE_EN_; + macrx |= MAC_RX_RXEN_; + pmtctl |= PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_; + pmtctl |= PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_; + } + + lan743x_csr_write(adapter, MAC_WUCSR, wucsr); + lan743x_csr_write(adapter, PMT_CTL, pmtctl); + lan743x_csr_write(adapter, MAC_RX, macrx); +} + +static int lan743x_pm_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct lan743x_adapter *adapter = netdev_priv(netdev); + int ret; + + lan743x_pcidev_shutdown(pdev); + + /* clear all wakes */ + 
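+ /* MAC_WUCSR/MAC_WUCSR2 hold the wake-up enables; MAC_WK_SRC latches the wake source and appears to be write-1-to-clear, hence the all-ones write below */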
lan743x_csr_write(adapter, MAC_WUCSR, 0); + lan743x_csr_write(adapter, MAC_WUCSR2, 0); + lan743x_csr_write(adapter, MAC_WK_SRC, 0xFFFFFFFF); + + if (adapter->wolopts) + lan743x_pm_set_wol(adapter); + + /* Host sets PME_En, put the device into D3hot */ + ret = pci_prepare_to_sleep(pdev); + + return 0; +} + +static int lan743x_pm_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct lan743x_adapter *adapter = netdev_priv(netdev); + int ret; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + pci_save_state(pdev); + + ret = lan743x_hardware_init(adapter, pdev); + if (ret) { + netif_err(adapter, probe, adapter->netdev, + "lan743x_hardware_init returned %d\n", ret); + } + + /* Open the netdev if it was in the running state when suspended. + * For instance, this is true when the system wakes up after pm-suspend, + * but false when it wakes up after a suspend issued from the GUI menu. + */ + if (netif_running(netdev)) + lan743x_netdev_open(netdev); + + netif_device_attach(netdev); + + return 0; +} + +static const struct dev_pm_ops lan743x_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(lan743x_pm_suspend, lan743x_pm_resume) +}; +#endif /* CONFIG_PM */ + static const struct pci_device_id lan743x_pcidev_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) }, { 0, } @@ -2761,6 +2956,9 @@ static struct pci_driver lan743x_pcidev_driver = { .id_table = lan743x_pcidev_tbl, .probe = lan743x_pcidev_probe, .remove = lan743x_pcidev_remove, +#ifdef CONFIG_PM + .driver.pm = &lan743x_pm_ops, +#endif .shutdown = lan743x_pcidev_shutdown, }; diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h index 73b463a9df61..4fa7a5e027f4 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.h +++ b/drivers/net/ethernet/microchip/lan743x_main.h @@ -24,8 +24,18 @@ #define HW_CFG_LRST_ BIT(1) #define PMT_CTL (0x014) +#define PMT_CTL_ETH_PHY_D3_COLD_OVR_ BIT(27) +#define PMT_CTL_MAC_D3_RX_CLK_OVR_ BIT(25) +#define PMT_CTL_ETH_PHY_EDPD_PLL_CTL_ BIT(24) +#define PMT_CTL_ETH_PHY_D3_OVR_ BIT(23) +#define PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ BIT(18) +#define PMT_CTL_GPIO_WAKEUP_EN_ BIT(15) +#define PMT_CTL_EEE_WAKEUP_EN_ BIT(13) #define PMT_CTL_READY_ BIT(7) #define PMT_CTL_ETH_PHY_RST_ BIT(4) +#define PMT_CTL_WOL_EN_ BIT(3) +#define PMT_CTL_ETH_PHY_WAKE_EN_ BIT(2) +#define PMT_CTL_WUPS_MASK_ (0x00000003) #define DP_SEL (0x024) #define DP_SEL_DPRDY_ BIT(31) @@ -42,6 +52,16 @@ #define DP_DATA_0 (0x030) +#define E2P_CMD (0x040) +#define E2P_CMD_EPC_BUSY_ BIT(31) +#define E2P_CMD_EPC_CMD_WRITE_ (0x30000000) +#define E2P_CMD_EPC_CMD_EWEN_ (0x20000000) +#define E2P_CMD_EPC_CMD_READ_ (0x00000000) +#define E2P_CMD_EPC_TIMEOUT_ BIT(10) +#define E2P_CMD_EPC_ADDR_MASK_ (0x000001FF) + +#define E2P_DATA (0x044) + #define FCT_RX_CTL (0xAC) #define FCT_RX_CTL_EN_(channel) BIT(28 + (channel)) #define FCT_RX_CTL_DIS_(channel) BIT(24 + (channel)) @@ -62,6 +82,7 @@ ((value << 0) & FCT_FLOW_CTL_ON_THRESHOLD_) #define MAC_CR (0x100) +#define MAC_CR_EEE_EN_ BIT(17) #define MAC_CR_ADD_ BIT(12) #define MAC_CR_ASD_ BIT(11) #define MAC_CR_CNTR_RST_ BIT(5) @@ -97,6 +118,40 @@ #define MAC_MII_DATA (0x124) +#define MAC_EEE_TX_LPI_REQ_DLY_CNT (0x130) + +#define MAC_WUCSR (0x140) +#define MAC_WUCSR_RFE_WAKE_EN_ BIT(14) +#define MAC_WUCSR_PFDA_EN_ BIT(3) +#define MAC_WUCSR_WAKE_EN_ BIT(2) +#define MAC_WUCSR_MPEN_ BIT(1) +#define MAC_WUCSR_BCST_EN_ BIT(0) + +#define MAC_WK_SRC (0x144) + +#define MAC_WUF_CFG0 (0x150) +#define MAC_NUM_OF_WUF_CFG 
(32) +#define MAC_WUF_CFG_BEGIN (MAC_WUF_CFG0) +#define MAC_WUF_CFG(index) (MAC_WUF_CFG_BEGIN + (4 * (index))) +#define MAC_WUF_CFG_EN_ BIT(31) +#define MAC_WUF_CFG_TYPE_MCAST_ (0x02000000) +#define MAC_WUF_CFG_TYPE_ALL_ (0x01000000) +#define MAC_WUF_CFG_OFFSET_SHIFT_ (16) +#define MAC_WUF_CFG_CRC16_MASK_ (0x0000FFFF) + +#define MAC_WUF_MASK0_0 (0x200) +#define MAC_WUF_MASK0_1 (0x204) +#define MAC_WUF_MASK0_2 (0x208) +#define MAC_WUF_MASK0_3 (0x20C) +#define MAC_WUF_MASK0_BEGIN (MAC_WUF_MASK0_0) +#define MAC_WUF_MASK1_BEGIN (MAC_WUF_MASK0_1) +#define MAC_WUF_MASK2_BEGIN (MAC_WUF_MASK0_2) +#define MAC_WUF_MASK3_BEGIN (MAC_WUF_MASK0_3) +#define MAC_WUF_MASK0(index) (MAC_WUF_MASK0_BEGIN + (0x10 * (index))) +#define MAC_WUF_MASK1(index) (MAC_WUF_MASK1_BEGIN + (0x10 * (index))) +#define MAC_WUF_MASK2(index) (MAC_WUF_MASK2_BEGIN + (0x10 * (index))) +#define MAC_WUF_MASK3(index) (MAC_WUF_MASK3_BEGIN + (0x10 * (index))) + /* offset 0x400 - 0x500, x may range from 0 to 32, for a total of 33 entries */ #define RFE_ADDR_FILT_HI(x) (0x400 + (8 * (x))) #define RFE_ADDR_FILT_HI_VALID_ BIT(31) @@ -111,6 +166,27 @@ #define RFE_CTL_MCAST_HASH_ BIT(3) #define RFE_CTL_DA_PERFECT_ BIT(1) +#define RFE_RSS_CFG (0x554) +#define RFE_RSS_CFG_UDP_IPV6_EX_ BIT(16) +#define RFE_RSS_CFG_TCP_IPV6_EX_ BIT(15) +#define RFE_RSS_CFG_IPV6_EX_ BIT(14) +#define RFE_RSS_CFG_UDP_IPV6_ BIT(13) +#define RFE_RSS_CFG_TCP_IPV6_ BIT(12) +#define RFE_RSS_CFG_IPV6_ BIT(11) +#define RFE_RSS_CFG_UDP_IPV4_ BIT(10) +#define RFE_RSS_CFG_TCP_IPV4_ BIT(9) +#define RFE_RSS_CFG_IPV4_ BIT(8) +#define RFE_RSS_CFG_VALID_HASH_BITS_ (0x000000E0) +#define RFE_RSS_CFG_RSS_QUEUE_ENABLE_ BIT(2) +#define RFE_RSS_CFG_RSS_HASH_STORE_ BIT(1) +#define RFE_RSS_CFG_RSS_ENABLE_ BIT(0) + +#define RFE_HASH_KEY(index) (0x558 + (index << 2)) + +#define RFE_INDX(index) (0x580 + (index << 2)) + +#define MAC_WUCSR2 (0x600) + #define INT_STS (0x780) #define INT_BIT_DMA_RX_(channel) BIT(24 + (channel)) #define INT_BIT_ALL_RX_ (0x0F000000) @@ -288,9 +364,33 @@ #define TX_CFG_C_TX_DMA_INT_STS_AUTO_CLR_ BIT(3) #define TX_CFG_C_TX_INT_STS_R2C_MODE_MASK_ (0x00000007) +#define OTP_PWR_DN (0x1000) +#define OTP_PWR_DN_PWRDN_N_ BIT(0) + +#define OTP_ADDR1 (0x1004) +#define OTP_ADDR1_15_11_MASK_ (0x1F) + +#define OTP_ADDR2 (0x1008) +#define OTP_ADDR2_10_3_MASK_ (0xFF) + +#define OTP_PRGM_DATA (0x1010) + +#define OTP_PRGM_MODE (0x1014) +#define OTP_PRGM_MODE_BYTE_ BIT(0) + +#define OTP_TST_CMD (0x1024) +#define OTP_TST_CMD_PRGVRFY_ BIT(3) + +#define OTP_CMD_GO (0x1028) +#define OTP_CMD_GO_GO_ BIT(0) + +#define OTP_STATUS (0x1030) +#define OTP_STATUS_BUSY_ BIT(0) + /* MAC statistics registers */ #define STAT_RX_FCS_ERRORS (0x1200) #define STAT_RX_ALIGNMENT_ERRORS (0x1204) +#define STAT_RX_FRAGMENT_ERRORS (0x1208) #define STAT_RX_JABBER_ERRORS (0x120C) #define STAT_RX_UNDERSIZE_FRAME_ERRORS (0x1210) #define STAT_RX_OVERSIZE_FRAME_ERRORS (0x1214) @@ -298,12 +398,26 @@ #define STAT_RX_UNICAST_BYTE_COUNT (0x121C) #define STAT_RX_BROADCAST_BYTE_COUNT (0x1220) #define STAT_RX_MULTICAST_BYTE_COUNT (0x1224) +#define STAT_RX_UNICAST_FRAMES (0x1228) +#define STAT_RX_BROADCAST_FRAMES (0x122C) #define STAT_RX_MULTICAST_FRAMES (0x1230) +#define STAT_RX_PAUSE_FRAMES (0x1234) +#define STAT_RX_64_BYTE_FRAMES (0x1238) +#define STAT_RX_65_127_BYTE_FRAMES (0x123C) +#define STAT_RX_128_255_BYTE_FRAMES (0x1240) +#define STAT_RX_256_511_BYTES_FRAMES (0x1244) +#define STAT_RX_512_1023_BYTE_FRAMES (0x1248) +#define STAT_RX_1024_1518_BYTE_FRAMES (0x124C) +#define STAT_RX_GREATER_1518_BYTE_FRAMES (0x1250) 
#define STAT_RX_TOTAL_FRAMES (0x1254) +#define STAT_EEE_RX_LPI_TRANSITIONS (0x1258) +#define STAT_EEE_RX_LPI_TIME (0x125C) +#define STAT_RX_COUNTER_ROLLOVER_STATUS (0x127C) #define STAT_TX_FCS_ERRORS (0x1280) #define STAT_TX_EXCESS_DEFERRAL_ERRORS (0x1284) #define STAT_TX_CARRIER_ERRORS (0x1288) +#define STAT_TX_BAD_BYTE_COUNT (0x128C) #define STAT_TX_SINGLE_COLLISIONS (0x1290) #define STAT_TX_MULTIPLE_COLLISIONS (0x1294) #define STAT_TX_EXCESSIVE_COLLISION (0x1298) @@ -311,8 +425,21 @@ #define STAT_TX_UNICAST_BYTE_COUNT (0x12A0) #define STAT_TX_BROADCAST_BYTE_COUNT (0x12A4) #define STAT_TX_MULTICAST_BYTE_COUNT (0x12A8) +#define STAT_TX_UNICAST_FRAMES (0x12AC) +#define STAT_TX_BROADCAST_FRAMES (0x12B0) #define STAT_TX_MULTICAST_FRAMES (0x12B4) +#define STAT_TX_PAUSE_FRAMES (0x12B8) +#define STAT_TX_64_BYTE_FRAMES (0x12BC) +#define STAT_TX_65_127_BYTE_FRAMES (0x12C0) +#define STAT_TX_128_255_BYTE_FRAMES (0x12C4) +#define STAT_TX_256_511_BYTES_FRAMES (0x12C8) +#define STAT_TX_512_1023_BYTE_FRAMES (0x12CC) +#define STAT_TX_1024_1518_BYTE_FRAMES (0x12D0) +#define STAT_TX_GREATER_1518_BYTE_FRAMES (0x12D4) #define STAT_TX_TOTAL_FRAMES (0x12D8) +#define STAT_EEE_TX_LPI_TRANSITIONS (0x12DC) +#define STAT_EEE_TX_LPI_TIME (0x12E0) +#define STAT_TX_COUNTER_ROLLOVER_STATUS (0x12FC) /* End of Register definitions */ @@ -473,6 +600,9 @@ struct lan743x_adapter { struct net_device *netdev; struct mii_bus *mdiobus; int msg_enable; +#ifdef CONFIG_PM + u32 wolopts; +#endif struct pci_dev *pdev; struct lan743x_csr csr; struct lan743x_intr intr; @@ -594,4 +724,7 @@ struct lan743x_rx_buffer_info { #define RX_PROCESS_RESULT_PACKET_RECEIVED (1) #define RX_PROCESS_RESULT_PACKET_DROPPED (2) +u32 lan743x_csr_read(struct lan743x_adapter *adapter, int offset); +void lan743x_csr_write(struct lan743x_adapter *adapter, int offset, u32 data); + #endif /* _LAN743X_H */ diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 776a8a9be8e3..1a4f2bb48ead 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -148,12 +148,191 @@ static inline int ocelot_vlant_wait_for_completion(struct ocelot *ocelot) return 0; } +static int ocelot_vlant_set_mask(struct ocelot *ocelot, u16 vid, u32 mask) +{ + /* Select the VID to configure */ + ocelot_write(ocelot, ANA_TABLES_VLANTIDX_V_INDEX(vid), + ANA_TABLES_VLANTIDX); + /* Set the vlan port members mask and issue a write command */ + ocelot_write(ocelot, ANA_TABLES_VLANACCESS_VLAN_PORT_MASK(mask) | + ANA_TABLES_VLANACCESS_CMD_WRITE, + ANA_TABLES_VLANACCESS); + + return ocelot_vlant_wait_for_completion(ocelot); +} + +static void ocelot_vlan_mode(struct ocelot_port *port, + netdev_features_t features) +{ + struct ocelot *ocelot = port->ocelot; + u8 p = port->chip_port; + u32 val; + + /* Filtering */ + val = ocelot_read(ocelot, ANA_VLANMASK); + if (features & NETIF_F_HW_VLAN_CTAG_FILTER) + val |= BIT(p); + else + val &= ~BIT(p); + ocelot_write(ocelot, val, ANA_VLANMASK); +} + +static void ocelot_vlan_port_apply(struct ocelot *ocelot, + struct ocelot_port *port) +{ + u32 val; + + /* Ingress classification (ANA_PORT_VLAN_CFG) */ + /* Default vlan to classify for untagged frames (may be zero) */ + val = ANA_PORT_VLAN_CFG_VLAN_VID(port->pvid); + if (port->vlan_aware) + val |= ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA | + ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1); + + ocelot_rmw_gix(ocelot, val, + ANA_PORT_VLAN_CFG_VLAN_VID_M | + ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA | + ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M, + ANA_PORT_VLAN_CFG, port->chip_port); + + /* 
Drop frames with multicast source address */ + val = ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA; + if (port->vlan_aware && !port->vid) + /* If port is vlan-aware and tagged, drop untagged and priority + * tagged frames. + */ + val |= ANA_PORT_DROP_CFG_DROP_UNTAGGED_ENA | + ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA | + ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA; + ocelot_write_gix(ocelot, val, ANA_PORT_DROP_CFG, port->chip_port); + + /* Egress configuration (REW_TAG_CFG): VLAN tag type to 8021Q. */ + val = REW_TAG_CFG_TAG_TPID_CFG(0); + + if (port->vlan_aware) { + if (port->vid) + /* Tag all frames except when VID == DEFAULT_VLAN */ + val |= REW_TAG_CFG_TAG_CFG(1); + else + /* Tag all frames */ + val |= REW_TAG_CFG_TAG_CFG(3); + } + ocelot_rmw_gix(ocelot, val, + REW_TAG_CFG_TAG_TPID_CFG_M | + REW_TAG_CFG_TAG_CFG_M, + REW_TAG_CFG, port->chip_port); + + /* Set default VLAN and tag type to 8021Q. */ + val = REW_PORT_VLAN_CFG_PORT_TPID(ETH_P_8021Q) | + REW_PORT_VLAN_CFG_PORT_VID(port->vid); + ocelot_rmw_gix(ocelot, val, + REW_PORT_VLAN_CFG_PORT_TPID_M | + REW_PORT_VLAN_CFG_PORT_VID_M, + REW_PORT_VLAN_CFG, port->chip_port); +} + +static int ocelot_vlan_vid_add(struct net_device *dev, u16 vid, bool pvid, + bool untagged) +{ + struct ocelot_port *port = netdev_priv(dev); + struct ocelot *ocelot = port->ocelot; + int ret; + + /* Add the port MAC address with the right VLAN information */ + ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, vid, + ENTRYTYPE_LOCKED); + + /* Make the port a member of the VLAN */ + ocelot->vlan_mask[vid] |= BIT(port->chip_port); + ret = ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]); + if (ret) + return ret; + + /* Default ingress vlan classification */ + if (pvid) + port->pvid = vid; + + /* Untagged egress vlan classification */ + if (untagged) + port->vid = vid; + + ocelot_vlan_port_apply(ocelot, port); + + return 0; +} + +static int ocelot_vlan_vid_del(struct net_device *dev, u16 vid) +{ + struct ocelot_port *port = netdev_priv(dev); + struct ocelot *ocelot = port->ocelot; + int ret; + + /* 8021q removes VID 0 on module unload for all interfaces + * with VLAN filtering feature. We need to keep it to receive + * untagged traffic. + */ + if (vid == 0) + return 0; + + /* Delete the port MAC address with the right VLAN information */ + ocelot_mact_forget(ocelot, dev->dev_addr, vid); + + /* Stop the port from being a member of the vlan */ + ocelot->vlan_mask[vid] &= ~BIT(port->chip_port); + ret = ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]); + if (ret) + return ret; + + /* Ingress */ + if (port->pvid == vid) + port->pvid = 0; + + /* Egress */ + if (port->vid == vid) + port->vid = 0; + + ocelot_vlan_port_apply(ocelot, port); + + return 0; +} + static void ocelot_vlan_init(struct ocelot *ocelot) { + u16 port, vid; + /* Clear VLAN table, by default all ports are members of all VLANs */ ocelot_write(ocelot, ANA_TABLES_VLANACCESS_CMD_INIT, ANA_TABLES_VLANACCESS); ocelot_vlant_wait_for_completion(ocelot); + + /* Configure the port VLAN memberships */ + for (vid = 1; vid < VLAN_N_VID; vid++) { + ocelot->vlan_mask[vid] = 0; + ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]); + } + + /* Because VLAN filtering is enabled, we need VID 0 to get untagged + * traffic. It is added automatically if the 8021q module is loaded, but + * we can't rely on it since the module may not be loaded. 
+ */ + ocelot->vlan_mask[0] = GENMASK(ocelot->num_phys_ports - 1, 0); + ocelot_vlant_set_mask(ocelot, 0, ocelot->vlan_mask[0]); + + /* Configure the CPU port to be VLAN aware */ + ocelot_write_gix(ocelot, ANA_PORT_VLAN_CFG_VLAN_VID(0) | + ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA | + ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1), + ANA_PORT_VLAN_CFG, ocelot->num_phys_ports); + + /* Set vlan ingress filter mask to all ports but the CPU port by + * default. + */ + ocelot_write(ocelot, GENMASK(9, 0), ANA_VLANMASK); + + for (port = 0; port < ocelot->num_phys_ports; port++) { + ocelot_write_gix(ocelot, 0, REW_PORT_VLAN_CFG, port); + ocelot_write_gix(ocelot, 0, REW_TAG_CFG, port); + } } /* Watermark encode @@ -539,6 +718,20 @@ static int ocelot_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct ocelot_port *port = netdev_priv(dev); struct ocelot *ocelot = port->ocelot; + if (!vid) { + if (!port->vlan_aware) + /* If the bridge is not VLAN aware and no VID was + * provided, set it to pvid to ensure the MAC entry + * matches incoming untagged packets + */ + vid = port->pvid; + else + /* If the bridge is VLAN aware a VID must be provided as + * otherwise the learnt entry wouldn't match any frame. + */ + return -EINVAL; + } + return ocelot_mact_learn(ocelot, port->chip_port, addr, vid, ENTRYTYPE_NORMAL); } @@ -690,6 +883,30 @@ end: return ret; } +static int ocelot_vlan_rx_add_vid(struct net_device *dev, __be16 proto, + u16 vid) +{ + return ocelot_vlan_vid_add(dev, vid, false, true); +} + +static int ocelot_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, + u16 vid) +{ + return ocelot_vlan_vid_del(dev, vid); +} + +static int ocelot_set_features(struct net_device *dev, + netdev_features_t features) +{ + struct ocelot_port *port = netdev_priv(dev); + netdev_features_t changed = dev->features ^ features; + + if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) + ocelot_vlan_mode(port, features); + + return 0; +} + static const struct net_device_ops ocelot_port_netdev_ops = { .ndo_open = ocelot_port_open, .ndo_stop = ocelot_port_stop, @@ -701,6 +918,9 @@ static const struct net_device_ops ocelot_port_netdev_ops = { .ndo_fdb_add = ocelot_fdb_add, .ndo_fdb_del = ocelot_fdb_del, .ndo_fdb_dump = ocelot_fdb_dump, + .ndo_vlan_rx_add_vid = ocelot_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = ocelot_vlan_rx_kill_vid, + .ndo_set_features = ocelot_set_features, }; static void ocelot_get_strings(struct net_device *netdev, u32 sset, u8 *data) @@ -780,6 +1000,8 @@ static const struct ethtool_ops ocelot_ethtool_ops = { .get_strings = ocelot_get_strings, .get_ethtool_stats = ocelot_get_ethtool_stats, .get_sset_count = ocelot_get_sset_count, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static int ocelot_port_attr_get(struct net_device *dev, @@ -914,6 +1136,10 @@ static int ocelot_port_attr_set(struct net_device *dev, case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME: ocelot_port_attr_ageing_set(ocelot_port, attr->u.ageing_time); break; + case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING: + ocelot_port->vlan_aware = attr->u.vlan_filtering; + ocelot_vlan_port_apply(ocelot_port->ocelot, ocelot_port); + break; case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED: ocelot_port_attr_mc_set(ocelot_port, !attr->u.mc_disabled); break; @@ -925,6 +1151,40 @@ static int ocelot_port_attr_set(struct net_device *dev, return err; } +static int ocelot_port_obj_add_vlan(struct net_device *dev, + const struct switchdev_obj_port_vlan *vlan, + struct switchdev_trans *trans) +{ + int ret; + u16 vid; + + for (vid = 
vlan->vid_begin; vid <= vlan->vid_end; vid++) { + ret = ocelot_vlan_vid_add(dev, vid, + vlan->flags & BRIDGE_VLAN_INFO_PVID, + vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED); + if (ret) + return ret; + } + + return 0; +} + +static int ocelot_port_vlan_del_vlan(struct net_device *dev, + const struct switchdev_obj_port_vlan *vlan) +{ + int ret; + u16 vid; + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { + ret = ocelot_vlan_vid_del(dev, vid); + + if (ret) + return ret; + } + + return 0; +} + static struct ocelot_multicast *ocelot_multicast_get(struct ocelot *ocelot, const unsigned char *addr, u16 vid) @@ -951,7 +1211,7 @@ static int ocelot_port_obj_add_mdb(struct net_device *dev, bool new = false; if (!vid) - vid = 1; + vid = port->pvid; mc = ocelot_multicast_get(ocelot, mdb->addr, vid); if (!mc) { @@ -992,7 +1252,7 @@ static int ocelot_port_obj_del_mdb(struct net_device *dev, u16 vid = mdb->vid; if (!vid) - vid = 1; + vid = port->pvid; mc = ocelot_multicast_get(ocelot, mdb->addr, vid); if (!mc) @@ -1024,6 +1284,11 @@ static int ocelot_port_obj_add(struct net_device *dev, int ret = 0; switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + ret = ocelot_port_obj_add_vlan(dev, + SWITCHDEV_OBJ_PORT_VLAN(obj), + trans); + break; case SWITCHDEV_OBJ_ID_PORT_MDB: ret = ocelot_port_obj_add_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj), trans); @@ -1041,6 +1306,10 @@ static int ocelot_port_obj_del(struct net_device *dev, int ret = 0; switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + ret = ocelot_port_vlan_del_vlan(dev, + SWITCHDEV_OBJ_PORT_VLAN(obj)); + break; case SWITCHDEV_OBJ_ID_PORT_MDB: ret = ocelot_port_obj_del_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj)); break; @@ -1086,6 +1355,142 @@ static void ocelot_port_bridge_leave(struct ocelot_port *ocelot_port, if (!ocelot->bridge_mask) ocelot->hw_bridge_dev = NULL; + + /* Clear bridge vlan settings before calling ocelot_vlan_port_apply */ + ocelot_port->vlan_aware = 0; + ocelot_port->pvid = 0; + ocelot_port->vid = 0; +} + +static void ocelot_set_aggr_pgids(struct ocelot *ocelot) +{ + int i, port, lag; + + /* Reset destination and aggregation PGIDS */ + for (port = 0; port < ocelot->num_phys_ports; port++) + ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, port); + + for (i = PGID_AGGR; i < PGID_SRC; i++) + ocelot_write_rix(ocelot, GENMASK(ocelot->num_phys_ports - 1, 0), + ANA_PGID_PGID, i); + + /* Now, set PGIDs for each LAG */ + for (lag = 0; lag < ocelot->num_phys_ports; lag++) { + unsigned long bond_mask; + int aggr_count = 0; + u8 aggr_idx[16]; + + bond_mask = ocelot->lags[lag]; + if (!bond_mask) + continue; + + for_each_set_bit(port, &bond_mask, ocelot->num_phys_ports) { + // Destination mask + ocelot_write_rix(ocelot, bond_mask, + ANA_PGID_PGID, port); + aggr_idx[aggr_count] = port; + aggr_count++; + } + + for (i = PGID_AGGR; i < PGID_SRC; i++) { + u32 ac; + + ac = ocelot_read_rix(ocelot, ANA_PGID_PGID, i); + ac &= ~bond_mask; + ac |= BIT(aggr_idx[i % aggr_count]); + ocelot_write_rix(ocelot, ac, ANA_PGID_PGID, i); + } + } +} + +static void ocelot_setup_lag(struct ocelot *ocelot, int lag) +{ + unsigned long bond_mask = ocelot->lags[lag]; + unsigned int p; + + for_each_set_bit(p, &bond_mask, ocelot->num_phys_ports) { + u32 port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, p); + + port_cfg &= ~ANA_PORT_PORT_CFG_PORTID_VAL_M; + + /* Use lag port as logical port for port i */ + ocelot_write_gix(ocelot, port_cfg | + ANA_PORT_PORT_CFG_PORTID_VAL(lag), + ANA_PORT_PORT_CFG, p); + } +} + +static int ocelot_port_lag_join(struct ocelot_port 
*ocelot_port, + struct net_device *bond) +{ + struct ocelot *ocelot = ocelot_port->ocelot; + int p = ocelot_port->chip_port; + int lag, lp; + struct net_device *ndev; + u32 bond_mask = 0; + + rcu_read_lock(); + for_each_netdev_in_bond_rcu(bond, ndev) { + struct ocelot_port *port = netdev_priv(ndev); + + bond_mask |= BIT(port->chip_port); + } + rcu_read_unlock(); + + lp = __ffs(bond_mask); + + /* If the new port is the lowest one, use it as the logical port from + * now on + */ + if (p == lp) { + lag = p; + ocelot->lags[p] = bond_mask; + bond_mask &= ~BIT(p); + if (bond_mask) { + lp = __ffs(bond_mask); + ocelot->lags[lp] = 0; + } + } else { + lag = lp; + ocelot->lags[lp] |= BIT(p); + } + + ocelot_setup_lag(ocelot, lag); + ocelot_set_aggr_pgids(ocelot); + + return 0; +} + +static void ocelot_port_lag_leave(struct ocelot_port *ocelot_port, + struct net_device *bond) +{ + struct ocelot *ocelot = ocelot_port->ocelot; + int p = ocelot_port->chip_port; + u32 port_cfg; + int i; + + /* Remove port from any lag */ + for (i = 0; i < ocelot->num_phys_ports; i++) + ocelot->lags[i] &= ~BIT(ocelot_port->chip_port); + + /* If it was the logical port of the lag, move the lag config to the + * next port + */ + if (ocelot->lags[p]) { + int n = __ffs(ocelot->lags[p]); + + ocelot->lags[n] = ocelot->lags[p]; + ocelot->lags[p] = 0; + + ocelot_setup_lag(ocelot, n); + } + + port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, p); + port_cfg &= ~ANA_PORT_PORT_CFG_PORTID_VAL_M; + ocelot_write_gix(ocelot, port_cfg | ANA_PORT_PORT_CFG_PORTID_VAL(p), + ANA_PORT_PORT_CFG, p); + + ocelot_set_aggr_pgids(ocelot); } /* Checks if the net_device instance given to us originates from our driver. */ @@ -1113,6 +1518,17 @@ static int ocelot_netdevice_port_event(struct net_device *dev, else ocelot_port_bridge_leave(ocelot_port, info->upper_dev); + + ocelot_vlan_port_apply(ocelot_port->ocelot, + ocelot_port); + } + if (netif_is_lag_master(info->upper_dev)) { + if (info->linking) + err = ocelot_port_lag_join(ocelot_port, + info->upper_dev); + else + ocelot_port_lag_leave(ocelot_port, + info->upper_dev); } break; default: @@ -1129,6 +1545,20 @@ static int ocelot_netdevice_event(struct notifier_block *unused, struct net_device *dev = netdev_notifier_info_to_dev(ptr); int ret = 0; + if (event == NETDEV_PRECHANGEUPPER && + netif_is_lag_master(info->upper_dev)) { + struct netdev_lag_upper_info *lag_upper_info = info->upper_info; + struct netlink_ext_ack *extack; + + if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { + extack = netdev_notifier_info_to_extack(&info->info); + NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type"); + + ret = -EINVAL; + goto notify; + } + } + if (netif_is_lag_master(dev)) { struct net_device *slave; struct list_head *iter; @@ -1176,6 +1606,9 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port, dev->ethtool_ops = &ocelot_ethtool_ops; dev->switchdev_ops = &ocelot_port_switchdev_ops; + dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; + dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + memcpy(dev->dev_addr, ocelot->base_mac, ETH_ALEN); dev->dev_addr[ETH_ALEN - 1] += port; ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, ocelot_port->pvid, @@ -1187,6 +1620,9 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port, goto err_register_netdev; } + /* Basic L2 initialization */ + ocelot_vlan_port_apply(ocelot, ocelot_port); + return 0; err_register_netdev: @@ -1201,6 +1637,11 @@ int ocelot_init(struct ocelot *ocelot) int i, cpu = ocelot->num_phys_ports; char queue_name[32]; + ocelot->lags = 
devm_kcalloc(ocelot->dev, ocelot->num_phys_ports, + sizeof(u32), GFP_KERNEL); + if (!ocelot->lags) + return -ENOMEM; + ocelot->stats = devm_kcalloc(ocelot->dev, ocelot->num_phys_ports * ocelot->num_stats, sizeof(u64), GFP_KERNEL); diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h index 097bd12a10d4..616bec30dfa3 100644 --- a/drivers/net/ethernet/mscc/ocelot.h +++ b/drivers/net/ethernet/mscc/ocelot.h @@ -493,7 +493,7 @@ struct ocelot { u8 num_cpu_ports; struct ocelot_port **ports; - u16 lags[16]; + u32 *lags; /* Keep track of the vlan port masks */ u32 vlan_mask[VLAN_N_VID]; diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c index 18df7d934e81..26bb3b18f3be 100644 --- a/drivers/net/ethernet/mscc/ocelot_board.c +++ b/drivers/net/ethernet/mscc/ocelot_board.c @@ -29,7 +29,7 @@ static int ocelot_parse_ifh(u32 *ifh, struct frame_info *info) info->port = (ifh[2] & GENMASK(14, 11)) >> 11; info->cpuq = (ifh[3] & GENMASK(27, 20)) >> 20; - info->tag_type = (ifh[3] & GENMASK(16, 16)) >> 16; + info->tag_type = (ifh[3] & BIT(16)) >> 16; info->vid = ifh[3] & GENMASK(11, 0); return 0; diff --git a/drivers/net/ethernet/neterion/Kconfig b/drivers/net/ethernet/neterion/Kconfig index 71899009c468..c26e0f70c494 100644 --- a/drivers/net/ethernet/neterion/Kconfig +++ b/drivers/net/ethernet/neterion/Kconfig @@ -2,8 +2,8 @@ # Exar device configuration # -config NET_VENDOR_EXAR - bool "Exar devices" +config NET_VENDOR_NETERION + bool "Neterion (Exar) devices" default y depends on PCI ---help--- @@ -11,16 +11,19 @@ Note that the answer to this question doesn't directly affect the kernel: saying N will just cause the configurator to skip all - the questions about Exar cards. If you say Y, you will be asked for - your specific card in the following questions. + the questions about Neterion/Exar cards. If you say Y, you will be + asked for your specific card in the following questions. -if NET_VENDOR_EXAR +if NET_VENDOR_NETERION config S2IO - tristate "Exar Xframe 10Gb Ethernet Adapter" + tristate "Neterion (Exar) Xframe 10Gb Ethernet Adapter" depends on PCI ---help--- This driver supports Exar Corp's Xframe Series 10Gb Ethernet Adapters. + These were originally released from S2IO, which renamed itself + Neterion. So, the adapters might be labeled as either one, depending + on their age. More specific information on configuring the driver is in <file:Documentation/networking/s2io.txt>. @@ -29,11 +32,13 @@ config S2IO will be called s2io. config VXGE - tristate "Exar X3100 Series 10GbE PCIe Server Adapter" + tristate "Neterion (Exar) X3100 Series 10GbE PCIe Server Adapter" depends on PCI ---help--- This driver supports Exar Corp's X3100 Series 10 GbE PCIe - I/O Virtualized Server Adapter. + I/O Virtualized Server Adapter. These were originally released from + Neterion, which was later acquired by Exar. So, the adapters might be + labeled as either one, depending on their age. More specific information on configuring the driver is in <file:Documentation/networking/vxge.txt>. @@ -50,4 +55,4 @@ config VXGE_DEBUG_TRACE_ALL the vxge driver. By default only a few debug trace statements are enabled. 
-endif # NET_VENDOR_EXAR +endif # NET_VENDOR_NETERION diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c index 358ed6118881..a2c0a93ca8b6 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c @@ -14,7 +14,6 @@ #include <linux/vmalloc.h> #include <linux/etherdevice.h> #include <linux/pci.h> -#include <linux/pci_hotplug.h> #include <linux/slab.h> #include "vxge-traffic.h" diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 8a92088df0d7..1d9e36835404 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -34,10 +34,11 @@ #define pr_fmt(fmt) "NFP net bpf: " fmt #include <linux/bug.h> -#include <linux/kernel.h> #include <linux/bpf.h> #include <linux/filter.h> +#include <linux/kernel.h> #include <linux/pkt_cls.h> +#include <linux/reciprocal_div.h> #include <linux/unistd.h> #include "main.h" @@ -416,6 +417,60 @@ emit_alu(struct nfp_prog *nfp_prog, swreg dst, } static void +__emit_mul(struct nfp_prog *nfp_prog, enum alu_dst_ab dst_ab, u16 areg, + enum mul_type type, enum mul_step step, u16 breg, bool swap, + bool wr_both, bool dst_lmextn, bool src_lmextn) +{ + u64 insn; + + insn = OP_MUL_BASE | + FIELD_PREP(OP_MUL_A_SRC, areg) | + FIELD_PREP(OP_MUL_B_SRC, breg) | + FIELD_PREP(OP_MUL_STEP, step) | + FIELD_PREP(OP_MUL_DST_AB, dst_ab) | + FIELD_PREP(OP_MUL_SW, swap) | + FIELD_PREP(OP_MUL_TYPE, type) | + FIELD_PREP(OP_MUL_WR_AB, wr_both) | + FIELD_PREP(OP_MUL_SRC_LMEXTN, src_lmextn) | + FIELD_PREP(OP_MUL_DST_LMEXTN, dst_lmextn); + + nfp_prog_push(nfp_prog, insn); +} + +static void +emit_mul(struct nfp_prog *nfp_prog, swreg lreg, enum mul_type type, + enum mul_step step, swreg rreg) +{ + struct nfp_insn_ur_regs reg; + u16 areg; + int err; + + if (type == MUL_TYPE_START && step != MUL_STEP_NONE) { + nfp_prog->error = -EINVAL; + return; + } + + if (step == MUL_LAST || step == MUL_LAST_2) { + /* When the step is MUL_LAST or MUL_LAST_2, the left + * source is used as the destination. + */ + err = swreg_to_unrestricted(lreg, reg_none(), rreg, &reg); + areg = reg.dst; + } else { + err = swreg_to_unrestricted(reg_none(), lreg, rreg, &reg); + areg = reg.areg; + } + + if (err) { + nfp_prog->error = err; + return; + } + + __emit_mul(nfp_prog, reg.dst_ab, areg, type, step, reg.breg, reg.swap, + reg.wr_both, reg.dst_lmextn, reg.src_lmextn); +} + +static void __emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc, u8 areg, u8 bmask, u8 breg, u8 shift, bool imm8, bool zero, bool swap, bool wr_both, @@ -670,7 +725,7 @@ static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) xfer_num = round_up(len, 4) / 4; if (src_40bit_addr) - addr40_offset(nfp_prog, meta->insn.src_reg, off, &src_base, + addr40_offset(nfp_prog, meta->insn.src_reg * 2, off, &src_base, &off); /* Setup PREV_ALU fields to override memory read length. 
*/ @@ -1380,6 +1435,133 @@ static void wrp_end32(struct nfp_prog *nfp_prog, swreg reg_in, u8 gpr_out) SHF_SC_R_ROT, 16); } +static void +wrp_mul_u32(struct nfp_prog *nfp_prog, swreg dst_hi, swreg dst_lo, swreg lreg, + swreg rreg, bool gen_high_half) +{ + emit_mul(nfp_prog, lreg, MUL_TYPE_START, MUL_STEP_NONE, rreg); + emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_1, rreg); + emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_2, rreg); + emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_3, rreg); + emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_4, rreg); + emit_mul(nfp_prog, dst_lo, MUL_TYPE_STEP_32x32, MUL_LAST, reg_none()); + if (gen_high_half) + emit_mul(nfp_prog, dst_hi, MUL_TYPE_STEP_32x32, MUL_LAST_2, + reg_none()); + else + wrp_immed(nfp_prog, dst_hi, 0); +} + +static void +wrp_mul_u16(struct nfp_prog *nfp_prog, swreg dst_hi, swreg dst_lo, swreg lreg, + swreg rreg) +{ + emit_mul(nfp_prog, lreg, MUL_TYPE_START, MUL_STEP_NONE, rreg); + emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_16x16, MUL_STEP_1, rreg); + emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_16x16, MUL_STEP_2, rreg); + emit_mul(nfp_prog, dst_lo, MUL_TYPE_STEP_16x16, MUL_LAST, reg_none()); +} + +static int +wrp_mul(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, + bool gen_high_half, bool ropnd_from_reg) +{ + swreg multiplier, multiplicand, dst_hi, dst_lo; + const struct bpf_insn *insn = &meta->insn; + u32 lopnd_max, ropnd_max; + u8 dst_reg; + + dst_reg = insn->dst_reg; + multiplicand = reg_a(dst_reg * 2); + dst_hi = reg_both(dst_reg * 2 + 1); + dst_lo = reg_both(dst_reg * 2); + lopnd_max = meta->umax_dst; + if (ropnd_from_reg) { + multiplier = reg_b(insn->src_reg * 2); + ropnd_max = meta->umax_src; + } else { + u32 imm = insn->imm; + + multiplier = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog)); + ropnd_max = imm; + } + if (lopnd_max > U16_MAX || ropnd_max > U16_MAX) + wrp_mul_u32(nfp_prog, dst_hi, dst_lo, multiplicand, multiplier, + gen_high_half); + else + wrp_mul_u16(nfp_prog, dst_hi, dst_lo, multiplicand, multiplier); + + return 0; +} + +static int wrp_div_imm(struct nfp_prog *nfp_prog, u8 dst, u64 imm) +{ + swreg dst_both = reg_both(dst), dst_a = reg_a(dst), dst_b = reg_a(dst); + struct reciprocal_value_adv rvalue; + u8 pre_shift, exp; + swreg magic; + + if (imm > U32_MAX) { + wrp_immed(nfp_prog, dst_both, 0); + return 0; + } + + /* NOTE: because we are using "reciprocal_value_adv" which doesn't + * support "divisor > (1u << 31)", we need to JIT a separate NFP + * sequence to handle such a case, which actually equals the result + * of the unsigned comparison "dst >= imm" and could be calculated + * using the following NFP sequence: + * + * alu[--, dst, -, imm] + * immed[imm, 0] + * alu[dst, imm, +carry, 0] + * + */ + if (imm > 1U << 31) { + swreg tmp_b = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog)); + + emit_alu(nfp_prog, reg_none(), dst_a, ALU_OP_SUB, tmp_b); + wrp_immed(nfp_prog, imm_a(nfp_prog), 0); + emit_alu(nfp_prog, dst_both, imm_a(nfp_prog), ALU_OP_ADD_C, + reg_imm(0)); + return 0; + } + + rvalue = reciprocal_value_adv(imm, 32); + exp = rvalue.exp; + if (rvalue.is_wide_m && !(imm & 1)) { + pre_shift = fls(imm & -imm) - 1; + rvalue = reciprocal_value_adv(imm >> pre_shift, 32 - pre_shift); + } else { + pre_shift = 0; + } + magic = ur_load_imm_any(nfp_prog, rvalue.m, imm_b(nfp_prog)); + if (imm == 1U << exp) { + emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b, + SHF_SC_R_SHF, exp); + } else if (rvalue.is_wide_m) { + wrp_mul_u32(nfp_prog, imm_both(nfp_prog), reg_none(), dst_a, +
magic, true); + emit_alu(nfp_prog, dst_both, dst_a, ALU_OP_SUB, + imm_b(nfp_prog)); + emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b, + SHF_SC_R_SHF, 1); + emit_alu(nfp_prog, dst_both, dst_a, ALU_OP_ADD, + imm_b(nfp_prog)); + emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b, + SHF_SC_R_SHF, rvalue.sh - 1); + } else { + if (pre_shift) + emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, + dst_b, SHF_SC_R_SHF, pre_shift); + wrp_mul_u32(nfp_prog, dst_both, reg_none(), dst_a, magic, true); + emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, + dst_b, SHF_SC_R_SHF, rvalue.sh); + } + + return 0; +} + static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { swreg tmp = imm_a(nfp_prog), tmp_len = imm_b(nfp_prog); @@ -1684,6 +1866,31 @@ static int sub_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) return 0; } +static int mul_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return wrp_mul(nfp_prog, meta, true, true); +} + +static int mul_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return wrp_mul(nfp_prog, meta, true, false); +} + +static int div_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + const struct bpf_insn *insn = &meta->insn; + + return wrp_div_imm(nfp_prog, insn->dst_reg * 2, insn->imm); +} + +static int div_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + /* NOTE: verifier hook has rejected cases for which verifier doesn't + * know whether the source operand is constant or not. + */ + return wrp_div_imm(nfp_prog, meta->insn.dst_reg * 2, meta->umin_src); +} + static int neg_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { const struct bpf_insn *insn = &meta->insn; @@ -1772,8 +1979,8 @@ static int shl_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) u8 dst, src; dst = insn->dst_reg * 2; - umin = meta->umin; - umax = meta->umax; + umin = meta->umin_src; + umax = meta->umax_src; if (umin == umax) return __shl_imm64(nfp_prog, dst, umin); @@ -1881,8 +2088,8 @@ static int shr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) u8 dst, src; dst = insn->dst_reg * 2; - umin = meta->umin; - umax = meta->umax; + umin = meta->umin_src; + umax = meta->umax_src; if (umin == umax) return __shr_imm64(nfp_prog, dst, umin); @@ -1995,8 +2202,8 @@ static int ashr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) u8 dst, src; dst = insn->dst_reg * 2; - umin = meta->umin; - umax = meta->umax; + umin = meta->umin_src; + umax = meta->umax_src; if (umin == umax) return __ashr_imm64(nfp_prog, dst, umin); @@ -2097,6 +2304,26 @@ static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm); } +static int mul_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return wrp_mul(nfp_prog, meta, false, true); +} + +static int mul_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return wrp_mul(nfp_prog, meta, false, false); +} + +static int div_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return div_reg64(nfp_prog, meta); +} + +static int div_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return div_imm64(nfp_prog, meta); +} + static int neg_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { u8 dst = meta->insn.dst_reg * 2; @@ -2848,6 +3075,10 @@ static const instr_cb_t instr_cb[256] = { [BPF_ALU64 | BPF_ADD | BPF_K] = add_imm64, [BPF_ALU64 | BPF_SUB | BPF_X] = sub_reg64, [BPF_ALU64 | BPF_SUB 
| BPF_K] = sub_imm64, + [BPF_ALU64 | BPF_MUL | BPF_X] = mul_reg64, + [BPF_ALU64 | BPF_MUL | BPF_K] = mul_imm64, + [BPF_ALU64 | BPF_DIV | BPF_X] = div_reg64, + [BPF_ALU64 | BPF_DIV | BPF_K] = div_imm64, [BPF_ALU64 | BPF_NEG] = neg_reg64, [BPF_ALU64 | BPF_LSH | BPF_X] = shl_reg64, [BPF_ALU64 | BPF_LSH | BPF_K] = shl_imm64, @@ -2867,6 +3098,10 @@ static const instr_cb_t instr_cb[256] = { [BPF_ALU | BPF_ADD | BPF_K] = add_imm, [BPF_ALU | BPF_SUB | BPF_X] = sub_reg, [BPF_ALU | BPF_SUB | BPF_K] = sub_imm, + [BPF_ALU | BPF_MUL | BPF_X] = mul_reg, + [BPF_ALU | BPF_MUL | BPF_K] = mul_imm, + [BPF_ALU | BPF_DIV | BPF_X] = div_reg, + [BPF_ALU | BPF_DIV | BPF_K] = div_imm, [BPF_ALU | BPF_NEG] = neg_reg, [BPF_ALU | BPF_LSH | BPF_K] = shl_imm, [BPF_ALU | BPF_END | BPF_X] = end_reg32, @@ -3299,7 +3534,8 @@ curr_pair_is_memcpy(struct nfp_insn_meta *ld_meta, if (!is_mbpf_load(ld_meta) || !is_mbpf_store(st_meta)) return false; - if (ld_meta->ptr.type != PTR_TO_PACKET) + if (ld_meta->ptr.type != PTR_TO_PACKET && + ld_meta->ptr.type != PTR_TO_MAP_VALUE) return false; if (st_meta->ptr.type != PTR_TO_PACKET) diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index 40216d56dddc..994d2b756fe1 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -66,26 +66,19 @@ nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn, struct bpf_prog *prog, struct netlink_ext_ack *extack) { bool running, xdp_running; - int ret; if (!nfp_net_ebpf_capable(nn)) return -EINVAL; running = nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF; - xdp_running = running && nn->dp.bpf_offload_xdp; + xdp_running = running && nn->xdp_hw.prog; if (!prog && !xdp_running) return 0; if (prog && running && !xdp_running) return -EBUSY; - ret = nfp_net_bpf_offload(nn, prog, running, extack); - /* Stop offload if replace not possible */ - if (ret) - return ret; - - nn->dp.bpf_offload_xdp = !!prog; - return ret; + return nfp_net_bpf_offload(nn, prog, running, extack); } static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn) @@ -202,14 +195,11 @@ static int nfp_bpf_setup_tc_block(struct net_device *netdev, if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) return -EOPNOTSUPP; - if (tcf_block_shared(f->block)) - return -EOPNOTSUPP; - switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, nfp_bpf_setup_tc_block_cb, - nn, nn); + nn, nn, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, nfp_bpf_setup_tc_block_cb, @@ -411,6 +401,20 @@ err_release_free: return -EINVAL; } +static int nfp_bpf_ndo_init(struct nfp_app *app, struct net_device *netdev) +{ + struct nfp_app_bpf *bpf = app->priv; + + return bpf_offload_dev_netdev_register(bpf->bpf_dev, netdev); +} + +static void nfp_bpf_ndo_uninit(struct nfp_app *app, struct net_device *netdev) +{ + struct nfp_app_bpf *bpf = app->priv; + + bpf_offload_dev_netdev_unregister(bpf->bpf_dev, netdev); +} + static int nfp_bpf_init(struct nfp_app *app) { struct nfp_app_bpf *bpf; @@ -434,6 +438,11 @@ static int nfp_bpf_init(struct nfp_app *app) if (err) goto err_free_neutral_maps; + bpf->bpf_dev = bpf_offload_dev_create(); + err = PTR_ERR_OR_ZERO(bpf->bpf_dev); + if (err) + goto err_free_neutral_maps; + return 0; err_free_neutral_maps: @@ -452,6 +461,7 @@ static void nfp_bpf_clean(struct nfp_app *app) { struct nfp_app_bpf *bpf = app->priv; + bpf_offload_dev_destroy(bpf->bpf_dev); WARN_ON(!skb_queue_empty(&bpf->cmsg_replies)); 
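The wrp_div_imm() sequence JITed earlier in this jit.c hunk turns an unsigned divide by a constant into one of three shapes: a plain right shift when the divisor is a power of two, the unsigned comparison "dst >= imm" when the divisor exceeds 2^31 (a range reciprocal_value_adv() cannot handle), and a reciprocal (magic-number) multiply otherwise. A userspace sketch of those three shapes, using the well-known divide-by-5 constant ceil(2^34 / 5) = 0xCCCCCCCD rather than the exact reciprocal_value_adv() output (function names here are illustrative):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Reciprocal multiply: q = (x * ceil(2^(32+sh) / d)) >> (32 + sh).
 * For d == 5, sh == 2 and the magic constant is 0xCCCCCCCD; this is
 * exact for every 32-bit x.
 */
static uint32_t div_by_5(uint32_t x)
{
	return (uint32_t)(((uint64_t)x * 0xCCCCCCCDULL) >> 34);
}

/* Power-of-two divisor: imm == 1 << exp reduces to a shift. */
static uint32_t div_by_pow2(uint32_t x, unsigned int exp)
{
	return x >> exp;
}

/* Divisor above 2^31: the quotient can only be 0 or 1, so it equals
 * the unsigned compare x >= imm, which is what the SUB plus
 * ADD-with-carry NFP sequence in the hunk above computes.
 */
static uint32_t div_by_huge(uint32_t x, uint32_t imm)
{
	return x >= imm;
}

int main(void)
{
	for (uint32_t x = 0; x < 1000000; x += 7)
		assert(div_by_5(x) == x / 5);
	assert(div_by_5(UINT32_MAX) == UINT32_MAX / 5);
	assert(div_by_pow2(4096, 5) == 4096 / 32);
	assert(div_by_huge(0xF0000000u, 0x80000001u) == 1);
	assert(div_by_huge(0x7FFFFFFFu, 0x80000001u) == 0);
	printf("reciprocal division sketch OK\n");
	return 0;
}

The is_wide_m branch in the hunk exists because some divisors need a 33-bit reciprocal; pre-shifting an even divisor by its trailing zero count, as the fls(imm & -imm) - 1 computation does, lets the reciprocal fit back into 32 bits.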
WARN_ON(!list_empty(&bpf->map_list)); WARN_ON(bpf->maps_in_use || bpf->map_elems_in_use); @@ -473,6 +483,9 @@ const struct nfp_app_type app_bpf = { .extra_cap = nfp_bpf_extra_cap, + .ndo_init = nfp_bpf_ndo_init, + .ndo_uninit = nfp_bpf_ndo_uninit, + .vnic_alloc = nfp_bpf_vnic_alloc, .vnic_free = nfp_bpf_vnic_free, diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index 654fe7823e5e..bec935468f90 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -110,6 +110,8 @@ enum pkt_vec { * struct nfp_app_bpf - bpf app priv structure * @app: backpointer to the app * + * @bpf_dev: BPF offload device handle + * * @tag_allocator: bitmap of control message tags in use * @tag_alloc_next: next tag bit to allocate * @tag_alloc_last: next tag bit to be freed @@ -150,6 +152,8 @@ enum pkt_vec { struct nfp_app_bpf { struct nfp_app *app; + + struct bpf_offload_dev *bpf_dev; + DECLARE_BITMAP(tag_allocator, U16_MAX + 1); u16 tag_alloc_next; u16 tag_alloc_last; @@ -263,8 +267,10 @@ struct nfp_bpf_reg_state { * @func_id: function id for call instructions * @arg1: arg1 for call instructions * @arg2: arg2 for call instructions - * @umin: copy of core verifier umin_value. - * @umax: copy of core verifier umax_value. + * @umin_src: copy of core verifier umin_value for src operand. + * @umax_src: copy of core verifier umax_value for src operand. + * @umin_dst: copy of core verifier umin_value for dst operand. + * @umax_dst: copy of core verifier umax_value for dst operand. * @off: index of first generated machine instruction (in nfp_prog.prog) * @n: eBPF instruction number * @flags: eBPF instruction extra optimization flags @@ -300,12 +306,15 @@ struct nfp_insn_meta { struct bpf_reg_state arg1; struct nfp_bpf_reg_state arg2; }; - /* We are interested in range info for some operands, - * for example, the shift amount. + /* We are interested in range info for operands of ALU + * operations. For example, the shift amount, multiplicand and + * multiplier.
*/ struct { - u64 umin; - u64 umax; + u64 umin_src; + u64 umax_src; + u64 umin_dst; + u64 umax_dst; }; }; unsigned int off; @@ -339,6 +348,11 @@ static inline u8 mbpf_mode(const struct nfp_insn_meta *meta) return BPF_MODE(meta->insn.code); } +static inline bool is_mbpf_alu(const struct nfp_insn_meta *meta) +{ + return mbpf_class(meta) == BPF_ALU64 || mbpf_class(meta) == BPF_ALU; +} + static inline bool is_mbpf_load(const struct nfp_insn_meta *meta) { return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM); @@ -384,23 +398,14 @@ static inline bool is_mbpf_xadd(const struct nfp_insn_meta *meta) return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_XADD); } -static inline bool is_mbpf_indir_shift(const struct nfp_insn_meta *meta) +static inline bool is_mbpf_mul(const struct nfp_insn_meta *meta) { - u8 code = meta->insn.code; - bool is_alu, is_shift; - u8 opclass, opcode; - - opclass = BPF_CLASS(code); - is_alu = opclass == BPF_ALU64 || opclass == BPF_ALU; - if (!is_alu) - return false; - - opcode = BPF_OP(code); - is_shift = opcode == BPF_LSH || opcode == BPF_RSH || opcode == BPF_ARSH; - if (!is_shift) - return false; + return is_mbpf_alu(meta) && mbpf_op(meta) == BPF_MUL; +} - return BPF_SRC(code) == BPF_X; +static inline bool is_mbpf_div(const struct nfp_insn_meta *meta) +{ + return is_mbpf_alu(meta) && mbpf_op(meta) == BPF_DIV; } /** diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index 7eae4c0266f8..49b03f7dbf46 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -190,8 +190,10 @@ nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog, meta->insn = prog[i]; meta->n = i; - if (is_mbpf_indir_shift(meta)) - meta->umin = U64_MAX; + if (is_mbpf_alu(meta)) { + meta->umin_src = U64_MAX; + meta->umin_dst = U64_MAX; + } list_add_tail(&meta->l, &nfp_prog->insns); } @@ -564,14 +566,8 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog, { int err; - if (prog) { - struct bpf_prog_offload *offload = prog->aux->offload; - - if (!offload) - return -EINVAL; - if (offload->netdev != nn->dp.netdev) - return -EINVAL; - } + if (prog && !bpf_offload_dev_match(prog, nn->dp.netdev)) + return -EINVAL; if (prog && old_prog) { u8 cap; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c index 4bfeba7b21b2..49ba0d645d36 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c @@ -517,6 +517,82 @@ nfp_bpf_check_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, } static int +nfp_bpf_check_alu(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, + struct bpf_verifier_env *env) +{ + const struct bpf_reg_state *sreg = + cur_regs(env) + meta->insn.src_reg; + const struct bpf_reg_state *dreg = + cur_regs(env) + meta->insn.dst_reg; + + meta->umin_src = min(meta->umin_src, sreg->umin_value); + meta->umax_src = max(meta->umax_src, sreg->umax_value); + meta->umin_dst = min(meta->umin_dst, dreg->umin_value); + meta->umax_dst = max(meta->umax_dst, dreg->umax_value); + + /* NFP supports u16 and u32 multiplication. + * + * For ALU64, if either operand is beyond u32's value range, we reject + * it. One thing to note, if the source operand is BPF_K, then we need + * to check "imm" field directly, and we'd reject it if it is negative. 
+ * Because for ALU64, "imm" (with s32 type) is expected to be sign + * extended to s64, which NFP mul doesn't support. + * + * For ALU32, it is fine for "imm" to be negative though, because the + * result is 32 bits and there is no difference in the low half of + * the result for signed/unsigned mul, so we will get the correct result. + */ + if (is_mbpf_mul(meta)) { + if (meta->umax_dst > U32_MAX) { + pr_vlog(env, "multiplier is not within u32 value range\n"); + return -EINVAL; + } + if (mbpf_src(meta) == BPF_X && meta->umax_src > U32_MAX) { + pr_vlog(env, "multiplicand is not within u32 value range\n"); + return -EINVAL; + } + if (mbpf_class(meta) == BPF_ALU64 && + mbpf_src(meta) == BPF_K && meta->insn.imm < 0) { + pr_vlog(env, "sign extended multiplicand won't be within u32 value range\n"); + return -EINVAL; + } + } + + /* NFP doesn't have divide instructions, so we support divide by constant + * through reciprocal multiplication. Given that NFP supports multiplication + * no wider than u32, we require the divisor and dividend to be no bigger + * than that as well. + * + * Also, eBPF doesn't support signed divide and has enforced this at the C + * language level by failing compilation. However, the LLVM assembler hasn't + * enforced this, so it is possible for a negative constant to leak in as + * a BPF_K operand through assembly code; we reject such cases as well. + */ + if (is_mbpf_div(meta)) { + if (meta->umax_dst > U32_MAX) { + pr_vlog(env, "dividend is not within u32 value range\n"); + return -EINVAL; + } + if (mbpf_src(meta) == BPF_X) { + if (meta->umin_src != meta->umax_src) { + pr_vlog(env, "divisor is not constant\n"); + return -EINVAL; + } + if (meta->umax_src > U32_MAX) { + pr_vlog(env, "divisor is not within u32 value range\n"); + return -EINVAL; + } + } + if (mbpf_src(meta) == BPF_K && meta->insn.imm < 0) { + pr_vlog(env, "divide by negative constant is not supported\n"); + return -EINVAL; + } + } + + return 0; +} + +static int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx) { struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv; @@ -551,13 +627,8 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx) if (is_mbpf_xadd(meta)) return nfp_bpf_check_xadd(nfp_prog, meta, env); - if (is_mbpf_indir_shift(meta)) { - const struct bpf_reg_state *sreg = - cur_regs(env) + meta->insn.src_reg; - - meta->umin = min(meta->umin, sreg->umin_value); - meta->umax = max(meta->umax, sreg->umax_value); - } + if (is_mbpf_alu(meta)) + return nfp_bpf_check_alu(nfp_prog, meta, env); return 0; } diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index 4a6d2db75071..e56b815a8dc6 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -34,6 +34,7 @@ #include <linux/bitfield.h> #include <net/pkt_cls.h> #include <net/switchdev.h> +#include <net/tc_act/tc_csum.h> #include <net/tc_act/tc_gact.h> #include <net/tc_act/tc_mirred.h> #include <net/tc_act/tc_pedit.h> @@ -44,6 +45,8 @@ #include "main.h" #include "../nfp_net_repr.h" +#define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS (TUNNEL_CSUM | TUNNEL_KEY) + static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan) { size_t act_size = sizeof(struct nfp_fl_pop_vlan); @@ -235,9 +238,12 @@ nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun, size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun); struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action); u32 tmp_set_ip_tun_type_index = 0; + 
struct flowi4 flow = {}; /* Currently support one pre-tunnel so index is always 0. */ int pretun_idx = 0; + struct rtable *rt; struct net *net; + int err; if (ip_tun->options_len) return -EOPNOTSUPP; @@ -254,7 +260,28 @@ nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun, set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index); set_tun->tun_id = ip_tun->key.tun_id; - set_tun->ttl = net->ipv4.sysctl_ip_default_ttl; + + /* Do a route lookup to determine ttl - if it fails then use the default. + * Note that CONFIG_INET is a requirement of CONFIG_NET_SWITCHDEV so + * must be defined here. + */ + flow.daddr = ip_tun->key.u.ipv4.dst; + flow.flowi4_proto = IPPROTO_UDP; + rt = ip_route_output_key(net, &flow); + err = PTR_ERR_OR_ZERO(rt); + if (!err) { + set_tun->ttl = ip4_dst_hoplimit(&rt->dst); + ip_rt_put(rt); + } else { + set_tun->ttl = net->ipv4.sysctl_ip_default_ttl; + } + + set_tun->tos = ip_tun->key.tos; + + if (!(ip_tun->key.tun_flags & TUNNEL_KEY) || + ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS) + return -EOPNOTSUPP; + set_tun->tun_flags = ip_tun->key.tun_flags; /* Complete pre_tunnel action. */ pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst; @@ -398,8 +425,27 @@ nfp_fl_set_tport(const struct tc_action *action, int idx, u32 off, return 0; } +static u32 nfp_fl_csum_l4_to_flag(u8 ip_proto) +{ + switch (ip_proto) { + case 0: + /* Filter doesn't force proto match, so + * both TCP and UDP will be updated if encountered + */ + return TCA_CSUM_UPDATE_FLAG_TCP | TCA_CSUM_UPDATE_FLAG_UDP; + case IPPROTO_TCP: + return TCA_CSUM_UPDATE_FLAG_TCP; + case IPPROTO_UDP: + return TCA_CSUM_UPDATE_FLAG_UDP; + default: + /* All other protocols will be ignored by FW */ + return 0; + } +} + static int -nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len) +nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow, + char *nfp_action, int *a_len, u32 *csum_updated) { struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src; struct nfp_fl_set_ip4_addrs set_ip_addr; @@ -409,6 +455,7 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len) int idx, nkeys, err; size_t act_size; u32 offset, cmd; + u8 ip_proto = 0; memset(&set_ip6_dst, 0, sizeof(set_ip6_dst)); memset(&set_ip6_src, 0, sizeof(set_ip6_src)); @@ -451,6 +498,15 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len) return err; } + if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_dissector_key_basic *basic; + + basic = skb_flow_dissector_target(flow->dissector, + FLOW_DISSECTOR_KEY_BASIC, + flow->key); + ip_proto = basic->ip_proto; + } + if (set_eth.head.len_lw) { act_size = sizeof(set_eth); memcpy(nfp_action, &set_eth, act_size); @@ -459,6 +515,10 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len) act_size = sizeof(set_ip_addr); memcpy(nfp_action, &set_ip_addr, act_size); *a_len += act_size; + + /* Hardware will automatically fix IPv4 and TCP/UDP checksum. */ + *csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR | + nfp_fl_csum_l4_to_flag(ip_proto); } else if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) { /* TC compiles set src and dst IPv6 address as a single action, * the hardware requires this to be 2 separate actions. @@ -471,18 +531,30 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len) memcpy(&nfp_action[sizeof(set_ip6_src)], &set_ip6_dst, act_size); *a_len += act_size; + + /* Hardware will automatically fix TCP/UDP checksum.
*/ + *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto); } else if (set_ip6_dst.head.len_lw) { act_size = sizeof(set_ip6_dst); memcpy(nfp_action, &set_ip6_dst, act_size); *a_len += act_size; + + /* Hardware will automatically fix TCP/UDP checksum. */ + *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto); } else if (set_ip6_src.head.len_lw) { act_size = sizeof(set_ip6_src); memcpy(nfp_action, &set_ip6_src, act_size); *a_len += act_size; + + /* Hardware will automatically fix TCP/UDP checksum. */ + *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto); } else if (set_tport.head.len_lw) { act_size = sizeof(set_tport); memcpy(nfp_action, &set_tport, act_size); *a_len += act_size; + + /* Hardware will automatically fix TCP/UDP checksum. */ + *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto); } return 0; @@ -493,12 +565,18 @@ nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a, struct nfp_fl_payload *nfp_fl, int *a_len, struct net_device *netdev, bool last, enum nfp_flower_tun_type *tun_type, int *tun_out_cnt, - int *out_cnt) + int *out_cnt, u32 *csum_updated) { struct nfp_flower_priv *priv = app->priv; struct nfp_fl_output *output; int err, prelag_size; + /* If csum_updated has not been reset by now, it means HW will + * incorrectly update csums when they are not requested. + */ + if (*csum_updated) + return -EOPNOTSUPP; + if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ) return -EOPNOTSUPP; @@ -529,10 +607,11 @@ nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a, static int nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a, + struct tc_cls_flower_offload *flow, struct nfp_fl_payload *nfp_fl, int *a_len, struct net_device *netdev, enum nfp_flower_tun_type *tun_type, int *tun_out_cnt, - int *out_cnt) + int *out_cnt, u32 *csum_updated) { struct nfp_fl_set_ipv4_udp_tun *set_tun; struct nfp_fl_pre_tunnel *pre_tun; @@ -545,14 +624,14 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a, } else if (is_tcf_mirred_egress_redirect(a)) { err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev, true, tun_type, tun_out_cnt, - out_cnt); + out_cnt, csum_updated); if (err) return err; } else if (is_tcf_mirred_egress_mirror(a)) { err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev, false, tun_type, tun_out_cnt, - out_cnt); + out_cnt, csum_updated); if (err) return err; @@ -602,8 +681,17 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a, /* Tunnel decap is handled by default so accept action. */ return 0; } else if (is_tcf_pedit(a)) { - if (nfp_fl_pedit(a, &nfp_fl->action_data[*a_len], a_len)) + if (nfp_fl_pedit(a, flow, &nfp_fl->action_data[*a_len], + a_len, csum_updated)) + return -EOPNOTSUPP; + } else if (is_tcf_csum(a)) { + /* csum action requests recalc of something we have not fixed */ + if (tcf_csum_update_flags(a) & ~*csum_updated) return -EOPNOTSUPP; + /* If we will correctly fix the csum we can remove it from the + * csum update list, which will later be used to check support. + */ + *csum_updated &= ~tcf_csum_update_flags(a); } else { /* Currently we do not handle any other actions.
*/ return -EOPNOTSUPP; @@ -620,6 +708,7 @@ int nfp_flower_compile_action(struct nfp_app *app, int act_len, act_cnt, err, tun_out_cnt, out_cnt; enum nfp_flower_tun_type tun_type; const struct tc_action *a; + u32 csum_updated = 0; LIST_HEAD(actions); memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ); @@ -632,8 +721,9 @@ int nfp_flower_compile_action(struct nfp_app *app, tcf_exts_to_list(flow->exts, &actions); list_for_each_entry(a, &actions, list) { - err = nfp_flower_loop_action(app, a, nfp_flow, &act_len, netdev, - &tun_type, &tun_out_cnt, &out_cnt); + err = nfp_flower_loop_action(app, a, flow, nfp_flow, &act_len, + netdev, &tun_type, &tun_out_cnt, + &out_cnt, &csum_updated); if (err) return err; act_cnt++; diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index 4a7f3510a296..15f1eacd76b6 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -203,9 +203,9 @@ struct nfp_fl_set_ipv4_udp_tun { __be16 reserved; __be64 tun_id __packed; __be32 tun_type_index; - __be16 reserved2; + __be16 tun_flags; u8 ttl; - u8 reserved3; + u8 tos; __be32 extra[2]; }; diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c index 0c4c957717ea..bf10598f66ae 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c @@ -564,8 +564,9 @@ nfp_fl_lag_changeupper_event(struct nfp_fl_lag *lag, if (lag_upper_info && lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_ACTIVEBACKUP && (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH || - (lag_upper_info->hash_type != NETDEV_LAG_HASH_L34 && - lag_upper_info->hash_type != NETDEV_LAG_HASH_E34))) { + (lag_upper_info->hash_type != NETDEV_LAG_HASH_L34 && + lag_upper_info->hash_type != NETDEV_LAG_HASH_E34 && + lag_upper_info->hash_type != NETDEV_LAG_HASH_UNKNOWN))) { can_offload = false; nfp_flower_cmsg_warn(priv->app, "Unable to offload tx_type %u hash %u\n", diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index bbe5764d26cb..ef2114d13387 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -73,7 +73,7 @@ struct nfp_app; struct nfp_fl_mask_id { struct circ_buf mask_id_free_list; - struct timespec64 *last_used; + ktime_t *last_used; u8 init_unallocated; }; diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c index 93fb809f50d1..c098730544b7 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c +++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c @@ -158,7 +158,6 @@ static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id) { struct nfp_flower_priv *priv = app->priv; struct circ_buf *ring; - struct timespec64 now; ring = &priv->mask_ids.mask_id_free_list; /* Checking if buffer is full. 
*/ @@ -169,8 +168,7 @@ static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id) ring->head = (ring->head + NFP_FLOWER_MASK_ELEMENT_RS) % (NFP_FLOWER_MASK_ENTRY_RS * NFP_FLOWER_MASK_ELEMENT_RS); - getnstimeofday64(&now); - priv->mask_ids.last_used[mask_id] = now; + priv->mask_ids.last_used[mask_id] = ktime_get(); return 0; } @@ -178,7 +176,7 @@ static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id) static int nfp_mask_alloc(struct nfp_app *app, u8 *mask_id) { struct nfp_flower_priv *priv = app->priv; - struct timespec64 delta, now; + ktime_t reuse_timeout; struct circ_buf *ring; u8 temp_id, freed_id; @@ -198,10 +196,10 @@ static int nfp_mask_alloc(struct nfp_app *app, u8 *mask_id) memcpy(&temp_id, &ring->buf[ring->tail], NFP_FLOWER_MASK_ELEMENT_RS); *mask_id = temp_id; - getnstimeofday64(&now); - delta = timespec64_sub(now, priv->mask_ids.last_used[*mask_id]); + reuse_timeout = ktime_add_ns(priv->mask_ids.last_used[*mask_id], + NFP_FL_MASK_REUSE_TIME_NS); - if (timespec64_to_ns(&delta) < NFP_FL_MASK_REUSE_TIME_NS) + if (ktime_before(ktime_get(), reuse_timeout)) goto err_not_found; memcpy(&ring->buf[ring->tail], &freed_id, NFP_FLOWER_MASK_ELEMENT_RS); diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 525057bee0ed..6bc8a97f7e03 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -584,9 +584,9 @@ nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev, return nfp_flower_del_offload(app, netdev, flower, egress); case TC_CLSFLOWER_STATS: return nfp_flower_get_stats(app, netdev, flower, egress); + default: + return -EOPNOTSUPP; } - - return -EOPNOTSUPP; } int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data, @@ -631,14 +631,11 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev, if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) return -EOPNOTSUPP; - if (tcf_block_shared(f->block)) - return -EOPNOTSUPP; - switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, nfp_flower_setup_tc_block_cb, - repr, repr); + repr, repr, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, nfp_flower_setup_tc_block_cb, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c index f28b244f4ee7..69d4ae7a61f3 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c @@ -86,6 +86,23 @@ const char *nfp_app_mip_name(struct nfp_app *app) return nfp_mip_name(app->pf->mip); } +int nfp_app_ndo_init(struct net_device *netdev) +{ + struct nfp_app *app = nfp_app_from_netdev(netdev); + + if (!app || !app->type->ndo_init) + return 0; + return app->type->ndo_init(app, netdev); +} + +void nfp_app_ndo_uninit(struct net_device *netdev) +{ + struct nfp_app *app = nfp_app_from_netdev(netdev); + + if (app && app->type->ndo_uninit) + app->type->ndo_uninit(app, netdev); +} + u64 *nfp_app_port_get_stats(struct nfp_port *port, u64 *data) { if (!port || !port->app || !port->app->type->port_get_stats) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h index ee74caacb015..afbc19aa66a8 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h @@ -78,6 +78,8 @@ extern const struct nfp_app_type app_abm; * @init: perform basic app checks and init * @clean: clean app state * @extra_cap: 
extra capabilities string + * @ndo_init: vNIC and repr netdev .ndo_init + * @ndo_uninit: vNIC and repr netdev .ndo_uninit * @vnic_alloc: allocate vNICs (assign port types, etc.) * @vnic_free: free up app's vNIC state * @vnic_init: vNIC netdev was registered @@ -117,6 +119,9 @@ struct nfp_app_type { const char *(*extra_cap)(struct nfp_app *app, struct nfp_net *nn); + int (*ndo_init)(struct nfp_app *app, struct net_device *netdev); + void (*ndo_uninit)(struct nfp_app *app, struct net_device *netdev); + int (*vnic_alloc)(struct nfp_app *app, struct nfp_net *nn, unsigned int id); void (*vnic_free)(struct nfp_app *app, struct nfp_net *nn); @@ -200,6 +205,9 @@ static inline void nfp_app_clean(struct nfp_app *app) app->type->clean(app); } +int nfp_app_ndo_init(struct net_device *netdev); +void nfp_app_ndo_uninit(struct net_device *netdev); + static inline int nfp_app_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id) { diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h index f6677bc9875a..cdc4e065f6f5 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h @@ -426,4 +426,32 @@ static inline u32 nfp_get_ind_csr_ctx_ptr_offs(u32 read_offset) return (read_offset & ~NFP_IND_ME_CTX_PTR_BASE_MASK) | NFP_CSR_CTX_PTR; } +enum mul_type { + MUL_TYPE_START = 0x00, + MUL_TYPE_STEP_24x8 = 0x01, + MUL_TYPE_STEP_16x16 = 0x02, + MUL_TYPE_STEP_32x32 = 0x03, +}; + +enum mul_step { + MUL_STEP_1 = 0x00, + MUL_STEP_NONE = MUL_STEP_1, + MUL_STEP_2 = 0x01, + MUL_STEP_3 = 0x02, + MUL_STEP_4 = 0x03, + MUL_LAST = 0x04, + MUL_LAST_2 = 0x05, +}; + +#define OP_MUL_BASE 0x0f800000000ULL +#define OP_MUL_A_SRC 0x000000003ffULL +#define OP_MUL_B_SRC 0x000000ffc00ULL +#define OP_MUL_STEP 0x00000700000ULL +#define OP_MUL_DST_AB 0x00000800000ULL +#define OP_MUL_SW 0x00040000000ULL +#define OP_MUL_TYPE 0x00180000000ULL +#define OP_MUL_WR_AB 0x20000000000ULL +#define OP_MUL_SRC_LMEXTN 0x40000000000ULL +#define OP_MUL_DST_LMEXTN 0x80000000000ULL + #endif diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index 152283d7e59c..4a540c5e27fe 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -236,16 +236,20 @@ static int nfp_pcie_sriov_read_nfd_limit(struct nfp_pf *pf) int err; pf->limit_vfs = nfp_rtsym_read_le(pf->rtbl, "nfd_vf_cfg_max_vfs", &err); - if (!err) - return pci_sriov_set_totalvfs(pf->pdev, pf->limit_vfs); + if (err) { + /* For backwards compatibility if symbol not found allow all */ + pf->limit_vfs = ~0; + if (err == -ENOENT) + return 0; - pf->limit_vfs = ~0; - /* Allow any setting for backwards compatibility if symbol not found */ - if (err == -ENOENT) - return 0; + nfp_warn(pf->cpp, "Warning: VF limit read failed: %d\n", err); + return err; + } - nfp_warn(pf->cpp, "Warning: VF limit read failed: %d\n", err); - return err; + err = pci_sriov_set_totalvfs(pf->pdev, pf->limit_vfs); + if (err) + nfp_warn(pf->cpp, "Failed to set VF count in sysfs: %d\n", err); + return 0; } static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index 2a71a9ffd095..439e6ffe2f05 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -250,7 +250,7 @@ struct nfp_net_tx_ring { struct nfp_net_tx_desc *txds; dma_addr_t dma; - unsigned int size;
+ size_t size; bool is_xdp; } ____cacheline_aligned; @@ -350,9 +350,9 @@ struct nfp_net_rx_buf { * @qcp_fl: Pointer to base of the QCP freelist queue * @rxbufs: Array of transmitted FL/RX buffers * @rxds: Virtual address of FL/RX ring in host memory + * @xdp_rxq: RX-ring info avail for XDP * @dma: DMA address of the FL/RX ring * @size: Size, in bytes, of the FL/RX ring (needed to free) - * @xdp_rxq: RX-ring info avail for XDP */ struct nfp_net_rx_ring { struct nfp_net_r_vector *r_vec; @@ -364,14 +364,15 @@ struct nfp_net_rx_ring { u32 idx; int fl_qcidx; - unsigned int size; u8 __iomem *qcp_fl; struct nfp_net_rx_buf *rxbufs; struct nfp_net_rx_desc *rxds; - dma_addr_t dma; struct xdp_rxq_info xdp_rxq; + + dma_addr_t dma; + size_t size; } ____cacheline_aligned; /** @@ -485,7 +486,6 @@ struct nfp_stat_pair { * @dev: Backpointer to struct device * @netdev: Backpointer to net_device structure * @is_vf: Is the driver attached to a VF? - * @bpf_offload_xdp: Offloaded BPF program is XDP * @chained_metadata_format: Firmware will use new metadata format * @rx_dma_dir: Mapping direction for RX buffers * @rx_dma_off: Offset at which DMA packets (for XDP headroom) @@ -510,7 +510,6 @@ struct nfp_net_dp { struct net_device *netdev; u8 is_vf:1; - u8 bpf_offload_xdp:1; u8 chained_metadata_format:1; u8 rx_dma_dir; @@ -553,8 +552,8 @@ struct nfp_net_dp { * @rss_cfg: RSS configuration * @rss_key: RSS secret key * @rss_itbl: RSS indirection table - * @xdp_flags: Flags with which XDP prog was loaded - * @xdp_prog: XDP prog (for ctrl path, both DRV and HW modes) + * @xdp: Information about the driver XDP program + * @xdp_hw: Information about the HW XDP program * @max_r_vecs: Number of allocated interrupt vectors for RX/TX * @max_tx_rings: Maximum number of TX rings supported by the Firmware * @max_rx_rings: Maximum number of RX rings supported by the Firmware @@ -610,8 +609,8 @@ struct nfp_net { u8 rss_key[NFP_NET_CFG_RSS_KEY_SZ]; u8 rss_itbl[NFP_NET_CFG_RSS_ITBL_SZ]; - u32 xdp_flags; - struct bpf_prog *xdp_prog; + struct xdp_attachment_info xdp; + struct xdp_attachment_info xdp_hw; unsigned int max_tx_rings; unsigned int max_rx_rings; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index d4c27f849f9b..7c1a921d178d 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -53,6 +53,8 @@ #include <linux/interrupt.h> #include <linux/ip.h> #include <linux/ipv6.h> +#include <linux/mm.h> +#include <linux/overflow.h> #include <linux/page_ref.h> #include <linux/pci.h> #include <linux/pci_regs.h> @@ -945,11 +947,12 @@ err_free: /** * nfp_net_tx_complete() - Handle completed TX packets - * @tx_ring: TX ring structure + * @tx_ring: TX ring structure + * @budget: NAPI budget (only used as bool to determine if in NAPI context) * * Return: Number of completed TX descriptors */ -static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring) +static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget) { struct nfp_net_r_vector *r_vec = tx_ring->r_vec; struct nfp_net_dp *dp = &r_vec->nfp_net->dp; @@ -999,7 +1002,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring) /* check for last gather fragment */ if (fidx == nr_frags - 1) - dev_consume_skb_any(skb); + napi_consume_skb(skb, budget); tx_ring->txbufs[idx].dma_addr = 0; tx_ring->txbufs[idx].skb = NULL; @@ -1077,7 +1080,7 @@ static bool nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring) * @dp: NFP Net
data path struct * @tx_ring: TX ring structure * - * Assumes that the device is stopped + * Assumes that the device is stopped, must be idempotent. */ static void nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring) @@ -1119,7 +1122,7 @@ nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring) tx_ring->rd_p++; } - memset(tx_ring->txds, 0, sizeof(*tx_ring->txds) * tx_ring->cnt); + memset(tx_ring->txds, 0, tx_ring->size); tx_ring->wr_p = 0; tx_ring->rd_p = 0; tx_ring->qcp_rd_p = 0; @@ -1279,13 +1282,18 @@ static void nfp_net_rx_give_one(const struct nfp_net_dp *dp, * nfp_net_rx_ring_reset() - Reflect in SW state of freelist after disable * @rx_ring: RX ring structure * - * Warning: Do *not* call if ring buffers were never put on the FW freelist - * (i.e. device was not enabled)! + * Assumes that the device is stopped, must be idempotent. */ static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring) { unsigned int wr_idx, last_idx; + /* wr_p == rd_p means ring was never fed FL bufs. RX rings are always + * kept at cnt - 1 FL bufs. + */ + if (rx_ring->wr_p == 0 && rx_ring->rd_p == 0) + return; + /* Move the empty entry to the end of the list */ wr_idx = D_IDX(rx_ring, rx_ring->wr_p); last_idx = rx_ring->cnt - 1; @@ -1294,7 +1302,7 @@ static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring) rx_ring->rxbufs[last_idx].dma_addr = 0; rx_ring->rxbufs[last_idx].frag = NULL; - memset(rx_ring->rxds, 0, sizeof(*rx_ring->rxds) * rx_ring->cnt); + memset(rx_ring->rxds, 0, rx_ring->size); rx_ring->wr_p = 0; rx_ring->rd_p = 0; } @@ -1709,8 +1717,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) } } - if (xdp_prog && !(rxd->rxd.flags & PCIE_DESC_RX_BPF && - dp->bpf_offload_xdp) && !meta.portid) { + if (xdp_prog && !meta.portid) { void *orig_data = rxbuf->frag + pkt_off; unsigned int dma_off; int act; @@ -1828,7 +1835,7 @@ static int nfp_net_poll(struct napi_struct *napi, int budget) unsigned int pkts_polled = 0; if (r_vec->tx_ring) - nfp_net_tx_complete(r_vec->tx_ring); + nfp_net_tx_complete(r_vec->tx_ring, budget); if (r_vec->rx_ring) pkts_polled = nfp_net_rx(r_vec->rx_ring, budget); @@ -2062,7 +2069,7 @@ static void nfp_ctrl_poll(unsigned long arg) struct nfp_net_r_vector *r_vec = (void *)arg; spin_lock_bh(&r_vec->lock); - nfp_net_tx_complete(r_vec->tx_ring); + nfp_net_tx_complete(r_vec->tx_ring, 0); __nfp_ctrl_tx_queued(r_vec); spin_unlock_bh(&r_vec->lock); @@ -2121,7 +2128,7 @@ static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring) struct nfp_net_r_vector *r_vec = tx_ring->r_vec; struct nfp_net_dp *dp = &r_vec->nfp_net->dp; - kfree(tx_ring->txbufs); + kvfree(tx_ring->txbufs); if (tx_ring->txds) dma_free_coherent(dp->dev, tx_ring->size, @@ -2145,18 +2152,17 @@ static int nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring) { struct nfp_net_r_vector *r_vec = tx_ring->r_vec; - int sz; tx_ring->cnt = dp->txd_cnt; - tx_ring->size = sizeof(*tx_ring->txds) * tx_ring->cnt; + tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds)); tx_ring->txds = dma_zalloc_coherent(dp->dev, tx_ring->size, &tx_ring->dma, GFP_KERNEL); if (!tx_ring->txds) goto err_alloc; - sz = sizeof(*tx_ring->txbufs) * tx_ring->cnt; - tx_ring->txbufs = kzalloc(sz, GFP_KERNEL); + tx_ring->txbufs = kvcalloc(tx_ring->cnt, sizeof(*tx_ring->txbufs), + GFP_KERNEL); if (!tx_ring->txbufs) goto err_alloc; @@ -2270,7 +2276,7 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring) if (dp->netdev) 
xdp_rxq_info_unreg(&rx_ring->xdp_rxq); - kfree(rx_ring->rxbufs); + kvfree(rx_ring->rxbufs); if (rx_ring->rxds) dma_free_coherent(dp->dev, rx_ring->size, @@ -2293,7 +2299,7 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring) static int nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring) { - int sz, err; + int err; if (dp->netdev) { err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev, @@ -2303,14 +2309,14 @@ nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring) } rx_ring->cnt = dp->rxd_cnt; - rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt; + rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds)); rx_ring->rxds = dma_zalloc_coherent(dp->dev, rx_ring->size, &rx_ring->dma, GFP_KERNEL); if (!rx_ring->rxds) goto err_alloc; - sz = sizeof(*rx_ring->rxbufs) * rx_ring->cnt; - rx_ring->rxbufs = kzalloc(sz, GFP_KERNEL); + rx_ring->rxbufs = kvcalloc(rx_ring->cnt, sizeof(*rx_ring->rxbufs), + GFP_KERNEL); if (!rx_ring->rxbufs) goto err_alloc; @@ -2508,6 +2514,8 @@ static void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx) /** * nfp_net_clear_config_and_disable() - Clear control BAR and disable NFP * @nn: NFP Net device to reconfigure + * + * Warning: must be fully idempotent. */ static void nfp_net_clear_config_and_disable(struct nfp_net *nn) { @@ -3115,6 +3123,21 @@ nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL); } +#ifdef CONFIG_NET_POLL_CONTROLLER +static void nfp_net_netpoll(struct net_device *netdev) +{ + struct nfp_net *nn = netdev_priv(netdev); + int i; + + /* nfp_net's NAPIs are statically allocated so even if there is a race + * with reconfig path this will simply try to schedule some disabled + * NAPI instances. + */ + for (i = 0; i < nn->dp.num_stack_tx_rings; i++) + napi_schedule_irqoff(&nn->r_vecs[i].napi); +} +#endif + static void nfp_net_stat64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { @@ -3377,14 +3400,18 @@ static void nfp_net_del_vxlan_port(struct net_device *netdev, nfp_net_set_vxlan_port(nn, idx, 0); } -static int -nfp_net_xdp_setup_drv(struct nfp_net *nn, struct bpf_prog *prog, - struct netlink_ext_ack *extack) +static int nfp_net_xdp_setup_drv(struct nfp_net *nn, struct netdev_bpf *bpf) { + struct bpf_prog *prog = bpf->prog; struct nfp_net_dp *dp; + int err; + + if (!xdp_attachment_flags_ok(&nn->xdp, bpf)) + return -EBUSY; if (!prog == !nn->dp.xdp_prog) { WRITE_ONCE(nn->dp.xdp_prog, prog); + xdp_attachment_setup(&nn->xdp, bpf); return 0; } @@ -3398,38 +3425,26 @@ nfp_net_xdp_setup_drv(struct nfp_net *nn, struct bpf_prog *prog, dp->rx_dma_off = prog ? XDP_PACKET_HEADROOM - nn->dp.rx_offset : 0; /* We need RX reconfig to remap the buffers (BIDIR vs FROM_DEV) */ - return nfp_net_ring_reconfig(nn, dp, extack); + err = nfp_net_ring_reconfig(nn, dp, bpf->extack); + if (err) + return err; + + xdp_attachment_setup(&nn->xdp, bpf); + return 0; } -static int -nfp_net_xdp_setup(struct nfp_net *nn, struct bpf_prog *prog, u32 flags, - struct netlink_ext_ack *extack) +static int nfp_net_xdp_setup_hw(struct nfp_net *nn, struct netdev_bpf *bpf) { - struct bpf_prog *drv_prog, *offload_prog; int err; - if (nn->xdp_prog && (flags ^ nn->xdp_flags) & XDP_FLAGS_MODES) + if (!xdp_attachment_flags_ok(&nn->xdp_hw, bpf)) return -EBUSY; - /* Load both when no flags set to allow easy activation of driver path - * when program is replaced by one which can't be offloaded. 
- */ - drv_prog = flags & XDP_FLAGS_HW_MODE ? NULL : prog; - offload_prog = flags & XDP_FLAGS_DRV_MODE ? NULL : prog; - - err = nfp_net_xdp_setup_drv(nn, drv_prog, extack); + err = nfp_app_xdp_offload(nn->app, nn, bpf->prog, bpf->extack); if (err) return err; - err = nfp_app_xdp_offload(nn->app, nn, offload_prog, extack); - if (err && flags & XDP_FLAGS_HW_MODE) - return err; - - if (nn->xdp_prog) - bpf_prog_put(nn->xdp_prog); - nn->xdp_prog = prog; - nn->xdp_flags = flags; - + xdp_attachment_setup(&nn->xdp_hw, bpf); return 0; } @@ -3439,16 +3454,13 @@ static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp) switch (xdp->command) { case XDP_SETUP_PROG: + return nfp_net_xdp_setup_drv(nn, xdp); case XDP_SETUP_PROG_HW: - return nfp_net_xdp_setup(nn, xdp->prog, xdp->flags, - xdp->extack); + return nfp_net_xdp_setup_hw(nn, xdp); case XDP_QUERY_PROG: - xdp->prog_attached = !!nn->xdp_prog; - if (nn->dp.bpf_offload_xdp) - xdp->prog_attached = XDP_ATTACHED_HW; - xdp->prog_id = nn->xdp_prog ? nn->xdp_prog->aux->id : 0; - xdp->prog_flags = nn->xdp_prog ? nn->xdp_flags : 0; - return 0; + return xdp_attachment_query(&nn->xdp, xdp); + case XDP_QUERY_PROG_HW: + return xdp_attachment_query(&nn->xdp_hw, xdp); default: return nfp_app_bpf(nn->app, nn, xdp); } @@ -3476,12 +3488,17 @@ static int nfp_net_set_mac_address(struct net_device *netdev, void *addr) } const struct net_device_ops nfp_net_netdev_ops = { + .ndo_init = nfp_app_ndo_init, + .ndo_uninit = nfp_app_ndo_uninit, .ndo_open = nfp_net_netdev_open, .ndo_stop = nfp_net_netdev_close, .ndo_start_xmit = nfp_net_tx, .ndo_get_stats64 = nfp_net_stat64, .ndo_vlan_rx_add_vid = nfp_net_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = nfp_net_netpoll, +#endif .ndo_set_vf_mac = nfp_app_set_vf_mac, .ndo_set_vf_vlan = nfp_app_set_vf_vlan, .ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 26d1cc4e2906..6a79c8e4a7a4 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -233,12 +233,10 @@ nfp_net_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) static void nfp_app_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { - struct nfp_app *app; - - app = nfp_app_from_netdev(netdev); - if (!app) - return; + struct nfp_app *app = nfp_app_from_netdev(netdev); + strlcpy(drvinfo->bus_info, pci_name(app->pdev), + sizeof(drvinfo->bus_info)); nfp_get_drvinfo(app, app->pdev, "*", drvinfo); } @@ -452,7 +450,7 @@ static unsigned int nfp_vnic_get_sw_stats_count(struct net_device *netdev) { struct nfp_net *nn = netdev_priv(netdev); - return NN_RVEC_GATHER_STATS + nn->dp.num_r_vecs * NN_RVEC_PER_Q_STATS; + return NN_RVEC_GATHER_STATS + nn->max_r_vecs * NN_RVEC_PER_Q_STATS; } static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data) @@ -460,7 +458,7 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data) struct nfp_net *nn = netdev_priv(netdev); int i; - for (i = 0; i < nn->dp.num_r_vecs; i++) { + for (i = 0; i < nn->max_r_vecs; i++) { data = nfp_pr_et(data, "rvec_%u_rx_pkts", i); data = nfp_pr_et(data, "rvec_%u_tx_pkts", i); data = nfp_pr_et(data, "rvec_%u_tx_busy", i); @@ -486,7 +484,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data) u64 tmp[NN_RVEC_GATHER_STATS]; unsigned int i, j; - 
for (i = 0; i < nn->dp.num_r_vecs; i++) { + for (i = 0; i < nn->max_r_vecs; i++) { unsigned int start; do { @@ -521,15 +519,13 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data) return data; } -static unsigned int -nfp_vnic_get_hw_stats_count(unsigned int rx_rings, unsigned int tx_rings) +static unsigned int nfp_vnic_get_hw_stats_count(unsigned int num_vecs) { - return NN_ET_GLOBAL_STATS_LEN + (rx_rings + tx_rings) * 2; + return NN_ET_GLOBAL_STATS_LEN + num_vecs * 4; } static u8 * -nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int rx_rings, - unsigned int tx_rings, bool repr) +nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int num_vecs, bool repr) { int swap_off, i; @@ -549,36 +545,29 @@ nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int rx_rings, for (i = NN_ET_SWITCH_STATS_LEN * 2; i < NN_ET_GLOBAL_STATS_LEN; i++) data = nfp_pr_et(data, nfp_net_et_stats[i].name); - for (i = 0; i < tx_rings; i++) { - data = nfp_pr_et(data, "txq_%u_pkts", i); - data = nfp_pr_et(data, "txq_%u_bytes", i); - } - - for (i = 0; i < rx_rings; i++) { + for (i = 0; i < num_vecs; i++) { data = nfp_pr_et(data, "rxq_%u_pkts", i); data = nfp_pr_et(data, "rxq_%u_bytes", i); + data = nfp_pr_et(data, "txq_%u_pkts", i); + data = nfp_pr_et(data, "txq_%u_bytes", i); } return data; } static u64 * -nfp_vnic_get_hw_stats(u64 *data, u8 __iomem *mem, - unsigned int rx_rings, unsigned int tx_rings) +nfp_vnic_get_hw_stats(u64 *data, u8 __iomem *mem, unsigned int num_vecs) { unsigned int i; for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++) *data++ = readq(mem + nfp_net_et_stats[i].off); - for (i = 0; i < tx_rings; i++) { - *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i)); - *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i) + 8); - } - - for (i = 0; i < rx_rings; i++) { + for (i = 0; i < num_vecs; i++) { *data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i)); *data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i) + 8); + *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i)); + *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i) + 8); } return data; @@ -633,8 +622,7 @@ static void nfp_net_get_strings(struct net_device *netdev, switch (stringset) { case ETH_SS_STATS: data = nfp_vnic_get_sw_stats_strings(netdev, data); - data = nfp_vnic_get_hw_stats_strings(data, nn->dp.num_rx_rings, - nn->dp.num_tx_rings, + data = nfp_vnic_get_hw_stats_strings(data, nn->max_r_vecs, false); data = nfp_mac_get_stats_strings(netdev, data); data = nfp_app_port_get_stats_strings(nn->port, data); @@ -649,8 +637,7 @@ nfp_net_get_stats(struct net_device *netdev, struct ethtool_stats *stats, struct nfp_net *nn = netdev_priv(netdev); data = nfp_vnic_get_sw_stats(netdev, data); - data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar, - nn->dp.num_rx_rings, nn->dp.num_tx_rings); + data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar, nn->max_r_vecs); data = nfp_mac_get_stats(netdev, data); data = nfp_app_port_get_stats(nn->port, data); } @@ -662,8 +649,7 @@ static int nfp_net_get_sset_count(struct net_device *netdev, int sset) switch (sset) { case ETH_SS_STATS: return nfp_vnic_get_sw_stats_count(netdev) + - nfp_vnic_get_hw_stats_count(nn->dp.num_rx_rings, - nn->dp.num_tx_rings) + + nfp_vnic_get_hw_stats_count(nn->max_r_vecs) + nfp_mac_get_stats_count(netdev) + nfp_app_port_get_stats_count(nn->port); default: @@ -679,7 +665,7 @@ static void nfp_port_get_strings(struct net_device *netdev, switch (stringset) { case ETH_SS_STATS: if (nfp_port_is_vnic(port)) - data = nfp_vnic_get_hw_stats_strings(data, 0, 0, true); + data = nfp_vnic_get_hw_stats_strings(data, 0, true); else 
data = nfp_mac_get_stats_strings(netdev, data); data = nfp_app_port_get_stats_strings(port, data); @@ -694,7 +680,7 @@ nfp_port_get_stats(struct net_device *netdev, struct ethtool_stats *stats, struct nfp_port *port = nfp_port_from_netdev(netdev); if (nfp_port_is_vnic(port)) - data = nfp_vnic_get_hw_stats(data, port->vnic, 0, 0); + data = nfp_vnic_get_hw_stats(data, port->vnic, 0); else data = nfp_mac_get_stats(netdev, data); data = nfp_app_port_get_stats(port, data); @@ -708,7 +694,7 @@ static int nfp_port_get_sset_count(struct net_device *netdev, int sset) struct nfp_port *port = nfp_port_from_netdev(netdev); switch (sset) { case ETH_SS_STATS: if (nfp_port_is_vnic(port)) - count = nfp_vnic_get_hw_stats_count(0, 0); + count = nfp_vnic_get_hw_stats_count(0); else count = nfp_mac_get_stats_count(netdev); count += nfp_app_port_get_stats_count(port); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index d7b712f6362f..18a09cdcd9c6 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c @@ -262,6 +262,8 @@ err_port_disable: } const struct net_device_ops nfp_repr_netdev_ops = { + .ndo_init = nfp_app_ndo_init, + .ndo_uninit = nfp_app_ndo_uninit, .ndo_open = nfp_repr_open, .ndo_stop = nfp_repr_stop, .ndo_start_xmit = nfp_repr_xmit, diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c index 749655c329b2..c8d0b1016a64 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c @@ -1248,7 +1248,7 @@ static void nfp6000_free(struct nfp_cpp *cpp) kfree(nfp); } -static void nfp6000_read_serial(struct device *dev, u8 *serial) +static int nfp6000_read_serial(struct device *dev, u8 *serial) { struct pci_dev *pdev = to_pci_dev(dev); int pos; @@ -1256,25 +1256,29 @@ static void nfp6000_read_serial(struct device *dev, u8 *serial) pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN); if (!pos) { - memset(serial, 0, NFP_SERIAL_LEN); - return; + dev_err(dev, "can't find PCIe Serial Number Capability\n"); + return -EINVAL; } pci_read_config_dword(pdev, pos + 4, &reg); put_unaligned_be16(reg >> 16, serial + 4); pci_read_config_dword(pdev, pos + 8, &reg); put_unaligned_be32(reg, serial); + + return 0; } -static u16 nfp6000_get_interface(struct device *dev) +static int nfp6000_get_interface(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); int pos; u32 reg; pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN); - if (!pos) - return NFP_CPP_INTERFACE(NFP_CPP_INTERFACE_TYPE_PCI, 0, 0xff); + if (!pos) { + dev_err(dev, "can't find PCIe Serial Number Capability\n"); + return -EINVAL; + } pci_read_config_dword(pdev, pos + 4, &reg); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h index b0da3d436850..c338d539fa96 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h @@ -364,8 +364,8 @@ struct nfp_cpp_operations { int (*init)(struct nfp_cpp *cpp); void (*free)(struct nfp_cpp *cpp); - void (*read_serial)(struct device *dev, u8 *serial); - u16 (*get_interface)(struct device *dev); + int (*read_serial)(struct device *dev, u8 *serial); + int (*get_interface)(struct device *dev); int (*area_init)(struct nfp_cpp_area *area, u32 dest, unsigned long long address, diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c index ef30597aa319..73de57a09800 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c @@ -1163,10 +1163,10 @@ nfp_cpp_from_operations(const struct nfp_cpp_operations *ops, { const u32 arm = NFP_CPP_ID(NFP_CPP_TARGET_ARM, NFP_CPP_ACTION_RW, 0); struct nfp_cpp *cpp; + int ifc, err; u32 mask[2]; u32 xpbaddr; size_t tgt; - int err; cpp = kzalloc(sizeof(*cpp), GFP_KERNEL); if (!cpp) { @@ -1176,9 +1176,19 @@ nfp_cpp_from_operations(const struct nfp_cpp_operations *ops, cpp->op = ops; cpp->priv = priv; - cpp->interface = ops->get_interface(parent); - if (ops->read_serial) - ops->read_serial(parent, cpp->serial); + + ifc = ops->get_interface(parent); + if (ifc < 0) { + err = ifc; + goto err_free_cpp; + } + cpp->interface = ifc; + if (ops->read_serial) { + err = ops->read_serial(parent, cpp->serial); + if (err) + goto err_free_cpp; + } + rwlock_init(&cpp->resource_lock); init_waitqueue_head(&cpp->waitq); lockdep_set_class(&cpp->resource_lock, &nfp_cpp_resource_lock_key); @@ -1191,7 +1201,7 @@ nfp_cpp_from_operations(const struct nfp_cpp_operations *ops, err = device_register(&cpp->dev); if (err < 0) { put_device(&cpp->dev); - goto err_dev; + goto err_free_cpp; } dev_set_drvdata(&cpp->dev, cpp); @@ -1238,7 +1248,7 @@ nfp_cpp_from_operations(const struct nfp_cpp_operations *ops, err_out: device_unregister(&cpp->dev); -err_dev: +err_free_cpp: kfree(cpp); err_malloc: return ERR_PTR(err); diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 7cbd0174459c..1d9b0d44ddb6 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -5777,7 +5777,7 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) (np->rx_ring_size + np->tx_ring_size), &np->ring_addr, - GFP_ATOMIC); + GFP_KERNEL); if (!np->rx_ring.orig) goto out_unmap; np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; @@ -5786,7 +5786,7 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size), - &np->ring_addr, GFP_ATOMIC); + &np->ring_addr, GFP_KERNEL); if (!np->rx_ring.ex) goto out_unmap; np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Makefile b/drivers/net/ethernet/oki-semi/pch_gbe/Makefile index 31288d4ad248..862de0f3bc41 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/Makefile +++ b/drivers/net/ethernet/oki-semi/pch_gbe/Makefile @@ -1,4 +1,4 @@ obj-$(CONFIG_PCH_GBE) += pch_gbe.o pch_gbe-y := pch_gbe_phy.o pch_gbe_ethtool.o pch_gbe_param.o -pch_gbe-y += pch_gbe_api.o pch_gbe_main.o +pch_gbe-y += pch_gbe_main.o diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h index 697e29dd4bd3..44c2f291e766 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h @@ -326,32 +326,6 @@ struct pch_gbe_regs { #define PCH_GBE_FC_FULL 3 #define PCH_GBE_FC_DEFAULT PCH_GBE_FC_FULL - -struct pch_gbe_hw; -/** - * struct pch_gbe_functions - HAL APi function pointer - * @get_bus_info: for pch_gbe_hal_get_bus_info - * @init_hw: for pch_gbe_hal_init_hw - * @read_phy_reg: for pch_gbe_hal_read_phy_reg - * @write_phy_reg: for pch_gbe_hal_write_phy_reg - * @reset_phy: for pch_gbe_hal_phy_hw_reset - * @sw_reset_phy: for pch_gbe_hal_phy_sw_reset - * @power_up_phy: for 
pch_gbe_hal_power_up_phy - * @power_down_phy: for pch_gbe_hal_power_down_phy - * @read_mac_addr: for pch_gbe_hal_read_mac_addr - */ -struct pch_gbe_functions { - void (*get_bus_info) (struct pch_gbe_hw *); - s32 (*init_hw) (struct pch_gbe_hw *); - s32 (*read_phy_reg) (struct pch_gbe_hw *, u32, u16 *); - s32 (*write_phy_reg) (struct pch_gbe_hw *, u32, u16); - void (*reset_phy) (struct pch_gbe_hw *); - void (*sw_reset_phy) (struct pch_gbe_hw *); - void (*power_up_phy) (struct pch_gbe_hw *hw); - void (*power_down_phy) (struct pch_gbe_hw *hw); - s32 (*read_mac_addr) (struct pch_gbe_hw *); -}; - /** * struct pch_gbe_mac_info - MAC information * @addr[6]: Store the MAC address @@ -394,17 +368,6 @@ struct pch_gbe_phy_info { /*! * @ingroup Gigabit Ether driver Layer - * @struct pch_gbe_bus_info - * @brief Bus information - */ -struct pch_gbe_bus_info { - u8 type; - u8 speed; - u8 width; -}; - -/*! - * @ingroup Gigabit Ether driver Layer * @struct pch_gbe_hw * @brief Hardware information */ @@ -414,10 +377,8 @@ struct pch_gbe_hw { struct pch_gbe_regs __iomem *reg; spinlock_t miim_lock; - const struct pch_gbe_functions *func; struct pch_gbe_mac_info mac; struct pch_gbe_phy_info phy; - struct pch_gbe_bus_info bus; }; /** @@ -680,7 +641,6 @@ void pch_gbe_set_ethtool_ops(struct net_device *netdev); /* pch_gbe_mac.c */ s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw); -s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw); u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg, u16 data); #endif /* _PCH_GBE_H_ */ diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c deleted file mode 100644 index 51250363566b..000000000000 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c +++ /dev/null @@ -1,262 +0,0 @@ -/* - * Copyright (C) 1999 - 2010 Intel Corporation. - * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD. - * - * This code was derived from the Intel e1000e Linux driver. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. 
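The pch_gbe changes here remove an entire HAL layer: an ops table (struct pch_gbe_functions) that only ever had the one pch_gbe_ops implementation, plus the pch_gbe_api.c wrappers that guarded every call with a NULL check. A minimal standalone sketch of the pattern being removed, using hypothetical names (example_hw, example_ops) rather than the driver's own:

#include <stdio.h>

struct example_hw;

struct example_ops {
        int (*init_hw)(struct example_hw *hw);
};

/* The only implementation the table ever pointed at. */
static int example_plat_init_hw(struct example_hw *hw)
{
        (void)hw;
        return 0;
}

static const struct example_ops example_ops_table = {
        .init_hw = example_plat_init_hw,
};

struct example_hw {
        const struct example_ops *func;
};

int main(void)
{
        struct example_hw hw = { .func = &example_ops_table };

        /* Before: indirect call, plus a guard for an op that is never NULL. */
        if (hw.func->init_hw)
                printf("indirect: %d\n", hw.func->init_hw(&hw));

        /* After: with a single implementation, a direct call is simpler and
         * can be inlined.
         */
        printf("direct: %d\n", example_plat_init_hw(&hw));
        return 0;
}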
- */ -#include "pch_gbe.h" -#include "pch_gbe_phy.h" -#include "pch_gbe_api.h" - -/* bus type values */ -#define pch_gbe_bus_type_unknown 0 -#define pch_gbe_bus_type_pci 1 -#define pch_gbe_bus_type_pcix 2 -#define pch_gbe_bus_type_pci_express 3 -#define pch_gbe_bus_type_reserved 4 - -/* bus speed values */ -#define pch_gbe_bus_speed_unknown 0 -#define pch_gbe_bus_speed_33 1 -#define pch_gbe_bus_speed_66 2 -#define pch_gbe_bus_speed_100 3 -#define pch_gbe_bus_speed_120 4 -#define pch_gbe_bus_speed_133 5 -#define pch_gbe_bus_speed_2500 6 -#define pch_gbe_bus_speed_reserved 7 - -/* bus width values */ -#define pch_gbe_bus_width_unknown 0 -#define pch_gbe_bus_width_pcie_x1 1 -#define pch_gbe_bus_width_pcie_x2 2 -#define pch_gbe_bus_width_pcie_x4 4 -#define pch_gbe_bus_width_32 5 -#define pch_gbe_bus_width_64 6 -#define pch_gbe_bus_width_reserved 7 - -/** - * pch_gbe_plat_get_bus_info - Obtain bus information for adapter - * @hw: Pointer to the HW structure - */ -static void pch_gbe_plat_get_bus_info(struct pch_gbe_hw *hw) -{ - hw->bus.type = pch_gbe_bus_type_pci_express; - hw->bus.speed = pch_gbe_bus_speed_2500; - hw->bus.width = pch_gbe_bus_width_pcie_x1; -} - -/** - * pch_gbe_plat_init_hw - Initialize hardware - * @hw: Pointer to the HW structure - * Returns: - * 0: Successfully - * Negative value: Failed-EBUSY - */ -static s32 pch_gbe_plat_init_hw(struct pch_gbe_hw *hw) -{ - s32 ret_val; - - ret_val = pch_gbe_phy_get_id(hw); - if (ret_val) { - struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); - - netdev_err(adapter->netdev, "pch_gbe_phy_get_id error\n"); - return ret_val; - } - pch_gbe_phy_init_setting(hw); - /* Setup Mac interface option RGMII */ -#ifdef PCH_GBE_MAC_IFOP_RGMII - pch_gbe_phy_set_rgmii(hw); -#endif - return ret_val; -} - -static const struct pch_gbe_functions pch_gbe_ops = { - .get_bus_info = pch_gbe_plat_get_bus_info, - .init_hw = pch_gbe_plat_init_hw, - .read_phy_reg = pch_gbe_phy_read_reg_miic, - .write_phy_reg = pch_gbe_phy_write_reg_miic, - .reset_phy = pch_gbe_phy_hw_reset, - .sw_reset_phy = pch_gbe_phy_sw_reset, - .power_up_phy = pch_gbe_phy_power_up, - .power_down_phy = pch_gbe_phy_power_down, - .read_mac_addr = pch_gbe_mac_read_mac_addr -}; - -/** - * pch_gbe_plat_init_function_pointers - Init func ptrs - * @hw: Pointer to the HW structure - */ -static void pch_gbe_plat_init_function_pointers(struct pch_gbe_hw *hw) -{ - /* Set PHY parameter */ - hw->phy.reset_delay_us = PCH_GBE_PHY_RESET_DELAY_US; - /* Set function pointers */ - hw->func = &pch_gbe_ops; -} - -/** - * pch_gbe_hal_setup_init_funcs - Initializes function pointers - * @hw: Pointer to the HW structure - * Returns: - * 0: Successfully - * ENOSYS: Function is not registered - */ -s32 pch_gbe_hal_setup_init_funcs(struct pch_gbe_hw *hw) -{ - if (!hw->reg) { - struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); - - netdev_err(adapter->netdev, "ERROR: Registers not mapped\n"); - return -ENOSYS; - } - pch_gbe_plat_init_function_pointers(hw); - return 0; -} - -/** - * pch_gbe_hal_get_bus_info - Obtain bus information for adapter - * @hw: Pointer to the HW structure - */ -void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw) -{ - if (!hw->func->get_bus_info) { - struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); - - netdev_err(adapter->netdev, "ERROR: configuration\n"); - return; - } - hw->func->get_bus_info(hw); -} - -/** - * pch_gbe_hal_init_hw - Initialize hardware - * @hw: Pointer to the HW structure - * Returns: - * 0: Successfully - * ENOSYS: Function is not registered - */ -s32 
pch_gbe_hal_init_hw(struct pch_gbe_hw *hw) -{ - if (!hw->func->init_hw) { - struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); - - netdev_err(adapter->netdev, "ERROR: configuration\n"); - return -ENOSYS; - } - return hw->func->init_hw(hw); -} - -/** - * pch_gbe_hal_read_phy_reg - Reads PHY register - * @hw: Pointer to the HW structure - * @offset: The register to read - * @data: The buffer to store the 16-bit read. - * Returns: - * 0: Successfully - * Negative value: Failed - */ -s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset, - u16 *data) -{ - if (!hw->func->read_phy_reg) - return 0; - return hw->func->read_phy_reg(hw, offset, data); -} - -/** - * pch_gbe_hal_write_phy_reg - Writes PHY register - * @hw: Pointer to the HW structure - * @offset: The register to read - * @data: The value to write. - * Returns: - * 0: Successfully - * Negative value: Failed - */ -s32 pch_gbe_hal_write_phy_reg(struct pch_gbe_hw *hw, u32 offset, - u16 data) -{ - if (!hw->func->write_phy_reg) - return 0; - return hw->func->write_phy_reg(hw, offset, data); -} - -/** - * pch_gbe_hal_phy_hw_reset - Hard PHY reset - * @hw: Pointer to the HW structure - */ -void pch_gbe_hal_phy_hw_reset(struct pch_gbe_hw *hw) -{ - if (!hw->func->reset_phy) { - struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); - - netdev_err(adapter->netdev, "ERROR: configuration\n"); - return; - } - hw->func->reset_phy(hw); -} - -/** - * pch_gbe_hal_phy_sw_reset - Soft PHY reset - * @hw: Pointer to the HW structure - */ -void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw) -{ - if (!hw->func->sw_reset_phy) { - struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); - - netdev_err(adapter->netdev, "ERROR: configuration\n"); - return; - } - hw->func->sw_reset_phy(hw); -} - -/** - * pch_gbe_hal_read_mac_addr - Reads MAC address - * @hw: Pointer to the HW structure - * Returns: - * 0: Successfully - * ENOSYS: Function is not registered - */ -s32 pch_gbe_hal_read_mac_addr(struct pch_gbe_hw *hw) -{ - if (!hw->func->read_mac_addr) { - struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); - - netdev_err(adapter->netdev, "ERROR: configuration\n"); - return -ENOSYS; - } - return hw->func->read_mac_addr(hw); -} - -/** - * pch_gbe_hal_power_up_phy - Power up PHY - * @hw: Pointer to the HW structure - */ -void pch_gbe_hal_power_up_phy(struct pch_gbe_hw *hw) -{ - if (hw->func->power_up_phy) - hw->func->power_up_phy(hw); -} - -/** - * pch_gbe_hal_power_down_phy - Power down PHY - * @hw: Pointer to the HW structure - */ -void pch_gbe_hal_power_down_phy(struct pch_gbe_hw *hw) -{ - if (hw->func->power_down_phy) - hw->func->power_down_phy(hw); -} diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h deleted file mode 100644 index 91ce07c8306c..000000000000 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright (C) 1999 - 2010 Intel Corporation. - * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD. - * - * This code was derived from the Intel e1000e Linux driver. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. - */ -#ifndef _PCH_GBE_API_H_ -#define _PCH_GBE_API_H_ - -#include "pch_gbe_phy.h" - -s32 pch_gbe_hal_setup_init_funcs(struct pch_gbe_hw *hw); -void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw); -s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw); -s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset, u16 *data); -s32 pch_gbe_hal_write_phy_reg(struct pch_gbe_hw *hw, u32 offset, u16 data); -void pch_gbe_hal_phy_hw_reset(struct pch_gbe_hw *hw); -void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw); -s32 pch_gbe_hal_read_mac_addr(struct pch_gbe_hw *hw); -void pch_gbe_hal_power_up_phy(struct pch_gbe_hw *hw); -void pch_gbe_hal_power_down_phy(struct pch_gbe_hw *hw); - -#endif diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c index 731ce1e419e4..adaa0024adfe 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c @@ -17,7 +17,7 @@ * along with this program; if not, see <http://www.gnu.org/licenses/>. */ #include "pch_gbe.h" -#include "pch_gbe_api.h" +#include "pch_gbe_phy.h" /** * pch_gbe_stats - Stats item information @@ -125,7 +125,7 @@ static int pch_gbe_set_link_ksettings(struct net_device *netdev, u32 advertising; int ret; - pch_gbe_hal_write_phy_reg(hw, MII_BMCR, BMCR_RESET); + pch_gbe_phy_write_reg_miic(hw, MII_BMCR, BMCR_RESET); memcpy(&copy_ecmd, ecmd, sizeof(*ecmd)); @@ -204,7 +204,7 @@ static void pch_gbe_get_regs(struct net_device *netdev, *regs_buff++ = ioread32(&hw->reg->INT_ST + i); /* PHY register */ for (i = 0; i < PCH_GBE_PHY_REGS_LEN; i++) { - pch_gbe_hal_read_phy_reg(&adapter->hw, i, &tmp); + pch_gbe_phy_read_reg_miic(&adapter->hw, i, &tmp); *regs_buff++ = tmp; } } @@ -349,25 +349,12 @@ static int pch_gbe_set_ringparam(struct net_device *netdev, err = pch_gbe_setup_tx_resources(adapter, adapter->tx_ring); if (err) goto err_setup_tx; - /* save the new, restore the old in order to free it, * then restore the new back again */ -#ifdef RINGFREE - adapter->rx_ring = rx_old; - adapter->tx_ring = tx_old; - pch_gbe_free_rx_resources(adapter, adapter->rx_ring); - pch_gbe_free_tx_resources(adapter, adapter->tx_ring); - kfree(tx_old); - kfree(rx_old); - adapter->rx_ring = rxdr; - adapter->tx_ring = txdr; -#else pch_gbe_free_rx_resources(adapter, rx_old); pch_gbe_free_tx_resources(adapter, tx_old); kfree(tx_old); kfree(rx_old); adapter->rx_ring = rxdr; adapter->tx_ring = txdr; -#endif err = pch_gbe_up(adapter); } return err; diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 34a1581eda95..43c0c10dfeb7 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -18,7 +18,7 @@ */ #include "pch_gbe.h" -#include "pch_gbe_api.h" +#include "pch_gbe_phy.h" #include <linux/module.h> #include <linux/net_tstamp.h> #include <linux/ptp_classify.h> @@ -34,7 +34,6 @@ const char pch_driver_version[] = DRV_VERSION; #define PCH_GBE_DMA_ALIGN 0 #define PCH_GBE_DMA_PADDING 2 #define PCH_GBE_WATCHDOG_PERIOD (5 * HZ) /* watchdog time */ -#define PCH_GBE_COPYBREAK_DEFAULT 256 #define PCH_GBE_PCI_BAR 1 #define PCH_GBE_RESERVE_MEMORY 0x200000 /* 2MB */ @@ -113,8 +112,6 @@ const char pch_driver_version[] =
DRV_VERSION; #define MINNOW_PHY_RESET_GPIO 13 -static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT; - static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg); static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg, int data); @@ -290,7 +287,7 @@ static inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw) * Returns: * 0: Successful. */ -s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw) +static s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw) { struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); u32 adr1a, adr1b; @@ -369,9 +366,7 @@ static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw) /* Read the MAC address. and store to the private data */ pch_gbe_mac_read_mac_addr(hw); iowrite32(PCH_GBE_ALL_RST, &hw->reg->RESET); -#ifdef PCH_GBE_MAC_IFOP_RGMII iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE); -#endif pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST); /* Setup the receive addresses */ pch_gbe_mac_mar_set(hw, hw->mac.addr, 0); @@ -416,44 +411,6 @@ static void pch_gbe_mac_init_rx_addrs(struct pch_gbe_hw *hw, u16 mar_count) pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY); } - -/** - * pch_gbe_mac_mc_addr_list_update - Update Multicast addresses - * @hw: Pointer to the HW structure - * @mc_addr_list: Array of multicast addresses to program - * @mc_addr_count: Number of multicast addresses to program - * @mar_used_count: The first MAC Address register free to program - * @mar_total_num: Total number of supported MAC Address Registers - */ -static void pch_gbe_mac_mc_addr_list_update(struct pch_gbe_hw *hw, - u8 *mc_addr_list, u32 mc_addr_count, - u32 mar_used_count, u32 mar_total_num) -{ - u32 i, adrmask; - - /* Load the first set of multicast addresses into the exact - * filters (RAR). If there are not enough to fill the RAR - * array, clear the filters. - */ - for (i = mar_used_count; i < mar_total_num; i++) { - if (mc_addr_count) { - pch_gbe_mac_mar_set(hw, mc_addr_list, i); - mc_addr_count--; - mc_addr_list += ETH_ALEN; - } else { - /* Clear MAC address mask */ - adrmask = ioread32(&hw->reg->ADDR_MASK); - iowrite32((adrmask | (0x0001 << i)), - &hw->reg->ADDR_MASK); - /* wait busy */ - pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY); - /* Clear MAC address */ - iowrite32(0, &hw->reg->mac_adr[i].high); - iowrite32(0, &hw->reg->mac_adr[i].low); - } - } -} - /** * pch_gbe_mac_force_mac_fc - Force the MAC's flow control settings * @hw: Pointer to the HW structure @@ -763,14 +720,23 @@ void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter) void pch_gbe_reset(struct pch_gbe_adapter *adapter) { struct net_device *netdev = adapter->netdev; + struct pch_gbe_hw *hw = &adapter->hw; + s32 ret_val; - pch_gbe_mac_reset_hw(&adapter->hw); + pch_gbe_mac_reset_hw(hw); /* reprogram multicast address register after reset */ pch_gbe_set_multi(netdev); /* Setup the receive address. */ - pch_gbe_mac_init_rx_addrs(&adapter->hw, PCH_GBE_MAR_ENTRIES); - if (pch_gbe_hal_init_hw(&adapter->hw)) - netdev_err(netdev, "Hardware Error\n"); + pch_gbe_mac_init_rx_addrs(hw, PCH_GBE_MAR_ENTRIES); + + ret_val = pch_gbe_phy_get_id(hw); + if (ret_val) { + netdev_err(adapter->netdev, "pch_gbe_phy_get_id error\n"); + return; + } + pch_gbe_phy_init_setting(hw); + /* Setup Mac interface option RGMII */ + pch_gbe_phy_set_rgmii(hw); } /** @@ -1036,7 +1002,6 @@ static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed, unsigned long rgmii = 0; /* Set the RGMII control. 
*/ -#ifdef PCH_GBE_MAC_IFOP_RGMII switch (speed) { case SPEED_10: rgmii = (PCH_GBE_RGMII_RATE_2_5M | @@ -1052,10 +1017,6 @@ static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed, break; } iowrite32(rgmii, &hw->reg->RGMII_CTRL); -#else /* GMII */ - rgmii = 0; - iowrite32(rgmii, &hw->reg->RGMII_CTRL); -#endif } static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed, u16 duplex) @@ -2029,12 +1990,8 @@ static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter) adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048; hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + hw->phy.reset_delay_us = PCH_GBE_PHY_RESET_DELAY_US; - /* Initialize the hardware-specific values */ - if (pch_gbe_hal_setup_init_funcs(hw)) { - netdev_err(netdev, "Hardware Initialization Failure\n"); - return -EIO; - } if (pch_gbe_alloc_queues(adapter)) { netdev_err(netdev, "Unable to allocate memory for queues\n"); return -ENOMEM; @@ -2075,7 +2032,7 @@ static int pch_gbe_open(struct net_device *netdev) err = pch_gbe_setup_rx_resources(adapter, adapter->rx_ring); if (err) goto err_setup_rx; - pch_gbe_hal_power_up_phy(hw); + pch_gbe_phy_power_up(hw); err = pch_gbe_up(adapter); if (err) goto err_up; @@ -2084,7 +2041,7 @@ static int pch_gbe_open(struct net_device *netdev) err_up: if (!adapter->wake_up_evt) - pch_gbe_hal_power_down_phy(hw); + pch_gbe_phy_power_down(hw); pch_gbe_free_rx_resources(adapter, adapter->rx_ring); err_setup_rx: pch_gbe_free_tx_resources(adapter, adapter->tx_ring); @@ -2107,7 +2064,7 @@ static int pch_gbe_stop(struct net_device *netdev) pch_gbe_down(adapter); if (!adapter->wake_up_evt) - pch_gbe_hal_power_down_phy(hw); + pch_gbe_phy_power_down(hw); pch_gbe_free_tx_resources(adapter, adapter->tx_ring); pch_gbe_free_rx_resources(adapter, adapter->rx_ring); return 0; @@ -2148,50 +2105,52 @@ static void pch_gbe_set_multi(struct net_device *netdev) struct pch_gbe_adapter *adapter = netdev_priv(netdev); struct pch_gbe_hw *hw = &adapter->hw; struct netdev_hw_addr *ha; - u8 *mta_list; - u32 rctl; - int i; - int mc_count; + u32 rctl, adrmask; + int mc_count, i; netdev_dbg(netdev, "netdev->flags : 0x%08x\n", netdev->flags); - /* Check for Promiscuous and All Multicast modes */ + /* By default enable address & multicast filtering */ rctl = ioread32(&hw->reg->RX_MODE); + rctl |= PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN; + + /* Promiscuous mode disables all hardware address filtering */ + if (netdev->flags & IFF_PROMISC) + rctl &= ~(PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN); + + /* If we want to monitor more multicast addresses than the hardware can + * support then disable hardware multicast filtering. 
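The rewritten pch_gbe_set_multi() in this hunk collapses the old four-way branch into three cumulative steps: both filters on by default, both off for promiscuous mode, and only the multicast filter off for ALLMULTI or when the group list overflows the MAC address registers. A compilable sketch of just that flag arithmetic; the bit positions and entry count are assumed for illustration (the driver's real values live in pch_gbe.h):

#include <stdbool.h>
#include <stdint.h>

#define EX_ADD_FIL_EN   (1u << 31)      /* assumed: unicast address filter */
#define EX_MLT_FIL_EN   (1u << 30)      /* assumed: multicast filter */
#define EX_MAR_ENTRIES  16              /* assumed: MAC address registers */

static uint32_t ex_rx_mode(uint32_t rctl, bool promisc, bool allmulti,
                           int mc_count)
{
        /* By default enable address & multicast filtering. */
        rctl |= EX_ADD_FIL_EN | EX_MLT_FIL_EN;

        /* Promiscuous mode disables all hardware address filtering. */
        if (promisc)
                rctl &= ~(EX_ADD_FIL_EN | EX_MLT_FIL_EN);

        /* ALLMULTI, or more groups than registers, disables only the
         * multicast filter.
         */
        if (allmulti || mc_count >= EX_MAR_ENTRIES)
                rctl &= ~EX_MLT_FIL_EN;

        return rctl;
}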
+ */ mc_count = netdev_mc_count(netdev); - if ((netdev->flags & IFF_PROMISC)) { - rctl &= ~PCH_GBE_ADD_FIL_EN; - rctl &= ~PCH_GBE_MLT_FIL_EN; - } else if ((netdev->flags & IFF_ALLMULTI)) { - /* all the multicasting receive permissions */ - rctl |= PCH_GBE_ADD_FIL_EN; + if ((netdev->flags & IFF_ALLMULTI) || mc_count >= PCH_GBE_MAR_ENTRIES) rctl &= ~PCH_GBE_MLT_FIL_EN; - } else { - if (mc_count >= PCH_GBE_MAR_ENTRIES) { - /* all the multicasting receive permissions */ - rctl |= PCH_GBE_ADD_FIL_EN; - rctl &= ~PCH_GBE_MLT_FIL_EN; - } else { - rctl |= (PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN); - } - } + iowrite32(rctl, &hw->reg->RX_MODE); - if (mc_count >= PCH_GBE_MAR_ENTRIES) - return; - mta_list = kmalloc_array(ETH_ALEN, mc_count, GFP_ATOMIC); - if (!mta_list) + /* If we're not using multicast filtering then there's no point + * configuring the unused MAC address registers. + */ + if (!(rctl & PCH_GBE_MLT_FIL_EN)) return; - /* The shared function expects a packed array of only addresses. */ - i = 0; - netdev_for_each_mc_addr(ha, netdev) { - if (i == mc_count) - break; - memcpy(mta_list + (i++ * ETH_ALEN), &ha->addr, ETH_ALEN); + /* Load the first set of multicast addresses into MAC address registers + * for use by hardware filtering. + */ + i = 1; + netdev_for_each_mc_addr(ha, netdev) + pch_gbe_mac_mar_set(hw, ha->addr, i++); + + /* If there are spare MAC registers, mask & clear them */ + for (; i < PCH_GBE_MAR_ENTRIES; i++) { + /* Clear MAC address mask */ + adrmask = ioread32(&hw->reg->ADDR_MASK); + iowrite32(adrmask | BIT(i), &hw->reg->ADDR_MASK); + /* wait busy */ + pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY); + /* Clear MAC address */ + iowrite32(0, &hw->reg->mac_adr[i].high); + iowrite32(0, &hw->reg->mac_adr[i].low); } - pch_gbe_mac_mc_addr_list_update(hw, mta_list, i, 1, - PCH_GBE_MAR_ENTRIES); - kfree(mta_list); netdev_dbg(netdev, "RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x netdev->mc_count : 0x%08x\n", @@ -2437,7 +2396,7 @@ static pci_ers_result_t pch_gbe_io_slot_reset(struct pci_dev *pdev) } pci_set_master(pdev); pci_enable_wake(pdev, PCI_D0, 0); - pch_gbe_hal_power_up_phy(hw); + pch_gbe_phy_power_up(hw); pch_gbe_reset(adapter); /* Clear wake up status */ pch_gbe_mac_set_wol_event(hw, 0); @@ -2482,7 +2441,7 @@ static int __pch_gbe_suspend(struct pci_dev *pdev) pch_gbe_mac_set_wol_event(hw, wufc); pci_disable_device(pdev); } else { - pch_gbe_hal_power_down_phy(hw); + pch_gbe_phy_power_down(hw); pch_gbe_mac_set_wol_event(hw, wufc); pci_disable_device(pdev); } @@ -2511,7 +2470,7 @@ static int pch_gbe_resume(struct device *device) return err; } pci_set_master(pdev); - pch_gbe_hal_power_up_phy(hw); + pch_gbe_phy_power_up(hw); pch_gbe_reset(adapter); /* Clear wake on lan control and status */ pch_gbe_mac_set_wol_event(hw, 0); @@ -2541,7 +2500,7 @@ static void pch_gbe_remove(struct pci_dev *pdev) cancel_work_sync(&adapter->reset_task); unregister_netdev(netdev); - pch_gbe_hal_phy_hw_reset(&adapter->hw); + pch_gbe_phy_hw_reset(&adapter->hw); free_netdev(netdev); } @@ -2627,10 +2586,9 @@ static int pch_gbe_probe(struct pci_dev *pdev, dev_err(&pdev->dev, "PHY initialize error\n"); goto err_free_adapter; } - pch_gbe_hal_get_bus_info(&adapter->hw); /* Read the MAC address. 
and store to the private data */ - ret = pch_gbe_hal_read_mac_addr(&adapter->hw); + ret = pch_gbe_mac_read_mac_addr(&adapter->hw); if (ret) { dev_err(&pdev->dev, "MAC address Read Error\n"); goto err_free_adapter; @@ -2677,7 +2635,7 @@ static int pch_gbe_probe(struct pci_dev *pdev, return 0; err_free_adapter: - pch_gbe_hal_phy_hw_reset(&adapter->hw); + pch_gbe_phy_hw_reset(&adapter->hw); err_free_netdev: free_netdev(netdev); return ret; @@ -2776,32 +2734,7 @@ static struct pci_driver pch_gbe_driver = { .shutdown = pch_gbe_shutdown, .err_handler = &pch_gbe_err_handler }; - - -static int __init pch_gbe_init_module(void) -{ - int ret; - - pr_info("EG20T PCH Gigabit Ethernet Driver - version %s\n",DRV_VERSION); - ret = pci_register_driver(&pch_gbe_driver); - if (copybreak != PCH_GBE_COPYBREAK_DEFAULT) { - if (copybreak == 0) { - pr_info("copybreak disabled\n"); - } else { - pr_info("copybreak enabled for packets <= %u bytes\n", - copybreak); - } - } - return ret; -} - -static void __exit pch_gbe_exit_module(void) -{ - pci_unregister_driver(&pch_gbe_driver); -} - -module_init(pch_gbe_init_module); -module_exit(pch_gbe_exit_module); +module_pci_driver(pch_gbe_driver); MODULE_DESCRIPTION("EG20T PCH Gigabit ethernet Driver"); MODULE_AUTHOR("LAPIS SEMICONDUCTOR, <tshimizu818@gmail.com>"); @@ -2809,8 +2742,4 @@ MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id); -module_param(copybreak, uint, 0644); -MODULE_PARM_DESC(copybreak, - "Maximum size of packet that is copied to a new buffer on receive"); - /* pch_gbe_main.c */ diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c index a5cad5ea9436..6b35b573beef 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c @@ -184,7 +184,7 @@ s32 pch_gbe_phy_write_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 data) * pch_gbe_phy_sw_reset - PHY software reset * @hw: Pointer to the HW structure */ -void pch_gbe_phy_sw_reset(struct pch_gbe_hw *hw) +static void pch_gbe_phy_sw_reset(struct pch_gbe_hw *hw) { u16 phy_ctrl; diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h index 95ad0151ad02..23ac38711619 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h @@ -21,12 +21,10 @@ #define PCH_GBE_PHY_REGS_LEN 32 #define PCH_GBE_PHY_RESET_DELAY_US 10 -#define PCH_GBE_MAC_IFOP_RGMII s32 pch_gbe_phy_get_id(struct pch_gbe_hw *hw); s32 pch_gbe_phy_read_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 *data); s32 pch_gbe_phy_write_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 data); -void pch_gbe_phy_sw_reset(struct pch_gbe_hw *hw); void pch_gbe_phy_hw_reset(struct pch_gbe_hw *hw); void pch_gbe_phy_power_up(struct pch_gbe_hw *hw); void pch_gbe_phy_power_down(struct pch_gbe_hw *hw); diff --git a/drivers/net/ethernet/packetengines/Kconfig b/drivers/net/ethernet/packetengines/Kconfig index b5ea2a56106e..1df28f2edd1f 100644 --- a/drivers/net/ethernet/packetengines/Kconfig +++ b/drivers/net/ethernet/packetengines/Kconfig @@ -2,7 +2,7 @@ # Packet engine device configuration # -config NET_PACKET_ENGINE +config NET_VENDOR_PACKET_ENGINES bool "Packet Engine devices" default y depends on PCI @@ -14,7 +14,7 @@ config NET_PACKET_ENGINE the questions about packet engine devices. If you say Y, you will be asked for your specific card in the following questions. 
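The pch_gbe_main.c hunk above also swaps the hand-rolled module init/exit pair for module_pci_driver(). Paraphrasing the macro from include/linux/pci.h (not its literal expansion), the generated code is equivalent to the following; the macro leaves no hook for the old pr_info() banner, which is dropped along with the now-unused copybreak parameter:

/* Roughly what module_pci_driver(pch_gbe_driver) expands to;
 * pch_gbe_driver is the struct pci_driver defined in the file above.
 */
static int __init pch_gbe_driver_init(void)
{
        return pci_register_driver(&pch_gbe_driver);
}
module_init(pch_gbe_driver_init);

static void __exit pch_gbe_driver_exit(void)
{
        pci_unregister_driver(&pch_gbe_driver);
}
module_exit(pch_gbe_driver_exit);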
-if NET_PACKET_ENGINE +if NET_VENDOR_PACKET_ENGINES config HAMACHI tristate "Packet Engines Hamachi GNIC-II support" @@ -40,4 +40,4 @@ config YELLOWFIN To compile this driver as a module, choose M here: the module will be called yellowfin. This is recommended. -endif # NET_PACKET_ENGINE +endif # NET_VENDOR_PACKET_ENGINES diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c index 1cd39c9a0345..52ad80621335 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c @@ -566,9 +566,8 @@ static int netxen_send_cmd_descs(struct netxen_adapter *adapter, struct cmd_desc_type0 *cmd_desc_arr, int nr_desc) { - u32 i, producer, consumer; + u32 i, producer; struct netxen_cmd_buffer *pbuf; - struct cmd_desc_type0 *cmd_desc; struct nx_host_tx_ring *tx_ring; i = 0; @@ -580,7 +579,6 @@ netxen_send_cmd_descs(struct netxen_adapter *adapter, __netif_tx_lock_bh(tx_ring->txq); producer = tx_ring->producer; - consumer = tx_ring->sw_consumer; if (nr_desc >= netxen_tx_avail(tx_ring)) { netif_tx_stop_queue(tx_ring->txq); @@ -595,8 +593,6 @@ netxen_send_cmd_descs(struct netxen_adapter *adapter, } do { - cmd_desc = &cmd_desc_arr[i]; - pbuf = &tx_ring->cmd_buf_arr[producer]; pbuf->skb = NULL; pbuf->frag_count = 0; @@ -2350,7 +2346,7 @@ static int netxen_md_entry_err_chk(struct netxen_adapter *adapter, static int netxen_parse_md_template(struct netxen_adapter *adapter) { int num_of_entries, buff_level, e_cnt, esize; - int end_cnt = 0, rv = 0, sane_start = 0, sane_end = 0; + int rv = 0, sane_start = 0, sane_end = 0; char *dbuff; void *template_buff = adapter->mdump.md_template; char *dump_buff = adapter->mdump.md_capture_buff; @@ -2386,8 +2382,6 @@ static int netxen_parse_md_template(struct netxen_adapter *adapter) break; case RDEND: entry->hdr.driver_flags |= NX_DUMP_SKIP; - if (!sane_end) - end_cnt = e_cnt; sane_end += 1; break; case CNTRL: diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 8259e8309320..69aa7fc392c5 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -2073,7 +2073,7 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) struct skb_frag_struct *frag; u32 producer; - int frag_count, no_of_desc; + int frag_count; u32 num_txd = tx_ring->num_desc; frag_count = skb_shinfo(skb)->nr_frags + 1; @@ -2093,8 +2093,6 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) frag_count = 1 + skb_shinfo(skb)->nr_frags; } - /* 4 fragments per cmd des */ - no_of_desc = (frag_count + 3) >> 2; if (unlikely(netxen_tx_avail(tx_ring) <= TX_STOP_THRESH)) { netif_stop_queue(netdev); diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index b5b5ff725426..f1977aa440e5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -1531,7 +1531,7 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn, } /* CM PF */ -void qed_cm_init_pf(struct qed_hwfn *p_hwfn) +static void qed_cm_init_pf(struct qed_hwfn *p_hwfn) { /* XCM pure-LB queue */ STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET, diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index e0680ce91328..d02e774c8d66 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -221,7 +221,6 @@ 
qed_dcbx_update_app_info(struct qed_dcbx_results *p_data, struct qed_hw_info *p_info = &p_hwfn->hw_info; enum qed_pci_personality personality; enum dcbx_protocol_type id; - char *name; int i; for (i = 0; i < ARRAY_SIZE(qed_dcbx_app_update); i++) { @@ -231,7 +230,6 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data, continue; personality = qed_dcbx_app_update[i].personality; - name = qed_dcbx_app_update[i].name; qed_dcbx_set_params(p_data, p_info, enable, prio, tc, type, personality); @@ -869,7 +867,7 @@ static int qed_dcbx_read_mib(struct qed_hwfn *p_hwfn, return rc; } -void qed_dcbx_aen(struct qed_hwfn *hwfn, u32 mib_type) +static void qed_dcbx_aen(struct qed_hwfn *hwfn, u32 mib_type) { struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common; void *cookie = hwfn->cdev->ops_cookie; diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index 4340c4c90bcb..1aa9fc1c5890 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -7838,8 +7838,8 @@ int qed_dbg_igu_fifo_size(struct qed_dev *cdev) return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO); } -int qed_dbg_nvm_image_length(struct qed_hwfn *p_hwfn, - enum qed_nvm_images image_id, u32 *length) +static int qed_dbg_nvm_image_length(struct qed_hwfn *p_hwfn, + enum qed_nvm_images image_id, u32 *length) { struct qed_nvm_image_att image_att; int rc; @@ -7854,8 +7854,9 @@ int qed_dbg_nvm_image_length(struct qed_hwfn *p_hwfn, return rc; } -int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer, - u32 *num_dumped_bytes, enum qed_nvm_images image_id) +static int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer, + u32 *num_dumped_bytes, + enum qed_nvm_images image_id) { struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->dbg_params.engine_for_debug]; diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index e5249b4741d0..6a0b46f214f4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -230,12 +230,12 @@ static u32 qed_get_pq_flags(struct qed_hwfn *p_hwfn) } /* Getters for resource amounts necessary for qm initialization */ -u8 qed_init_qm_get_num_tcs(struct qed_hwfn *p_hwfn) +static u8 qed_init_qm_get_num_tcs(struct qed_hwfn *p_hwfn) { return p_hwfn->hw_info.num_hw_tc; } -u16 qed_init_qm_get_num_vfs(struct qed_hwfn *p_hwfn) +static u16 qed_init_qm_get_num_vfs(struct qed_hwfn *p_hwfn) { return IS_QED_SRIOV(p_hwfn->cdev) ? 
p_hwfn->cdev->p_iov_info->total_vfs : 0; @@ -243,7 +243,7 @@ u16 qed_init_qm_get_num_vfs(struct qed_hwfn *p_hwfn) #define NUM_DEFAULT_RLS 1 -u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn) +static u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn) { u16 num_pf_rls, num_vfs = qed_init_qm_get_num_vfs(p_hwfn); @@ -261,7 +261,7 @@ u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn) return num_pf_rls; } -u16 qed_init_qm_get_num_vports(struct qed_hwfn *p_hwfn) +static u16 qed_init_qm_get_num_vports(struct qed_hwfn *p_hwfn) { u32 pq_flags = qed_get_pq_flags(p_hwfn); @@ -273,7 +273,7 @@ u16 qed_init_qm_get_num_vports(struct qed_hwfn *p_hwfn) } /* calc amount of PQs according to the requested flags */ -u16 qed_init_qm_get_num_pqs(struct qed_hwfn *p_hwfn) +static u16 qed_init_qm_get_num_pqs(struct qed_hwfn *p_hwfn) { u32 pq_flags = qed_get_pq_flags(p_hwfn); @@ -507,16 +507,6 @@ u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf) return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf; } -u16 qed_get_cm_pq_idx_rl(struct qed_hwfn *p_hwfn, u8 rl) -{ - u16 max_rl = qed_init_qm_get_num_pf_rls(p_hwfn); - - if (rl > max_rl) - DP_ERR(p_hwfn, "rl %d must be smaller than %d\n", rl, max_rl); - - return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + rl; -} - /* Functions for creating specific types of pqs */ static void qed_init_qm_lb_pq(struct qed_hwfn *p_hwfn) { diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index bee10c1781fb..8faceb691657 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -12444,6 +12444,8 @@ struct public_drv_mb { #define DRV_MSG_CODE_STATS_TYPE_ISCSI 3 #define DRV_MSG_CODE_STATS_TYPE_RDMA 4 +#define DRV_MSG_CODE_TRANSCEIVER_READ 0x00160000 + #define DRV_MSG_CODE_MASK_PARITIES 0x001a0000 #define DRV_MSG_CODE_BIST_TEST 0x001e0000 @@ -12543,6 +12545,15 @@ struct public_drv_mb { #define DRV_MB_PARAM_SET_LED_MODE_ON 0x1 #define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2 +#define DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET 0 +#define DRV_MB_PARAM_TRANSCEIVER_PORT_MASK 0x00000003 +#define DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET 2 +#define DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK 0x000000FC +#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET 8 +#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK 0x0000FF00 +#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET 16 +#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK 0xFFFF0000 + /* Resource Allocation params - Driver version support */ #define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xFFFF0000 #define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16 @@ -12596,6 +12607,9 @@ struct public_drv_mb { #define FW_MSG_CODE_PHY_OK 0x00110000 #define FW_MSG_CODE_OK 0x00160000 #define FW_MSG_CODE_ERROR 0x00170000 +#define FW_MSG_CODE_TRANSCEIVER_DIAG_OK 0x00160000 +#define FW_MSG_CODE_TRANSCEIVER_DIAG_ERROR 0x00170000 +#define FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT 0x00020000 #define FW_MSG_CODE_OS_WOL_SUPPORTED 0x00800000 #define FW_MSG_CODE_OS_WOL_NOT_SUPPORTED 0x00810000 @@ -12687,6 +12701,8 @@ struct mcp_public_data { struct public_func func[MCP_GLOB_FUNC_MAX]; }; +#define MAX_I2C_TRANSACTION_SIZE 16 + /* OCBB definitions */ enum tlvs { /* Category 1: Device Properties */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c index d845badf9b90..d6430dfebd83 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c @@ -1225,19 +1225,6 @@ void 
qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id) 0); } -void qed_set_gft_event_id_cm_hdr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) -{ - u32 rfs_cm_hdr_event_id; - - /* Set RFS event ID to be awakened i Tstorm By Prs */ - rfs_cm_hdr_event_id = qed_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT); - rfs_cm_hdr_event_id |= T_ETH_PACKET_ACTION_GFT_EVENTID << - PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT; - rfs_cm_hdr_event_id |= PARSER_ETH_CONN_GFT_ACTION_CM_HDR << - PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT; - qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id); -} - void qed_gft_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id, diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c index c0d4a54a5edb..1135387bd99d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c @@ -873,8 +873,8 @@ static void qed_iscsi_release_connection(struct qed_hwfn *p_hwfn, spin_unlock_bh(&p_hwfn->p_iscsi_info->lock); } -void qed_iscsi_free_connection(struct qed_hwfn *p_hwfn, - struct qed_iscsi_conn *p_conn) +static void qed_iscsi_free_connection(struct qed_hwfn *p_hwfn, + struct qed_iscsi_conn *p_conn) { qed_chain_free(p_hwfn->cdev, &p_conn->xhq); qed_chain_free(p_hwfn->cdev, &p_conn->uhq); diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index 90a2b53096e2..17f3dfa2cc94 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -377,7 +377,7 @@ qed_iwarp2roce_state(enum qed_iwarp_qp_state state) } } -const char *iwarp_state_names[] = { +const static char *iwarp_state_names[] = { "IDLE", "RTS", "TERMINATE", @@ -942,7 +942,7 @@ qed_iwarp_return_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); } -void +static void qed_iwarp_parse_private_data(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) { struct mpa_v2_hdr *mpa_v2_params; @@ -967,7 +967,7 @@ qed_iwarp_parse_private_data(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) mpa_data_size; } -void +static void qed_iwarp_mpa_reply_arrived(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) { struct qed_iwarp_cm_event_params params; @@ -2500,7 +2500,7 @@ static void qed_iwarp_ll2_rel_tx_pkt(void *cxt, u8 connection_handle, /* The only slowpath for iwarp ll2 is unalign flush. When this completion * is received, need to reset the FPDU. 
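A recurring, purely mechanical change across these qed hunks (qed_cxt.c, qed_dcbx.c, qed_debug.c, qed_dev.c, qed_iscsi.c, qed_iwarp.c and more below) is giving file-local helpers internal linkage. The effect is linkage only, as this minimal sketch shows:

/* was: int helper(int x) -- visible to the whole kernel image and
 * flagged by sparse as missing a prototype.
 */
static int helper(int x)
{
        return x * 2;
}

/* Functions in the driver's public API keep external linkage and a
 * declaration in a header.
 */
int exported_entry_point(int x)
{
        return helper(x);
}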
*/ -void +static void qed_iwarp_ll2_slowpath(void *cxt, u8 connection_handle, u32 opaque_data_0, u32 opaque_data_1) @@ -2803,8 +2803,9 @@ int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) return qed_iwarp_ll2_stop(p_hwfn, p_ptt); } -void qed_iwarp_qp_in_error(struct qed_hwfn *p_hwfn, - struct qed_iwarp_ep *ep, u8 fw_return_code) +static void qed_iwarp_qp_in_error(struct qed_hwfn *p_hwfn, + struct qed_iwarp_ep *ep, + u8 fw_return_code) { struct qed_iwarp_cm_event_params params; @@ -2824,8 +2825,9 @@ void qed_iwarp_qp_in_error, ep->event_cb(ep->cb_context, &params); } -void qed_iwarp_exception_received(struct qed_hwfn *p_hwfn, - struct qed_iwarp_ep *ep, int fw_ret_code) +static void qed_iwarp_exception_received(struct qed_hwfn *p_hwfn, + struct qed_iwarp_ep *ep, + int fw_ret_code) { struct qed_iwarp_cm_event_params params; bool event_cb = false; @@ -2954,7 +2956,7 @@ qed_iwarp_tcp_connect_unsuccessful(struct qed_hwfn *p_hwfn, } } -void +static void qed_iwarp_connect_complete(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep, u8 fw_return_code) { diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 012973d75ad0..14ac9cab2653 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -158,7 +158,8 @@ static void qed_ll2_kill_buffers(struct qed_dev *cdev) qed_ll2_dealloc_buffer(cdev, buffer); } -void qed_ll2b_complete_rx_packet(void *cxt, struct qed_ll2_comp_rx_data *data) +static void qed_ll2b_complete_rx_packet(void *cxt, + struct qed_ll2_comp_rx_data *data) { struct qed_hwfn *p_hwfn = cxt; struct qed_ll2_buffer *buffer = data->cookie; diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 758a9a5127fa..dbe81310c0b6 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -2102,6 +2102,28 @@ out: return status; } +static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf, + u8 dev_addr, u32 offset, u32 len) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *ptt; + int rc = 0; + + if (IS_VF(cdev)) + return 0; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EAGAIN; + + rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr, + offset, len, buf); + + qed_ptt_release(hwfn, ptt); + + return rc; +} + static struct qed_selftest_ops qed_selftest_ops_pass = { .selftest_memory = &qed_selftest_memory, .selftest_interrupt = &qed_selftest_interrupt, @@ -2144,6 +2166,7 @@ const struct qed_common_ops qed_common_ops_pass = { .update_mac = &qed_update_mac, .update_mtu = &qed_update_mtu, .update_wol = &qed_update_wol, + .read_module_eeprom = &qed_read_module_eeprom, }; void qed_get_protocol_stats(struct qed_dev *cdev, diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index cdd645024a32..8e4f60e4520a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -570,12 +570,13 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn, return 0; } -int qed_mcp_nvm_wr_cmd(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 cmd, - u32 param, - u32 *o_mcp_resp, - u32 *o_mcp_param, u32 i_txn_size, u32 *i_buf) +static int +qed_mcp_nvm_wr_cmd(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 cmd, + u32 param, + u32 *o_mcp_resp, + u32 *o_mcp_param, u32 i_txn_size, u32 *i_buf) { struct qed_mcp_mb_params mb_params; int rc; @@ -2473,6 +2474,55 @@ out: return rc;
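The qed_read_module_eeprom() callback added to qed_main.c above follows the driver's usual PTT-window discipline around management-FW access. A hedged sketch of that shape; ex_mfw_op() and do_mfw_work() are placeholders, not qed symbols:

#include "qed.h"        /* assumed: qed-internal header for qed_ptt_acquire() */

static int ex_mfw_op(struct qed_hwfn *hwfn)
{
        struct qed_ptt *ptt;
        int rc;

        ptt = qed_ptt_acquire(hwfn);
        if (!ptt)
                return -EAGAIN;         /* all PTT windows busy; caller retries */

        rc = do_mfw_work(hwfn, ptt);    /* placeholder for the real call,
                                         * e.g. qed_mcp_phy_sfp_read() */

        qed_ptt_release(hwfn, ptt);     /* release on every path; never leak */
        return rc;
}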
} +int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf) +{ + u32 bytes_left, bytes_to_copy, buf_size, nvm_offset = 0; + u32 resp, param; + int rc; + + nvm_offset |= (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) & + DRV_MB_PARAM_TRANSCEIVER_PORT_MASK; + nvm_offset |= (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET) & + DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK; + + addr = offset; + offset = 0; + bytes_left = len; + while (bytes_left > 0) { + bytes_to_copy = min_t(u32, bytes_left, + MAX_I2C_TRANSACTION_SIZE); + nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK | + DRV_MB_PARAM_TRANSCEIVER_PORT_MASK); + nvm_offset |= ((addr + offset) << + DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET) & + DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK; + nvm_offset |= (bytes_to_copy << + DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET) & + DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK; + rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, + DRV_MSG_CODE_TRANSCEIVER_READ, + nvm_offset, &resp, &param, &buf_size, + (u32 *)(p_buf + offset)); + if (rc) { + DP_NOTICE(p_hwfn, + "Failed to send a transceiver read command to the MFW. rc = %d.\n", + rc); + return rc; + } + + if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) + return -ENODEV; + else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK) + return -EINVAL; + + offset += buf_size; + bytes_left -= buf_size; + } + + return 0; +} + int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 drv_mb_param = 0, rsp, param; @@ -2959,7 +3009,7 @@ static int qed_mcp_resource_cmd(struct qed_hwfn *p_hwfn, return rc; } -int +static int __qed_mcp_resc_lock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_resc_lock_params *p_params) diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 632a838f1fe3..047976d5c6e9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -840,6 +840,22 @@ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn, u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf); /** + * @brief Read from sfp + * + * @param p_hwfn - hw function + * @param p_ptt - PTT required for register access + * @param port - transceiver port + * @param addr - I2C address + * @param offset - offset in sfp + * @param len - buffer length + * @param p_buf - buffer to read into + * + * @return int - 0 - operation was successful.
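To make the mailbox-parameter packing in qed_mcp_phy_sfp_read() above concrete, here is a standalone, compilable walk-through using the DRV_MB_PARAM_TRANSCEIVER_* values from the qed_hsi.h hunk earlier in this patch. Port and I2C address are packed once; the byte offset and chunk length are re-packed on every MAX_I2C_TRANSACTION_SIZE-byte iteration (0xa0 is assumed here as a typical SFP A0h address):

#include <stdint.h>
#include <stdio.h>

#define DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET            0
#define DRV_MB_PARAM_TRANSCEIVER_PORT_MASK              0x00000003
#define DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET            2
#define DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK              0x000000FC
#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET     8
#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK       0x0000FF00
#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET          16
#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK            0xFFFF0000
#define MAX_I2C_TRANSACTION_SIZE                        16

int main(void)
{
        uint32_t port = 0, i2c_addr = 0xa0, addr = 0x100, len = 40;
        uint32_t base, off = 0;

        /* Port and I2C address do not change across chunks. */
        base = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) &
               DRV_MB_PARAM_TRANSCEIVER_PORT_MASK;
        base |= (i2c_addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET) &
                DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK;

        while (off < len) {
                uint32_t chunk = len - off;
                uint32_t nvm_offset = base;

                if (chunk > MAX_I2C_TRANSACTION_SIZE)
                        chunk = MAX_I2C_TRANSACTION_SIZE;

                /* Per-chunk fields: current byte offset and length. */
                nvm_offset |= ((addr + off) <<
                               DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET) &
                              DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK;
                nvm_offset |= (chunk << DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET) &
                              DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK;

                printf("mailbox param 0x%08x (offset 0x%x, %u bytes)\n",
                       (unsigned int)nvm_offset, (unsigned int)(addr + off),
                       (unsigned int)chunk);
                off += chunk;
        }
        return 0;
}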
+ */ +int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf); + +/** * @brief indicates whether the MFW objects [under mcp_info] are accessible * * @param p_hwfn diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index 101d677114f2..be941cfaa2d4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c @@ -134,7 +134,7 @@ static bool qed_bmap_is_empty(struct qed_bmap *bmap) return bmap->max_count == find_first_bit(bmap->bitmap, bmap->max_count); } -u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id) +static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id) { /* First sb id for RoCE is after all the l2 sb */ return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id; @@ -706,7 +706,7 @@ static int qed_rdma_setup(struct qed_hwfn *p_hwfn, return qed_rdma_start_fw(p_hwfn, params, p_ptt); } -int qed_rdma_stop(void *rdma_cxt) +static int qed_rdma_stop(void *rdma_cxt) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; struct rdma_close_func_ramrod_data *p_ramrod; diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index b5ce1581645f..ada4c1810864 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -157,7 +157,7 @@ static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode) return flavor; } -void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid) +static void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid) { spin_lock_bh(&p_hwfn->p_rdma_info->lock); qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid); diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 26e918d7f2f9..9b08a9d9e151 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -672,8 +672,8 @@ int qed_iov_hw_info(struct qed_hwfn *p_hwfn) return 0; } -bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, - int vfid, bool b_fail_malicious) +static bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, + int vfid, bool b_fail_malicious) { /* Check PF supports sriov */ if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) || @@ -687,7 +687,7 @@ bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, return true; } -bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid) +static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid) { return _qed_iov_pf_sanity_check(p_hwfn, vfid, true); } @@ -3979,7 +3979,7 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, } } -void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events) +static void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events) { int i; diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index be6ddde1a104..3d4269659820 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -169,7 +169,7 @@ static void qed_vf_pf_add_qid(struct qed_hwfn *p_hwfn, p_qid_tlv->qid = p_cid->qid_usage_idx; } -int _qed_vf_pf_release(struct qed_hwfn *p_hwfn, bool b_final) +static int _qed_vf_pf_release(struct qed_hwfn *p_hwfn, bool b_final) { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; struct pfvf_def_resp_tlv *resp; diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c 
b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index f4a0f8ff8261..b37857f3f950 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -1780,6 +1780,92 @@ static int qede_set_eee(struct net_device *dev, struct ethtool_eee *edata) return 0; } +static int qede_get_module_info(struct net_device *dev, + struct ethtool_modinfo *modinfo) +{ + struct qede_dev *edev = netdev_priv(dev); + u8 buf[4]; + int rc; + + /* Read first 4 bytes to find the sfp type */ + rc = edev->ops->common->read_module_eeprom(edev->cdev, buf, + QED_I2C_DEV_ADDR_A0, 0, 4); + if (rc) { + DP_ERR(edev, "Failed reading EEPROM data %d\n", rc); + return rc; + } + + switch (buf[0]) { + case 0x3: /* SFP, SFP+, SFP-28 */ + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + break; + case 0xc: /* QSFP */ + case 0xd: /* QSFP+ */ + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; + break; + case 0x11: /* QSFP-28 */ + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; + break; + default: + DP_ERR(edev, "Unknown transceiver type 0x%x\n", buf[0]); + return -EINVAL; + } + + return 0; +} + +static int qede_get_module_eeprom(struct net_device *dev, + struct ethtool_eeprom *ee, u8 *data) +{ + struct qede_dev *edev = netdev_priv(dev); + u32 start_addr = ee->offset, size = 0; + u8 *buf = data; + int rc = 0; + + /* Read A0 section */ + if (ee->offset < ETH_MODULE_SFF_8079_LEN) { + /* Limit transfer size to the A0 section boundary */ + if (ee->offset + ee->len > ETH_MODULE_SFF_8079_LEN) + size = ETH_MODULE_SFF_8079_LEN - ee->offset; + else + size = ee->len; + + rc = edev->ops->common->read_module_eeprom(edev->cdev, buf, + QED_I2C_DEV_ADDR_A0, + start_addr, size); + if (rc) { + DP_ERR(edev, "Failed reading A0 section %d\n", rc); + return rc; + } + + buf += size; + start_addr += size; + } + + /* Read A2 section */ + if (start_addr >= ETH_MODULE_SFF_8079_LEN && + start_addr < ETH_MODULE_SFF_8472_LEN) { + size = ee->len - size; + /* Limit transfer size to the A2 section boundary */ + if (start_addr + size > ETH_MODULE_SFF_8472_LEN) + size = ETH_MODULE_SFF_8472_LEN - start_addr; + start_addr -= ETH_MODULE_SFF_8079_LEN; + rc = edev->ops->common->read_module_eeprom(edev->cdev, buf, + QED_I2C_DEV_ADDR_A2, + start_addr, size); + if (rc) { + DP_VERBOSE(edev, QED_MSG_DEBUG, + "Failed reading A2 section %d\n", rc); + return 0; + } + } + + return rc; +} + static const struct ethtool_ops qede_ethtool_ops = { .get_link_ksettings = qede_get_link_ksettings, .set_link_ksettings = qede_set_link_ksettings, @@ -1813,6 +1899,8 @@ static const struct ethtool_ops qede_ethtool_ops = { .get_channels = qede_get_channels, .set_channels = qede_set_channels, .self_test = qede_self_test, + .get_module_info = qede_get_module_info, + .get_module_eeprom = qede_get_module_eeprom, .get_eee = qede_get_eee, .set_eee = qede_set_eee, diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index b823bfe2ea4d..f9a327c821eb 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -1116,7 +1116,6 @@ int qede_xdp(struct net_device *dev, struct netdev_bpf *xdp) case XDP_SETUP_PROG: return qede_xdp_set(edev, xdp->prog); case XDP_QUERY_PROG: - xdp->prog_attached = !!edev->xdp_prog; xdp->prog_id = edev->xdp_prog ? 
edev->xdp_prog->aux->id : 0; return 0; default: diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index 0c744b9c6e0a..77e386ebff09 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -212,7 +212,7 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs) vp->max_tx_bw = MAX_BW; vp->min_tx_bw = MIN_BW; vp->spoofchk = false; - random_ether_addr(vp->mac); + eth_random_addr(vp->mac); dev_info(&adapter->pdev->dev, "MAC Address %pM is configured for VF %d\n", vp->mac, i); diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c index b9a7548ec6a0..0afc3d335d56 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c @@ -210,7 +210,7 @@ void rmnet_vnd_setup(struct net_device *rmnet_dev) rmnet_dev->netdev_ops = &rmnet_vnd_ops; rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE; rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM; - random_ether_addr(rmnet_dev->dev_addr); + eth_random_addr(rmnet_dev->dev_addr); rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN; /* Raw IP mode */ diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig index 7c69f4c8134d..e1cd934c2e4f 100644 --- a/drivers/net/ethernet/realtek/Kconfig +++ b/drivers/net/ethernet/realtek/Kconfig @@ -99,7 +99,7 @@ config R8169 depends on PCI select FW_LOADER select CRC32 - select MII + select PHYLIB ---help--- Say Y here if you have a Realtek 8169 PCI Gigabit Ethernet adapter. diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index eaedc11ed686..8ea1fa36ca43 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -15,7 +15,7 @@ #include <linux/etherdevice.h> #include <linux/delay.h> #include <linux/ethtool.h> -#include <linux/mii.h> +#include <linux/phy.h> #include <linux/if_vlan.h> #include <linux/crc32.h> #include <linux/in.h> @@ -25,7 +25,6 @@ #include <linux/dma-mapping.h> #include <linux/pm_runtime.h> #include <linux/firmware.h> -#include <linux/pci-aspm.h> #include <linux/prefetch.h> #include <linux/ipv6.h> #include <net/ip6_checksum.h> @@ -35,7 +34,6 @@ #define RTL8169_VERSION "2.3LK-NAPI" #define MODULENAME "r8169" -#define PFX MODULENAME ": " #define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw" #define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw" @@ -57,19 +55,6 @@ #define FIRMWARE_8107E_1 "rtl_nic/rtl8107e-1.fw" #define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw" -#ifdef RTL8169_DEBUG -#define assert(expr) \ - if (!(expr)) { \ - printk( "Assertion failed! %s,%s,%s,line=%d\n", \ - #expr,__FILE__,__func__,__LINE__); \ - } -#define dprintk(fmt, args...) \ - do { printk(KERN_DEBUG PFX fmt, ## args); } while (0) -#else -#define assert(expr) do {} while (0) -#define dprintk(fmt, args...) 
do {} while (0) -#endif /* RTL8169_DEBUG */ - #define R8169_MSG_DEFAULT \ (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN) @@ -95,7 +80,6 @@ static const int multicast_filter_limit = 32; #define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc)) #define RTL8169_TX_TIMEOUT (6*HZ) -#define RTL8169_PHY_TIMEOUT (10*HZ) /* write/read MMIO register */ #define RTL_W8(tp, reg, val8) writeb((val8), tp->mmio_addr + (reg)) @@ -399,12 +383,6 @@ enum rtl_registers { FuncForceEvent = 0xfc, }; -enum rtl8110_registers { - TBICSR = 0x64, - TBI_ANAR = 0x68, - TBI_LPAR = 0x6a, -}; - enum rtl8168_8101_registers { CSIDR = 0x64, CSIAR = 0x68, @@ -571,14 +549,6 @@ enum rtl_register_content { PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */ ASPM_en = (1 << 0), /* ASPM enable */ - /* TBICSR p.28 */ - TBIReset = 0x80000000, - TBILoopback = 0x40000000, - TBINwEnable = 0x20000000, - TBINwRestart = 0x10000000, - TBILinkOk = 0x02000000, - TBINwComplete = 0x01000000, - /* CPlusCmd p.31 */ EnableBist = (1 << 15), // 8168 8101 Mac_dbgo_oe = (1 << 14), // 8168 8101 @@ -732,7 +702,6 @@ enum rtl_flag { RTL_FLAG_TASK_ENABLED, RTL_FLAG_TASK_SLOW_PENDING, RTL_FLAG_TASK_RESET_PENDING, - RTL_FLAG_TASK_PHY_PENDING, RTL_FLAG_MAX }; @@ -760,7 +729,6 @@ struct rtl8169_private { dma_addr_t RxPhyAddr; void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */ struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */ - struct timer_list timer; u16 cp_cmd; u16 event_slow; @@ -776,14 +744,7 @@ struct rtl8169_private { void (*disable)(struct rtl8169_private *); } jumbo_ops; - int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv); - int (*get_link_ksettings)(struct net_device *, - struct ethtool_link_ksettings *); - void (*phy_reset_enable)(struct rtl8169_private *tp); void (*hw_start)(struct rtl8169_private *tp); - unsigned int (*phy_reset_pending)(struct rtl8169_private *tp); - unsigned int (*link_ok)(struct rtl8169_private *tp); - int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd); bool (*tso_csum)(struct rtl8169_private *, struct sk_buff *, u32 *); struct { @@ -792,7 +753,8 @@ struct rtl8169_private { struct work_struct work; } wk; - struct mii_if_info mii; + unsigned supports_gmii:1; + struct mii_bus *mii_bus; dma_addr_t counters_phys_addr; struct rtl8169_counters *counters; struct rtl8169_tc_offsets tc_offset; @@ -1143,21 +1105,6 @@ static void rtl_w0w1_phy(struct rtl8169_private *tp, int reg_addr, int p, int m) rtl_writephy(tp, reg_addr, (val & ~m) | p); } -static void rtl_mdio_write(struct net_device *dev, int phy_id, int location, - int val) -{ - struct rtl8169_private *tp = netdev_priv(dev); - - rtl_writephy(tp, location, val); -} - -static int rtl_mdio_read(struct net_device *dev, int phy_id, int location) -{ - struct rtl8169_private *tp = netdev_priv(dev); - - return rtl_readphy(tp, location); -} - DECLARE_RTL_COND(rtl_ephyar_cond) { return RTL_R32(tp, EPHYAR) & EPHYAR_FLAG; @@ -1478,54 +1425,22 @@ static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp) RTL_R8(tp, ChipCmd); } -static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp) -{ - return RTL_R32(tp, TBICSR) & TBIReset; -} - -static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp) -{ - return rtl_readphy(tp, MII_BMCR) & BMCR_RESET; -} - -static unsigned int rtl8169_tbi_link_ok(struct rtl8169_private *tp) -{ - return RTL_R32(tp, TBICSR) & TBILinkOk; -} - -static unsigned int rtl8169_xmii_link_ok(struct rtl8169_private *tp) -{ - return 
RTL_R8(tp, PHYstatus) & LinkStatus; -} - -static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp) -{ - RTL_W32(tp, TBICSR, RTL_R32(tp, TBICSR) | TBIReset); -} - -static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp) -{ - unsigned int val; - - val = rtl_readphy(tp, MII_BMCR) | BMCR_RESET; - rtl_writephy(tp, MII_BMCR, val & 0xffff); -} - static void rtl_link_chg_patch(struct rtl8169_private *tp) { struct net_device *dev = tp->dev; + struct phy_device *phydev = dev->phydev; if (!netif_running(dev)) return; if (tp->mac_version == RTL_GIGA_MAC_VER_34 || tp->mac_version == RTL_GIGA_MAC_VER_38) { - if (RTL_R8(tp, PHYstatus) & _1000bpsF) { + if (phydev->speed == SPEED_1000) { rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011, ERIAR_EXGMAC); rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005, ERIAR_EXGMAC); - } else if (RTL_R8(tp, PHYstatus) & _100bps) { + } else if (phydev->speed == SPEED_100) { rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f, ERIAR_EXGMAC); rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005, @@ -1543,7 +1458,7 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp) ERIAR_EXGMAC); } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 || tp->mac_version == RTL_GIGA_MAC_VER_36) { - if (RTL_R8(tp, PHYstatus) & _1000bpsF) { + if (phydev->speed == SPEED_1000) { rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011, ERIAR_EXGMAC); rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005, @@ -1555,7 +1470,7 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp) ERIAR_EXGMAC); } } else if (tp->mac_version == RTL_GIGA_MAC_VER_37) { - if (RTL_R8(tp, PHYstatus) & _10bps) { + if (phydev->speed == SPEED_10) { rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02, ERIAR_EXGMAC); rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060, @@ -1567,25 +1482,6 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp) } } -static void rtl8169_check_link_status(struct net_device *dev, - struct rtl8169_private *tp) -{ - struct device *d = tp_to_dev(tp); - - if (tp->link_ok(tp)) { - rtl_link_chg_patch(tp); - /* This is to cancel a scheduled suspend if there's one. 
*/ - pm_request_resume(d); - netif_carrier_on(dev); - if (net_ratelimit()) - netif_info(tp, ifup, dev, "link up\n"); - } else { - netif_carrier_off(dev); - netif_info(tp, ifdown, dev, "link down\n"); - pm_runtime_idle(d); - } -} - #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST) static u32 __rtl8169_get_wol(struct rtl8169_private *tp) @@ -1626,21 +1522,11 @@ static u32 __rtl8169_get_wol(struct rtl8169_private *tp) static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct rtl8169_private *tp = netdev_priv(dev); - struct device *d = tp_to_dev(tp); - - pm_runtime_get_noresume(d); rtl_lock_work(tp); - wol->supported = WAKE_ANY; - if (pm_runtime_active(d)) - wol->wolopts = __rtl8169_get_wol(tp); - else - wol->wolopts = tp->saved_wolopts; - + wol->wolopts = tp->saved_wolopts; rtl_unlock_work(tp); - - pm_runtime_put_noidle(d); } static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) @@ -1716,18 +1602,21 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) struct rtl8169_private *tp = netdev_priv(dev); struct device *d = tp_to_dev(tp); + if (wol->wolopts & ~WAKE_ANY) + return -EINVAL; + pm_runtime_get_noresume(d); rtl_lock_work(tp); + tp->saved_wolopts = wol->wolopts; + if (pm_runtime_active(d)) - __rtl8169_set_wol(tp, wol->wolopts); - else - tp->saved_wolopts = wol->wolopts; + __rtl8169_set_wol(tp, tp->saved_wolopts); rtl_unlock_work(tp); - device_set_wakeup_enable(d, wol->wolopts); + device_set_wakeup_enable(d, tp->saved_wolopts); pm_runtime_put_noidle(d); @@ -1759,124 +1648,6 @@ static int rtl8169_get_regs_len(struct net_device *dev) return R8169_REGS_SIZE; } -static int rtl8169_set_speed_tbi(struct net_device *dev, - u8 autoneg, u16 speed, u8 duplex, u32 ignored) -{ - struct rtl8169_private *tp = netdev_priv(dev); - int ret = 0; - u32 reg; - - reg = RTL_R32(tp, TBICSR); - if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) && - (duplex == DUPLEX_FULL)) { - RTL_W32(tp, TBICSR, reg & ~(TBINwEnable | TBINwRestart)); - } else if (autoneg == AUTONEG_ENABLE) - RTL_W32(tp, TBICSR, reg | TBINwEnable | TBINwRestart); - else { - netif_warn(tp, link, dev, - "incorrect speed setting refused in TBI mode\n"); - ret = -EOPNOTSUPP; - } - - return ret; -} - -static int rtl8169_set_speed_xmii(struct net_device *dev, - u8 autoneg, u16 speed, u8 duplex, u32 adv) -{ - struct rtl8169_private *tp = netdev_priv(dev); - int giga_ctrl, bmcr; - int rc = -EINVAL; - - rtl_writephy(tp, 0x1f, 0x0000); - - if (autoneg == AUTONEG_ENABLE) { - int auto_nego; - - auto_nego = rtl_readphy(tp, MII_ADVERTISE); - auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL | - ADVERTISE_100HALF | ADVERTISE_100FULL); - - if (adv & ADVERTISED_10baseT_Half) - auto_nego |= ADVERTISE_10HALF; - if (adv & ADVERTISED_10baseT_Full) - auto_nego |= ADVERTISE_10FULL; - if (adv & ADVERTISED_100baseT_Half) - auto_nego |= ADVERTISE_100HALF; - if (adv & ADVERTISED_100baseT_Full) - auto_nego |= ADVERTISE_100FULL; - - auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; - - giga_ctrl = rtl_readphy(tp, MII_CTRL1000); - giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF); - - /* The 8100e/8101e/8102e do Fast Ethernet only. 
*/ - if (tp->mii.supports_gmii) { - if (adv & ADVERTISED_1000baseT_Half) - giga_ctrl |= ADVERTISE_1000HALF; - if (adv & ADVERTISED_1000baseT_Full) - giga_ctrl |= ADVERTISE_1000FULL; - } else if (adv & (ADVERTISED_1000baseT_Half | - ADVERTISED_1000baseT_Full)) { - netif_info(tp, link, dev, - "PHY does not support 1000Mbps\n"); - goto out; - } - - bmcr = BMCR_ANENABLE | BMCR_ANRESTART; - - rtl_writephy(tp, MII_ADVERTISE, auto_nego); - rtl_writephy(tp, MII_CTRL1000, giga_ctrl); - } else { - if (speed == SPEED_10) - bmcr = 0; - else if (speed == SPEED_100) - bmcr = BMCR_SPEED100; - else - goto out; - - if (duplex == DUPLEX_FULL) - bmcr |= BMCR_FULLDPLX; - } - - rtl_writephy(tp, MII_BMCR, bmcr); - - if (tp->mac_version == RTL_GIGA_MAC_VER_02 || - tp->mac_version == RTL_GIGA_MAC_VER_03) { - if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) { - rtl_writephy(tp, 0x17, 0x2138); - rtl_writephy(tp, 0x0e, 0x0260); - } else { - rtl_writephy(tp, 0x17, 0x2108); - rtl_writephy(tp, 0x0e, 0x0000); - } - } - - rc = 0; -out: - return rc; -} - -static int rtl8169_set_speed(struct net_device *dev, - u8 autoneg, u16 speed, u8 duplex, u32 advertising) -{ - struct rtl8169_private *tp = netdev_priv(dev); - int ret; - - ret = tp->set_speed(dev, autoneg, speed, duplex, advertising); - if (ret < 0) - goto out; - - if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) && - (advertising & ADVERTISED_1000baseT_Full) && - !pci_is_pcie(tp->pci_dev)) { - mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT); - } -out: - return ret; -} - static netdev_features_t rtl8169_fix_features(struct net_device *dev, netdev_features_t features) { @@ -1940,76 +1711,6 @@ static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff)); } -static int rtl8169_get_link_ksettings_tbi(struct net_device *dev, - struct ethtool_link_ksettings *cmd) -{ - struct rtl8169_private *tp = netdev_priv(dev); - u32 status; - u32 supported, advertising; - - supported = - SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE; - cmd->base.port = PORT_FIBRE; - - status = RTL_R32(tp, TBICSR); - advertising = (status & TBINwEnable) ? 
ADVERTISED_Autoneg : 0; - cmd->base.autoneg = !!(status & TBINwEnable); - - cmd->base.speed = SPEED_1000; - cmd->base.duplex = DUPLEX_FULL; /* Always set */ - - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, - supported); - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, - advertising); - - return 0; -} - -static int rtl8169_get_link_ksettings_xmii(struct net_device *dev, - struct ethtool_link_ksettings *cmd) -{ - struct rtl8169_private *tp = netdev_priv(dev); - - mii_ethtool_get_link_ksettings(&tp->mii, cmd); - - return 0; -} - -static int rtl8169_get_link_ksettings(struct net_device *dev, - struct ethtool_link_ksettings *cmd) -{ - struct rtl8169_private *tp = netdev_priv(dev); - int rc; - - rtl_lock_work(tp); - rc = tp->get_link_ksettings(dev, cmd); - rtl_unlock_work(tp); - - return rc; -} - -static int rtl8169_set_link_ksettings(struct net_device *dev, - const struct ethtool_link_ksettings *cmd) -{ - struct rtl8169_private *tp = netdev_priv(dev); - int rc; - u32 advertising; - - if (!ethtool_convert_link_mode_to_legacy_u32(&advertising, - cmd->link_modes.advertising)) - return -EINVAL; - - del_timer_sync(&tp->timer); - - rtl_lock_work(tp); - rc = rtl8169_set_speed(dev, cmd->base.autoneg, cmd->base.speed, - cmd->base.duplex, advertising); - rtl_unlock_work(tp); - - return rc; -} - static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) { @@ -2185,13 +1886,6 @@ static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data) } } -static int rtl8169_nway_reset(struct net_device *dev) -{ - struct rtl8169_private *tp = netdev_priv(dev); - - return mii_nway_restart(&tp->mii); -} - /* * Interrupt coalescing * @@ -2264,7 +1958,7 @@ static const struct rtl_coalesce_info *rtl_coalesce_info(struct net_device *dev) const struct rtl_coalesce_info *ci; int rc; - rc = rtl8169_get_link_ksettings(dev, &ecmd); + rc = phy_ethtool_get_link_ksettings(dev, &ecmd); if (rc < 0) return ERR_PTR(rc); @@ -2422,9 +2116,9 @@ static const struct ethtool_ops rtl8169_ethtool_ops = { .get_sset_count = rtl8169_get_sset_count, .get_ethtool_stats = rtl8169_get_ethtool_stats, .get_ts_info = ethtool_op_get_ts_info, - .nway_reset = rtl8169_nway_reset, - .get_link_ksettings = rtl8169_get_link_ksettings, - .set_link_ksettings = rtl8169_set_link_ksettings, + .nway_reset = phy_ethtool_nway_reset, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static void rtl8169_get_mac_version(struct rtl8169_private *tp, @@ -2537,15 +2231,15 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp, "unknown MAC, using family default\n"); tp->mac_version = default_version; } else if (tp->mac_version == RTL_GIGA_MAC_VER_42) { - tp->mac_version = tp->mii.supports_gmii ? + tp->mac_version = tp->supports_gmii ? RTL_GIGA_MAC_VER_42 : RTL_GIGA_MAC_VER_43; } else if (tp->mac_version == RTL_GIGA_MAC_VER_45) { - tp->mac_version = tp->mii.supports_gmii ? + tp->mac_version = tp->supports_gmii ? RTL_GIGA_MAC_VER_45 : RTL_GIGA_MAC_VER_47; } else if (tp->mac_version == RTL_GIGA_MAC_VER_46) { - tp->mac_version = tp->mii.supports_gmii ? + tp->mac_version = tp->supports_gmii ? 
RTL_GIGA_MAC_VER_46 : RTL_GIGA_MAC_VER_48; } @@ -2553,7 +2247,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp, static void rtl8169_print_mac_version(struct rtl8169_private *tp) { - dprintk("mac_version = 0x%02x\n", tp->mac_version); + netif_dbg(tp, drv, tp->dev, "mac_version = 0x%02x\n", tp->mac_version); } struct phy_reg { @@ -4405,62 +4099,16 @@ static void rtl_hw_phy_config(struct net_device *dev) } } -static void rtl_phy_work(struct rtl8169_private *tp) -{ - struct timer_list *timer = &tp->timer; - unsigned long timeout = RTL8169_PHY_TIMEOUT; - - assert(tp->mac_version > RTL_GIGA_MAC_VER_01); - - if (tp->phy_reset_pending(tp)) { - /* - * A busy loop could burn quite a few cycles on nowadays CPU. - * Let's delay the execution of the timer for a few ticks. - */ - timeout = HZ/10; - goto out_mod_timer; - } - - if (tp->link_ok(tp)) - return; - - netif_dbg(tp, link, tp->dev, "PHY reset until link up\n"); - - tp->phy_reset_enable(tp); - -out_mod_timer: - mod_timer(timer, jiffies + timeout); -} - static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag) { if (!test_and_set_bit(flag, tp->wk.flags)) schedule_work(&tp->wk.work); } -static void rtl8169_phy_timer(struct timer_list *t) -{ - struct rtl8169_private *tp = from_timer(tp, t, timer); - - rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING); -} - -DECLARE_RTL_COND(rtl_phy_reset_cond) -{ - return tp->phy_reset_pending(tp); -} - -static void rtl8169_phy_reset(struct net_device *dev, - struct rtl8169_private *tp) -{ - tp->phy_reset_enable(tp); - rtl_msleep_loop_wait_low(tp, &rtl_phy_reset_cond, 1, 100); -} - static bool rtl_tbi_enabled(struct rtl8169_private *tp) { return (tp->mac_version == RTL_GIGA_MAC_VER_01) && - (RTL_R8(tp, PHYstatus) & TBI_Enable); + (RTL_R8(tp, PHYstatus) & TBI_Enable); } static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) @@ -4468,7 +4116,8 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) rtl_hw_phy_config(dev); if (tp->mac_version <= RTL_GIGA_MAC_VER_06) { - dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); + netif_dbg(tp, drv, dev, + "Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); RTL_W8(tp, 0x82, 0x01); } @@ -4478,23 +4127,18 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08); if (tp->mac_version == RTL_GIGA_MAC_VER_02) { - dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); + netif_dbg(tp, drv, dev, + "Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); RTL_W8(tp, 0x82, 0x01); - dprintk("Set PHY Reg 0x0bh = 0x00h\n"); + netif_dbg(tp, drv, dev, + "Set PHY Reg 0x0bh = 0x00h\n"); rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0 } - rtl8169_phy_reset(dev, tp); + /* We may have called phy_speed_down before */ + phy_speed_up(dev->phydev); - rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL, - ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | - ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | - (tp->mii.supports_gmii ? 
- ADVERTISED_1000baseT_Half | - ADVERTISED_1000baseT_Full : 0)); - - if (rtl_tbi_enabled(tp)) - netif_info(tp, link, dev, "TBI auto-negotiating\n"); + genphy_soft_reset(dev->phydev); } static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) @@ -4539,34 +4183,10 @@ static int rtl_set_mac_address(struct net_device *dev, void *p) static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { - struct rtl8169_private *tp = netdev_priv(dev); - struct mii_ioctl_data *data = if_mii(ifr); - - return netif_running(dev) ? tp->do_ioctl(tp, data, cmd) : -ENODEV; -} - -static int rtl_xmii_ioctl(struct rtl8169_private *tp, - struct mii_ioctl_data *data, int cmd) -{ - switch (cmd) { - case SIOCGMIIPHY: - data->phy_id = 32; /* Internal PHY */ - return 0; - - case SIOCGMIIREG: - data->val_out = rtl_readphy(tp, data->reg_num & 0x1f); - return 0; - - case SIOCSMIIREG: - rtl_writephy(tp, data->reg_num & 0x1f, data->val_in); - return 0; - } - return -EOPNOTSUPP; -} + if (!netif_running(dev)) + return -ENODEV; -static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd) -{ - return -EOPNOTSUPP; + return phy_mii_ioctl(dev->phydev, ifr, cmd); } static void rtl_init_mdio_ops(struct rtl8169_private *tp) @@ -4594,30 +4214,6 @@ static void rtl_init_mdio_ops(struct rtl8169_private *tp) } } -static void rtl_speed_down(struct rtl8169_private *tp) -{ - u32 adv; - int lpa; - - rtl_writephy(tp, 0x1f, 0x0000); - lpa = rtl_readphy(tp, MII_LPA); - - if (lpa & (LPA_10HALF | LPA_10FULL)) - adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full; - else if (lpa & (LPA_100HALF | LPA_100FULL)) - adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | - ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; - else - adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | - ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | - (tp->mii.supports_gmii ? - ADVERTISED_1000baseT_Half | - ADVERTISED_1000baseT_Full : 0); - - rtl8169_set_speed(tp->dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL, - adv); -} - static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) { switch (tp->mac_version) { @@ -4639,56 +4235,15 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) static bool rtl_wol_pll_power_down(struct rtl8169_private *tp) { - if (!(__rtl8169_get_wol(tp) & WAKE_ANY)) + if (!netif_running(tp->dev) || !__rtl8169_get_wol(tp)) return false; - rtl_speed_down(tp); + phy_speed_down(tp->dev->phydev, false); rtl_wol_suspend_quirk(tp); return true; } -static void r8168_phy_power_up(struct rtl8169_private *tp) -{ - rtl_writephy(tp, 0x1f, 0x0000); - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_11: - case RTL_GIGA_MAC_VER_12: - case RTL_GIGA_MAC_VER_17 ... RTL_GIGA_MAC_VER_28: - case RTL_GIGA_MAC_VER_31: - rtl_writephy(tp, 0x0e, 0x0000); - break; - default: - break; - } - rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE); - - /* give MAC/PHY some time to resume */ - msleep(20); -} - -static void r8168_phy_power_down(struct rtl8169_private *tp) -{ - rtl_writephy(tp, 0x1f, 0x0000); - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_32: - case RTL_GIGA_MAC_VER_33: - case RTL_GIGA_MAC_VER_40: - case RTL_GIGA_MAC_VER_41: - rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN); - break; - - case RTL_GIGA_MAC_VER_11: - case RTL_GIGA_MAC_VER_12: - case RTL_GIGA_MAC_VER_17 ... 
RTL_GIGA_MAC_VER_28: - case RTL_GIGA_MAC_VER_31: - rtl_writephy(tp, 0x0e, 0x0200); - default: - rtl_writephy(tp, MII_BMCR, BMCR_PDOWN); - break; - } -} - static void r8168_pll_power_down(struct rtl8169_private *tp) { if (r8168_check_dash(tp)) @@ -4701,8 +4256,6 @@ static void r8168_pll_power_down(struct rtl8169_private *tp) if (rtl_wol_pll_power_down(tp)) return; - r8168_phy_power_down(tp); - switch (tp->mac_version) { case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33: case RTL_GIGA_MAC_VER_37: @@ -4754,7 +4307,9 @@ static void r8168_pll_power_up(struct rtl8169_private *tp) break; } - r8168_phy_power_up(tp); + phy_resume(tp->dev->phydev); + /* give MAC/PHY some time to resume */ + msleep(20); } static void rtl_pll_power_down(struct rtl8169_private *tp) @@ -5172,8 +4727,8 @@ static void rtl_hw_start_8169(struct rtl8169_private *tp) if (tp->mac_version == RTL_GIGA_MAC_VER_02 || tp->mac_version == RTL_GIGA_MAC_VER_03) { - dprintk("Set MAC Reg C+CR Offset 0xe0. " - "Bit-3 and bit-14 MUST be 1\n"); + netif_dbg(tp, drv, tp->dev, + "Set MAC Reg C+CR Offset 0xe0. Bit 3 and Bit 14 MUST be 1\n"); tp->cp_cmd |= (1 << 14); } @@ -5236,12 +4791,7 @@ static void rtl_csi_access_enable(struct rtl8169_private *tp, u8 val) rtl_csi_write(tp, 0x070c, csi | val << 24); } -static void rtl_csi_access_enable_1(struct rtl8169_private *tp) -{ - rtl_csi_access_enable(tp, 0x17); -} - -static void rtl_csi_access_enable_2(struct rtl8169_private *tp) +static void rtl_set_def_aspm_entry_latency(struct rtl8169_private *tp) { rtl_csi_access_enable(tp, 0x27); } @@ -5290,6 +4840,17 @@ static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable) RTL_W8(tp, Config3, data); } +static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable) +{ + if (enable) { + RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en); + } else { + RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + } +} + static void rtl_hw_start_8168bb(struct rtl8169_private *tp) { RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); @@ -5337,7 +4898,7 @@ static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp) { 0x07, 0, 0x2000 } }; - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); rtl_ephy_init(tp, e_info_8168cp, ARRAY_SIZE(e_info_8168cp)); @@ -5346,7 +4907,7 @@ static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp) static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp) { - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); @@ -5359,7 +4920,7 @@ static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp) static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp) { - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); @@ -5383,7 +4944,7 @@ static void rtl_hw_start_8168c_1(struct rtl8169_private *tp) { 0x06, 0x0080, 0x0000 } }; - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); RTL_W8(tp, DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2); @@ -5399,7 +4960,7 @@ static void rtl_hw_start_8168c_2(struct rtl8169_private *tp) { 0x03, 0x0400, 0x0220 } }; - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); rtl_ephy_init(tp, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2)); @@ -5413,14 +4974,14 @@ static void rtl_hw_start_8168c_3(struct rtl8169_private *tp) static void rtl_hw_start_8168c_4(struct rtl8169_private *tp) { - 
rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); __rtl_hw_start_8168cp(tp); } static void rtl_hw_start_8168d(struct rtl8169_private *tp) { - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); rtl_disable_clock_request(tp); @@ -5435,7 +4996,7 @@ static void rtl_hw_start_8168d(struct rtl8169_private *tp) static void rtl_hw_start_8168dp(struct rtl8169_private *tp) { - rtl_csi_access_enable_1(tp); + rtl_set_def_aspm_entry_latency(tp); if (tp->dev->mtu <= ETH_DATA_LEN) rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); @@ -5453,7 +5014,7 @@ static void rtl_hw_start_8168d_4(struct rtl8169_private *tp) { 0x0c, 0x0100, 0x0020 } }; - rtl_csi_access_enable_1(tp); + rtl_set_def_aspm_entry_latency(tp); rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); @@ -5482,7 +5043,7 @@ static void rtl_hw_start_8168e_1(struct rtl8169_private *tp) { 0x0a, 0x0000, 0x0040 } }; - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1)); @@ -5507,7 +5068,7 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp) { 0x19, 0x0000, 0x0224 } }; - rtl_csi_access_enable_1(tp); + rtl_set_def_aspm_entry_latency(tp); rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2)); @@ -5536,11 +5097,13 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp) RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN); RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en); + + rtl_hw_aspm_clkreq_enable(tp, true); } static void rtl_hw_start_8168f(struct rtl8169_private *tp) { - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); @@ -5611,7 +5174,7 @@ static void rtl_hw_start_8168g(struct rtl8169_private *tp) rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC); rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); - rtl_csi_access_enable_1(tp); + rtl_set_def_aspm_entry_latency(tp); rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); @@ -5646,9 +5209,9 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp) rtl_hw_start_8168g(tp); /* disable aspm and clock request before access ephy */ - RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); - RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + rtl_hw_aspm_clkreq_enable(tp, false); rtl_ephy_init(tp, e_info_8168g_1, ARRAY_SIZE(e_info_8168g_1)); + rtl_hw_aspm_clkreq_enable(tp, true); } static void rtl_hw_start_8168g_2(struct rtl8169_private *tp) @@ -5681,9 +5244,9 @@ static void rtl_hw_start_8411_2(struct rtl8169_private *tp) rtl_hw_start_8168g(tp); /* disable aspm and clock request before access ephy */ - RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); - RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + rtl_hw_aspm_clkreq_enable(tp, false); rtl_ephy_init(tp, e_info_8411_2, ARRAY_SIZE(e_info_8411_2)); + rtl_hw_aspm_clkreq_enable(tp, true); } static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) @@ -5700,8 +5263,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) }; /* disable aspm and clock request before access ephy */ - RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); - RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + rtl_hw_aspm_clkreq_enable(tp, false); rtl_ephy_init(tp, e_info_8168h_1, ARRAY_SIZE(e_info_8168h_1)); RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO); @@ -5711,7 +5273,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) 
rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC); rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); - rtl_csi_access_enable_1(tp); + rtl_set_def_aspm_entry_latency(tp); rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); @@ -5780,6 +5342,8 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) r8168_mac_ocp_write(tp, 0xe63e, 0x0000); r8168_mac_ocp_write(tp, 0xc094, 0x0000); r8168_mac_ocp_write(tp, 0xc09e, 0x0000); + + rtl_hw_aspm_clkreq_enable(tp, true); } static void rtl_hw_start_8168ep(struct rtl8169_private *tp) @@ -5793,7 +5357,7 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp) rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x5f, ERIAR_EXGMAC); rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); - rtl_csi_access_enable_1(tp); + rtl_set_def_aspm_entry_latency(tp); rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); @@ -5831,11 +5395,12 @@ static void rtl_hw_start_8168ep_1(struct rtl8169_private *tp) }; /* disable aspm and clock request before access ephy */ - RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); - RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + rtl_hw_aspm_clkreq_enable(tp, false); rtl_ephy_init(tp, e_info_8168ep_1, ARRAY_SIZE(e_info_8168ep_1)); rtl_hw_start_8168ep(tp); + + rtl_hw_aspm_clkreq_enable(tp, true); } static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp) @@ -5847,14 +5412,15 @@ static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp) }; /* disable aspm and clock request before access ephy */ - RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); - RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + rtl_hw_aspm_clkreq_enable(tp, false); rtl_ephy_init(tp, e_info_8168ep_2, ARRAY_SIZE(e_info_8168ep_2)); rtl_hw_start_8168ep(tp); RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); + + rtl_hw_aspm_clkreq_enable(tp, true); } static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp) @@ -5868,8 +5434,7 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp) }; /* disable aspm and clock request before access ephy */ - RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); - RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + rtl_hw_aspm_clkreq_enable(tp, false); rtl_ephy_init(tp, e_info_8168ep_3, ARRAY_SIZE(e_info_8168ep_3)); rtl_hw_start_8168ep(tp); @@ -5889,6 +5454,8 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp) data = r8168_mac_ocp_read(tp, 0xe860); data |= 0x0080; r8168_mac_ocp_write(tp, 0xe860, data); + + rtl_hw_aspm_clkreq_enable(tp, true); } static void rtl_hw_start_8168(struct rtl8169_private *tp) @@ -6006,8 +5573,9 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp) break; default: - printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n", - tp->dev->name, tp->mac_version); + netif_err(tp, drv, tp->dev, + "unknown chipset (mac_version = %d)\n", + tp->mac_version); break; } } @@ -6026,7 +5594,7 @@ static void rtl_hw_start_8102e_1(struct rtl8169_private *tp) }; u8 cfg1; - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); RTL_W8(tp, DBG_REG, FIX_NAK_1); @@ -6045,7 +5613,7 @@ static void rtl_hw_start_8102e_1(struct rtl8169_private *tp) static void rtl_hw_start_8102e_2(struct rtl8169_private *tp) { - rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); @@ -6100,7 +5668,7 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp) { 0x1e, 0, 0x4000 } }; - 
rtl_csi_access_enable_2(tp); + rtl_set_def_aspm_entry_latency(tp); /* Force LAN exit from ASPM if Rx/Tx are not idle */ RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800); @@ -6384,7 +5952,6 @@ static void rtl_reset_work(struct rtl8169_private *tp) napi_enable(&tp->napi); rtl_hw_start(tp); netif_wake_queue(dev); - rtl8169_check_link_status(dev, tp); } static void rtl8169_tx_timeout(struct net_device *dev) @@ -7001,7 +6568,7 @@ static void rtl_slow_event_work(struct rtl8169_private *tp) rtl8169_pcierr_interrupt(dev); if (status & LinkChg) - rtl8169_check_link_status(dev, tp); + phy_mac_interrupt(dev->phydev); rtl_irq_enable_all(tp); } @@ -7015,7 +6582,6 @@ static void rtl_task(struct work_struct *work) /* XXX - keep rtl_slow_event_work() as first element. */ { RTL_FLAG_TASK_SLOW_PENDING, rtl_slow_event_work }, { RTL_FLAG_TASK_RESET_PENDING, rtl_reset_work }, - { RTL_FLAG_TASK_PHY_PENDING, rtl_phy_work } }; struct rtl8169_private *tp = container_of(work, struct rtl8169_private, wk.work); @@ -7084,11 +6650,51 @@ static void rtl8169_rx_missed(struct net_device *dev) RTL_W32(tp, RxMissed, 0); } +static void r8169_phylink_handler(struct net_device *ndev) +{ + struct rtl8169_private *tp = netdev_priv(ndev); + + if (netif_carrier_ok(ndev)) { + rtl_link_chg_patch(tp); + pm_request_resume(&tp->pci_dev->dev); + } else { + pm_runtime_idle(&tp->pci_dev->dev); + } + + if (net_ratelimit()) + phy_print_status(ndev->phydev); +} + +static int r8169_phy_connect(struct rtl8169_private *tp) +{ + struct phy_device *phydev = mdiobus_get_phy(tp->mii_bus, 0); + phy_interface_t phy_mode; + int ret; + + phy_mode = tp->supports_gmii ? PHY_INTERFACE_MODE_GMII : + PHY_INTERFACE_MODE_MII; + + ret = phy_connect_direct(tp->dev, phydev, r8169_phylink_handler, + phy_mode); + if (ret) + return ret; + + if (!tp->supports_gmii) + phy_set_max_speed(phydev, SPEED_100); + + /* Ensure to advertise everything, incl. 
pause */ + phydev->advertising = phydev->supported; + + phy_attached_info(phydev); + + return 0; +} + static void rtl8169_down(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); - del_timer_sync(&tp->timer); + phy_stop(dev->phydev); napi_disable(&tp->napi); netif_stop_queue(dev); @@ -7129,6 +6735,8 @@ static int rtl8169_close(struct net_device *dev) cancel_work_sync(&tp->wk.work); + phy_disconnect(dev->phydev); + pci_free_irq(pdev, 0, tp); dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, @@ -7189,6 +6797,10 @@ static int rtl_open(struct net_device *dev) if (retval < 0) goto err_release_fw_2; + retval = r8169_phy_connect(tp); + if (retval) + goto err_free_irq; + rtl_lock_work(tp); set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); @@ -7204,17 +6816,17 @@ static int rtl_open(struct net_device *dev) if (!rtl8169_init_counter_offsets(tp)) netif_warn(tp, hw, dev, "counter reset/update failed\n"); + phy_start(dev->phydev); netif_start_queue(dev); rtl_unlock_work(tp); - tp->saved_wolopts = 0; pm_runtime_put_sync(&pdev->dev); - - rtl8169_check_link_status(dev, tp); out: return retval; +err_free_irq: + pci_free_irq(pdev, 0, tp); err_release_fw_2: rtl_release_firmware(tp); rtl8169_rx_clear(tp); @@ -7293,6 +6905,7 @@ static void rtl8169_net_suspend(struct net_device *dev) if (!netif_running(dev)) return; + phy_stop(dev->phydev); netif_device_detach(dev); netif_stop_queue(dev); @@ -7323,6 +6936,9 @@ static void __rtl8169_resume(struct net_device *dev) netif_device_attach(dev); rtl_pll_power_up(tp); + rtl8169_init_phy(dev, tp); + + phy_start(tp->dev->phydev); rtl_lock_work(tp); napi_enable(&tp->napi); @@ -7336,9 +6952,6 @@ static int rtl8169_resume(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); struct net_device *dev = pci_get_drvdata(pdev); - struct rtl8169_private *tp = netdev_priv(dev); - - rtl8169_init_phy(dev, tp); if (netif_running(dev)) __rtl8169_resume(dev); @@ -7352,13 +6965,10 @@ static int rtl8169_runtime_suspend(struct device *device) struct net_device *dev = pci_get_drvdata(pdev); struct rtl8169_private *tp = netdev_priv(dev); - if (!tp->TxDescArray) { - rtl_pll_power_down(tp); + if (!tp->TxDescArray) return 0; - } rtl_lock_work(tp); - tp->saved_wolopts = __rtl8169_get_wol(tp); __rtl8169_set_wol(tp, WAKE_ANY); rtl_unlock_work(tp); @@ -7383,11 +6993,8 @@ static int rtl8169_runtime_resume(struct device *device) rtl_lock_work(tp); __rtl8169_set_wol(tp, tp->saved_wolopts); - tp->saved_wolopts = 0; rtl_unlock_work(tp); - rtl8169_init_phy(dev, tp); - __rtl8169_resume(dev); return 0; @@ -7455,7 +7062,7 @@ static void rtl_shutdown(struct pci_dev *pdev) rtl8169_hw_reset(tp); if (system_state == SYSTEM_POWER_OFF) { - if (__rtl8169_get_wol(tp) & WAKE_ANY) { + if (tp->saved_wolopts) { rtl_wol_suspend_quirk(tp); rtl_wol_shutdown_quirk(tp); } @@ -7476,6 +7083,7 @@ static void rtl_remove_one(struct pci_dev *pdev) netif_napi_del(&tp->napi); unregister_netdev(dev); + mdiobus_unregister(tp->mii_bus); rtl_release_firmware(tp); @@ -7561,6 +7169,68 @@ DECLARE_RTL_COND(rtl_rxtx_empty_cond) return (RTL_R8(tp, MCU) & RXTX_EMPTY) == RXTX_EMPTY; } +static int r8169_mdio_read_reg(struct mii_bus *mii_bus, int phyaddr, int phyreg) +{ + struct rtl8169_private *tp = mii_bus->priv; + + if (phyaddr > 0) + return -ENODEV; + + return rtl_readphy(tp, phyreg); +} + +static int r8169_mdio_write_reg(struct mii_bus *mii_bus, int phyaddr, + int phyreg, u16 val) +{ + struct rtl8169_private *tp = mii_bus->priv; + + if (phyaddr > 0) + return -ENODEV; + + rtl_writephy(tp, 
phyreg, val); + + return 0; +} + +static int r8169_mdio_register(struct rtl8169_private *tp) +{ + struct pci_dev *pdev = tp->pci_dev; + struct phy_device *phydev; + struct mii_bus *new_bus; + int ret; + + new_bus = devm_mdiobus_alloc(&pdev->dev); + if (!new_bus) + return -ENOMEM; + + new_bus->name = "r8169"; + new_bus->priv = tp; + new_bus->parent = &pdev->dev; + new_bus->irq[0] = PHY_IGNORE_INTERRUPT; + snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x", + PCI_DEVID(pdev->bus->number, pdev->devfn)); + + new_bus->read = r8169_mdio_read_reg; + new_bus->write = r8169_mdio_write_reg; + + ret = mdiobus_register(new_bus); + if (ret) + return ret; + + phydev = mdiobus_get_phy(new_bus, 0); + if (!phydev) { + mdiobus_unregister(new_bus); + return -ENODEV; + } + + /* PHY will be woken up in rtl_open() */ + phy_suspend(phydev); + + tp->mii_bus = new_bus; + + return 0; +} + static void rtl_hw_init_8168g(struct rtl8169_private *tp) { u32 data; @@ -7618,7 +7288,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data; struct rtl8169_private *tp; - struct mii_if_info *mii; struct net_device *dev; int chipset, region, i; int rc; @@ -7638,19 +7307,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp->dev = dev; tp->pci_dev = pdev; tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT); - - mii = &tp->mii; - mii->dev = dev; - mii->mdio_read = rtl_mdio_read; - mii->mdio_write = rtl_mdio_write; - mii->phy_id_mask = 0x1f; - mii->reg_num_mask = 0x1f; - mii->supports_gmii = cfg->has_gmii; - - /* disable ASPM completely as that cause random device stop working - * problems as well as full system hangs for some PCIe devices users */ - pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | - PCIE_LINK_STATE_CLKPM); + tp->supports_gmii = cfg->has_gmii; /* enable device (incl. 
PCI PM wakeup and hotplug setup) */ rc = pcim_enable_device(pdev); @@ -7689,6 +7346,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* Identify chip attached to board */ rtl8169_get_mac_version(tp, cfg->default_ver); + if (rtl_tbi_enabled(tp)) { + dev_err(&pdev->dev, "TBI fiber mode not supported\n"); + return -ENODEV; + } + tp->cp_cmd = RTL_R16(tp, CPlusCmd); if ((sizeof(dma_addr_t) > 4) && @@ -7736,22 +7398,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp->saved_wolopts = __rtl8169_get_wol(tp); - if (rtl_tbi_enabled(tp)) { - tp->set_speed = rtl8169_set_speed_tbi; - tp->get_link_ksettings = rtl8169_get_link_ksettings_tbi; - tp->phy_reset_enable = rtl8169_tbi_reset_enable; - tp->phy_reset_pending = rtl8169_tbi_reset_pending; - tp->link_ok = rtl8169_tbi_link_ok; - tp->do_ioctl = rtl_tbi_ioctl; - } else { - tp->set_speed = rtl8169_set_speed_xmii; - tp->get_link_ksettings = rtl8169_get_link_ksettings_xmii; - tp->phy_reset_enable = rtl8169_xmii_reset_enable; - tp->phy_reset_pending = rtl8169_xmii_reset_pending; - tp->link_ok = rtl8169_xmii_link_ok; - tp->do_ioctl = rtl_xmii_ioctl; - } - mutex_init(&tp->wk.mutex); u64_stats_init(&tp->rx_stats.syncp); u64_stats_init(&tp->tx_stats.syncp); @@ -7823,8 +7469,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp->event_slow = cfg->event_slow; tp->coalesce_info = cfg->coalesce_info; - timer_setup(&tp->timer, rtl8169_phy_timer, 0); - tp->rtl_fw = RTL_FIRMWARE_UNKNOWN; tp->counters = dmam_alloc_coherent (&pdev->dev, sizeof(*tp->counters), @@ -7835,10 +7479,17 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_drvdata(pdev, dev); - rc = register_netdev(dev); - if (rc < 0) + rc = r8169_mdio_register(tp); + if (rc) return rc; + /* chip gets powered up in rtl_open() */ + rtl_pll_power_down(tp); + + rc = register_netdev(dev); + if (rc) + goto err_mdio_unregister; + netif_info(tp, probe, dev, "%s, %pM, XID %08x, IRQ %d\n", rtl_chip_infos[chipset].name, dev->dev_addr, (u32)(RTL_R32(tp, TxConfig) & 0xfcf0f8ff), @@ -7853,12 +7504,14 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (r8168_check_dash(tp)) rtl8168_driver_start(tp); - netif_carrier_off(dev); - if (pci_dev_run_wake(pdev)) pm_runtime_put_sync(&pdev->dev); return 0; + +err_mdio_unregister: + mdiobus_unregister(tp->mii_bus); + return rc; } static struct pci_driver rtl8169_pci_driver = { diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 0d811c02ff34..c06f2df895c2 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -1167,7 +1167,7 @@ static int ravb_get_sset_count(struct net_device *netdev, int sset) } static void ravb_get_ethtool_stats(struct net_device *ndev, - struct ethtool_stats *stats, u64 *data) + struct ethtool_stats *estats, u64 *data) { struct ravb_private *priv = netdev_priv(ndev); int i = 0; @@ -1199,7 +1199,7 @@ static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data) { switch (stringset) { case ETH_SS_STATS: - memcpy(data, *ravb_gstrings_stats, sizeof(ravb_gstrings_stats)); + memcpy(data, ravb_gstrings_stats, sizeof(ravb_gstrings_stats)); break; } } @@ -1564,7 +1564,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) /* TAG and timestamp required flag */ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR; - 
desc->ds_tagl |= le16_to_cpu(ts_skb->tag << 12); + desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12); } skb_tx_timestamp(skb); @@ -1597,7 +1597,8 @@ drop: } static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { /* If skb needs TX timestamp, it is handled in network control queue */ return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC : diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 5614fd231bbe..5573199c4536 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -439,10 +439,15 @@ static void sh_eth_modify(struct net_device *ndev, int enum_index, u32 clear, enum_index); } +static u16 sh_eth_tsu_get_offset(struct sh_eth_private *mdp, int enum_index) +{ + return mdp->reg_offset[enum_index]; +} + static void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data, int enum_index) { - u16 offset = mdp->reg_offset[enum_index]; + u16 offset = sh_eth_tsu_get_offset(mdp, enum_index); if (WARN_ON(offset == SH_ETH_OFFSET_INVALID)) return; @@ -452,7 +457,7 @@ static void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data, static u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index) { - u16 offset = mdp->reg_offset[enum_index]; + u16 offset = sh_eth_tsu_get_offset(mdp, enum_index); if (WARN_ON(offset == SH_ETH_OFFSET_INVALID)) return ~0U; @@ -622,7 +627,6 @@ static struct sh_eth_cpu_data r7s72100_data = { .tpauser = 1, .hw_swap = 1, .rpadir = 1, - .rpadir_value = 2 << 16, .no_trimd = 1, .no_ade = 1, .xdfar_rw = 1, @@ -672,7 +676,6 @@ static struct sh_eth_cpu_data r8a7740_data = { .bculr = 1, .hw_swap = 1, .rpadir = 1, - .rpadir_value = 2 << 16, .no_trimd = 1, .no_ade = 1, .xdfar_rw = 1, @@ -798,7 +801,6 @@ static struct sh_eth_cpu_data r8a77980_data = { .hw_swap = 1, .nbst = 1, .rpadir = 1, - .rpadir_value = 2 << 16, .no_trimd = 1, .no_ade = 1, .xdfar_rw = 1, @@ -851,7 +853,6 @@ static struct sh_eth_cpu_data sh7724_data = { .tpauser = 1, .hw_swap = 1, .rpadir = 1, - .rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */ }; static void sh_eth_set_rate_sh7757(struct net_device *ndev) @@ -898,7 +899,6 @@ static struct sh_eth_cpu_data sh7757_data = { .hw_swap = 1, .no_ade = 1, .rpadir = 1, - .rpadir_value = 2 << 16, .rtrate = 1, .dual_port = 1, }; @@ -978,7 +978,6 @@ static struct sh_eth_cpu_data sh7757_data_giga = { .bculr = 1, .hw_swap = 1, .rpadir = 1, - .rpadir_value = 2 << 16, .no_trimd = 1, .no_ade = 1, .xdfar_rw = 1, @@ -1467,7 +1466,7 @@ static int sh_eth_dev_init(struct net_device *ndev) /* Descriptor format */ sh_eth_ring_format(ndev); if (mdp->cd->rpadir) - sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR); + sh_eth_write(ndev, NET_IP_ALIGN << 16, RPADIR); /* all sh_eth int mask */ sh_eth_write(ndev, 0, EESIPR); @@ -1527,9 +1526,9 @@ static int sh_eth_dev_init(struct net_device *ndev) /* mask reset */ if (mdp->cd->apr) - sh_eth_write(ndev, APR_AP, APR); + sh_eth_write(ndev, 1, APR); if (mdp->cd->mpr) - sh_eth_write(ndev, MPR_MP, MPR); + sh_eth_write(ndev, 1, MPR); if (mdp->cd->tpauser) sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER); @@ -2677,34 +2676,36 @@ static int sh_eth_tsu_busy(struct net_device *ndev) return 0; } -static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg, +static int sh_eth_tsu_write_entry(struct net_device *ndev, u16 offset, const u8 *addr) { + struct sh_eth_private *mdp = netdev_priv(ndev); u32 val; 
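The sh_eth change here switches the TSU helpers from passing a pre-computed pointer to passing a plain register offset, so sh_eth_tsu_write_entry() and sh_eth_tsu_read_entry() resolve mdp->tsu_addr + offset themselves. The CAM entry format they operate on is simple: bytes 0-3 of the MAC sit in the first 32-bit register, bytes 4-5 in the low half of the second. A minimal userspace sketch of that packing (function names invented for illustration; the driver does the equivalent through iowrite32()/ioread32()):

#include <stdint.h>
#include <stdio.h>

static void pack_mac(const uint8_t *a, uint32_t w[2])
{
	w[0] = (uint32_t)a[0] << 24 | a[1] << 16 | a[2] << 8 | a[3];
	w[1] = (uint32_t)a[4] << 8 | a[5];
}

static void unpack_mac(const uint32_t w[2], uint8_t *a)
{
	a[0] = (uint8_t)(w[0] >> 24);
	a[1] = (uint8_t)(w[0] >> 16);
	a[2] = (uint8_t)(w[0] >> 8);
	a[3] = (uint8_t)w[0];
	a[4] = (uint8_t)(w[1] >> 8);
	a[5] = (uint8_t)w[1];
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint8_t back[6];
	uint32_t w[2];

	pack_mac(mac, w);
	printf("%08x %08x\n", w[0], w[1]);	/* 00112233 00004455 */
	unpack_mac(w, back);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       back[0], back[1], back[2], back[3], back[4], back[5]);
	return 0;
}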
val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3]; - iowrite32(val, reg); + iowrite32(val, mdp->tsu_addr + offset); if (sh_eth_tsu_busy(ndev) < 0) return -EBUSY; val = addr[4] << 8 | addr[5]; - iowrite32(val, reg + 4); + iowrite32(val, mdp->tsu_addr + offset + 4); if (sh_eth_tsu_busy(ndev) < 0) return -EBUSY; return 0; } -static void sh_eth_tsu_read_entry(void *reg, u8 *addr) +static void sh_eth_tsu_read_entry(struct net_device *ndev, u16 offset, u8 *addr) { + struct sh_eth_private *mdp = netdev_priv(ndev); u32 val; - val = ioread32(reg); + val = ioread32(mdp->tsu_addr + offset); addr[0] = (val >> 24) & 0xff; addr[1] = (val >> 16) & 0xff; addr[2] = (val >> 8) & 0xff; addr[3] = val & 0xff; - val = ioread32(reg + 4); + val = ioread32(mdp->tsu_addr + offset + 4); addr[4] = (val >> 8) & 0xff; addr[5] = val & 0xff; } @@ -2713,12 +2714,12 @@ static void sh_eth_tsu_read_entry(void *reg, u8 *addr) static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr) { struct sh_eth_private *mdp = netdev_priv(ndev); - void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); + u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); int i; u8 c_addr[ETH_ALEN]; for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) { - sh_eth_tsu_read_entry(reg_offset, c_addr); + sh_eth_tsu_read_entry(ndev, reg_offset, c_addr); if (ether_addr_equal(addr, c_addr)) return i; } @@ -2740,7 +2741,7 @@ static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev, int entry) { struct sh_eth_private *mdp = netdev_priv(ndev); - void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); + u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); int ret; u8 blank[ETH_ALEN]; @@ -2757,7 +2758,7 @@ static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev, static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr) { struct sh_eth_private *mdp = netdev_priv(ndev); - void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); + u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); int i, ret; if (!mdp->cd->tsu) @@ -2831,15 +2832,15 @@ static int sh_eth_tsu_purge_all(struct net_device *ndev) static void sh_eth_tsu_purge_mcast(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); + u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); u8 addr[ETH_ALEN]; - void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); int i; if (!mdp->cd->tsu) return; for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) { - sh_eth_tsu_read_entry(reg_offset, addr); + sh_eth_tsu_read_entry(ndev, reg_offset, addr); if (is_multicast_ether_addr(addr)) sh_eth_tsu_del_entry(ndev, addr); } diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 726c55a82dd7..f94be99cf400 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -383,12 +383,12 @@ enum ECSIPR_STATUS_MASK_BIT { /* APR */ enum APR_BIT { - APR_AP = 0x00000001, + APR_AP = 0x0000ffff, }; /* MPR */ enum MPR_BIT { - MPR_MP = 0x00000001, + MPR_MP = 0x0000ffff, }; /* TRSCER */ @@ -403,8 +403,7 @@ enum DESC_I_BIT { /* RPADIR */ enum RPADIR_BIT { - RPADIR_PADS1 = 0x20000, RPADIR_PADS0 = 0x10000, - RPADIR_PADR = 0x0003f, + RPADIR_PADS = 0x1f0000, RPADIR_PADR = 0xffff, }; /* FDR */ @@ -488,7 +487,6 @@ struct sh_eth_cpu_data { u32 ecsipr_value; u32 fdr_value; u32 fcftr_value; - u32 rpadir_value; /* interrupt checking mask */ u32 tx_check; @@ -560,10 +558,4 @@ struct sh_eth_private { unsigned wol_enabled:1; }; -static inline void 
*sh_eth_tsu_get_offset(struct sh_eth_private *mdp, - int enum_index) -{ - return mdp->tsu_addr + mdp->reg_offset[enum_index]; -} - #endif /* #ifndef __SH_ETH_H__ */ diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile index 3bac58d0f88b..c5c297e78d06 100644 --- a/drivers/net/ethernet/sfc/Makefile +++ b/drivers/net/ethernet/sfc/Makefile @@ -6,3 +6,5 @@ sfc-$(CONFIG_SFC_MTD) += mtd.o sfc-$(CONFIG_SFC_SRIOV) += sriov.o siena_sriov.o ef10_sriov.o obj-$(CONFIG_SFC) += sfc.o + +obj-$(CONFIG_SFC_FALCON) += falcon/ diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c index 019cef1d3cf7..3d76fd1504c2 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.c +++ b/drivers/net/ethernet/sfc/ef10_sriov.c @@ -199,7 +199,7 @@ static int efx_ef10_sriov_alloc_vf_vswitching(struct efx_nic *efx) return -ENOMEM; for (i = 0; i < efx->vf_count; i++) { - random_ether_addr(nic_data->vf[i].mac); + eth_random_addr(nic_data->vf[i].mac); nic_data->vf[i].efx = NULL; nic_data->vf[i].vlan = EFX_EF10_NO_VLAN; @@ -564,7 +564,7 @@ int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan, { struct efx_ef10_nic_data *nic_data = efx->nic_data; struct ef10_vf *vf; - u16 old_vlan, new_vlan; + u16 new_vlan; int rc = 0, rc2 = 0; if (vf_i >= efx->vf_count) @@ -619,7 +619,6 @@ int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan, } /* Do the actual vlan change */ - old_vlan = vf->vlan; vf->vlan = new_vlan; /* Restore everything in reverse order */ diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index ce3a177081a8..330233286e78 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -264,11 +264,17 @@ static int efx_check_disabled(struct efx_nic *efx) static int efx_process_channel(struct efx_channel *channel, int budget) { struct efx_tx_queue *tx_queue; + struct list_head rx_list; int spent; if (unlikely(!channel->enabled)) return 0; + /* Prepare the batch receive list */ + EFX_WARN_ON_PARANOID(channel->rx_list != NULL); + INIT_LIST_HEAD(&rx_list); + channel->rx_list = &rx_list; + efx_for_each_channel_tx_queue(tx_queue, channel) { tx_queue->pkts_compl = 0; tx_queue->bytes_compl = 0; @@ -291,6 +297,10 @@ static int efx_process_channel(struct efx_channel *channel, int budget) } } + /* Receive any packets we queued up */ + netif_receive_skb_list(channel->rx_list); + channel->rx_list = NULL; + return spent; } @@ -555,6 +565,8 @@ static int efx_probe_channel(struct efx_channel *channel) goto fail; } + channel->rx_list = NULL; + return 0; fail: diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 65568925c3ef..961b92979640 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -448,6 +448,7 @@ enum efx_sync_events_state { * __efx_rx_packet(), or zero if there is none * @rx_pkt_index: Ring index of first buffer for next packet to be delivered * by __efx_rx_packet(), if @rx_pkt_n_frags != 0 + * @rx_list: list of SKBs from current RX, awaiting processing * @rx_queue: RX queue for this channel * @tx_queue: TX queues for this channel * @sync_events_state: Current state of sync events on this channel @@ -500,6 +501,8 @@ struct efx_channel { unsigned int rx_pkt_n_frags; unsigned int rx_pkt_index; + struct list_head *rx_list; + struct efx_rx_queue rx_queue; struct efx_tx_queue tx_queue[EFX_TXQ_TYPES]; diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index d2e254f2f72b..396ff01298cd 
100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -634,7 +634,12 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh, return; /* Pass the packet up */ - netif_receive_skb(skb); + if (channel->rx_list != NULL) + /* Add to list, will pass up later */ + list_add_tail(&skb->list, channel->rx_list); + else + /* No list, so pass it up now */ + netif_receive_skb(skb); } /* Handle a received packet. Second half: Touches packet payload. */ diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c index 949aaef390b6..15c62c160953 100644 --- a/drivers/net/ethernet/smsc/epic100.c +++ b/drivers/net/ethernet/smsc/epic100.c @@ -321,7 +321,6 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) static int card_idx = -1; void __iomem *ioaddr; int chip_idx = (int) ent->driver_data; - int irq; struct net_device *dev; struct epic_private *ep; int i, ret, option = 0, duplex = 0; @@ -338,7 +337,6 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) ret = pci_enable_device(pdev); if (ret) goto out; - irq = pdev->irq; if (pci_resource_len(pdev, 0) < EPIC_TOTAL_SIZE) { dev_err(&pdev->dev, "no PCI region space\n"); diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index e080d3e7c582..01589b6982e4 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -780,11 +780,9 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget) static int netsec_napi_poll(struct napi_struct *napi, int budget) { struct netsec_priv *priv; - struct net_device *ndev; int tx, rx, done, todo; priv = container_of(napi, struct netsec_priv, napi); - ndev = priv->ndev; todo = budget; do { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index f08625a02cea..7b923362ee55 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -61,6 +61,7 @@ struct rk_priv_data { struct clk *mac_clk_tx; struct clk *clk_mac_ref; struct clk *clk_mac_refout; + struct clk *clk_mac_speed; struct clk *aclk_mac; struct clk *pclk_mac; struct clk *clk_phy; @@ -83,6 +84,64 @@ struct rk_priv_data { (((tx) ? soc##_GMAC_TXCLK_DLY_ENABLE : soc##_GMAC_TXCLK_DLY_DISABLE) | \ ((rx) ? 
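The sfc receive-path change above is the batched-delivery pattern that arrived together with netif_receive_skb_list(): during one NAPI poll the driver parks completed skbs on an on-stack list (published via channel->rx_list) and hands the whole batch to the stack in a single call, rather than entering the stack once per packet with netif_receive_skb(). A condensed sketch of the shape, with hypothetical mydrv_* names and the hardware-specific dequeue left as a stub:

#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct mydrv_channel {
	struct napi_struct napi;
	/* ... device state ... */
};

/* hw-specific dequeue of one completed RX skb, not shown */
static struct sk_buff *mydrv_next_rx_skb(struct mydrv_channel *ch);

static int mydrv_poll(struct napi_struct *napi, int budget)
{
	struct mydrv_channel *ch = container_of(napi, struct mydrv_channel,
						napi);
	struct list_head rx_list;
	struct sk_buff *skb;
	int spent = 0;

	INIT_LIST_HEAD(&rx_list);

	while (spent < budget && (skb = mydrv_next_rx_skb(ch))) {
		list_add_tail(&skb->list, &rx_list);	/* defer delivery */
		spent++;
	}

	/* one stack entry per batch; safe on an empty list */
	netif_receive_skb_list(&rx_list);

	if (spent < budget)
		napi_complete_done(napi, spent);
	return spent;
}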
soc##_GMAC_RXCLK_DLY_ENABLE : soc##_GMAC_RXCLK_DLY_DISABLE)) +#define PX30_GRF_GMAC_CON1 0x0904 + +/* PX30_GRF_GMAC_CON1 */ +#define PX30_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | \ + GRF_BIT(6)) +#define PX30_GMAC_SPEED_10M GRF_CLR_BIT(2) +#define PX30_GMAC_SPEED_100M GRF_BIT(2) + +static void px30_set_to_rmii(struct rk_priv_data *bsp_priv) +{ + struct device *dev = &bsp_priv->pdev->dev; + + if (IS_ERR(bsp_priv->grf)) { + dev_err(dev, "%s: Missing rockchip,grf property\n", __func__); + return; + } + + regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1, + PX30_GMAC_PHY_INTF_SEL_RMII); +} + +static void px30_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) +{ + struct device *dev = &bsp_priv->pdev->dev; + int ret; + + if (IS_ERR(bsp_priv->clk_mac_speed)) { + dev_err(dev, "%s: Missing clk_mac_speed clock\n", __func__); + return; + } + + if (speed == 10) { + regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1, + PX30_GMAC_SPEED_10M); + + ret = clk_set_rate(bsp_priv->clk_mac_speed, 2500000); + if (ret) + dev_err(dev, "%s: set clk_mac_speed rate 2500000 failed: %d\n", + __func__, ret); + } else if (speed == 100) { + regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1, + PX30_GMAC_SPEED_100M); + + ret = clk_set_rate(bsp_priv->clk_mac_speed, 25000000); + if (ret) + dev_err(dev, "%s: set clk_mac_speed rate 25000000 failed: %d\n", + __func__, ret); + + } else { + dev_err(dev, "unknown speed value for RMII! speed=%d", speed); + } +} + +static const struct rk_gmac_ops px30_ops = { + .set_to_rmii = px30_set_to_rmii, + .set_rmii_speed = px30_set_rmii_speed, +}; + #define RK3128_GRF_MAC_CON0 0x0168 #define RK3128_GRF_MAC_CON1 0x016c @@ -1042,6 +1101,10 @@ static int rk_gmac_clk_init(struct plat_stmmacenet_data *plat) } } + bsp_priv->clk_mac_speed = devm_clk_get(dev, "clk_mac_speed"); + if (IS_ERR(bsp_priv->clk_mac_speed)) + dev_err(dev, "cannot get clock %s\n", "clk_mac_speed"); + if (bsp_priv->clock_input) { dev_info(dev, "clock input from PHY\n"); } else { @@ -1094,6 +1157,9 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable) if (!IS_ERR(bsp_priv->mac_clk_tx)) clk_prepare_enable(bsp_priv->mac_clk_tx); + if (!IS_ERR(bsp_priv->clk_mac_speed)) + clk_prepare_enable(bsp_priv->clk_mac_speed); + /** * if (!IS_ERR(bsp_priv->clk_mac)) * clk_prepare_enable(bsp_priv->clk_mac); @@ -1118,6 +1184,8 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable) clk_disable_unprepare(bsp_priv->pclk_mac); clk_disable_unprepare(bsp_priv->mac_clk_tx); + + clk_disable_unprepare(bsp_priv->clk_mac_speed); /** * if (!IS_ERR(bsp_priv->clk_mac)) * clk_disable_unprepare(bsp_priv->clk_mac); @@ -1414,6 +1482,7 @@ static int rk_gmac_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(rk_gmac_pm_ops, rk_gmac_suspend, rk_gmac_resume); static const struct of_device_id rk_gmac_dwmac_match[] = { + { .compatible = "rockchip,px30-gmac", .data = &px30_ops }, { .compatible = "rockchip,rk3128-gmac", .data = &rk3128_ops }, { .compatible = "rockchip,rk3228-gmac", .data = &rk3228_ops }, { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops }, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c index 65bc3556bd8f..edb6053bd980 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c @@ -407,6 +407,19 @@ static void dwmac4_enable_tso(void __iomem *ioaddr, bool en, u32 chan) } } +static void dwmac4_qmode(void __iomem *ioaddr, u32 channel, u8 qmode) +{ + u32 mtl_tx_op = 
readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel)); + + mtl_tx_op &= ~MTL_OP_MODE_TXQEN_MASK; + if (qmode != MTL_QUEUE_AVB) + mtl_tx_op |= MTL_OP_MODE_TXQEN; + else + mtl_tx_op |= MTL_OP_MODE_TXQEN_AV; + + writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel)); +} + static void dwmac4_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan) { u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan)); @@ -441,6 +454,7 @@ const struct stmmac_dma_ops dwmac4_dma_ops = { .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr, .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr, .enable_tso = dwmac4_enable_tso, + .qmode = dwmac4_qmode, .set_bfsize = dwmac4_set_bfsize, }; @@ -468,5 +482,6 @@ const struct stmmac_dma_ops dwmac410_dma_ops = { .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr, .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr, .enable_tso = dwmac4_enable_tso, + .qmode = dwmac4_qmode, .set_bfsize = dwmac4_set_bfsize, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index fe8b536b13f8..79911eefc2a7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -183,6 +183,7 @@ struct stmmac_dma_ops { void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan); + void (*qmode)(void __iomem *ioaddr, u32 channel, u8 qmode); void (*set_bfsize)(void __iomem *ioaddr, int bfsize, u32 chan); }; @@ -236,6 +237,8 @@ struct stmmac_dma_ops { stmmac_do_void_callback(__priv, dma, set_tx_tail_ptr, __args) #define stmmac_enable_tso(__priv, __args...) \ stmmac_do_void_callback(__priv, dma, enable_tso, __args) +#define stmmac_dma_qmode(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, qmode, __args) #define stmmac_set_dma_bfsize(__priv, __args...) \ stmmac_do_void_callback(__priv, dma, set_bfsize, __args) @@ -444,17 +447,22 @@ struct stmmac_mode_ops { struct stmmac_priv; struct tc_cls_u32_offload; +struct tc_cbs_qopt_offload; struct stmmac_tc_ops { int (*init)(struct stmmac_priv *priv); int (*setup_cls_u32)(struct stmmac_priv *priv, struct tc_cls_u32_offload *cls); + int (*setup_cbs)(struct stmmac_priv *priv, + struct tc_cbs_qopt_offload *qopt); }; #define stmmac_tc_init(__priv, __args...) \ stmmac_do_callback(__priv, tc, init, __args) #define stmmac_tc_setup_cls_u32(__priv, __args...) \ stmmac_do_callback(__priv, tc, setup_cls_u32, __args) +#define stmmac_tc_setup_cbs(__priv, __args...) 
\ + stmmac_do_callback(__priv, tc, setup_cbs, __args) struct stmmac_regs_off { u32 ptp_off; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index ef6a8d39db2f..9d104a05044d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -3778,7 +3778,7 @@ static int stmmac_setup_tc_block(struct stmmac_priv *priv, switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb, - priv, priv); + priv, priv, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv); return 0; @@ -3795,6 +3795,8 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type, switch (type) { case TC_SETUP_BLOCK: return stmmac_setup_tc_block(priv, type_data); + case TC_SETUP_QDISC_CBS: + return stmmac_tc_setup_cbs(priv, priv, type_data); default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 2258cd8cc844..1a96dd9c1091 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -289,7 +289,67 @@ static int tc_init(struct stmmac_priv *priv) return 0; } +static int tc_setup_cbs(struct stmmac_priv *priv, + struct tc_cbs_qopt_offload *qopt) +{ + u32 tx_queues_count = priv->plat->tx_queues_to_use; + u32 queue = qopt->queue; + u32 ptr, speed_div; + u32 mode_to_use; + u64 value; + int ret; + + /* Queue 0 is not AVB capable */ + if (queue <= 0 || queue >= tx_queues_count) + return -EINVAL; + if (priv->speed != SPEED_100 && priv->speed != SPEED_1000) + return -EOPNOTSUPP; + + mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use; + if (mode_to_use == MTL_QUEUE_DCB && qopt->enable) { + ret = stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_AVB); + if (ret) + return ret; + + priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB; + } else if (!qopt->enable) { + return stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_DCB); + } + + /* Port Transmit Rate and Speed Divider */ + ptr = (priv->speed == SPEED_100) ? 4 : 8; + speed_div = (priv->speed == SPEED_100) ? 
100000 : 1000000; + + /* Final adjustments for HW */ + value = div_s64(qopt->idleslope * 1024ll * ptr, speed_div); + priv->plat->tx_queues_cfg[queue].idle_slope = value & GENMASK(31, 0); + + value = div_s64(-qopt->sendslope * 1024ll * ptr, speed_div); + priv->plat->tx_queues_cfg[queue].send_slope = value & GENMASK(31, 0); + + value = qopt->hicredit * 1024ll * 8; + priv->plat->tx_queues_cfg[queue].high_credit = value & GENMASK(31, 0); + + value = qopt->locredit * 1024ll * 8; + priv->plat->tx_queues_cfg[queue].low_credit = value & GENMASK(31, 0); + + ret = stmmac_config_cbs(priv, priv->hw, + priv->plat->tx_queues_cfg[queue].send_slope, + priv->plat->tx_queues_cfg[queue].idle_slope, + priv->plat->tx_queues_cfg[queue].high_credit, + priv->plat->tx_queues_cfg[queue].low_credit, + queue); + if (ret) + return ret; + + dev_info(priv->device, "CBS queue %d: send %d, idle %d, hi %d, lo %d\n", + queue, qopt->sendslope, qopt->idleslope, + qopt->hicredit, qopt->locredit); + return 0; +} + const struct stmmac_tc_ops dwmac510_tc_ops = { .init = tc_init, .setup_cls_u32 = tc_setup_cls_u32, + .setup_cbs = tc_setup_cbs, }; diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c index a5dd627fe2f9..d42f47f6c632 100644 --- a/drivers/net/ethernet/sun/ldmvsw.c +++ b/drivers/net/ethernet/sun/ldmvsw.c @@ -101,7 +101,8 @@ static struct vnet_port *vsw_tx_port_find(struct sk_buff *skb, } static u16 vsw_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { struct vnet_port *port = netdev_priv(dev); diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 88c12474a0c3..9319d84bf49f 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -1225,25 +1225,9 @@ static int link_status_1g_rgmii(struct niu *np, int *link_up_p) bmsr = err; if (bmsr & BMSR_LSTATUS) { - u16 adv, lpa; - - err = mii_read(np, np->phy_addr, MII_ADVERTISE); - if (err < 0) - goto out; - adv = err; - - err = mii_read(np, np->phy_addr, MII_LPA); - if (err < 0) - goto out; - lpa = err; - - err = mii_read(np, np->phy_addr, MII_ESTATUS); - if (err < 0) - goto out; link_up = 1; current_speed = SPEED_1000; current_duplex = DUPLEX_FULL; - } lp->active_speed = current_speed; lp->active_duplex = current_duplex; diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index a94f50442613..12539b357a78 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -234,7 +234,8 @@ static struct vnet_port *vnet_tx_port_find(struct sk_buff *skb, } static u16 vnet_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { struct vnet *vp = netdev_priv(dev); struct vnet_port *port = __tx_port_find(vp, skb); diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index 163d8d16bc24..dc966ddb6d81 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c @@ -1151,7 +1151,6 @@ static void bdx_recycle_skb(struct bdx_priv *priv, struct rxd_desc *rxdd) struct rx_map *dm; struct rxf_fifo *f; struct rxdb *db; - struct sk_buff *skb; int delta; ENTER; @@ -1161,7 +1160,6 @@ static void bdx_recycle_skb(struct bdx_priv *priv, struct rxd_desc *rxdd) DBG("db=%p f=%p\n", db, f); dm = bdx_rxdb_addr_elem(db, rxdd->va_lo); DBG("dm=%p\n", dm); - 
skb = dm->skb; rxfd = (struct rxf_desc *)(f->m.va + f->m.wptr); rxfd->info = CPU_CHIP_SWAP32(0x10003); /* INFO=1 BC=3 */ rxfd->va_lo = rxdd->va_lo; diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 358edab9e72e..f051ce35a440 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -39,12 +39,15 @@ #include <linux/sys_soc.h> #include <linux/pinctrl/consumer.h> +#include <net/pkt_cls.h> #include "cpsw.h" #include "cpsw_ale.h" #include "cpts.h" #include "davinci_cpdma.h" +#include <net/pkt_sched.h> + #define CPSW_DEBUG (NETIF_MSG_HW | NETIF_MSG_WOL | \ NETIF_MSG_DRV | NETIF_MSG_LINK | \ NETIF_MSG_IFUP | NETIF_MSG_INTR | \ @@ -153,6 +156,12 @@ do { \ #define IRQ_NUM 2 #define CPSW_MAX_QUEUES 8 #define CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT 256 +#define CPSW_FIFO_QUEUE_TYPE_SHIFT 16 +#define CPSW_FIFO_SHAPE_EN_SHIFT 16 +#define CPSW_FIFO_RATE_EN_SHIFT 20 +#define CPSW_TC_NUM 4 +#define CPSW_FIFO_SHAPERS_NUM (CPSW_TC_NUM - 1) +#define CPSW_PCT_MASK 0x7f #define CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT 29 #define CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK GENMASK(2, 0) @@ -253,23 +262,24 @@ struct cpsw_ss_regs { #define RX_DSCP_PRI_MAP7 0x4c /* Rx DSCP Priority to Rx Packet Mapping */ /* Bit definitions for the CPSW2_CONTROL register */ -#define PASS_PRI_TAGGED (1<<24) /* Pass Priority Tagged */ -#define VLAN_LTYPE2_EN (1<<21) /* VLAN LTYPE 2 enable */ -#define VLAN_LTYPE1_EN (1<<20) /* VLAN LTYPE 1 enable */ -#define DSCP_PRI_EN (1<<16) /* DSCP Priority Enable */ -#define TS_320 (1<<14) /* Time Sync Dest Port 320 enable */ -#define TS_319 (1<<13) /* Time Sync Dest Port 319 enable */ -#define TS_132 (1<<12) /* Time Sync Dest IP Addr 132 enable */ -#define TS_131 (1<<11) /* Time Sync Dest IP Addr 131 enable */ -#define TS_130 (1<<10) /* Time Sync Dest IP Addr 130 enable */ -#define TS_129 (1<<9) /* Time Sync Dest IP Addr 129 enable */ -#define TS_TTL_NONZERO (1<<8) /* Time Sync Time To Live Non-zero enable */ -#define TS_ANNEX_F_EN (1<<6) /* Time Sync Annex F enable */ -#define TS_ANNEX_D_EN (1<<4) /* Time Sync Annex D enable */ -#define TS_LTYPE2_EN (1<<3) /* Time Sync LTYPE 2 enable */ -#define TS_LTYPE1_EN (1<<2) /* Time Sync LTYPE 1 enable */ -#define TS_TX_EN (1<<1) /* Time Sync Transmit Enable */ -#define TS_RX_EN (1<<0) /* Time Sync Receive Enable */ +#define PASS_PRI_TAGGED BIT(24) /* Pass Priority Tagged */ +#define VLAN_LTYPE2_EN BIT(21) /* VLAN LTYPE 2 enable */ +#define VLAN_LTYPE1_EN BIT(20) /* VLAN LTYPE 1 enable */ +#define DSCP_PRI_EN BIT(16) /* DSCP Priority Enable */ +#define TS_107 BIT(15) /* Time Sync Dest IP Address 107 */ +#define TS_320 BIT(14) /* Time Sync Dest Port 320 enable */ +#define TS_319 BIT(13) /* Time Sync Dest Port 319 enable */ +#define TS_132 BIT(12) /* Time Sync Dest IP Addr 132 enable */ +#define TS_131 BIT(11) /* Time Sync Dest IP Addr 131 enable */ +#define TS_130 BIT(10) /* Time Sync Dest IP Addr 130 enable */ +#define TS_129 BIT(9) /* Time Sync Dest IP Addr 129 enable */ +#define TS_TTL_NONZERO BIT(8) /* Time Sync Time To Live Non-zero enable */ +#define TS_ANNEX_F_EN BIT(6) /* Time Sync Annex F enable */ +#define TS_ANNEX_D_EN BIT(4) /* Time Sync Annex D enable */ +#define TS_LTYPE2_EN BIT(3) /* Time Sync LTYPE 2 enable */ +#define TS_LTYPE1_EN BIT(2) /* Time Sync LTYPE 1 enable */ +#define TS_TX_EN BIT(1) /* Time Sync Transmit Enable */ +#define TS_RX_EN BIT(0) /* Time Sync Receive Enable */ #define CTRL_V2_TS_BITS \ (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\ @@ -281,7 +291,7 @@ struct cpsw_ss_regs {
#define CTRL_V3_TS_BITS \ - (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\ + (TS_107 | TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\ TS_TTL_NONZERO | TS_ANNEX_F_EN | TS_ANNEX_D_EN |\ TS_LTYPE1_EN) @@ -453,6 +463,9 @@ struct cpsw_priv { u8 mac_addr[ETH_ALEN]; bool rx_pause; bool tx_pause; + bool mqprio_hw; + int fifo_bw[CPSW_TC_NUM]; + int shp_cfg_speed; u32 emac_port; struct cpsw_common *cpsw; }; @@ -552,40 +565,28 @@ static const struct cpsw_stats cpsw_gstrings_ch_stats[] = { (func)(slave++, ##arg); \ } while (0) -#define cpsw_dual_emac_src_port_detect(cpsw, status, ndev, skb) \ - do { \ - if (!cpsw->data.dual_emac) \ - break; \ - if (CPDMA_RX_SOURCE_PORT(status) == 1) { \ - ndev = cpsw->slaves[0].ndev; \ - skb->dev = ndev; \ - } else if (CPDMA_RX_SOURCE_PORT(status) == 2) { \ - ndev = cpsw->slaves[1].ndev; \ - skb->dev = ndev; \ - } \ - } while (0) -#define cpsw_add_mcast(cpsw, priv, addr) \ - do { \ - if (cpsw->data.dual_emac) { \ - struct cpsw_slave *slave = cpsw->slaves + \ - priv->emac_port; \ - int slave_port = cpsw_get_slave_port( \ - slave->slave_num); \ - cpsw_ale_add_mcast(cpsw->ale, addr, \ - 1 << slave_port | ALE_PORT_HOST, \ - ALE_VLAN, slave->port_vlan, 0); \ - } else { \ - cpsw_ale_add_mcast(cpsw->ale, addr, \ - ALE_ALL_PORTS, \ - 0, 0, 0); \ - } \ - } while (0) - static inline int cpsw_get_slave_port(u32 slave_num) { return slave_num + 1; } +static void cpsw_add_mcast(struct cpsw_priv *priv, u8 *addr) +{ + struct cpsw_common *cpsw = priv->cpsw; + + if (cpsw->data.dual_emac) { + struct cpsw_slave *slave = cpsw->slaves + priv->emac_port; + int slave_port = cpsw_get_slave_port(slave->slave_num); + + cpsw_ale_add_mcast(cpsw->ale, addr, + 1 << slave_port | ALE_PORT_HOST, + ALE_VLAN, slave->port_vlan, 0); + return; + } + + cpsw_ale_add_mcast(cpsw->ale, addr, ALE_ALL_PORTS, 0, 0, 0); +} + static void cpsw_set_promiscious(struct net_device *ndev, bool enable) { struct cpsw_common *cpsw = ndev_to_cpsw(ndev); @@ -693,7 +694,7 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev) /* program multicast address list into ALE register */ netdev_for_each_mc_addr(ha, ndev) { - cpsw_add_mcast(cpsw, priv, (u8 *)ha->addr); + cpsw_add_mcast(priv, ha->addr); } } } @@ -785,10 +786,16 @@ static void cpsw_rx_handler(void *token, int len, int status) struct sk_buff *skb = token; struct sk_buff *new_skb; struct net_device *ndev = skb->dev; - int ret = 0; + int ret = 0, port; struct cpsw_common *cpsw = ndev_to_cpsw(ndev); - cpsw_dual_emac_src_port_detect(cpsw, status, ndev, skb); + if (cpsw->data.dual_emac) { + port = CPDMA_RX_SOURCE_PORT(status); + if (port) { + ndev = cpsw->slaves[--port].ndev; + skb->dev = ndev; + } + } if (unlikely(status < 0) || unlikely(!netif_running(ndev))) { /* In dual emac mode check for all interfaces */ @@ -967,8 +974,8 @@ static int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget) /* process every unprocessed channel */ ch_map = cpdma_ctrl_txchs_state(cpsw->dma); - for (ch = 0, num_tx = 0; ch_map; ch_map >>= 1, ch++) { - if (!(ch_map & 0x01)) + for (ch = 0, num_tx = 0; ch_map & 0xff; ch_map <<= 1, ch++) { + if (!(ch_map & 0x80)) continue; txv = &cpsw->txv[ch]; @@ -1077,6 +1084,38 @@ static void cpsw_set_slave_mac(struct cpsw_slave *slave, slave_write(slave, mac_lo(priv->mac_addr), SA_LO); } +static bool cpsw_shp_is_off(struct cpsw_priv *priv) +{ + struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_slave *slave; + u32 shift, mask, val; + + val = readl_relaxed(&cpsw->regs->ptype); + + slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; + 
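+	/* Each slave port owns a 3-bit FIFO shaper enable field in the
+	 * PTYPE register, starting at CPSW_FIFO_SHAPE_EN_SHIFT and offset
+	 * by 3 bits per slave; shaping is considered off for this port
+	 * only when all three bits are clear.
+	 */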
shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num; + mask = 7 << shift; + val = val & mask; + + return !val; +} + +static void cpsw_fifo_shp_on(struct cpsw_priv *priv, int fifo, int on) +{ + struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_slave *slave; + u32 shift, mask, val; + + val = readl_relaxed(&cpsw->regs->ptype); + + slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; + shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num; + mask = (1 << --fifo) << shift; + val = on ? val | mask : val & ~mask; + + writel_relaxed(val, &cpsw->regs->ptype); +} + static void _cpsw_adjust_link(struct cpsw_slave *slave, struct cpsw_priv *priv, bool *link) { @@ -1116,6 +1155,12 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave, mac_control |= BIT(4); *link = true; + + if (priv->shp_cfg_speed && + priv->shp_cfg_speed != slave->phy->speed && + !cpsw_shp_is_off(priv)) + dev_warn(priv->dev, + "Speed was changed, CBS shaper speeds are changed!"); } else { mac_control = 0; /* disable forwarding */ @@ -1577,6 +1622,231 @@ static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_common *cpsw) soft_reset_slave(slave); } +static int cpsw_tc_to_fifo(int tc, int num_tc) +{ + if (tc == num_tc - 1) + return 0; + + return CPSW_FIFO_SHAPERS_NUM - tc; +} + +static int cpsw_set_fifo_bw(struct cpsw_priv *priv, int fifo, int bw) +{ + struct cpsw_common *cpsw = priv->cpsw; + u32 val = 0, send_pct, shift; + struct cpsw_slave *slave; + int pct = 0, i; + + if (bw > priv->shp_cfg_speed * 1000) + goto err; + + /* shaping has to stay enabled for highest fifos linearly + * and fifo bw no more than interface can allow + */ + slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; + send_pct = slave_read(slave, SEND_PERCENT); + for (i = CPSW_FIFO_SHAPERS_NUM; i > 0; i--) { + if (!bw) { + if (i >= fifo || !priv->fifo_bw[i]) + continue; + + dev_warn(priv->dev, "Prev FIFO%d is shaped", i); + continue; + } + + if (!priv->fifo_bw[i] && i > fifo) { + dev_err(priv->dev, "Upper FIFO%d is not shaped", i); + return -EINVAL; + } + + shift = (i - 1) * 8; + if (i == fifo) { + send_pct &= ~(CPSW_PCT_MASK << shift); + val = DIV_ROUND_UP(bw, priv->shp_cfg_speed * 10); + if (!val) + val = 1; + + send_pct |= val << shift; + pct += val; + continue; + } + + if (priv->fifo_bw[i]) + pct += (send_pct >> shift) & CPSW_PCT_MASK; + } + + if (pct >= 100) + goto err; + + slave_write(slave, send_pct, SEND_PERCENT); + priv->fifo_bw[fifo] = bw; + + dev_warn(priv->dev, "set FIFO%d bw = %d\n", fifo, + DIV_ROUND_CLOSEST(val * priv->shp_cfg_speed, 100)); + + return 0; +err: + dev_err(priv->dev, "Bandwidth doesn't fit in tc configuration"); + return -EINVAL; +} + +static int cpsw_set_fifo_rlimit(struct cpsw_priv *priv, int fifo, int bw) +{ + struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_slave *slave; + u32 tx_in_ctl_rg, val; + int ret; + + ret = cpsw_set_fifo_bw(priv, fifo, bw); + if (ret) + return ret; + + slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; + tx_in_ctl_rg = cpsw->version == CPSW_VERSION_1 ?
+ CPSW1_TX_IN_CTL : CPSW2_TX_IN_CTL; + + if (!bw) + cpsw_fifo_shp_on(priv, fifo, bw); + + val = slave_read(slave, tx_in_ctl_rg); + if (cpsw_shp_is_off(priv)) { + /* disable FIFOs rate limited queues */ + val &= ~(0xf << CPSW_FIFO_RATE_EN_SHIFT); + + /* set type of FIFO queues to normal priority mode */ + val &= ~(3 << CPSW_FIFO_QUEUE_TYPE_SHIFT); + + /* set type of FIFO queues to be rate limited */ + if (bw) + val |= 2 << CPSW_FIFO_QUEUE_TYPE_SHIFT; + else + priv->shp_cfg_speed = 0; + } + + /* toggle a FIFO rate limited queue */ + if (bw) + val |= BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT); + else + val &= ~BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT); + slave_write(slave, val, tx_in_ctl_rg); + + /* FIFO transmit shape enable */ + cpsw_fifo_shp_on(priv, fifo, bw); + return 0; +} + +/* Defaults: + * class A - prio 3 + * class B - prio 2 + * shaping for class A should be set first + */ +static int cpsw_set_cbs(struct net_device *ndev, + struct tc_cbs_qopt_offload *qopt) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_slave *slave; + int prev_speed = 0; + int tc, ret, fifo; + u32 bw = 0; + + tc = netdev_txq_to_tc(priv->ndev, qopt->queue); + + /* enable channels in backward order, as highest FIFOs must be rate + * limited first and for compliance with CPDMA rate limited channels + * that are also used in backward order. FIFO0 cannot be rate limited. + */ + fifo = cpsw_tc_to_fifo(tc, ndev->num_tc); + if (!fifo) { + dev_err(priv->dev, "Last tc%d can't be rate limited", tc); + return -EINVAL; + } + + /* do nothing, it's disabled anyway */ + if (!qopt->enable && !priv->fifo_bw[fifo]) + return 0; + + /* shapers can be set if link speed is known */ + slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; + if (slave->phy && slave->phy->link) { + if (priv->shp_cfg_speed && + priv->shp_cfg_speed != slave->phy->speed) + prev_speed = priv->shp_cfg_speed; + + priv->shp_cfg_speed = slave->phy->speed; + } + + if (!priv->shp_cfg_speed) { + dev_err(priv->dev, "Link speed is not known"); + return -1; + } + + ret = pm_runtime_get_sync(cpsw->dev); + if (ret < 0) { + pm_runtime_put_noidle(cpsw->dev); + return ret; + } + + bw = qopt->enable ? qopt->idleslope : 0; + ret = cpsw_set_fifo_rlimit(priv, fifo, bw); + if (ret) { + priv->shp_cfg_speed = prev_speed; + prev_speed = 0; + } + + if (bw && prev_speed) + dev_warn(priv->dev, + "Speed was changed, CBS shaper speeds are changed!"); + + pm_runtime_put_sync(cpsw->dev); + return ret; +} + +static void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv) +{ + int fifo, bw; + + for (fifo = CPSW_FIFO_SHAPERS_NUM; fifo > 0; fifo--) { + bw = priv->fifo_bw[fifo]; + if (!bw) + continue; + + cpsw_set_fifo_rlimit(priv, fifo, bw); + } +} + +static void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv) +{ + struct cpsw_common *cpsw = priv->cpsw; + u32 tx_prio_map = 0; + int i, tc, fifo; + u32 tx_prio_rg; + + if (!priv->mqprio_hw) + return; + + for (i = 0; i < 8; i++) { + tc = netdev_get_prio_tc_map(priv->ndev, i); + fifo = CPSW_FIFO_SHAPERS_NUM - tc; + tx_prio_map |= fifo << (4 * i); + } + + tx_prio_rg = cpsw->version == CPSW_VERSION_1 ?
+ CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP; + + slave_write(slave, tx_prio_map, tx_prio_rg); +} + +/* restore resources after port reset */ +static void cpsw_restore(struct cpsw_priv *priv) +{ + /* restore MQPRIO offload */ + for_each_slave(priv, cpsw_mqprio_resume, priv); + + /* restore CBS offload */ + for_each_slave(priv, cpsw_cbs_resume, priv); +} + static int cpsw_ndo_open(struct net_device *ndev) { struct cpsw_priv *priv = netdev_priv(ndev); @@ -1656,6 +1926,8 @@ static int cpsw_ndo_open(struct net_device *ndev) } + cpsw_restore(priv); + /* Enable Interrupt pacing if configured */ if (cpsw->coal_intvl != 0) { struct ethtool_coalesce coal; @@ -2190,6 +2462,78 @@ static int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate) return ret; } +static int cpsw_set_mqprio(struct net_device *ndev, void *type_data) +{ + struct tc_mqprio_qopt_offload *mqprio = type_data; + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + int fifo, num_tc, count, offset; + struct cpsw_slave *slave; + u32 tx_prio_map = 0; + int i, tc, ret; + + num_tc = mqprio->qopt.num_tc; + if (num_tc > CPSW_TC_NUM) + return -EINVAL; + + if (mqprio->mode != TC_MQPRIO_MODE_DCB) + return -EINVAL; + + ret = pm_runtime_get_sync(cpsw->dev); + if (ret < 0) { + pm_runtime_put_noidle(cpsw->dev); + return ret; + } + + if (num_tc) { + for (i = 0; i < 8; i++) { + tc = mqprio->qopt.prio_tc_map[i]; + fifo = cpsw_tc_to_fifo(tc, num_tc); + tx_prio_map |= fifo << (4 * i); + } + + netdev_set_num_tc(ndev, num_tc); + for (i = 0; i < num_tc; i++) { + count = mqprio->qopt.count[i]; + offset = mqprio->qopt.offset[i]; + netdev_set_tc_queue(ndev, i, count, offset); + } + } + + if (!mqprio->qopt.hw) { + /* restore default configuration */ + netdev_reset_tc(ndev); + tx_prio_map = TX_PRIORITY_MAPPING; + } + + priv->mqprio_hw = mqprio->qopt.hw; + + offset = cpsw->version == CPSW_VERSION_1 ? + CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP; + + slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; + slave_write(slave, tx_prio_map, offset); + + pm_runtime_put_sync(cpsw->dev); + + return 0; +} + +static int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type, + void *type_data) +{ + switch (type) { + case TC_SETUP_QDISC_CBS: + return cpsw_set_cbs(ndev, type_data); + + case TC_SETUP_QDISC_MQPRIO: + return cpsw_set_mqprio(ndev, type_data); + + default: + return -EOPNOTSUPP; + } +} + static const struct net_device_ops cpsw_netdev_ops = { .ndo_open = cpsw_ndo_open, .ndo_stop = cpsw_ndo_stop, @@ -2205,6 +2549,7 @@ static const struct net_device_ops cpsw_netdev_ops = { #endif .ndo_vlan_rx_add_vid = cpsw_ndo_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = cpsw_ndo_vlan_rx_kill_vid, + .ndo_setup_tc = cpsw_ndo_setup_tc, }; static int cpsw_get_regs_len(struct net_device *ndev) @@ -2431,7 +2776,7 @@ static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx) void (*handler)(void *, int, int); struct netdev_queue *queue; struct cpsw_vector *vec; - int ret, *ch; + int ret, *ch, vch; if (rx) { ch = &cpsw->rx_ch_num; @@ -2444,7 +2789,8 @@ static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx) } while (*ch < ch_num) { - vec[*ch].ch = cpdma_chan_create(cpsw->dma, *ch, handler, rx); + vch = rx ? 
*ch : 7 - *ch; + vec[*ch].ch = cpdma_chan_create(cpsw->dma, vch, handler, rx); queue = netdev_get_tx_queue(priv->ndev, *ch); queue->tx_maxrate = 0; @@ -2927,7 +3273,7 @@ static int cpsw_probe_dual_emac(struct cpsw_priv *priv) dev_info(cpsw->dev, "cpsw: Detected MACID = %pM\n", priv_sl2->mac_addr); } else { - random_ether_addr(priv_sl2->mac_addr); + eth_random_addr(priv_sl2->mac_addr); dev_info(cpsw->dev, "cpsw: Random MACID = %pM\n", priv_sl2->mac_addr); } @@ -2935,7 +3281,7 @@ static int cpsw_probe_dual_emac(struct cpsw_priv *priv) priv_sl2->emac_port = 1; cpsw->slaves[1].ndev = ndev; - ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX; ndev->netdev_ops = &cpsw_netdev_ops; ndev->ethtool_ops = &cpsw_ethtool_ops; @@ -2981,7 +3327,7 @@ static int cpsw_probe(struct platform_device *pdev) u32 slave_offset, sliver_offset, slave_size; const struct soc_device_attribute *soc; struct cpsw_common *cpsw; - int ret = 0, i; + int ret = 0, i, ch; int irq; cpsw = devm_kzalloc(&pdev->dev, sizeof(struct cpsw_common), GFP_KERNEL); @@ -3156,7 +3502,8 @@ static int cpsw_probe(struct platform_device *pdev) if (soc) cpsw->quirk_irq = 1; - cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_tx_handler, 0); + ch = cpsw->quirk_irq ? 0 : 7; + cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, ch, cpsw_tx_handler, 0); if (IS_ERR(cpsw->txv[0].ch)) { dev_err(priv->dev, "error initializing tx dma channel\n"); ret = PTR_ERR(cpsw->txv[0].ch); diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c index 6f63c8729afc..b4ea58dc8caf 100644 --- a/drivers/net/ethernet/ti/cpts.c +++ b/drivers/net/ethernet/ti/cpts.c @@ -114,7 +114,10 @@ static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event) dev_consume_skb_any(skb); dev_dbg(cpts->dev, "match tx timestamp mtype %u seqid %04x\n", mtype, seqid); - } else if (time_after(jiffies, skb_cb->tmo)) { + break; + } + + if (time_after(jiffies, skb_cb->tmo)) { /* timeout any expired skbs over 1s */ dev_dbg(cpts->dev, "expiring tx timestamp mtype %u seqid %04x\n", diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index 4f1267477aa4..4236dcdd5634 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -406,37 +406,36 @@ static int cpdma_chan_fit_rate(struct cpdma_chan *ch, u32 rate, struct cpdma_chan *chan; u32 old_rate = ch->rate; u32 new_rmask = 0; - int rlim = 1; + int rlim = 0; int i; - *prio_mode = 0; for (i = tx_chan_num(0); i < tx_chan_num(CPDMA_MAX_CHANNELS); i++) { chan = ctlr->channels[i]; - if (!chan) { - rlim = 0; + if (!chan) continue; - } if (chan == ch) chan->rate = rate; if (chan->rate) { - if (rlim) { - new_rmask |= chan->mask; - } else { - ch->rate = old_rate; - dev_err(ctlr->dev, "Prev channel of %dch is not rate limited\n", - chan->chan_num); - return -EINVAL; - } - } else { - *prio_mode = 1; - rlim = 0; + rlim = 1; + new_rmask |= chan->mask; + continue; } + + if (rlim) + goto err; } *rmask = new_rmask; + *prio_mode = rlim; return 0; + +err: + ch->rate = old_rate; + dev_err(ctlr->dev, "Upper cpdma ch%d is not rate limited\n", + chan->chan_num); + return -EINVAL; } static u32 cpdma_chan_set_factors(struct cpdma_ctlr *ctlr, diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index e40aa3e31af2..a1d335a3c5e4 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -1889,13 +1889,6 @@ static int 
netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid) return err; } -static u16 netcp_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, - select_queue_fallback_t fallback) -{ - return 0; -} - static int netcp_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { @@ -1972,7 +1965,7 @@ static const struct net_device_ops netcp_netdev_ops = { .ndo_vlan_rx_add_vid = netcp_rx_add_vid, .ndo_vlan_rx_kill_vid = netcp_rx_kill_vid, .ndo_tx_timeout = netcp_ndo_tx_timeout, - .ndo_select_queue = netcp_select_queue, + .ndo_select_queue = dev_pick_tx_zero, .ndo_setup_tc = netcp_setup_tc, }; @@ -2052,7 +2045,7 @@ static int netcp_create_interface(struct netcp_device *netcp_device, if (is_valid_ether_addr(efuse_mac_addr)) ether_addr_copy(ndev->dev_addr, efuse_mac_addr); else - random_ether_addr(ndev->dev_addr); + eth_random_addr(ndev->dev_addr); devm_iounmap(dev, efuse); devm_release_mem_region(dev, res.start, size); @@ -2061,7 +2054,7 @@ static int netcp_create_interface(struct netcp_device *netcp_device, if (mac_addr) ether_addr_copy(ndev->dev_addr, mac_addr); else - random_ether_addr(ndev->dev_addr); + eth_random_addr(ndev->dev_addr); } ret = of_property_read_string(node_interface, "rx-channel", diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 2a0c06e0f730..42f1f518dad6 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -70,7 +70,8 @@ #define XEL_TSR_XMIT_IE_MASK 0x00000008 /* Tx interrupt enable bit */ #define XEL_TSR_XMIT_ACTIVE_MASK 0x80000000 /* Buffer is active, SW bit * only. This is not documented - * in the HW spec */ + * in the HW spec + */ /* Define for programming the MAC address into the EmacLite */ #define XEL_TSR_PROG_MAC_ADDR (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_PROGRAM_MASK) @@ -94,11 +95,11 @@ -#define TX_TIMEOUT (60*HZ) /* Tx timeout is 60 seconds. */ +#define TX_TIMEOUT (60 * HZ) /* Tx timeout is 60 seconds. */ #define ALIGNMENT 4 /* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. 
*/ -#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32) adr)) % ALIGNMENT) +#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32)adr)) % ALIGNMENT) #ifdef __BIG_ENDIAN #define xemaclite_readl ioread32be @@ -238,8 +239,8 @@ static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr, /* Set up to output the remaining data */ align_buffer = 0; - to_u8_ptr = (u8 *) &align_buffer; - from_u8_ptr = (u8 *) from_u16_ptr; + to_u8_ptr = (u8 *)&align_buffer; + from_u8_ptr = (u8 *)from_u16_ptr; /* Output the remaining data */ for (; length > 0; length--) @@ -272,7 +273,7 @@ static void xemaclite_aligned_read(u32 *src_ptr, u8 *dest_ptr, u32 align_buffer; from_u32_ptr = src_ptr; - to_u16_ptr = (u16 *) dest_ptr; + to_u16_ptr = (u16 *)dest_ptr; for (; length > 3; length -= 4) { /* Copy each word into the temporary buffer */ @@ -288,9 +289,9 @@ static void xemaclite_aligned_read(u32 *src_ptr, u8 *dest_ptr, u8 *to_u8_ptr, *from_u8_ptr; /* Set up to read the remaining data */ - to_u8_ptr = (u8 *) to_u16_ptr; + to_u8_ptr = (u8 *)to_u16_ptr; align_buffer = *from_u32_ptr++; - from_u8_ptr = (u8 *) &align_buffer; + from_u8_ptr = (u8 *)&align_buffer; /* Read the remaining data */ for (; length > 0; length--) @@ -336,7 +337,8 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data, drvdata->next_tx_buf_to_use ^= XEL_BUFFER_OFFSET; } else if (drvdata->tx_ping_pong != 0) { /* If the expected buffer is full, try the other buffer, - * if it is configured in HW */ + * if it is configured in HW + */ addr = (void __iomem __force *)((u32 __force)addr ^ XEL_BUFFER_OFFSET); @@ -349,7 +351,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data, return -1; /* Buffer was full, return failure */ /* Write the frame to the buffer */ - xemaclite_aligned_write(data, (u32 __force *) addr, byte_count); + xemaclite_aligned_write(data, (u32 __force *)addr, byte_count); xemaclite_writel((byte_count & XEL_TPLR_LENGTH_MASK), addr + XEL_TPLR_OFFSET); @@ -357,7 +359,8 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data, /* Update the Tx Status Register to indicate that there is a * frame to send. Set the XEL_TSR_XMIT_ACTIVE_MASK flag which * is used by the interrupt handler to check whether a frame - * has been transmitted */ + * has been transmitted + */ reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET); reg_data |= (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK); xemaclite_writel(reg_data, addr + XEL_TSR_OFFSET); @@ -369,6 +372,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data, * xemaclite_recv_data - Receive a frame * @drvdata: Pointer to the Emaclite device private data * @data: Address where the data is to be received + * @maxlen: Maximum supported ethernet packet length * * This function is intended to be called from the interrupt context or * with a wrapper which waits for the receive frame to be available. @@ -394,7 +398,8 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen) /* The instance is out of sync, try other buffer if other * buffer is configured, return 0 otherwise. 
If the instance is * out of sync, do not update the 'next_rx_buf_to_use' since it - * will correct on subsequent calls */ + * will correct on subsequent calls + */ if (drvdata->rx_ping_pong != 0) addr = (void __iomem __force *)((u32 __force)addr ^ XEL_BUFFER_OFFSET); @@ -408,13 +413,15 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen) return 0; /* No data was available */ } - /* Get the protocol type of the ethernet frame that arrived */ + /* Get the protocol type of the ethernet frame that arrived + */ proto_type = ((ntohl(xemaclite_readl(addr + XEL_HEADER_OFFSET + XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) & XEL_RPLR_LENGTH_MASK); /* Check if received ethernet frame is a raw ethernet frame - * or an IP packet or an ARP packet */ + * or an IP packet or an ARP packet + */ if (proto_type > ETH_DATA_LEN) { if (proto_type == ETH_P_IP) { @@ -430,7 +437,8 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen) length = XEL_ARP_PACKET_SIZE + ETH_HLEN + ETH_FCS_LEN; else /* Field contains type other than IP or ARP, use max - * frame size and let user parse it */ + * frame size and let user parse it + */ length = ETH_FRAME_LEN + ETH_FCS_LEN; } else /* Use the length in the frame, plus the header and trailer */ @@ -440,7 +448,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen) length = maxlen; /* Read from the EmacLite device */ - xemaclite_aligned_read((u32 __force *) (addr + XEL_RXBUFF_OFFSET), + xemaclite_aligned_read((u32 __force *)(addr + XEL_RXBUFF_OFFSET), data, length); /* Acknowledge the frame */ @@ -471,7 +479,7 @@ static void xemaclite_update_address(struct net_local *drvdata, /* Determine the expected Tx buffer address */ addr = drvdata->base_addr + drvdata->next_tx_buf_to_use; - xemaclite_aligned_write(address_ptr, (u32 __force *) addr, ETH_ALEN); + xemaclite_aligned_write(address_ptr, (u32 __force *)addr, ETH_ALEN); xemaclite_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET); @@ -488,7 +496,7 @@ static void xemaclite_update_address(struct net_local *drvdata, /** * xemaclite_set_mac_address - Set the MAC address for this device * @dev: Pointer to the network device instance - * @addr: Void pointer to the sockaddr structure + * @address: Void pointer to the sockaddr structure * * This function copies the HW address from the sockaddr structure to the * net_device structure and updates the address in HW. @@ -564,19 +572,19 @@ static void xemaclite_tx_handler(struct net_device *dev) struct net_local *lp = netdev_priv(dev); dev->stats.tx_packets++; - if (lp->deferred_skb) { - if (xemaclite_send_data(lp, - (u8 *) lp->deferred_skb->data, - lp->deferred_skb->len) != 0) - return; - else { - dev->stats.tx_bytes += lp->deferred_skb->len; - dev_kfree_skb_irq(lp->deferred_skb); - lp->deferred_skb = NULL; - netif_trans_update(dev); /* prevent tx timeout */ - netif_wake_queue(dev); - } - } + + if (!lp->deferred_skb) + return; + + if (xemaclite_send_data(lp, (u8 *)lp->deferred_skb->data, + lp->deferred_skb->len)) + return; + + dev->stats.tx_bytes += lp->deferred_skb->len; + dev_kfree_skb_irq(lp->deferred_skb); + lp->deferred_skb = NULL; + netif_trans_update(dev); /* prevent tx timeout */ + netif_wake_queue(dev); } /** @@ -602,18 +610,18 @@ static void xemaclite_rx_handler(struct net_device *dev) return; } - /* - * A new skb should have the data halfword aligned, but this code is + /* A new skb should have the data halfword aligned, but this code is * here just in case that isn't true.
Calculate how many * bytes we should reserve to get the data to start on a word - * boundary */ + * boundary + */ align = BUFFER_ALIGN(skb->data); if (align) skb_reserve(skb, align); skb_reserve(skb, 2); - len = xemaclite_recv_data(lp, (u8 *) skb->data, len); + len = xemaclite_recv_data(lp, (u8 *)skb->data, len); if (!len) { dev->stats.rx_errors++; @@ -639,6 +647,8 @@ static void xemaclite_rx_handler(struct net_device *dev) * @dev_id: Void pointer to the network device instance used as callback * reference * + * Return: IRQ_HANDLED + * * This function handles the Tx and Rx interrupts of the EmacLite device. */ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id) @@ -706,8 +716,8 @@ static int xemaclite_mdio_wait(struct net_local *lp) unsigned long end = jiffies + 2; /* wait for the MDIO interface to not be busy or timeout - after some time. - */ + * after some time. + */ while (xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) & XEL_MDIOCTRL_MDIOSTS_MASK) { if (time_before_eq(end, jiffies)) { @@ -757,7 +767,7 @@ static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg) rc = xemaclite_readl(lp->base_addr + XEL_MDIORD_OFFSET); dev_dbg(&lp->ndev->dev, - "xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n", + "%s(phy_id=%i, reg=%x) == %x\n", __func__, phy_id, reg, rc); return rc; @@ -772,6 +782,8 @@ static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg) * * This function waits till the device is ready to accept a new MDIO * request and then writes the val to the MDIO Write Data register. + * + * Return: 0 upon success or a negative error upon failure */ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val) @@ -780,7 +792,7 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg, u32 ctrl_reg; dev_dbg(&lp->ndev->dev, - "xemaclite_mdio_write(phy_id=%i, reg=%x, val=%x)\n", + "%s(phy_id=%i, reg=%x, val=%x)\n", __func__, phy_id, reg, val); if (xemaclite_mdio_wait(lp)) @@ -805,7 +817,7 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg, /** * xemaclite_mdio_setup - Register mii_bus for the Emaclite device * @lp: Pointer to the Emaclite device private data - * @ofdev: Pointer to OF device structure + * @dev: Pointer to OF device structure * * This function enables MDIO bus in the Emaclite device and registers a * mii_bus. @@ -905,6 +917,9 @@ static void xemaclite_adjust_link(struct net_device *ndev) * This function sets the MAC address, requests an IRQ and enables interrupts * for the Emaclite device and starts the Tx queue. * It also connects to the phy device, if MDIO is included in Emaclite device. + * + * Return: 0 on success. -ENODEV, if PHY cannot be connected. + * Non-zero error value on failure. */ static int xemaclite_open(struct net_device *dev) { @@ -975,6 +990,8 @@ static int xemaclite_open(struct net_device *dev) * This function stops the Tx queue, disables interrupts and frees the IRQ for * the Emaclite device. * It also disconnects the phy device associated with the Emaclite device. + * + * Return: 0, always. 
*/ static int xemaclite_close(struct net_device *dev) { @@ -1017,10 +1034,11 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev) new_skb = orig_skb; spin_lock_irqsave(&lp->reset_lock, flags); - if (xemaclite_send_data(lp, (u8 *) new_skb->data, len) != 0) { + if (xemaclite_send_data(lp, (u8 *)new_skb->data, len) != 0) { /* If the Emaclite Tx buffer is busy, stop the Tx queue and * defer the skb for transmission during the ISR, after the - * current transmission is complete */ + * current transmission is complete + */ netif_stop_queue(dev); lp->deferred_skb = new_skb; /* Take the time stamp now, since we can't do this in an ISR. */ @@ -1052,13 +1070,12 @@ static bool get_bool(struct platform_device *ofdev, const char *s) { u32 *p = (u32 *)of_get_property(ofdev->dev.of_node, s, NULL); - if (p) { - return (bool)*p; - } else { - dev_warn(&ofdev->dev, "Parameter %s not found," - "defaulting to false\n", s); + if (!p) { + dev_warn(&ofdev->dev, "Parameter %s not found, defaulting to false\n", s); return false; } + + return (bool)*p; } static const struct net_device_ops xemaclite_netdev_ops; @@ -1066,7 +1083,6 @@ static const struct net_device_ops xemaclite_netdev_ops; /** * xemaclite_of_probe - Probe method for the Emaclite device. * @ofdev: Pointer to OF device structure - * @match: Pointer to the structure used for matching a device * * This function probes for the Emaclite device in the device tree. * It initializes the driver data structure and the hardware, sets the MAC diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c index 750954be5a74..d3eae1239045 100644 --- a/drivers/net/fjes/fjes_main.c +++ b/drivers/net/fjes/fjes_main.c @@ -1395,8 +1395,8 @@ static void fjes_watch_unshare_task(struct work_struct *work) while ((unshare_watch_bitmask || hw->txrx_stop_req_bit) && (wait_time < 3000)) { - for (epidx = 0; epidx < hw->max_epid; epidx++) { - if (epidx == hw->my_epid) + for (epidx = 0; epidx < max_epid; epidx++) { + if (epidx == my_epid) continue; is_shared = fjes_hw_epid_is_shared(hw->hw_info.share, @@ -1453,8 +1453,8 @@ static void fjes_watch_unshare_task(struct work_struct *work) } if (hw->hw_info.buffer_unshare_reserve_bit) { - for (epidx = 0; epidx < hw->max_epid; epidx++) { - if (epidx == hw->my_epid) + for (epidx = 0; epidx < max_epid; epidx++) { + if (epidx == my_epid) continue; if (test_bit(epidx, diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index ada33c2d9ac2..6acb6b5718b9 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -236,7 +236,8 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs, } /* Update tunnel dst according to Geneve options. */ ip_tunnel_info_opts_set(&tun_dst->u.tun_info, - gnvh->options, gnvh->opt_len * 4); + gnvh->options, gnvh->opt_len * 4, + TUNNEL_GENEVE_OPT); } else { /* Drop packets w/ critical options, * since we don't support any... 
@@ -418,11 +419,12 @@ static int geneve_hlen(struct genevehdr *gh) return sizeof(*gh) + gh->opt_len * 4; } -static struct sk_buff **geneve_gro_receive(struct sock *sk, - struct sk_buff **head, - struct sk_buff *skb) +static struct sk_buff *geneve_gro_receive(struct sock *sk, + struct list_head *head, + struct sk_buff *skb) { - struct sk_buff *p, **pp = NULL; + struct sk_buff *pp = NULL; + struct sk_buff *p; struct genevehdr *gh, *gh2; unsigned int hlen, gh_len, off_gnv; const struct packet_offload *ptype; @@ -449,7 +451,7 @@ static struct sk_buff **geneve_gro_receive(struct sock *sk, goto out; } - for (p = *head; p; p = p->next) { + list_for_each_entry(p, head, list) { if (!NAPI_GRO_CB(p)->same_flow) continue; @@ -674,7 +676,8 @@ static void geneve_build_header(struct genevehdr *geneveh, geneveh->proto_type = htons(ETH_P_TEB); geneveh->rsvd2 = 0; - ip_tunnel_info_opts_get(geneveh->options, info); + if (info->key.tun_flags & TUNNEL_GENEVE_OPT) + ip_tunnel_info_opts_get(geneveh->options, info); } static int geneve_build_skb(struct dst_entry *dst, struct sk_buff *skb, diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index ec629a730005..7a145172d503 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -1255,7 +1255,7 @@ out: return skb->len; } -static struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = { +static const struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = { [GTPA_LINK] = { .type = NLA_U32, }, [GTPA_VERSION] = { .type = NLA_U32, }, [GTPA_TID] = { .type = NLA_U64, }, diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index 32f49c4ce457..d79a69dd2146 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -878,10 +878,8 @@ static void decode_data(struct sixpack *sp, unsigned char inbyte) static void decode_prio_command(struct sixpack *sp, unsigned char cmd) { - unsigned char channel; int actual; - channel = cmd & SIXP_CHN_MASK; if ((cmd & SIXP_PRIO_DATA_MASK) != 0) { /* idle ? 
*/ /* RX and DCD flags can only be set in the same prio command, @@ -933,10 +931,9 @@ static void decode_prio_command(struct sixpack *sp, unsigned char cmd) static void decode_std_command(struct sixpack *sp, unsigned char cmd) { - unsigned char checksum = 0, rest = 0, channel; + unsigned char checksum = 0, rest = 0; short i; - channel = cmd & SIXP_CHN_MASK; switch (cmd & SIXP_CMD_MASK) { /* normal command */ case SIXP_SEOF: if ((sp->rx_count == 0) && (sp->rx_count_cooked == 0)) { diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 4b6e308199d2..a32ded5b4f41 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -873,6 +873,17 @@ struct netvsc_ethtool_stats { unsigned long wake_queue; }; +struct netvsc_ethtool_pcpu_stats { + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; + u64 vf_rx_packets; + u64 vf_rx_bytes; + u64 vf_tx_packets; + u64 vf_tx_bytes; +}; + struct netvsc_vf_pcpu_stats { u64 rx_packets; u64 rx_bytes; diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index dd1d6e115145..20275d1e6f9a 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -329,7 +329,7 @@ static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb) } static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, - void *accel_priv, + struct net_device *sb_dev, select_queue_fallback_t fallback) { struct net_device_context *ndc = netdev_priv(ndev); @@ -343,9 +343,9 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, if (vf_ops->ndo_select_queue) txq = vf_ops->ndo_select_queue(vf_netdev, skb, - accel_priv, fallback); + sb_dev, fallback); else - txq = fallback(vf_netdev, skb); + txq = fallback(vf_netdev, skb, NULL); /* Record the queue selected by VF so that it can be * used for common case where VF has more queues than @@ -1118,6 +1118,64 @@ static void netvsc_get_vf_stats(struct net_device *net, } } +static void netvsc_get_pcpu_stats(struct net_device *net, + struct netvsc_ethtool_pcpu_stats *pcpu_tot) +{ + struct net_device_context *ndev_ctx = netdev_priv(net); + struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev); + int i; + + /* fetch percpu stats of vf */ + for_each_possible_cpu(i) { + const struct netvsc_vf_pcpu_stats *stats = + per_cpu_ptr(ndev_ctx->vf_stats, i); + struct netvsc_ethtool_pcpu_stats *this_tot = &pcpu_tot[i]; + unsigned int start; + + do { + start = u64_stats_fetch_begin_irq(&stats->syncp); + this_tot->vf_rx_packets = stats->rx_packets; + this_tot->vf_tx_packets = stats->tx_packets; + this_tot->vf_rx_bytes = stats->rx_bytes; + this_tot->vf_tx_bytes = stats->tx_bytes; + } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + this_tot->rx_packets = this_tot->vf_rx_packets; + this_tot->tx_packets = this_tot->vf_tx_packets; + this_tot->rx_bytes = this_tot->vf_rx_bytes; + this_tot->tx_bytes = this_tot->vf_tx_bytes; + } + + /* fetch percpu stats of netvsc */ + for (i = 0; i < nvdev->num_chn; i++) { + const struct netvsc_channel *nvchan = &nvdev->chan_table[i]; + const struct netvsc_stats *stats; + struct netvsc_ethtool_pcpu_stats *this_tot = + &pcpu_tot[nvchan->channel->target_cpu]; + u64 packets, bytes; + unsigned int start; + + stats = &nvchan->tx_stats; + do { + start = u64_stats_fetch_begin_irq(&stats->syncp); + packets = stats->packets; + bytes = stats->bytes; + } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + + this_tot->tx_bytes += bytes; + this_tot->tx_packets += 
packets; + + stats = &nvchan->rx_stats; + do { + start = u64_stats_fetch_begin_irq(&stats->syncp); + packets = stats->packets; + bytes = stats->bytes; + } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + + this_tot->rx_bytes += bytes; + this_tot->rx_packets += packets; + } +} + static void netvsc_get_stats64(struct net_device *net, struct rtnl_link_stats64 *t) { @@ -1215,6 +1273,23 @@ static const struct { { "rx_no_memory", offsetof(struct netvsc_ethtool_stats, rx_no_memory) }, { "stop_queue", offsetof(struct netvsc_ethtool_stats, stop_queue) }, { "wake_queue", offsetof(struct netvsc_ethtool_stats, wake_queue) }, +}, pcpu_stats[] = { + { "cpu%u_rx_packets", + offsetof(struct netvsc_ethtool_pcpu_stats, rx_packets) }, + { "cpu%u_rx_bytes", + offsetof(struct netvsc_ethtool_pcpu_stats, rx_bytes) }, + { "cpu%u_tx_packets", + offsetof(struct netvsc_ethtool_pcpu_stats, tx_packets) }, + { "cpu%u_tx_bytes", + offsetof(struct netvsc_ethtool_pcpu_stats, tx_bytes) }, + { "cpu%u_vf_rx_packets", + offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_packets) }, + { "cpu%u_vf_rx_bytes", + offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_bytes) }, + { "cpu%u_vf_tx_packets", + offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_packets) }, + { "cpu%u_vf_tx_bytes", + offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_bytes) }, }, vf_stats[] = { { "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats, rx_packets) }, { "vf_rx_bytes", offsetof(struct netvsc_vf_pcpu_stats, rx_bytes) }, @@ -1226,6 +1301,9 @@ static const struct { #define NETVSC_GLOBAL_STATS_LEN ARRAY_SIZE(netvsc_stats) #define NETVSC_VF_STATS_LEN ARRAY_SIZE(vf_stats) +/* statistics per queue (rx/tx packets/bytes) */ +#define NETVSC_PCPU_STATS_LEN (num_present_cpus() * ARRAY_SIZE(pcpu_stats)) + /* 4 statistics per queue (rx/tx packets/bytes) */ #define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 4) @@ -1241,7 +1319,8 @@ static int netvsc_get_sset_count(struct net_device *dev, int string_set) case ETH_SS_STATS: return NETVSC_GLOBAL_STATS_LEN + NETVSC_VF_STATS_LEN - + NETVSC_QUEUE_STATS_LEN(nvdev); + + NETVSC_QUEUE_STATS_LEN(nvdev) + + NETVSC_PCPU_STATS_LEN; default: return -EINVAL; } @@ -1255,9 +1334,10 @@ static void netvsc_get_ethtool_stats(struct net_device *dev, const void *nds = &ndc->eth_stats; const struct netvsc_stats *qstats; struct netvsc_vf_pcpu_stats sum; + struct netvsc_ethtool_pcpu_stats *pcpu_sum; unsigned int start; u64 packets, bytes; - int i, j; + int i, j, cpu; if (!nvdev) return; @@ -1289,6 +1369,19 @@ static void netvsc_get_ethtool_stats(struct net_device *dev, data[i++] = packets; data[i++] = bytes; } + + pcpu_sum = kvmalloc_array(num_possible_cpus(), + sizeof(struct netvsc_ethtool_pcpu_stats), + GFP_KERNEL); + netvsc_get_pcpu_stats(dev, pcpu_sum); + for_each_present_cpu(cpu) { + struct netvsc_ethtool_pcpu_stats *this_sum = &pcpu_sum[cpu]; + + for (j = 0; j < ARRAY_SIZE(pcpu_stats); j++) + data[i++] = *(u64 *)((void *)this_sum + + pcpu_stats[j].offset); + } + kvfree(pcpu_sum); } static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data) @@ -1296,7 +1389,7 @@ static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data) struct net_device_context *ndc = netdev_priv(dev); struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev); u8 *p = data; - int i; + int i, cpu; if (!nvdev) return; @@ -1324,6 +1417,13 @@ static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data) p += ETH_GSTRING_LEN; } + for_each_present_cpu(cpu) { + for (i = 0; i < ARRAY_SIZE(pcpu_stats); i++) 
{ + sprintf(p, pcpu_stats[i].name, cpu); + p += ETH_GSTRING_LEN; + } + } + break; } } diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index adde8fc45588..cfda146f3b3b 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -514,7 +514,6 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) const struct macvlan_dev *vlan = netdev_priv(dev); const struct macvlan_port *port = vlan->port; const struct macvlan_dev *dest; - void *accel_priv = NULL; if (vlan->mode == MACVLAN_MODE_BRIDGE) { const struct ethhdr *eth = (void *)skb->data; @@ -533,15 +532,10 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) return NET_XMIT_SUCCESS; } } - - /* For packets that are non-multicast and not bridged we will pass - * the necessary information so that the lowerdev can distinguish - * the source of the packets via the accel_priv value. - */ - accel_priv = vlan->accel_priv; xmit_world: skb->dev = vlan->lowerdev; - return dev_queue_xmit_accel(skb, accel_priv); + return dev_queue_xmit_accel(skb, + netdev_get_sb_channel(dev) ? dev : NULL); } static inline netdev_tx_t macvlan_netpoll_send_skb(struct macvlan_dev *vlan, struct sk_buff *skb) @@ -1647,6 +1641,7 @@ static int macvlan_device_event(struct notifier_block *unused, switch (event) { case NETDEV_UP: + case NETDEV_DOWN: case NETDEV_CHANGE: list_for_each_entry(vlan, &port->vlans, list) netif_stacked_transfer_operstate(vlan->lowerdev, diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c index 4f390fa557e4..7ae1856d1f18 100644 --- a/drivers/net/net_failover.c +++ b/drivers/net/net_failover.c @@ -115,7 +115,8 @@ static netdev_tx_t net_failover_start_xmit(struct sk_buff *skb, } static u16 net_failover_select_queue(struct net_device *dev, - struct sk_buff *skb, void *accel_priv, + struct sk_buff *skb, + struct net_device *sb_dev, select_queue_fallback_t fallback) { struct net_failover_info *nfo_info = netdev_priv(dev); @@ -128,9 +129,9 @@ static u16 net_failover_select_queue(struct net_device *dev, if (ops->ndo_select_queue) txq = ops->ndo_select_queue(primary_dev, skb, - accel_priv, fallback); + sb_dev, fallback); else - txq = fallback(primary_dev, skb); + txq = fallback(primary_dev, skb, NULL); qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping; @@ -219,14 +220,14 @@ static int net_failover_change_mtu(struct net_device *dev, int new_mtu) struct net_device *primary_dev, *standby_dev; int ret = 0; - primary_dev = rcu_dereference(nfo_info->primary_dev); + primary_dev = rtnl_dereference(nfo_info->primary_dev); if (primary_dev) { ret = dev_set_mtu(primary_dev, new_mtu); if (ret) return ret; } - standby_dev = rcu_dereference(nfo_info->standby_dev); + standby_dev = rtnl_dereference(nfo_info->standby_dev); if (standby_dev) { ret = dev_set_mtu(standby_dev, new_mtu); if (ret) { diff --git a/drivers/net/netdevsim/Makefile b/drivers/net/netdevsim/Makefile index 449b2a1a1800..0fee1d06c084 100644 --- a/drivers/net/netdevsim/Makefile +++ b/drivers/net/netdevsim/Makefile @@ -13,3 +13,7 @@ endif ifneq ($(CONFIG_NET_DEVLINK),) netdevsim-objs += devlink.o fib.o endif + +ifneq ($(CONFIG_XFRM_OFFLOAD),) +netdevsim-objs += ipsec.o +endif diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c index 75c25306d234..81444208b216 100644 --- a/drivers/net/netdevsim/bpf.c +++ b/drivers/net/netdevsim/bpf.c @@ -92,7 +92,7 @@ static const struct bpf_prog_offload_ops nsim_bpf_analyzer_ops = { static bool nsim_xdp_offload_active(struct netdevsim *ns) { - return ns->xdp_prog_mode == 
XDP_ATTACHED_HW; + return ns->xdp_hw.prog; } static void nsim_prog_set_loaded(struct bpf_prog *prog, bool loaded) @@ -195,14 +195,14 @@ static int nsim_xdp_offload_prog(struct netdevsim *ns, struct netdev_bpf *bpf) return nsim_bpf_offload(ns, bpf->prog, nsim_xdp_offload_active(ns)); } -static int nsim_xdp_set_prog(struct netdevsim *ns, struct netdev_bpf *bpf) +static int +nsim_xdp_set_prog(struct netdevsim *ns, struct netdev_bpf *bpf, + struct xdp_attachment_info *xdp) { int err; - if (ns->xdp_prog && (bpf->flags ^ ns->xdp_flags) & XDP_FLAGS_MODES) { - NSIM_EA(bpf->extack, "program loaded with different flags"); + if (!xdp_attachment_flags_ok(xdp, bpf)) return -EBUSY; - } if (bpf->command == XDP_SETUP_PROG && !ns->bpf_xdpdrv_accept) { NSIM_EA(bpf->extack, "driver XDP disabled in DebugFS"); @@ -219,18 +219,7 @@ static int nsim_xdp_set_prog(struct netdevsim *ns, struct netdev_bpf *bpf) return err; } - if (ns->xdp_prog) - bpf_prog_put(ns->xdp_prog); - - ns->xdp_prog = bpf->prog; - ns->xdp_flags = bpf->flags; - - if (!bpf->prog) - ns->xdp_prog_mode = XDP_ATTACHED_NONE; - else if (bpf->command == XDP_SETUP_PROG) - ns->xdp_prog_mode = XDP_ATTACHED_DRV; - else - ns->xdp_prog_mode = XDP_ATTACHED_HW; + xdp_attachment_setup(xdp, bpf); return 0; } @@ -249,8 +238,8 @@ static int nsim_bpf_create_prog(struct netdevsim *ns, struct bpf_prog *prog) state->state = "verify"; /* Program id is not populated yet when we create the state. */ - sprintf(name, "%u", ns->prog_id_gen++); - state->ddir = debugfs_create_dir(name, ns->ddir_bpf_bound_progs); + sprintf(name, "%u", ns->sdev->prog_id_gen++); + state->ddir = debugfs_create_dir(name, ns->sdev->ddir_bpf_bound_progs); if (IS_ERR_OR_NULL(state->ddir)) { kfree(state); return -ENOMEM; @@ -261,7 +250,7 @@ static int nsim_bpf_create_prog(struct netdevsim *ns, struct bpf_prog *prog) &state->state, &nsim_bpf_string_fops); debugfs_create_bool("loaded", 0400, state->ddir, &state->is_loaded); - list_add_tail(&state->l, &ns->bpf_bound_progs); + list_add_tail(&state->l, &ns->sdev->bpf_bound_progs); prog->aux->offload->dev_priv = state; @@ -290,10 +279,6 @@ static int nsim_setup_prog_checks(struct netdevsim *ns, struct netdev_bpf *bpf) NSIM_EA(bpf->extack, "MTU too large w/ XDP enabled"); return -EINVAL; } - if (nsim_xdp_offload_active(ns)) { - NSIM_EA(bpf->extack, "xdp offload active, can't load drv prog"); - return -EBUSY; - } return 0; } @@ -309,7 +294,7 @@ nsim_setup_prog_hw_checks(struct netdevsim *ns, struct netdev_bpf *bpf) NSIM_EA(bpf->extack, "xdpoffload of non-bound program"); return -EINVAL; } - if (bpf->prog->aux->offload->netdev != ns->netdev) { + if (!bpf_offload_dev_match(bpf->prog, ns->netdev)) { NSIM_EA(bpf->extack, "program bound to different dev"); return -EINVAL; } @@ -512,7 +497,7 @@ nsim_bpf_map_alloc(struct netdevsim *ns, struct bpf_offloaded_map *offmap) } offmap->dev_ops = &nsim_bpf_map_ops; - list_add_tail(&nmap->l, &ns->bpf_bound_maps); + list_add_tail(&nmap->l, &ns->sdev->bpf_bound_maps); return 0; @@ -567,22 +552,21 @@ int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf) nsim_bpf_destroy_prog(bpf->offload.prog); return 0; case XDP_QUERY_PROG: - bpf->prog_attached = ns->xdp_prog_mode; - bpf->prog_id = ns->xdp_prog ? ns->xdp_prog->aux->id : 0; - bpf->prog_flags = ns->xdp_prog ? 
ns->xdp_flags : 0; - return 0; + return xdp_attachment_query(&ns->xdp, bpf); + case XDP_QUERY_PROG_HW: + return xdp_attachment_query(&ns->xdp_hw, bpf); case XDP_SETUP_PROG: err = nsim_setup_prog_checks(ns, bpf); if (err) return err; - return nsim_xdp_set_prog(ns, bpf); + return nsim_xdp_set_prog(ns, bpf, &ns->xdp); case XDP_SETUP_PROG_HW: err = nsim_setup_prog_hw_checks(ns, bpf); if (err) return err; - return nsim_xdp_set_prog(ns, bpf); + return nsim_xdp_set_prog(ns, bpf, &ns->xdp_hw); case BPF_OFFLOAD_MAP_ALLOC: if (!ns->bpf_map_accept) return -EOPNOTSUPP; @@ -598,8 +582,26 @@ int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf) int nsim_bpf_init(struct netdevsim *ns) { - INIT_LIST_HEAD(&ns->bpf_bound_progs); - INIT_LIST_HEAD(&ns->bpf_bound_maps); + int err; + + if (ns->sdev->refcnt == 1) { + INIT_LIST_HEAD(&ns->sdev->bpf_bound_progs); + INIT_LIST_HEAD(&ns->sdev->bpf_bound_maps); + + ns->sdev->ddir_bpf_bound_progs = + debugfs_create_dir("bpf_bound_progs", ns->sdev->ddir); + if (IS_ERR_OR_NULL(ns->sdev->ddir_bpf_bound_progs)) + return -ENOMEM; + + ns->sdev->bpf_dev = bpf_offload_dev_create(); + err = PTR_ERR_OR_ZERO(ns->sdev->bpf_dev); + if (err) + return err; + } + + err = bpf_offload_dev_netdev_register(ns->sdev->bpf_dev, ns->netdev); + if (err) + goto err_destroy_bdev; debugfs_create_u32("bpf_offloaded_id", 0400, ns->ddir, &ns->bpf_offloaded_id); @@ -609,10 +611,6 @@ int nsim_bpf_init(struct netdevsim *ns) &ns->bpf_bind_accept); debugfs_create_u32("bpf_bind_verifier_delay", 0600, ns->ddir, &ns->bpf_bind_verifier_delay); - ns->ddir_bpf_bound_progs = - debugfs_create_dir("bpf_bound_progs", ns->ddir); - if (IS_ERR_OR_NULL(ns->ddir_bpf_bound_progs)) - return -ENOMEM; ns->bpf_tc_accept = true; debugfs_create_bool("bpf_tc_accept", 0600, ns->ddir, @@ -631,12 +629,23 @@ int nsim_bpf_init(struct netdevsim *ns) &ns->bpf_map_accept); return 0; + +err_destroy_bdev: + if (ns->sdev->refcnt == 1) + bpf_offload_dev_destroy(ns->sdev->bpf_dev); + return err; } void nsim_bpf_uninit(struct netdevsim *ns) { - WARN_ON(!list_empty(&ns->bpf_bound_progs)); - WARN_ON(!list_empty(&ns->bpf_bound_maps)); - WARN_ON(ns->xdp_prog); + WARN_ON(ns->xdp.prog); + WARN_ON(ns->xdp_hw.prog); WARN_ON(ns->bpf_offloaded); + bpf_offload_dev_netdev_unregister(ns->sdev->bpf_dev, ns->netdev); + + if (ns->sdev->refcnt == 1) { + WARN_ON(!list_empty(&ns->sdev->bpf_bound_progs)); + WARN_ON(!list_empty(&ns->sdev->bpf_bound_maps)); + bpf_offload_dev_destroy(ns->sdev->bpf_dev); + } } diff --git a/drivers/net/netdevsim/ipsec.c b/drivers/net/netdevsim/ipsec.c new file mode 100644 index 000000000000..2dcf6cc269d0 --- /dev/null +++ b/drivers/net/netdevsim/ipsec.c @@ -0,0 +1,297 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2018 Oracle and/or its affiliates. All rights reserved. 
*/ + +#include <crypto/aead.h> +#include <linux/debugfs.h> +#include <net/xfrm.h> + +#include "netdevsim.h" + +#define NSIM_IPSEC_AUTH_BITS 128 + +static ssize_t nsim_dbg_netdev_ops_read(struct file *filp, + char __user *buffer, + size_t count, loff_t *ppos) +{ + struct netdevsim *ns = filp->private_data; + struct nsim_ipsec *ipsec = &ns->ipsec; + size_t bufsize; + char *buf, *p; + int len; + int i; + + /* the buffer needed is + * (num SAs * 3 lines each * ~60 bytes per line) + one more line + */ + bufsize = (ipsec->count * 4 * 60) + 60; + buf = kzalloc(bufsize, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + p = buf; + p += snprintf(p, bufsize - (p - buf), + "SA count=%u tx=%u\n", + ipsec->count, ipsec->tx); + + for (i = 0; i < NSIM_IPSEC_MAX_SA_COUNT; i++) { + struct nsim_sa *sap = &ipsec->sa[i]; + + if (!sap->used) + continue; + + p += snprintf(p, bufsize - (p - buf), + "sa[%i] %cx ipaddr=0x%08x %08x %08x %08x\n", + i, (sap->rx ? 'r' : 't'), sap->ipaddr[0], + sap->ipaddr[1], sap->ipaddr[2], sap->ipaddr[3]); + p += snprintf(p, bufsize - (p - buf), + "sa[%i] spi=0x%08x proto=0x%x salt=0x%08x crypt=%d\n", + i, be32_to_cpu(sap->xs->id.spi), + sap->xs->id.proto, sap->salt, sap->crypt); + p += snprintf(p, bufsize - (p - buf), + "sa[%i] key=0x%08x %08x %08x %08x\n", + i, sap->key[0], sap->key[1], + sap->key[2], sap->key[3]); + } + + len = simple_read_from_buffer(buffer, count, ppos, buf, p - buf); + + kfree(buf); + return len; +} + +static const struct file_operations ipsec_dbg_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = nsim_dbg_netdev_ops_read, +}; + +static int nsim_ipsec_find_empty_idx(struct nsim_ipsec *ipsec) +{ + u32 i; + + if (ipsec->count == NSIM_IPSEC_MAX_SA_COUNT) + return -ENOSPC; + + /* search sa table */ + for (i = 0; i < NSIM_IPSEC_MAX_SA_COUNT; i++) { + if (!ipsec->sa[i].used) + return i; + } + + return -ENOSPC; +} + +static int nsim_ipsec_parse_proto_keys(struct xfrm_state *xs, + u32 *mykey, u32 *mysalt) +{ + const char aes_gcm_name[] = "rfc4106(gcm(aes))"; + struct net_device *dev = xs->xso.dev; + unsigned char *key_data; + char *alg_name = NULL; + int key_len; + + if (!xs->aead) { + netdev_err(dev, "Unsupported IPsec algorithm\n"); + return -EINVAL; + } + + if (xs->aead->alg_icv_len != NSIM_IPSEC_AUTH_BITS) { + netdev_err(dev, "IPsec offload requires %d bit authentication\n", + NSIM_IPSEC_AUTH_BITS); + return -EINVAL; + } + + key_data = &xs->aead->alg_key[0]; + key_len = xs->aead->alg_key_len; + alg_name = xs->aead->alg_name; + + if (strcmp(alg_name, aes_gcm_name)) { + netdev_err(dev, "Unsupported IPsec algorithm - please use %s\n", + aes_gcm_name); + return -EINVAL; + } + + /* 160 accounts for 16 byte key and 4 byte salt */ + if (key_len > NSIM_IPSEC_AUTH_BITS) { + *mysalt = ((u32 *)key_data)[4]; + } else if (key_len == NSIM_IPSEC_AUTH_BITS) { + *mysalt = 0; + } else { + netdev_err(dev, "IPsec hw offload only supports 128 bit keys with optional 32 bit salt\n"); + return -EINVAL; + } + memcpy(mykey, key_data, 16); + + return 0; +} + +static int nsim_ipsec_add_sa(struct xfrm_state *xs) +{ + struct nsim_ipsec *ipsec; + struct net_device *dev; + struct netdevsim *ns; + struct nsim_sa sa; + u16 sa_idx; + int ret; + + dev = xs->xso.dev; + ns = netdev_priv(dev); + ipsec = &ns->ipsec; + + if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) { + netdev_err(dev, "Unsupported protocol 0x%04x for ipsec offload\n", + xs->id.proto); + return -EINVAL; + } + + if (xs->calg) { + netdev_err(dev, "Compression offload not supported\n"); + return -EINVAL; + } + + 
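/* [Editor's sketch] The remainder of nsim_ipsec_add_sa() packs the SA table
 * index into xs->xso.offload_handle with NSIM_IPSEC_VALID (BIT(31), defined
 * in netdevsim.h below) set, so that a valid handle for index 0 is never
 * mistaken for "no offload" (offload_handle == 0). In isolation, with
 * hypothetical helper names:
 *
 *	static inline u32 nsim_sa_to_handle(u16 sa_idx)
 *	{
 *		return sa_idx | NSIM_IPSEC_VALID;
 *	}
 *
 *	static inline u16 nsim_handle_to_sa(u32 handle)
 *	{
 *		return handle & ~NSIM_IPSEC_VALID;
 *	}
 *
 * nsim_ipsec_del_sa() and nsim_ipsec_tx() below open-code the unpacking
 * helper when they look the SA back up.
 */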
/* find the first unused index */ + ret = nsim_ipsec_find_empty_idx(ipsec); + if (ret < 0) { + netdev_err(dev, "No space for SA in Rx table!\n"); + return ret; + } + sa_idx = (u16)ret; + + memset(&sa, 0, sizeof(sa)); + sa.used = true; + sa.xs = xs; + + if (sa.xs->id.proto & IPPROTO_ESP) + sa.crypt = xs->ealg || xs->aead; + + /* get the key and salt */ + ret = nsim_ipsec_parse_proto_keys(xs, sa.key, &sa.salt); + if (ret) { + netdev_err(dev, "Failed to get key data for SA table\n"); + return ret; + } + + if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { + sa.rx = true; + + if (xs->props.family == AF_INET6) + memcpy(sa.ipaddr, &xs->id.daddr.a6, 16); + else + memcpy(&sa.ipaddr[3], &xs->id.daddr.a4, 4); + } + + /* the preparations worked, so save the info */ + memcpy(&ipsec->sa[sa_idx], &sa, sizeof(sa)); + + /* the XFRM stack doesn't like offload_handle == 0, + * so add a bitflag in case our array index is 0 + */ + xs->xso.offload_handle = sa_idx | NSIM_IPSEC_VALID; + ipsec->count++; + + return 0; +} + +static void nsim_ipsec_del_sa(struct xfrm_state *xs) +{ + struct netdevsim *ns = netdev_priv(xs->xso.dev); + struct nsim_ipsec *ipsec = &ns->ipsec; + u16 sa_idx; + + sa_idx = xs->xso.offload_handle & ~NSIM_IPSEC_VALID; + if (!ipsec->sa[sa_idx].used) { + netdev_err(ns->netdev, "Invalid SA for delete sa_idx=%d\n", + sa_idx); + return; + } + + memset(&ipsec->sa[sa_idx], 0, sizeof(struct nsim_sa)); + ipsec->count--; +} + +static bool nsim_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) +{ + struct netdevsim *ns = netdev_priv(xs->xso.dev); + struct nsim_ipsec *ipsec = &ns->ipsec; + + ipsec->ok++; + + return true; +} + +static const struct xfrmdev_ops nsim_xfrmdev_ops = { + .xdo_dev_state_add = nsim_ipsec_add_sa, + .xdo_dev_state_delete = nsim_ipsec_del_sa, + .xdo_dev_offload_ok = nsim_ipsec_offload_ok, +}; + +bool nsim_ipsec_tx(struct netdevsim *ns, struct sk_buff *skb) +{ + struct nsim_ipsec *ipsec = &ns->ipsec; + struct xfrm_state *xs; + struct nsim_sa *tsa; + u32 sa_idx; + + /* do we even need to check this packet? 
*/ + if (!skb->sp) + return true; + + if (unlikely(!skb->sp->len)) { + netdev_err(ns->netdev, "no xfrm state len = %d\n", + skb->sp->len); + return false; + } + + xs = xfrm_input_state(skb); + if (unlikely(!xs)) { + netdev_err(ns->netdev, "no xfrm_input_state() xs = %p\n", xs); + return false; + } + + sa_idx = xs->xso.offload_handle & ~NSIM_IPSEC_VALID; + if (unlikely(sa_idx >= NSIM_IPSEC_MAX_SA_COUNT)) { + netdev_err(ns->netdev, "bad sa_idx=%d max=%d\n", + sa_idx, NSIM_IPSEC_MAX_SA_COUNT); + return false; + } + + tsa = &ipsec->sa[sa_idx]; + if (unlikely(!tsa->used)) { + netdev_err(ns->netdev, "unused sa_idx=%d\n", sa_idx); + return false; + } + + if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) { + netdev_err(ns->netdev, "unexpected proto=%d\n", xs->id.proto); + return false; + } + + ipsec->tx++; + + return true; +} + +void nsim_ipsec_init(struct netdevsim *ns) +{ + ns->netdev->xfrmdev_ops = &nsim_xfrmdev_ops; + +#define NSIM_ESP_FEATURES (NETIF_F_HW_ESP | \ + NETIF_F_HW_ESP_TX_CSUM | \ + NETIF_F_GSO_ESP) + + ns->netdev->features |= NSIM_ESP_FEATURES; + ns->netdev->hw_enc_features |= NSIM_ESP_FEATURES; + + ns->ipsec.pfile = debugfs_create_file("ipsec", 0400, ns->ddir, ns, + &ipsec_dbg_fops); +} + +void nsim_ipsec_teardown(struct netdevsim *ns) +{ + struct nsim_ipsec *ipsec = &ns->ipsec; + + if (ipsec->count) + netdev_err(ns->netdev, "tearing down IPsec offload with %d SAs left\n", + ipsec->count); + debugfs_remove_recursive(ipsec->pfile); +} diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c index ec68f38213d9..8d8e2b3f263e 100644 --- a/drivers/net/netdevsim/netdev.c +++ b/drivers/net/netdevsim/netdev.c @@ -22,6 +22,7 @@ #include <net/netlink.h> #include <net/pkt_cls.h> #include <net/rtnetlink.h> +#include <net/switchdev.h> #include "netdevsim.h" @@ -40,6 +41,9 @@ struct nsim_vf_config { static u32 nsim_dev_id; +static struct dentry *nsim_ddir; +static struct dentry *nsim_sdev_ddir; + static int nsim_num_vf(struct device *dev) { struct netdevsim *ns = to_nsim(dev); @@ -144,8 +148,29 @@ static struct device_type nsim_dev_type = { .release = nsim_dev_release, }; +static int +nsim_port_attr_get(struct net_device *dev, struct switchdev_attr *attr) +{ + struct netdevsim *ns = netdev_priv(dev); + + switch (attr->id) { + case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: + attr->u.ppid.id_len = sizeof(ns->sdev->switch_id); + memcpy(&attr->u.ppid.id, &ns->sdev->switch_id, + attr->u.ppid.id_len); + return 0; + default: + return -EOPNOTSUPP; + } +} + +static const struct switchdev_ops nsim_switchdev_ops = { + .switchdev_port_attr_get = nsim_port_attr_get, +}; + static int nsim_init(struct net_device *dev) { + char sdev_ddir_name[10], sdev_link_name[32]; struct netdevsim *ns = netdev_priv(dev); int err; @@ -154,9 +179,32 @@ static int nsim_init(struct net_device *dev) if (IS_ERR_OR_NULL(ns->ddir)) return -ENOMEM; + if (!ns->sdev) { + ns->sdev = kzalloc(sizeof(*ns->sdev), GFP_KERNEL); + if (!ns->sdev) { + err = -ENOMEM; + goto err_debugfs_destroy; + } + ns->sdev->refcnt = 1; + ns->sdev->switch_id = nsim_dev_id; + sprintf(sdev_ddir_name, "%u", ns->sdev->switch_id); + ns->sdev->ddir = debugfs_create_dir(sdev_ddir_name, + nsim_sdev_ddir); + if (IS_ERR_OR_NULL(ns->sdev->ddir)) { + err = PTR_ERR_OR_ZERO(ns->sdev->ddir) ?: -EINVAL; + goto err_sdev_free; + } + } else { + sprintf(sdev_ddir_name, "%u", ns->sdev->switch_id); + ns->sdev->refcnt++; + } + + sprintf(sdev_link_name, "../../" DRV_NAME "_sdev/%s", sdev_ddir_name); + debugfs_create_symlink("sdev", ns->ddir, sdev_link_name); 
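/* [Editor's note] The shared-device lifetime works out as follows: the first
 * port allocates ns->sdev with refcnt = 1 here, and additional ports join an
 * existing sdev through the IFLA_LINK attribute handled in nsim_newlink()
 * further down. From user space that would look roughly like this (a sketch;
 * interface names are arbitrary):
 *
 *	ip link add dev nsim0 type netdevsim
 *	ip link add dev nsim1 link nsim0 type netdevsim
 *
 * Both ports then report the same switchdev parent id from
 * nsim_port_attr_get() and point at the same netdevsim_sdev/<id> debugfs
 * directory via the "sdev" symlink created above.
 */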
+ err = nsim_bpf_init(ns); if (err) - goto err_debugfs_destroy; + goto err_sdev_destroy; ns->dev.id = nsim_dev_id++; ns->dev.bus = &nsim_bus; @@ -166,17 +214,26 @@ static int nsim_init(struct net_device *dev) goto err_bpf_uninit; SET_NETDEV_DEV(dev, &ns->dev); + SWITCHDEV_SET_OPS(dev, &nsim_switchdev_ops); err = nsim_devlink_setup(ns); if (err) goto err_unreg_dev; + nsim_ipsec_init(ns); + return 0; err_unreg_dev: device_unregister(&ns->dev); err_bpf_uninit: nsim_bpf_uninit(ns); +err_sdev_destroy: + if (!--ns->sdev->refcnt) { + debugfs_remove_recursive(ns->sdev->ddir); +err_sdev_free: + kfree(ns->sdev); + } err_debugfs_destroy: debugfs_remove_recursive(ns->ddir); return err; @@ -186,9 +243,14 @@ static void nsim_uninit(struct net_device *dev) { struct netdevsim *ns = netdev_priv(dev); + nsim_ipsec_teardown(ns); nsim_devlink_teardown(ns); debugfs_remove_recursive(ns->ddir); nsim_bpf_uninit(ns); + if (!--ns->sdev->refcnt) { + debugfs_remove_recursive(ns->sdev->ddir); + kfree(ns->sdev); + } } static void nsim_free(struct net_device *dev) @@ -203,11 +265,15 @@ static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct netdevsim *ns = netdev_priv(dev); + if (!nsim_ipsec_tx(ns, skb)) + goto out; + u64_stats_update_begin(&ns->syncp); ns->tx_packets++; ns->tx_bytes += skb->len; u64_stats_update_end(&ns->syncp); +out: dev_kfree_skb(skb); return NETDEV_TX_OK; @@ -221,8 +287,7 @@ static int nsim_change_mtu(struct net_device *dev, int new_mtu) { struct netdevsim *ns = netdev_priv(dev); - if (ns->xdp_prog_mode == XDP_ATTACHED_DRV && - new_mtu > NSIM_XDP_MAX_MTU) + if (ns->xdp.prog && new_mtu > NSIM_XDP_MAX_MTU) return -EBUSY; dev->mtu = new_mtu; @@ -260,7 +325,7 @@ nsim_setup_tc_block(struct net_device *dev, struct tc_block_offload *f) switch (f->command) { case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, nsim_setup_tc_block_cb, - ns, ns); + ns, ns, f->extack); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, nsim_setup_tc_block_cb, ns); return 0; @@ -464,15 +529,46 @@ static int nsim_validate(struct nlattr *tb[], struct nlattr *data[], return 0; } +static int nsim_newlink(struct net *src_net, struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + struct netdevsim *ns = netdev_priv(dev); + + if (tb[IFLA_LINK]) { + struct net_device *joindev; + struct netdevsim *joinns; + + joindev = __dev_get_by_index(src_net, + nla_get_u32(tb[IFLA_LINK])); + if (!joindev) + return -ENODEV; + if (joindev->netdev_ops != &nsim_netdev_ops) + return -EINVAL; + + joinns = netdev_priv(joindev); + if (!joinns->sdev || !joinns->sdev->refcnt) + return -EINVAL; + ns->sdev = joinns->sdev; + } + + return register_netdevice(dev); +} + +static void nsim_dellink(struct net_device *dev, struct list_head *head) +{ + unregister_netdevice_queue(dev, head); +} + static struct rtnl_link_ops nsim_link_ops __read_mostly = { .kind = DRV_NAME, .priv_size = sizeof(struct netdevsim), .setup = nsim_setup, .validate = nsim_validate, + .newlink = nsim_newlink, + .dellink = nsim_dellink, }; -struct dentry *nsim_ddir; - static int __init nsim_module_init(void) { int err; @@ -481,9 +577,15 @@ static int __init nsim_module_init(void) if (IS_ERR_OR_NULL(nsim_ddir)) return -ENOMEM; + nsim_sdev_ddir = debugfs_create_dir(DRV_NAME "_sdev", NULL); + if (IS_ERR_OR_NULL(nsim_sdev_ddir)) { + err = -ENOMEM; + goto err_debugfs_destroy; + } + err = bus_register(&nsim_bus); if (err) - goto err_debugfs_destroy; + goto err_sdir_destroy; err = nsim_devlink_init(); if 
(err) @@ -499,6 +601,8 @@ err_dl_fini: nsim_devlink_exit(); err_unreg_bus: bus_unregister(&nsim_bus); +err_sdir_destroy: + debugfs_remove_recursive(nsim_sdev_ddir); err_debugfs_destroy: debugfs_remove_recursive(nsim_ddir); return err; @@ -509,6 +613,7 @@ static void __exit nsim_module_exit(void) rtnl_link_unregister(&nsim_link_ops); nsim_devlink_exit(); bus_unregister(&nsim_bus); + debugfs_remove_recursive(nsim_sdev_ddir); debugfs_remove_recursive(nsim_ddir); } diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h index 8ca50b72c328..384c254fafc5 100644 --- a/drivers/net/netdevsim/netdevsim.h +++ b/drivers/net/netdevsim/netdevsim.h @@ -18,6 +18,7 @@ #include <linux/list.h> #include <linux/netdevice.h> #include <linux/u64_stats_sync.h> +#include <net/xdp.h> #define DRV_NAME "netdevsim" @@ -26,9 +27,46 @@ #define NSIM_EA(extack, msg) NL_SET_ERR_MSG_MOD((extack), msg) struct bpf_prog; +struct bpf_offload_dev; struct dentry; struct nsim_vf_config; +struct netdevsim_shared_dev { + unsigned int refcnt; + u32 switch_id; + + struct dentry *ddir; + + struct bpf_offload_dev *bpf_dev; + + struct dentry *ddir_bpf_bound_progs; + u32 prog_id_gen; + + struct list_head bpf_bound_progs; + struct list_head bpf_bound_maps; +}; + +#define NSIM_IPSEC_MAX_SA_COUNT 33 +#define NSIM_IPSEC_VALID BIT(31) + +struct nsim_sa { + struct xfrm_state *xs; + __be32 ipaddr[4]; + u32 key[4]; + u32 salt; + bool used; + bool crypt; + bool rx; +}; + +struct nsim_ipsec { + struct nsim_sa sa[NSIM_IPSEC_MAX_SA_COUNT]; + struct dentry *pfile; + u32 count; + u32 tx; + u32 ok; +}; + struct netdevsim { struct net_device *netdev; @@ -37,6 +75,7 @@ struct netdevsim { struct u64_stats_sync syncp; struct device dev; + struct netdevsim_shared_dev *sdev; struct dentry *ddir; @@ -46,16 +85,11 @@ struct netdevsim { struct bpf_prog *bpf_offloaded; u32 bpf_offloaded_id; - u32 xdp_flags; - int xdp_prog_mode; - struct bpf_prog *xdp_prog; - - u32 prog_id_gen; + struct xdp_attachment_info xdp; + struct xdp_attachment_info xdp_hw; bool bpf_bind_accept; u32 bpf_bind_verifier_delay; - struct dentry *ddir_bpf_bound_progs; - struct list_head bpf_bound_progs; bool bpf_tc_accept; bool bpf_tc_non_bound_accept; @@ -63,14 +97,12 @@ struct netdevsim { bool bpf_xdpoffload_accept; bool bpf_map_accept; - struct list_head bpf_bound_maps; #if IS_ENABLED(CONFIG_NET_DEVLINK) struct devlink *devlink; #endif + struct nsim_ipsec ipsec; }; -extern struct dentry *nsim_ddir; - #ifdef CONFIG_BPF_SYSCALL int nsim_bpf_init(struct netdevsim *ns); void nsim_bpf_uninit(struct netdevsim *ns); @@ -148,6 +180,25 @@ static inline void nsim_devlink_exit(void) } #endif +#if IS_ENABLED(CONFIG_XFRM_OFFLOAD) +void nsim_ipsec_init(struct netdevsim *ns); +void nsim_ipsec_teardown(struct netdevsim *ns); +bool nsim_ipsec_tx(struct netdevsim *ns, struct sk_buff *skb); +#else +static inline void nsim_ipsec_init(struct netdevsim *ns) +{ +} + +static inline void nsim_ipsec_teardown(struct netdevsim *ns) +{ +} + +static inline bool nsim_ipsec_tx(struct netdevsim *ns, struct sk_buff *skb) +{ + return true; +} +#endif + static inline struct netdevsim *to_nsim(struct device *ptr) { return container_of(ptr, struct netdevsim, dev); diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c index 9f6f7ccd44f7..b12023bc2cab 100644 --- a/drivers/net/ntb_netdev.c +++ b/drivers/net/ntb_netdev.c @@ -430,7 +430,7 @@ static int ntb_netdev_probe(struct device *client_dev) ndev->hw_features = ndev->features; ndev->watchdog_timeo = msecs_to_jiffies(NTB_TX_TIMEOUT_MS); - 
random_ether_addr(ndev->perm_addr); + eth_random_addr(ndev->perm_addr); memcpy(ndev->dev_addr, ndev->perm_addr, ndev->addr_len); ndev->netdev_ops = &ntb_netdev_ops; diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 343989f9f9d9..82070792edbb 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -28,7 +28,7 @@ config MDIO_BCM_IPROC config MDIO_BCM_UNIMAC tristate "Broadcom UniMAC MDIO bus controller" - depends on HAS_IOMEM && OF_MDIO + depends on HAS_IOMEM help This module provides a driver for the Broadcom UniMAC MDIO busses. This hardware can be found in the Broadcom GENET Ethernet MAC @@ -92,7 +92,8 @@ config MDIO_CAVIUM config MDIO_GPIO tristate "GPIO lib-based bitbanged MDIO buses" - depends on MDIO_BITBANG && GPIOLIB + depends on MDIO_BITBANG + depends on GPIOLIB || COMPILE_TEST ---help--- Supports GPIO lib-based MDIO busses. @@ -213,6 +214,7 @@ comment "MII PHY device drivers" config SFP tristate "SFP cage support" depends on I2C && PHYLINK + depends on HWMON || HWMON=n select MDIO_I2C config AMD_PHY diff --git a/drivers/net/phy/dp83tc811.c b/drivers/net/phy/dp83tc811.c index 49ac678eb2dc..78cad134a79e 100644 --- a/drivers/net/phy/dp83tc811.c +++ b/drivers/net/phy/dp83tc811.c @@ -21,6 +21,7 @@ #define MII_DP83811_SGMII_CTRL 0x09 #define MII_DP83811_INT_STAT1 0x12 #define MII_DP83811_INT_STAT2 0x13 +#define MII_DP83811_INT_STAT3 0x18 #define MII_DP83811_RESET_CTRL 0x1f #define DP83811_HW_RESET BIT(15) @@ -44,6 +45,11 @@ #define DP83811_OVERVOLTAGE_INT_EN BIT(6) #define DP83811_UNDERVOLTAGE_INT_EN BIT(7) +/* INT_STAT3 bits */ +#define DP83811_LPS_INT_EN BIT(0) +#define DP83811_NO_FRAME_INT_EN BIT(3) +#define DP83811_POR_DONE_INT_EN BIT(4) + #define MII_DP83811_RXSOP1 0x04a5 #define MII_DP83811_RXSOP2 0x04a6 #define MII_DP83811_RXSOP3 0x04a7 @@ -81,6 +87,10 @@ static int dp83811_ack_interrupt(struct phy_device *phydev) if (err < 0) return err; + err = phy_read(phydev, MII_DP83811_INT_STAT3); + if (err < 0) + return err; + return 0; } @@ -216,6 +226,18 @@ static int dp83811_config_intr(struct phy_device *phydev) DP83811_UNDERVOLTAGE_INT_EN); err = phy_write(phydev, MII_DP83811_INT_STAT2, misr_status); + if (err < 0) + return err; + + misr_status = phy_read(phydev, MII_DP83811_INT_STAT3); + if (misr_status < 0) + return misr_status; + + misr_status |= (DP83811_LPS_INT_EN | + DP83811_NO_FRAME_INT_EN | + DP83811_POR_DONE_INT_EN); + + err = phy_write(phydev, MII_DP83811_INT_STAT3, misr_status); } else { err = phy_write(phydev, MII_DP83811_INT_STAT1, 0); @@ -223,6 +245,10 @@ static int dp83811_config_intr(struct phy_device *phydev) return err; err = phy_write(phydev, MII_DP83811_INT_STAT2, 0); + if (err < 0) + return err; + + err = phy_write(phydev, MII_DP83811_INT_STAT3, 0); } return err; @@ -258,21 +284,19 @@ static int dp83811_config_init(struct phy_device *phydev) if (err < 0) return err; + value = phy_read(phydev, MII_DP83811_SGMII_CTRL); if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { - value = phy_read(phydev, MII_DP83811_SGMII_CTRL); - if (!(value & DP83811_SGMII_EN)) { - err = phy_write(phydev, MII_DP83811_SGMII_CTRL, + err = phy_write(phydev, MII_DP83811_SGMII_CTRL, (DP83811_SGMII_EN | value)); - if (err < 0) - return err; - } else { - err = phy_write(phydev, MII_DP83811_SGMII_CTRL, - (~DP83811_SGMII_EN & value)); - if (err < 0) - return err; - } + } else { + err = phy_write(phydev, MII_DP83811_SGMII_CTRL, + (~DP83811_SGMII_EN & value)); } + if (err < 0) + + return err; + value = DP83811_WOL_MAGIC_EN | DP83811_WOL_SECURE_ON | DP83811_WOL_EN; 
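/* [Editor's note] The SGMII rework just above reads MII_DP83811_SGMII_CTRL
 * once and then unconditionally writes it back with DP83811_SGMII_EN set or
 * cleared. An equivalent, more compact form would be phy_modify(), which
 * this same series uses in the Vitesse driver below -- a sketch, not what
 * the commit does:
 *
 *	err = phy_modify(phydev, MII_DP83811_SGMII_CTRL,
 *			 DP83811_SGMII_EN,
 *			 phydev->interface == PHY_INTERFACE_MODE_SGMII ?
 *				DP83811_SGMII_EN : 0);
 */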
return phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG, diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c index 001fe1df7557..67b260877f30 100644 --- a/drivers/net/phy/fixed_phy.c +++ b/drivers/net/phy/fixed_phy.c @@ -259,10 +259,8 @@ static int __init fixed_mdio_bus_init(void) int ret; pdev = platform_device_register_simple("Fixed MDIO bus", 0, NULL, 0); - if (IS_ERR(pdev)) { - ret = PTR_ERR(pdev); - goto err_pdev; - } + if (IS_ERR(pdev)) + return PTR_ERR(pdev); fmb->mii_bus = mdiobus_alloc(); if (fmb->mii_bus == NULL) { @@ -287,7 +285,6 @@ err_mdiobus_alloc: mdiobus_free(fmb->mii_bus); err_mdiobus_reg: platform_device_unregister(pdev); -err_pdev: return ret; } module_init(fixed_mdio_bus_init); diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 1cd439bdf608..f7c69ca34056 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -679,7 +679,7 @@ static int m88e1116r_config_init(struct phy_device *phydev) if (err < 0) return err; - mdelay(500); + msleep(500); err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE); if (err < 0) diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c index 082ffef0dec4..bc90764a8b8d 100644 --- a/drivers/net/phy/mdio-mux-gpio.c +++ b/drivers/net/phy/mdio-mux-gpio.c @@ -20,23 +20,23 @@ struct mdio_mux_gpio_state { struct gpio_descs *gpios; void *mux_handle; + int values[]; }; static int mdio_mux_gpio_switch_fn(int current_child, int desired_child, void *data) { struct mdio_mux_gpio_state *s = data; - int values[s->gpios->ndescs]; unsigned int n; if (current_child == desired_child) return 0; for (n = 0; n < s->gpios->ndescs; n++) - values[n] = (desired_child >> n) & 1; + s->values[n] = (desired_child >> n) & 1; gpiod_set_array_value_cansleep(s->gpios->ndescs, s->gpios->desc, - values); + s->values); return 0; } @@ -44,15 +44,21 @@ static int mdio_mux_gpio_switch_fn(int current_child, int desired_child, static int mdio_mux_gpio_probe(struct platform_device *pdev) { struct mdio_mux_gpio_state *s; + struct gpio_descs *gpios; int r; - s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL); - if (!s) + gpios = gpiod_get_array(&pdev->dev, NULL, GPIOD_OUT_LOW); + if (IS_ERR(gpios)) + return PTR_ERR(gpios); + + s = devm_kzalloc(&pdev->dev, struct_size(s, values, gpios->ndescs), + GFP_KERNEL); + if (!s) { + gpiod_put_array(gpios); return -ENOMEM; + } - s->gpios = gpiod_get_array(&pdev->dev, NULL, GPIOD_OUT_LOW); - if (IS_ERR(s->gpios)) - return PTR_ERR(s->gpios); + s->gpios = gpios; r = mdio_mux_init(&pdev->dev, pdev->dev.of_node, mdio_mux_gpio_switch_fn, &s->mux_handle, s, NULL); diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c index 650c2667d523..84ca9ff40ae0 100644 --- a/drivers/net/phy/mscc.c +++ b/drivers/net/phy/mscc.c @@ -123,7 +123,7 @@ static const struct vsc8531_edge_rate_table edge_table[] = { }; #endif /* CONFIG_OF_MDIO */ -static int vsc85xx_phy_page_set(struct phy_device *phydev, u8 page) +static int vsc85xx_phy_page_set(struct phy_device *phydev, u16 page) { int rc; diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 6c9b24fe3148..1ee25877c4d1 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -467,6 +467,20 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd) } EXPORT_SYMBOL(phy_mii_ioctl); +static int phy_config_aneg(struct phy_device *phydev) +{ + if (phydev->drv->config_aneg) + return phydev->drv->config_aneg(phydev); + + /* Clause 45 PHYs that don't implement Clause 22 registers are not + * allowed 
to call genphy_config_aneg() + */ + if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0))) + return -EOPNOTSUPP; + + return genphy_config_aneg(phydev); +} + /** * phy_start_aneg_priv - start auto-negotiation for this PHY device * @phydev: the phy_device struct @@ -493,10 +507,7 @@ static int phy_start_aneg_priv(struct phy_device *phydev, bool sync) /* Invalidate LP advertising flags */ phydev->lp_advertising = 0; - if (phydev->drv->config_aneg) - err = phydev->drv->config_aneg(phydev); - else - err = genphy_config_aneg(phydev); + err = phy_config_aneg(phydev); if (err < 0) goto out_unlock; @@ -514,7 +525,7 @@ static int phy_start_aneg_priv(struct phy_device *phydev, bool sync) * negotiation may already be done and aneg interrupt may not be * generated. */ - if (phydev->irq != PHY_POLL && phydev->state == PHY_AN) { + if (!phy_polling_mode(phydev) && phydev->state == PHY_AN) { err = phy_aneg_done(phydev); if (err > 0) { trigger = true; @@ -546,6 +557,84 @@ int phy_start_aneg(struct phy_device *phydev) } EXPORT_SYMBOL(phy_start_aneg); +static int phy_poll_aneg_done(struct phy_device *phydev) +{ + unsigned int retries = 100; + int ret; + + do { + msleep(100); + ret = phy_aneg_done(phydev); + } while (!ret && --retries); + + if (!ret) + return -ETIMEDOUT; + + return ret < 0 ? ret : 0; +} + +/** + * phy_speed_down - set speed to lowest speed supported by both link partners + * @phydev: the phy_device struct + * @sync: perform action synchronously + * + * Description: Typically used to save energy when waiting for a WoL packet + * + * WARNING: Setting sync to false may cause the system being unable to suspend + * in case the PHY generates an interrupt when finishing the autonegotiation. + * This interrupt may wake up the system immediately after suspend. + * Therefore use sync = false only if you're sure it's safe with the respective + * network chip. + */ +int phy_speed_down(struct phy_device *phydev, bool sync) +{ + u32 adv = phydev->lp_advertising & phydev->supported; + u32 adv_old = phydev->advertising; + int ret; + + if (phydev->autoneg != AUTONEG_ENABLE) + return 0; + + if (adv & PHY_10BT_FEATURES) + phydev->advertising &= ~(PHY_100BT_FEATURES | + PHY_1000BT_FEATURES); + else if (adv & PHY_100BT_FEATURES) + phydev->advertising &= ~PHY_1000BT_FEATURES; + + if (phydev->advertising == adv_old) + return 0; + + ret = phy_config_aneg(phydev); + if (ret) + return ret; + + return sync ? 
phy_poll_aneg_done(phydev) : 0; +} +EXPORT_SYMBOL_GPL(phy_speed_down); + +/** + * phy_speed_up - (re)set advertised speeds to all supported speeds + * @phydev: the phy_device struct + * + * Description: Used to revert the effect of phy_speed_down + */ +int phy_speed_up(struct phy_device *phydev) +{ + u32 mask = PHY_10BT_FEATURES | PHY_100BT_FEATURES | PHY_1000BT_FEATURES; + u32 adv_old = phydev->advertising; + + if (phydev->autoneg != AUTONEG_ENABLE) + return 0; + + phydev->advertising = (adv_old & ~mask) | (phydev->supported & mask); + + if (phydev->advertising == adv_old) + return 0; + + return phy_config_aneg(phydev); +} +EXPORT_SYMBOL_GPL(phy_speed_up); + /** * phy_start_machine - start PHY state machine tracking * @phydev: the phy_device struct @@ -894,7 +983,7 @@ void phy_state_machine(struct work_struct *work) needs_aneg = true; break; case PHY_NOLINK: - if (phydev->irq != PHY_POLL) + if (!phy_polling_mode(phydev)) break; err = phy_read_status(phydev); @@ -935,7 +1024,7 @@ void phy_state_machine(struct work_struct *work) /* Only register a CHANGE if we are polling and link changed * since latest checking. */ - if (phydev->irq == PHY_POLL) { + if (phy_polling_mode(phydev)) { old_link = phydev->link; err = phy_read_status(phydev); if (err) @@ -1034,7 +1123,7 @@ void phy_state_machine(struct work_struct *work) * PHY, if PHY_IGNORE_INTERRUPT is set, then we will be moving * between states from phy_mac_interrupt() */ - if (phydev->irq == PHY_POLL) + if (phy_polling_mode(phydev)) queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, PHY_STATE_TIME * HZ); } diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index b9f5f40a7ac1..db1172db1e7c 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -1555,6 +1555,14 @@ int genphy_read_status(struct phy_device *phydev) if (adv < 0) return adv; + if (lpagb & LPA_1000MSFAIL) { + if (adv & CTL1000_ENABLE_MASTER) + phydev_err(phydev, "Master/Slave resolution failed, maybe conflicting manual settings?\n"); + else + phydev_err(phydev, "Master/Slave resolution failed\n"); + return -ENOLINK; + } + phydev->lp_advertising = mii_stat1000_to_ethtool_lpa_t(lpagb); common_adv_gb = lpagb & adv << 2; diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index 082fb40c656d..7fc8508b5231 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -37,6 +37,9 @@ #define RTL8201F_ISR 0x1e #define RTL8201F_IER 0x13 +#define RTL8366RB_POWER_SAVE 0x15 +#define RTL8366RB_POWER_SAVE_ON BIT(12) + MODULE_DESCRIPTION("Realtek PHY driver"); MODULE_AUTHOR("Johnson Leung"); MODULE_LICENSE("GPL"); @@ -128,6 +131,37 @@ static int rtl8211f_config_intr(struct phy_device *phydev) return phy_write_paged(phydev, 0xa42, RTL821x_INER, val); } +static int rtl8211_config_aneg(struct phy_device *phydev) +{ + int ret; + + ret = genphy_config_aneg(phydev); + if (ret < 0) + return ret; + + /* Quirk was copied from vendor driver. Unfortunately it includes no + * description of the magic numbers. 
+ */ + if (phydev->speed == SPEED_100 && phydev->autoneg == AUTONEG_DISABLE) { + phy_write(phydev, 0x17, 0x2138); + phy_write(phydev, 0x0e, 0x0260); + } else { + phy_write(phydev, 0x17, 0x2108); + phy_write(phydev, 0x0e, 0x0000); + } + + return 0; +} + +static int rtl8211c_config_init(struct phy_device *phydev) +{ + /* RTL8211C has an issue when operating in Gigabit slave mode */ + phy_set_bits(phydev, MII_CTRL1000, + CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER); + + return genphy_config_init(phydev); +} + static int rtl8211f_config_init(struct phy_device *phydev) { int ret; @@ -159,6 +193,24 @@ static int rtl8211b_resume(struct phy_device *phydev) return genphy_resume(phydev); } +static int rtl8366rb_config_init(struct phy_device *phydev) +{ + int ret; + + ret = genphy_config_init(phydev); + if (ret < 0) + return ret; + + ret = phy_set_bits(phydev, RTL8366RB_POWER_SAVE, + RTL8366RB_POWER_SAVE_ON); + if (ret) { + dev_err(&phydev->mdio.dev, + "error enabling power management\n"); + } + + return ret; +} + static struct phy_driver realtek_drvs[] = { { .phy_id = 0x00008201, @@ -179,6 +231,14 @@ static struct phy_driver realtek_drvs[] = { .read_page = rtl821x_read_page, .write_page = rtl821x_write_page, }, { + .phy_id = 0x001cc910, + .name = "RTL8211 Gigabit Ethernet", + .phy_id_mask = 0x001fffff, + .features = PHY_GBIT_FEATURES, + .config_aneg = rtl8211_config_aneg, + .read_mmd = &genphy_read_mmd_unsupported, + .write_mmd = &genphy_write_mmd_unsupported, + }, { .phy_id = 0x001cc912, .name = "RTL8211B Gigabit Ethernet", .phy_id_mask = 0x001fffff, @@ -191,6 +251,14 @@ static struct phy_driver realtek_drvs[] = { .suspend = rtl8211b_suspend, .resume = rtl8211b_resume, }, { + .phy_id = 0x001cc913, + .name = "RTL8211C Gigabit Ethernet", + .phy_id_mask = 0x001fffff, + .features = PHY_GBIT_FEATURES, + .config_init = rtl8211c_config_init, + .read_mmd = &genphy_read_mmd_unsupported, + .write_mmd = &genphy_write_mmd_unsupported, + }, { .phy_id = 0x001cc914, .name = "RTL8211DN Gigabit Ethernet", .phy_id_mask = 0x001fffff, @@ -223,6 +291,15 @@ static struct phy_driver realtek_drvs[] = { .resume = genphy_resume, .read_page = rtl821x_read_page, .write_page = rtl821x_write_page, + }, { + .phy_id = 0x001cc961, + .name = "RTL8366RB Gigabit Ethernet", + .phy_id_mask = 0x001fffff, + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .config_init = &rtl8366rb_config_init, + .suspend = genphy_suspend, + .resume = genphy_resume, }, }; @@ -230,10 +307,13 @@ module_phy_driver(realtek_drvs); static struct mdio_device_id __maybe_unused realtek_tbl[] = { { 0x001cc816, 0x001fffff }, + { 0x001cc910, 0x001fffff }, { 0x001cc912, 0x001fffff }, + { 0x001cc913, 0x001fffff }, { 0x001cc914, 0x001fffff }, { 0x001cc915, 0x001fffff }, { 0x001cc916, 0x001fffff }, + { 0x001cc961, 0x001fffff }, { } }; diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index c4c92db86dfa..5661226cf75b 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -1,5 +1,7 @@ +#include <linux/ctype.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> +#include <linux/hwmon.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/jiffies.h> @@ -131,6 +133,12 @@ struct sfp { unsigned int sm_retries; struct sfp_eeprom_id id; +#if IS_ENABLED(CONFIG_HWMON) + struct sfp_diag diag; + struct device *hwmon_dev; + char *hwmon_name; +#endif + }; static bool sff_module_supported(const struct sfp_eeprom_id *id) @@ -316,6 +324,719 @@ static unsigned int sfp_check(void *buf, size_t len) return check; } +/* hwmon */ +#if 
IS_ENABLED(CONFIG_HWMON) +static umode_t sfp_hwmon_is_visible(const void *data, + enum hwmon_sensor_types type, + u32 attr, int channel) +{ + const struct sfp *sfp = data; + + switch (type) { + case hwmon_temp: + switch (attr) { + case hwmon_temp_input: + case hwmon_temp_min_alarm: + case hwmon_temp_max_alarm: + case hwmon_temp_lcrit_alarm: + case hwmon_temp_crit_alarm: + case hwmon_temp_min: + case hwmon_temp_max: + case hwmon_temp_lcrit: + case hwmon_temp_crit: + return 0444; + default: + return 0; + } + case hwmon_in: + switch (attr) { + case hwmon_in_input: + case hwmon_in_min_alarm: + case hwmon_in_max_alarm: + case hwmon_in_lcrit_alarm: + case hwmon_in_crit_alarm: + case hwmon_in_min: + case hwmon_in_max: + case hwmon_in_lcrit: + case hwmon_in_crit: + return 0444; + default: + return 0; + } + case hwmon_curr: + switch (attr) { + case hwmon_curr_input: + case hwmon_curr_min_alarm: + case hwmon_curr_max_alarm: + case hwmon_curr_lcrit_alarm: + case hwmon_curr_crit_alarm: + case hwmon_curr_min: + case hwmon_curr_max: + case hwmon_curr_lcrit: + case hwmon_curr_crit: + return 0444; + default: + return 0; + } + case hwmon_power: + /* External calibration of receive power requires + * floating point arithmetic. Doing that in the kernel + * is not easy, so just skip it. If the module does + * not require external calibration, we can however + * show receiver power, since FP is then not needed. + */ + if (sfp->id.ext.diagmon & SFP_DIAGMON_EXT_CAL && + channel == 1) + return 0; + switch (attr) { + case hwmon_power_input: + case hwmon_power_min_alarm: + case hwmon_power_max_alarm: + case hwmon_power_lcrit_alarm: + case hwmon_power_crit_alarm: + case hwmon_power_min: + case hwmon_power_max: + case hwmon_power_lcrit: + case hwmon_power_crit: + return 0444; + default: + return 0; + } + default: + return 0; + } +} + +static int sfp_hwmon_read_sensor(struct sfp *sfp, int reg, long *value) +{ + __be16 val; + int err; + + err = sfp_read(sfp, true, reg, &val, sizeof(val)); + if (err < 0) + return err; + + *value = be16_to_cpu(val); + + return 0; +} + +static void sfp_hwmon_to_rx_power(long *value) +{ + *value = DIV_ROUND_CLOSEST(*value, 100); +} + +static void sfp_hwmon_calibrate(struct sfp *sfp, unsigned int slope, int offset, + long *value) +{ + if (sfp->id.ext.diagmon & SFP_DIAGMON_EXT_CAL) + *value = DIV_ROUND_CLOSEST(*value * slope, 256) + offset; +} + +static void sfp_hwmon_calibrate_temp(struct sfp *sfp, long *value) +{ + sfp_hwmon_calibrate(sfp, be16_to_cpu(sfp->diag.cal_t_slope), + be16_to_cpu(sfp->diag.cal_t_offset), value); + + if (*value >= 0x8000) + *value -= 0x10000; + + *value = DIV_ROUND_CLOSEST(*value * 1000, 256); +} + +static void sfp_hwmon_calibrate_vcc(struct sfp *sfp, long *value) +{ + sfp_hwmon_calibrate(sfp, be16_to_cpu(sfp->diag.cal_v_slope), + be16_to_cpu(sfp->diag.cal_v_offset), value); + + *value = DIV_ROUND_CLOSEST(*value, 10); +} + +static void sfp_hwmon_calibrate_bias(struct sfp *sfp, long *value) +{ + sfp_hwmon_calibrate(sfp, be16_to_cpu(sfp->diag.cal_txi_slope), + be16_to_cpu(sfp->diag.cal_txi_offset), value); + + *value = DIV_ROUND_CLOSEST(*value, 500); +} + +static void sfp_hwmon_calibrate_tx_power(struct sfp *sfp, long *value) +{ + sfp_hwmon_calibrate(sfp, be16_to_cpu(sfp->diag.cal_txpwr_slope), + be16_to_cpu(sfp->diag.cal_txpwr_offset), value); + + *value = DIV_ROUND_CLOSEST(*value, 10); +} + +static int sfp_hwmon_read_temp(struct sfp *sfp, int reg, long *value) +{ + int err; + + err = sfp_hwmon_read_sensor(sfp, reg, value); + if (err < 0) + return err; + + 
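/* [Editor's note] For externally calibrated modules (SFP_DIAGMON_EXT_CAL),
 * SFF-8472 specifies the correction that sfp_hwmon_calibrate() applies:
 *
 *	corrected = raw * slope / 256 + offset
 *
 * where slope is an unsigned 8.8 fixed-point number, hence the
 * DIV_ROUND_CLOSEST(*value * slope, 256) above. Temperature is then
 * sign-extended from 16 bits (the >= 0x8000 test) and rescaled from its
 * 1/256 degC LSB to the millidegrees that hwmon expects (* 1000 / 256).
 */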
sfp_hwmon_calibrate_temp(sfp, value); + + return 0; +} + +static int sfp_hwmon_read_vcc(struct sfp *sfp, int reg, long *value) +{ + int err; + + err = sfp_hwmon_read_sensor(sfp, reg, value); + if (err < 0) + return err; + + sfp_hwmon_calibrate_vcc(sfp, value); + + return 0; +} + +static int sfp_hwmon_read_bias(struct sfp *sfp, int reg, long *value) +{ + int err; + + err = sfp_hwmon_read_sensor(sfp, reg, value); + if (err < 0) + return err; + + sfp_hwmon_calibrate_bias(sfp, value); + + return 0; +} + +static int sfp_hwmon_read_tx_power(struct sfp *sfp, int reg, long *value) +{ + int err; + + err = sfp_hwmon_read_sensor(sfp, reg, value); + if (err < 0) + return err; + + sfp_hwmon_calibrate_tx_power(sfp, value); + + return 0; +} + +static int sfp_hwmon_read_rx_power(struct sfp *sfp, int reg, long *value) +{ + int err; + + err = sfp_hwmon_read_sensor(sfp, reg, value); + if (err < 0) + return err; + + sfp_hwmon_to_rx_power(value); + + return 0; +} + +static int sfp_hwmon_temp(struct sfp *sfp, u32 attr, long *value) +{ + u8 status; + int err; + + switch (attr) { + case hwmon_temp_input: + return sfp_hwmon_read_temp(sfp, SFP_TEMP, value); + + case hwmon_temp_lcrit: + *value = be16_to_cpu(sfp->diag.temp_low_alarm); + sfp_hwmon_calibrate_temp(sfp, value); + return 0; + + case hwmon_temp_min: + *value = be16_to_cpu(sfp->diag.temp_low_warn); + sfp_hwmon_calibrate_temp(sfp, value); + return 0; + case hwmon_temp_max: + *value = be16_to_cpu(sfp->diag.temp_high_warn); + sfp_hwmon_calibrate_temp(sfp, value); + return 0; + + case hwmon_temp_crit: + *value = be16_to_cpu(sfp->diag.temp_high_alarm); + sfp_hwmon_calibrate_temp(sfp, value); + return 0; + + case hwmon_temp_lcrit_alarm: + err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_ALARM0_TEMP_LOW); + return 0; + + case hwmon_temp_min_alarm: + err = sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_WARN0_TEMP_LOW); + return 0; + + case hwmon_temp_max_alarm: + err = sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_WARN0_TEMP_HIGH); + return 0; + + case hwmon_temp_crit_alarm: + err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_ALARM0_TEMP_HIGH); + return 0; + default: + return -EOPNOTSUPP; + } + + return -EOPNOTSUPP; +} + +static int sfp_hwmon_vcc(struct sfp *sfp, u32 attr, long *value) +{ + u8 status; + int err; + + switch (attr) { + case hwmon_in_input: + return sfp_hwmon_read_vcc(sfp, SFP_VCC, value); + + case hwmon_in_lcrit: + *value = be16_to_cpu(sfp->diag.volt_low_alarm); + sfp_hwmon_calibrate_vcc(sfp, value); + return 0; + + case hwmon_in_min: + *value = be16_to_cpu(sfp->diag.volt_low_warn); + sfp_hwmon_calibrate_vcc(sfp, value); + return 0; + + case hwmon_in_max: + *value = be16_to_cpu(sfp->diag.volt_high_warn); + sfp_hwmon_calibrate_vcc(sfp, value); + return 0; + + case hwmon_in_crit: + *value = be16_to_cpu(sfp->diag.volt_high_alarm); + sfp_hwmon_calibrate_vcc(sfp, value); + return 0; + + case hwmon_in_lcrit_alarm: + err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_ALARM0_VCC_LOW); + return 0; + + case hwmon_in_min_alarm: + err = sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_WARN0_VCC_LOW); + return 0; + + case hwmon_in_max_alarm: + err = 
sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_WARN0_VCC_HIGH); + return 0; + + case hwmon_in_crit_alarm: + err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_ALARM0_VCC_HIGH); + return 0; + default: + return -EOPNOTSUPP; + } + + return -EOPNOTSUPP; +} + +static int sfp_hwmon_bias(struct sfp *sfp, u32 attr, long *value) +{ + u8 status; + int err; + + switch (attr) { + case hwmon_curr_input: + return sfp_hwmon_read_bias(sfp, SFP_TX_BIAS, value); + + case hwmon_curr_lcrit: + *value = be16_to_cpu(sfp->diag.bias_low_alarm); + sfp_hwmon_calibrate_bias(sfp, value); + return 0; + + case hwmon_curr_min: + *value = be16_to_cpu(sfp->diag.bias_low_warn); + sfp_hwmon_calibrate_bias(sfp, value); + return 0; + + case hwmon_curr_max: + *value = be16_to_cpu(sfp->diag.bias_high_warn); + sfp_hwmon_calibrate_bias(sfp, value); + return 0; + + case hwmon_curr_crit: + *value = be16_to_cpu(sfp->diag.bias_high_alarm); + sfp_hwmon_calibrate_bias(sfp, value); + return 0; + + case hwmon_curr_lcrit_alarm: + err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_ALARM0_TX_BIAS_LOW); + return 0; + + case hwmon_curr_min_alarm: + err = sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_WARN0_TX_BIAS_LOW); + return 0; + + case hwmon_curr_max_alarm: + err = sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_WARN0_TX_BIAS_HIGH); + return 0; + + case hwmon_curr_crit_alarm: + err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_ALARM0_TX_BIAS_HIGH); + return 0; + default: + return -EOPNOTSUPP; + } + + return -EOPNOTSUPP; +} + +static int sfp_hwmon_tx_power(struct sfp *sfp, u32 attr, long *value) +{ + u8 status; + int err; + + switch (attr) { + case hwmon_power_input: + return sfp_hwmon_read_tx_power(sfp, SFP_TX_POWER, value); + + case hwmon_power_lcrit: + *value = be16_to_cpu(sfp->diag.txpwr_low_alarm); + sfp_hwmon_calibrate_tx_power(sfp, value); + return 0; + + case hwmon_power_min: + *value = be16_to_cpu(sfp->diag.txpwr_low_warn); + sfp_hwmon_calibrate_tx_power(sfp, value); + return 0; + + case hwmon_power_max: + *value = be16_to_cpu(sfp->diag.txpwr_high_warn); + sfp_hwmon_calibrate_tx_power(sfp, value); + return 0; + + case hwmon_power_crit: + *value = be16_to_cpu(sfp->diag.txpwr_high_alarm); + sfp_hwmon_calibrate_tx_power(sfp, value); + return 0; + + case hwmon_power_lcrit_alarm: + err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_ALARM0_TXPWR_LOW); + return 0; + + case hwmon_power_min_alarm: + err = sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_WARN0_TXPWR_LOW); + return 0; + + case hwmon_power_max_alarm: + err = sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_WARN0_TXPWR_HIGH); + return 0; + + case hwmon_power_crit_alarm: + err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_ALARM0_TXPWR_HIGH); + return 0; + default: + return -EOPNOTSUPP; + } + + return -EOPNOTSUPP; +} + +static int sfp_hwmon_rx_power(struct sfp *sfp, u32 attr, long *value) +{ + u8 
status; + int err; + + switch (attr) { + case hwmon_power_input: + return sfp_hwmon_read_rx_power(sfp, SFP_RX_POWER, value); + + case hwmon_power_lcrit: + *value = be16_to_cpu(sfp->diag.rxpwr_low_alarm); + sfp_hwmon_to_rx_power(value); + return 0; + + case hwmon_power_min: + *value = be16_to_cpu(sfp->diag.rxpwr_low_warn); + sfp_hwmon_to_rx_power(value); + return 0; + + case hwmon_power_max: + *value = be16_to_cpu(sfp->diag.rxpwr_high_warn); + sfp_hwmon_to_rx_power(value); + return 0; + + case hwmon_power_crit: + *value = be16_to_cpu(sfp->diag.rxpwr_high_alarm); + sfp_hwmon_to_rx_power(value); + return 0; + + case hwmon_power_lcrit_alarm: + err = sfp_read(sfp, true, SFP_ALARM1, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_ALARM1_RXPWR_LOW); + return 0; + + case hwmon_power_min_alarm: + err = sfp_read(sfp, true, SFP_WARN1, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_WARN1_RXPWR_LOW); + return 0; + + case hwmon_power_max_alarm: + err = sfp_read(sfp, true, SFP_WARN1, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_WARN1_RXPWR_HIGH); + return 0; + + case hwmon_power_crit_alarm: + err = sfp_read(sfp, true, SFP_ALARM1, &status, sizeof(status)); + if (err < 0) + return err; + + *value = !!(status & SFP_ALARM1_RXPWR_HIGH); + return 0; + default: + return -EOPNOTSUPP; + } + + return -EOPNOTSUPP; +} + +static int sfp_hwmon_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *value) +{ + struct sfp *sfp = dev_get_drvdata(dev); + + switch (type) { + case hwmon_temp: + return sfp_hwmon_temp(sfp, attr, value); + case hwmon_in: + return sfp_hwmon_vcc(sfp, attr, value); + case hwmon_curr: + return sfp_hwmon_bias(sfp, attr, value); + case hwmon_power: + switch (channel) { + case 0: + return sfp_hwmon_tx_power(sfp, attr, value); + case 1: + return sfp_hwmon_rx_power(sfp, attr, value); + default: + return -EOPNOTSUPP; + } + default: + return -EOPNOTSUPP; + } +} + +static const struct hwmon_ops sfp_hwmon_ops = { + .is_visible = sfp_hwmon_is_visible, + .read = sfp_hwmon_read, +}; + +static u32 sfp_hwmon_chip_config[] = { + HWMON_C_REGISTER_TZ, + 0, +}; + +static const struct hwmon_channel_info sfp_hwmon_chip = { + .type = hwmon_chip, + .config = sfp_hwmon_chip_config, +}; + +static u32 sfp_hwmon_temp_config[] = { + HWMON_T_INPUT | + HWMON_T_MAX | HWMON_T_MIN | + HWMON_T_MAX_ALARM | HWMON_T_MIN_ALARM | + HWMON_T_CRIT | HWMON_T_LCRIT | + HWMON_T_CRIT_ALARM | HWMON_T_LCRIT_ALARM, + 0, +}; + +static const struct hwmon_channel_info sfp_hwmon_temp_channel_info = { + .type = hwmon_temp, + .config = sfp_hwmon_temp_config, +}; + +static u32 sfp_hwmon_vcc_config[] = { + HWMON_I_INPUT | + HWMON_I_MAX | HWMON_I_MIN | + HWMON_I_MAX_ALARM | HWMON_I_MIN_ALARM | + HWMON_I_CRIT | HWMON_I_LCRIT | + HWMON_I_CRIT_ALARM | HWMON_I_LCRIT_ALARM, + 0, +}; + +static const struct hwmon_channel_info sfp_hwmon_vcc_channel_info = { + .type = hwmon_in, + .config = sfp_hwmon_vcc_config, +}; + +static u32 sfp_hwmon_bias_config[] = { + HWMON_C_INPUT | + HWMON_C_MAX | HWMON_C_MIN | + HWMON_C_MAX_ALARM | HWMON_C_MIN_ALARM | + HWMON_C_CRIT | HWMON_C_LCRIT | + HWMON_C_CRIT_ALARM | HWMON_C_LCRIT_ALARM, + 0, +}; + +static const struct hwmon_channel_info sfp_hwmon_bias_channel_info = { + .type = hwmon_curr, + .config = sfp_hwmon_bias_config, +}; + +static u32 sfp_hwmon_power_config[] = { + /* Transmit power */ + HWMON_P_INPUT | + HWMON_P_MAX | HWMON_P_MIN | + HWMON_P_MAX_ALARM | HWMON_P_MIN_ALARM | + 
HWMON_P_CRIT | HWMON_P_LCRIT | + HWMON_P_CRIT_ALARM | HWMON_P_LCRIT_ALARM, + /* Receive power */ + HWMON_P_INPUT | + HWMON_P_MAX | HWMON_P_MIN | + HWMON_P_MAX_ALARM | HWMON_P_MIN_ALARM | + HWMON_P_CRIT | HWMON_P_LCRIT | + HWMON_P_CRIT_ALARM | HWMON_P_LCRIT_ALARM, + 0, +}; + +static const struct hwmon_channel_info sfp_hwmon_power_channel_info = { + .type = hwmon_power, + .config = sfp_hwmon_power_config, +}; + +static const struct hwmon_channel_info *sfp_hwmon_info[] = { + &sfp_hwmon_chip, + &sfp_hwmon_vcc_channel_info, + &sfp_hwmon_temp_channel_info, + &sfp_hwmon_bias_channel_info, + &sfp_hwmon_power_channel_info, + NULL, +}; + +static const struct hwmon_chip_info sfp_hwmon_chip_info = { + .ops = &sfp_hwmon_ops, + .info = sfp_hwmon_info, +}; + +static int sfp_hwmon_insert(struct sfp *sfp) +{ + int err, i; + + if (sfp->id.ext.sff8472_compliance == SFP_SFF8472_COMPLIANCE_NONE) + return 0; + + if (!(sfp->id.ext.diagmon & SFP_DIAGMON_DDM)) + return 0; + + if (sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE) + /* This driver in general does not support address + * change. + */ + return 0; + + err = sfp_read(sfp, true, 0, &sfp->diag, sizeof(sfp->diag)); + if (err < 0) + return err; + + sfp->hwmon_name = kstrdup(dev_name(sfp->dev), GFP_KERNEL); + if (!sfp->hwmon_name) + return -ENODEV; + + for (i = 0; sfp->hwmon_name[i]; i++) + if (hwmon_is_bad_char(sfp->hwmon_name[i])) + sfp->hwmon_name[i] = '_'; + + sfp->hwmon_dev = hwmon_device_register_with_info(sfp->dev, + sfp->hwmon_name, sfp, + &sfp_hwmon_chip_info, + NULL); + + return PTR_ERR_OR_ZERO(sfp->hwmon_dev); +} + +static void sfp_hwmon_remove(struct sfp *sfp) +{ + hwmon_device_unregister(sfp->hwmon_dev); + kfree(sfp->hwmon_name); +} +#else +static int sfp_hwmon_insert(struct sfp *sfp) +{ + return 0; +} + +static void sfp_hwmon_remove(struct sfp *sfp) +{ +} +#endif + /* Helpers */ static void sfp_module_tx_disable(struct sfp *sfp) { @@ -636,6 +1357,10 @@ static int sfp_sm_mod_probe(struct sfp *sfp) dev_warn(sfp->dev, "module address swap to access page 0xA2 is not supported.\n"); + ret = sfp_hwmon_insert(sfp); + if (ret < 0) + return ret; + ret = sfp_module_insert(sfp->sfp_bus, &sfp->id); if (ret < 0) return ret; @@ -647,6 +1372,8 @@ static void sfp_sm_mod_remove(struct sfp *sfp) { sfp_module_remove(sfp->sfp_bus); + sfp_hwmon_remove(sfp); + if (sfp->mod_phy) sfp_sm_phy_detach(sfp); diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c index d9dd8fbfffc7..fbf9ad429593 100644 --- a/drivers/net/phy/vitesse.c +++ b/drivers/net/phy/vitesse.c @@ -72,6 +72,10 @@ #define PHY_ID_VSC8572 0x000704d0 #define PHY_ID_VSC8574 0x000704a0 #define PHY_ID_VSC8601 0x00070420 +#define PHY_ID_VSC7385 0x00070450 +#define PHY_ID_VSC7388 0x00070480 +#define PHY_ID_VSC7395 0x00070550 +#define PHY_ID_VSC7398 0x00070580 #define PHY_ID_VSC8662 0x00070660 #define PHY_ID_VSC8221 0x000fc550 #define PHY_ID_VSC8211 0x000fc4b0 @@ -116,6 +120,137 @@ static int vsc824x_config_init(struct phy_device *phydev) return err; } +#define VSC73XX_EXT_PAGE_ACCESS 0x1f + +static int vsc73xx_read_page(struct phy_device *phydev) +{ + return __phy_read(phydev, VSC73XX_EXT_PAGE_ACCESS); +} + +static int vsc73xx_write_page(struct phy_device *phydev, int page) +{ + return __phy_write(phydev, VSC73XX_EXT_PAGE_ACCESS, page); +} + +static void vsc73xx_config_init(struct phy_device *phydev) +{ + /* Receiver init */ + phy_write(phydev, 0x1f, 0x2a30); + phy_modify(phydev, 0x0c, 0x0300, 0x0200); + phy_write(phydev, 0x1f, 0x0000); + + /* Config LEDs 0x61 */ + phy_modify(phydev, MII_TPISTATUS, 0xff00, 
0x0061); +} + +static int vsc738x_config_init(struct phy_device *phydev) +{ + u16 rev; + /* This magic sequence appear in the application note + * "VSC7385/7388 PHY Configuration". + * + * Maybe one day we will get to know what it all means. + */ + phy_write(phydev, 0x1f, 0x2a30); + phy_modify(phydev, 0x08, 0x0200, 0x0200); + phy_write(phydev, 0x1f, 0x52b5); + phy_write(phydev, 0x10, 0xb68a); + phy_modify(phydev, 0x12, 0xff07, 0x0003); + phy_modify(phydev, 0x11, 0x00ff, 0x00a2); + phy_write(phydev, 0x10, 0x968a); + phy_write(phydev, 0x1f, 0x2a30); + phy_modify(phydev, 0x08, 0x0200, 0x0000); + phy_write(phydev, 0x1f, 0x0000); + + /* Read revision */ + rev = phy_read(phydev, MII_PHYSID2); + rev &= 0x0f; + + /* Special quirk for revision 0 */ + if (rev == 0) { + phy_write(phydev, 0x1f, 0x2a30); + phy_modify(phydev, 0x08, 0x0200, 0x0200); + phy_write(phydev, 0x1f, 0x52b5); + phy_write(phydev, 0x12, 0x0000); + phy_write(phydev, 0x11, 0x0689); + phy_write(phydev, 0x10, 0x8f92); + phy_write(phydev, 0x1f, 0x52b5); + phy_write(phydev, 0x12, 0x0000); + phy_write(phydev, 0x11, 0x0e35); + phy_write(phydev, 0x10, 0x9786); + phy_write(phydev, 0x1f, 0x2a30); + phy_modify(phydev, 0x08, 0x0200, 0x0000); + phy_write(phydev, 0x17, 0xff80); + phy_write(phydev, 0x17, 0x0000); + } + + phy_write(phydev, 0x1f, 0x0000); + phy_write(phydev, 0x12, 0x0048); + + if (rev == 0) { + phy_write(phydev, 0x1f, 0x2a30); + phy_write(phydev, 0x14, 0x6600); + phy_write(phydev, 0x1f, 0x0000); + phy_write(phydev, 0x18, 0xa24e); + } else { + phy_write(phydev, 0x1f, 0x2a30); + phy_modify(phydev, 0x16, 0x0fc0, 0x0240); + phy_modify(phydev, 0x14, 0x6000, 0x4000); + /* bits 14-15 in extended register 0x14 controls DACG amplitude + * 6 = -8%, 2 is hardware default + */ + phy_write(phydev, 0x1f, 0x0001); + phy_modify(phydev, 0x14, 0xe000, 0x6000); + phy_write(phydev, 0x1f, 0x0000); + } + + vsc73xx_config_init(phydev); + + return genphy_config_init(phydev); +} + +static int vsc739x_config_init(struct phy_device *phydev) +{ + /* This magic sequence appears in the VSC7395 SparX-G5e application + * note "VSC7395/VSC7398 PHY Configuration" + * + * Maybe one day we will get to know what it all means. + */ + phy_write(phydev, 0x1f, 0x2a30); + phy_modify(phydev, 0x08, 0x0200, 0x0200); + phy_write(phydev, 0x1f, 0x52b5); + phy_write(phydev, 0x10, 0xb68a); + phy_modify(phydev, 0x12, 0xff07, 0x0003); + phy_modify(phydev, 0x11, 0x00ff, 0x00a2); + phy_write(phydev, 0x10, 0x968a); + phy_write(phydev, 0x1f, 0x2a30); + phy_modify(phydev, 0x08, 0x0200, 0x0000); + phy_write(phydev, 0x1f, 0x0000); + + phy_write(phydev, 0x1f, 0x0000); + phy_write(phydev, 0x12, 0x0048); + phy_write(phydev, 0x1f, 0x2a30); + phy_modify(phydev, 0x16, 0x0fc0, 0x0240); + phy_modify(phydev, 0x14, 0x6000, 0x4000); + phy_write(phydev, 0x1f, 0x0001); + phy_modify(phydev, 0x14, 0xe000, 0x6000); + phy_write(phydev, 0x1f, 0x0000); + + vsc73xx_config_init(phydev); + + return genphy_config_init(phydev); +} + +static int vsc73xx_config_aneg(struct phy_device *phydev) +{ + /* The VSC73xx switches does not like to be instructed to + * do autonegotiation in any way, it prefers that you just go + * with the power-on/reset defaults. Writing some registers will + * just make autonegotiation permanently fail. + */ + return 0; +} + /* This adds a skew for both TX and RX clocks, so the skew should only be * applied to "rgmii-id" interfaces. It may not work as expected * on "rgmii-txid", "rgmii-rxid" or "rgmii" interfaces. 
*/ @@ -319,6 +454,42 @@ static struct phy_driver vsc82xx_driver[] = { .ack_interrupt = &vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, }, { + .phy_id = PHY_ID_VSC7385, + .name = "Vitesse VSC7385", + .phy_id_mask = 0x000ffff0, + .features = PHY_GBIT_FEATURES, + .config_init = vsc738x_config_init, + .config_aneg = vsc73xx_config_aneg, + .read_page = vsc73xx_read_page, + .write_page = vsc73xx_write_page, +}, { + .phy_id = PHY_ID_VSC7388, + .name = "Vitesse VSC7388", + .phy_id_mask = 0x000ffff0, + .features = PHY_GBIT_FEATURES, + .config_init = vsc738x_config_init, + .config_aneg = vsc73xx_config_aneg, + .read_page = vsc73xx_read_page, + .write_page = vsc73xx_write_page, +}, { + .phy_id = PHY_ID_VSC7395, + .name = "Vitesse VSC7395", + .phy_id_mask = 0x000ffff0, + .features = PHY_GBIT_FEATURES, + .config_init = vsc739x_config_init, + .config_aneg = vsc73xx_config_aneg, + .read_page = vsc73xx_read_page, + .write_page = vsc73xx_write_page, +}, { + .phy_id = PHY_ID_VSC7398, + .name = "Vitesse VSC7398", + .phy_id_mask = 0x000ffff0, + .features = PHY_GBIT_FEATURES, + .config_init = vsc739x_config_init, + .config_aneg = vsc73xx_config_aneg, + .read_page = vsc73xx_read_page, + .write_page = vsc73xx_write_page, +}, { .phy_id = PHY_ID_VSC8662, .name = "Vitesse VSC8662", .phy_id_mask = 0x000ffff0, @@ -358,6 +529,10 @@ static struct mdio_device_id __maybe_unused vitesse_tbl[] = { { PHY_ID_VSC8514, 0x000ffff0 }, { PHY_ID_VSC8572, 0x000ffff0 }, { PHY_ID_VSC8574, 0x000ffff0 }, + { PHY_ID_VSC7385, 0x000ffff0 }, + { PHY_ID_VSC7388, 0x000ffff0 }, + { PHY_ID_VSC7395, 0x000ffff0 }, + { PHY_ID_VSC7398, 0x000ffff0 }, { PHY_ID_VSC8662, 0x000ffff0 }, { PHY_ID_VSC8221, 0x000ffff0 }, { PHY_ID_VSC8211, 0x000ffff0 }, diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c index 2e5150b0b8d5..74a8782313cf 100644 --- a/drivers/net/phy/xilinx_gmii2rgmii.c +++ b/drivers/net/phy/xilinx_gmii2rgmii.c @@ -33,17 +33,22 @@ struct gmii2rgmii { struct phy_device *phy_dev; struct phy_driver *phy_drv; struct phy_driver conv_phy_drv; - int addr; + struct mdio_device *mdio; }; static int xgmiitorgmii_read_status(struct phy_device *phydev) { struct gmii2rgmii *priv = phydev->priv; + struct mii_bus *bus = priv->mdio->bus; + int addr = priv->mdio->addr; u16 val = 0; + int err; - priv->phy_drv->read_status(phydev); + err = priv->phy_drv->read_status(phydev); + if (err < 0) + return err; - val = mdiobus_read(phydev->mdio.bus, priv->addr, XILINX_GMII2RGMII_REG); + val = mdiobus_read(bus, addr, XILINX_GMII2RGMII_REG); val &= ~XILINX_GMII2RGMII_SPEED_MASK; if (phydev->speed == SPEED_1000) @@ -53,7 +58,7 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev) else val |= BMCR_SPEED10; - mdiobus_write(phydev->mdio.bus, priv->addr, XILINX_GMII2RGMII_REG, val); + mdiobus_write(bus, addr, XILINX_GMII2RGMII_REG, val); return 0; } @@ -81,7 +86,12 @@ static int xgmiitorgmii_probe(struct mdio_device *mdiodev) return -EPROBE_DEFER; } - priv->addr = mdiodev->addr; + if (!priv->phy_dev->drv) { + dev_info(dev, "Attached phy not ready\n"); + return -EPROBE_DEFER; + } + + priv->mdio = mdiodev; priv->phy_drv = priv->phy_dev->drv; memcpy(&priv->conv_phy_drv, priv->phy_dev->drv, sizeof(struct phy_driver)); diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index b070959737ff..6a047d30e8c6 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -41,11 +41,6 @@ #define team_port_exists(dev) (dev->priv_flags & IFF_TEAM_PORT) -static struct team_port *team_port_get_rcu(const struct 
net_device *dev) -{ - return rcu_dereference(dev->rx_handler_data); -} - static struct team_port *team_port_get_rtnl(const struct net_device *dev) { struct team_port *port = rtnl_dereference(dev->rx_handler_data); @@ -1707,7 +1702,8 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev) } static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { /* * This helper function exists to help dev_pick_tx get the correct diff --git a/drivers/net/tun.c b/drivers/net/tun.c index f5727baac84a..0a3134712652 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -607,7 +607,8 @@ static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb) } static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { struct tun_struct *tun = netdev_priv(dev); u16 ret; @@ -1268,7 +1269,6 @@ static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp) return tun_xdp_set(dev, xdp->prog, xdp->extack); case XDP_QUERY_PROG: xdp->prog_id = tun_xdp_query(dev); - xdp->prog_attached = !!xdp->prog_id; return 0; default: return -EINVAL; diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index b1b3d8f7e67d..b654f05b2ccd 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -693,24 +693,32 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf) u32 phyid; struct asix_common_private *priv; - usbnet_get_endpoints(dev,intf); + usbnet_get_endpoints(dev, intf); - /* Get the MAC address */ - if (dev->driver_info->data & FLAG_EEPROM_MAC) { - for (i = 0; i < (ETH_ALEN >> 1); i++) { - ret = asix_read_cmd(dev, AX_CMD_READ_EEPROM, 0x04 + i, - 0, 2, buf + i * 2, 0); - if (ret < 0) - break; - } + /* Maybe the boot loader passed the MAC address via device tree */ + if (!eth_platform_get_mac_address(&dev->udev->dev, buf)) { + netif_dbg(dev, ifup, dev->net, + "MAC address read from device tree"); } else { - ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, - 0, 0, ETH_ALEN, buf, 0); - } + /* Try getting the MAC address from EEPROM */ + if (dev->driver_info->data & FLAG_EEPROM_MAC) { + for (i = 0; i < (ETH_ALEN >> 1); i++) { + ret = asix_read_cmd(dev, AX_CMD_READ_EEPROM, + 0x04 + i, 0, 2, buf + i * 2, + 0); + if (ret < 0) + break; + } + } else { + ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, + 0, 0, ETH_ALEN, buf, 0); + } - if (ret < 0) { - netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret); - return ret; + if (ret < 0) { + netdev_dbg(dev->net, "Failed to read MAC address: %d\n", + ret); + return ret; + } } asix_set_netdev_dev_addr(dev, buf); diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c index 18d36dff97ea..424053bd8b21 100644 --- a/drivers/net/usb/catc.c +++ b/drivers/net/usb/catc.c @@ -869,6 +869,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id default: dev_warn(&intf->dev, "Couldn't detect memory size, assuming 32k\n"); + /* fall through */ case 0x87654321: catc_set_reg(catc, TxBufCount, 4); catc_set_reg(catc, RxBufCount, 16); diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c index 288ecd999171..78b16eb9e58c 100644 --- a/drivers/net/usb/cdc-phonet.c +++ b/drivers/net/usb/cdc-phonet.c @@ -99,6 +99,7 @@ static void tx_complete(struct urb *req) struct net_device *dev = skb->dev; 
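The signature change repeated across bonding, team, and tun is purely mechanical: the core replaced the unused void *accel_priv parameter of ndo_select_queue with struct net_device *sb_dev, which appears intended to let queue selection run on behalf of a subordinate device. A minimal method with the new prototype; note that in this series' era the fallback helper still takes only dev and skb:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static u16 example_select_queue(struct net_device *dev, struct sk_buff *skb,
				struct net_device *sb_dev,
				select_queue_fallback_t fallback)
{
	/* No driver-specific policy here: defer to the core's pick. */
	return fallback(dev, skb);
}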
struct usbpn_dev *pnd = netdev_priv(dev); int status = req->status; + unsigned long flags; switch (status) { case 0: @@ -109,16 +110,17 @@ static void tx_complete(struct urb *req) case -ECONNRESET: case -ESHUTDOWN: dev->stats.tx_aborted_errors++; + /* fall through */ default: dev->stats.tx_errors++; dev_dbg(&dev->dev, "TX error (%d)\n", status); } dev->stats.tx_packets++; - spin_lock(&pnd->tx_lock); + spin_lock_irqsave(&pnd->tx_lock, flags); pnd->tx_queue--; netif_wake_queue(dev); - spin_unlock(&pnd->tx_lock); + spin_unlock_irqrestore(&pnd->tx_lock, flags); dev_kfree_skb_any(skb); usb_free_urb(req); diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index e53883ad6107..184c24baca15 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -999,6 +999,7 @@ static void read_bulk_callback(struct urb *urb) struct hso_net *odev = urb->context; struct net_device *net; int result; + unsigned long flags; int status = urb->status; /* is al ok? (Filip: Who's Al ?) */ @@ -1028,11 +1029,11 @@ static void read_bulk_callback(struct urb *urb) if (urb->actual_length) { /* Handle the IP stream, add header and push it onto network * stack if the packet is complete. */ - spin_lock(&odev->net_lock); + spin_lock_irqsave(&odev->net_lock, flags); packetizeRx(odev, urb->transfer_buffer, urb->actual_length, (urb->transfer_buffer_length > urb->actual_length) ? 1 : 0); - spin_unlock(&odev->net_lock); + spin_unlock_irqrestore(&odev->net_lock, flags); } /* We are done with this URB, resubmit it. Prep the USB to wait for @@ -1193,6 +1194,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb) { struct hso_serial *serial = urb->context; int status = urb->status; + unsigned long flags; hso_dbg(0x8, "--- Got serial_read_bulk callback %02x ---\n", status); @@ -1216,10 +1218,10 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb) if (serial->parent->port_spec & HSO_INFO_CRC_BUG) fix_crc_bug(urb, serial->in_endp->wMaxPacketSize); /* Valid data, handle RX data */ - spin_lock(&serial->serial_lock); + spin_lock_irqsave(&serial->serial_lock, flags); serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 1; put_rxbuf_data_and_resubmit_bulk_urb(serial); - spin_unlock(&serial->serial_lock); + spin_unlock_irqrestore(&serial->serial_lock, flags); } /* @@ -1502,12 +1504,13 @@ static void tiocmget_intr_callback(struct urb *urb) DUMP(serial_state_notification, sizeof(struct hso_serial_state_notification)); } else { + unsigned long flags; UART_state_bitmap = le16_to_cpu(serial_state_notification-> UART_state_bitmap); prev_UART_state_bitmap = tiocmget->prev_UART_state_bitmap; icount = &tiocmget->icount; - spin_lock(&serial->serial_lock); + spin_lock_irqsave(&serial->serial_lock, flags); if ((UART_state_bitmap & B_OVERRUN) != (prev_UART_state_bitmap & B_OVERRUN)) icount->parity++; @@ -1530,7 +1533,7 @@ static void tiocmget_intr_callback(struct urb *urb) (prev_UART_state_bitmap & B_RX_CARRIER)) icount->dcd++; tiocmget->prev_UART_state_bitmap = UART_state_bitmap; - spin_unlock(&serial->serial_lock); + spin_unlock_irqrestore(&serial->serial_lock, flags); tiocmget->intr_completed = 1; wake_up_interruptible(&tiocmget->waitq); } @@ -1729,7 +1732,6 @@ static int hso_serial_ioctl(struct tty_struct *tty, /* starts a transmit */ static void hso_kick_transmit(struct hso_serial *serial) { - u8 *temp; unsigned long flags; int res; @@ -1745,14 +1747,12 @@ static void hso_kick_transmit(struct hso_serial *serial) goto out; /* Switch pointers around to avoid memcpy */ - temp = serial->tx_buffer; - serial->tx_buffer 
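A pattern worth calling out: every spin_lock()/spin_unlock() pair in these USB completion callbacks becomes spin_lock_irqsave()/spin_unlock_irqrestore(). This matches the ongoing effort to stop the USB core from disabling interrupts around every URB completion handler; once that guarantee is gone, each callback must take its own locks in an IRQ-safe way. The converted shape, as a sketch:

#include <linux/spinlock.h>

static void example_urb_done(spinlock_t *lock, unsigned int *tx_queue)
{
	unsigned long flags;

	/* Correct whether IRQs are currently on or off: the prior state
	 * is captured in flags and restored on unlock.
	 */
	spin_lock_irqsave(lock, flags);
	if (*tx_queue)
		(*tx_queue)--;
	spin_unlock_irqrestore(lock, flags);
}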
= serial->tx_data; - serial->tx_data = temp; + swap(serial->tx_buffer, serial->tx_data); serial->tx_data_count = serial->tx_buffer_count; serial->tx_buffer_count = 0; - /* If temp is set, it means we switched buffers */ - if (temp && serial->write_data) { + /* If serial->tx_data is set, it means we switched buffers */ + if (serial->tx_data && serial->write_data) { res = serial->write_data(serial); if (res >= 0) serial->tx_urb_used = 1; @@ -1852,6 +1852,7 @@ static void intr_callback(struct urb *urb) struct hso_serial *serial; unsigned char *port_req; int status = urb->status; + unsigned long flags; int i; usb_mark_last_busy(urb->dev); @@ -1879,7 +1880,7 @@ static void intr_callback(struct urb *urb) if (serial != NULL) { hso_dbg(0x1, "Pending read interrupt on port %d\n", i); - spin_lock(&serial->serial_lock); + spin_lock_irqsave(&serial->serial_lock, flags); if (serial->rx_state == RX_IDLE && serial->port.count > 0) { /* Setup and send a ctrl req read on @@ -1893,7 +1894,8 @@ static void intr_callback(struct urb *urb) hso_dbg(0x1, "Already a read pending on port %d or port not open\n", i); } - spin_unlock(&serial->serial_lock); + spin_unlock_irqrestore(&serial->serial_lock, + flags); } } } @@ -1920,6 +1922,7 @@ static void hso_std_serial_write_bulk_callback(struct urb *urb) { struct hso_serial *serial = urb->context; int status = urb->status; + unsigned long flags; /* sanity check */ if (!serial) { @@ -1927,9 +1930,9 @@ static void hso_std_serial_write_bulk_callback(struct urb *urb) return; } - spin_lock(&serial->serial_lock); + spin_lock_irqsave(&serial->serial_lock, flags); serial->tx_urb_used = 0; - spin_unlock(&serial->serial_lock); + spin_unlock_irqrestore(&serial->serial_lock, flags); if (status) { handle_usb_error(status, __func__, serial->parent); return; @@ -1971,14 +1974,15 @@ static void ctrl_callback(struct urb *urb) struct hso_serial *serial = urb->context; struct usb_ctrlrequest *req; int status = urb->status; + unsigned long flags; /* sanity check */ if (!serial) return; - spin_lock(&serial->serial_lock); + spin_lock_irqsave(&serial->serial_lock, flags); serial->tx_urb_used = 0; - spin_unlock(&serial->serial_lock); + spin_unlock_irqrestore(&serial->serial_lock, flags); if (status) { handle_usb_error(status, __func__, serial->parent); return; @@ -1994,9 +1998,9 @@ static void ctrl_callback(struct urb *urb) (USB_DIR_IN | USB_TYPE_OPTION_VENDOR | USB_RECIP_INTERFACE)) { /* response to a read command */ serial->rx_urb_filled[0] = 1; - spin_lock(&serial->serial_lock); + spin_lock_irqsave(&serial->serial_lock, flags); put_rxbuf_data_and_resubmit_ctrl_urb(serial); - spin_unlock(&serial->serial_lock); + spin_unlock_irqrestore(&serial->serial_lock, flags); } else { hso_put_activity(serial->parent); tty_port_tty_wakeup(&serial->port); diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c index f1605833c5cf..913e50bab0a2 100644 --- a/drivers/net/usb/kaweth.c +++ b/drivers/net/usb/kaweth.c @@ -587,7 +587,7 @@ static void kaweth_usb_receive(struct urb *urb) struct kaweth_device *kaweth = urb->context; struct net_device *net = kaweth->net; int status = urb->status; - + unsigned long flags; int count = urb->actual_length; int count2 = urb->transfer_buffer_length; @@ -619,12 +619,12 @@ static void kaweth_usb_receive(struct urb *urb) net->stats.rx_errors++; dev_dbg(dev, "Status was -EOVERFLOW.\n"); } - spin_lock(&kaweth->device_lock); + spin_lock_irqsave(&kaweth->device_lock, flags); if (IS_BLOCKED(kaweth->status)) { - spin_unlock(&kaweth->device_lock); + 
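The hso hunk above also swaps the open-coded three-assignment pointer exchange for the kernel's swap() macro (found in <linux/kernel.h> in this era); the follow-up comment and condition are rewritten because the old temp variable no longer exists. Sketch:

#include <linux/kernel.h>

static void example_flip_buffers(u8 **tx_buffer, u8 **tx_data)
{
	/* Replaces: temp = a; a = b; b = temp; */
	swap(*tx_buffer, *tx_data);
}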
spin_unlock_irqrestore(&kaweth->device_lock, flags); return; } - spin_unlock(&kaweth->device_lock); + spin_unlock_irqrestore(&kaweth->device_lock, flags); if(status && status != -EREMOTEIO && count != 1) { dev_err(&kaweth->intf->dev, diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index aeca484a75b8..4662fa0381f9 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -1723,7 +1723,7 @@ static void lan78xx_init_mac_address(struct lan78xx_net *dev) "MAC address read from EEPROM"); } else { /* generate random MAC */ - random_ether_addr(addr); + eth_random_addr(addr); netif_dbg(dev, ifup, dev->net, "MAC address set to random addr"); } diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c index 6514c86f043e..f4247b275e09 100644 --- a/drivers/net/usb/pegasus.c +++ b/drivers/net/usb/pegasus.c @@ -1067,7 +1067,7 @@ static inline void setup_pegasus_II(pegasus_t *pegasus) set_register(pegasus, Reg1d, 0); set_register(pegasus, Reg7b, 1); - mdelay(100); + msleep(100); if ((pegasus->features & HAS_HOME_PNA) && mii_mode) set_register(pegasus, Reg7b, 0); else diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 2a58607a6aea..124211afb023 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -1252,6 +1252,7 @@ static void read_bulk_callback(struct urb *urb) int status = urb->status; struct rx_agg *agg; struct r8152 *tp; + unsigned long flags; agg = urb->context; if (!agg) @@ -1281,9 +1282,9 @@ static void read_bulk_callback(struct urb *urb) if (urb->actual_length < ETH_ZLEN) break; - spin_lock(&tp->rx_lock); + spin_lock_irqsave(&tp->rx_lock, flags); list_add_tail(&agg->list, &tp->rx_done); - spin_unlock(&tp->rx_lock); + spin_unlock_irqrestore(&tp->rx_lock, flags); napi_schedule(&tp->napi); return; case -ESHUTDOWN: @@ -1311,6 +1312,7 @@ static void write_bulk_callback(struct urb *urb) struct net_device *netdev; struct tx_agg *agg; struct r8152 *tp; + unsigned long flags; int status = urb->status; agg = urb->context; @@ -1332,9 +1334,9 @@ static void write_bulk_callback(struct urb *urb) stats->tx_bytes += agg->skb_len; } - spin_lock(&tp->tx_lock); + spin_lock_irqsave(&tp->tx_lock, flags); list_add_tail(&agg->list, &tp->tx_free); - spin_unlock(&tp->tx_lock); + spin_unlock_irqrestore(&tp->tx_lock, flags); usb_autopm_put_interface_async(tp->intf); @@ -1374,6 +1376,7 @@ static void intr_callback(struct urb *urb) case -ECONNRESET: /* unlink */ case -ESHUTDOWN: netif_device_detach(tp->netdev); + /* fall through */ case -ENOENT: case -EPROTO: netif_info(tp, intr, tp->netdev, @@ -2739,6 +2742,7 @@ static void r8153b_ups_en(struct r8152 *tp, bool enable) r8152_mdio_write(tp, MII_BMCR, data); data = r8153_phy_status(tp, PHY_STAT_LAN_ON); + /* fall through */ default: if (data != PHY_STAT_LAN_ON) diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c index 48ba80a8ca5c..80373a9171dd 100644 --- a/drivers/net/usb/rtl8150.c +++ b/drivers/net/usb/rtl8150.c @@ -391,6 +391,7 @@ static void read_bulk_callback(struct urb *urb) u16 rx_stat; int status = urb->status; int result; + unsigned long flags; dev = urb->context; if (!dev) @@ -432,9 +433,9 @@ static void read_bulk_callback(struct urb *urb) netdev->stats.rx_packets++; netdev->stats.rx_bytes += pkt_len; - spin_lock(&dev->rx_pool_lock); + spin_lock_irqsave(&dev->rx_pool_lock, flags); skb = pull_skb(dev); - spin_unlock(&dev->rx_pool_lock); + spin_unlock_irqrestore(&dev->rx_pool_lock, flags); if (!skb) goto resched; diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c 
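Two small conversions recur here: random_ether_addr() was a legacy alias for eth_random_addr(), which generates a unicast, locally administered MAC; and pegasus's mdelay(100) becomes msleep(100) because the path may sleep, and a 100 ms busy-wait needlessly burns a CPU. The fallback-address pattern, sketched:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/string.h>

static void example_fallback_mac(struct net_device *net)
{
	u8 addr[ETH_ALEN];

	eth_random_addr(addr);	/* unicast, locally administered bit set */
	memcpy(net->dev_addr, addr, ETH_ALEN);	/* dev_addr is writable in this era */
}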
index 2d316c1b851b..6ac232e52bf7 100644 --- a/drivers/net/usb/sr9700.c +++ b/drivers/net/usb/sr9700.c @@ -358,7 +358,7 @@ static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf) /* power up and reset phy */ sr_write_reg(dev, SR_PRR, PRR_PHY_RST); /* at least 10ms, here 20ms for safe */ - mdelay(20); + msleep(20); sr_write_reg(dev, SR_PRR, 0); /* at least 1ms, here 2ms for reading right register */ udelay(2 * 1000); diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 2b6ec927809e..14f661cbae18 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -82,25 +82,43 @@ struct virtnet_sq_stats { struct u64_stats_sync syncp; u64 packets; u64 bytes; + u64 xdp_tx; + u64 xdp_tx_drops; + u64 kicks; }; struct virtnet_rq_stats { struct u64_stats_sync syncp; u64 packets; u64 bytes; + u64 drops; + u64 xdp_packets; + u64 xdp_tx; + u64 xdp_redirects; + u64 xdp_drops; + u64 kicks; }; #define VIRTNET_SQ_STAT(m) offsetof(struct virtnet_sq_stats, m) #define VIRTNET_RQ_STAT(m) offsetof(struct virtnet_rq_stats, m) static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = { - { "packets", VIRTNET_SQ_STAT(packets) }, - { "bytes", VIRTNET_SQ_STAT(bytes) }, + { "packets", VIRTNET_SQ_STAT(packets) }, + { "bytes", VIRTNET_SQ_STAT(bytes) }, + { "xdp_tx", VIRTNET_SQ_STAT(xdp_tx) }, + { "xdp_tx_drops", VIRTNET_SQ_STAT(xdp_tx_drops) }, + { "kicks", VIRTNET_SQ_STAT(kicks) }, }; static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = { - { "packets", VIRTNET_RQ_STAT(packets) }, - { "bytes", VIRTNET_RQ_STAT(bytes) }, + { "packets", VIRTNET_RQ_STAT(packets) }, + { "bytes", VIRTNET_RQ_STAT(bytes) }, + { "drops", VIRTNET_RQ_STAT(drops) }, + { "xdp_packets", VIRTNET_RQ_STAT(xdp_packets) }, + { "xdp_tx", VIRTNET_RQ_STAT(xdp_tx) }, + { "xdp_redirects", VIRTNET_RQ_STAT(xdp_redirects) }, + { "xdp_drops", VIRTNET_RQ_STAT(xdp_drops) }, + { "kicks", VIRTNET_RQ_STAT(kicks) }, }; #define VIRTNET_SQ_STATS_LEN ARRAY_SIZE(virtnet_sq_stats_desc) @@ -447,22 +465,12 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi, return 0; } -static int __virtnet_xdp_tx_xmit(struct virtnet_info *vi, - struct xdp_frame *xdpf) +static struct send_queue *virtnet_xdp_sq(struct virtnet_info *vi) { - struct xdp_frame *xdpf_sent; - struct send_queue *sq; - unsigned int len; unsigned int qp; qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id(); - sq = &vi->sq[qp]; - - /* Free up any pending old buffers before queueing new ones. */ - while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) - xdp_return_frame(xdpf_sent); - - return __virtnet_xdp_xmit_one(vi, sq, xdpf); + return &vi->sq[qp]; } static int virtnet_xdp_xmit(struct net_device *dev, @@ -474,23 +482,28 @@ static int virtnet_xdp_xmit(struct net_device *dev, struct bpf_prog *xdp_prog; struct send_queue *sq; unsigned int len; - unsigned int qp; int drops = 0; - int err; + int kicks = 0; + int ret, err; int i; - if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) - return -EINVAL; + sq = virtnet_xdp_sq(vi); - qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id(); - sq = &vi->sq[qp]; + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) { + ret = -EINVAL; + drops = n; + goto out; + } /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this * indicate XDP resources have been successfully allocated. */ xdp_prog = rcu_dereference(rq->xdp_prog); - if (!xdp_prog) - return -ENXIO; + if (!xdp_prog) { + ret = -ENXIO; + drops = n; + goto out; + } /* Free up any pending old buffers before queueing new ones. 
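The virtio-net statistics rework above hinges on descriptor tables: each entry pairs an ethtool string with an offsetof() into the per-queue stats struct, so adding a counter costs one struct field plus one table row, and generic code can read any counter by offset. A minimal sketch of that mechanism (names illustrative):

#include <linux/stddef.h>
#include <linux/types.h>

struct example_rq_stats {
	u64 packets;
	u64 bytes;
};

struct example_stat_desc {
	char desc[32];
	size_t offset;
};

#define EXAMPLE_RQ_STAT(m) offsetof(struct example_rq_stats, m)

static const struct example_stat_desc example_rq_stats_desc[] = {
	{ "packets", EXAMPLE_RQ_STAT(packets) },
	{ "bytes",   EXAMPLE_RQ_STAT(bytes) },
};

/* Read counter i generically, using the same offset arithmetic the
 * receive path below uses to fold a per-call stats struct into rq->stats.
 */
static u64 example_stat_read(const struct example_rq_stats *s, int i)
{
	return *(const u64 *)((const u8 *)s + example_rq_stats_desc[i].offset);
}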
*/ while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) @@ -505,11 +518,20 @@ static int virtnet_xdp_xmit(struct net_device *dev, drops++; } } + ret = n - drops; - if (flags & XDP_XMIT_FLUSH) - virtqueue_kick(sq->vq); + if (flags & XDP_XMIT_FLUSH) { + if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) + kicks = 1; + } +out: + u64_stats_update_begin(&sq->stats.syncp); + sq->stats.xdp_tx += n; + sq->stats.xdp_tx_drops += drops; + sq->stats.kicks += kicks; + u64_stats_update_end(&sq->stats.syncp); - return n - drops; + return ret; } static unsigned int virtnet_get_headroom(struct virtnet_info *vi) @@ -587,7 +609,7 @@ static struct sk_buff *receive_small(struct net_device *dev, void *buf, void *ctx, unsigned int len, unsigned int *xdp_xmit, - unsigned int *rbytes) + struct virtnet_rq_stats *stats) { struct sk_buff *skb; struct bpf_prog *xdp_prog; @@ -602,7 +624,7 @@ static struct sk_buff *receive_small(struct net_device *dev, int err; len -= vi->hdr_len; - *rbytes += len; + stats->bytes += len; rcu_read_lock(); xdp_prog = rcu_dereference(rq->xdp_prog); @@ -644,6 +666,7 @@ static struct sk_buff *receive_small(struct net_device *dev, xdp.rxq = &rq->xdp_rxq; orig_data = xdp.data; act = bpf_prog_run_xdp(xdp_prog, &xdp); + stats->xdp_packets++; switch (act) { case XDP_PASS: @@ -652,11 +675,12 @@ static struct sk_buff *receive_small(struct net_device *dev, len = xdp.data_end - xdp.data; break; case XDP_TX: + stats->xdp_tx++; xdpf = convert_to_xdp_frame(&xdp); if (unlikely(!xdpf)) goto err_xdp; - err = __virtnet_xdp_tx_xmit(vi, xdpf); - if (unlikely(err)) { + err = virtnet_xdp_xmit(dev, 1, &xdpf, 0); + if (unlikely(err < 0)) { trace_xdp_exception(vi->dev, xdp_prog, act); goto err_xdp; } @@ -664,6 +688,7 @@ static struct sk_buff *receive_small(struct net_device *dev, rcu_read_unlock(); goto xdp_xmit; case XDP_REDIRECT: + stats->xdp_redirects++; err = xdp_do_redirect(dev, &xdp, xdp_prog); if (err) goto err_xdp; @@ -697,7 +722,8 @@ err: err_xdp: rcu_read_unlock(); - dev->stats.rx_dropped++; + stats->xdp_drops++; + stats->drops++; put_page(page); xdp_xmit: return NULL; @@ -708,19 +734,19 @@ static struct sk_buff *receive_big(struct net_device *dev, struct receive_queue *rq, void *buf, unsigned int len, - unsigned int *rbytes) + struct virtnet_rq_stats *stats) { struct page *page = buf; struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE); - *rbytes += len - vi->hdr_len; + stats->bytes += len - vi->hdr_len; if (unlikely(!skb)) goto err; return skb; err: - dev->stats.rx_dropped++; + stats->drops++; give_pages(rq, page); return NULL; } @@ -732,7 +758,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, void *ctx, unsigned int len, unsigned int *xdp_xmit, - unsigned int *rbytes) + struct virtnet_rq_stats *stats) { struct virtio_net_hdr_mrg_rxbuf *hdr = buf; u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); @@ -745,7 +771,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, int err; head_skb = NULL; - *rbytes += len - vi->hdr_len; + stats->bytes += len - vi->hdr_len; rcu_read_lock(); xdp_prog = rcu_dereference(rq->xdp_prog); @@ -794,6 +820,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, xdp.rxq = &rq->xdp_rxq; act = bpf_prog_run_xdp(xdp_prog, &xdp); + stats->xdp_packets++; switch (act) { case XDP_PASS: @@ -818,11 +845,12 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, } break; case XDP_TX: + stats->xdp_tx++; xdpf = convert_to_xdp_frame(&xdp); if (unlikely(!xdpf)) goto err_xdp; - err = 
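All the new counters are published under u64_stats_update_begin()/u64_stats_update_end(). On 64-bit machines these compile away; on 32-bit they bump a seqcount so a reader retries rather than observing a torn 64-bit value. Writer side, sketched (u64_stats_init() at setup time omitted):

#include <linux/u64_stats_sync.h>

struct example_sq_stats {
	struct u64_stats_sync syncp;
	u64 xdp_tx;
	u64 xdp_tx_drops;
};

static void example_count_xdp(struct example_sq_stats *s, int sent, int drops)
{
	u64_stats_update_begin(&s->syncp);
	s->xdp_tx += sent;
	s->xdp_tx_drops += drops;
	u64_stats_update_end(&s->syncp);
}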
__virtnet_xdp_tx_xmit(vi, xdpf); - if (unlikely(err)) { + err = virtnet_xdp_xmit(dev, 1, &xdpf, 0); + if (unlikely(err < 0)) { trace_xdp_exception(vi->dev, xdp_prog, act); if (unlikely(xdp_page != page)) put_page(xdp_page); @@ -834,6 +862,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, rcu_read_unlock(); goto xdp_xmit; case XDP_REDIRECT: + stats->xdp_redirects++; err = xdp_do_redirect(dev, &xdp, xdp_prog); if (err) { if (unlikely(xdp_page != page)) @@ -883,7 +912,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, goto err_buf; } - *rbytes += len; + stats->bytes += len; page = virt_to_head_page(buf); truesize = mergeable_ctx_to_truesize(ctx); @@ -929,6 +958,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, err_xdp: rcu_read_unlock(); + stats->xdp_drops++; err_skb: put_page(page); while (num_buf-- > 1) { @@ -939,12 +969,12 @@ err_skb: dev->stats.rx_length_errors++; break; } - *rbytes += len; + stats->bytes += len; page = virt_to_head_page(buf); put_page(page); } err_buf: - dev->stats.rx_dropped++; + stats->drops++; dev_kfree_skb(head_skb); xdp_xmit: return NULL; @@ -952,7 +982,8 @@ xdp_xmit: static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, void *buf, unsigned int len, void **ctx, - unsigned int *xdp_xmit, unsigned int *rbytes) + unsigned int *xdp_xmit, + struct virtnet_rq_stats *stats) { struct net_device *dev = vi->dev; struct sk_buff *skb; @@ -973,11 +1004,11 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, if (vi->mergeable_rx_bufs) skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit, - rbytes); + stats); else if (vi->big_packets) - skb = receive_big(dev, vi, rq, buf, len, rbytes); + skb = receive_big(dev, vi, rq, buf, len, stats); else - skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, rbytes); + skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats); if (unlikely(!skb)) return; @@ -1171,7 +1202,12 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq, if (err) break; } while (rq->vq->num_free); - virtqueue_kick(rq->vq); + if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) { + u64_stats_update_begin(&rq->stats.syncp); + rq->stats.kicks++; + u64_stats_update_end(&rq->stats.syncp); + } + return !oom; } @@ -1246,22 +1282,24 @@ static int virtnet_receive(struct receive_queue *rq, int budget, unsigned int *xdp_xmit) { struct virtnet_info *vi = rq->vq->vdev->priv; - unsigned int len, received = 0, bytes = 0; + struct virtnet_rq_stats stats = {}; + unsigned int len; void *buf; + int i; if (!vi->big_packets || vi->mergeable_rx_bufs) { void *ctx; - while (received < budget && + while (stats.packets < budget && (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) { - receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &bytes); - received++; + receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats); + stats.packets++; } } else { - while (received < budget && + while (stats.packets < budget && (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) { - receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &bytes); - received++; + receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats); + stats.packets++; } } @@ -1271,11 +1309,16 @@ static int virtnet_receive(struct receive_queue *rq, int budget, } u64_stats_update_begin(&rq->stats.syncp); - rq->stats.bytes += bytes; - rq->stats.packets += received; + for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) { + size_t offset = virtnet_rq_stats_desc[i].offset; + u64 *item; + + item = (u64 *)((u8 *)&rq->stats + 
offset); + *item += *(u64 *)((u8 *)&stats + offset); + } u64_stats_update_end(&rq->stats.syncp); - return received; + return stats.packets; } static void free_old_xmit_skbs(struct send_queue *sq) @@ -1331,7 +1374,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget) container_of(napi, struct receive_queue, napi); struct virtnet_info *vi = rq->vq->vdev->priv; struct send_queue *sq; - unsigned int received, qp; + unsigned int received; unsigned int xdp_xmit = 0; virtnet_poll_cleantx(rq); @@ -1346,10 +1389,12 @@ static int virtnet_poll(struct napi_struct *napi, int budget) xdp_do_flush_map(); if (xdp_xmit & VIRTIO_XDP_TX) { - qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + - smp_processor_id(); - sq = &vi->sq[qp]; - virtqueue_kick(sq->vq); + sq = virtnet_xdp_sq(vi); + if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { + u64_stats_update_begin(&sq->stats.syncp); + sq->stats.kicks++; + u64_stats_update_end(&sq->stats.syncp); + } } return received; @@ -1511,8 +1556,13 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) } } - if (kick || netif_xmit_stopped(txq)) - virtqueue_kick(sq->vq); + if (kick || netif_xmit_stopped(txq)) { + if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { + u64_stats_update_begin(&sq->stats.syncp); + sq->stats.kicks++; + u64_stats_update_end(&sq->stats.syncp); + } + } return NETDEV_TX_OK; } @@ -1616,7 +1666,7 @@ static void virtnet_stats(struct net_device *dev, int i; for (i = 0; i < vi->max_queue_pairs; i++) { - u64 tpackets, tbytes, rpackets, rbytes; + u64 tpackets, tbytes, rpackets, rbytes, rdrops; struct receive_queue *rq = &vi->rq[i]; struct send_queue *sq = &vi->sq[i]; @@ -1630,17 +1680,18 @@ static void virtnet_stats(struct net_device *dev, start = u64_stats_fetch_begin_irq(&rq->stats.syncp); rpackets = rq->stats.packets; rbytes = rq->stats.bytes; + rdrops = rq->stats.drops; } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start)); tot->rx_packets += rpackets; tot->tx_packets += tpackets; tot->rx_bytes += rbytes; tot->tx_bytes += tbytes; + tot->rx_dropped += rdrops; } tot->tx_dropped = dev->stats.tx_dropped; tot->tx_fifo_errors = dev->stats.tx_fifo_errors; - tot->rx_dropped = dev->stats.rx_dropped; tot->rx_length_errors = dev->stats.rx_length_errors; tot->rx_frame_errors = dev->stats.rx_frame_errors; } @@ -2348,7 +2399,6 @@ static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp) return virtnet_xdp_set(dev, xdp->prog, xdp->extack); case XDP_QUERY_PROG: xdp->prog_id = virtnet_xdp_query(dev); - xdp->prog_attached = !!xdp->prog_id; return 0; default: return -EINVAL; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index e857cb3335f6..ababba37d735 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -568,11 +568,12 @@ static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb, return vh; } -static struct sk_buff **vxlan_gro_receive(struct sock *sk, - struct sk_buff **head, - struct sk_buff *skb) +static struct sk_buff *vxlan_gro_receive(struct sock *sk, + struct list_head *head, + struct sk_buff *skb) { - struct sk_buff *p, **pp = NULL; + struct sk_buff *pp = NULL; + struct sk_buff *p; struct vxlanhdr *vh, *vh2; unsigned int hlen, off_vx; int flush = 1; @@ -607,7 +608,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk, skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */ - for (p = *head; p; p = p->next) { + list_for_each_entry(p, head, list) { if (!NAPI_GRO_CB(p)->same_flow) continue; @@ -2154,7 +2155,8 @@ static void 
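virtqueue_kick() is simply virtqueue_kick_prepare() followed by virtqueue_notify(); open-coding the pair, as this patch does in the xmit, refill, and NAPI poll paths, lets the driver count only kicks that actually notified the host (prepare returns false when the device has suppressed notifications). Sketch:

#include <linux/virtio.h>

static bool example_kick_counted(struct virtqueue *vq, u64 *kicks)
{
	if (virtqueue_kick_prepare(vq) && virtqueue_notify(vq)) {
		(*kicks)++;	/* the host was really poked */
		return true;
	}
	return false;
}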
vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, vni = tunnel_id_to_key32(info->key.tun_id); ifindex = 0; dst_cache = &info->dst_cache; - if (info->options_len) + if (info->options_len && + info->key.tun_flags & TUNNEL_VXLAN_OPT) md = ip_tunnel_info_opts(info); ttl = info->key.ttl; tos = info->key.tos; diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c index bd46b2552980..2a3f0f1a2b0a 100644 --- a/drivers/net/wan/farsync.c +++ b/drivers/net/wan/farsync.c @@ -2134,7 +2134,6 @@ static void fst_openport(struct fst_port_info *port) { int signals; - int txq_length; /* Only init things if card is actually running. This allows open to * succeed for downloads etc. @@ -2161,7 +2160,6 @@ fst_openport(struct fst_port_info *port) else netif_carrier_off(port_to_dev(port)); - txq_length = port->txqe - port->txqs; port->txqe = 0; port->txqs = 0; } diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index 9b09c9d0d0fb..5f0366a125e2 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c @@ -192,7 +192,7 @@ static int uhdlc_init(struct ucc_hdlc_private *priv) priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param), ALIGNMENT_OF_UCC_HDLC_PRAM); - if (priv->ucc_pram_offset < 0) { + if (IS_ERR_VALUE(priv->ucc_pram_offset)) { dev_err(priv->dev, "Can not allocate MURAM for hdlc parameter.\n"); ret = -ENOMEM; goto free_tx_bd; @@ -230,14 +230,14 @@ static int uhdlc_init(struct ucc_hdlc_private *priv) /* Alloc riptr, tiptr */ riptr = qe_muram_alloc(32, 32); - if (riptr < 0) { + if (IS_ERR_VALUE(riptr)) { dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n"); ret = -ENOMEM; goto free_tx_skbuff; } tiptr = qe_muram_alloc(32, 32); - if (tiptr < 0) { + if (IS_ERR_VALUE(tiptr)) { dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n"); ret = -ENOMEM; goto free_riptr; diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c index 90a4ad9a2d08..093bd21f574d 100644 --- a/drivers/net/wan/lmc/lmc_main.c +++ b/drivers/net/wan/lmc/lmc_main.c @@ -1491,7 +1491,6 @@ static int lmc_rx(struct net_device *dev) lmc_softc_t *sc = dev_to_sc(dev); int i; int rx_work_limit = LMC_RXDESCS; - unsigned int next_rx; int rxIntLoopCnt; /* debug -baz */ int localLengthErrCnt = 0; long stat; @@ -1505,7 +1504,6 @@ static int lmc_rx(struct net_device *dev) rxIntLoopCnt = 0; /* debug -baz */ i = sc->lmc_next_rx % LMC_RXDESCS; - next_rx = sc->lmc_next_rx; while (((stat = sc->lmc_rxring[i].status) & LMC_RDES_OWN_BIT) != DESC_OWNED_BY_DC21X4) { diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c index 4c417903e9be..094cea775d0c 100644 --- a/drivers/net/wimax/i2400m/control.c +++ b/drivers/net/wimax/i2400m/control.c @@ -566,13 +566,12 @@ static void i2400m_msg_ack_hook(struct i2400m *i2400m, { int result; struct device *dev = i2400m_dev(i2400m); - unsigned ack_type, ack_status; + unsigned int ack_type; char strerr[32]; /* Chew on the message, we might need some information from * here */ ack_type = le16_to_cpu(l3l4_hdr->type); - ack_status = le16_to_cpu(l3l4_hdr->status); switch (ack_type) { case I2400M_MT_CMD_ENTER_POWERSAVE: /* This is just left here for the sake of example, as diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c index a89b5685e68b..e9fc168bb734 100644 --- a/drivers/net/wimax/i2400m/fw.c +++ b/drivers/net/wimax/i2400m/fw.c @@ -1552,7 +1552,6 @@ int i2400m_dev_bootstrap(struct i2400m *i2400m, enum 
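The vxlan_gro_receive() rewrite tracks the core GRO conversion from open-coded singly linked sk_buff chains (struct sk_buff **head, walking p->next) to a list_head, so held packets of the same flow are now visited with list_for_each_entry(). The resulting loop shape, sketched:

#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void example_gro_walk(struct list_head *head)
{
	struct sk_buff *p;

	list_for_each_entry(p, head, list) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;
		/* compare flow headers here; clear same_flow on mismatch */
	}
}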
i2400m_bri flags) int ret, itr; struct device *dev = i2400m_dev(i2400m); struct i2400m_fw *i2400m_fw; - const struct i2400m_bcf_hdr *bcf; /* Firmware data */ const struct firmware *fw; const char *fw_name; @@ -1574,7 +1573,7 @@ int i2400m_dev_bootstrap(struct i2400m *i2400m, enum i2400m_bri flags) } /* Load firmware files to memory. */ - for (itr = 0, bcf = NULL, ret = -ENOENT; ; itr++) { + for (itr = 0, ret = -ENOENT; ; itr++) { fw_name = i2400m->bus_fw_names[itr]; if (fw_name == NULL) { dev_err(dev, "Could not find a usable firmware image\n"); diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c index a654687b5fa2..9ab3f0fdfea4 100644 --- a/drivers/net/wimax/i2400m/netdev.c +++ b/drivers/net/wimax/i2400m/netdev.c @@ -535,14 +535,12 @@ void i2400m_net_erx(struct i2400m *i2400m, struct sk_buff *skb, { struct net_device *net_dev = i2400m->wimax_dev.net_dev; struct device *dev = i2400m_dev(i2400m); - int protocol; d_fnstart(2, dev, "(i2400m %p skb %p [%u] cs %d)\n", i2400m, skb, skb->len, cs); switch(cs) { case I2400M_CS_IPV4_0: case I2400M_CS_IPV4: - protocol = ETH_P_IP; i2400m_rx_fake_eth_header(i2400m->wimax_dev.net_dev, skb->data - ETH_HLEN, cpu_to_be16(ETH_P_IP)); diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig index 84f071ac0d84..54ff5930126c 100644 --- a/drivers/net/wireless/ath/ath10k/Kconfig +++ b/drivers/net/wireless/ath/ath10k/Kconfig @@ -1,15 +1,15 @@ config ATH10K - tristate "Atheros 802.11ac wireless cards support" - depends on MAC80211 && HAS_DMA + tristate "Atheros 802.11ac wireless cards support" + depends on MAC80211 && HAS_DMA select ATH_COMMON select CRC32 select WANT_DEV_COREDUMP select ATH10K_CE - ---help--- - This module adds support for wireless adapters based on - Atheros IEEE 802.11ac family of chipsets. + ---help--- + This module adds support for wireless adapters based on + Atheros IEEE 802.11ac family of chipsets. - If you choose to build a module, it'll be called ath10k. + If you choose to build a module, it'll be called ath10k. config ATH10K_CE bool @@ -41,12 +41,12 @@ config ATH10K_USB work in progress and will not fully work. config ATH10K_SNOC - tristate "Qualcomm ath10k SNOC support (EXPERIMENTAL)" - depends on ATH10K && ARCH_QCOM - ---help--- - This module adds support for integrated WCN3990 chip connected - to system NOC(SNOC). Currently work in progress and will not - fully work. + tristate "Qualcomm ath10k SNOC support (EXPERIMENTAL)" + depends on ATH10K && ARCH_QCOM + ---help--- + This module adds support for integrated WCN3990 chip connected + to system NOC(SNOC). Currently work in progress and will not + fully work. 
config ATH10K_DEBUG bool "Atheros ath10k debugging" diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c index 3b96a43fbda4..18c709c484e7 100644 --- a/drivers/net/wireless/ath/ath10k/ce.c +++ b/drivers/net/wireless/ath/ath10k/ce.c @@ -1512,7 +1512,7 @@ ath10k_ce_alloc_src_ring_64(struct ath10k *ar, unsigned int ce_id, ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries); if (ret) { dma_free_coherent(ar->dev, - (nentries * sizeof(struct ce_desc) + + (nentries * sizeof(struct ce_desc_64) + CE_DESC_RING_ALIGN), src_ring->base_addr_owner_space_unaligned, base_addr); diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h index dbeffaef6024..b8fb5382dede 100644 --- a/drivers/net/wireless/ath/ath10k/ce.h +++ b/drivers/net/wireless/ath/ath10k/ce.h @@ -383,4 +383,46 @@ static inline u32 ath10k_ce_interrupt_summary(struct ath10k *ar) return CE_INTERRUPT_SUMMARY; } +/* Host software's Copy Engine configuration. */ +#define CE_ATTR_FLAGS 0 + +/* + * Configuration information for a Copy Engine pipe. + * Passed from Host to Target during startup (one per CE). + * + * NOTE: Structure is shared between Host software and Target firmware! + */ +struct ce_pipe_config { + __le32 pipenum; + __le32 pipedir; + __le32 nentries; + __le32 nbytes_max; + __le32 flags; + __le32 reserved; +}; + +/* + * Directions for interconnect pipe configuration. + * These definitions may be used during configuration and are shared + * between Host and Target. + * + * Pipe Directions are relative to the Host, so PIPEDIR_IN means + * "coming IN over air through Target to Host" as with a WiFi Rx operation. + * Conversely, PIPEDIR_OUT means "going OUT from Host through Target over air" + * as with a WiFi Tx operation. This is somewhat awkward for the "middle-man" + * Target since things that are "PIPEDIR_OUT" are coming IN to the Target + * over the interconnect. + */ +#define PIPEDIR_NONE 0 +#define PIPEDIR_IN 1 /* Target-->Host, WiFi Rx direction */ +#define PIPEDIR_OUT 2 /* Host->Target, WiFi Tx direction */ +#define PIPEDIR_INOUT 3 /* bidirectional */ + +/* Establish a mapping between a service/direction and a pipe. */ +struct service_to_pipe { + __le32 service_id; + __le32 pipedir; + __le32 pipenum; +}; + #endif /* _CE_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index ad4f6e3c0737..85c58ebbfb26 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -41,10 +41,8 @@ static bool uart_print; static bool skip_otp; static bool rawmode; -/* Enable ATH10K_FW_CRASH_DUMP_REGISTERS and ATH10K_FW_CRASH_DUMP_CE_DATA - * by default. 
- */ -unsigned long ath10k_coredump_mask = 0x3; +unsigned long ath10k_coredump_mask = BIT(ATH10K_FW_CRASH_DUMP_REGISTERS) | + BIT(ATH10K_FW_CRASH_DUMP_CE_DATA); /* FIXME: most of these should be readonly */ module_param_named(debug_mask, ath10k_debug_mask, uint, 0644); @@ -82,6 +80,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .hw_ops = &qca988x_ops, .decap_align_bytes = 4, .spectral_bin_discard = 0, + .spectral_bin_offset = 0, .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 8, @@ -113,6 +112,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .hw_ops = &qca988x_ops, .decap_align_bytes = 4, .spectral_bin_discard = 0, + .spectral_bin_offset = 0, .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 8, @@ -145,6 +145,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .hw_ops = &qca988x_ops, .decap_align_bytes = 4, .spectral_bin_discard = 0, + .spectral_bin_offset = 0, .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 8, @@ -176,6 +177,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .hw_ops = &qca988x_ops, .decap_align_bytes = 4, .spectral_bin_discard = 0, + .spectral_bin_offset = 0, .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 8, @@ -207,6 +209,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .hw_ops = &qca988x_ops, .decap_align_bytes = 4, .spectral_bin_discard = 0, + .spectral_bin_offset = 0, .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 8, @@ -238,6 +241,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .hw_ops = &qca988x_ops, .decap_align_bytes = 4, .spectral_bin_discard = 0, + .spectral_bin_offset = 0, .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 8, @@ -272,6 +276,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .target_cpu_freq = 176000000, .decap_align_bytes = 4, .spectral_bin_discard = 0, + .spectral_bin_offset = 0, .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 8, @@ -309,6 +314,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .hw_ops = &qca99x0_ops, .decap_align_bytes = 1, .spectral_bin_discard = 4, + .spectral_bin_offset = 0, .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 11, @@ -347,6 +353,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .hw_ops = &qca99x0_ops, .decap_align_bytes = 1, .spectral_bin_discard = 12, + .spectral_bin_offset = 8, /* Can do only 2x2 VHT160 or 80+80. 1560Mbps is 4x4 80Mhz * or 2x2 160Mhz, long-guard-interval. @@ -388,6 +395,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .hw_ops = &qca99x0_ops, .decap_align_bytes = 1, .spectral_bin_discard = 12, + .spectral_bin_offset = 8, /* Can do only 1x1 VHT160 or 80+80. 780Mbps is 2x2 80Mhz or * 1x1 160Mhz, long-guard-interval. 
@@ -423,6 +431,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .hw_ops = &qca988x_ops, .decap_align_bytes = 4, .spectral_bin_discard = 0, + .spectral_bin_offset = 0, .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 8, @@ -456,6 +465,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .target_cpu_freq = 176000000, .decap_align_bytes = 4, .spectral_bin_discard = 0, + .spectral_bin_offset = 0, .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 8, @@ -494,6 +504,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .hw_ops = &qca99x0_ops, .decap_align_bytes = 1, .spectral_bin_discard = 4, + .spectral_bin_offset = 0, .vht160_mcs_rx_highest = 0, .vht160_mcs_tx_highest = 0, .n_cipher_suites = 11, diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 951dbdd1c9eb..427ee5752bb0 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -48,7 +48,8 @@ #define WMI_READY_TIMEOUT (5 * HZ) #define ATH10K_FLUSH_TIMEOUT_HZ (5 * HZ) #define ATH10K_CONNECTION_LOSS_HZ (3 * HZ) -#define ATH10K_NUM_CHANS 40 +#define ATH10K_NUM_CHANS 41 +#define ATH10K_MAX_5G_CHAN 173 /* Antenna noise floor */ #define ATH10K_DEFAULT_NOISE_FLOOR -95 diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 0d98c93a3aba..4926722e0c0d 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -1727,7 +1727,9 @@ int ath10k_debug_start(struct ath10k *ar) ath10k_warn(ar, "failed to disable pktlog: %d\n", ret); } - if (ar->debug.nf_cal_period) { + if (ar->debug.nf_cal_period && + !test_bit(ATH10K_FW_FEATURE_NON_BMI, + ar->normal_mode_fw.fw_file.fw_features)) { ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->cal_period, ar->debug.nf_cal_period); @@ -1744,7 +1746,9 @@ void ath10k_debug_stop(struct ath10k *ar) { lockdep_assert_held(&ar->conf_mutex); - ath10k_debug_cal_data_fetch(ar); + if (!test_bit(ATH10K_FW_FEATURE_NON_BMI, + ar->normal_mode_fw.fw_file.fw_features)) + ath10k_debug_cal_data_fetch(ar); /* Must not use _sync to avoid deadlock, we do that in * ath10k_debug_destroy(). 
The check for htt_stats_mask is to avoid @@ -2367,15 +2371,18 @@ int ath10k_debug_register(struct ath10k *ar) debugfs_create_file("fw_dbglog", 0600, ar->debug.debugfs_phy, ar, &fops_fw_dbglog); - debugfs_create_file("cal_data", 0400, ar->debug.debugfs_phy, ar, - &fops_cal_data); + if (!test_bit(ATH10K_FW_FEATURE_NON_BMI, + ar->normal_mode_fw.fw_file.fw_features)) { + debugfs_create_file("cal_data", 0400, ar->debug.debugfs_phy, ar, + &fops_cal_data); + + debugfs_create_file("nf_cal_period", 0600, ar->debug.debugfs_phy, ar, + &fops_nf_cal_period); + } debugfs_create_file("ani_enable", 0600, ar->debug.debugfs_phy, ar, &fops_ani_enable); - debugfs_create_file("nf_cal_period", 0600, ar->debug.debugfs_phy, ar, - &fops_nf_cal_period); - if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) { debugfs_create_file("dfs_simulate_radar", 0200, ar->debug.debugfs_phy, ar, &fops_simulate_radar); diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index 5d8b97a0ccaa..89157c5b5e5f 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -1202,7 +1202,7 @@ static int ath10k_htt_tx_32(struct ath10k_htt *htt, case ATH10K_HW_TXRX_RAW: case ATH10K_HW_TXRX_NATIVE_WIFI: flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT; - /* pass through */ + /* fall through */ case ATH10K_HW_TXRX_ETHERNET: if (ar->hw_params.continuous_frag_desc) { ext_desc_t = htt->frag_desc.vaddr_desc_32; @@ -1404,7 +1404,7 @@ static int ath10k_htt_tx_64(struct ath10k_htt *htt, case ATH10K_HW_TXRX_RAW: case ATH10K_HW_TXRX_NATIVE_WIFI: flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT; - /* pass through */ + /* fall through */ case ATH10K_HW_TXRX_ETHERNET: if (ar->hw_params.continuous_frag_desc) { ext_desc_t = htt->frag_desc.vaddr_desc_64; diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 23467e9fefeb..a274bd809a08 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -586,6 +586,9 @@ struct ath10k_hw_params { /* target supporting retention restore on ddr */ bool rri_on_ddr; + + /* Number of bytes to be the offset for each FFT sample */ + int spectral_bin_offset; }; struct htt_rx_desc; diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 836e0a47b94a..541bc1c4b2f7 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -7737,7 +7737,7 @@ static void ath10k_sta_statistics(struct ieee80211_hw *hw, return; sinfo->rx_duration = arsta->rx_duration; - sinfo->filled |= 1ULL << NL80211_STA_INFO_RX_DURATION; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION); if (!arsta->txrate.legacy && !arsta->txrate.nss) return; @@ -7750,7 +7750,7 @@ static void ath10k_sta_statistics(struct ieee80211_hw *hw, sinfo->txrate.bw = arsta->txrate.bw; } sinfo->txrate.flags = arsta->txrate.flags; - sinfo->filled |= 1ULL << NL80211_STA_INFO_TX_BITRATE; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); } static const struct ieee80211_ops ath10k_ops = { @@ -7870,6 +7870,9 @@ static const struct ieee80211_channel ath10k_5ghz_channels[] = { CHAN5G(161, 5805, 0), CHAN5G(165, 5825, 0), CHAN5G(169, 5845, 0), + CHAN5G(173, 5865, 0), + /* If you add more, you may need to change ATH10K_MAX_5G_CHAN */ + /* And you will definitely need to change ATH10K_NUM_CHANS in core.h */ }; struct ath10k *ath10k_mac_create(size_t priv_size) diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h index 
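The /* pass through */ to /* fall through */ rewording in htt_tx.c is not cosmetic: GCC's -Wimplicit-fallthrough silences the warning only for comments matching its recognized spellings ("fall through", "fallthrough", and close variants), so the old wording would still trip the warning. The annotated shape, for reference:

#include <linux/errno.h>

static int example_classify(int mode)
{
	int flags = 0;

	switch (mode) {
	case 0:
		flags |= 0x1;
		/* fall through */
	case 1:
		flags |= 0x2;
		break;
	default:
		return -EINVAL;
	}
	return flags;
}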
e52fd83156b6..0ed436657108 100644 --- a/drivers/net/wireless/ath/ath10k/pci.h +++ b/drivers/net/wireless/ath/ath10k/pci.h @@ -86,48 +86,6 @@ struct pcie_state { /* PCIE_CONFIG_FLAG definitions */ #define PCIE_CONFIG_FLAG_ENABLE_L1 0x0000001 -/* Host software's Copy Engine configuration. */ -#define CE_ATTR_FLAGS 0 - -/* - * Configuration information for a Copy Engine pipe. - * Passed from Host to Target during startup (one per CE). - * - * NOTE: Structure is shared between Host software and Target firmware! - */ -struct ce_pipe_config { - __le32 pipenum; - __le32 pipedir; - __le32 nentries; - __le32 nbytes_max; - __le32 flags; - __le32 reserved; -}; - -/* - * Directions for interconnect pipe configuration. - * These definitions may be used during configuration and are shared - * between Host and Target. - * - * Pipe Directions are relative to the Host, so PIPEDIR_IN means - * "coming IN over air through Target to Host" as with a WiFi Rx operation. - * Conversely, PIPEDIR_OUT means "going OUT from Host through Target over air" - * as with a WiFi Tx operation. This is somewhat awkward for the "middle-man" - * Target since things that are "PIPEDIR_OUT" are coming IN to the Target - * over the interconnect. - */ -#define PIPEDIR_NONE 0 -#define PIPEDIR_IN 1 /* Target-->Host, WiFi Rx direction */ -#define PIPEDIR_OUT 2 /* Host->Target, WiFi Tx direction */ -#define PIPEDIR_INOUT 3 /* bidirectional */ - -/* Establish a mapping between a service/direction and a pipe. */ -struct service_to_pipe { - __le32 service_id; - __le32 pipedir; - __le32 pipenum; -}; - /* Per-pipe state. */ struct ath10k_pci_pipe { /* Handle of underlying Copy Engine */ diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c index a3a7042fe13a..fa1843a7e0fd 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.c +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -14,19 +14,20 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ -#include <linux/module.h> +#include <linux/clk.h> #include <linux/kernel.h> -#include "debug.h" -#include "hif.h" -#include "htc.h" -#include "ce.h" -#include "snoc.h" +#include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/regulator/consumer.h> -#include <linux/clk.h> -#define WCN3990_CE_ATTR_FLAGS 0 + +#include "ce.h" +#include "debug.h" +#include "hif.h" +#include "htc.h" +#include "snoc.h" + #define ATH10K_SNOC_RX_POST_RETRY_MS 50 #define CE_POLL_PIPE 4 @@ -449,7 +450,7 @@ static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state) static void ath10k_snoc_rx_replenish_retry(struct timer_list *t) { - struct ath10k_pci *ar_snoc = from_timer(ar_snoc, t, rx_post_retry); + struct ath10k_snoc *ar_snoc = from_timer(ar_snoc, t, rx_post_retry); struct ath10k *ar = ar_snoc->ar; ath10k_snoc_rx_post(ar); @@ -820,7 +821,7 @@ static const struct ath10k_bus_ops ath10k_snoc_bus_ops = { .write32 = ath10k_snoc_write32, }; -int ath10k_snoc_get_ce_id_from_irq(struct ath10k *ar, int irq) +static int ath10k_snoc_get_ce_id_from_irq(struct ath10k *ar, int irq) { struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); int i; @@ -868,7 +869,7 @@ static int ath10k_snoc_napi_poll(struct napi_struct *ctx, int budget) return done; } -void ath10k_snoc_init_napi(struct ath10k *ar) +static void ath10k_snoc_init_napi(struct ath10k *ar) { netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll, ATH10K_NAPI_BUDGET); @@ -1303,13 +1304,13 @@ static int ath10k_snoc_probe(struct platform_device *pdev) ar_snoc->ce.bus_ops = &ath10k_snoc_bus_ops; ar->ce_priv = &ar_snoc->ce; - ath10k_snoc_resource_init(ar); + ret = ath10k_snoc_resource_init(ar); if (ret) { ath10k_warn(ar, "failed to initialize resource: %d\n", ret); goto err_core_destroy; } - ath10k_snoc_setup_resource(ar); + ret = ath10k_snoc_setup_resource(ar); if (ret) { ath10k_warn(ar, "failed to setup resource: %d\n", ret); goto err_core_destroy; @@ -1388,25 +1389,7 @@ static struct platform_driver ath10k_snoc_driver = { .of_match_table = ath10k_snoc_dt_match, }, }; - -static int __init ath10k_snoc_init(void) -{ - int ret; - - ret = platform_driver_register(&ath10k_snoc_driver); - if (ret) - pr_err("failed to register ath10k snoc driver: %d\n", - ret); - - return ret; -} -module_init(ath10k_snoc_init); - -static void __exit ath10k_snoc_exit(void) -{ - platform_driver_unregister(&ath10k_snoc_driver); -} -module_exit(ath10k_snoc_exit); +module_platform_driver(ath10k_snoc_driver); MODULE_AUTHOR("Qualcomm"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/ath/ath10k/snoc.h b/drivers/net/wireless/ath/ath10k/snoc.h index 05dc98f46ccd..f9e530189d48 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.h +++ b/drivers/net/wireless/ath/ath10k/snoc.h @@ -19,7 +19,6 @@ #include "hw.h" #include "ce.h" -#include "pci.h" struct ath10k_snoc_drv_priv { enum ath10k_hw_rev hw_rev; diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c index af6995de7e00..653b6d013207 100644 --- a/drivers/net/wireless/ath/ath10k/spectral.c +++ b/drivers/net/wireless/ath/ath10k/spectral.c @@ -145,7 +145,7 @@ int ath10k_spectral_process_fft(struct ath10k *ar, fft_sample->noise = __cpu_to_be16(phyerr->nf_chains[chain_idx]); bins = (u8 *)fftr; - bins += sizeof(*fftr); + bins += sizeof(*fftr) + ar->hw_params.spectral_bin_offset; fft_sample->tsf = __cpu_to_be64(tsf); diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 
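The removed ath10k_snoc_init()/ath10k_snoc_exit() pair did nothing beyond registering and unregistering the platform driver (plus a custom pr_err on failure), which is exactly the boilerplate module_platform_driver() generates. Minimal equivalent, sketched:

#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	return 0;	/* resource setup would go here */
}

static struct platform_driver example_driver = {
	.probe	= example_probe,
	.driver	= {
		.name = "example",
	},
};

/* Expands to the same __init/__exit registration pair deleted above. */
module_platform_driver(example_driver);
MODULE_LICENSE("Dual BSD/GPL");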
f97ab795cf2e..877249ac6fd4 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -2366,7 +2366,7 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) */ if (channel >= 1 && channel <= 14) { status->band = NL80211_BAND_2GHZ; - } else if (channel >= 36 && channel <= 169) { + } else if (channel >= 36 && channel <= ATH10K_MAX_5G_CHAN) { status->band = NL80211_BAND_5GHZ; } else { /* Shouldn't happen unless list of advertised channels to @@ -4602,10 +4602,6 @@ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb) ev = (struct wmi_pdev_tpc_config_event *)skb->data; - tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC); - if (!tpc_stats) - return; - num_tx_chain = __le32_to_cpu(ev->num_tx_chain); if (num_tx_chain > WMI_TPC_TX_N_CHAIN) { @@ -4614,6 +4610,10 @@ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb) return; } + tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC); + if (!tpc_stats) + return; + ath10k_wmi_tpc_config_get_rate_code(rate_code, pream_table, num_tx_chain); @@ -5018,13 +5018,11 @@ static int ath10k_wmi_alloc_chunk(struct ath10k *ar, u32 req_id, void *vaddr; pool_size = num_units * round_up(unit_len, 4); - vaddr = dma_alloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL); + vaddr = dma_zalloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL); if (!vaddr) return -ENOMEM; - memset(vaddr, 0, pool_size); - ar->wmi.mem_chunks[idx].vaddr = vaddr; ar->wmi.mem_chunks[idx].paddr = paddr; ar->wmi.mem_chunks[idx].len = pool_size; diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c index f23c851765df..05140d8baa36 100644 --- a/drivers/net/wireless/ath/ath5k/pcu.c +++ b/drivers/net/wireless/ath/ath5k/pcu.c @@ -670,6 +670,7 @@ ath5k_hw_init_beacon_timers(struct ath5k_hw *ah, u32 next_beacon, u32 interval) break; case NL80211_IFTYPE_ADHOC: AR5K_REG_ENABLE_BITS(ah, AR5K_TXCFG, AR5K_TXCFG_ADHOC_BCN_ATIM); + /* fall through */ default: /* On non-STA modes timer1 is used as next DMA * beacon alert (DBA) timer and timer2 as next diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c index 0687697d5e2d..e121187f371f 100644 --- a/drivers/net/wireless/ath/ath6kl/cfg80211.c +++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c @@ -1811,20 +1811,20 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev, if (vif->target_stats.rx_byte) { sinfo->rx_bytes = vif->target_stats.rx_byte; - sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES64); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES64); sinfo->rx_packets = vif->target_stats.rx_pkt; - sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS); } if (vif->target_stats.tx_byte) { sinfo->tx_bytes = vif->target_stats.tx_byte; - sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES64); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES64); sinfo->tx_packets = vif->target_stats.tx_pkt; - sinfo->filled |= BIT(NL80211_STA_INFO_TX_PACKETS); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS); } sinfo->signal = vif->target_stats.cs_rssi; - sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); rate = vif->target_stats.tx_ucast_rate; @@ -1857,12 +1857,12 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev, return 0; } - sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); if 
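Two fixes ride along in wmi.c: the num_tx_chain bounds check now runs before the kzalloc(), because the old order leaked tpc_stats whenever an oversized event took the early return; and dma_alloc_coherent()+memset() collapses into dma_zalloc_coherent(). The validate-before-allocate shape, with illustrative names:

#include <linux/errno.h>
#include <linux/slab.h>

static int example_handle_event(u32 num_chains, u32 max_chains)
{
	void *stats;

	if (num_chains > max_chains)	/* check first: nothing to leak yet */
		return -EINVAL;

	stats = kzalloc(64, GFP_ATOMIC);
	if (!stats)
		return -ENOMEM;

	/* ... fill stats and hand it off ... */
	kfree(stats);
	return 0;
}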
(test_bit(CONNECTED, &vif->flags) && test_bit(DTIM_PERIOD_AVAIL, &vif->flags) && vif->nw_type == INFRA_NETWORK) { - sinfo->filled |= BIT(NL80211_STA_INFO_BSS_PARAM); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BSS_PARAM); sinfo->bss_param.flags = 0; sinfo->bss_param.dtim_period = vif->assoc_bss_dtim_period; sinfo->bss_param.beacon_interval = vif->assoc_bss_beacon_int; @@ -3899,16 +3899,19 @@ int ath6kl_cfg80211_init(struct ath6kl *ar) switch (ar->hw.cap) { case WMI_11AN_CAP: ht = true; + /* fall through */ case WMI_11A_CAP: band_5gig = true; break; case WMI_11GN_CAP: ht = true; + /* fall through */ case WMI_11G_CAP: band_2gig = true; break; case WMI_11AGN_CAP: ht = true; + /* fall through */ case WMI_11AG_CAP: band_2gig = true; band_5gig = true; diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c index 2195b1b7a8a6..bb50680580f3 100644 --- a/drivers/net/wireless/ath/ath6kl/sdio.c +++ b/drivers/net/wireless/ath/ath6kl/sdio.c @@ -1415,6 +1415,7 @@ static const struct sdio_device_id ath6kl_sdio_devices[] = { {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x1))}, {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x2))}, {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x18))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x19))}, {}, }; diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c index 7922550c2159..ef2dd68d3f77 100644 --- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c @@ -583,12 +583,14 @@ static void ar5008_hw_init_chain_masks(struct ath_hw *ah) case 0x5: REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP, AR_PHY_SWAP_ALT_CHAIN); + /* fall through */ case 0x3: if (ah->hw_version.macVersion == AR_SREV_REVISION_5416_10) { REG_WRITE(ah, AR_PHY_RX_CHAINMASK, 0x7); REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, 0x7); break; } + /* else: fall through */ case 0x1: case 0x2: case 0x7: diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c index 61a9b85045d2..713291881208 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c @@ -119,6 +119,7 @@ static int ar9002_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan) aModeRefSel = 2; if (aModeRefSel) break; + /* else: fall through */ case 1: default: aModeRefSel = 0; diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index f685843a2ff3..0a6eb8a8c1ed 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -538,7 +538,7 @@ static int read_file_interrupt(struct seq_file *file, void *data) if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { PR_IS("RXLP", rxlp); PR_IS("RXHP", rxhp); - PR_IS("WATHDOG", bb_watchdog); + PR_IS("WATCHDOG", bb_watchdog); } else { PR_IS("RX", rxok); } diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index e60bea4604e4..1665066f4e24 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -496,7 +496,7 @@ static void ath9k_hw_init_macaddr(struct ath_hw *ah) ath_err(common, "eeprom contains invalid mac address: %pM\n", common->macaddr); - random_ether_addr(common->macaddr); + eth_random_addr(common->macaddr); ath_err(common, "random mac address will be used: %pM\n", common->macaddr); diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 
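The BIT() to BIT_ULL() conversions in ath6kl (and in wil6210 and brcmfmac below) matter on 32-bit builds: sinfo->filled is a u64 bitmap, and the NL80211_STA_INFO_* indexes now reach bit 32 and beyond, where BIT()'s unsigned long shift would truncate. Sketch:

#include <linux/bitops.h>
#include <linux/types.h>

static void example_mark_filled(u64 *filled, unsigned int flag)
{
	/* BIT(flag) shifts an unsigned long (32 bits on 32-bit arches),
	 * so flag >= 32 would be lost; BIT_ULL() shifts in 64 bits.
	 */
	*filled |= BIT_ULL(flag);
}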
b6663c80e7dd..5eb1c0aea41d 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -1928,6 +1928,7 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw, case IEEE80211_AMPDU_TX_STOP_FLUSH: case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: flush = true; + /* fall through */ case IEEE80211_AMPDU_TX_STOP_CONT: ath9k_ps_wakeup(sc); ath_tx_aggr_stop(sc, sta, tid); diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 78946f28d0c7..013d056a7a4c 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -302,14 +302,14 @@ int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid, sinfo->generation = wil->sinfo_gen; - sinfo->filled = BIT(NL80211_STA_INFO_RX_BYTES) | - BIT(NL80211_STA_INFO_TX_BYTES) | - BIT(NL80211_STA_INFO_RX_PACKETS) | - BIT(NL80211_STA_INFO_TX_PACKETS) | - BIT(NL80211_STA_INFO_RX_BITRATE) | - BIT(NL80211_STA_INFO_TX_BITRATE) | - BIT(NL80211_STA_INFO_RX_DROP_MISC) | - BIT(NL80211_STA_INFO_TX_FAILED); + sinfo->filled = BIT_ULL(NL80211_STA_INFO_RX_BYTES) | + BIT_ULL(NL80211_STA_INFO_TX_BYTES) | + BIT_ULL(NL80211_STA_INFO_RX_PACKETS) | + BIT_ULL(NL80211_STA_INFO_TX_PACKETS) | + BIT_ULL(NL80211_STA_INFO_RX_BITRATE) | + BIT_ULL(NL80211_STA_INFO_TX_BITRATE) | + BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC) | + BIT_ULL(NL80211_STA_INFO_TX_FAILED); sinfo->txrate.flags = RATE_INFO_FLAGS_60G; sinfo->txrate.mcs = le16_to_cpu(reply.evt.bf_mcs); @@ -322,7 +322,7 @@ int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid, sinfo->tx_failed = stats->tx_errors; if (test_bit(wil_vif_fwconnected, vif->status)) { - sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING, wil->fw_capabilities)) sinfo->signal = reply.evt.rssi; diff --git a/drivers/net/wireless/atmel/atmel.c b/drivers/net/wireless/atmel/atmel.c index b01dc34d55af..3ed3d9f6aae9 100644 --- a/drivers/net/wireless/atmel/atmel.c +++ b/drivers/net/wireless/atmel/atmel.c @@ -1516,10 +1516,9 @@ struct net_device *init_atmel_card(unsigned short irq, unsigned long port, priv->present_callback = card_present; priv->card = card; priv->firmware = NULL; - priv->firmware_id[0] = '\0'; priv->firmware_type = fw_type; if (firmware) /* module parameter */ - strcpy(priv->firmware_id, firmware); + strlcpy(priv->firmware_id, firmware, sizeof(priv->firmware_id)); priv->bus_type = card_present ? 
BUS_TYPE_PCCARD : BUS_TYPE_PCI; priv->station_state = STATION_STATE_DOWN; priv->do_rx_crc = 0; @@ -2646,14 +2645,9 @@ static int atmel_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) break; } - if (!(new_firmware = kmalloc(com.len, GFP_KERNEL))) { - rc = -ENOMEM; - break; - } - - if (copy_from_user(new_firmware, com.data, com.len)) { - kfree(new_firmware); - rc = -EFAULT; + new_firmware = memdup_user(com.data, com.len); + if (IS_ERR(new_firmware)) { + rc = PTR_ERR(new_firmware); break; } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index b6122aad639e..24c4e18e7d80 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -2434,7 +2434,7 @@ static void brcmf_convert_sta_flags(u32 fw_sta_flags, struct station_info *si) struct nl80211_sta_flag_update *sfu; brcmf_dbg(TRACE, "flags %08x\n", fw_sta_flags); - si->filled |= BIT(NL80211_STA_INFO_STA_FLAGS); + si->filled |= BIT_ULL(NL80211_STA_INFO_STA_FLAGS); sfu = &si->sta_flags; sfu->mask = BIT(NL80211_STA_FLAG_WME) | BIT(NL80211_STA_FLAG_AUTHENTICATED) | @@ -2470,7 +2470,7 @@ static void brcmf_fill_bss_param(struct brcmf_if *ifp, struct station_info *si) brcmf_err("Failed to get bss info (%d)\n", err); goto out_kfree; } - si->filled |= BIT(NL80211_STA_INFO_BSS_PARAM); + si->filled |= BIT_ULL(NL80211_STA_INFO_BSS_PARAM); si->bss_param.beacon_interval = le16_to_cpu(buf->bss_le.beacon_period); si->bss_param.dtim_period = buf->bss_le.dtim_period; capability = le16_to_cpu(buf->bss_le.capability); @@ -2501,7 +2501,7 @@ brcmf_cfg80211_get_station_ibss(struct brcmf_if *ifp, brcmf_err("BRCMF_C_GET_RATE error (%d)\n", err); return err; } - sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); sinfo->txrate.legacy = rate * 5; memset(&scbval, 0, sizeof(scbval)); @@ -2512,7 +2512,7 @@ brcmf_cfg80211_get_station_ibss(struct brcmf_if *ifp, return err; } rssi = le32_to_cpu(scbval.val); - sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); sinfo->signal = rssi; err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_GET_PKTCNTS, &pktcnt, @@ -2521,10 +2521,10 @@ brcmf_cfg80211_get_station_ibss(struct brcmf_if *ifp, brcmf_err("BRCMF_C_GET_GET_PKTCNTS error (%d)\n", err); return err; } - sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS) | - BIT(NL80211_STA_INFO_RX_DROP_MISC) | - BIT(NL80211_STA_INFO_TX_PACKETS) | - BIT(NL80211_STA_INFO_TX_FAILED); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS) | + BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC) | + BIT_ULL(NL80211_STA_INFO_TX_PACKETS) | + BIT_ULL(NL80211_STA_INFO_TX_FAILED); sinfo->rx_packets = le32_to_cpu(pktcnt.rx_good_pkt); sinfo->rx_dropped_misc = le32_to_cpu(pktcnt.rx_bad_pkt); sinfo->tx_packets = le32_to_cpu(pktcnt.tx_good_pkt); @@ -2571,7 +2571,7 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev, } } brcmf_dbg(TRACE, "version %d\n", le16_to_cpu(sta_info_le.ver)); - sinfo->filled = BIT(NL80211_STA_INFO_INACTIVE_TIME); + sinfo->filled = BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME); sinfo->inactive_time = le32_to_cpu(sta_info_le.idle) * 1000; sta_flags = le32_to_cpu(sta_info_le.flags); brcmf_convert_sta_flags(sta_flags, sinfo); @@ -2581,33 +2581,33 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev, else sinfo->sta_flags.set &= ~BIT(NL80211_STA_FLAG_TDLS_PEER); if (sta_flags & BRCMF_STA_ASSOC) { - 
sinfo->filled |= BIT(NL80211_STA_INFO_CONNECTED_TIME); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CONNECTED_TIME); sinfo->connected_time = le32_to_cpu(sta_info_le.in); brcmf_fill_bss_param(ifp, sinfo); } if (sta_flags & BRCMF_STA_SCBSTATS) { - sinfo->filled |= BIT(NL80211_STA_INFO_TX_FAILED); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED); sinfo->tx_failed = le32_to_cpu(sta_info_le.tx_failures); - sinfo->filled |= BIT(NL80211_STA_INFO_TX_PACKETS); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS); sinfo->tx_packets = le32_to_cpu(sta_info_le.tx_pkts); sinfo->tx_packets += le32_to_cpu(sta_info_le.tx_mcast_pkts); - sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS); sinfo->rx_packets = le32_to_cpu(sta_info_le.rx_ucast_pkts); sinfo->rx_packets += le32_to_cpu(sta_info_le.rx_mcast_pkts); if (sinfo->tx_packets) { - sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); sinfo->txrate.legacy = le32_to_cpu(sta_info_le.tx_rate) / 100; } if (sinfo->rx_packets) { - sinfo->filled |= BIT(NL80211_STA_INFO_RX_BITRATE); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE); sinfo->rxrate.legacy = le32_to_cpu(sta_info_le.rx_rate) / 100; } if (le16_to_cpu(sta_info_le.ver) >= 4) { - sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES); sinfo->tx_bytes = le64_to_cpu(sta_info_le.tx_tot_bytes); - sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES); sinfo->rx_bytes = le64_to_cpu(sta_info_le.rx_tot_bytes); } total_rssi = 0; @@ -2623,10 +2623,10 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev, } } if (count_rssi) { - sinfo->filled |= BIT(NL80211_STA_INFO_CHAIN_SIGNAL); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL); sinfo->chains = count_rssi; - sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); total_rssi /= count_rssi; sinfo->signal = total_rssi; } else if (test_bit(BRCMF_VIF_STATUS_CONNECTED, @@ -2639,7 +2639,7 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev, goto done; } else { rssi = le32_to_cpu(scb_val.val); - sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); sinfo->signal = rssi; brcmf_dbg(CONN, "RSSI %d dBm\n", rssi); } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index 72954fd6df3b..b1f702faff4f 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -21,6 +21,7 @@ #include <net/cfg80211.h> #include <net/rtnetlink.h> #include <net/addrconf.h> +#include <net/ieee80211_radiotap.h> #include <net/ipv6.h> #include <brcmu_utils.h> #include <brcmu_wifi.h> @@ -404,6 +405,30 @@ void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb) netif_rx_ni(skb); } +void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb) +{ + if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MONITOR_FMT_RADIOTAP)) { + /* Do nothing */ + } else { + struct ieee80211_radiotap_header *radiotap; + + /* TODO: use RX status to fill some radiotap data */ + radiotap = skb_push(skb, sizeof(*radiotap)); + memset(radiotap, 0, sizeof(*radiotap)); + radiotap->it_len = cpu_to_le16(sizeof(*radiotap)); + + /* TODO: 4 bytes with receive status? 
*/ + skb->len -= 4; + } + + skb->dev = ifp->ndev; + skb_reset_mac_header(skb); + skb->pkt_type = PACKET_OTHERHOST; + skb->protocol = htons(ETH_P_802_2); + + brcmf_netif_rx(ifp, skb); +} + static int brcmf_rx_hdrpull(struct brcmf_pub *drvr, struct sk_buff *skb, struct brcmf_if **ifp) { diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h index 401f50458686..dcf6e27cc16f 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h @@ -121,6 +121,7 @@ struct brcmf_pub { struct brcmf_if *iflist[BRCMF_MAX_IFS]; s32 if2bss[BRCMF_MAX_IFS]; + struct brcmf_if *mon_if; struct mutex proto_block; unsigned char proto_buf[BRCMF_DCMD_MAXLEN]; @@ -216,6 +217,7 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp, enum brcmf_netif_stop_reason reason, bool state); void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success); void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb); +void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb); void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on); int __init brcmf_core_init(void); void __exit brcmf_core_exit(void); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c index 800a423c7bc2..4db4d444407a 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c @@ -48,6 +48,8 @@ static const struct brcmf_feat_fwcap brcmf_fwcap_map[] = { { BRCMF_FEAT_MBSS, "mbss" }, { BRCMF_FEAT_MCHAN, "mchan" }, { BRCMF_FEAT_P2P, "p2p" }, + { BRCMF_FEAT_MONITOR, "monitor" }, + { BRCMF_FEAT_MONITOR_FMT_RADIOTAP, "rtap" }, }; #ifdef DEBUG diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h index d1193825e559..0b4974df353a 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h @@ -33,6 +33,8 @@ * MFP: 802.11w Management Frame Protection. * GSCAN: enhanced scan offload feature. * FWSUP: Firmware supplicant. + * MONITOR: firmware can pass monitor packets to host. 
+ MONITOR_FMT_RADIOTAP: firmware provides monitor packets with radiotap header */ #define BRCMF_FEAT_LIST \ BRCMF_FEAT_DEF(MBSS) \ @@ -48,7 +50,9 @@ BRCMF_FEAT_DEF(WOWL_ARP_ND) \ BRCMF_FEAT_DEF(MFP) \ BRCMF_FEAT_DEF(GSCAN) \ - BRCMF_FEAT_DEF(FWSUP) + BRCMF_FEAT_DEF(FWSUP) \ + BRCMF_FEAT_DEF(MONITOR) \ + BRCMF_FEAT_DEF(MONITOR_FMT_RADIOTAP) /* * Quirks: diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h index 4b290705e3e6..d5bb81e88762 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h @@ -32,11 +32,30 @@ #define BRCMF_BSS_INFO_VERSION 109 /* curr ver of brcmf_bss_info_le struct */ #define BRCMF_BSS_RSSI_ON_CHANNEL 0x0002 -#define BRCMF_STA_WME 0x00000002 /* WMM association */ -#define BRCMF_STA_AUTHE 0x00000008 /* Authenticated */ -#define BRCMF_STA_ASSOC 0x00000010 /* Associated */ -#define BRCMF_STA_AUTHO 0x00000020 /* Authorized */ -#define BRCMF_STA_SCBSTATS 0x00004000 /* Per STA debug stats */ +#define BRCMF_STA_BRCM 0x00000001 /* Running a Broadcom driver */ +#define BRCMF_STA_WME 0x00000002 /* WMM association */ +#define BRCMF_STA_NONERP 0x00000004 /* No ERP */ +#define BRCMF_STA_AUTHE 0x00000008 /* Authenticated */ +#define BRCMF_STA_ASSOC 0x00000010 /* Associated */ +#define BRCMF_STA_AUTHO 0x00000020 /* Authorized */ +#define BRCMF_STA_WDS 0x00000040 /* Wireless Distribution System */ +#define BRCMF_STA_WDS_LINKUP 0x00000080 /* WDS traffic/probes flowing properly */ +#define BRCMF_STA_PS 0x00000100 /* STA is in power save mode from AP's viewpoint */ +#define BRCMF_STA_APSD_BE 0x00000200 /* APSD delv/trigger for AC_BE is default enabled */ +#define BRCMF_STA_APSD_BK 0x00000400 /* APSD delv/trigger for AC_BK is default enabled */ +#define BRCMF_STA_APSD_VI 0x00000800 /* APSD delv/trigger for AC_VI is default enabled */ +#define BRCMF_STA_APSD_VO 0x00001000 /* APSD delv/trigger for AC_VO is default enabled */ +#define BRCMF_STA_N_CAP 0x00002000 /* STA 802.11n capable */ +#define BRCMF_STA_SCBSTATS 0x00004000 /* Per STA debug stats */ +#define BRCMF_STA_AMPDU_CAP 0x00008000 /* STA AMPDU capable */ +#define BRCMF_STA_AMSDU_CAP 0x00010000 /* STA AMSDU capable */ +#define BRCMF_STA_MIMO_PS 0x00020000 /* mimo ps mode is enabled */ +#define BRCMF_STA_MIMO_RTS 0x00040000 /* send rts in mimo ps mode */ +#define BRCMF_STA_RIFS_CAP 0x00080000 /* rifs enabled */ +#define BRCMF_STA_VHT_CAP 0x00100000 /* STA VHT(11ac) capable */ +#define BRCMF_STA_WPS 0x00200000 /* WPS state */ +#define BRCMF_STA_DWDS_CAP 0x01000000 /* DWDS CAP */ +#define BRCMF_STA_DWDS 0x02000000 /* DWDS active */ /* size of brcmf_scan_params not including variable length array */ #define BRCMF_SCAN_PARAMS_FIXED_SIZE 64 @@ -155,6 +174,8 @@ #define BRCMF_MFP_CAPABLE 1 #define BRCMF_MFP_REQUIRED 2 +#define BRCMF_VHT_CAP_MCS_MAP_NSS_MAX 8 + /* MAX_CHUNK_LEN is the maximum length for data passing to firmware in each * ioctl. It is relatively small because firmware has small maximum size input * payload restriction for ioctls.
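
A recurring change in the cfg80211 hunks throughout this series is the conversion of BIT() to BIT_ULL() when setting sinfo->filled: the station_info filled member is a 64-bit bitmap, and newer NL80211_STA_INFO_* indices exceed 31, so BIT(), which shifts an unsigned long (only 32 bits wide on 32-bit architectures), can overflow. A minimal standalone userspace sketch of the difference, with simplified stand-ins for the kernel macros and an illustrative index of 34 in place of a high NL80211_STA_INFO_* value:

#include <stdint.h>
#include <stdio.h>

/* Simplified userspace stand-ins for the kernel's BIT()/BIT_ULL() */
#define BIT(n)     (1UL << (n))    /* unsigned long: 32 bits on 32-bit targets */
#define BIT_ULL(n) (1ULL << (n))   /* unsigned long long: always 64 bits */

int main(void)
{
	uint64_t filled = 0;

	/* On a 32-bit target, BIT(34) would shift past the width of
	 * unsigned long, which is undefined behavior; the flag is lost.
	 */
	filled |= BIT_ULL(34);	/* well-defined for any index up to 63 */

	printf("filled = 0x%016llx\n", (unsigned long long)filled);
	return 0;
}

On 64-bit builds both macros happen to produce the same result, which is why the old code appeared correct; switching to BIT_ULL() makes the 32-bit case well-defined too.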
@@ -531,6 +552,8 @@ struct brcmf_sta_info_le { /* w/hi bit set if basic */ __le32 in; /* seconds elapsed since associated */ __le32 listen_interval_inms; /* Min Listen interval in ms for STA */ + + /* Fields valid for ver >= 3 */ __le32 tx_pkts; /* # of packets transmitted */ __le32 tx_failures; /* # of packets failed */ __le32 rx_ucast_pkts; /* # of unicast packets received */ @@ -539,6 +562,8 @@ struct brcmf_sta_info_le { __le32 rx_rate; /* Rate of last successful rx frame */ __le32 rx_decrypt_succeeds; /* # of packet decrypted successfully */ __le32 rx_decrypt_failures; /* # of packet decrypted failed */ + + /* Fields valid for ver >= 4 */ __le32 tx_tot_pkts; /* # of tx pkts (ucast + mcast) */ __le32 rx_tot_pkts; /* # of data packets recvd (uni + mcast) */ __le32 tx_mcast_pkts; /* # of mcast pkts txed */ @@ -575,6 +600,14 @@ struct brcmf_sta_info_le { */ __le32 rx_pkts_retried; /* # rx with retry bit set */ __le32 tx_rate_fallback; /* lowest fallback TX rate */ + + /* Fields valid for ver >= 5 */ + struct { + __le32 count; /* # rates in this set */ + u8 rates[BRCMF_MAXRATES_IN_SET]; /* rates in 500kbps units w/hi bit set if basic */ + u8 mcs[BRCMF_MCSSET_LEN]; /* supported mcs index bit map */ + __le16 vht_mcs[BRCMF_VHT_CAP_MCS_MAP_NSS_MAX]; /* supported mcs index bit map per nss */ + } rateset_adv; }; struct brcmf_chanspec_list { diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c index c40ba8855cd5..4e8397a0cbc8 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c @@ -69,6 +69,8 @@ #define BRCMF_MSGBUF_MAX_EVENTBUF_POST 8 #define BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3 0x01 +#define BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_11 0x02 +#define BRCMF_MSGBUF_PKT_FLAGS_FRAME_MASK 0x07 #define BRCMF_MSGBUF_PKT_FLAGS_PRIO_SHIFT 5 #define BRCMF_MSGBUF_TX_FLUSH_CNT1 32 @@ -1128,6 +1130,7 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf) struct sk_buff *skb; u16 data_offset; u16 buflen; + u16 flags; u32 idx; struct brcmf_if *ifp; @@ -1137,6 +1140,7 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf) data_offset = le16_to_cpu(rx_complete->data_offset); buflen = le16_to_cpu(rx_complete->data_len); idx = le32_to_cpu(rx_complete->msg.request_id); + flags = le16_to_cpu(rx_complete->flags); skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev, msgbuf->rx_pktids, idx); @@ -1150,6 +1154,20 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf) skb_trim(skb, buflen); + if ((flags & BRCMF_MSGBUF_PKT_FLAGS_FRAME_MASK) == + BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_11) { + ifp = msgbuf->drvr->mon_if; + + if (!ifp) { + brcmf_err("Received unexpected monitor pkt\n"); + brcmu_pkt_buf_free_skb(skb); + return; + } + + brcmf_netif_mon_rx(ifp, skb); + return; + } + ifp = brcmf_get_ifp(msgbuf->drvr, rx_complete->msg.ifidx); if (!ifp || !ifp->ndev) { brcmf_err("Received pkt for invalid ifidx %d\n", diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c index 3a13d176b221..35e3b101e5cf 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c @@ -159,7 +159,7 @@ u16 read_radio_reg(struct brcms_phy *pi, u16 addr) { u16 data; - if ((addr == RADIO_IDCODE)) + if (addr == RADIO_IDCODE) return 0xffff; switch (pi->pubpi.phy_type) { diff --git 
a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c index 1a187557982e..bedec1606caa 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c @@ -16904,7 +16904,7 @@ static void wlc_phy_workarounds_nphy_rev3(struct brcms_phy *pi) } } -void wlc_phy_workarounds_nphy_rev1(struct brcms_phy *pi) +static void wlc_phy_workarounds_nphy_rev1(struct brcms_phy *pi) { static const u8 rfseq_rx2tx_events[] = { NPHY_RFSEQ_CMD_NOP, diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.c index b9672da24a9d..b24bc57ca91b 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_qmath.c @@ -213,7 +213,7 @@ static const s16 log_table[] = { 30498, 31267, 32024, - 32768 + 32767 }; #define LOG_TABLE_SIZE 32 /* log_table size */ diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c index b8fd3cc90634..1ad83ef5f202 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c @@ -692,7 +692,7 @@ static void printk_buf(int level, const u8 * data, u32 len) static void schedule_reset(struct ipw2100_priv *priv) { - unsigned long now = get_seconds(); + time64_t now = ktime_get_boottime_seconds(); /* If we haven't received a reset request within the backoff period, * then we can reset the backoff interval so this reset occurs @@ -701,10 +701,10 @@ static void schedule_reset(struct ipw2100_priv *priv) (now - priv->last_reset > priv->reset_backoff)) priv->reset_backoff = 0; - priv->last_reset = get_seconds(); + priv->last_reset = now; if (!(priv->status & STATUS_RESET_PENDING)) { - IPW_DEBUG_INFO("%s: Scheduling firmware restart (%ds).\n", + IPW_DEBUG_INFO("%s: Scheduling firmware restart (%llds).\n", priv->net_dev->name, priv->reset_backoff); netif_carrier_off(priv->net_dev); netif_stop_queue(priv->net_dev); @@ -2079,7 +2079,7 @@ static void isr_indicate_associated(struct ipw2100_priv *priv, u32 status) memcpy(priv->bssid, bssid, ETH_ALEN); priv->status |= STATUS_ASSOCIATING; - priv->connect_start = get_seconds(); + priv->connect_start = ktime_get_boottime_seconds(); schedule_delayed_work(&priv->wx_event_work, HZ / 10); } @@ -4070,8 +4070,8 @@ static ssize_t show_internals(struct device *d, struct device_attribute *attr, #define DUMP_VAR(x,y) len += sprintf(buf + len, # x ": %" y "\n", priv-> x) if (priv->status & STATUS_ASSOCIATED) - len += sprintf(buf + len, "connected: %lu\n", - get_seconds() - priv->connect_start); + len += sprintf(buf + len, "connected: %llu\n", + ktime_get_boottime_seconds() - priv->connect_start); else len += sprintf(buf + len, "not connected\n"); @@ -4108,7 +4108,7 @@ static ssize_t show_internals(struct device *d, struct device_attribute *attr, DUMP_VAR(txq_stat.lo, "d"); DUMP_VAR(ieee->scans, "d"); - DUMP_VAR(reset_backoff, "d"); + DUMP_VAR(reset_backoff, "lld"); return len; } @@ -6437,7 +6437,7 @@ static int ipw2100_suspend(struct pci_dev *pci_dev, pm_message_t state) pci_disable_device(pci_dev); pci_set_power_state(pci_dev, PCI_D3hot); - priv->suspend_at = get_seconds(); + priv->suspend_at = ktime_get_boottime_seconds(); mutex_unlock(&priv->action_mutex); @@ -6482,7 +6482,7 @@ static int ipw2100_resume(struct pci_dev *pci_dev) * the queue if needed */ 
netif_device_attach(dev); - priv->suspend_time = get_seconds() - priv->suspend_at; + priv->suspend_time = ktime_get_boottime_seconds() - priv->suspend_at; /* Bring the device back up */ if (!(priv->status & STATUS_RF_KILL_SW)) diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.h b/drivers/net/wireless/intel/ipw2x00/ipw2100.h index ce3e35f6b60f..8c11c7fa2eef 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2100.h +++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.h @@ -491,7 +491,7 @@ struct ipw2100_priv { /* Statistics */ int resets; - int reset_backoff; + time64_t reset_backoff; /* Context */ u8 essid[IW_ESSID_MAX_SIZE]; @@ -500,8 +500,8 @@ struct ipw2100_priv { u8 channel; int last_mode; - unsigned long connect_start; - unsigned long last_reset; + time64_t connect_start; + time64_t last_reset; u32 channel_mask; u32 fatal_error; @@ -581,9 +581,9 @@ struct ipw2100_priv { int user_requested_scan; - /* Track time in suspend */ - unsigned long suspend_at; - unsigned long suspend_time; + /* Track time in suspend, using CLOCK_BOOTTIME */ + time64_t suspend_at; + time64_t suspend_time; u32 interrupts; int tx_interrupts; diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c index 8a858f7e36f4..9644e7b93645 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c @@ -7112,7 +7112,7 @@ static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv) { u32 ret = 0; - if ((priv == NULL)) + if (!priv) return 0; if (!(priv->ieee->modulation & LIBIPW_OFDM_MODULATION)) @@ -11888,7 +11888,7 @@ static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state) pci_disable_device(pdev); pci_set_power_state(pdev, pci_choose_state(pdev, state)); - priv->suspend_at = get_seconds(); + priv->suspend_at = ktime_get_boottime_seconds(); return 0; } @@ -11925,7 +11925,7 @@ static int ipw_pci_resume(struct pci_dev *pdev) * the queue if needed */ netif_device_attach(dev); - priv->suspend_time = get_seconds() - priv->suspend_at; + priv->suspend_time = ktime_get_boottime_seconds() - priv->suspend_at; /* Bring the device back up */ schedule_work(&priv->up); diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.h b/drivers/net/wireless/intel/ipw2x00/ipw2200.h index aa301d1eee3c..f98ab1f71edd 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2200.h +++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.h @@ -1343,9 +1343,9 @@ struct ipw_priv { s8 tx_power; - /* Track time in suspend */ - unsigned long suspend_at; - unsigned long suspend_time; + /* Track time in suspend using CLOCK_BOOTTIME */ + time64_t suspend_at; + time64_t suspend_time; #ifdef CONFIG_PM u32 pm_state[16]; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index a6e072234398..26021bc55e98 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -4216,7 +4216,7 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, if (mvmsta->avg_energy) { sinfo->signal_avg = mvmsta->avg_energy; - sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); } if (!fw_has_capa(&mvm->fw->ucode_capa, @@ -4240,11 +4240,11 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, sinfo->rx_beacon = mvmvif->beacon_stats.num_beacons + mvmvif->beacon_stats.accu_num_beacons; - sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_RX); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX); if 
(mvmvif->beacon_stats.avg_signal) { /* firmware only reports a value after RXing a few beacons */ sinfo->rx_beacon_signal_avg = mvmvif->beacon_stats.avg_signal; - sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_SIGNAL_AVG); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG); } unlock: mutex_unlock(&mvm->mutex); diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 18e819d964f1..998dfac0fcff 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -2,6 +2,7 @@ * mac80211_hwsim - software simulator of 802.11 radio(s) for mac80211 * Copyright (c) 2008, Jouni Malinen <j@w1.fi> * Copyright (c) 2011, Javier Lopez <jlopex@gmail.com> + * Copyright (c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -2517,6 +2518,123 @@ out_err: nlmsg_free(mcast_skb); } +static const struct ieee80211_sband_iftype_data he_capa_2ghz = { + /* TODO: should we support other types, e.g., P2P?*/ + .types_mask = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP), + .he_cap = { + .has_he = true, + .he_cap_elem = { + .mac_cap_info[0] = + IEEE80211_HE_MAC_CAP0_HTC_HE, + .mac_cap_info[1] = + IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | + IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_8, + .mac_cap_info[2] = + IEEE80211_HE_MAC_CAP2_BSR | + IEEE80211_HE_MAC_CAP2_MU_CASCADING | + IEEE80211_HE_MAC_CAP2_ACK_EN, + .mac_cap_info[3] = + IEEE80211_HE_MAC_CAP3_GRP_ADDR_MULTI_STA_BA_DL_MU | + IEEE80211_HE_MAC_CAP3_OMI_CONTROL | + IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_2, + .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU, + .phy_cap_info[0] = + IEEE80211_HE_PHY_CAP0_DUAL_BAND, + .phy_cap_info[1] = + IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK | + IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | + IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD | + IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS, + .phy_cap_info[2] = + IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | + IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ | + IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ | + IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO | + IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO, + + /* Leave all the other PHY capability bytes unset, as + * DCM, beam forming, RU and PPE threshold information + * are not supported + */ + }, + .he_mcs_nss_supp = { + .rx_mcs_80 = cpu_to_le16(0xfffa), + .tx_mcs_80 = cpu_to_le16(0xfffa), + .rx_mcs_160 = cpu_to_le16(0xffff), + .tx_mcs_160 = cpu_to_le16(0xffff), + .rx_mcs_80p80 = cpu_to_le16(0xffff), + .tx_mcs_80p80 = cpu_to_le16(0xffff), + }, + }, +}; + +static const struct ieee80211_sband_iftype_data he_capa_5ghz = { + /* TODO: should we support other types, e.g., P2P?*/ + .types_mask = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP), + .he_cap = { + .has_he = true, + .he_cap_elem = { + .mac_cap_info[0] = + IEEE80211_HE_MAC_CAP0_HTC_HE, + .mac_cap_info[1] = + IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | + IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_8, + .mac_cap_info[2] = + IEEE80211_HE_MAC_CAP2_BSR | + IEEE80211_HE_MAC_CAP2_MU_CASCADING | + IEEE80211_HE_MAC_CAP2_ACK_EN, + .mac_cap_info[3] = + IEEE80211_HE_MAC_CAP3_GRP_ADDR_MULTI_STA_BA_DL_MU | + IEEE80211_HE_MAC_CAP3_OMI_CONTROL | + IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_2, + .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU, + .phy_cap_info[0] = + IEEE80211_HE_PHY_CAP0_DUAL_BAND | + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | + 
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G | + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G, + .phy_cap_info[1] = + IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK | + IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | + IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD | + IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS, + .phy_cap_info[2] = + IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | + IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ | + IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ | + IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO | + IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO, + + /* Leave all the other PHY capability bytes unset, as + * DCM, beam forming, RU and PPE threshold information + * are not supported + */ + }, + .he_mcs_nss_supp = { + .rx_mcs_80 = cpu_to_le16(0xfffa), + .tx_mcs_80 = cpu_to_le16(0xfffa), + .rx_mcs_160 = cpu_to_le16(0xfffa), + .tx_mcs_160 = cpu_to_le16(0xfffa), + .rx_mcs_80p80 = cpu_to_le16(0xfffa), + .tx_mcs_80p80 = cpu_to_le16(0xfffa), + }, + }, +}; + +static void mac80211_hswim_he_capab(struct ieee80211_supported_band *sband) +{ + if (sband->band == NL80211_BAND_2GHZ) + sband->iftype_data = + (struct ieee80211_sband_iftype_data *)&he_capa_2ghz; + else if (sband->band == NL80211_BAND_5GHZ) + sband->iftype_data = + (struct ieee80211_sband_iftype_data *)&he_capa_5ghz; + else + return; + + sband->n_iftype_data = 1; +} + static int mac80211_hwsim_new_radio(struct genl_info *info, struct hwsim_new_radio_params *param) { @@ -2678,6 +2796,9 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { struct ieee80211_supported_band *sband = &data->bands[band]; + + sband->band = band; + switch (band) { case NL80211_BAND_2GHZ: sband->channels = data->channels_2ghz; @@ -2734,6 +2855,8 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, sband->ht_cap.mcs.rx_mask[1] = 0xff; sband->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; + mac80211_hswim_he_capab(sband); + hw->wiphy->bands[band] = sband; } diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c index f99031cfdf86..57edfada0665 100644 --- a/drivers/net/wireless/marvell/libertas/cfg.c +++ b/drivers/net/wireless/marvell/libertas/cfg.c @@ -1559,10 +1559,10 @@ static int lbs_cfg_get_station(struct wiphy *wiphy, struct net_device *dev, int ret; size_t i; - sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES) | - BIT(NL80211_STA_INFO_TX_PACKETS) | - BIT(NL80211_STA_INFO_RX_BYTES) | - BIT(NL80211_STA_INFO_RX_PACKETS); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES) | + BIT_ULL(NL80211_STA_INFO_TX_PACKETS) | + BIT_ULL(NL80211_STA_INFO_RX_BYTES) | + BIT_ULL(NL80211_STA_INFO_RX_PACKETS); sinfo->tx_bytes = priv->dev->stats.tx_bytes; sinfo->tx_packets = priv->dev->stats.tx_packets; sinfo->rx_bytes = priv->dev->stats.rx_bytes; @@ -1572,14 +1572,14 @@ static int lbs_cfg_get_station(struct wiphy *wiphy, struct net_device *dev, ret = lbs_get_rssi(priv, &signal, &noise); if (ret == 0) { sinfo->signal = signal; - sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); } /* Convert priv->cur_rate from hw_value to NL80211 value */ for (i = 0; i < ARRAY_SIZE(lbs_rates); i++) { if (priv->cur_rate == lbs_rates[i].hw_value) { sinfo->txrate.legacy = lbs_rates[i].bitrate; - sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); break; } } diff --git a/drivers/net/wireless/marvell/libertas/dev.h 
b/drivers/net/wireless/marvell/libertas/dev.h index dd1ee1f0af48..469134930026 100644 --- a/drivers/net/wireless/marvell/libertas/dev.h +++ b/drivers/net/wireless/marvell/libertas/dev.h @@ -104,6 +104,7 @@ struct lbs_private { u8 fw_ready; u8 surpriseremoved; u8 setup_fw_on_resume; + u8 power_up_on_resume; int (*hw_host_to_card) (struct lbs_private *priv, u8 type, u8 *payload, u16 nb); void (*reset_card) (struct lbs_private *priv); int (*power_save) (struct lbs_private *priv); diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c index 2300e796c6ab..43743c26c071 100644 --- a/drivers/net/wireless/marvell/libertas/if_sdio.c +++ b/drivers/net/wireless/marvell/libertas/if_sdio.c @@ -1290,15 +1290,23 @@ static void if_sdio_remove(struct sdio_func *func) static int if_sdio_suspend(struct device *dev) { struct sdio_func *func = dev_to_sdio_func(dev); - int ret; struct if_sdio_card *card = sdio_get_drvdata(func); + struct lbs_private *priv = card->priv; + int ret; mmc_pm_flag_t flags = sdio_get_host_pm_caps(func); + priv->power_up_on_resume = false; /* If we're powered off anyway, just let the mmc layer remove the * card. */ - if (!lbs_iface_active(card->priv)) - return -ENOSYS; + if (!lbs_iface_active(priv)) { + if (priv->fw_ready) { + priv->power_up_on_resume = true; + if_sdio_power_off(card); + } + + return 0; + } dev_info(dev, "%s: suspend: PM flags = 0x%x\n", sdio_func_id(func), flags); @@ -1306,9 +1314,14 @@ static int if_sdio_suspend(struct device *dev) /* If we aren't being asked to wake on anything, we should bail out * and let the SD stack power down the card. */ - if (card->priv->wol_criteria == EHS_REMOVE_WAKEUP) { + if (priv->wol_criteria == EHS_REMOVE_WAKEUP) { dev_info(dev, "Suspend without wake params -- powering down card\n"); - return -ENOSYS; + if (priv->fw_ready) { + priv->power_up_on_resume = true; + if_sdio_power_off(card); + } + + return 0; } if (!(flags & MMC_PM_KEEP_POWER)) { @@ -1321,7 +1334,7 @@ static int if_sdio_suspend(struct device *dev) if (ret) return ret; - ret = lbs_suspend(card->priv); + ret = lbs_suspend(priv); if (ret) return ret; @@ -1336,6 +1349,11 @@ static int if_sdio_resume(struct device *dev) dev_info(dev, "%s: resume: we're back\n", sdio_func_id(func)); + if (card->priv->power_up_on_resume) { + if_sdio_power_on(card); + wait_event(card->pwron_waitq, card->priv->fw_ready); + } + ret = lbs_resume(card->priv); return ret; diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c index ffea610f67e2..c67a8e7be310 100644 --- a/drivers/net/wireless/marvell/libertas/if_usb.c +++ b/drivers/net/wireless/marvell/libertas/if_usb.c @@ -614,6 +614,7 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff, struct if_usb_card *cardp, struct lbs_private *priv) { + unsigned long flags; u8 i; if (recvlength > LBS_CMD_BUFFER_SIZE) { @@ -623,9 +624,7 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff, return; } - BUG_ON(!in_interrupt()); - - spin_lock(&priv->driver_lock); + spin_lock_irqsave(&priv->driver_lock, flags); i = (priv->resp_idx == 0) ? 
1 : 0; BUG_ON(priv->resp_len[i]); @@ -635,7 +634,7 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff, kfree_skb(skb); lbs_notify_command_response(priv, i); - spin_unlock(&priv->driver_lock); + spin_unlock_irqrestore(&priv->driver_lock, flags); lbs_deb_usbd(&cardp->udev->dev, "Wake up main thread to handle cmd response\n"); diff --git a/drivers/net/wireless/marvell/libertas_tf/if_usb.c b/drivers/net/wireless/marvell/libertas_tf/if_usb.c index 5153922e7ce1..e92fc5001171 100644 --- a/drivers/net/wireless/marvell/libertas_tf/if_usb.c +++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.c @@ -603,6 +603,8 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff, struct if_usb_card *cardp, struct lbtf_private *priv) { + unsigned long flags; + if (recvlength > LBS_CMD_BUFFER_SIZE) { lbtf_deb_usbd(&cardp->udev->dev, "The receive buffer is too large\n"); @@ -610,14 +612,12 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff, return; } - BUG_ON(!in_interrupt()); - - spin_lock(&priv->driver_lock); + spin_lock_irqsave(&priv->driver_lock, flags); memcpy(priv->cmd_resp_buff, recvbuff + MESSAGE_HEADER_LEN, recvlength - MESSAGE_HEADER_LEN); kfree_skb(skb); lbtf_cmd_response_rx(priv); - spin_unlock(&priv->driver_lock); + spin_unlock_irqrestore(&priv->driver_lock, flags); } /** diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index 4b5ae9098504..c02e02c17c9c 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -1353,17 +1353,17 @@ mwifiex_dump_station_info(struct mwifiex_private *priv, { u32 rate; - sinfo->filled = BIT(NL80211_STA_INFO_RX_BYTES) | BIT(NL80211_STA_INFO_TX_BYTES) | - BIT(NL80211_STA_INFO_RX_PACKETS) | BIT(NL80211_STA_INFO_TX_PACKETS) | - BIT(NL80211_STA_INFO_TX_BITRATE) | - BIT(NL80211_STA_INFO_SIGNAL) | BIT(NL80211_STA_INFO_SIGNAL_AVG); + sinfo->filled = BIT_ULL(NL80211_STA_INFO_RX_BYTES) | BIT_ULL(NL80211_STA_INFO_TX_BYTES) | + BIT_ULL(NL80211_STA_INFO_RX_PACKETS) | BIT_ULL(NL80211_STA_INFO_TX_PACKETS) | + BIT_ULL(NL80211_STA_INFO_TX_BITRATE) | + BIT_ULL(NL80211_STA_INFO_SIGNAL) | BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) { if (!node) return -ENOENT; - sinfo->filled |= BIT(NL80211_STA_INFO_INACTIVE_TIME) | - BIT(NL80211_STA_INFO_TX_FAILED); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME) | + BIT_ULL(NL80211_STA_INFO_TX_FAILED); sinfo->inactive_time = jiffies_to_msecs(jiffies - node->stats.last_rx); @@ -1413,7 +1413,7 @@ mwifiex_dump_station_info(struct mwifiex_private *priv, sinfo->txrate.legacy = rate * 5; if (priv->bss_mode == NL80211_IFTYPE_STATION) { - sinfo->filled |= BIT(NL80211_STA_INFO_BSS_PARAM); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BSS_PARAM); sinfo->bss_param.flags = 0; if (priv->curr_bss_params.bss_descriptor.cap_info_bitmap & WLAN_CAPABILITY_SHORT_PREAMBLE) diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index 510f6b8e717d..fa3e8ddfe9a9 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -1279,7 +1279,8 @@ static struct net_device_stats *mwifiex_get_stats(struct net_device *dev) static u16 mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + struct net_device *sb_dev, + select_queue_fallback_t fallback) { skb->priority = 
cfg80211_classify8021d(skb, NULL); return mwifiex_1d_to_wmm_queue[skb->priority]; diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c index 1e6a62c69ac5..5ce85d5727e4 100644 --- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c +++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c @@ -289,32 +289,6 @@ int mwifiex_uap_recv_packet(struct mwifiex_private *priv, src_node->stats.rx_packets++; } - skb->dev = priv->netdev; - skb->protocol = eth_type_trans(skb, priv->netdev); - skb->ip_summed = CHECKSUM_NONE; - - /* This is required only in case of 11n and USB/PCIE as we alloc - * a buffer of 4K only if its 11N (to be able to receive 4K - * AMSDU packets). In case of SD we allocate buffers based - * on the size of packet and hence this is not needed. - * - * Modifying the truesize here as our allocation for each - * skb is 4K but we only receive 2K packets and this cause - * the kernel to start dropping packets in case where - * application has allocated buffer based on 2K size i.e. - * if there a 64K packet received (in IP fragments and - * application allocates 64K to receive this packet but - * this packet would almost double up because we allocate - * each 1.5K fragment in 4K and pass it up. As soon as the - * 64K limit hits kernel will start to drop rest of the - * fragments. Currently we fail the Filesndl-ht.scr script - * for UDP, hence this fix - */ - if ((adapter->iface_type == MWIFIEX_USB || - adapter->iface_type == MWIFIEX_PCIE) && - (skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE)) - skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE); - if (is_multicast_ether_addr(p_ethhdr->h_dest) || mwifiex_get_sta_entry(priv, p_ethhdr->h_dest)) { if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN) @@ -350,6 +324,32 @@ int mwifiex_uap_recv_packet(struct mwifiex_private *priv, return 0; } + skb->dev = priv->netdev; + skb->protocol = eth_type_trans(skb, priv->netdev); + skb->ip_summed = CHECKSUM_NONE; + + /* This is required only in case of 11n and USB/PCIE as we alloc + * a buffer of 4K only if its 11N (to be able to receive 4K + * AMSDU packets). In case of SD we allocate buffers based + * on the size of packet and hence this is not needed. + * + * Modifying the truesize here as our allocation for each + * skb is 4K but we only receive 2K packets and this cause + * the kernel to start dropping packets in case where + * application has allocated buffer based on 2K size i.e. + * if there a 64K packet received (in IP fragments and + * application allocates 64K to receive this packet but + * this packet would almost double up because we allocate + * each 1.5K fragment in 4K and pass it up. As soon as the + * 64K limit hits kernel will start to drop rest of the + * fragments. 
Currently we fail the Filesndl-ht.scr script + * for UDP, hence this fix + */ + if ((adapter->iface_type == MWIFIEX_USB || + adapter->iface_type == MWIFIEX_PCIE) && + skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE) + skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE); + /* Forward multicast/broadcast packet to upper layer*/ if (in_interrupt()) netif_rx(skb); diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index d2166fbf50ff..96e9798bb8a0 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -390,6 +390,18 @@ struct dentry *mt76_register_debugfs(struct mt76_dev *dev); int mt76_eeprom_init(struct mt76_dev *dev, int len); void mt76_eeprom_override(struct mt76_dev *dev); +/* increment with wrap-around */ +static inline int mt76_incr(int val, int size) +{ + return (val + 1) & (size - 1); +} + +/* decrement with wrap-around */ +static inline int mt76_decr(int val, int size) +{ + return (val - 1) & (size - 1); +} + static inline struct ieee80211_txq * mtxq_to_txq(struct mt76_txq *mtxq) { diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2.h index dc12bbdbb2ee..71fcfa44fb2e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2.h @@ -27,6 +27,7 @@ #include <linux/mutex.h> #include <linux/bitops.h> #include <linux/kfifo.h> +#include <linux/average.h> #define MT7662_FIRMWARE "mt7662.bin" #define MT7662_ROM_PATCH "mt7662_rom_patch.bin" @@ -47,6 +48,8 @@ #include "mt76x2_mac.h" #include "mt76x2_dfs.h" +DECLARE_EWMA(signal, 10, 8) + struct mt76x2_mcu { struct mutex mutex; @@ -69,9 +72,8 @@ struct mt76x2_calibration { u8 agc_gain_init[MT_MAX_CHAINS]; u8 agc_gain_cur[MT_MAX_CHAINS]; - int avg_rssi[MT_MAX_CHAINS]; - int avg_rssi_all; - + u16 false_cca; + s8 avg_rssi_all; s8 agc_gain_adjust; s8 low_gain; @@ -120,10 +122,13 @@ struct mt76x2_dev { u8 beacon_mask; u8 beacon_data_mask; - u32 rxfilter; + u8 tbtt_count; + u16 beacon_int; u16 chainmask; + u32 rxfilter; + struct mt76x2_calibration cal; s8 target_power; @@ -149,6 +154,9 @@ struct mt76x2_sta { struct mt76x2_vif *vif; struct mt76x2_tx_status status; int n_frames; + + struct ewma_signal rssi; + int inactive_count; }; static inline bool is_mt7612(struct mt76x2_dev *dev) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c index 955ea3e692dd..74725902e6dc 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c @@ -91,12 +91,20 @@ mt76x2_dfs_stat_read(struct seq_file *file, void *data) struct mt76x2_dev *dev = file->private; struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + seq_printf(file, "allocated sequences:\t%d\n", + dfs_pd->seq_stats.seq_pool_len); + seq_printf(file, "used sequences:\t\t%d\n", + dfs_pd->seq_stats.seq_len); + seq_puts(file, "\n"); + for (i = 0; i < MT_DFS_NUM_ENGINES; i++) { seq_printf(file, "engine: %d\n", i); seq_printf(file, " hw pattern detected:\t%d\n", dfs_pd->stats[i].hw_pattern); seq_printf(file, " hw pulse discarded:\t%d\n", dfs_pd->stats[i].hw_pulse_discarded); + seq_printf(file, " sw pattern detected:\t%d\n", + dfs_pd->stats[i].sw_pattern); } return 0; @@ -115,6 +123,18 @@ static const struct file_operations fops_dfs_stat = { .release = single_release, }; +static int read_agc(struct seq_file *file, void *data) +{ + struct mt76x2_dev *dev = dev_get_drvdata(file->private); + + 
seq_printf(file, "avg_rssi: %d\n", dev->cal.avg_rssi_all); + seq_printf(file, "low_gain: %d\n", dev->cal.low_gain); + seq_printf(file, "false_cca: %d\n", dev->cal.false_cca); + seq_printf(file, "agc_gain_adjust: %d\n", dev->cal.agc_gain_adjust); + + return 0; +} + void mt76x2_init_debugfs(struct mt76x2_dev *dev) { struct dentry *dir; @@ -130,4 +150,6 @@ void mt76x2_init_debugfs(struct mt76x2_dev *dev) debugfs_create_file("dfs_stats", 0400, dir, dev, &fops_dfs_stat); debugfs_create_devm_seqfile(dev->mt76.dev, "txpower", dir, read_txpower); + + debugfs_create_devm_seqfile(dev->mt76.dev, "agc", dir, read_agc); } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c index f936dc9a5476..374cc655c11d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c @@ -159,6 +159,81 @@ static void mt76x2_dfs_set_capture_mode_ctrl(struct mt76x2_dev *dev, mt76_wr(dev, MT_BBP(DFS, 36), data); } +static void mt76x2_dfs_seq_pool_put(struct mt76x2_dev *dev, + struct mt76x2_dfs_sequence *seq) +{ + struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + + list_add(&seq->head, &dfs_pd->seq_pool); + + dfs_pd->seq_stats.seq_pool_len++; + dfs_pd->seq_stats.seq_len--; +} + +static +struct mt76x2_dfs_sequence *mt76x2_dfs_seq_pool_get(struct mt76x2_dev *dev) +{ + struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x2_dfs_sequence *seq; + + if (list_empty(&dfs_pd->seq_pool)) { + seq = devm_kzalloc(dev->mt76.dev, sizeof(*seq), GFP_ATOMIC); + } else { + seq = list_first_entry(&dfs_pd->seq_pool, + struct mt76x2_dfs_sequence, + head); + list_del(&seq->head); + dfs_pd->seq_stats.seq_pool_len--; + } + if (seq) + dfs_pd->seq_stats.seq_len++; + + return seq; +} + +static int mt76x2_dfs_get_multiple(int val, int frac, int margin) +{ + int remainder, factor; + + if (!frac) + return 0; + + if (abs(val - frac) <= margin) + return 1; + + factor = val / frac; + remainder = val % frac; + + if (remainder > margin) { + if ((frac - remainder) <= margin) + factor++; + else + factor = 0; + } + return factor; +} + +static void mt76x2_dfs_detector_reset(struct mt76x2_dev *dev) +{ + struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x2_dfs_sequence *seq, *tmp_seq; + int i; + + /* reset hw detector */ + mt76_wr(dev, MT_BBP(DFS, 1), 0xf); + + /* reset sw detector */ + for (i = 0; i < ARRAY_SIZE(dfs_pd->event_rb); i++) { + dfs_pd->event_rb[i].h_rb = 0; + dfs_pd->event_rb[i].t_rb = 0; + } + + list_for_each_entry_safe(seq, tmp_seq, &dfs_pd->sequences, head) { + list_del_init(&seq->head); + mt76x2_dfs_seq_pool_put(dev, seq); + } +} + static bool mt76x2_dfs_check_chirp(struct mt76x2_dev *dev) { bool ret = false; @@ -295,6 +370,256 @@ static bool mt76x2_dfs_check_hw_pulse(struct mt76x2_dev *dev, return ret; } +static bool mt76x2_dfs_fetch_event(struct mt76x2_dev *dev, + struct mt76x2_dfs_event *event) +{ + u32 data; + + /* 1st: DFS_R37[31]: 0 (engine 0) - 1 (engine 2) + * 2nd: DFS_R37[21:0]: pulse time + * 3rd: DFS_R37[11:0]: pulse width + * 3rd: DFS_R37[25:16]: phase + * 4th: DFS_R37[12:0]: current pwr + * 4th: DFS_R37[21:16]: pwr stable counter + * + * 1st: DFS_R37[31:0] set to 0xffffffff means no event detected + */ + data = mt76_rr(dev, MT_BBP(DFS, 37)); + if (!MT_DFS_CHECK_EVENT(data)) + return false; + + event->engine = MT_DFS_EVENT_ENGINE(data); + data = mt76_rr(dev, MT_BBP(DFS, 37)); + event->ts = MT_DFS_EVENT_TIMESTAMP(data); + data = mt76_rr(dev, MT_BBP(DFS, 37)); + event->width = 
MT_DFS_EVENT_WIDTH(data); + + return true; +} + +static bool mt76x2_dfs_check_event(struct mt76x2_dev *dev, + struct mt76x2_dfs_event *event) +{ + if (event->engine == 2) { + struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x2_dfs_event_rb *event_buff = &dfs_pd->event_rb[1]; + u16 last_event_idx; + u32 delta_ts; + + last_event_idx = mt76_decr(event_buff->t_rb, + MT_DFS_EVENT_BUFLEN); + delta_ts = event->ts - event_buff->data[last_event_idx].ts; + if (delta_ts < MT_DFS_EVENT_TIME_MARGIN && + event_buff->data[last_event_idx].width >= 200) + return false; + } + return true; +} + +static void mt76x2_dfs_queue_event(struct mt76x2_dev *dev, + struct mt76x2_dfs_event *event) +{ + struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x2_dfs_event_rb *event_buff; + + /* add radar event to ring buffer */ + event_buff = event->engine == 2 ? &dfs_pd->event_rb[1] + : &dfs_pd->event_rb[0]; + event_buff->data[event_buff->t_rb] = *event; + event_buff->data[event_buff->t_rb].fetch_ts = jiffies; + + event_buff->t_rb = mt76_incr(event_buff->t_rb, MT_DFS_EVENT_BUFLEN); + if (event_buff->t_rb == event_buff->h_rb) + event_buff->h_rb = mt76_incr(event_buff->h_rb, + MT_DFS_EVENT_BUFLEN); +} + +static int mt76x2_dfs_create_sequence(struct mt76x2_dev *dev, + struct mt76x2_dfs_event *event, + u16 cur_len) +{ + struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x2_dfs_sw_detector_params *sw_params; + u32 width_delta, with_sum, factor, cur_pri; + struct mt76x2_dfs_sequence seq, *seq_p; + struct mt76x2_dfs_event_rb *event_rb; + struct mt76x2_dfs_event *cur_event; + int i, j, end, pri; + + event_rb = event->engine == 2 ? &dfs_pd->event_rb[1] + : &dfs_pd->event_rb[0]; + + i = mt76_decr(event_rb->t_rb, MT_DFS_EVENT_BUFLEN); + end = mt76_decr(event_rb->h_rb, MT_DFS_EVENT_BUFLEN); + + while (i != end) { + cur_event = &event_rb->data[i]; + with_sum = event->width + cur_event->width; + + sw_params = &dfs_pd->sw_dpd_params; + switch (dev->dfs_pd.region) { + case NL80211_DFS_FCC: + case NL80211_DFS_JP: + if (with_sum < 600) + width_delta = 8; + else + width_delta = with_sum >> 3; + break; + case NL80211_DFS_ETSI: + if (event->engine == 2) + width_delta = with_sum >> 6; + else if (with_sum < 620) + width_delta = 24; + else + width_delta = 8; + break; + case NL80211_DFS_UNSET: + default: + return -EINVAL; + } + + pri = event->ts - cur_event->ts; + if (abs(event->width - cur_event->width) > width_delta || + pri < sw_params->min_pri) + goto next; + + if (pri > sw_params->max_pri) + break; + + seq.pri = event->ts - cur_event->ts; + seq.first_ts = cur_event->ts; + seq.last_ts = event->ts; + seq.engine = event->engine; + seq.count = 2; + + j = mt76_decr(i, MT_DFS_EVENT_BUFLEN); + while (j != end) { + cur_event = &event_rb->data[j]; + cur_pri = event->ts - cur_event->ts; + factor = mt76x2_dfs_get_multiple(cur_pri, seq.pri, + sw_params->pri_margin); + if (factor > 0) { + seq.first_ts = cur_event->ts; + seq.count++; + } + + j = mt76_decr(j, MT_DFS_EVENT_BUFLEN); + } + if (seq.count <= cur_len) + goto next; + + seq_p = mt76x2_dfs_seq_pool_get(dev); + if (!seq_p) + return -ENOMEM; + + *seq_p = seq; + INIT_LIST_HEAD(&seq_p->head); + list_add(&seq_p->head, &dfs_pd->sequences); +next: + i = mt76_decr(i, MT_DFS_EVENT_BUFLEN); + } + return 0; +} + +static u16 mt76x2_dfs_add_event_to_sequence(struct mt76x2_dev *dev, + struct mt76x2_dfs_event *event) +{ + struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x2_dfs_sw_detector_params *sw_params; + struct 
mt76x2_dfs_sequence *seq, *tmp_seq; + u16 max_seq_len = 0; + u32 factor, pri; + + sw_params = &dfs_pd->sw_dpd_params; + list_for_each_entry_safe(seq, tmp_seq, &dfs_pd->sequences, head) { + if (event->ts > seq->first_ts + MT_DFS_SEQUENCE_WINDOW) { + list_del_init(&seq->head); + mt76x2_dfs_seq_pool_put(dev, seq); + continue; + } + + if (event->engine != seq->engine) + continue; + + pri = event->ts - seq->last_ts; + factor = mt76x2_dfs_get_multiple(pri, seq->pri, + sw_params->pri_margin); + if (factor > 0) { + seq->last_ts = event->ts; + seq->count++; + max_seq_len = max_t(u16, max_seq_len, seq->count); + } + } + return max_seq_len; +} + +static bool mt76x2_dfs_check_detection(struct mt76x2_dev *dev) +{ + struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x2_dfs_sequence *seq; + + if (list_empty(&dfs_pd->sequences)) + return false; + + list_for_each_entry(seq, &dfs_pd->sequences, head) { + if (seq->count > MT_DFS_SEQUENCE_TH) { + dfs_pd->stats[seq->engine].sw_pattern++; + return true; + } + } + return false; +} + +static void mt76x2_dfs_add_events(struct mt76x2_dev *dev) +{ + struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x2_dfs_event event; + int i, seq_len; + + /* disable debug mode */ + mt76x2_dfs_set_capture_mode_ctrl(dev, false); + for (i = 0; i < MT_DFS_EVENT_LOOP; i++) { + if (!mt76x2_dfs_fetch_event(dev, &event)) + break; + + if (dfs_pd->last_event_ts > event.ts) + mt76x2_dfs_detector_reset(dev); + dfs_pd->last_event_ts = event.ts; + + if (!mt76x2_dfs_check_event(dev, &event)) + continue; + + seq_len = mt76x2_dfs_add_event_to_sequence(dev, &event); + mt76x2_dfs_create_sequence(dev, &event, seq_len); + + mt76x2_dfs_queue_event(dev, &event); + } + mt76x2_dfs_set_capture_mode_ctrl(dev, true); +} + +static void mt76x2_dfs_check_event_window(struct mt76x2_dev *dev) +{ + struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x2_dfs_event_rb *event_buff; + struct mt76x2_dfs_event *event; + int i; + + for (i = 0; i < ARRAY_SIZE(dfs_pd->event_rb); i++) { + event_buff = &dfs_pd->event_rb[i]; + + while (event_buff->h_rb != event_buff->t_rb) { + event = &event_buff->data[event_buff->h_rb]; + + /* sorted list */ + if (time_is_after_jiffies(event->fetch_ts + + MT_DFS_EVENT_WINDOW)) + break; + event_buff->h_rb = mt76_incr(event_buff->h_rb, + MT_DFS_EVENT_BUFLEN); + } + } +} + static void mt76x2_dfs_tasklet(unsigned long arg) { struct mt76x2_dev *dev = (struct mt76x2_dev *)arg; @@ -305,6 +630,24 @@ static void mt76x2_dfs_tasklet(unsigned long arg) if (test_bit(MT76_SCANNING, &dev->mt76.state)) goto out; + if (time_is_before_jiffies(dfs_pd->last_sw_check + + MT_DFS_SW_TIMEOUT)) { + bool radar_detected; + + dfs_pd->last_sw_check = jiffies; + + mt76x2_dfs_add_events(dev); + radar_detected = mt76x2_dfs_check_detection(dev); + if (radar_detected) { + /* sw detector rx radar pattern */ + ieee80211_radar_detected(dev->mt76.hw); + mt76x2_dfs_detector_reset(dev); + + return; + } + mt76x2_dfs_check_event_window(dev); + } + engine_mask = mt76_rr(dev, MT_BBP(DFS, 1)); if (!(engine_mask & 0xf)) goto out; @@ -326,9 +669,7 @@ static void mt76x2_dfs_tasklet(unsigned long arg) /* hw detector rx radar pattern */ dfs_pd->stats[i].hw_pattern++; ieee80211_radar_detected(dev->mt76.hw); - - /* reset hw detector */ - mt76_wr(dev, MT_BBP(DFS, 1), 0xf); + mt76x2_dfs_detector_reset(dev); return; } @@ -340,6 +681,32 @@ out: mt76x2_irq_enable(dev, MT_INT_GPTIMER); } +static void mt76x2_dfs_init_sw_detector(struct mt76x2_dev *dev) +{ + struct 
mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + + switch (dev->dfs_pd.region) { + case NL80211_DFS_FCC: + dfs_pd->sw_dpd_params.max_pri = MT_DFS_FCC_MAX_PRI; + dfs_pd->sw_dpd_params.min_pri = MT_DFS_FCC_MIN_PRI; + dfs_pd->sw_dpd_params.pri_margin = MT_DFS_PRI_MARGIN; + break; + case NL80211_DFS_ETSI: + dfs_pd->sw_dpd_params.max_pri = MT_DFS_ETSI_MAX_PRI; + dfs_pd->sw_dpd_params.min_pri = MT_DFS_ETSI_MIN_PRI; + dfs_pd->sw_dpd_params.pri_margin = MT_DFS_PRI_MARGIN << 2; + break; + case NL80211_DFS_JP: + dfs_pd->sw_dpd_params.max_pri = MT_DFS_JP_MAX_PRI; + dfs_pd->sw_dpd_params.min_pri = MT_DFS_JP_MIN_PRI; + dfs_pd->sw_dpd_params.pri_margin = MT_DFS_PRI_MARGIN; + break; + case NL80211_DFS_UNSET: + default: + break; + } +} + static void mt76x2_dfs_set_bbp_params(struct mt76x2_dev *dev) { u32 data; @@ -462,6 +829,7 @@ void mt76x2_dfs_init_params(struct mt76x2_dev *dev) if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) && dev->dfs_pd.region != NL80211_DFS_UNSET) { + mt76x2_dfs_init_sw_detector(dev); mt76x2_dfs_set_bbp_params(dev); /* enable debug mode */ mt76x2_dfs_set_capture_mode_ctrl(dev, true); @@ -486,7 +854,10 @@ void mt76x2_dfs_init_detector(struct mt76x2_dev *dev) { struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + INIT_LIST_HEAD(&dfs_pd->sequences); + INIT_LIST_HEAD(&dfs_pd->seq_pool); dfs_pd->region = NL80211_DFS_UNSET; + dfs_pd->last_sw_check = jiffies; tasklet_init(&dfs_pd->dfs_tasklet, mt76x2_dfs_tasklet, (unsigned long)dev); } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h index 8dbc783cc6bc..693f421bf096 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h @@ -33,6 +33,22 @@ #define MT_DFS_PKT_END_MASK 0 #define MT_DFS_CH_EN 0xf +/* sw detector params */ +#define MT_DFS_EVENT_LOOP 64 +#define MT_DFS_SW_TIMEOUT (HZ / 20) +#define MT_DFS_EVENT_WINDOW (HZ / 5) +#define MT_DFS_SEQUENCE_WINDOW (200 * (1 << 20)) +#define MT_DFS_EVENT_TIME_MARGIN 2000 +#define MT_DFS_PRI_MARGIN 4 +#define MT_DFS_SEQUENCE_TH 6 + +#define MT_DFS_FCC_MAX_PRI ((28570 << 1) + 1000) +#define MT_DFS_FCC_MIN_PRI (3000 - 2) +#define MT_DFS_JP_MAX_PRI ((80000 << 1) + 1000) +#define MT_DFS_JP_MIN_PRI (28500 - 2) +#define MT_DFS_ETSI_MAX_PRI (133333 + 125000 + 117647 + 1000) +#define MT_DFS_ETSI_MIN_PRI (4500 - 20) + struct mt76x2_radar_specs { u8 mode; u16 avg_len; @@ -50,6 +66,32 @@ struct mt76x2_radar_specs { u16 pwr_jmp; }; +#define MT_DFS_CHECK_EVENT(x) ((x) != GENMASK(31, 0)) +#define MT_DFS_EVENT_ENGINE(x) (((x) & BIT(31)) ? 
2 : 0) +#define MT_DFS_EVENT_TIMESTAMP(x) ((x) & GENMASK(21, 0)) +#define MT_DFS_EVENT_WIDTH(x) ((x) & GENMASK(11, 0)) +struct mt76x2_dfs_event { + unsigned long fetch_ts; + u32 ts; + u16 width; + u8 engine; +}; + +#define MT_DFS_EVENT_BUFLEN 256 +struct mt76x2_dfs_event_rb { + struct mt76x2_dfs_event data[MT_DFS_EVENT_BUFLEN]; + int h_rb, t_rb; +}; + +struct mt76x2_dfs_sequence { + struct list_head head; + u32 first_ts; + u32 last_ts; + u32 pri; + u16 count; + u8 engine; +}; + struct mt76x2_dfs_hw_pulse { u8 engine; u32 period; @@ -58,9 +100,21 @@ struct mt76x2_dfs_hw_pulse { u32 burst; }; +struct mt76x2_dfs_sw_detector_params { + u32 min_pri; + u32 max_pri; + u32 pri_margin; +}; + struct mt76x2_dfs_engine_stats { u32 hw_pattern; u32 hw_pulse_discarded; + u32 sw_pattern; +}; + +struct mt76x2_dfs_seq_stats { + u32 seq_pool_len; + u32 seq_len; }; struct mt76x2_dfs_pattern_detector { @@ -69,6 +123,16 @@ struct mt76x2_dfs_pattern_detector { u8 chirp_pulse_cnt; u32 chirp_pulse_ts; + struct mt76x2_dfs_sw_detector_params sw_dpd_params; + struct mt76x2_dfs_event_rb event_rb[2]; + + struct list_head sequences; + struct list_head seq_pool; + struct mt76x2_dfs_seq_stats seq_stats; + + unsigned long last_sw_check; + u32 last_event_ts; + struct mt76x2_dfs_engine_stats stats[MT_DFS_NUM_ENGINES]; struct tasklet_struct dfs_tasklet; }; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c index b49aea4da2d6..fc9af79b3e69 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c @@ -269,21 +269,31 @@ static void mt76x2_remove_hdr_pad(struct sk_buff *skb, int len) skb_pull(skb, len); } -static struct mt76_wcid * -mt76x2_rx_get_sta_wcid(struct mt76x2_dev *dev, u8 idx, bool unicast) +static struct mt76x2_sta * +mt76x2_rx_get_sta(struct mt76x2_dev *dev, u8 idx) { - struct mt76x2_sta *sta; struct mt76_wcid *wcid; if (idx >= ARRAY_SIZE(dev->wcid)) return NULL; wcid = rcu_dereference(dev->wcid[idx]); - if (unicast || !wcid) - return wcid; + if (!wcid) + return NULL; + + return container_of(wcid, struct mt76x2_sta, wcid); +} + +static struct mt76_wcid * +mt76x2_rx_get_sta_wcid(struct mt76x2_dev *dev, struct mt76x2_sta *sta, bool unicast) +{ + if (!sta) + return NULL; - sta = container_of(wcid, struct mt76x2_sta, wcid); - return &sta->vif->group_wcid; + if (unicast) + return &sta->wcid; + else + return &sta->vif->group_wcid; } int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb, @@ -291,6 +301,7 @@ int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb, { struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb; struct mt76x2_rxwi *rxwi = rxi; + struct mt76x2_sta *sta; u32 rxinfo = le32_to_cpu(rxwi->rxinfo); u32 ctl = le32_to_cpu(rxwi->ctl); u16 rate = le16_to_cpu(rxwi->rate); @@ -315,7 +326,8 @@ int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb, } wcid = FIELD_GET(MT_RXWI_CTL_WCID, ctl); - status->wcid = mt76x2_rx_get_sta_wcid(dev, wcid, unicast); + sta = mt76x2_rx_get_sta(dev, wcid); + status->wcid = mt76x2_rx_get_sta_wcid(dev, sta, unicast); len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl); pn_len = FIELD_GET(MT_RXINFO_PN_LEN, rxinfo); @@ -361,6 +373,11 @@ int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb, status->tid = FIELD_GET(MT_RXWI_TID, tid_sn); status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn); + if (sta) { + ewma_signal_add(&sta->rssi, status->signal); + sta->inactive_count = 0; + } + return 
mt76x2_mac_process_rate(status, rate); } @@ -439,15 +456,13 @@ mt76x2_mac_fill_tx_status(struct mt76x2_dev *dev, if (last_rate < IEEE80211_TX_MAX_RATES - 1) rate[last_rate + 1].idx = -1; - cur_idx = rate[last_rate].idx + st->retry; + cur_idx = rate[last_rate].idx + last_rate; for (i = 0; i <= last_rate; i++) { rate[i].flags = rate[last_rate].flags; rate[i].idx = max_t(int, 0, cur_idx - i); rate[i].count = 1; } - - if (last_rate > 0) - rate[last_rate - 1].count = st->retry + 1 - last_rate; + rate[last_rate].count = st->retry + 1 - last_rate; info->status.ampdu_len = n_frames; info->status.ampdu_ack_len = st->success ? n_frames : 0; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c index ce90ff999b49..3c0ebe6d231c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c @@ -238,10 +238,13 @@ mt76x2_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, if (changed & BSS_CHANGED_BSSID) mt76x2_mac_set_bssid(dev, mvif->idx, info->bssid); - if (changed & BSS_CHANGED_BEACON_INT) + if (changed & BSS_CHANGED_BEACON_INT) { mt76_rmw_field(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_INTVAL, info->beacon_int << 4); + dev->beacon_int = info->beacon_int; + dev->tbtt_count = 0; + } if (changed & BSS_CHANGED_BEACON_ENABLED) { tasklet_disable(&dev->pre_tbtt_tasklet); @@ -291,6 +294,8 @@ mt76x2_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, if (vif->type == NL80211_IFTYPE_AP) set_bit(MT_WCID_FLAG_CHECK_PS, &msta->wcid.flags); + ewma_signal_init(&msta->rssi); + rcu_assign_pointer(dev->wcid[idx], &msta->wcid); out: diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c index c1c38ca3330a..20ffa6a40d39 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c @@ -485,12 +485,14 @@ static void mt76x2_phy_adjust_vga_gain(struct mt76x2_dev *dev) { u32 false_cca; - u8 limit = dev->cal.low_gain > 1 ? 4 : 16; + u8 limit = dev->cal.low_gain > 0 ? 
16 : 4; false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS, mt76_rr(dev, MT_RX_STAT_1)); + dev->cal.false_cca = false_cca; if (false_cca > 800 && dev->cal.agc_gain_adjust < limit) dev->cal.agc_gain_adjust += 2; - else if (false_cca < 10 && dev->cal.agc_gain_adjust > 0) + else if ((false_cca < 10 && dev->cal.agc_gain_adjust > 0) || + (dev->cal.agc_gain_adjust >= limit && false_cca < 500)) dev->cal.agc_gain_adjust -= 2; else return; @@ -498,60 +500,115 @@ mt76x2_phy_adjust_vga_gain(struct mt76x2_dev *dev) mt76x2_phy_set_gain_val(dev); } +static int +mt76x2_phy_get_min_avg_rssi(struct mt76x2_dev *dev) +{ + struct mt76x2_sta *sta; + struct mt76_wcid *wcid; + int i, j, min_rssi = 0; + s8 cur_rssi; + + local_bh_disable(); + rcu_read_lock(); + + for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) { + unsigned long mask = dev->wcid_mask[i]; + + if (!mask) + continue; + + for (j = i * BITS_PER_LONG; mask; j++, mask >>= 1) { + if (!(mask & 1)) + continue; + + wcid = rcu_dereference(dev->wcid[j]); + if (!wcid) + continue; + + sta = container_of(wcid, struct mt76x2_sta, wcid); + spin_lock(&dev->mt76.rx_lock); + if (sta->inactive_count++ < 5) + cur_rssi = ewma_signal_read(&sta->rssi); + else + cur_rssi = 0; + spin_unlock(&dev->mt76.rx_lock); + + if (cur_rssi < min_rssi) + min_rssi = cur_rssi; + } + } + + rcu_read_unlock(); + local_bh_enable(); + + if (!min_rssi) + return -75; + + return min_rssi; +} + static void mt76x2_phy_update_channel_gain(struct mt76x2_dev *dev) { - u32 val = mt76_rr(dev, MT_BBP(AGC, 20)); - int rssi0 = (s8) FIELD_GET(MT_BBP_AGC20_RSSI0, val); - int rssi1 = (s8) FIELD_GET(MT_BBP_AGC20_RSSI1, val); u8 *gain = dev->cal.agc_gain_init; - u8 gain_delta; + u8 low_gain_delta, gain_delta; + bool gain_change; int low_gain; + u32 val; - dev->cal.avg_rssi[0] = (dev->cal.avg_rssi[0] * 15) / 16 + - (rssi0 << 8) / 16; - dev->cal.avg_rssi[1] = (dev->cal.avg_rssi[1] * 15) / 16 + - (rssi1 << 8) / 16; - dev->cal.avg_rssi_all = (dev->cal.avg_rssi[0] + - dev->cal.avg_rssi[1]) / 512; + dev->cal.avg_rssi_all = mt76x2_phy_get_min_avg_rssi(dev); low_gain = (dev->cal.avg_rssi_all > mt76x2_get_rssi_gain_thresh(dev)) + (dev->cal.avg_rssi_all > mt76x2_get_low_rssi_gain_thresh(dev)); - if (dev->cal.low_gain == low_gain) { + gain_change = (dev->cal.low_gain & 2) ^ (low_gain & 2); + dev->cal.low_gain = low_gain; + + if (!gain_change) { mt76x2_phy_adjust_vga_gain(dev); return; } - dev->cal.low_gain = low_gain; - - if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80) + if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80) { mt76_wr(dev, MT_BBP(RXO, 14), 0x00560211); - else + val = mt76_rr(dev, MT_BBP(AGC, 26)) & ~0xf; + if (low_gain == 2) + val |= 0x3; + else + val |= 0x5; + mt76_wr(dev, MT_BBP(AGC, 26), val); + } else { mt76_wr(dev, MT_BBP(RXO, 14), 0x00560423); + } - if (low_gain) { - mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a991); + if (mt76x2_has_ext_lna(dev)) + low_gain_delta = 10; + else + low_gain_delta = 14; + + if (low_gain == 2) { + mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a990); mt76_wr(dev, MT_BBP(AGC, 35), 0x08080808); mt76_wr(dev, MT_BBP(AGC, 37), 0x08080808); - if (mt76x2_has_ext_lna(dev)) - gain_delta = 10; - else - gain_delta = 14; + gain_delta = low_gain_delta; + dev->cal.agc_gain_adjust = 0; } else { - mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a990); + mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a991); if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80) mt76_wr(dev, MT_BBP(AGC, 35), 0x10101014); else mt76_wr(dev, MT_BBP(AGC, 35), 0x11111116); mt76_wr(dev, MT_BBP(AGC, 37), 0x2121262C); gain_delta = 0; + 
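
The per-station signal average fed by ewma_signal_add() in the rx path above and drained by mt76x2_phy_get_min_avg_rssi() comes from the kernel's generic EWMA helpers. A minimal sketch of that plumbing, assuming the driver declares the estimator with DECLARE_EWMA() from <linux/average.h>; the precision/weight parameters below are illustrative, not taken from this diff:

#include <linux/average.h>
#include <linux/printk.h>

/*
 * Emits struct ewma_signal plus the ewma_signal_init(),
 * ewma_signal_add() and ewma_signal_read() helpers used in the
 * hunks above: 10 fractional bits, new samples weighted 1/8.
 */
DECLARE_EWMA(signal, 10, 8);

static void ewma_signal_example(void)
{
	struct ewma_signal avg;

	ewma_signal_init(&avg);
	ewma_signal_add(&avg, 42);	/* fold in one sample */
	pr_debug("avg: %lu\n", ewma_signal_read(&avg));
}
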
dev->cal.agc_gain_adjust = low_gain_delta; } dev->cal.agc_gain_cur[0] = gain[0] - gain_delta; dev->cal.agc_gain_cur[1] = gain[1] - gain_delta; - dev->cal.agc_gain_adjust = 0; mt76x2_phy_set_gain_val(dev); + + /* clear false CCA counters */ + mt76_rr(dev, MT_RX_STAT_1); } int mt76x2_phy_set_channel(struct mt76x2_dev *dev, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c b/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c index e46eafc4c436..560376dd1133 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c @@ -218,6 +218,37 @@ mt76x2_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif) data->tail[mvif->idx] = skb; } +static void +mt76x2_resync_beacon_timer(struct mt76x2_dev *dev) +{ + u32 timer_val = dev->beacon_int << 4; + + dev->tbtt_count++; + + /* + * Beacon timer drifts by 1us every tick, the timer is configured + * in 1/16 TU (64us) units. + */ + if (dev->tbtt_count < 62) + return; + + if (dev->tbtt_count >= 64) { + dev->tbtt_count = 0; + return; + } + + /* + * The updated beacon interval takes effect after two TBTT, because + * at this point the original interval has already been loaded into + * the next TBTT_TIMER value + */ + if (dev->tbtt_count == 62) + timer_val -= 1; + + mt76_rmw_field(dev, MT_BEACON_TIME_CFG, + MT_BEACON_TIME_CFG_INTVAL, timer_val); +} + void mt76x2_pre_tbtt_tasklet(unsigned long arg) { struct mt76x2_dev *dev = (struct mt76x2_dev *) arg; @@ -226,6 +257,8 @@ void mt76x2_pre_tbtt_tasklet(unsigned long arg) struct sk_buff *skb; int i, nframes; + mt76x2_resync_beacon_timer(dev); + data.dev = dev; __skb_queue_head_init(&data.q); diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c index ae0ca8006849..656ddc659218 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c +++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c @@ -1013,6 +1013,9 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac) if (hw_info->hw_capab & QLINK_HW_CAPAB_STA_INACT_TIMEOUT) wiphy->features |= NL80211_FEATURE_INACTIVITY_TIMER; + if (hw_info->hw_capab & QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR) + wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; + if (hw_info->hw_capab & QLINK_HW_CAPAB_REG_UPDATE) { wiphy->regulatory_flags |= REGULATORY_STRICT_REG | REGULATORY_CUSTOM_REG; diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c index c5d94a95e21a..42a598f92539 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.c +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c @@ -640,83 +640,83 @@ qtnf_cmd_sta_info_parse(struct station_info *sinfo, return; if (qtnf_sta_stat_avail(inactive_time, QLINK_STA_INFO_INACTIVE_TIME)) { - sinfo->filled |= BIT(NL80211_STA_INFO_INACTIVE_TIME); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME); sinfo->inactive_time = le32_to_cpu(stats->inactive_time); } if (qtnf_sta_stat_avail(connected_time, QLINK_STA_INFO_CONNECTED_TIME)) { - sinfo->filled |= BIT(NL80211_STA_INFO_CONNECTED_TIME); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CONNECTED_TIME); sinfo->connected_time = le32_to_cpu(stats->connected_time); } if (qtnf_sta_stat_avail(signal, QLINK_STA_INFO_SIGNAL)) { - sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); sinfo->signal = stats->signal - QLINK_RSSI_OFFSET; } if (qtnf_sta_stat_avail(signal_avg, QLINK_STA_INFO_SIGNAL_AVG)) { - sinfo->filled |= 
BIT(NL80211_STA_INFO_SIGNAL_AVG); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); sinfo->signal_avg = stats->signal_avg - QLINK_RSSI_OFFSET; } if (qtnf_sta_stat_avail(rxrate, QLINK_STA_INFO_RX_BITRATE)) { - sinfo->filled |= BIT(NL80211_STA_INFO_RX_BITRATE); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE); qtnf_sta_info_parse_rate(&sinfo->rxrate, &stats->rxrate); } if (qtnf_sta_stat_avail(txrate, QLINK_STA_INFO_TX_BITRATE)) { - sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); qtnf_sta_info_parse_rate(&sinfo->txrate, &stats->txrate); } if (qtnf_sta_stat_avail(sta_flags, QLINK_STA_INFO_STA_FLAGS)) { - sinfo->filled |= BIT(NL80211_STA_INFO_STA_FLAGS); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_STA_FLAGS); qtnf_sta_info_parse_flags(&sinfo->sta_flags, &stats->sta_flags); } if (qtnf_sta_stat_avail(rx_bytes, QLINK_STA_INFO_RX_BYTES)) { - sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES); sinfo->rx_bytes = le64_to_cpu(stats->rx_bytes); } if (qtnf_sta_stat_avail(tx_bytes, QLINK_STA_INFO_TX_BYTES)) { - sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES); sinfo->tx_bytes = le64_to_cpu(stats->tx_bytes); } if (qtnf_sta_stat_avail(rx_bytes, QLINK_STA_INFO_RX_BYTES64)) { - sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES64); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES64); sinfo->rx_bytes = le64_to_cpu(stats->rx_bytes); } if (qtnf_sta_stat_avail(tx_bytes, QLINK_STA_INFO_TX_BYTES64)) { - sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES64); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES64); sinfo->tx_bytes = le64_to_cpu(stats->tx_bytes); } if (qtnf_sta_stat_avail(rx_packets, QLINK_STA_INFO_RX_PACKETS)) { - sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS); sinfo->rx_packets = le32_to_cpu(stats->rx_packets); } if (qtnf_sta_stat_avail(tx_packets, QLINK_STA_INFO_TX_PACKETS)) { - sinfo->filled |= BIT(NL80211_STA_INFO_TX_PACKETS); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS); sinfo->tx_packets = le32_to_cpu(stats->tx_packets); } if (qtnf_sta_stat_avail(rx_beacon, QLINK_STA_INFO_BEACON_RX)) { - sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_RX); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX); sinfo->rx_beacon = le64_to_cpu(stats->rx_beacon); } if (qtnf_sta_stat_avail(rx_dropped_misc, QLINK_STA_INFO_RX_DROP_MISC)) { - sinfo->filled |= BIT(NL80211_STA_INFO_RX_DROP_MISC); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC); sinfo->rx_dropped_misc = le32_to_cpu(stats->rx_dropped_misc); } if (qtnf_sta_stat_avail(tx_failed, QLINK_STA_INFO_TX_FAILED)) { - sinfo->filled |= BIT(NL80211_STA_INFO_TX_FAILED); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED); sinfo->tx_failed = le32_to_cpu(stats->tx_failed); } @@ -2234,6 +2234,22 @@ static void qtnf_cmd_channel_tlv_add(struct sk_buff *cmd_skb, qchan->chan.flags = cpu_to_le32(flags); } +static void qtnf_cmd_randmac_tlv_add(struct sk_buff *cmd_skb, + const u8 *mac_addr, + const u8 *mac_addr_mask) +{ + struct qlink_random_mac_addr *randmac; + struct qlink_tlv_hdr *hdr = + skb_put(cmd_skb, sizeof(*hdr) + sizeof(*randmac)); + + hdr->type = cpu_to_le16(QTN_TLV_ID_RANDOM_MAC_ADDR); + hdr->len = cpu_to_le16(sizeof(*randmac)); + randmac = (struct qlink_random_mac_addr *)hdr->val; + + memcpy(randmac->mac_addr, mac_addr, ETH_ALEN); + memcpy(randmac->mac_addr_mask, mac_addr_mask, ETH_ALEN); +} + int 
qtnf_cmd_send_scan(struct qtnf_wmac *mac) { struct sk_buff *cmd_skb; @@ -2291,6 +2307,15 @@ int qtnf_cmd_send_scan(struct qtnf_wmac *mac) } } + if (scan_req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) { + pr_debug("MAC%u: scan with random addr=%pM, mask=%pM\n", + mac->macid, + scan_req->mac_addr, scan_req->mac_addr_mask); + + qtnf_cmd_randmac_tlv_add(cmd_skb, scan_req->mac_addr, + scan_req->mac_addr_mask); + } + ret = qtnf_cmd_send(mac->bus, cmd_skb, &res_code); if (unlikely(ret)) diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c index a6a450984f9a..c318340e1bd5 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/core.c +++ b/drivers/net/wireless/quantenna/qtnfmac/core.c @@ -179,6 +179,30 @@ static void qtnf_netdev_tx_timeout(struct net_device *ndev) } } +static int qtnf_netdev_set_mac_address(struct net_device *ndev, void *addr) +{ + struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev); + struct sockaddr *sa = addr; + int ret; + unsigned char old_addr[ETH_ALEN]; + + memcpy(old_addr, sa->sa_data, sizeof(old_addr)); + + ret = eth_mac_addr(ndev, sa); + if (ret) + return ret; + + qtnf_scan_done(vif->mac, true); + + ret = qtnf_cmd_send_change_intf_type(vif, vif->wdev.iftype, + sa->sa_data); + + if (ret) + memcpy(ndev->dev_addr, old_addr, ETH_ALEN); + + return ret; +} + /* Network device ops handlers */ const struct net_device_ops qtnf_netdev_ops = { .ndo_open = qtnf_netdev_open, @@ -186,6 +210,7 @@ const struct net_device_ops qtnf_netdev_ops = { .ndo_start_xmit = qtnf_netdev_hard_start_xmit, .ndo_tx_timeout = qtnf_netdev_tx_timeout, .ndo_get_stats64 = qtnf_netdev_get_stats64, + .ndo_set_mac_address = qtnf_netdev_set_mac_address, }; static int qtnf_mac_init_single_band(struct wiphy *wiphy, diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h index f85deda703fb..4a32967d0479 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h +++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h @@ -69,11 +69,14 @@ struct qlink_msg_header { * associated STAs due to inactivity. Inactivity timeout period is taken * from QLINK_CMD_START_AP parameters. * @QLINK_HW_CAPAB_DFS_OFFLOAD: device implements DFS offload functionality + * @QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR: device supports MAC Address + * Randomization in probe requests. */ enum qlink_hw_capab { QLINK_HW_CAPAB_REG_UPDATE = BIT(0), QLINK_HW_CAPAB_STA_INACT_TIMEOUT = BIT(1), QLINK_HW_CAPAB_DFS_OFFLOAD = BIT(2), + QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR = BIT(3), }; enum qlink_iface_type { @@ -1089,6 +1092,7 @@ enum qlink_tlv_id { QTN_TLV_ID_HW_ID = 0x0405, QTN_TLV_ID_CALIBRATION_VER = 0x0406, QTN_TLV_ID_UBOOT_VER = 0x0407, + QTN_TLV_ID_RANDOM_MAC_ADDR = 0x0408, }; struct qlink_tlv_hdr { @@ -1360,4 +1364,20 @@ struct qlink_sta_stats { u8 rsvd[1]; }; +/** + * struct qlink_random_mac_addr - data for QTN_TLV_ID_RANDOM_MAC_ADDR TLV + * + * Specifies MAC address mask/value for generating a random MAC address + * during scan. 
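
The kernel-doc being added here specifies the combination rule but carries no implementation; the TLV only transports the value/mask pair to the firmware. A hypothetical sketch of how a receiver could honour that rule (helper name invented; get_random_bytes() is the only real API assumed):

#include <linux/etherdevice.h>
#include <linux/random.h>

/* mask bit 1: keep the bit from addr; mask bit 0: randomise it */
static void qlink_randmac_sketch(u8 *out, const u8 *addr, const u8 *mask)
{
	int i;

	get_random_bytes(out, ETH_ALEN);
	for (i = 0; i < ETH_ALEN; i++)
		out[i] = (addr[i] & mask[i]) | (out[i] & ~mask[i]);
}
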
+ * + * @mac_addr: MAC address used with randomisation + * @mac_addr_mask: MAC address mask used with randomisation, bits that + * are 0 in the mask should be randomised, bits that are 1 should + * be taken from the @mac_addr + */ +struct qlink_random_mac_addr { + u8 mac_addr[ETH_ALEN]; + u8 mac_addr_mask[ETH_ALEN]; +} __packed; + #endif /* _QTN_QLINK_H_ */ diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index 54c9f6ab0c8c..f4122c8fdd97 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -1907,7 +1907,7 @@ void rtl_rx_ampdu_apply(struct rtl_priv *rtlpriv) reject_agg, ctrl_agg_size, agg_size); rtlpriv->hw->max_rx_aggregation_subframes = - (ctrl_agg_size ? agg_size : IEEE80211_MAX_AMPDU_BUF); + (ctrl_agg_size ? agg_size : IEEE80211_MAX_AMPDU_BUF_HT); } EXPORT_SYMBOL(rtl_rx_ampdu_apply); diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index 9935bd09db1f..51e4e92d95a0 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c @@ -2480,7 +2480,7 @@ static void rndis_fill_station_info(struct usbnet *usbdev, ret = rndis_query_oid(usbdev, RNDIS_OID_GEN_LINK_SPEED, &linkspeed, &len); if (ret == 0) { sinfo->txrate.legacy = le32_to_cpu(linkspeed) / 1000; - sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); } len = sizeof(rssi); @@ -2488,7 +2488,7 @@ static void rndis_fill_station_info(struct usbnet *usbdev, &rssi, &len); if (ret == 0) { sinfo->signal = level_to_qual(le32_to_cpu(rssi)); - sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); } } @@ -2928,6 +2928,8 @@ static void rndis_wlan_auth_indication(struct usbnet *usbdev, while (buflen >= sizeof(*auth_req)) { auth_req = (void *)buf; + if (buflen < le32_to_cpu(auth_req->length)) + return; type = "unknown"; flags = le32_to_cpu(auth_req->flags); pairwise_error = false; diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c index 86ccf84ea0c6..597e934c4630 100644 --- a/drivers/net/wireless/ti/wl18xx/debugfs.c +++ b/drivers/net/wireless/ti/wl18xx/debugfs.c @@ -20,6 +20,8 @@ * */ +#include <linux/pm_runtime.h> + #include "../wlcore/debugfs.h" #include "../wlcore/wlcore.h" #include "../wlcore/debug.h" @@ -276,15 +278,18 @@ static ssize_t radar_detection_write(struct file *file, if (unlikely(wl->state != WLCORE_STATE_ON)) goto out; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } ret = wl18xx_cmd_radar_detection_debug(wl, channel); if (ret < 0) count = ret; - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); return count; @@ -315,15 +320,18 @@ static ssize_t dynamic_fw_traces_write(struct file *file, if (unlikely(wl->state != WLCORE_STATE_ON)) goto out; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } ret = wl18xx_acx_dynamic_fw_traces(wl); if (ret < 0) count = ret; - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); return count; @@ -374,9 +382,11 @@ static ssize_t radar_debug_mode_write(struct file *file, if (unlikely(wl->state != WLCORE_STATE_ON)) goto out; - ret = 
wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } wl12xx_for_each_wlvif_ap(wl, wlvif) { wlcore_cmd_generic_cfg(wl, wlvif, @@ -384,7 +394,8 @@ static ssize_t radar_debug_mode_write(struct file *file, wl->radar_debug_mode, 0); } - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); return count; diff --git a/drivers/net/wireless/ti/wlcore/acx.c b/drivers/net/wireless/ti/wlcore/acx.c index 3ca9167d6146..7c83915a7c5e 100644 --- a/drivers/net/wireless/ti/wlcore/acx.c +++ b/drivers/net/wireless/ti/wlcore/acx.c @@ -31,7 +31,6 @@ #include "wlcore.h" #include "debug.h" #include "wl12xx_80211.h" -#include "ps.h" #include "hw_ops.h" int wl1271_acx_wake_up_conditions(struct wl1271 *wl, struct wl12xx_vif *wlvif, diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c index 761cf8573a80..903968735a74 100644 --- a/drivers/net/wireless/ti/wlcore/cmd.c +++ b/drivers/net/wireless/ti/wlcore/cmd.c @@ -23,6 +23,7 @@ #include <linux/module.h> #include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include <linux/spi/spi.h> #include <linux/etherdevice.h> #include <linux/ieee80211.h> @@ -191,6 +192,12 @@ int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl, timeout_time = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT); + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); + goto free_vector; + } + do { if (time_after(jiffies, timeout_time)) { wl1271_debug(DEBUG_CMD, "timeout waiting for event %d", @@ -222,6 +229,9 @@ int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl, } while (!event); out: + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); +free_vector: kfree(events_vector); return ret; } diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c index a2cb408be8aa..aeb74e74698e 100644 --- a/drivers/net/wireless/ti/wlcore/debugfs.c +++ b/drivers/net/wireless/ti/wlcore/debugfs.c @@ -26,6 +26,7 @@ #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/module.h> +#include <linux/pm_runtime.h> #include "wlcore.h" #include "debug.h" @@ -65,9 +66,11 @@ void wl1271_debugfs_update_stats(struct wl1271 *wl) if (unlikely(wl->state != WLCORE_STATE_ON)) goto out; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } if (!wl->plt && time_after(jiffies, wl->stats.fw_stats_update + @@ -76,7 +79,8 @@ void wl1271_debugfs_update_stats(struct wl1271 *wl) wl->stats.fw_stats_update = jiffies; } - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -118,14 +122,18 @@ static void chip_op_handler(struct wl1271 *wl, unsigned long value, return; } - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); + return; + } chip_op = arg; chip_op(wl); - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); } @@ -292,9 +300,11 @@ static ssize_t dynamic_ps_timeout_write(struct file *file, if (unlikely(wl->state != WLCORE_STATE_ON)) goto out; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } /* In case 
we're already in PSM, trigger it again to set new timeout * immediately without waiting for re-association @@ -305,7 +315,8 @@ static ssize_t dynamic_ps_timeout_write(struct file *file, wl1271_ps_set_mode(wl, wlvif, STATION_AUTO_PS_MODE); } - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -359,9 +370,11 @@ static ssize_t forced_ps_write(struct file *file, if (unlikely(wl->state != WLCORE_STATE_ON)) goto out; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } /* In case we're already in PSM, trigger it again to switch mode * immediately without waiting for re-association @@ -374,7 +387,8 @@ static ssize_t forced_ps_write(struct file *file, wl1271_ps_set_mode(wl, wlvif, ps_mode); } - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -838,15 +852,18 @@ static ssize_t rx_streaming_interval_write(struct file *file, wl->conf.rx_streaming.interval = value; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } wl12xx_for_each_wlvif_sta(wl, wlvif) { wl1271_recalc_rx_streaming(wl, wlvif); } - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); return count; @@ -893,15 +910,18 @@ static ssize_t rx_streaming_always_write(struct file *file, wl->conf.rx_streaming.always = value; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } wl12xx_for_each_wlvif_sta(wl, wlvif) { wl1271_recalc_rx_streaming(wl, wlvif); } - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); return count; @@ -940,15 +960,18 @@ static ssize_t beacon_filtering_write(struct file *file, mutex_lock(&wl->mutex); - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } wl12xx_for_each_wlvif(wl, wlvif) { ret = wl1271_acx_beacon_filter_opt(wl, wlvif, !!value); } - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); return count; @@ -1019,16 +1042,19 @@ static ssize_t sleep_auth_write(struct file *file, goto out; } - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } ret = wl1271_acx_sleep_auth(wl, value); if (ret < 0) goto out_sleep; out_sleep: - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); return count; @@ -1083,7 +1109,7 @@ static ssize_t dev_mem_read(struct file *file, * Don't fail if elp_wakeup returns an error, so the device's memory * could be read even if the FW crashed */ - wl1271_ps_elp_wakeup(wl); + pm_runtime_get_sync(wl->dev); /* store current partition and switch partition */ memcpy(&old_part, &wl->curr_part, sizeof(old_part)); @@ -1102,7 +1128,8 @@ read_err: goto part_err; part_err: - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); skip_read: mutex_unlock(&wl->mutex); @@ -1164,7 +1191,7 @@ static 
ssize_t dev_mem_write(struct file *file, const char __user *user_buf, * Don't fail if elp_wakeup returns an error, so the device's memory * could be read even if the FW crashed */ - wl1271_ps_elp_wakeup(wl); + pm_runtime_get_sync(wl->dev); /* store current partition and switch partition */ memcpy(&old_part, &wl->curr_part, sizeof(old_part)); @@ -1183,7 +1210,8 @@ write_err: goto part_err; part_err: - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); skip_write: mutex_unlock(&wl->mutex); @@ -1247,8 +1275,9 @@ static ssize_t fw_logger_write(struct file *file, } mutex_lock(&wl->mutex); - ret = wl1271_ps_elp_wakeup(wl); + ret = pm_runtime_get_sync(wl->dev); if (ret < 0) { + pm_runtime_put_noidle(wl->dev); count = ret; goto out; } @@ -1257,7 +1286,8 @@ static ssize_t fw_logger_write(struct file *file, ret = wl12xx_cmd_config_fwlog(wl); - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 3a51ab116e79..37f785f601c1 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -26,6 +26,7 @@ #include <linux/vmalloc.h> #include <linux/interrupt.h> #include <linux/irq.h> +#include <linux/pm_runtime.h> #include "wlcore.h" #include "debug.h" @@ -43,6 +44,7 @@ #define WL1271_BOOT_RETRIES 3 #define WL1271_SUSPEND_SLEEP 100 +#define WL1271_WAKEUP_TIMEOUT 500 static char *fwlog_param; static int fwlog_mem_blocks = -1; @@ -153,9 +155,11 @@ static void wl1271_rx_streaming_enable_work(struct work_struct *work) if (!wl->conf.rx_streaming.interval) goto out; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } ret = wl1271_set_rx_streaming(wl, wlvif, true); if (ret < 0) @@ -166,7 +170,8 @@ static void wl1271_rx_streaming_enable_work(struct work_struct *work) jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration)); out_sleep: - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); } @@ -183,16 +188,19 @@ static void wl1271_rx_streaming_disable_work(struct work_struct *work) if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags)) goto out; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } ret = wl1271_set_rx_streaming(wl, wlvif, false); if (ret) goto out_sleep; out_sleep: - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); } @@ -229,9 +237,11 @@ static void wlcore_rc_update_work(struct work_struct *work) if (unlikely(wl->state != WLCORE_STATE_ON)) goto out; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } if (ieee80211_vif_is_mesh(vif)) { ret = wl1271_acx_set_ht_capabilities(wl, &wlvif->rc_ht_cap, @@ -243,7 +253,8 @@ static void wlcore_rc_update_work(struct work_struct *work) } out_sleep: - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); } @@ -539,15 +550,16 @@ static int wlcore_irq_locked(struct wl1271 *wl) if (unlikely(wl->state != WLCORE_STATE_ON)) goto out; - ret = wl1271_ps_elp_wakeup(wl); - 
if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } while (!done && loopcount--) { /* * In order to avoid a race with the hardirq, clear the flag - * before acknowledging the chip. Since the mutex is held, - * wl1271_ps_elp_wakeup cannot be called concurrently. + * before acknowledging the chip. */ clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags); smp_mb__after_atomic(); @@ -641,7 +653,8 @@ static int wlcore_irq_locked(struct wl1271 *wl) wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE"); } - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: return ret; @@ -796,8 +809,6 @@ void wl12xx_queue_recovery_work(struct wl1271 *wl) wl->state = WLCORE_STATE_RESTARTING; set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags); - wl1271_ps_elp_wakeup(wl); - wlcore_disable_interrupts_nosync(wl); ieee80211_queue_work(wl->hw, &wl->recovery_work); } } @@ -819,6 +830,7 @@ size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen) static void wl12xx_read_fwlog_panic(struct wl1271 *wl) { u32 end_of_log = 0; + int error; if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) return; @@ -830,8 +842,11 @@ static void wl12xx_read_fwlog_panic(struct wl1271 *wl) * Do not send a stop fwlog command if the fw is hanged or if * dbgpins are used (due to some fw bug). */ - if (wl1271_ps_elp_wakeup(wl)) + error = pm_runtime_get_sync(wl->dev); + if (error < 0) { + pm_runtime_put_noidle(wl->dev); return; + } if (!wl->watchdog_recovery && wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS) wl12xx_cmd_stop_fwlog(wl); @@ -919,12 +934,20 @@ static void wl1271_recovery_work(struct work_struct *work) container_of(work, struct wl1271, recovery_work); struct wl12xx_vif *wlvif; struct ieee80211_vif *vif; + int error; mutex_lock(&wl->mutex); if (wl->state == WLCORE_STATE_OFF || wl->plt) goto out_unlock; + error = pm_runtime_get_sync(wl->dev); + if (error < 0) { + wl1271_warning("Enable for recovery failed"); + pm_runtime_put_noidle(wl->dev); + } + wlcore_disable_interrupts_nosync(wl); + if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) { if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST) wl12xx_read_fwlog_panic(wl); @@ -958,6 +981,8 @@ static void wl1271_recovery_work(struct work_struct *work) } wlcore_op_stop_locked(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); ieee80211_restart_hw(wl->hw); @@ -978,24 +1003,6 @@ static int wlcore_fw_wakeup(struct wl1271 *wl) return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP); } -static int wlcore_fw_sleep(struct wl1271 *wl) -{ - int ret; - - mutex_lock(&wl->mutex); - ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP); - if (ret < 0) { - wl12xx_queue_recovery_work(wl); - goto out; - } - set_bit(WL1271_FLAG_IN_ELP, &wl->flags); -out: - mutex_unlock(&wl->mutex); - mdelay(WL1271_SUSPEND_SLEEP); - - return 0; -} - static int wl1271_setup(struct wl1271 *wl) { wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL); @@ -1184,7 +1191,6 @@ int wl1271_plt_stop(struct wl1271 *wl) wl1271_flush_deferred_work(wl); cancel_work_sync(&wl->netstack_work); cancel_work_sync(&wl->recovery_work); - cancel_delayed_work_sync(&wl->elp_work); cancel_delayed_work_sync(&wl->tx_watchdog_work); mutex_lock(&wl->mutex); @@ -1719,6 +1725,7 @@ static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw, { struct wl1271 *wl = hw->priv; struct wl12xx_vif *wlvif; + unsigned long flags; int ret; 
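
The suspend/resume hunks that follow replace the driver's hand-rolled ELP entry with the runtime PM core, so system sleep simply funnels into the same callbacks. A sketch of that pairing, assuming runtime PM is already enabled for the device (wrapper names invented for illustration):

#include <linux/pm_runtime.h>

static int wlcore_sys_suspend_sketch(struct device *dev)
{
	/* runs the runtime suspend callback even if the device is active */
	return pm_runtime_force_suspend(dev);
}

static int wlcore_sys_resume_sketch(struct device *dev)
{
	return pm_runtime_force_resume(dev);
}
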
wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow); @@ -1734,8 +1741,9 @@ static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw, mutex_lock(&wl->mutex); - ret = wl1271_ps_elp_wakeup(wl); + ret = pm_runtime_get_sync(wl->dev); if (ret < 0) { + pm_runtime_put_noidle(wl->dev); mutex_unlock(&wl->mutex); return ret; } @@ -1765,6 +1773,7 @@ static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw, goto out_sleep; out_sleep: + pm_runtime_put_noidle(wl->dev); mutex_unlock(&wl->mutex); if (ret < 0) { @@ -1775,21 +1784,7 @@ out_sleep: /* flush any remaining work */ wl1271_debug(DEBUG_MAC80211, "flushing remaining works"); - /* - * disable and re-enable interrupts in order to flush - * the threaded_irq - */ - wlcore_disable_interrupts(wl); - - /* - * set suspended flag to avoid triggering a new threaded_irq - * work. no need for spinlock as interrupts are disabled. - */ - set_bit(WL1271_FLAG_SUSPENDED, &wl->flags); - - wlcore_enable_interrupts(wl); flush_work(&wl->tx_work); - flush_delayed_work(&wl->elp_work); /* * Cancel the watchdog even if above tx_flush failed. We will detect @@ -1798,15 +1793,14 @@ out_sleep: cancel_delayed_work(&wl->tx_watchdog_work); /* - * Use an immediate call for allowing the firmware to go into power - * save during suspend. - * Using a workque for this last write was only hapenning on resume - * leaving the firmware with power save disabled during suspend, - * while consuming full power during wowlan suspend. + * set suspended flag to avoid triggering a new threaded_irq + * work. */ - wlcore_fw_sleep(wl); + spin_lock_irqsave(&wl->wl_lock, flags); + set_bit(WL1271_FLAG_SUSPENDED, &wl->flags); + spin_unlock_irqrestore(&wl->wl_lock, flags); - return 0; + return pm_runtime_force_suspend(wl->dev); } static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw) @@ -1821,6 +1815,12 @@ static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw) wl->wow_enabled); WARN_ON(!wl->wow_enabled); + ret = pm_runtime_force_resume(wl->dev); + if (ret < 0) { + wl1271_error("ELP wakeup failure!"); + goto out_sleep; + } + /* * re-enable irq_work enqueuing, and call irq_work directly if * there is a pending work. 
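
Nearly every hunk from here on repeats one conversion: a wl1271_ps_elp_wakeup()/wl1271_ps_elp_sleep() bracket becomes a runtime PM reference. A condensed sketch of the new pattern (function name hypothetical; wl->dev is the driver's struct device pointer). Note that pm_runtime_get_sync() raises the usage count even when it fails, which is why each error path rebalances with pm_runtime_put_noidle():

#include <linux/pm_runtime.h>

static int wlcore_fw_op_sketch(struct wl1271 *wl)
{
	int ret;

	ret = pm_runtime_get_sync(wl->dev);
	if (ret < 0) {
		/* drop the reference taken by the failed get_sync() */
		pm_runtime_put_noidle(wl->dev);
		return ret;
	}

	/* ... issue firmware commands while the chip is held awake ... */

	pm_runtime_mark_last_busy(wl->dev);
	pm_runtime_put_autosuspend(wl->dev);

	return 0;
}
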
@@ -1857,9 +1857,11 @@ static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw) goto out_sleep; } - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } wl12xx_for_each_wlvif(wl, wlvif) { if (wlcore_is_p2p_mgmt(wlvif)) @@ -1878,7 +1880,8 @@ static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw) goto out_sleep; out_sleep: - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: wl->wow_enabled = false; @@ -1945,7 +1948,6 @@ static void wlcore_op_stop_locked(struct wl1271 *wl) cancel_delayed_work_sync(&wl->scan_complete_work); cancel_work_sync(&wl->netstack_work); cancel_work_sync(&wl->tx_work); - cancel_delayed_work_sync(&wl->elp_work); cancel_delayed_work_sync(&wl->tx_watchdog_work); /* let's notify MAC80211 about the remaining pending TX frames */ @@ -2060,13 +2062,16 @@ static void wlcore_channel_switch_work(struct work_struct *work) vif = wl12xx_wlvif_to_vif(wlvif); ieee80211_chswitch_done(vif, false); - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } wl12xx_cmd_stop_channel_switch(wl, wlvif); - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); } @@ -2128,14 +2133,17 @@ static void wlcore_pending_auth_complete_work(struct work_struct *work) if (!time_after(time_spare, wlvif->pending_auth_reply_time)) goto out; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } /* cancel the ROC if active */ wlcore_update_inconn_sta(wl, wlvif, NULL, false); - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); } @@ -2537,9 +2545,6 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw, wl12xx_get_vif_count(hw, vif, &vif_count); mutex_lock(&wl->mutex); - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) - goto out_unlock; /* * in some very corner case HW recovery scenarios its possible to @@ -2568,14 +2573,6 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw, if (ret < 0) goto out; - if (wl12xx_need_fw_change(wl, vif_count, true)) { - wl12xx_force_active_psm(wl); - set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags); - mutex_unlock(&wl->mutex); - wl1271_recovery_work(&wl->recovery_work); - return 0; - } - /* * TODO: after the nvs issue will be solved, move this block * to start(), and make sure here the driver is ON. @@ -2592,6 +2589,24 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw, goto out; } + /* + * Call runtime PM only after possible wl12xx_init_fw() above + * is done. Otherwise we do not have interrupts enabled. 
+ */ + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); + goto out_unlock; + } + + if (wl12xx_need_fw_change(wl, vif_count, true)) { + wl12xx_force_active_psm(wl); + set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags); + mutex_unlock(&wl->mutex); + wl1271_recovery_work(&wl->recovery_work); + return 0; + } + if (!wlcore_is_p2p_mgmt(wlvif)) { ret = wl12xx_cmd_role_enable(wl, vif->addr, role_type, &wlvif->role_id); @@ -2622,7 +2637,8 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw, else wl->sta_count++; out: - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out_unlock: mutex_unlock(&wl->mutex); @@ -2677,9 +2693,11 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl, if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) { /* disable active roles */ - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto deinit; + } if (wlvif->bss_type == BSS_TYPE_STA_BSS || wlvif->bss_type == BSS_TYPE_IBSS) { @@ -2697,7 +2715,8 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl, goto deinit; } - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); } deinit: wl12xx_tx_reset_wlvif(wl, wlvif); @@ -3121,9 +3140,11 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed) if (unlikely(wl->state != WLCORE_STATE_ON)) goto out; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } /* configure each interface */ wl12xx_for_each_wlvif(wl, wlvif) { @@ -3133,7 +3154,8 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed) } out_sleep: - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -3202,9 +3224,11 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw, if (unlikely(wl->state != WLCORE_STATE_ON)) goto out; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } wl12xx_for_each_wlvif(wl, wlvif) { if (wlcore_is_p2p_mgmt(wlvif)) @@ -3247,7 +3271,8 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw, */ out_sleep: - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -3454,13 +3479,16 @@ static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, goto out_wake_queues; } - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out_wake_queues; + } ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf); - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out_wake_queues: if (might_change_spare) @@ -3600,9 +3628,11 @@ static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw, goto out_unlock; } - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out_unlock; + } wlvif->default_key = key_idx; @@ -3616,7 +3646,8 @@ static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw, } out_sleep: - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out_unlock: 
mutex_unlock(&wl->mutex); @@ -3634,7 +3665,7 @@ void wlcore_regdomain_config(struct wl1271 *wl) if (unlikely(wl->state != WLCORE_STATE_ON)) goto out; - ret = wl1271_ps_elp_wakeup(wl); + ret = pm_runtime_get_sync(wl->dev); if (ret < 0) goto out; @@ -3644,7 +3675,8 @@ void wlcore_regdomain_config(struct wl1271 *wl) goto out; } - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); } @@ -3678,9 +3710,11 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw, goto out; } - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } /* fail if there is any role in ROC */ if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) { @@ -3691,7 +3725,8 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw, ret = wlcore_scan(hw->priv, vif, ssid, len, req); out_sleep: - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -3718,9 +3753,11 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw, if (wl->scan.state == WL1271_SCAN_STATE_IDLE) goto out; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } if (wl->scan.state != WL1271_SCAN_STATE_DONE) { ret = wl->ops->scan_stop(wl, wlvif); @@ -3741,7 +3778,8 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw, ieee80211_scan_completed(wl->hw, &info); out_sleep: - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -3766,9 +3804,11 @@ static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw, goto out; } - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } ret = wl->ops->sched_scan_start(wl, wlvif, req, ies); if (ret < 0) @@ -3777,7 +3817,8 @@ static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw, wl->sched_vif = wlvif; out_sleep: - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); return ret; @@ -3797,13 +3838,16 @@ static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw, if (unlikely(wl->state != WLCORE_STATE_ON)) goto out; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } wl->ops->sched_scan_stop(wl, wlvif); - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -3822,15 +3866,18 @@ static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value) goto out; } - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } ret = wl1271_acx_frag_threshold(wl, value); if (ret < 0) wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret); - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -3851,16 +3898,19 @@ static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value) goto out; } - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); 
goto out; + } wl12xx_for_each_wlvif(wl, wlvif) { ret = wl1271_acx_rts_threshold(wl, wlvif, value); if (ret < 0) wl1271_warning("set rts threshold failed: %d", ret); } - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -4607,9 +4657,11 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw, if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))) goto out; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } if ((changed & BSS_CHANGED_TXPOWER) && bss_conf->txpower != wlvif->power_level) { @@ -4626,7 +4678,8 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw, else wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed); - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -4665,9 +4718,11 @@ static void wlcore_op_change_chanctx(struct ieee80211_hw *hw, mutex_lock(&wl->mutex); - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } wl12xx_for_each_wlvif(wl, wlvif) { struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); @@ -4690,7 +4745,8 @@ static void wlcore_op_change_chanctx(struct ieee80211_hw *hw, } } - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); } @@ -4719,9 +4775,11 @@ static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw, if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))) goto out; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } wlvif->band = ctx->def.chan->band; wlvif->channel = channel; @@ -4737,7 +4795,8 @@ static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw, wlvif->radar_enabled = true; } - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -4768,9 +4827,11 @@ static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw, if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))) goto out; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } if (wlvif->radar_enabled) { wl1271_debug(DEBUG_MAC80211, "Stop radar detection"); @@ -4778,7 +4839,8 @@ static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw, wlvif->radar_enabled = false; } - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); } @@ -4835,9 +4897,11 @@ wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw, mutex_lock(&wl->mutex); - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } for (i = 0; i < n_vifs; i++) { struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif); @@ -4847,7 +4911,8 @@ wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw, goto out_sleep; } out_sleep: - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -4878,9 +4943,11 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw, if (!test_bit(WLVIF_FLAG_INITIALIZED, 
&wlvif->flags)) goto out; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } /* * the txop is confed in units of 32us by the mac80211, @@ -4899,7 +4966,8 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw, 0, 0); out_sleep: - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -4923,16 +4991,19 @@ static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw, if (unlikely(wl->state != WLCORE_STATE_ON)) goto out; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime); if (ret < 0) goto out_sleep; out_sleep: - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -5238,13 +5309,16 @@ static int wl12xx_op_sta_state(struct ieee80211_hw *hw, goto out; } - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state); - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); if (new_state < old_state) @@ -5293,9 +5367,11 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw, ba_bitmap = &wl->links[hlid].ba_bitmap; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d", tid, action); @@ -5368,7 +5444,8 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw, ret = -EINVAL; } - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -5402,16 +5479,19 @@ static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw, if (wlvif->bss_type == BSS_TYPE_STA_BSS && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) { - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } wl1271_set_band_rate(wl, wlvif); wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); ret = wl1271_acx_sta_rate_policies(wl, wlvif); - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); } out: mutex_unlock(&wl->mutex); @@ -5441,9 +5521,11 @@ static void wl12xx_op_channel_switch(struct ieee80211_hw *hw, goto out; } - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } /* TODO: change mac80211 to pass vif as param */ @@ -5465,7 +5547,8 @@ static void wl12xx_op_channel_switch(struct ieee80211_hw *hw, } out_sleep: - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -5532,9 +5615,11 @@ static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw, goto out; } - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } ret = wl->ops->channel_switch(wl, wlvif, &ch_switch); if (ret) @@ -5543,7 +5628,8 @@ static void 
wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw, set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags); out_sleep: - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); } @@ -5584,9 +5670,11 @@ static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw, goto out; } - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } ret = wl12xx_start_dev(wl, wlvif, chan->band, channel); if (ret < 0) @@ -5596,7 +5684,8 @@ static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw, ieee80211_queue_delayed_work(hw, &wl->roc_complete_work, msecs_to_jiffies(duration)); out_sleep: - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); return ret; @@ -5638,13 +5727,16 @@ static int wlcore_roc_completed(struct wl1271 *wl) goto out; } - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out; + } ret = __wlcore_roc_completed(wl); - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -5719,19 +5811,22 @@ static void wlcore_op_sta_statistics(struct ieee80211_hw *hw, if (unlikely(wl->state != WLCORE_STATE_ON)) goto out; - ret = wl1271_ps_elp_wakeup(wl); - if (ret < 0) + ret = pm_runtime_get_sync(wl->dev); + if (ret < 0) { + pm_runtime_put_noidle(wl->dev); goto out_sleep; + } ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm); if (ret < 0) goto out_sleep; - sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); sinfo->signal = rssi_dbm; out_sleep: - wl1271_ps_elp_sleep(wl); + pm_runtime_mark_last_busy(wl->dev); + pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -6300,7 +6395,6 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size, skb_queue_head_init(&wl->deferred_rx_queue); skb_queue_head_init(&wl->deferred_tx_queue); - INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work); INIT_WORK(&wl->netstack_work, wl1271_netstack_work); INIT_WORK(&wl->tx_work, wl1271_tx_work); INIT_WORK(&wl->recovery_work, wl1271_recovery_work); @@ -6575,6 +6669,99 @@ out: complete_all(&wl->nvs_loading_complete); } +static int __maybe_unused wlcore_runtime_suspend(struct device *dev) +{ + struct wl1271 *wl = dev_get_drvdata(dev); + struct wl12xx_vif *wlvif; + int error; + + /* We do not enter elp sleep in PLT mode */ + if (wl->plt) + return 0; + + /* Nothing to do if no ELP mode requested */ + if (wl->sleep_auth != WL1271_PSM_ELP) + return 0; + + wl12xx_for_each_wlvif(wl, wlvif) { + if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) && + test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) + return -EBUSY; + } + + wl1271_debug(DEBUG_PSM, "chip to elp"); + error = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP); + if (error < 0) { + wl12xx_queue_recovery_work(wl); + + return error; + } + + set_bit(WL1271_FLAG_IN_ELP, &wl->flags); + + return 0; +} + +static int __maybe_unused wlcore_runtime_resume(struct device *dev) +{ + struct wl1271 *wl = dev_get_drvdata(dev); + DECLARE_COMPLETION_ONSTACK(compl); + unsigned long flags; + int ret; + unsigned long start_time = jiffies; + bool pending = false; + + /* Nothing to do if no ELP mode requested */ + if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags)) + return 0; + + 
+static int __maybe_unused wlcore_runtime_resume(struct device *dev)
+{
+	struct wl1271 *wl = dev_get_drvdata(dev);
+	DECLARE_COMPLETION_ONSTACK(compl);
+	unsigned long flags;
+	int ret;
+	unsigned long start_time = jiffies;
+	bool pending = false;
+
+	/* Nothing to do if no ELP mode requested */
+	if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
+		return 0;
+
+	wl1271_debug(DEBUG_PSM, "waking up chip from elp");
+
+	spin_lock_irqsave(&wl->wl_lock, flags);
+	if (test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
+		pending = true;
+	else
+		wl->elp_compl = &compl;
+	spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+	ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
+	if (ret < 0) {
+		wl12xx_queue_recovery_work(wl);
+		goto err;
+	}
+
+	if (!pending) {
+		ret = wait_for_completion_timeout(&compl,
+			msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
+		if (ret == 0) {
+			wl1271_error("ELP wakeup timeout!");
+			wl12xx_queue_recovery_work(wl);
+
+			/* Return no error for runtime PM for recovery */
+			return 0;
+		}
+	}
+
+	clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);
+
+	wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
+		     jiffies_to_msecs(jiffies - start_time));
+
+	return 0;
+
+err:
+	spin_lock_irqsave(&wl->wl_lock, flags);
+	wl->elp_compl = NULL;
+	spin_unlock_irqrestore(&wl->wl_lock, flags);
+	return ret;
+}
+
+static const struct dev_pm_ops wlcore_pm_ops = {
+	SET_RUNTIME_PM_OPS(wlcore_runtime_suspend,
+			   wlcore_runtime_resume,
+			   NULL)
+};
+
 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
 {
 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
@@ -6602,6 +6789,11 @@ int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
 		wlcore_nvs_cb(NULL, wl);
 	}
 
+	wl->dev->driver->pm = &wlcore_pm_ops;
+	pm_runtime_set_autosuspend_delay(wl->dev, 50);
+	pm_runtime_use_autosuspend(wl->dev);
+	pm_runtime_enable(wl->dev);
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(wlcore_probe);
@@ -6610,6 +6802,13 @@ int wlcore_remove(struct platform_device *pdev)
 {
 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
 	struct wl1271 *wl = platform_get_drvdata(pdev);
+	int error;
+
+	error = pm_runtime_get_sync(wl->dev);
+	if (error < 0)
+		dev_warn(wl->dev, "PM runtime failed: %i\n", error);
+
+	wl->dev->driver->pm = NULL;
 
 	if (pdev_data->family && pdev_data->family->nvs_name)
 		wait_for_completion(&wl->nvs_loading_complete);
@@ -6621,6 +6820,11 @@ int wlcore_remove(struct platform_device *pdev)
 		disable_irq_wake(wl->irq);
 	}
 	wl1271_unregister_hw(wl);
+
+	pm_runtime_put_sync(wl->dev);
+	pm_runtime_dont_use_autosuspend(wl->dev);
+	pm_runtime_disable(wl->dev);
+
 	free_irq(wl->irq, wl);
 
 	wlcore_free_hw(wl);
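Probe and remove now bracket the runtime PM lifecycle. A sketch of the bring-up and the mirrored teardown, assuming the 50 ms delay used in the hunk above; my_probe_example()/my_remove_example() are hypothetical:

```c
#include <linux/device.h>
#include <linux/pm_runtime.h>

static int my_probe_example(struct device *dev)
{
	/* Idle for 50 ms before runtime_suspend is attempted. */
	pm_runtime_set_autosuspend_delay(dev, 50);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);
	return 0;
}

static void my_remove_example(struct device *dev)
{
	int error;

	/* Hold the device awake for the whole teardown. */
	error = pm_runtime_get_sync(dev);
	if (error < 0)
		dev_warn(dev, "PM runtime failed: %i\n", error);

	/* ... unregister, free IRQs ... */

	/* Drop the reference taken above, then undo the probe-time setup. */
	pm_runtime_put_sync(dev);
	pm_runtime_dont_use_autosuspend(dev);
	pm_runtime_disable(dev);
}
```

Even when get_sync() fails, the usage count was raised, which is why the unconditional put_sync() at the end still balances.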
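The resume path above parks a stack-allocated completion where the IRQ handler can find it. A self-contained sketch of that handshake, with all chip_example names hypothetical; the key subtlety is that the timeout path must unpublish the completion before the stack frame dies:

```c
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>

struct chip_example {
	spinlock_t lock;
	struct completion *wake_compl;	/* published for the IRQ path */
	bool irq_running;
};

static int chip_wait_for_wakeup_example(struct chip_example *chip)
{
	DECLARE_COMPLETION_ONSTACK(compl);
	unsigned long flags;
	bool pending = false;

	spin_lock_irqsave(&chip->lock, flags);
	if (chip->irq_running)
		pending = true;		/* IRQ in flight will ack the wakeup */
	else
		chip->wake_compl = &compl;
	spin_unlock_irqrestore(&chip->lock, flags);

	/* ... write the wakeup register here ... */

	if (!pending &&
	    !wait_for_completion_timeout(&compl, msecs_to_jiffies(500))) {
		/*
		 * Unpublish the on-stack completion before returning, or a
		 * late IRQ would complete() a dead stack frame.
		 */
		spin_lock_irqsave(&chip->lock, flags);
		chip->wake_compl = NULL;
		spin_unlock_irqrestore(&chip->lock, flags);
		return -ETIMEDOUT;
	}

	spin_lock_irqsave(&chip->lock, flags);
	chip->wake_compl = NULL;
	spin_unlock_irqrestore(&chip->lock, flags);
	return 0;
}
```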
diff --git a/drivers/net/wireless/ti/wlcore/ps.c b/drivers/net/wireless/ti/wlcore/ps.c
index b36133b739cb..9de843d1984b 100644
--- a/drivers/net/wireless/ti/wlcore/ps.c
+++ b/drivers/net/wireless/ti/wlcore/ps.c
@@ -26,152 +26,6 @@
 #include "tx.h"
 #include "debug.h"
 
-#define WL1271_WAKEUP_TIMEOUT 500
-
-#define ELP_ENTRY_DELAY 30
-#define ELP_ENTRY_DELAY_FORCE_PS 5
-
-void wl1271_elp_work(struct work_struct *work)
-{
-	struct delayed_work *dwork;
-	struct wl1271 *wl;
-	struct wl12xx_vif *wlvif;
-	int ret;
-
-	dwork = to_delayed_work(work);
-	wl = container_of(dwork, struct wl1271, elp_work);
-
-	wl1271_debug(DEBUG_PSM, "elp work");
-
-	mutex_lock(&wl->mutex);
-
-	if (unlikely(wl->state != WLCORE_STATE_ON))
-		goto out;
-
-	/* our work might have been already cancelled */
-	if (unlikely(!test_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags)))
-		goto out;
-
-	if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
-		goto out;
-
-	wl12xx_for_each_wlvif(wl, wlvif) {
-		if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) &&
-		    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
-			goto out;
-	}
-
-	wl1271_debug(DEBUG_PSM, "chip to elp");
-	ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
-	if (ret < 0) {
-		wl12xx_queue_recovery_work(wl);
-		goto out;
-	}
-
-	set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
-
-out:
-	mutex_unlock(&wl->mutex);
-}
-
-/* Routines to toggle sleep mode while in ELP */
-void wl1271_ps_elp_sleep(struct wl1271 *wl)
-{
-	struct wl12xx_vif *wlvif;
-	u32 timeout;
-
-	/* We do not enter elp sleep in PLT mode */
-	if (wl->plt)
-		return;
-
-	if (wl->sleep_auth != WL1271_PSM_ELP)
-		return;
-
-	/* we shouldn't get consecutive sleep requests */
-	if (WARN_ON(test_and_set_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags)))
-		return;
-
-	wl12xx_for_each_wlvif(wl, wlvif) {
-		if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) &&
-		    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
-			return;
-	}
-
-	timeout = wl->conf.conn.forced_ps ?
-		ELP_ENTRY_DELAY_FORCE_PS : ELP_ENTRY_DELAY;
-	ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
-				     msecs_to_jiffies(timeout));
-}
-EXPORT_SYMBOL_GPL(wl1271_ps_elp_sleep);
-
-int wl1271_ps_elp_wakeup(struct wl1271 *wl)
-{
-	DECLARE_COMPLETION_ONSTACK(compl);
-	unsigned long flags;
-	int ret;
-	unsigned long start_time = jiffies;
-	bool pending = false;
-
-	/*
-	 * we might try to wake up even if we didn't go to sleep
-	 * before (e.g. on boot)
-	 */
-	if (!test_and_clear_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags))
-		return 0;
-
-	/* don't cancel_sync as it might contend for a mutex and deadlock */
-	cancel_delayed_work(&wl->elp_work);
-
-	if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
-		return 0;
-
-	wl1271_debug(DEBUG_PSM, "waking up chip from elp");
-
-	/*
-	 * The spinlock is required here to synchronize both the work and
-	 * the completion variable in one entity.
-	 */
-	spin_lock_irqsave(&wl->wl_lock, flags);
-	if (test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
-		pending = true;
-	else
-		wl->elp_compl = &compl;
-	spin_unlock_irqrestore(&wl->wl_lock, flags);
-
-	ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
-	if (ret < 0) {
-		wl12xx_queue_recovery_work(wl);
-		goto err;
-	}
-
-	if (!pending) {
-		ret = wait_for_completion_timeout(
-			&compl, msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
-		if (ret == 0) {
-			wl1271_error("ELP wakeup timeout!");
-			wl12xx_queue_recovery_work(wl);
-			ret = -ETIMEDOUT;
-			goto err;
-		}
-	}
-
-	clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);
-
-	wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
-		     jiffies_to_msecs(jiffies - start_time));
-	goto out;
-
-err:
-	spin_lock_irqsave(&wl->wl_lock, flags);
-	wl->elp_compl = NULL;
-	spin_unlock_irqrestore(&wl->wl_lock, flags);
-	return ret;
-
-out:
-	return 0;
-}
-EXPORT_SYMBOL_GPL(wl1271_ps_elp_wakeup);
-
 int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 		       enum wl1271_cmd_ps_mode mode)
 {
diff --git a/drivers/net/wireless/ti/wlcore/ps.h b/drivers/net/wireless/ti/wlcore/ps.h
index de4f9da8ed26..411727587f95 100644
--- a/drivers/net/wireless/ti/wlcore/ps.h
+++ b/drivers/net/wireless/ti/wlcore/ps.h
@@ -29,9 +29,6 @@
 
 int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 		       enum wl1271_cmd_ps_mode mode);
-void wl1271_ps_elp_sleep(struct wl1271 *wl);
-int wl1271_ps_elp_wakeup(struct wl1271 *wl);
-void wl1271_elp_work(struct work_struct *work);
 void wl12xx_ps_link_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 			  u8 hlid, bool clean_queues);
 void wl12xx_ps_link_end(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid);
diff --git a/drivers/net/wireless/ti/wlcore/scan.c b/drivers/net/wireless/ti/wlcore/scan.c
index 5612f5916b4e..764e723e4ef9 100644
--- a/drivers/net/wireless/ti/wlcore/scan.c
+++ b/drivers/net/wireless/ti/wlcore/scan.c
@@ -22,13 +22,13 @@
  */
 
 #include <linux/ieee80211.h>
+#include <linux/pm_runtime.h>
 
 #include "wlcore.h"
 #include "debug.h"
 #include "cmd.h"
 #include "scan.h"
 #include "acx.h"
-#include "ps.h"
 #include "tx.h"
 
 void wl1271_scan_complete_work(struct work_struct *work)
@@ -67,17 +67,17 @@ void wl1271_scan_complete_work(struct work_struct *work)
 	wl->scan.req = NULL;
 	wl->scan_wlvif = NULL;
 
-	ret = wl1271_ps_elp_wakeup(wl);
-	if (ret < 0)
+	ret = pm_runtime_get_sync(wl->dev);
+	if (ret < 0) {
+		pm_runtime_put_noidle(wl->dev);
 		goto out;
+	}
 
 	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
 		/* restore hardware connection monitoring template */
 		wl1271_cmd_build_ap_probe_req(wl, wlvif, wlvif->probereq);
 	}
 
-	wl1271_ps_elp_sleep(wl);
-
 	if (wl->scan.failed) {
 		wl1271_info("Scan completed due to error.");
 		wl12xx_queue_recovery_work(wl);
@@ -85,6 +85,9 @@
 
 	wlcore_cmd_regdomain_config_locked(wl);
 
+	pm_runtime_mark_last_busy(wl->dev);
+	pm_runtime_put_autosuspend(wl->dev);
+
 	ieee80211_scan_completed(wl->hw, &info);
 
 out:
diff --git a/drivers/net/wireless/ti/wlcore/sysfs.c b/drivers/net/wireless/ti/wlcore/sysfs.c
index d31eb775e023..7425ba9471d0 100644
--- a/drivers/net/wireless/ti/wlcore/sysfs.c
+++ b/drivers/net/wireless/ti/wlcore/sysfs.c
@@ -19,9 +19,11 @@
  *
  */
 
+#include <linux/pm_runtime.h>
+
+#include "acx.h"
 #include "wlcore.h"
 #include "debug.h"
-#include "ps.h"
 #include "sysfs.h"
 
 static ssize_t wl1271_sysfs_show_bt_coex_state(struct device *dev,
@@ -68,12 +70,15 @@ static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
 	if (unlikely(wl->state != WLCORE_STATE_ON))
 		goto out;
 
-	ret = wl1271_ps_elp_wakeup(wl);
-	if (ret < 0)
+	ret = pm_runtime_get_sync(wl->dev);
+	if (ret < 0) {
+		pm_runtime_put_noidle(wl->dev);
 		goto out;
+	}
 
 	wl1271_acx_sg_enable(wl, wl->sg_enabled);
-	wl1271_ps_elp_sleep(wl);
+	pm_runtime_mark_last_busy(wl->dev);
+	pm_runtime_put_autosuspend(wl->dev);
 
 out:
 	mutex_unlock(&wl->mutex);
diff --git a/drivers/net/wireless/ti/wlcore/testmode.c b/drivers/net/wireless/ti/wlcore/testmode.c
index 009ec07c4cec..dcb2c8b0feb6 100644
--- a/drivers/net/wireless/ti/wlcore/testmode.c
+++ b/drivers/net/wireless/ti/wlcore/testmode.c
@@ -22,13 +22,13 @@
  */
 #include "testmode.h"
 
+#include <linux/pm_runtime.h>
 #include <linux/slab.h>
 #include <net/genetlink.h>
 
 #include "wlcore.h"
 #include "debug.h"
 #include "acx.h"
-#include "ps.h"
 #include "io.h"
 
 #define WL1271_TM_MAX_DATA_LENGTH 1024
@@ -97,9 +97,11 @@ static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[])
 		goto out;
 	}
 
-	ret = wl1271_ps_elp_wakeup(wl);
-	if (ret < 0)
+	ret = pm_runtime_get_sync(wl->dev);
+	if (ret < 0) {
+		pm_runtime_put_noidle(wl->dev);
 		goto out;
+	}
 
 	ret = wl1271_cmd_test(wl, buf, buf_len, answer);
 	if (ret < 0) {
@@ -141,7 +143,8 @@ static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[])
 	}
 
 out_sleep:
-	wl1271_ps_elp_sleep(wl);
+	pm_runtime_mark_last_busy(wl->dev);
+	pm_runtime_put_autosuspend(wl->dev);
 out:
 	mutex_unlock(&wl->mutex);
 
@@ -169,9 +172,11 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
 		goto out;
 	}
 
-	ret = wl1271_ps_elp_wakeup(wl);
-	if (ret < 0)
+	ret = pm_runtime_get_sync(wl->dev);
+	if (ret < 0) {
+		pm_runtime_put_noidle(wl->dev);
 		goto out;
+	}
 
 	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
 	if (!cmd) {
@@ -205,7 +210,8 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
 out_free:
 	kfree(cmd);
 out_sleep:
-	wl1271_ps_elp_sleep(wl);
+	pm_runtime_mark_last_busy(wl->dev);
+	pm_runtime_put_autosuspend(wl->dev);
 out:
 	mutex_unlock(&wl->mutex);
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index 00e9b4624dcf..b6e19c2d66b0 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -24,6 +24,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/etherdevice.h>
+#include <linux/pm_runtime.h>
 #include <linux/spinlock.h>
 
 #include "wlcore.h"
@@ -868,9 +869,11 @@ void wl1271_tx_work(struct work_struct *work)
 	int ret;
 
 	mutex_lock(&wl->mutex);
-	ret = wl1271_ps_elp_wakeup(wl);
-	if (ret < 0)
+	ret = pm_runtime_get_sync(wl->dev);
+	if (ret < 0) {
+		pm_runtime_put_noidle(wl->dev);
 		goto out;
+	}
 
 	ret = wlcore_tx_work_locked(wl);
 	if (ret < 0) {
@@ -878,7 +881,8 @@ void wl1271_tx_work(struct work_struct *work)
 		goto out;
 	}
 
-	wl1271_ps_elp_sleep(wl);
+	pm_runtime_mark_last_busy(wl->dev);
+	pm_runtime_put_autosuspend(wl->dev);
 out:
 	mutex_unlock(&wl->mutex);
 }
diff --git a/drivers/net/wireless/ti/wlcore/vendor_cmd.c b/drivers/net/wireless/ti/wlcore/vendor_cmd.c
index 5c0bcb1fe1a1..dbe78d8491ef 100644
--- a/drivers/net/wireless/ti/wlcore/vendor_cmd.c
+++ b/drivers/net/wireless/ti/wlcore/vendor_cmd.c
@@ -8,12 +8,13 @@
  * version 2 as published by the Free Software Foundation.
  */
 
+#include <linux/pm_runtime.h>
+
 #include <net/mac80211.h>
 #include <net/netlink.h>
 
 #include "wlcore.h"
 #include "debug.h"
-#include "ps.h"
 #include "hw_ops.h"
 #include "vendor_cmd.h"
 
@@ -55,14 +56,17 @@ wlcore_vendor_cmd_smart_config_start(struct wiphy *wiphy,
 		goto out;
 	}
 
-	ret = wl1271_ps_elp_wakeup(wl);
-	if (ret < 0)
+	ret = pm_runtime_get_sync(wl->dev);
+	if (ret < 0) {
+		pm_runtime_put_noidle(wl->dev);
 		goto out;
+	}
 
 	ret = wlcore_smart_config_start(wl,
 			nla_get_u32(tb[WLCORE_VENDOR_ATTR_GROUP_ID]));
 
-	wl1271_ps_elp_sleep(wl);
+	pm_runtime_mark_last_busy(wl->dev);
+	pm_runtime_put_autosuspend(wl->dev);
 
 out:
 	mutex_unlock(&wl->mutex);
 
@@ -87,13 +91,16 @@ wlcore_vendor_cmd_smart_config_stop(struct wiphy *wiphy,
 		goto out;
 	}
 
-	ret = wl1271_ps_elp_wakeup(wl);
-	if (ret < 0)
+	ret = pm_runtime_get_sync(wl->dev);
+	if (ret < 0) {
+		pm_runtime_put_noidle(wl->dev);
 		goto out;
+	}
 
 	ret = wlcore_smart_config_stop(wl);
 
-	wl1271_ps_elp_sleep(wl);
+	pm_runtime_mark_last_busy(wl->dev);
+	pm_runtime_put_autosuspend(wl->dev);
 
 out:
 	mutex_unlock(&wl->mutex);
 
@@ -131,16 +138,19 @@ wlcore_vendor_cmd_smart_config_set_group_key(struct wiphy *wiphy,
 		goto out;
 	}
 
-	ret = wl1271_ps_elp_wakeup(wl);
-	if (ret < 0)
+	ret = pm_runtime_get_sync(wl->dev);
+	if (ret < 0) {
+		pm_runtime_put_noidle(wl->dev);
 		goto out;
+	}
 
 	ret = wlcore_smart_config_set_group_key(wl,
 			nla_get_u32(tb[WLCORE_VENDOR_ATTR_GROUP_ID]),
 			nla_len(tb[WLCORE_VENDOR_ATTR_GROUP_KEY]),
 			nla_data(tb[WLCORE_VENDOR_ATTR_GROUP_KEY]));
 
-	wl1271_ps_elp_sleep(wl);
+	pm_runtime_mark_last_busy(wl->dev);
+	pm_runtime_put_autosuspend(wl->dev);
 
 out:
 	mutex_unlock(&wl->mutex);
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index 95fbedc8ea34..d4b1f66ef457 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -348,7 +348,6 @@ struct wl1271 {
 	enum nl80211_band band;
 
 	struct completion *elp_compl;
-	struct delayed_work elp_work;
 
 	/* in dBm */
 	int power_level;
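With elp_work gone from struct wl1271, nothing queues a delayed "go to sleep" request anymore; the autosuspend timer provides the same idle window. A sketch of the equivalence, assuming the old 30 ms ps.c constant for illustration (the probe hunk above actually picks 50 ms):

```c
#include <linux/device.h>
#include <linux/pm_runtime.h>

#define ELP_ENTRY_DELAY_EXAMPLE	30	/* ms, mirrors the old ps.c constant */

static void configure_idle_example(struct device *dev)
{
	/*
	 * After the final pm_runtime_put_autosuspend(), the PM core waits
	 * this long and then invokes the driver's runtime_suspend callback,
	 * which is the job wl1271_elp_work() used to do from its delayed
	 * work item.
	 */
	pm_runtime_set_autosuspend_delay(dev, ELP_ENTRY_DELAY_EXAMPLE);
	pm_runtime_use_autosuspend(dev);
}
```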
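Note where the put landed in the scan hunk above: it moved below wlcore_cmd_regdomain_config_locked(), because the usage count must stay held across the last firmware command. A sketch of that ordering rule, with do_fw_command_example() as a stand-in:

```c
#include <linux/device.h>
#include <linux/pm_runtime.h>

static int do_fw_command_example(struct device *dev)
{
	return 0;	/* stand-in for a real firmware command */
}

static int scan_complete_example(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	ret = do_fw_command_example(dev);		/* probe-req template */
	if (!ret)
		ret = do_fw_command_example(dev);	/* regdomain config */

	/* Only after the last command may the device go idle again. */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return ret;
}
```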
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index e840985385fc..32ec121ccac2 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore_i.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -233,7 +233,6 @@ enum wl12xx_flags {
 	WL1271_FLAG_TX_QUEUE_STOPPED,
 	WL1271_FLAG_TX_PENDING,
 	WL1271_FLAG_IN_ELP,
-	WL1271_FLAG_ELP_REQUESTED,
 	WL1271_FLAG_IRQ_RUNNING,
 	WL1271_FLAG_FW_TX_BUSY,
 	WL1271_FLAG_DUMMY_PACKET_PENDING,
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_chip.c b/drivers/net/wireless/zydas/zd1211rw/zd_chip.c
index 07b94eda9604..dd6a86b899eb 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_chip.c
@@ -1341,7 +1341,7 @@ int zd_chip_control_leds(struct zd_chip *chip, enum led_status status)
 	case ZD_LED_SCANNING:
 		ioreqs[0].value = FW_LINK_OFF;
 		ioreqs[1].value = v[1] & ~other_led;
-		if (get_seconds() % 3 == 0) {
+		if ((u32)ktime_get_seconds() % 3 == 0) {
 			ioreqs[1].value &= ~chip->link_led;
 		} else {
 			ioreqs[1].value |= chip->link_led;
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
index c30bf118c67d..c2cda3acd4af 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
@@ -371,25 +371,27 @@ static inline void handle_regs_int_override(struct urb *urb)
 {
 	struct zd_usb *usb = urb->context;
 	struct zd_usb_interrupt *intr = &usb->intr;
+	unsigned long flags;
 
-	spin_lock(&intr->lock);
+	spin_lock_irqsave(&intr->lock, flags);
 	if (atomic_read(&intr->read_regs_enabled)) {
 		atomic_set(&intr->read_regs_enabled, 0);
 		intr->read_regs_int_overridden = 1;
 		complete(&intr->read_regs.completion);
 	}
-	spin_unlock(&intr->lock);
+	spin_unlock_irqrestore(&intr->lock, flags);
 }
 
 static inline void handle_regs_int(struct urb *urb)
 {
 	struct zd_usb *usb = urb->context;
 	struct zd_usb_interrupt *intr = &usb->intr;
+	unsigned long flags;
 	int len;
 	u16 int_num;
 
 	ZD_ASSERT(in_interrupt());
-	spin_lock(&intr->lock);
+	spin_lock_irqsave(&intr->lock, flags);
 
 	int_num = le16_to_cpu(*(__le16 *)(urb->transfer_buffer+2));
 	if (int_num == CR_INTERRUPT) {
@@ -425,7 +427,7 @@ static inline void handle_regs_int(struct urb *urb)
 	}
 
 out:
-	spin_unlock(&intr->lock);
+	spin_unlock_irqrestore(&intr->lock, flags);
 
 	/* CR_INTERRUPT might override read_reg too. */
 	if (int_num == CR_INTERRUPT && atomic_read(&intr->read_regs_enabled))
@@ -665,6 +667,7 @@ static void rx_urb_complete(struct urb *urb)
 	struct zd_usb_rx *rx;
 	const u8 *buffer;
 	unsigned int length;
+	unsigned long flags;
 
 	switch (urb->status) {
 	case 0:
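The zd1211rw hunks switch plain spin_lock() to the irqsave variants because USB completion callbacks are no longer guaranteed to run with local interrupts disabled, so a lock that is also taken from an interrupting context could deadlock. (The zd_chip.c hunk is the unrelated y2038 cleanup, get_seconds() to ktime_get_seconds().) A sketch of the locking conversion, with my_complete_example() hypothetical:

```c
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

static void my_complete_example(void)
{
	unsigned long flags;

	/*
	 * irqsave is safe in any context: it disables local interrupts
	 * and remembers whether they were already off, restoring that
	 * exact state on unlock.
	 */
	spin_lock_irqsave(&example_lock, flags);
	/* ... touch state shared with the interrupt handler ... */
	spin_unlock_irqrestore(&example_lock, flags);
}
```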
@@ -693,14 +696,14 @@ static void rx_urb_complete(struct urb *urb)
 		/* If there is an old first fragment, we don't care. */
 		dev_dbg_f(urb_dev(urb), "*** first fragment ***\n");
 		ZD_ASSERT(length <= ARRAY_SIZE(rx->fragment));
-		spin_lock(&rx->lock);
+		spin_lock_irqsave(&rx->lock, flags);
 		memcpy(rx->fragment, buffer, length);
 		rx->fragment_length = length;
-		spin_unlock(&rx->lock);
+		spin_unlock_irqrestore(&rx->lock, flags);
 		goto resubmit;
 	}
 
-	spin_lock(&rx->lock);
+	spin_lock_irqsave(&rx->lock, flags);
 	if (rx->fragment_length > 0) {
 		/* We are on a second fragment, we believe */
 		ZD_ASSERT(length + rx->fragment_length <=
@@ -710,9 +713,9 @@ static void rx_urb_complete(struct urb *urb)
 		handle_rx_packet(usb, rx->fragment,
 				 rx->fragment_length + length);
 		rx->fragment_length = 0;
-		spin_unlock(&rx->lock);
+		spin_unlock_irqrestore(&rx->lock, flags);
 	} else {
-		spin_unlock(&rx->lock);
+		spin_unlock_irqrestore(&rx->lock, flags);
 		handle_rx_packet(usb, buffer, length);
 	}
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 78ebe494fef0..92274c237200 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -148,14 +148,14 @@ void xenvif_wake_queue(struct xenvif_queue *queue)
 }
 
 static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
-			       void *accel_priv,
+			       struct net_device *sb_dev,
 			       select_queue_fallback_t fallback)
 {
 	struct xenvif *vif = netdev_priv(dev);
 	unsigned int size = vif->hash.size;
 
 	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
-		return fallback(dev, skb) % dev->real_num_tx_queues;
+		return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
 
 	xenvif_set_skb_hash(vif, skb);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 2d8812dd1534..799cba4624a5 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -546,7 +546,8 @@ static int xennet_count_skb_slots(struct sk_buff *skb)
 }
 
 static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
-			       void *accel_priv, select_queue_fallback_t fallback)
+			       struct net_device *sb_dev,
+			       select_queue_fallback_t fallback)
 {
 	unsigned int num_queues = dev->real_num_tx_queues;
 	u32 hash;
@@ -1610,7 +1611,7 @@ static int xennet_init_queue(struct netfront_queue *queue)
 	timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0);
 
 	snprintf(queue->name, sizeof(queue->name), "%s-q%u",
-		 queue->info->netdev->name, queue->id);
+		 queue->info->xbdev->nodename, queue->id);
 
 	/* Initialise tx_skbs as a free chain containing every entry. */
 	queue->tx_skb_freelist = 0;
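The xen hunks track the tree-wide ndo_select_queue() prototype change in this kernel generation: the opaque accel_priv pointer becomes struct net_device *sb_dev, and the fallback helper grows the same third argument. A sketch of a callback written against the new signature (my_select_queue() is hypothetical; the modulo mirrors the xenvif hunk):

```c
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static u16 my_select_queue(struct net_device *dev, struct sk_buff *skb,
			   struct net_device *sb_dev,
			   select_queue_fallback_t fallback)
{
	/* Defer to the core's pick, now scoped to the subordinate device. */
	return fallback(dev, skb, sb_dev) % dev->real_num_tx_queues;
}
```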