Diffstat (limited to 'drivers')
539 files changed, 26078 insertions, 10810 deletions
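A note on the netlink churn that recurs throughout this diff: in this cycle nla_nest_start() started setting NLA_F_NESTED on the container attribute, so callers that must keep the old wire format move to nla_nest_start_noflag(); the lenient parsers likewise survive under *_deprecated names (nla_parse_deprecated(), nlmsg_parse_deprecated(), nla_parse_nested_deprecated(), ...), and generic netlink families hoist their per-op .policy into the family so the core validates attributes itself, with pre-existing commands opting out of strict validation. A condensed sketch of the target pattern; all MY_* names are hypothetical, the helpers are the real <net/netlink.h>/<net/genetlink.h> ones used by the hunks below:

	enum { MY_ATTR_UNSPEC, MY_ATTR_VALUE, MY_ATTR_NEST, __MY_ATTR_MAX };
	#define MY_ATTR_MAX (__MY_ATTR_MAX - 1)

	static const struct nla_policy my_policy[MY_ATTR_MAX + 1] = {
		[MY_ATTR_VALUE] = { .type = NLA_U32 },
		[MY_ATTR_NEST] = { .type = NLA_NESTED },
	};

	static int my_fill_reply(struct sk_buff *skb)
	{
		/* keep the old wire format: no NLA_F_NESTED on the container */
		struct nlattr *nest = nla_nest_start_noflag(skb, MY_ATTR_NEST);

		if (!nest)
			return -EMSGSIZE;
		if (nla_put_u32(skb, MY_ATTR_VALUE, 42)) {
			nla_nest_cancel(skb, nest);
			return -EMSGSIZE;
		}
		nla_nest_end(skb, nest);
		return 0;
	}

	static int my_doit(struct sk_buff *skb, struct genl_info *info);

	static const struct genl_ops my_ops[] = {
		{
			.cmd = 1,
			/* pre-existing command: keep the old, lenient checking */
			.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
			.doit = my_doit,
		},
	};

	static struct genl_family my_family __ro_after_init = {
		.name = "my_family",
		.version = 1,
		.maxattr = MY_ATTR_MAX,
		.policy = my_policy,	/* declared once, family-wide */
		.ops = my_ops,
		.n_ops = ARRAY_SIZE(my_ops),
	};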
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c index 82532c299bb5..5278c57dce73 100644 --- a/drivers/atm/iphase.c +++ b/drivers/atm/iphase.c @@ -2826,8 +2826,8 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg) case 0x6: { ia_cmds.status = 0; - printk("skb = 0x%lx\n", (long)skb_peek(&iadev->tx_backlog)); - printk("rtn_q: 0x%lx\n",(long)ia_deque_rtn_q(&iadev->tx_return_q)); + printk("skb = 0x%p\n", skb_peek(&iadev->tx_backlog)); + printk("rtn_q: 0x%p\n",ia_deque_rtn_q(&iadev->tx_return_q)); } break; case 0x8: diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index f2471172a961..1cb5a0b85fd9 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -114,7 +114,7 @@ static int drbd_msg_put_info(struct sk_buff *skb, const char *info) if (!info || !info[0]) return 0; - nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY); + nla = nla_nest_start_noflag(skb, DRBD_NLA_CFG_REPLY); if (!nla) return err; @@ -135,7 +135,7 @@ static int drbd_msg_sprintf_info(struct sk_buff *skb, const char *fmt, ...) int err = -EMSGSIZE; int len; - nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY); + nla = nla_nest_start_noflag(skb, DRBD_NLA_CFG_REPLY); if (!nla) return err; @@ -3269,7 +3269,7 @@ static int nla_put_drbd_cfg_context(struct sk_buff *skb, struct drbd_device *device) { struct nlattr *nla; - nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT); + nla = nla_nest_start_noflag(skb, DRBD_NLA_CFG_CONTEXT); if (!nla) goto nla_put_failure; if (device && @@ -3837,7 +3837,7 @@ static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device, if (err) goto nla_put_failure; - nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO); + nla = nla_nest_start_noflag(skb, DRBD_NLA_STATE_INFO); if (!nla) goto nla_put_failure; if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) || diff --git a/drivers/block/drbd/drbd_nla.c b/drivers/block/drbd/drbd_nla.c index 8e261cb5198b..6a09b0b98018 100644 --- a/drivers/block/drbd/drbd_nla.c +++ b/drivers/block/drbd/drbd_nla.c @@ -35,7 +35,8 @@ int drbd_nla_parse_nested(struct nlattr *tb[], int maxtype, struct nlattr *nla, err = drbd_nla_check_mandatory(maxtype, nla); if (!err) - err = nla_parse_nested(tb, maxtype, nla, policy, NULL); + err = nla_parse_nested_deprecated(tb, maxtype, nla, policy, + NULL); return err; } diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 90ba9f4c03f3..053958a8a2ba 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -44,6 +44,9 @@ #include <linux/nbd-netlink.h> #include <net/genetlink.h> +#define CREATE_TRACE_POINTS +#include <trace/events/nbd.h> + static DEFINE_IDR(nbd_index_idr); static DEFINE_MUTEX(nbd_index_mutex); static int nbd_total_devices = 0; @@ -510,6 +513,10 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) if (sent) { if (sent >= sizeof(request)) { skip = sent - sizeof(request); + + /* initialize handle for tracing purposes */ + handle = nbd_cmd_handle(cmd); + goto send_pages; } iov_iter_advance(&from, sent); @@ -526,11 +533,14 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) handle = nbd_cmd_handle(cmd); memcpy(request.handle, &handle, sizeof(handle)); + trace_nbd_send_request(&request, nbd->index, blk_mq_rq_from_pdu(cmd)); + dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n", req, nbdcmd_to_ascii(type), (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req)); result = sock_xmit(nbd, index, 1, &from, (type == NBD_CMD_WRITE) ? 
MSG_MORE : 0, &sent); + trace_nbd_header_sent(req, handle); if (result <= 0) { if (was_interrupted(result)) { /* If we haven't sent anything we can just return BUSY, @@ -603,6 +613,7 @@ send_pages: bio = next; } out: + trace_nbd_payload_sent(req, handle); nsock->pending = NULL; nsock->sent = 0; return 0; @@ -650,6 +661,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index) tag, req); return ERR_PTR(-ENOENT); } + trace_nbd_header_received(req, handle); cmd = blk_mq_rq_to_pdu(req); mutex_lock(&cmd->lock); @@ -703,6 +715,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index) } } out: + trace_nbd_payload_received(req, handle); mutex_unlock(&cmd->lock); return ret ? ERR_PTR(ret) : cmd; } @@ -1797,8 +1810,10 @@ again: ret = -EINVAL; goto out; } - ret = nla_parse_nested(socks, NBD_SOCK_MAX, attr, - nbd_sock_policy, info->extack); + ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX, + attr, + nbd_sock_policy, + info->extack); if (ret != 0) { printk(KERN_ERR "nbd: error processing sock list\n"); ret = -EINVAL; @@ -1968,8 +1983,10 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info) ret = -EINVAL; goto out; } - ret = nla_parse_nested(socks, NBD_SOCK_MAX, attr, - nbd_sock_policy, info->extack); + ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX, + attr, + nbd_sock_policy, + info->extack); if (ret != 0) { printk(KERN_ERR "nbd: error processing sock list\n"); ret = -EINVAL; @@ -1999,22 +2016,22 @@ out: static const struct genl_ops nbd_connect_genl_ops[] = { { .cmd = NBD_CMD_CONNECT, - .policy = nbd_attr_policy, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nbd_genl_connect, }, { .cmd = NBD_CMD_DISCONNECT, - .policy = nbd_attr_policy, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nbd_genl_disconnect, }, { .cmd = NBD_CMD_RECONFIGURE, - .policy = nbd_attr_policy, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nbd_genl_reconfigure, }, { .cmd = NBD_CMD_STATUS, - .policy = nbd_attr_policy, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nbd_genl_status, }, }; @@ -2031,6 +2048,7 @@ static struct genl_family nbd_genl_family __ro_after_init = { .ops = nbd_connect_genl_ops, .n_ops = ARRAY_SIZE(nbd_connect_genl_ops), .maxattr = NBD_ATTR_MAX, + .policy = nbd_attr_policy, .mcgrps = nbd_mcast_grps, .n_mcgrps = ARRAY_SIZE(nbd_mcast_grps), }; @@ -2050,7 +2068,7 @@ static int populate_nbd_status(struct nbd_device *nbd, struct sk_buff *reply) */ if (refcount_read(&nbd->config_refs)) connected = 1; - dev_opt = nla_nest_start(reply, NBD_DEVICE_ITEM); + dev_opt = nla_nest_start_noflag(reply, NBD_DEVICE_ITEM); if (!dev_opt) return -EMSGSIZE; ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index); @@ -2098,7 +2116,7 @@ static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info) goto out; } - dev_list = nla_nest_start(reply, NBD_ATTR_DEVICE_LIST); + dev_list = nla_nest_start_noflag(reply, NBD_ATTR_DEVICE_LIST); if (index == -1) { ret = idr_for_each(&nbd_index_idr, &status_cb, reply); if (ret) { diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig index 7b2e76e7f22f..b9c34ff9a0d3 100644 --- a/drivers/bluetooth/Kconfig +++ b/drivers/bluetooth/Kconfig @@ -336,7 +336,7 @@ config BT_MRVL The core driver to support Marvell Bluetooth devices. This driver is required if you want to support - Marvell Bluetooth devices, such as 8688/8787/8797/8887/8897/8977/8997.
+ Marvell Bluetooth devices, such as 8688/8787/8797/8887/8897/8977/8987/8997. Say Y here to compile Marvell Bluetooth driver into the kernel or say M to compile it as module. @@ -350,7 +350,7 @@ config BT_MRVL_SDIO The driver for Marvell Bluetooth chipsets with SDIO interface. This driver is required if you want to use Marvell Bluetooth - devices with SDIO interface. Currently SD8688/SD8787/SD8797/SD8887/SD8897/SD8977/SD8997 + devices with SDIO interface. Currently SD8688/SD8787/SD8797/SD8887/SD8897/SD8977/SD8987/SD8997 chipsets are supported. Say Y here to compile support for Marvell BT-over-SDIO driver @@ -379,6 +379,17 @@ config BT_WILINK Say Y here to compile support for Texas Instrument's WiLink7 driver into the kernel or say M to compile it as module (btwilink). +config BT_MTKSDIO + tristate "MediaTek HCI SDIO driver" + depends on MMC + help + MediaTek Bluetooth HCI SDIO driver. + This driver is required if you want to use MediaTek Bluetooth + with SDIO interface. + + Say Y here to compile support for MediaTek Bluetooth SDIO devices + into the kernel or say M to compile it as module (btmtksdio). + config BT_MTKUART tristate "MediaTek HCI UART driver" depends on SERIAL_DEV_BUS diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile index b7e393cfc1e3..34887b9b3a85 100644 --- a/drivers/bluetooth/Makefile +++ b/drivers/bluetooth/Makefile @@ -20,6 +20,7 @@ obj-$(CONFIG_BT_ATH3K) += ath3k.o obj-$(CONFIG_BT_MRVL) += btmrvl.o obj-$(CONFIG_BT_MRVL_SDIO) += btmrvl_sdio.o obj-$(CONFIG_BT_WILINK) += btwilink.o +obj-$(CONFIG_BT_MTKSDIO) += btmtksdio.o obj-$(CONFIG_BT_MTKUART) += btmtkuart.o obj-$(CONFIG_BT_QCOMSMD) += btqcomsmd.o obj-$(CONFIG_BT_BCM) += btbcm.o diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c index d5d6e6e5da3b..71e74ec08310 100644 --- a/drivers/bluetooth/btbcm.c +++ b/drivers/bluetooth/btbcm.c @@ -37,6 +37,7 @@ #define BDADDR_BCM43430A0 (&(bdaddr_t) {{0xac, 0x1f, 0x12, 0xa0, 0x43, 0x43}}) #define BDADDR_BCM4324B3 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb3, 0x24, 0x43}}) #define BDADDR_BCM4330B1 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb1, 0x30, 0x43}}) +#define BDADDR_BCM43341B (&(bdaddr_t) {{0xac, 0x1f, 0x00, 0x1b, 0x34, 0x43}}) int btbcm_check_bdaddr(struct hci_dev *hdev) { @@ -82,7 +83,8 @@ int btbcm_check_bdaddr(struct hci_dev *hdev) !bacmp(&bda->bdaddr, BDADDR_BCM20702A1) || !bacmp(&bda->bdaddr, BDADDR_BCM4324B3) || !bacmp(&bda->bdaddr, BDADDR_BCM4330B1) || - !bacmp(&bda->bdaddr, BDADDR_BCM43430A0)) { + !bacmp(&bda->bdaddr, BDADDR_BCM43430A0) || + !bacmp(&bda->bdaddr, BDADDR_BCM43341B)) { bt_dev_info(hdev, "BCM: Using default device address (%pMR)", &bda->bdaddr); set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks); @@ -333,6 +335,7 @@ struct bcm_subver_table { static const struct bcm_subver_table bcm_uart_subver_table[] = { { 0x4103, "BCM4330B1" }, /* 002.001.003 */ { 0x410e, "BCM43341B0" }, /* 002.001.014 */ + { 0x4204, "BCM2076B1" }, /* 002.002.004 */ { 0x4406, "BCM4324B3" }, /* 002.004.006 */ { 0x6109, "BCM4335C0" }, /* 003.001.009 */ { 0x610c, "BCM4354" }, /* 003.001.012 */ diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c index 047b75ce1deb..0f3a020703ab 100644 --- a/drivers/bluetooth/btmrvl_sdio.c +++ b/drivers/bluetooth/btmrvl_sdio.c @@ -235,6 +235,29 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_8977 = { .fw_dump_end = 0xf8, }; +static const struct btmrvl_sdio_card_reg btmrvl_reg_8987 = { + .cfg = 0x00, + .host_int_mask = 0x08, + .host_intstatus = 0x0c, + .card_status = 0x5c, + .sq_read_base_addr_a0 = 0xf8, + 
.sq_read_base_addr_a1 = 0xf9, + .card_revision = 0xc8, + .card_fw_status0 = 0xe8, + .card_fw_status1 = 0xe9, + .card_rx_len = 0xea, + .card_rx_unit = 0xeb, + .io_port_0 = 0xe4, + .io_port_1 = 0xe5, + .io_port_2 = 0xe6, + .int_read_to_clear = true, + .host_int_rsr = 0x04, + .card_misc_cfg = 0xd8, + .fw_dump_ctrl = 0xf0, + .fw_dump_start = 0xf1, + .fw_dump_end = 0xf8, +}; + static const struct btmrvl_sdio_card_reg btmrvl_reg_8997 = { .cfg = 0x00, .host_int_mask = 0x08, @@ -312,6 +335,15 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8977 = { .supports_fw_dump = true, }; +static const struct btmrvl_sdio_device btmrvl_sdio_sd8987 = { + .helper = NULL, + .firmware = "mrvl/sd8987_uapsta.bin", + .reg = &btmrvl_reg_8987, + .support_pscan_win_report = true, + .sd_blksz_fw_dl = 256, + .supports_fw_dump = true, +}; + static const struct btmrvl_sdio_device btmrvl_sdio_sd8997 = { .helper = NULL, .firmware = "mrvl/sd8997_uapsta.bin", @@ -343,6 +375,9 @@ static const struct sdio_device_id btmrvl_sdio_ids[] = { /* Marvell SD8977 Bluetooth device */ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9146), .driver_data = (unsigned long)&btmrvl_sdio_sd8977 }, + /* Marvell SD8987 Bluetooth device */ + { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x914A), + .driver_data = (unsigned long)&btmrvl_sdio_sd8987 }, /* Marvell SD8997 Bluetooth device */ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9142), .driver_data = (unsigned long)&btmrvl_sdio_sd8997 }, @@ -1797,4 +1832,5 @@ MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin"); MODULE_FIRMWARE("mrvl/sd8887_uapsta.bin"); MODULE_FIRMWARE("mrvl/sd8897_uapsta.bin"); MODULE_FIRMWARE("mrvl/sd8977_uapsta.bin"); +MODULE_FIRMWARE("mrvl/sd8987_uapsta.bin"); MODULE_FIRMWARE("mrvl/sd8997_uapsta.bin"); diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c new file mode 100644 index 000000000000..813338288453 --- /dev/null +++ b/drivers/bluetooth/btmtksdio.c @@ -0,0 +1,1101 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2019 MediaTek Inc. + +/* + * Bluetooth support for MediaTek SDIO devices + * + * This file is written based on btsdio.c and btmtkuart.c. 
+ * + * Author: Sean Wang <sean.wang@mediatek.com> + * + */ + +#include <asm/unaligned.h> +#include <linux/atomic.h> +#include <linux/firmware.h> +#include <linux/init.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pm_runtime.h> +#include <linux/skbuff.h> + +#include <linux/mmc/host.h> +#include <linux/mmc/sdio_ids.h> +#include <linux/mmc/sdio_func.h> + +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci_core.h> + +#include "h4_recv.h" + +#define VERSION "0.1" + +#define FIRMWARE_MT7663 "mediatek/mt7663pr2h.bin" +#define FIRMWARE_MT7668 "mediatek/mt7668pr2h.bin" + +#define MTKBTSDIO_AUTOSUSPEND_DELAY 8000 + +static bool enable_autosuspend; + +struct btmtksdio_data { + const char *fwname; +}; + +static const struct btmtksdio_data mt7663_data = { + .fwname = FIRMWARE_MT7663, +}; + +static const struct btmtksdio_data mt7668_data = { + .fwname = FIRMWARE_MT7668, +}; + +static const struct sdio_device_id btmtksdio_table[] = { + {SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, 0x7663), + .driver_data = (kernel_ulong_t)&mt7663_data }, + {SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, 0x7668), + .driver_data = (kernel_ulong_t)&mt7668_data }, + { } /* Terminating entry */ +}; + +#define MTK_REG_CHLPCR 0x4 /* W1S */ +#define C_INT_EN_SET BIT(0) +#define C_INT_EN_CLR BIT(1) +#define C_FW_OWN_REQ_SET BIT(8) /* For write */ +#define C_COM_DRV_OWN BIT(8) /* For read */ +#define C_FW_OWN_REQ_CLR BIT(9) + +#define MTK_REG_CSDIOCSR 0x8 +#define SDIO_RE_INIT_EN BIT(0) +#define SDIO_INT_CTL BIT(2) + +#define MTK_REG_CHCR 0xc +#define C_INT_CLR_CTRL BIT(1) + +/* CHISR has the same bit field definition as CHIER */ +#define MTK_REG_CHISR 0x10 +#define MTK_REG_CHIER 0x14 +#define FW_OWN_BACK_INT BIT(0) +#define RX_DONE_INT BIT(1) +#define TX_EMPTY BIT(2) +#define TX_FIFO_OVERFLOW BIT(8) +#define RX_PKT_LEN GENMASK(31, 16) + +#define MTK_REG_CTDR 0x18 + +#define MTK_REG_CRDR 0x1c + +#define MTK_SDIO_BLOCK_SIZE 256 + +#define BTMTKSDIO_TX_WAIT_VND_EVT 1 + +enum { + MTK_WMT_PATCH_DWNLD = 0x1, + MTK_WMT_TEST = 0x2, + MTK_WMT_WAKEUP = 0x3, + MTK_WMT_HIF = 0x4, + MTK_WMT_FUNC_CTRL = 0x6, + MTK_WMT_RST = 0x7, + MTK_WMT_SEMAPHORE = 0x17, +}; + +enum { + BTMTK_WMT_INVALID, + BTMTK_WMT_PATCH_UNDONE, + BTMTK_WMT_PATCH_DONE, + BTMTK_WMT_ON_UNDONE, + BTMTK_WMT_ON_DONE, + BTMTK_WMT_ON_PROGRESS, +}; + +struct mtkbtsdio_hdr { + __le16 len; + __le16 reserved; + u8 bt_type; +} __packed; + +struct mtk_wmt_hdr { + u8 dir; + u8 op; + __le16 dlen; + u8 flag; +} __packed; + +struct mtk_hci_wmt_cmd { + struct mtk_wmt_hdr hdr; + u8 data[256]; +} __packed; + +struct btmtk_hci_wmt_evt { + struct hci_event_hdr hhdr; + struct mtk_wmt_hdr whdr; +} __packed; + +struct btmtk_hci_wmt_evt_funcc { + struct btmtk_hci_wmt_evt hwhdr; + __be16 status; +} __packed; + +struct btmtk_tci_sleep { + u8 mode; + __le16 duration; + __le16 host_duration; + u8 host_wakeup_pin; + u8 time_compensation; +} __packed; + +struct btmtk_hci_wmt_params { + u8 op; + u8 flag; + u16 dlen; + const void *data; + u32 *status; +}; + +struct btmtksdio_dev { + struct hci_dev *hdev; + struct sdio_func *func; + struct device *dev; + + struct work_struct tx_work; + unsigned long tx_state; + struct sk_buff_head txq; + + struct sk_buff *evt_skb; + + const struct btmtksdio_data *data; +}; + +static int mtk_hci_wmt_sync(struct hci_dev *hdev, + struct btmtk_hci_wmt_params *wmt_params) +{ + struct btmtksdio_dev *bdev = hci_get_drvdata(hdev); + struct btmtk_hci_wmt_evt_funcc *wmt_evt_funcc; + u32 hlen, status = BTMTK_WMT_INVALID; +
struct btmtk_hci_wmt_evt *wmt_evt; + struct mtk_hci_wmt_cmd wc; + struct mtk_wmt_hdr *hdr; + int err; + + hlen = sizeof(*hdr) + wmt_params->dlen; + if (hlen > 255) + return -EINVAL; + + hdr = (struct mtk_wmt_hdr *)&wc; + hdr->dir = 1; + hdr->op = wmt_params->op; + hdr->dlen = cpu_to_le16(wmt_params->dlen + 1); + hdr->flag = wmt_params->flag; + memcpy(wc.data, wmt_params->data, wmt_params->dlen); + + set_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state); + + err = __hci_cmd_send(hdev, 0xfc6f, hlen, &wc); + if (err < 0) { + clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state); + return err; + } + + /* The vendor specific WMT commands are all answered by a vendor + * specific event and will not have the Command Status or Command + * Complete as with usual HCI command flow control. + * + * After sending the command, wait for BTMTKSDIO_TX_WAIT_VND_EVT + * state to be cleared. The driver specific event receive routine + * will clear that state and with that indicate completion of the + * WMT command. + */ + err = wait_on_bit_timeout(&bdev->tx_state, BTMTKSDIO_TX_WAIT_VND_EVT, + TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT); + if (err == -EINTR) { + bt_dev_err(hdev, "Execution of wmt command interrupted"); + clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state); + return err; + } + + if (err) { + bt_dev_err(hdev, "Execution of wmt command timed out"); + clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state); + return -ETIMEDOUT; + } + + /* Parse and handle the returned WMT event */ + wmt_evt = (struct btmtk_hci_wmt_evt *)bdev->evt_skb->data; + if (wmt_evt->whdr.op != hdr->op) { + bt_dev_err(hdev, "Wrong op received %d expected %d", + wmt_evt->whdr.op, hdr->op); + err = -EIO; + goto err_free_skb; + } + + switch (wmt_evt->whdr.op) { + case MTK_WMT_SEMAPHORE: + if (wmt_evt->whdr.flag == 2) + status = BTMTK_WMT_PATCH_UNDONE; + else + status = BTMTK_WMT_PATCH_DONE; + break; + case MTK_WMT_FUNC_CTRL: + wmt_evt_funcc = (struct btmtk_hci_wmt_evt_funcc *)wmt_evt; + if (be16_to_cpu(wmt_evt_funcc->status) == 0x404) + status = BTMTK_WMT_ON_DONE; + else if (be16_to_cpu(wmt_evt_funcc->status) == 0x420) + status = BTMTK_WMT_ON_PROGRESS; + else + status = BTMTK_WMT_ON_UNDONE; + break; + } + + if (wmt_params->status) + *wmt_params->status = status; + +err_free_skb: + kfree_skb(bdev->evt_skb); + bdev->evt_skb = NULL; + + return err; +} + +static int btmtksdio_tx_packet(struct btmtksdio_dev *bdev, + struct sk_buff *skb) +{ + struct mtkbtsdio_hdr *sdio_hdr; + int err; + + /* Make sure that there is enough room for the SDIO header */ + if (unlikely(skb_headroom(skb) < sizeof(*sdio_hdr))) { + err = pskb_expand_head(skb, sizeof(*sdio_hdr), 0, + GFP_ATOMIC); + if (err < 0) + return err; + } + + /* Prepend MediaTek SDIO Specific Header */ + skb_push(skb, sizeof(*sdio_hdr)); + + sdio_hdr = (void *)skb->data; + sdio_hdr->len = cpu_to_le16(skb->len); + sdio_hdr->reserved = cpu_to_le16(0); + sdio_hdr->bt_type = hci_skb_pkt_type(skb); + + err = sdio_writesb(bdev->func, MTK_REG_CTDR, skb->data, + round_up(skb->len, MTK_SDIO_BLOCK_SIZE)); + if (err < 0) + goto err_skb_pull; + + bdev->hdev->stat.byte_tx += skb->len; + + kfree_skb(skb); + + return 0; + +err_skb_pull: + skb_pull(skb, sizeof(*sdio_hdr)); + + return err; +} + +static u32 btmtksdio_drv_own_query(struct btmtksdio_dev *bdev) +{ + return sdio_readl(bdev->func, MTK_REG_CHLPCR, NULL); +} + +static void btmtksdio_tx_work(struct work_struct *work) +{ + struct btmtksdio_dev *bdev = container_of(work, struct btmtksdio_dev, + tx_work); + struct sk_buff *skb; + int err; + +
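The completion handshake used by mtk_hci_wmt_sync() above, and closed out by btmtksdio_recv_event() below, distills to a wait-bit pairing; a minimal sketch, with 'state' and WAIT_BIT standing in for bdev->tx_state and BTMTKSDIO_TX_WAIT_VND_EVT:

	/* sender side: flag the wait, send, then sleep until the event
	 * path clears the bit or HCI_INIT_TIMEOUT expires
	 */
	static int wmt_wait_for_event(unsigned long *state)
	{
		return wait_on_bit_timeout(state, WAIT_BIT, TASK_INTERRUPTIBLE,
					   HCI_INIT_TIMEOUT);
	}

	/* event side: clear the bit, then wake any waiter; the barrier
	 * orders the clear before the wakeup's wait-queue check
	 */
	static void wmt_event_arrived(unsigned long *state)
	{
		if (test_and_clear_bit(WAIT_BIT, state)) {
			smp_mb__after_atomic();
			wake_up_bit(state, WAIT_BIT);
		}
	}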
pm_runtime_get_sync(bdev->dev); + + sdio_claim_host(bdev->func); + + while ((skb = skb_dequeue(&bdev->txq))) { + err = btmtksdio_tx_packet(bdev, skb); + if (err < 0) { + bdev->hdev->stat.err_tx++; + skb_queue_head(&bdev->txq, skb); + break; + } + } + + sdio_release_host(bdev->func); + + pm_runtime_mark_last_busy(bdev->dev); + pm_runtime_put_autosuspend(bdev->dev); +} + +static int btmtksdio_recv_event(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct btmtksdio_dev *bdev = hci_get_drvdata(hdev); + struct hci_event_hdr *hdr = (void *)skb->data; + int err; + + /* Fix up the vendor event id with 0xff for vendor specific instead + * of 0xe4 so that events sent via the monitoring socket can be + * parsed properly. + */ + if (hdr->evt == 0xe4) + hdr->evt = HCI_EV_VENDOR; + + /* When someone waits for the WMT event, the skb is cloned and the + * event is processed from the clone. + */ + if (test_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state)) { + bdev->evt_skb = skb_clone(skb, GFP_KERNEL); + if (!bdev->evt_skb) { + err = -ENOMEM; + goto err_out; + } + } + + err = hci_recv_frame(hdev, skb); + if (err < 0) + goto err_free_skb; + + if (hdr->evt == HCI_EV_VENDOR) { + if (test_and_clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, + &bdev->tx_state)) { + /* Barrier to sync with other CPUs */ + smp_mb__after_atomic(); + wake_up_bit(&bdev->tx_state, BTMTKSDIO_TX_WAIT_VND_EVT); + } + } + + return 0; + +err_free_skb: + kfree_skb(bdev->evt_skb); + bdev->evt_skb = NULL; + +err_out: + return err; +} + +static const struct h4_recv_pkt mtk_recv_pkts[] = { + { H4_RECV_ACL, .recv = hci_recv_frame }, + { H4_RECV_SCO, .recv = hci_recv_frame }, + { H4_RECV_EVENT, .recv = btmtksdio_recv_event }, +}; + +static int btmtksdio_rx_packet(struct btmtksdio_dev *bdev, u16 rx_size) +{ + const struct h4_recv_pkt *pkts = mtk_recv_pkts; + int pkts_count = ARRAY_SIZE(mtk_recv_pkts); + struct mtkbtsdio_hdr *sdio_hdr; + int err, i, pad_size; + struct sk_buff *skb; + u16 dlen; + + if (rx_size < sizeof(*sdio_hdr)) + return -EILSEQ; + + /* An SDIO packet contains exactly one Bluetooth packet */ + skb = bt_skb_alloc(rx_size, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + skb_put(skb, rx_size); + + err = sdio_readsb(bdev->func, skb->data, MTK_REG_CRDR, rx_size); + if (err < 0) + goto err_kfree_skb; + + sdio_hdr = (void *)skb->data; + + /* We assume -EILSEQ as the default error simply to make the error + * path cleaner. + */ + err = -EILSEQ; + + if (rx_size != le16_to_cpu(sdio_hdr->len)) { + bt_dev_err(bdev->hdev, "Rx size in sdio header is mismatched "); + goto err_kfree_skb; + } + + hci_skb_pkt_type(skb) = sdio_hdr->bt_type; + + /* Remove MediaTek SDIO header */ + skb_pull(skb, sizeof(*sdio_hdr)); + + /* We have to dig into the packet to get the payload size and then know + * how many padding bytes are at the tail; these padding bytes should be + * removed before the packet is indicated to the core layer.
+ */ + for (i = 0; i < pkts_count; i++) { + if (sdio_hdr->bt_type == (&pkts[i])->type) + break; + } + + if (i >= pkts_count) { + bt_dev_err(bdev->hdev, "Invalid bt type 0x%02x", + sdio_hdr->bt_type); + goto err_kfree_skb; + } + + /* Remaining bytes cannot hold a header */ + if (skb->len < (&pkts[i])->hlen) { + bt_dev_err(bdev->hdev, "The size of bt header is mismatched"); + goto err_kfree_skb; + } + + switch ((&pkts[i])->lsize) { + case 1: + dlen = skb->data[(&pkts[i])->loff]; + break; + case 2: + dlen = get_unaligned_le16(skb->data + + (&pkts[i])->loff); + break; + default: + goto err_kfree_skb; + } + + pad_size = skb->len - (&pkts[i])->hlen - dlen; + + /* Remaining bytes cannot hold a payload */ + if (pad_size < 0) { + bt_dev_err(bdev->hdev, "The size of bt payload is mismatched"); + goto err_kfree_skb; + } + + /* Remove padding bytes */ + skb_trim(skb, skb->len - pad_size); + + /* Complete frame */ + (&pkts[i])->recv(bdev->hdev, skb); + + bdev->hdev->stat.byte_rx += rx_size; + + return 0; + +err_kfree_skb: + kfree_skb(skb); + + return err; +} + +static void btmtksdio_interrupt(struct sdio_func *func) +{ + struct btmtksdio_dev *bdev = sdio_get_drvdata(func); + u32 int_status; + u16 rx_size; + + /* It is required that the host gets ownership from the device before + * accessing any register, however, if SDIO host is not being released, + * a potential deadlock can happen in a circular wait between SDIO + * IRQ work and PM runtime work. So, we have to explicitly release SDIO + * host here and claim again after the PM runtime work is all done. + */ + sdio_release_host(bdev->func); + + pm_runtime_get_sync(bdev->dev); + + sdio_claim_host(bdev->func); + + /* Disable interrupt */ + sdio_writel(func, C_INT_EN_CLR, MTK_REG_CHLPCR, 0); + + int_status = sdio_readl(func, MTK_REG_CHISR, NULL); + + /* Ack an interrupt as soon as possible before any operation on + * hardware. + * + * Note that we don't ack any status during operations to avoid race + * condition between the host and the device such as it's possible to + * mistakenly ack RX_DONE for the next packet and then cause interrupts + * not to be raised again while there is still pending data in the hardware + * FIFO.
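The descriptor walk above leans on the h4_recv_pkt metadata from h4_recv.h: hlen is the per-type header size, and a length field of lsize bytes sits at offset loff. Pulled out into a helper, the extraction reads as follows (a sketch; the ACL/SCO/event values quoted are the H4_RECV_* defaults):

	/* ACL: 4-byte header, le16 length at offset 2; SCO: 3-byte header,
	 * u8 length at offset 2; event: 2-byte header, u8 length at offset 1
	 */
	static u16 h4_pkt_payload_len(const struct h4_recv_pkt *pkt,
				      const u8 *data)
	{
		switch (pkt->lsize) {
		case 1:
			return data[pkt->loff];
		case 2:
			return get_unaligned_le16(data + pkt->loff);
		default:
			return 0;
		}
	}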
+ */ + sdio_writel(func, int_status, MTK_REG_CHISR, NULL); + + if (unlikely(!int_status)) + bt_dev_err(bdev->hdev, "CHISR is 0"); + + if (int_status & FW_OWN_BACK_INT) + bt_dev_dbg(bdev->hdev, "Get fw own back"); + + if (int_status & TX_EMPTY) + schedule_work(&bdev->tx_work); + else if (unlikely(int_status & TX_FIFO_OVERFLOW)) + bt_dev_warn(bdev->hdev, "Tx fifo overflow"); + + if (int_status & RX_DONE_INT) { + rx_size = (int_status & RX_PKT_LEN) >> 16; + + if (btmtksdio_rx_packet(bdev, rx_size) < 0) + bdev->hdev->stat.err_rx++; + } + + /* Enable interrupt */ + sdio_writel(func, C_INT_EN_SET, MTK_REG_CHLPCR, 0); + + pm_runtime_mark_last_busy(bdev->dev); + pm_runtime_put_autosuspend(bdev->dev); +} + +static int btmtksdio_open(struct hci_dev *hdev) +{ + struct btmtksdio_dev *bdev = hci_get_drvdata(hdev); + int err; + u32 status; + + sdio_claim_host(bdev->func); + + err = sdio_enable_func(bdev->func); + if (err < 0) + goto err_release_host; + + /* Get ownership from the device */ + sdio_writel(bdev->func, C_FW_OWN_REQ_CLR, MTK_REG_CHLPCR, &err); + if (err < 0) + goto err_disable_func; + + err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status, + status & C_COM_DRV_OWN, 2000, 1000000); + if (err < 0) { + bt_dev_err(bdev->hdev, "Cannot get ownership from device"); + goto err_disable_func; + } + + /* Disable interrupt & mask out all interrupt sources */ + sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, &err); + if (err < 0) + goto err_disable_func; + + sdio_writel(bdev->func, 0, MTK_REG_CHIER, &err); + if (err < 0) + goto err_disable_func; + + err = sdio_claim_irq(bdev->func, btmtksdio_interrupt); + if (err < 0) + goto err_disable_func; + + err = sdio_set_block_size(bdev->func, MTK_SDIO_BLOCK_SIZE); + if (err < 0) + goto err_release_irq; + + /* SDIO CMD5 allows the SDIO device to return to idle state, and + * synchronous interrupt is supported in SDIO 4-bit mode. + */ + sdio_writel(bdev->func, SDIO_INT_CTL | SDIO_RE_INIT_EN, + MTK_REG_CSDIOCSR, &err); + if (err < 0) + goto err_release_irq; + + /* Setup write-1-clear for CHISR register */ + sdio_writel(bdev->func, C_INT_CLR_CTRL, MTK_REG_CHCR, &err); + if (err < 0) + goto err_release_irq; + + /* Setup interrupt sources */ + sdio_writel(bdev->func, RX_DONE_INT | TX_EMPTY | TX_FIFO_OVERFLOW, + MTK_REG_CHIER, &err); + if (err < 0) + goto err_release_irq; + + /* Enable interrupt */ + sdio_writel(bdev->func, C_INT_EN_SET, MTK_REG_CHLPCR, &err); + if (err < 0) + goto err_release_irq; + + sdio_release_host(bdev->func); + + return 0; + +err_release_irq: + sdio_release_irq(bdev->func); + +err_disable_func: + sdio_disable_func(bdev->func); + +err_release_host: + sdio_release_host(bdev->func); + + return err; +} + +static int btmtksdio_close(struct hci_dev *hdev) +{ + struct btmtksdio_dev *bdev = hci_get_drvdata(hdev); + u32 status; + int err; + + sdio_claim_host(bdev->func); + + /* Disable interrupt */ + sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, NULL); + + sdio_release_irq(bdev->func); + + /* Return ownership to the device */ + sdio_writel(bdev->func, C_FW_OWN_REQ_SET, MTK_REG_CHLPCR, NULL); + + err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status, + !(status & C_COM_DRV_OWN), 2000, 1000000); + if (err < 0) + bt_dev_err(bdev->hdev, "Cannot return ownership to device"); + + sdio_disable_func(bdev->func); + + sdio_release_host(bdev->func); + + return 0; +} + +static int btmtksdio_flush(struct hci_dev *hdev) +{ + struct btmtksdio_dev *bdev = hci_get_drvdata(hdev); + + skb_queue_purge(&bdev->txq); + +
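btmtksdio_open() and btmtksdio_close() above bracket all register access with a firmware-ownership handshake, polled through readx_poll_timeout() from <linux/iopoll.h>. Open-coded, the open() call amounts to this (a hypothetical helper, same 2 ms poll interval and 1 s budget):

	static int btmtksdio_wait_drv_own(struct btmtksdio_dev *bdev)
	{
		unsigned long expire = jiffies + msecs_to_jiffies(1000);
		u32 status;

		for (;;) {
			status = btmtksdio_drv_own_query(bdev);
			if (status & C_COM_DRV_OWN)
				return 0;	/* host owns the interface */
			if (time_after(jiffies, expire))
				return -ETIMEDOUT;
			usleep_range(2000, 2500);
		}
	}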
cancel_work_sync(&bdev->tx_work); + + return 0; +} + +static int btmtksdio_func_query(struct hci_dev *hdev) +{ + struct btmtk_hci_wmt_params wmt_params; + int status, err; + u8 param = 0; + + /* Query whether the function is enabled */ + wmt_params.op = MTK_WMT_FUNC_CTRL; + wmt_params.flag = 4; + wmt_params.dlen = sizeof(param); + wmt_params.data = &param; + wmt_params.status = &status; + + err = mtk_hci_wmt_sync(hdev, &wmt_params); + if (err < 0) { + bt_dev_err(hdev, "Failed to query function status (%d)", err); + return err; + } + + return status; +} + +static int mtk_setup_firmware(struct hci_dev *hdev, const char *fwname) +{ + struct btmtk_hci_wmt_params wmt_params; + const struct firmware *fw; + const u8 *fw_ptr; + size_t fw_size; + int err, dlen; + u8 flag; + + err = request_firmware(&fw, fwname, &hdev->dev); + if (err < 0) { + bt_dev_err(hdev, "Failed to load firmware file (%d)", err); + return err; + } + + fw_ptr = fw->data; + fw_size = fw->size; + + /* The patch header is 30 bytes and should be skipped */ + if (fw_size < 30) { + err = -EINVAL; + goto free_fw; + } + + fw_size -= 30; + fw_ptr += 30; + flag = 1; + + wmt_params.op = MTK_WMT_PATCH_DWNLD; + wmt_params.status = NULL; + + while (fw_size > 0) { + dlen = min_t(int, 250, fw_size); + + /* Tell device the position in sequence */ + if (fw_size - dlen <= 0) + flag = 3; + else if (fw_size < fw->size - 30) + flag = 2; + + wmt_params.flag = flag; + wmt_params.dlen = dlen; + wmt_params.data = fw_ptr; + + err = mtk_hci_wmt_sync(hdev, &wmt_params); + if (err < 0) { + bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)", + err); + goto free_fw; + } + + fw_size -= dlen; + fw_ptr += dlen; + } + + wmt_params.op = MTK_WMT_RST; + wmt_params.flag = 4; + wmt_params.dlen = 0; + wmt_params.data = NULL; + wmt_params.status = NULL; + + /* Activate the function the firmware provides */ + err = mtk_hci_wmt_sync(hdev, &wmt_params); + if (err < 0) { + bt_dev_err(hdev, "Failed to send wmt rst (%d)", err); + goto free_fw; + } + + /* Wait a few moments for firmware activation to complete */ + usleep_range(10000, 12000); + +free_fw: + release_firmware(fw); + return err; +} + +static int btmtksdio_setup(struct hci_dev *hdev) +{ + struct btmtksdio_dev *bdev = hci_get_drvdata(hdev); + struct btmtk_hci_wmt_params wmt_params; + ktime_t calltime, delta, rettime; + struct btmtk_tci_sleep tci_sleep; + unsigned long long duration; + struct sk_buff *skb; + int err, status; + u8 param = 0x1; + + calltime = ktime_get(); + + /* Query whether the firmware is already downloaded */ + wmt_params.op = MTK_WMT_SEMAPHORE; + wmt_params.flag = 1; + wmt_params.dlen = 0; + wmt_params.data = NULL; + wmt_params.status = &status; + + err = mtk_hci_wmt_sync(hdev, &wmt_params); + if (err < 0) { + bt_dev_err(hdev, "Failed to query firmware status (%d)", err); + return err; + } + + if (status == BTMTK_WMT_PATCH_DONE) { + bt_dev_info(hdev, "Firmware already downloaded"); + goto ignore_setup_fw; + } + + /* Setup a firmware which the device definitely requires */ + err = mtk_setup_firmware(hdev, bdev->data->fwname); + if (err < 0) + return err; + +ignore_setup_fw: + /* Query whether the device is already enabled */ + err = readx_poll_timeout(btmtksdio_func_query, hdev, status, + status < 0 || status != BTMTK_WMT_ON_PROGRESS, + 2000, 5000000); + /* -ETIMEDOUT happens */ + if (err < 0) + return err; + + /* The other errors happen in btmtksdio_func_query */ + if (status < 0) + return status; + + if (status == BTMTK_WMT_ON_DONE) { + bt_dev_info(hdev, "function already on"); + goto ignore_func_on; + }
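In mtk_setup_firmware() above, each WMT fragment advertises its place in the stream through wmt_params.flag; restated as a helper (hypothetical name, same arithmetic as the loop):

	/* 1 = first fragment, 2 = continuation, 3 = final fragment;
	 * 'sent' counts payload bytes already pushed after the 30-byte
	 * patch header, 'total' is fw->size - 30
	 */
	static u8 mtk_patch_frag_flag(size_t sent, size_t dlen, size_t total)
	{
		if (sent + dlen >= total)
			return 3;
		return sent ? 2 : 1;
	}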
+ + /* Enable Bluetooth protocol */ + wmt_params.op = MTK_WMT_FUNC_CTRL; + wmt_params.flag = 0; + wmt_params.dlen = sizeof(param); + wmt_params.data = &param; + wmt_params.status = NULL; + + err = mtk_hci_wmt_sync(hdev, &wmt_params); + if (err < 0) { + bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err); + return err; + } + +ignore_func_on: + /* Apply the low power environment setup */ + tci_sleep.mode = 0x5; + tci_sleep.duration = cpu_to_le16(0x640); + tci_sleep.host_duration = cpu_to_le16(0x640); + tci_sleep.host_wakeup_pin = 0; + tci_sleep.time_compensation = 0; + + skb = __hci_cmd_sync(hdev, 0xfc7a, sizeof(tci_sleep), &tci_sleep, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + bt_dev_err(hdev, "Failed to apply low power setting (%d)", err); + return err; + } + kfree_skb(skb); + + rettime = ktime_get(); + delta = ktime_sub(rettime, calltime); + duration = (unsigned long long)ktime_to_ns(delta) >> 10; + + pm_runtime_set_autosuspend_delay(bdev->dev, + MTKBTSDIO_AUTOSUSPEND_DELAY); + pm_runtime_use_autosuspend(bdev->dev); + + err = pm_runtime_set_active(bdev->dev); + if (err < 0) + return err; + + /* Runtime auto-suspend is forbidden by default; it can be allowed by + * the enable_autosuspend flag or the PM runtime entry under sysfs. + */ + pm_runtime_forbid(bdev->dev); + pm_runtime_enable(bdev->dev); + + if (enable_autosuspend) + pm_runtime_allow(bdev->dev); + + bt_dev_info(hdev, "Device setup in %llu usecs", duration); + + return 0; +} + +static int btmtksdio_shutdown(struct hci_dev *hdev) +{ + struct btmtksdio_dev *bdev = hci_get_drvdata(hdev); + struct btmtk_hci_wmt_params wmt_params; + u8 param = 0x0; + int err; + + /* Get back the state to be consistent with the state + * in btmtksdio_setup. + */ + pm_runtime_get_sync(bdev->dev); + + /* Disable the device */ + wmt_params.op = MTK_WMT_FUNC_CTRL; + wmt_params.flag = 0; + wmt_params.dlen = sizeof(param); + wmt_params.data = &param; + wmt_params.status = NULL; + + err = mtk_hci_wmt_sync(hdev, &wmt_params); + if (err < 0) { + bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err); + return err; + } + + pm_runtime_put_noidle(bdev->dev); + pm_runtime_disable(bdev->dev); + + return 0; +} + +static int btmtksdio_send_frame(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct btmtksdio_dev *bdev = hci_get_drvdata(hdev); + + switch (hci_skb_pkt_type(skb)) { + case HCI_COMMAND_PKT: + hdev->stat.cmd_tx++; + break; + + case HCI_ACLDATA_PKT: + hdev->stat.acl_tx++; + break; + + case HCI_SCODATA_PKT: + hdev->stat.sco_tx++; + break; + + default: + return -EILSEQ; + } + + skb_queue_tail(&bdev->txq, skb); + + schedule_work(&bdev->tx_work); + + return 0; +} + +static int btmtksdio_probe(struct sdio_func *func, + const struct sdio_device_id *id) +{ + struct btmtksdio_dev *bdev; + struct hci_dev *hdev; + int err; + + bdev = devm_kzalloc(&func->dev, sizeof(*bdev), GFP_KERNEL); + if (!bdev) + return -ENOMEM; + + bdev->data = (void *)id->driver_data; + if (!bdev->data) + return -ENODEV; + + bdev->dev = &func->dev; + bdev->func = func; + + INIT_WORK(&bdev->tx_work, btmtksdio_tx_work); + skb_queue_head_init(&bdev->txq); + + /* Initialize and register HCI device */ + hdev = hci_alloc_dev(); + if (!hdev) { + dev_err(&func->dev, "Can't allocate HCI device\n"); + return -ENOMEM; + } + + bdev->hdev = hdev; + + hdev->bus = HCI_SDIO; + hci_set_drvdata(hdev, bdev); + + hdev->open = btmtksdio_open; + hdev->close = btmtksdio_close; + hdev->flush = btmtksdio_flush; + hdev->setup = btmtksdio_setup; + hdev->shutdown = btmtksdio_shutdown; + hdev->send =
btmtksdio_send_frame; + SET_HCIDEV_DEV(hdev, &func->dev); + + hdev->manufacturer = 70; + set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks); + + err = hci_register_dev(hdev); + if (err < 0) { + dev_err(&func->dev, "Can't register HCI device\n"); + hci_free_dev(hdev); + return err; + } + + sdio_set_drvdata(func, bdev); + + /* pm_runtime_enable is done after the firmware is downloaded because + * the core layer may already have enabled runtime PM for this func, + * such as the case host->caps & MMC_CAP_POWER_OFF_CARD. + */ + if (pm_runtime_enabled(bdev->dev)) + pm_runtime_disable(bdev->dev); + + /* As the explanation in drivers/mmc/core/sdio_bus.c tells us: + * Unbound SDIO functions are always suspended. + * During probe, the function is set active and the usage count + * is incremented. If the driver supports runtime PM, + * it should call pm_runtime_put_noidle() in its probe routine and + * pm_runtime_get_noresume() in its remove routine. + * + * So, put a pm_runtime_put_noidle() here! + */ + pm_runtime_put_noidle(bdev->dev); + + return 0; +} + +static void btmtksdio_remove(struct sdio_func *func) +{ + struct btmtksdio_dev *bdev = sdio_get_drvdata(func); + struct hci_dev *hdev; + + if (!bdev) + return; + + /* Be consistent with the state in btmtksdio_probe */ + pm_runtime_get_noresume(bdev->dev); + + hdev = bdev->hdev; + + sdio_set_drvdata(func, NULL); + hci_unregister_dev(hdev); + hci_free_dev(hdev); +} + +#ifdef CONFIG_PM +static int btmtksdio_runtime_suspend(struct device *dev) +{ + struct sdio_func *func = dev_to_sdio_func(dev); + struct btmtksdio_dev *bdev; + u32 status; + int err; + + bdev = sdio_get_drvdata(func); + if (!bdev) + return 0; + + sdio_claim_host(bdev->func); + + sdio_writel(bdev->func, C_FW_OWN_REQ_SET, MTK_REG_CHLPCR, &err); + if (err < 0) + goto out; + + err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status, + !(status & C_COM_DRV_OWN), 2000, 1000000); +out: + bt_dev_info(bdev->hdev, "status (%d) return ownership to device", err); + + sdio_release_host(bdev->func); + + return err; +} + +static int btmtksdio_runtime_resume(struct device *dev) +{ + struct sdio_func *func = dev_to_sdio_func(dev); + struct btmtksdio_dev *bdev; + u32 status; + int err; + + bdev = sdio_get_drvdata(func); + if (!bdev) + return 0; + + sdio_claim_host(bdev->func); + + sdio_writel(bdev->func, C_FW_OWN_REQ_CLR, MTK_REG_CHLPCR, &err); + if (err < 0) + goto out; + + err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status, + status & C_COM_DRV_OWN, 2000, 1000000); +out: + bt_dev_info(bdev->hdev, "status (%d) get ownership from device", err); + + sdio_release_host(bdev->func); + + return err; +} + +static UNIVERSAL_DEV_PM_OPS(btmtksdio_pm_ops, btmtksdio_runtime_suspend, + btmtksdio_runtime_resume, NULL); +#define BTMTKSDIO_PM_OPS (&btmtksdio_pm_ops) +#else /* CONFIG_PM */ +#define BTMTKSDIO_PM_OPS NULL +#endif /* CONFIG_PM */ + +static struct sdio_driver btmtksdio_driver = { + .name = "btmtksdio", + .probe = btmtksdio_probe, + .remove = btmtksdio_remove, + .id_table = btmtksdio_table, + .drv = { + .owner = THIS_MODULE, + .pm = BTMTKSDIO_PM_OPS, + } +}; + +module_sdio_driver(btmtksdio_driver); + +module_param(enable_autosuspend, bool, 0644); +MODULE_PARM_DESC(enable_autosuspend, "Enable autosuspend by default"); + +MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>"); +MODULE_DESCRIPTION("MediaTek Bluetooth SDIO driver ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); +MODULE_FIRMWARE(FIRMWARE_MT7663); +MODULE_FIRMWARE(FIRMWARE_MT7668); diff --git
a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c index b0b680dd69f4..f5dbeec8e274 100644 --- a/drivers/bluetooth/btmtkuart.c +++ b/drivers/bluetooth/btmtkuart.c @@ -661,7 +661,7 @@ static int btmtkuart_change_baudrate(struct hci_dev *hdev) { struct btmtkuart_dev *bdev = hci_get_drvdata(hdev); struct btmtk_hci_wmt_params wmt_params; - u32 baudrate; + __le32 baudrate; u8 param; int err; diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h index c72c56ea7480..6fdc25d7bba7 100644 --- a/drivers/bluetooth/btqca.h +++ b/drivers/bluetooth/btqca.h @@ -41,7 +41,7 @@ #define QCA_WCN3990_POWERON_PULSE 0xFC #define QCA_WCN3990_POWEROFF_PULSE 0xC0 -enum qca_bardrate { +enum qca_baudrate { QCA_BAUDRATE_115200 = 0, QCA_BAUDRATE_57600, QCA_BAUDRATE_38400, diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c index 282d1af1d3ba..4cfa9abe03c8 100644 --- a/drivers/bluetooth/btsdio.c +++ b/drivers/bluetooth/btsdio.c @@ -376,20 +376,7 @@ static struct sdio_driver btsdio_driver = { .id_table = btsdio_table, }; -static int __init btsdio_init(void) -{ - BT_INFO("Generic Bluetooth SDIO driver ver %s", VERSION); - - return sdio_register_driver(&btsdio_driver); -} - -static void __exit btsdio_exit(void) -{ - sdio_unregister_driver(&btsdio_driver); -} - -module_init(btsdio_init); -module_exit(btsdio_exit); +module_sdio_driver(btsdio_driver); MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); MODULE_DESCRIPTION("Generic Bluetooth SDIO driver ver " VERSION); diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c index ddbe518c3e5b..b5d31d583d60 100644 --- a/drivers/bluetooth/hci_bcm.c +++ b/drivers/bluetooth/hci_bcm.c @@ -228,9 +228,15 @@ static int bcm_gpio_set_power(struct bcm_device *dev, bool powered) int err; if (powered && !dev->res_enabled) { - err = regulator_bulk_enable(BCM_NUM_SUPPLIES, dev->supplies); - if (err) - return err; + /* Intel Macs use bcm_apple_get_resources() and don't + * have regulator supplies configured. + */ + if (dev->supplies[0].supply) { + err = regulator_bulk_enable(BCM_NUM_SUPPLIES, + dev->supplies); + if (err) + return err; + } /* LPO clock needs to be 32.768 kHz */ err = clk_set_rate(dev->lpo_clk, 32768); @@ -259,7 +265,13 @@ static int bcm_gpio_set_power(struct bcm_device *dev, bool powered) if (!powered && dev->res_enabled) { clk_disable_unprepare(dev->txco_clk); clk_disable_unprepare(dev->lpo_clk); - regulator_bulk_disable(BCM_NUM_SUPPLIES, dev->supplies); + + /* Intel Macs use bcm_apple_get_resources() and don't + * have regulator supplies configured. 
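The btsdio conversion above is purely mechanical: module_sdio_driver() (from <linux/mmc/sdio_func.h>, built on module_driver()) generates the init/exit pair that was removed, so the only behavioral change is the dropped BT_INFO banner. Roughly, the macro expands to:

	static int __init btsdio_driver_init(void)
	{
		return sdio_register_driver(&btsdio_driver);
	}
	module_init(btsdio_driver_init);

	static void __exit btsdio_driver_exit(void)
	{
		sdio_unregister_driver(&btsdio_driver);
	}
	module_exit(btsdio_driver_exit);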
+ */ + if (dev->supplies[0].supply) + regulator_bulk_disable(BCM_NUM_SUPPLIES, + dev->supplies); } /* wait for device to power on and come out of reset */ diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c index 069d1c8fde73..3f02ae560120 100644 --- a/drivers/bluetooth/hci_h5.c +++ b/drivers/bluetooth/hci_h5.c @@ -536,7 +536,7 @@ static void h5_unslip_one_byte(struct h5 *h5, unsigned char c) skb_put_data(h5->rx_skb, byte, 1); h5->rx_pending--; - BT_DBG("unsliped 0x%02hhx, rx_pending %zu", *byte, h5->rx_pending); + BT_DBG("unslipped 0x%02hhx, rx_pending %zu", *byte, h5->rx_pending); } static void h5_reset_rx(struct h5 *h5) diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 237aea34b69f..7f75652686fe 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -174,6 +174,21 @@ static int qca_power_setup(struct hci_uart *hu, bool on); static void qca_power_shutdown(struct hci_uart *hu); static int qca_power_off(struct hci_dev *hdev); +static enum qca_btsoc_type qca_soc_type(struct hci_uart *hu) +{ + enum qca_btsoc_type soc_type; + + if (hu->serdev) { + struct qca_serdev *qsd = serdev_device_get_drvdata(hu->serdev); + + soc_type = qsd->btsoc_type; + } else { + soc_type = QCA_ROME; + } + + return soc_type; +} + static void __serial_clock_on(struct tty_struct *tty) { /* TODO: Some chipset requires to enable UART clock on client @@ -508,6 +523,8 @@ static int qca_open(struct hci_uart *hu) qcadev = serdev_device_get_drvdata(hu->serdev); if (qcadev->btsoc_type != QCA_WCN3990) { gpiod_set_value_cansleep(qcadev->bt_en, 1); + /* Controller needs time to bootup. */ + msleep(150); } else { hu->init_speed = qcadev->init_speed; hu->oper_speed = qcadev->oper_speed; @@ -963,7 +980,6 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate) { struct hci_uart *hu = hci_get_drvdata(hdev); struct qca_data *qca = hu->priv; - struct qca_serdev *qcadev; struct sk_buff *skb; u8 cmd[] = { 0x01, 0x48, 0xFC, 0x01, 0x00 }; @@ -985,18 +1001,17 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate) skb_queue_tail(&qca->txq, skb); hci_uart_tx_wakeup(hu); - qcadev = serdev_device_get_drvdata(hu->serdev); - /* Wait for the baudrate change request to be sent */ while (!skb_queue_empty(&qca->txq)) usleep_range(100, 200); - serdev_device_wait_until_sent(hu->serdev, + if (hu->serdev) + serdev_device_wait_until_sent(hu->serdev, msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS)); /* Give the controller time to process the request */ - if (qcadev->btsoc_type == QCA_WCN3990) + if (qca_soc_type(hu) == QCA_WCN3990) msleep(10); else msleep(300); @@ -1072,10 +1087,7 @@ static unsigned int qca_get_speed(struct hci_uart *hu, static int qca_check_speeds(struct hci_uart *hu) { - struct qca_serdev *qcadev; - - qcadev = serdev_device_get_drvdata(hu->serdev); - if (qcadev->btsoc_type == QCA_WCN3990) { + if (qca_soc_type(hu) == QCA_WCN3990) { if (!qca_get_speed(hu, QCA_INIT_SPEED) && !qca_get_speed(hu, QCA_OPER_SPEED)) return -EINVAL; @@ -1091,7 +1103,6 @@ static int qca_check_speeds(struct hci_uart *hu) static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type) { unsigned int speed, qca_baudrate; - struct qca_serdev *qcadev; int ret = 0; if (speed_type == QCA_INIT_SPEED) { @@ -1099,6 +1110,8 @@ static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type) if (speed) host_set_baudrate(hu, speed); } else { + enum qca_btsoc_type soc_type = qca_soc_type(hu); + speed = qca_get_speed(hu, QCA_OPER_SPEED); if (!speed) return 0; @@ 
-1106,8 +1119,7 @@ static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type) /* Disable flow control for wcn3990 to deassert RTS while * changing the baudrate of chip and host. */ - qcadev = serdev_device_get_drvdata(hu->serdev); - if (qcadev->btsoc_type == QCA_WCN3990) + if (soc_type == QCA_WCN3990) hci_uart_set_flow_control(hu, true); qca_baudrate = qca_get_baudrate_value(speed); @@ -1119,7 +1131,7 @@ static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type) host_set_baudrate(hu, speed); error: - if (qcadev->btsoc_type == QCA_WCN3990) + if (soc_type == QCA_WCN3990) hci_uart_set_flow_control(hu, false); } @@ -1181,12 +1193,10 @@ static int qca_setup(struct hci_uart *hu) struct hci_dev *hdev = hu->hdev; struct qca_data *qca = hu->priv; unsigned int speed, qca_baudrate = QCA_BAUDRATE_115200; - struct qca_serdev *qcadev; + enum qca_btsoc_type soc_type = qca_soc_type(hu); int ret; int soc_ver = 0; - qcadev = serdev_device_get_drvdata(hu->serdev); - ret = qca_check_speeds(hu); if (ret) return ret; @@ -1194,7 +1204,7 @@ static int qca_setup(struct hci_uart *hu) /* Patch downloading has to be done without IBS mode */ clear_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags); - if (qcadev->btsoc_type == QCA_WCN3990) { + if (soc_type == QCA_WCN3990) { bt_dev_info(hdev, "setting up wcn3990"); /* Enable NON_PERSISTENT_SETUP QUIRK to ensure to execute @@ -1225,7 +1235,7 @@ static int qca_setup(struct hci_uart *hu) qca_baudrate = qca_get_baudrate_value(speed); } - if (qcadev->btsoc_type != QCA_WCN3990) { + if (soc_type != QCA_WCN3990) { /* Get QCA version information */ ret = qca_read_soc_version(hdev, &soc_ver); if (ret) @@ -1234,7 +1244,7 @@ static int qca_setup(struct hci_uart *hu) bt_dev_info(hdev, "QCA controller version 0x%08x", soc_ver); /* Setup patch / NVM configurations */ - ret = qca_uart_setup(hdev, qca_baudrate, qcadev->btsoc_type, soc_ver); + ret = qca_uart_setup(hdev, qca_baudrate, soc_type, soc_ver); if (!ret) { set_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags); qca_debugfs_init(hdev); @@ -1250,7 +1260,7 @@ static int qca_setup(struct hci_uart *hu) } /* Setup bdaddr */ - if (qcadev->btsoc_type == QCA_WCN3990) + if (soc_type == QCA_WCN3990) hu->hdev->set_bdaddr = qca_set_bdaddr; else hu->hdev->set_bdaddr = qca_set_bdaddr_rome; diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index 0dce94e3c495..744b6ec0acb0 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -42,7 +42,7 @@ #include <net/neighbour.h> #include <net/route.h> #include <net/netevent.h> -#include <net/addrconf.h> +#include <net/ipv6_stubs.h> #include <net/ip6_route.h> #include <rdma/ib_addr.h> #include <rdma/ib_sa.h> @@ -86,8 +86,8 @@ static inline bool ib_nl_is_good_ip_resp(const struct nlmsghdr *nlh) if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR) return false; - ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh), - nlmsg_len(nlh), ib_nl_addr_policy, NULL); + ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh), + nlmsg_len(nlh), ib_nl_addr_policy, NULL); if (ret) return false; @@ -351,7 +351,7 @@ static bool has_gateway(const struct dst_entry *dst, sa_family_t family) if (family == AF_INET) { rt = container_of(dst, struct rtable, dst); - return rt->rt_uses_gateway; + return rt->rt_gw_family == AF_INET; } rt6 = container_of(dst, struct rt6_info, dst); diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c index a5d2a20ee697..41929bb83739 100644 --- a/drivers/infiniband/core/iwpm_util.c 
+++ b/drivers/infiniband/core/iwpm_util.c @@ -506,14 +506,14 @@ int iwpm_parse_nlmsg(struct netlink_callback *cb, int policy_max, int ret; const char *err_str = ""; - ret = nlmsg_validate(cb->nlh, nlh_len, policy_max - 1, nlmsg_policy, - NULL); + ret = nlmsg_validate_deprecated(cb->nlh, nlh_len, policy_max - 1, + nlmsg_policy, NULL); if (ret) { err_str = "Invalid attribute"; goto parse_nlmsg_error; } - ret = nlmsg_parse(cb->nlh, nlh_len, nltb, policy_max - 1, - nlmsg_policy, NULL); + ret = nlmsg_parse_deprecated(cb->nlh, nlh_len, nltb, policy_max - 1, + nlmsg_policy, NULL); if (ret) { err_str = "Unable to parse the nlmsg"; goto parse_nlmsg_error; diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index 11ed58d3fce5..85324012bf07 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -292,7 +292,8 @@ static int fill_res_info_entry(struct sk_buff *msg, { struct nlattr *entry_attr; - entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY); + entry_attr = nla_nest_start_noflag(msg, + RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY); if (!entry_attr) return -EMSGSIZE; @@ -327,7 +328,7 @@ static int fill_res_info(struct sk_buff *msg, struct ib_device *device) if (fill_nldev_handle(msg, device)) return -EMSGSIZE; - table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_SUMMARY); + table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_RES_SUMMARY); if (!table_attr) return -EMSGSIZE; @@ -607,8 +608,8 @@ static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, u32 index; int err; - err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, - nldev_policy, extack); + err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, + nldev_policy, extack); if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) return -EINVAL; @@ -652,8 +653,8 @@ static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh, u32 index; int err; - err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, - extack); + err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, + nldev_policy, extack); if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) return -EINVAL; @@ -721,8 +722,8 @@ static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, u32 port; int err; - err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, - nldev_policy, extack); + err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, + nldev_policy, extack); if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX]) @@ -777,8 +778,8 @@ static int nldev_port_get_dumpit(struct sk_buff *skb, int err; unsigned int p; - err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, - nldev_policy, NULL); + err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, + nldev_policy, NULL); if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) return -EINVAL; @@ -832,8 +833,8 @@ static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, u32 index; int ret; - ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, - nldev_policy, extack); + ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, + nldev_policy, extack); if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) return -EINVAL; @@ -981,8 +982,8 @@ static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh, struct sk_buff *msg; int ret; - ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, - nldev_policy, extack); + ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, + nldev_policy, extack); if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !fe->id || 
!tb[fe->id]) return -EINVAL; @@ -1070,8 +1071,8 @@ static int res_get_common_dumpit(struct sk_buff *skb, u32 index, port = 0; bool filled = false; - err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, - nldev_policy, NULL); + err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, + nldev_policy, NULL); /* * Right now, we are expecting the device index to get res information, * but it is possible to extend this code to return all devices in @@ -1108,7 +1109,7 @@ static int res_get_common_dumpit(struct sk_buff *skb, goto err; } - table_attr = nla_nest_start(skb, fe->nldev_attr); + table_attr = nla_nest_start_noflag(skb, fe->nldev_attr); if (!table_attr) { ret = -EMSGSIZE; goto err; @@ -1134,7 +1135,7 @@ static int res_get_common_dumpit(struct sk_buff *skb, filled = true; - entry_attr = nla_nest_start(skb, fe->entry); + entry_attr = nla_nest_start_noflag(skb, fe->entry); if (!entry_attr) { ret = -EMSGSIZE; rdma_restrack_put(res); @@ -1249,8 +1250,8 @@ static int nldev_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, char type[IFNAMSIZ]; int err; - err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, - nldev_policy, extack); + err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, + nldev_policy, extack); if (err || !tb[RDMA_NLDEV_ATTR_DEV_NAME] || !tb[RDMA_NLDEV_ATTR_LINK_TYPE] || !tb[RDMA_NLDEV_ATTR_NDEV_NAME]) return -EINVAL; @@ -1293,8 +1294,8 @@ static int nldev_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, u32 index; int err; - err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, - nldev_policy, extack); + err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, + nldev_policy, extack); if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) return -EINVAL; diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index 7925e45ea88a..bb534959abf0 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -1028,8 +1028,8 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb, !(NETLINK_CB(skb).sk)) return -EPERM; - ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh), - nlmsg_len(nlh), ib_nl_policy, NULL); + ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh), + nlmsg_len(nlh), ib_nl_policy, NULL); attr = (const struct nlattr *)tb[LS_NLA_TYPE_TIMEOUT]; if (ret || !attr) goto settimeout_out; @@ -1080,8 +1080,8 @@ static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh) if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR) return 0; - ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh), - nlmsg_len(nlh), ib_nl_policy, NULL); + ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh), + nlmsg_len(nlh), ib_nl_policy, NULL); if (ret) return 0; diff --git a/drivers/infiniband/hw/cxgb4/restrack.c b/drivers/infiniband/hw/cxgb4/restrack.c index 9a7520ee41e0..f82d46ed969d 100644 --- a/drivers/infiniband/hw/cxgb4/restrack.c +++ b/drivers/infiniband/hw/cxgb4/restrack.c @@ -149,7 +149,7 @@ static int fill_res_qp_entry(struct sk_buff *msg, if (qhp->ucontext) return 0; - table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER); + table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_DRIVER); if (!table_attr) goto err; @@ -216,7 +216,7 @@ static int fill_res_ep_entry(struct sk_buff *msg, if (!uep) return 0; - table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER); + table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_DRIVER); if (!table_attr) goto err_free_uep; @@ -387,7 +387,7 @@ static int fill_res_cq_entry(struct sk_buff *msg, if (ibcq->uobject) return 0; - 
table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER); + table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_DRIVER); if (!table_attr) goto err; @@ -447,7 +447,7 @@ static int fill_res_mr_entry(struct sk_buff *msg, if (!stag) return 0; - table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER); + table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_DRIVER); if (!table_attr) goto err; diff --git a/drivers/infiniband/hw/hfi1/vnic_main.c b/drivers/infiniband/hw/hfi1/vnic_main.c index a922db58be14..2b07032dbdda 100644 --- a/drivers/infiniband/hw/hfi1/vnic_main.c +++ b/drivers/infiniband/hw/hfi1/vnic_main.c @@ -423,8 +423,7 @@ tx_finish: static u16 hfi1_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb, - struct net_device *sb_dev, - select_queue_fallback_t fallback) + struct net_device *sb_dev) { struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev); struct opa_vnic_skb_mdata *mdata; diff --git a/drivers/infiniband/hw/mlx5/cmd.c b/drivers/infiniband/hw/mlx5/cmd.c index 6bcc63aaa50b..be95ac5aeb30 100644 --- a/drivers/infiniband/hw/mlx5/cmd.c +++ b/drivers/infiniband/hw/mlx5/cmd.c @@ -148,7 +148,7 @@ int mlx5_cmd_alloc_memic(struct mlx5_memic *memic, phys_addr_t *addr, return ret; } - *addr = pci_resource_start(dev->pdev, 0) + + *addr = dev->bar_addr + MLX5_GET64(alloc_memic_out, out, memic_start_addr); return 0; @@ -167,7 +167,7 @@ int mlx5_cmd_dealloc_memic(struct mlx5_memic *memic, u64 addr, u64 length) u64 start_page_idx; int err; - addr -= pci_resource_start(dev->pdev, 0); + addr -= dev->bar_addr; start_page_idx = (addr - hw_start_addr) >> PAGE_SHIFT; MLX5_SET(dealloc_memic_in, in, opcode, MLX5_CMD_OP_DEALLOC_MEMIC); diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index d3dd290ae1b1..347e3cac254e 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -2011,7 +2011,7 @@ static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? 
MLX5_UARS_IN_PAGE : 1; - return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) + uar_idx / fw_uars_per_page; + return (dev->mdev->bar_addr >> PAGE_SHIFT) + uar_idx / fw_uars_per_page; } static int get_command(unsigned long offset) @@ -2202,7 +2202,7 @@ static int dm_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) page_idx + npages) return -EINVAL; - pfn = ((pci_resource_start(dev->mdev->pdev, 0) + + pfn = ((dev->mdev->bar_addr + MLX5_CAP64_DEV_MEM(dev->mdev, memic_bar_start_addr)) >> PAGE_SHIFT) + page_idx; @@ -2285,7 +2285,7 @@ struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev, goto err_free; start_offset = memic_addr & ~PAGE_MASK; - page_idx = (memic_addr - pci_resource_start(memic->dev->pdev, 0) - + page_idx = (memic_addr - memic->dev->bar_addr - MLX5_CAP64_DEV_MEM(memic->dev, memic_bar_start_addr)) >> PAGE_SHIFT; @@ -2328,7 +2328,7 @@ int mlx5_ib_dealloc_dm(struct ib_dm *ibdm) if (ret) return ret; - page_idx = (dm->dev_addr - pci_resource_start(memic->dev->pdev, 0) - + page_idx = (dm->dev_addr - memic->dev->bar_addr - MLX5_CAP64_DEV_MEM(memic->dev, memic_bar_start_addr)) >> PAGE_SHIFT; bitmap_clear(to_mucontext(ibdm->uobject->context)->dm_pages, diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index c85f00255884..ca921fd40499 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -1194,8 +1194,7 @@ static struct ib_mr *mlx5_ib_get_memic_mr(struct ib_pd *pd, u64 memic_addr, MLX5_SET64(mkc, mkc, len, length); MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn); MLX5_SET(mkc, mkc, qpn, 0xffffff); - MLX5_SET64(mkc, mkc, start_addr, - memic_addr - pci_resource_start(dev->mdev->pdev, 0)); + MLX5_SET64(mkc, mkc, start_addr, memic_addr - dev->mdev->bar_addr); err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen); if (err) diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 8870c350fda0..fc67d78ca959 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -5122,7 +5122,7 @@ out: wmb(); /* currently we support only regular doorbells */ - mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset, NULL); + mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset); /* Make sure doorbells don't leak out of SQ spinlock * and reach the HCA out of order. 
*/ diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index 032883180f65..0010a3ed64f1 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c @@ -1407,7 +1407,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi if (neigh->nud_state & NUD_VALID) { nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X" " is %pM, Gateway is 0x%08X \n", dst_ip, - neigh->ha, ntohl(rt->rt_gateway)); + neigh->ha, ntohl(rt->rt_gw4)); if (arpindex >= 0) { if (ether_addr_equal(nesadapter->arp_table[arpindex].mac_addr, neigh->ha)) { diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c index ae70cd18903e..aeff68f582d3 100644 --- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c +++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c @@ -95,8 +95,7 @@ static netdev_tx_t opa_netdev_start_xmit(struct sk_buff *skb, } static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb, - struct net_device *sb_dev, - select_queue_fallback_t fallback) + struct net_device *sb_dev) { struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev); struct opa_vnic_skb_mdata *mdata; @@ -106,8 +105,7 @@ static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb, mdata = skb_push(skb, sizeof(*mdata)); mdata->entropy = opa_vnic_calc_entropy(skb); mdata->vl = opa_vnic_get_vl(adapter, skb); - rc = adapter->rn_ops->ndo_select_queue(netdev, skb, - sb_dev, fallback); + rc = adapter->rn_ops->ndo_select_queue(netdev, skb, sb_dev); skb_pull(skb, sizeof(*mdata)); return rc; } diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c index 362aa5450a5e..29c22d74afe0 100644 --- a/drivers/isdn/hardware/mISDN/hfcpci.c +++ b/drivers/isdn/hardware/mISDN/hfcpci.c @@ -2041,9 +2041,9 @@ setup_hw(struct hfc_pci *hc) } printk(KERN_INFO - "HFC-PCI: defined at mem %#lx fifo %#lx(%#lx) IRQ %d HZ %d\n", - (u_long) hc->hw.pci_io, (u_long) hc->hw.fifos, - (u_long) hc->hw.dmahandle, hc->irq, HZ); + "HFC-PCI: defined at mem %#lx fifo %p(%pad) IRQ %d HZ %d\n", + (u_long) hc->hw.pci_io, hc->hw.fifos, + &hc->hw.dmahandle, hc->irq, HZ); /* enable memory mapped ports, disable busmaster */ pci_write_config_word(hc->pdev, PCI_COMMAND, PCI_ENA_MEMIO); diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c index b12e6cae26c2..de965115a183 100644 --- a/drivers/isdn/hisax/config.c +++ b/drivers/isdn/hisax/config.c @@ -1294,9 +1294,9 @@ void HiSax_reportcard(int cardnr, int sel) printk(KERN_DEBUG "HiSax: reportcard No %d\n", cardnr + 1); printk(KERN_DEBUG "HiSax: Type %s\n", CardType[cs->typ]); printk(KERN_DEBUG "HiSax: debuglevel %x\n", cs->debug); - printk(KERN_DEBUG "HiSax: HiSax_reportcard address 0x%lX\n", - (ulong) & HiSax_reportcard); - printk(KERN_DEBUG "HiSax: cs 0x%lX\n", (ulong) cs); + printk(KERN_DEBUG "HiSax: HiSax_reportcard address 0x%px\n", + HiSax_reportcard); + printk(KERN_DEBUG "HiSax: cs 0x%px\n", cs); printk(KERN_DEBUG "HiSax: HW_Flags %lx bc0 flg %lx bc1 flg %lx\n", cs->HW_Flags, cs->bcs[0].Flag, cs->bcs[1].Flag); printk(KERN_DEBUG "HiSax: bcs 0 mode %d ch%d\n", diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c index a7b275ea5de1..7e0f419c14f8 100644 --- a/drivers/isdn/i4l/isdn_ppp.c +++ b/drivers/isdn/i4l/isdn_ppp.c @@ -1888,8 +1888,9 @@ static u32 isdn_ppp_mp_get_seq(int short_seq, return seq; } -struct sk_buff *isdn_ppp_mp_discard(ippp_bundle *mp, - struct sk_buff *from, struct sk_buff *to) 
+static struct sk_buff *isdn_ppp_mp_discard(ippp_bundle *mp, + struct sk_buff *from, + struct sk_buff *to) { if (from) while (from != to) { @@ -1900,8 +1901,8 @@ struct sk_buff *isdn_ppp_mp_discard(ippp_bundle *mp, return from; } -void isdn_ppp_mp_reassembly(isdn_net_dev *net_dev, isdn_net_local *lp, - struct sk_buff *from, struct sk_buff *to) +static void isdn_ppp_mp_reassembly(isdn_net_dev *net_dev, isdn_net_local *lp, + struct sk_buff *from, struct sk_buff *to) { ippp_bundle *mp = net_dev->pb; int proto; diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c index 390a722e6211..ee657003c1a1 100644 --- a/drivers/media/rc/bpf-lirc.c +++ b/drivers/media/rc/bpf-lirc.c @@ -97,6 +97,12 @@ lirc_mode2_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_map_update_elem_proto; case BPF_FUNC_map_delete_elem: return &bpf_map_delete_elem_proto; + case BPF_FUNC_map_push_elem: + return &bpf_map_push_elem_proto; + case BPF_FUNC_map_pop_elem: + return &bpf_map_pop_elem_proto; + case BPF_FUNC_map_peek_elem: + return &bpf_map_peek_elem_proto; case BPF_FUNC_ktime_get_ns: return &bpf_ktime_get_ns_proto; case BPF_FUNC_tail_call: diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 7a96d168efc4..bc42f131f47c 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -505,6 +505,7 @@ source "drivers/net/hyperv/Kconfig" config NETDEVSIM tristate "Simulated networking device" depends on DEBUG_FS + select NET_DEVLINK help This driver is a developer testing tool and software model that can be used to test various control path networking APIs, especially diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c index 3d27616d9c85..51cf5eca9c7f 100644 --- a/drivers/net/appletalk/ipddp.c +++ b/drivers/net/appletalk/ipddp.c @@ -116,11 +116,15 @@ static struct net_device * __init ipddp_init(void) */ static netdev_tx_t ipddp_xmit(struct sk_buff *skb, struct net_device *dev) { - __be32 paddr = skb_rtable(skb)->rt_gateway; + struct rtable *rtable = skb_rtable(skb); + __be32 paddr = 0; struct ddpehdr *ddp; struct ipddp_route *rt; struct atalk_addr *our_addr; + if (rtable->rt_gw_family == AF_INET) + paddr = rtable->rt_gw4; + spin_lock(&ipddp_route_lock); /* diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index ee610721098e..062fa7e3af4c 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -4118,8 +4118,7 @@ static inline int bond_slave_override(struct bonding *bond, static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb, - struct net_device *sb_dev, - select_queue_fallback_t fallback) + struct net_device *sb_dev) { /* This helper function exists to help dev_pick_tx get the correct * destination queue. 
Using a helper function skips a call to diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c index b286f591242e..022044b59d6a 100644 --- a/drivers/net/bonding/bond_netlink.c +++ b/drivers/net/bonding/bond_netlink.c @@ -546,7 +546,7 @@ static int bond_fill_info(struct sk_buff *skb, if (nla_put_u32(skb, IFLA_BOND_ARP_INTERVAL, bond->params.arp_interval)) goto nla_put_failure; - targets = nla_nest_start(skb, IFLA_BOND_ARP_IP_TARGET); + targets = nla_nest_start_noflag(skb, IFLA_BOND_ARP_IP_TARGET); if (!targets) goto nla_put_failure; @@ -644,7 +644,7 @@ static int bond_fill_info(struct sk_buff *skb, if (!bond_3ad_get_active_agg_info(bond, &info)) { struct nlattr *nest; - nest = nla_nest_start(skb, IFLA_BOND_AD_INFO); + nest = nla_nest_start_noflag(skb, IFLA_BOND_AD_INFO); if (!nest) goto nla_put_failure; @@ -711,7 +711,7 @@ static int bond_fill_linkxstats(struct sk_buff *skb, return -EINVAL; } - nest = nla_nest_start(skb, LINK_XSTATS_TYPE_BOND); + nest = nla_nest_start_noflag(skb, LINK_XSTATS_TYPE_BOND); if (!nest) return -EMSGSIZE; if (BOND_MODE(bond) == BOND_MODE_8023AD) { @@ -722,7 +722,7 @@ static int bond_fill_linkxstats(struct sk_buff *skb, else stats = &BOND_AD_INFO(bond).stats; - nest2 = nla_nest_start(skb, BOND_XSTATS_3AD); + nest2 = nla_nest_start_noflag(skb, BOND_XSTATS_3AD); if (!nest2) { nla_nest_end(skb, nest); return -EMSGSIZE; diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index 71bb3aebded4..82560b710681 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -41,7 +41,7 @@ config NET_DSA_MT7530 config NET_DSA_MV88E6060 tristate "Marvell 88E6060 ethernet switch chip support" - depends on NET_DSA && NET_DSA_LEGACY + depends on NET_DSA select NET_DSA_TAG_TRAILER ---help--- This enables support for the Marvell 88E6060 ethernet switch diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 0852e5e08177..c8040ecf4425 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -428,7 +428,6 @@ static void b53_enable_vlan(struct b53_device *dev, bool enable, b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); dev->vlan_enabled = enable; - dev->vlan_filtering_enabled = enable_filtering; } static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100) @@ -665,7 +664,7 @@ int b53_configure_vlan(struct dsa_switch *ds) b53_do_vlan_op(dev, VTA_CMD_CLEAR); } - b53_enable_vlan(dev, false, dev->vlan_filtering_enabled); + b53_enable_vlan(dev, false, ds->vlan_filtering); b53_for_each_port(dev, i) b53_write16(dev, B53_VLAN_PAGE, @@ -966,6 +965,13 @@ static int b53_setup(struct dsa_switch *ds) b53_disable_port(ds, port); } + /* Let DSA handle the case were multiple bridges span the same switch + * device and different VLAN awareness settings are requested, which + * would be breaking filtering semantics for any of the other bridge + * devices. (not hardware supported) + */ + ds->vlan_filtering_is_global = true; + return ret; } @@ -1275,35 +1281,17 @@ EXPORT_SYMBOL(b53_phylink_mac_link_up); int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering) { struct b53_device *dev = ds->priv; - struct net_device *bridge_dev; - unsigned int i; u16 pvid, new_pvid; - /* Handle the case were multiple bridges span the same switch device - * and one of them has a different setting than what is being requested - * which would be breaking filtering semantics for any of the other - * bridge devices. 
- */ - b53_for_each_port(dev, i) { - bridge_dev = dsa_to_port(ds, i)->bridge_dev; - if (bridge_dev && - bridge_dev != dsa_to_port(ds, port)->bridge_dev && - br_vlan_enabled(bridge_dev) != vlan_filtering) { - netdev_err(bridge_dev, - "VLAN filtering is global to the switch!\n"); - return -EINVAL; - } - } - b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid); new_pvid = pvid; - if (dev->vlan_filtering_enabled && !vlan_filtering) { + if (!vlan_filtering) { /* Filtering is currently enabled, use the default PVID since * the bridge does not expect tagging anymore */ dev->ports[port].pvid = pvid; new_pvid = b53_default_pvid(dev); - } else if (!dev->vlan_filtering_enabled && vlan_filtering) { + } else { /* Filtering is currently disabled, restore the previous PVID */ new_pvid = dev->ports[port].pvid; } @@ -1329,7 +1317,7 @@ int b53_vlan_prepare(struct dsa_switch *ds, int port, if (vlan->vid_end > dev->num_vlans) return -ERANGE; - b53_enable_vlan(dev, true, dev->vlan_filtering_enabled); + b53_enable_vlan(dev, true, ds->vlan_filtering); return 0; } diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index e3441dcf2d21..f25bc80c4ffc 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -139,7 +139,6 @@ struct b53_device { unsigned int num_vlans; struct b53_vlan *vlans; bool vlan_enabled; - bool vlan_filtering_enabled; unsigned int num_ports; struct b53_port *ports; }; diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index c8e3f05e1d72..4ccb3239f5f7 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -1188,10 +1188,11 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) if (ret) goto out_mdio; - pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n", - priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff, - priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff, - priv->core, priv->irq0, priv->irq1); + dev_info(&pdev->dev, + "Starfighter 2 top: %x.%02x, core: %x.%02x, IRQs: %d, %d\n", + priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff, + priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff, + priv->irq0, priv->irq1); return 0; diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c index f16e1d7d8615..c026d15721f6 100644 --- a/drivers/net/dsa/microchip/ksz9477.c +++ b/drivers/net/dsa/microchip/ksz9477.c @@ -1144,6 +1144,7 @@ static phy_interface_t ksz9477_get_interface(struct ksz_device *dev, int port) interface = PHY_INTERFACE_MODE_GMII; if (gbit) break; + /* fall through */ case 0: interface = PHY_INTERFACE_MODE_MII; break; diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 7357b4fc0185..8d531c5f21f3 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -828,11 +828,9 @@ mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port) mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK, VLAN_ATTR(MT7530_VLAN_TRANSPARENT)); - priv->ports[port].vlan_filtering = false; - for (i = 0; i < MT7530_NUM_PORTS; i++) { if (dsa_is_user_port(ds, i) && - priv->ports[i].vlan_filtering) { + dsa_port_is_vlan_filtering(&ds->ports[i])) { all_user_ports_removed = false; break; } @@ -891,8 +889,8 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port, * And the other port's port matrix cannot be broken when the * other port is still a VLAN-aware port. 
*/ - if (!priv->ports[i].vlan_filtering && - dsa_is_user_port(ds, i) && i != port) { + if (dsa_is_user_port(ds, i) && i != port && + !dsa_port_is_vlan_filtering(&ds->ports[i])) { if (dsa_to_port(ds, i)->bridge_dev != bridge) continue; if (priv->ports[i].enable) @@ -910,8 +908,6 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port, PCR_MATRIX(BIT(MT7530_CPU_PORT))); priv->ports[port].pm = PCR_MATRIX(BIT(MT7530_CPU_PORT)); - mt7530_port_set_vlan_unaware(ds, port); - mutex_unlock(&priv->reg_mutex); } @@ -1013,10 +1009,6 @@ static int mt7530_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering) { - struct mt7530_priv *priv = ds->priv; - - priv->ports[port].vlan_filtering = vlan_filtering; - if (vlan_filtering) { /* The port is being kept as VLAN-unaware port when bridge is * set up with vlan_filtering not being set, Otherwise, the @@ -1025,6 +1017,8 @@ mt7530_port_vlan_filtering(struct dsa_switch *ds, int port, */ mt7530_port_set_vlan_aware(ds, port); mt7530_port_set_vlan_aware(ds, MT7530_CPU_PORT); + } else { + mt7530_port_set_vlan_unaware(ds, port); } return 0; @@ -1139,7 +1133,7 @@ mt7530_port_vlan_add(struct dsa_switch *ds, int port, /* The port is kept as VLAN-unaware if bridge with vlan_filtering not * being set. */ - if (!priv->ports[port].vlan_filtering) + if (!dsa_port_is_vlan_filtering(&ds->ports[port])) return; mutex_lock(&priv->reg_mutex); @@ -1170,7 +1164,7 @@ mt7530_port_vlan_del(struct dsa_switch *ds, int port, /* The port is kept as VLAN-unaware if bridge with vlan_filtering not * being set. */ - if (!priv->ports[port].vlan_filtering) + if (!dsa_port_is_vlan_filtering(&ds->ports[port])) return 0; mutex_lock(&priv->reg_mutex); diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h index a95ed958df5b..1eec7bdc283a 100644 --- a/drivers/net/dsa/mt7530.h +++ b/drivers/net/dsa/mt7530.h @@ -410,7 +410,6 @@ struct mt7530_port { bool enable; u32 pm; u16 pvid; - bool vlan_filtering; }; /* struct mt7530_priv - This is the main data structure for holding the state diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c index 0b3e51f248c2..2a2489b5196d 100644 --- a/drivers/net/dsa/mv88e6060.c +++ b/drivers/net/dsa/mv88e6060.c @@ -1,11 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * net/dsa/mv88e6060.c - Driver for Marvell 88e6060 switch chips * Copyright (c) 2008-2009 Marvell Semiconductor - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
*/ #include <linux/delay.h> @@ -18,40 +14,16 @@ #include <net/dsa.h> #include "mv88e6060.h" -static int reg_read(struct dsa_switch *ds, int addr, int reg) +static int reg_read(struct mv88e6060_priv *priv, int addr, int reg) { - struct mv88e6060_priv *priv = ds->priv; - return mdiobus_read_nested(priv->bus, priv->sw_addr + addr, reg); } -#define REG_READ(addr, reg) \ - ({ \ - int __ret; \ - \ - __ret = reg_read(ds, addr, reg); \ - if (__ret < 0) \ - return __ret; \ - __ret; \ - }) - - -static int reg_write(struct dsa_switch *ds, int addr, int reg, u16 val) +static int reg_write(struct mv88e6060_priv *priv, int addr, int reg, u16 val) { - struct mv88e6060_priv *priv = ds->priv; - return mdiobus_write_nested(priv->bus, priv->sw_addr + addr, reg, val); } -#define REG_WRITE(addr, reg, val) \ - ({ \ - int __ret; \ - \ - __ret = reg_write(ds, addr, reg, val); \ - if (__ret < 0) \ - return __ret; \ - }) - static const char *mv88e6060_get_name(struct mii_bus *bus, int sw_addr) { int ret; @@ -76,28 +48,7 @@ static enum dsa_tag_protocol mv88e6060_get_tag_protocol(struct dsa_switch *ds, return DSA_TAG_PROTO_TRAILER; } -static const char *mv88e6060_drv_probe(struct device *dsa_dev, - struct device *host_dev, int sw_addr, - void **_priv) -{ - struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev); - struct mv88e6060_priv *priv; - const char *name; - - name = mv88e6060_get_name(bus, sw_addr); - if (name) { - priv = devm_kzalloc(dsa_dev, sizeof(*priv), GFP_KERNEL); - if (!priv) - return NULL; - *_priv = priv; - priv->bus = bus; - priv->sw_addr = sw_addr; - } - - return name; -} - -static int mv88e6060_switch_reset(struct dsa_switch *ds) +static int mv88e6060_switch_reset(struct mv88e6060_priv *priv) { int i; int ret; @@ -105,23 +56,32 @@ static int mv88e6060_switch_reset(struct dsa_switch *ds) /* Set all ports to the disabled state. */ for (i = 0; i < MV88E6060_PORTS; i++) { - ret = REG_READ(REG_PORT(i), PORT_CONTROL); - REG_WRITE(REG_PORT(i), PORT_CONTROL, - ret & ~PORT_CONTROL_STATE_MASK); + ret = reg_read(priv, REG_PORT(i), PORT_CONTROL); + if (ret < 0) + return ret; + ret = reg_write(priv, REG_PORT(i), PORT_CONTROL, + ret & ~PORT_CONTROL_STATE_MASK); + if (ret) + return ret; } /* Wait for transmit queues to drain. */ usleep_range(2000, 4000); /* Reset the switch. */ - REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL, - GLOBAL_ATU_CONTROL_SWRESET | - GLOBAL_ATU_CONTROL_LEARNDIS); + ret = reg_write(priv, REG_GLOBAL, GLOBAL_ATU_CONTROL, + GLOBAL_ATU_CONTROL_SWRESET | + GLOBAL_ATU_CONTROL_LEARNDIS); + if (ret) + return ret; /* Wait up to one second for reset to complete. */ timeout = jiffies + 1 * HZ; while (time_before(jiffies, timeout)) { - ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS); + ret = reg_read(priv, REG_GLOBAL, GLOBAL_STATUS); + if (ret < 0) + return ret; + if (ret & GLOBAL_STATUS_INIT_READY) break; @@ -133,61 +93,69 @@ static int mv88e6060_switch_reset(struct dsa_switch *ds) return 0; } -static int mv88e6060_setup_global(struct dsa_switch *ds) +static int mv88e6060_setup_global(struct mv88e6060_priv *priv) { + int ret; + /* Disable discarding of frames with excessive collisions, * set the maximum frame size to 1536 bytes, and mask all * interrupt sources. */ - REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, GLOBAL_CONTROL_MAX_FRAME_1536); + ret = reg_write(priv, REG_GLOBAL, GLOBAL_CONTROL, + GLOBAL_CONTROL_MAX_FRAME_1536); + if (ret) + return ret; /* Disable automatic address learning. 
*/ - REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL, - GLOBAL_ATU_CONTROL_LEARNDIS); - - return 0; + return reg_write(priv, REG_GLOBAL, GLOBAL_ATU_CONTROL, + GLOBAL_ATU_CONTROL_LEARNDIS); } -static int mv88e6060_setup_port(struct dsa_switch *ds, int p) +static int mv88e6060_setup_port(struct mv88e6060_priv *priv, int p) { int addr = REG_PORT(p); + int ret; /* Do not force flow control, disable Ingress and Egress * Header tagging, disable VLAN tunneling, and set the port * state to Forwarding. Additionally, if this is the CPU * port, enable Ingress and Egress Trailer tagging mode. */ - REG_WRITE(addr, PORT_CONTROL, - dsa_is_cpu_port(ds, p) ? + ret = reg_write(priv, addr, PORT_CONTROL, + dsa_is_cpu_port(priv->ds, p) ? PORT_CONTROL_TRAILER | PORT_CONTROL_INGRESS_MODE | PORT_CONTROL_STATE_FORWARDING : PORT_CONTROL_STATE_FORWARDING); + if (ret) + return ret; /* Port based VLAN map: give each port its own address * database, allow the CPU port to talk to each of the 'real' * ports, and allow each of the 'real' ports to only talk to * the CPU port. */ - REG_WRITE(addr, PORT_VLAN_MAP, - ((p & 0xf) << PORT_VLAN_MAP_DBNUM_SHIFT) | - (dsa_is_cpu_port(ds, p) ? dsa_user_ports(ds) : - BIT(dsa_to_port(ds, p)->cpu_dp->index))); + ret = reg_write(priv, addr, PORT_VLAN_MAP, + ((p & 0xf) << PORT_VLAN_MAP_DBNUM_SHIFT) | + (dsa_is_cpu_port(priv->ds, p) ? + dsa_user_ports(priv->ds) : + BIT(dsa_to_port(priv->ds, p)->cpu_dp->index))); + if (ret) + return ret; /* Port Association Vector: when learning source addresses * of packets, add the address to the address database using * a port bitmap that has only the bit for this port set and * the other bits clear. */ - REG_WRITE(addr, PORT_ASSOC_VECTOR, BIT(p)); - - return 0; + return reg_write(priv, addr, PORT_ASSOC_VECTOR, BIT(p)); } -static int mv88e6060_setup_addr(struct dsa_switch *ds) +static int mv88e6060_setup_addr(struct mv88e6060_priv *priv) { u8 addr[ETH_ALEN]; + int ret; u16 val; eth_random_addr(addr); @@ -199,34 +167,43 @@ static int mv88e6060_setup_addr(struct dsa_switch *ds) */ val &= 0xfeff; - REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, val); - REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]); - REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]); + ret = reg_write(priv, REG_GLOBAL, GLOBAL_MAC_01, val); + if (ret) + return ret; + + ret = reg_write(priv, REG_GLOBAL, GLOBAL_MAC_23, + (addr[2] << 8) | addr[3]); + if (ret) + return ret; - return 0; + return reg_write(priv, REG_GLOBAL, GLOBAL_MAC_45, + (addr[4] << 8) | addr[5]); } static int mv88e6060_setup(struct dsa_switch *ds) { + struct mv88e6060_priv *priv = ds->priv; int ret; int i; - ret = mv88e6060_switch_reset(ds); + priv->ds = ds; + + ret = mv88e6060_switch_reset(priv); if (ret < 0) return ret; /* @@@ initialise atu */ - ret = mv88e6060_setup_global(ds); + ret = mv88e6060_setup_global(priv); if (ret < 0) return ret; - ret = mv88e6060_setup_addr(ds); + ret = mv88e6060_setup_addr(priv); if (ret < 0) return ret; for (i = 0; i < MV88E6060_PORTS; i++) { - ret = mv88e6060_setup_port(ds, i); + ret = mv88e6060_setup_port(priv, i); if (ret < 0) return ret; } @@ -243,51 +220,93 @@ static int mv88e6060_port_to_phy_addr(int port) static int mv88e6060_phy_read(struct dsa_switch *ds, int port, int regnum) { + struct mv88e6060_priv *priv = ds->priv; int addr; addr = mv88e6060_port_to_phy_addr(port); if (addr == -1) return 0xffff; - return reg_read(ds, addr, regnum); + return reg_read(priv, addr, regnum); } static int mv88e6060_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val) { + struct 
mv88e6060_priv *priv = ds->priv; int addr; addr = mv88e6060_port_to_phy_addr(port); if (addr == -1) return 0xffff; - return reg_write(ds, addr, regnum, val); + return reg_write(priv, addr, regnum, val); } static const struct dsa_switch_ops mv88e6060_switch_ops = { .get_tag_protocol = mv88e6060_get_tag_protocol, - .probe = mv88e6060_drv_probe, .setup = mv88e6060_setup, .phy_read = mv88e6060_phy_read, .phy_write = mv88e6060_phy_write, }; -static struct dsa_switch_driver mv88e6060_switch_drv = { - .ops = &mv88e6060_switch_ops, -}; - -static int __init mv88e6060_init(void) +static int mv88e6060_probe(struct mdio_device *mdiodev) { - register_switch_driver(&mv88e6060_switch_drv); - return 0; + struct device *dev = &mdiodev->dev; + struct mv88e6060_priv *priv; + struct dsa_switch *ds; + const char *name; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->bus = mdiodev->bus; + priv->sw_addr = mdiodev->addr; + + name = mv88e6060_get_name(priv->bus, priv->sw_addr); + if (!name) + return -ENODEV; + + dev_info(dev, "switch %s detected\n", name); + + ds = dsa_switch_alloc(dev, MV88E6060_PORTS); + if (!ds) + return -ENOMEM; + + ds->priv = priv; + ds->dev = dev; + ds->ops = &mv88e6060_switch_ops; + + dev_set_drvdata(dev, ds); + + return dsa_register_switch(ds); } -module_init(mv88e6060_init); -static void __exit mv88e6060_cleanup(void) +static void mv88e6060_remove(struct mdio_device *mdiodev) { - unregister_switch_driver(&mv88e6060_switch_drv); + struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev); + + dsa_unregister_switch(ds); } -module_exit(mv88e6060_cleanup); + +static const struct of_device_id mv88e6060_of_match[] = { + { + .compatible = "marvell,mv88e6060", + }, + { /* sentinel */ }, +}; + +static struct mdio_driver mv88e6060_driver = { + .probe = mv88e6060_probe, + .remove = mv88e6060_remove, + .mdiodrv.driver = { + .name = "mv88e6060", + .of_match_table = mv88e6060_of_match, + }, +}; + +mdio_module_driver(mv88e6060_driver); MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>"); MODULE_DESCRIPTION("Driver for Marvell 88E6060 ethernet switch chip"); diff --git a/drivers/net/dsa/mv88e6060.h b/drivers/net/dsa/mv88e6060.h index 10249bd16292..c0e7a0f2fb6a 100644 --- a/drivers/net/dsa/mv88e6060.h +++ b/drivers/net/dsa/mv88e6060.h @@ -117,6 +117,7 @@ struct mv88e6060_priv { */ struct mii_bus *bus; int sw_addr; + struct dsa_switch *ds; }; #endif diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index f4e2db44ad91..489a899c80b6 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -553,11 +553,28 @@ int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port, int link, int speed, int duplex, int pause, phy_interface_t mode) { + struct phylink_link_state state; int err; if (!chip->info->ops->port_set_link) return 0; + if (!chip->info->ops->port_link_state) + return 0; + + err = chip->info->ops->port_link_state(chip, port, &state); + if (err) + return err; + + /* Has anything actually changed? 
We don't expect the + * interface mode to change without one of the other + * parameters also changing + */ + if (state.link == link && + state.speed == speed && + state.duplex == duplex) + return 0; + /* Port's MAC control must not be changed unless the link is down */ err = chip->info->ops->port_set_link(chip, port, 0); if (err) @@ -4631,14 +4648,6 @@ static int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip, return 0; } -static void mv88e6xxx_ports_cmode_init(struct mv88e6xxx_chip *chip) -{ - int i; - - for (i = 0; i < mv88e6xxx_num_ports(chip); i++) - chip->ports[i].cmode = MV88E6XXX_PORT_STS_CMODE_INVALID; -} - static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds, int port) { @@ -4647,58 +4656,6 @@ static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds, return chip->info->tag_protocol; } -#if IS_ENABLED(CONFIG_NET_DSA_LEGACY) -static const char *mv88e6xxx_drv_probe(struct device *dsa_dev, - struct device *host_dev, int sw_addr, - void **priv) -{ - struct mv88e6xxx_chip *chip; - struct mii_bus *bus; - int err; - - bus = dsa_host_dev_to_mii_bus(host_dev); - if (!bus) - return NULL; - - chip = mv88e6xxx_alloc_chip(dsa_dev); - if (!chip) - return NULL; - - /* Legacy SMI probing will only support chips similar to 88E6085 */ - chip->info = &mv88e6xxx_table[MV88E6085]; - - err = mv88e6xxx_smi_init(chip, bus, sw_addr); - if (err) - goto free; - - err = mv88e6xxx_detect(chip); - if (err) - goto free; - - mv88e6xxx_ports_cmode_init(chip); - - mutex_lock(&chip->reg_lock); - err = mv88e6xxx_switch_reset(chip); - mutex_unlock(&chip->reg_lock); - if (err) - goto free; - - mv88e6xxx_phy_init(chip); - - err = mv88e6xxx_mdios_register(chip, NULL); - if (err) - goto free; - - *priv = chip; - - return chip->info->name; -free: - devm_kfree(dsa_dev, chip); - - return NULL; -} -#endif - static int mv88e6xxx_port_mdb_prepare(struct dsa_switch *ds, int port, const struct switchdev_obj_port_mdb *mdb) { @@ -4753,9 +4710,6 @@ static int mv88e6xxx_port_egress_floods(struct dsa_switch *ds, int port, } static const struct dsa_switch_ops mv88e6xxx_switch_ops = { -#if IS_ENABLED(CONFIG_NET_DSA_LEGACY) - .probe = mv88e6xxx_drv_probe, -#endif .get_tag_protocol = mv88e6xxx_get_tag_protocol, .setup = mv88e6xxx_setup, .adjust_link = mv88e6xxx_adjust_link, @@ -4801,10 +4755,6 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = { .get_ts_info = mv88e6xxx_get_ts_info, }; -static struct dsa_switch_driver mv88e6xxx_switch_drv = { - .ops = &mv88e6xxx_switch_ops, -}; - static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip) { struct device *dev = chip->dev; @@ -4915,7 +4865,6 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) if (err) goto out; - mv88e6xxx_ports_cmode_init(chip); mv88e6xxx_phy_init(chip); if (chip->info->ops->get_eeprom) { @@ -5047,19 +4996,7 @@ static struct mdio_driver mv88e6xxx_driver = { }, }; -static int __init mv88e6xxx_init(void) -{ - register_switch_driver(&mv88e6xxx_switch_drv); - return mdio_driver_register(&mv88e6xxx_driver); -} -module_init(mv88e6xxx_init); - -static void __exit mv88e6xxx_cleanup(void) -{ - mdio_driver_unregister(&mv88e6xxx_driver); - unregister_switch_driver(&mv88e6xxx_switch_drv); -} -module_exit(mv88e6xxx_cleanup); +mdio_module_driver(mv88e6xxx_driver); MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>"); MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips"); diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h index c7bed263a0f4..39c85e98fb92 100644 --- 
a/drivers/net/dsa/mv88e6xxx/port.h +++ b/drivers/net/dsa/mv88e6xxx/port.h @@ -52,7 +52,6 @@ #define MV88E6185_PORT_STS_CMODE_1000BASE_X 0x0005 #define MV88E6185_PORT_STS_CMODE_PHY 0x0006 #define MV88E6185_PORT_STS_CMODE_DISABLED 0x0007 -#define MV88E6XXX_PORT_STS_CMODE_INVALID 0xff /* Offset 0x01: MAC (or PCS or Physical) Control Register */ #define MV88E6XXX_PORT_MAC_CTL 0x01 diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c index 0d15a12a4560..3568129fb7da 100644 --- a/drivers/net/dummy.c +++ b/drivers/net/dummy.c @@ -32,6 +32,7 @@ #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> +#include <linux/ethtool.h> #include <linux/init.h> #include <linux/moduleparam.h> #include <linux/rtnetlink.h> @@ -131,21 +132,9 @@ static void dummy_get_drvinfo(struct net_device *dev, strlcpy(info->version, DRV_VERSION, sizeof(info->version)); } -static int dummy_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *ts_info) -{ - ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE; - - ts_info->phc_index = -1; - - return 0; -}; - static const struct ethtool_ops dummy_ethtool_ops = { .get_drvinfo = dummy_get_drvinfo, - .get_ts_info = dummy_get_ts_info, + .get_ts_info = ethtool_op_get_ts_info, }; static void dummy_setup(struct net_device *dev) diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index a6eacf2099c3..7e40d14682f7 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -2236,7 +2236,7 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) } } - if (netif_xmit_stopped(txq) || !skb->xmit_more) { + if (netif_xmit_stopped(txq) || !netdev_xmit_more()) { /* trigger the dma engine. 
ena_com_write_sq_doorbell() * has a mb */ @@ -2258,8 +2258,7 @@ error_drop_packet: } static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb, - struct net_device *sb_dev, - select_queue_fallback_t fallback) + struct net_device *sb_dev) { u16 qid; /* we suspect that this is good for in--kernel network services that @@ -2269,7 +2268,7 @@ static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb, if (skb_rx_queue_recorded(skb)) qid = skb_get_rx_queue(skb); else - qid = fallback(dev, skb, NULL); + qid = netdev_pick_tx(dev, skb, NULL); return qid; } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index 4666084eda16..d5fd49dd25f3 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -1887,7 +1887,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel) smp_wmb(); ring->cur = cur_index + 1; - if (!packet->skb->xmit_more || + if (!netdev_xmit_more() || netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev, channel->queue_index))) xgbe_tx_start_xmit(channel, ring); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 0cc911f928b1..3dd0cecddba8 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -1612,7 +1612,7 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata, /* PTP v2, UDP, any kind of event packet */ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1); - /* PTP v1, UDP, any kind of event packet */ + /* Fall through - to PTP v1, UDP, any kind of event packet */ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1); XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1); @@ -1623,7 +1623,7 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata, /* PTP v2, UDP, Sync packet */ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1); - /* PTP v1, UDP, Sync packet */ + /* Fall through - to PTP v1, UDP, Sync packet */ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1); XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1); @@ -1634,7 +1634,7 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata, /* PTP v2, UDP, Delay_req packet */ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1); - /* PTP v1, UDP, Delay_req packet */ + /* Fall through - to PTP v1, UDP, Delay_req packet */ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1); XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1); diff --git a/drivers/net/ethernet/aquantia/Kconfig b/drivers/net/ethernet/aquantia/Kconfig index 7d623e90dc19..12472c5bb34d 100644 --- a/drivers/net/ethernet/aquantia/Kconfig +++ b/drivers/net/ethernet/aquantia/Kconfig @@ -17,7 +17,8 @@ if NET_VENDOR_AQUANTIA config AQTION tristate "aQuantia AQtion(tm) Support" - depends on PCI && X86_64 + depends on PCI + depends on X86_64 || ARM64 || COMPILE_TEST ---help--- This enables the support for the aQuantia AQtion(tm) Ethernet card. 
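The ena hunks above (like the hfi1, opa_vnic and bonding ones earlier) track two tree-wide API changes: skb->xmit_more is replaced by the netdev_xmit_more() helper, and ndo_select_queue loses its select_queue_fallback_t argument, with drivers calling netdev_pick_tx() directly. A minimal sketch of the resulting callback shape, mirroring the ena logic; example_select_queue is a hypothetical name, not a function from any driver in this diff:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static u16 example_select_queue(struct net_device *dev, struct sk_buff *skb,
				struct net_device *sb_dev)
{
	/* Reuse the recorded RX queue for looped-back traffic, as ena does. */
	if (skb_rx_queue_recorded(skb))
		return skb_get_rx_queue(skb);

	/* Otherwise defer to the core's default TX queue selection. */
	return netdev_pick_tx(dev, skb, NULL);
}
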
diff --git a/drivers/net/ethernet/aquantia/atlantic/Makefile b/drivers/net/ethernet/aquantia/atlantic/Makefile index 4556630ee286..1f99cf832476 100644 --- a/drivers/net/ethernet/aquantia/atlantic/Makefile +++ b/drivers/net/ethernet/aquantia/atlantic/Makefile @@ -36,6 +36,7 @@ atlantic-objs := aq_main.o \ aq_ring.o \ aq_hw_utils.o \ aq_ethtool.o \ + aq_drvinfo.o \ aq_filters.o \ hw_atl/hw_atl_a0.o \ hw_atl/hw_atl_b0.o \ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h index 3944ce7f0870..8f35c3f883f0 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h @@ -16,7 +16,7 @@ #define AQ_CFG_TCS_DEF 1U #define AQ_CFG_TXDS_DEF 4096U -#define AQ_CFG_RXDS_DEF 1024U +#define AQ_CFG_RXDS_DEF 2048U #define AQ_CFG_IS_POLLING_DEF 0U @@ -34,10 +34,16 @@ #define AQ_CFG_TCS_MAX 8U #define AQ_CFG_TX_FRAME_MAX (16U * 1024U) -#define AQ_CFG_RX_FRAME_MAX (4U * 1024U) +#define AQ_CFG_RX_FRAME_MAX (2U * 1024U) #define AQ_CFG_TX_CLEAN_BUDGET 256U +#define AQ_CFG_RX_REFILL_THRES 32U + +#define AQ_CFG_RX_HDR_SIZE 256U + +#define AQ_CFG_RX_PAGEORDER 0U + /* LRO */ #define AQ_CFG_IS_LRO_DEF 1U diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_common.h b/drivers/net/ethernet/aquantia/atlantic/aq_common.h index 6b6d1724676e..235bb3a72d66 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_common.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_common.h @@ -41,9 +41,6 @@ #define AQ_DEVICE_ID_AQC111S 0x91B1 #define AQ_DEVICE_ID_AQC112S 0x92B1 -#define AQ_DEVICE_ID_AQC111E 0x51B1 -#define AQ_DEVICE_ID_AQC112E 0x52B1 - #define HW_ATL_NIC_NAME "aQuantia AQtion 10Gbit Network Adapter" #define AQ_HWREV_ANY 0 diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c b/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c new file mode 100644 index 000000000000..f5a92b2a5cd6 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Copyright (C) 2014-2019 aQuantia Corporation. 
*/ + +/* File aq_drvinfo.c: Definition of common code for firmware info in sys.*/ + +#include <linux/init.h> +#include <linux/kobject.h> +#include <linux/module.h> +#include <linux/stat.h> +#include <linux/string.h> +#include <linux/hwmon.h> +#include <linux/uaccess.h> + +#include "aq_drvinfo.h" + +static int aq_hwmon_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *value) +{ + struct aq_nic_s *aq_nic = dev_get_drvdata(dev); + int temp; + int err; + + if (!aq_nic) + return -EIO; + + if (type != hwmon_temp) + return -EOPNOTSUPP; + + if (!aq_nic->aq_fw_ops->get_phy_temp) + return -EOPNOTSUPP; + + switch (attr) { + case hwmon_temp_input: + err = aq_nic->aq_fw_ops->get_phy_temp(aq_nic->aq_hw, &temp); + *value = temp; + return err; + default: + return -EOPNOTSUPP; + } +} + +static int aq_hwmon_read_string(struct device *dev, + enum hwmon_sensor_types type, + u32 attr, int channel, const char **str) +{ + struct aq_nic_s *aq_nic = dev_get_drvdata(dev); + + if (!aq_nic) + return -EIO; + + if (type != hwmon_temp) + return -EOPNOTSUPP; + + if (!aq_nic->aq_fw_ops->get_phy_temp) + return -EOPNOTSUPP; + + switch (attr) { + case hwmon_temp_label: + *str = "PHY Temperature"; + return 0; + default: + return -EOPNOTSUPP; + } +} + +static umode_t aq_hwmon_is_visible(const void *data, + enum hwmon_sensor_types type, + u32 attr, int channel) +{ + if (type != hwmon_temp) + return 0; + + switch (attr) { + case hwmon_temp_input: + case hwmon_temp_label: + return 0444; + default: + return 0; + } +} + +static const struct hwmon_ops aq_hwmon_ops = { + .is_visible = aq_hwmon_is_visible, + .read = aq_hwmon_read, + .read_string = aq_hwmon_read_string, +}; + +static u32 aq_hwmon_temp_config[] = { + HWMON_T_INPUT | HWMON_T_LABEL, + 0, +}; + +static const struct hwmon_channel_info aq_hwmon_temp = { + .type = hwmon_temp, + .config = aq_hwmon_temp_config, +}; + +static const struct hwmon_channel_info *aq_hwmon_info[] = { + &aq_hwmon_temp, + NULL, +}; + +static const struct hwmon_chip_info aq_hwmon_chip_info = { + .ops = &aq_hwmon_ops, + .info = aq_hwmon_info, +}; + +int aq_drvinfo_init(struct net_device *ndev) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct device *dev = &aq_nic->pdev->dev; + struct device *hwmon_dev; + int err = 0; + + hwmon_dev = devm_hwmon_device_register_with_info(dev, + ndev->name, + aq_nic, + &aq_hwmon_chip_info, + NULL); + + if (IS_ERR(hwmon_dev)) + err = PTR_ERR(hwmon_dev); + + return err; +} diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.h b/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.h new file mode 100644 index 000000000000..41fbb1358068 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Copyright (C) 2014-2017 aQuantia Corporation. 
*/ + +/* File aq_drvinfo.h: Declaration of common code for firmware info in sys.*/ + +#ifndef AQ_DRVINFO_H +#define AQ_DRVINFO_H + +#include "aq_nic.h" +#include "aq_hw.h" +#include "hw_atl/hw_atl_utils.h" + +int aq_drvinfo_init(struct net_device *ndev); + +#endif /* AQ_DRVINFO_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c index a718d7a1f76c..79da48094770 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c @@ -405,8 +405,10 @@ static int aq_ethtool_get_eee(struct net_device *ndev, struct ethtool_eee *eee) if (!aq_nic->aq_fw_ops->get_eee_rate) return -EOPNOTSUPP; + mutex_lock(&aq_nic->fwreq_mutex); err = aq_nic->aq_fw_ops->get_eee_rate(aq_nic->aq_hw, &rate, &supported_rates); + mutex_unlock(&aq_nic->fwreq_mutex); if (err < 0) return err; @@ -439,8 +441,10 @@ static int aq_ethtool_set_eee(struct net_device *ndev, struct ethtool_eee *eee) !aq_nic->aq_fw_ops->set_eee_rate)) return -EOPNOTSUPP; + mutex_lock(&aq_nic->fwreq_mutex); err = aq_nic->aq_fw_ops->get_eee_rate(aq_nic->aq_hw, &rate, &supported_rates); + mutex_unlock(&aq_nic->fwreq_mutex); if (err < 0) return err; @@ -452,20 +456,28 @@ static int aq_ethtool_set_eee(struct net_device *ndev, struct ethtool_eee *eee) cfg->eee_speeds = 0; } - return aq_nic->aq_fw_ops->set_eee_rate(aq_nic->aq_hw, rate); + mutex_lock(&aq_nic->fwreq_mutex); + err = aq_nic->aq_fw_ops->set_eee_rate(aq_nic->aq_hw, rate); + mutex_unlock(&aq_nic->fwreq_mutex); + + return err; } static int aq_ethtool_nway_reset(struct net_device *ndev) { struct aq_nic_s *aq_nic = netdev_priv(ndev); + int err = 0; if (unlikely(!aq_nic->aq_fw_ops->renegotiate)) return -EOPNOTSUPP; - if (netif_running(ndev)) - return aq_nic->aq_fw_ops->renegotiate(aq_nic->aq_hw); + if (netif_running(ndev)) { + mutex_lock(&aq_nic->fwreq_mutex); + err = aq_nic->aq_fw_ops->renegotiate(aq_nic->aq_hw); + mutex_unlock(&aq_nic->fwreq_mutex); + } - return 0; + return err; } static void aq_ethtool_get_pauseparam(struct net_device *ndev, @@ -503,7 +515,9 @@ static int aq_ethtool_set_pauseparam(struct net_device *ndev, else aq_nic->aq_hw->aq_nic_cfg->flow_control &= ~AQ_NIC_FC_TX; + mutex_lock(&aq_nic->fwreq_mutex); err = aq_nic->aq_fw_ops->set_flow_control(aq_nic->aq_hw); + mutex_unlock(&aq_nic->fwreq_mutex); return err; } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h index 81aab73dc22f..95fd6c852a9d 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h @@ -88,6 +88,8 @@ struct aq_stats_s { #define AQ_HW_IRQ_MSI 2U #define AQ_HW_IRQ_MSIX 3U +#define AQ_HW_SERVICE_IRQS 1U + #define AQ_HW_POWER_STATE_D0 0U #define AQ_HW_POWER_STATE_D3 3U @@ -259,6 +261,8 @@ struct aq_fw_ops { int (*update_stats)(struct aq_hw_s *self); + int (*get_phy_temp)(struct aq_hw_s *self, int *temp); + u32 (*get_flow_control)(struct aq_hw_s *self, u32 *fcmode); int (*set_flow_control)(struct aq_hw_s *self); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c index d526c4f19d34..22a1c784dc9c 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c @@ -53,6 +53,18 @@ void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value) writel(value, hw->mmio + reg); } +/* Most of 64-bit registers are in LSW, MSW form. 
+ Counters are normally implemented by HW as latched pairs: + reading LSW first locks MSW, to overcome LSW overflow + */ +u64 aq_hw_read_reg64(struct aq_hw_s *hw, u32 reg) +{ + u64 value = aq_hw_read_reg(hw, reg); + + value |= (u64)aq_hw_read_reg(hw, reg + 4) << 32; + return value; +} + int aq_hw_err_from_flags(struct aq_hw_s *hw) { int err = 0; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h index bc711238ca0c..bf73428ed689 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h @@ -35,6 +35,7 @@ void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk, u32 aq_hw_read_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk, u32 shift); u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg); void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value); +u64 aq_hw_read_reg64(struct aq_hw_s *hw, u32 reg); int aq_hw_err_from_flags(struct aq_hw_s *hw); #endif /* AQ_HW_UTILS_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c index 2a11c1eefd8f..7f45e9908582 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c @@ -23,8 +23,17 @@ MODULE_VERSION(AQ_CFG_DRV_VERSION); MODULE_AUTHOR(AQ_CFG_DRV_AUTHOR); MODULE_DESCRIPTION(AQ_CFG_DRV_DESC); +const char aq_ndev_driver_name[] = AQ_CFG_DRV_NAME; + static const struct net_device_ops aq_ndev_ops; +static struct workqueue_struct *aq_ndev_wq; + +void aq_ndev_schedule_work(struct work_struct *work) +{ + queue_work(aq_ndev_wq, work); +} + struct net_device *aq_ndev_alloc(void) { struct net_device *ndev = NULL; @@ -209,3 +218,35 @@ static const struct net_device_ops aq_ndev_ops = { .ndo_vlan_rx_add_vid = aq_ndo_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = aq_ndo_vlan_rx_kill_vid, }; + +static int __init aq_ndev_init_module(void) +{ + int ret; + + aq_ndev_wq = create_singlethread_workqueue(aq_ndev_driver_name); + if (!aq_ndev_wq) { + pr_err("Failed to create workqueue\n"); + return -ENOMEM; + } + + ret = aq_pci_func_register_driver(); + if (ret) { + destroy_workqueue(aq_ndev_wq); + return ret; + } + + return 0; +} + +static void __exit aq_ndev_exit_module(void) +{ + aq_pci_func_unregister_driver(); + + if (aq_ndev_wq) { + destroy_workqueue(aq_ndev_wq); + aq_ndev_wq = NULL; + } +} + +module_init(aq_ndev_init_module); +module_exit(aq_ndev_exit_module); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.h b/drivers/net/ethernet/aquantia/atlantic/aq_main.h index ce92152eb43e..5448b82fb7ea 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_main.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.h @@ -13,7 +13,9 @@ #define AQ_MAIN_H #include "aq_common.h" +#include "aq_nic.h" +void aq_ndev_schedule_work(struct work_struct *work); struct net_device *aq_ndev_alloc(void); #endif /* AQ_MAIN_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index ff83667410bd..e82d25a91bc1 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -14,6 +14,7 @@ #include "aq_vec.h" #include "aq_hw.h" #include "aq_pci_func.h" +#include "aq_main.h" #include <linux/moduleparam.h> #include <linux/netdevice.h> @@ -73,6 +74,7 @@ void aq_nic_cfg_start(struct aq_nic_s *self) cfg->tx_itr = aq_itr_tx; cfg->rx_itr = aq_itr_rx; + cfg->rxpageorder = AQ_CFG_RX_PAGEORDER; cfg->is_rss = AQ_CFG_IS_RSS_DEF; 
cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF; cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF; @@ -91,7 +93,8 @@ void aq_nic_cfg_start(struct aq_nic_s *self) /*rss rings */ cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF); cfg->vecs = min(cfg->vecs, num_online_cpus()); - cfg->vecs = min(cfg->vecs, self->irqvecs); + if (self->irqvecs > AQ_HW_SERVICE_IRQS) + cfg->vecs = min(cfg->vecs, self->irqvecs - AQ_HW_SERVICE_IRQS); /* cfg->vecs should be power of 2 for RSS */ if (cfg->vecs >= 8U) cfg->vecs = 8U; @@ -115,6 +118,15 @@ void aq_nic_cfg_start(struct aq_nic_s *self) cfg->vecs = 1U; } + /* Check if we have enough vectors allocated for + * link status IRQ. If no - we'll know link state from + * slower service task. + */ + if (AQ_HW_SERVICE_IRQS > 0 && cfg->vecs + 1 <= self->irqvecs) + cfg->link_irq_vec = cfg->vecs; + else + cfg->link_irq_vec = 0; + cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk; cfg->features = cfg->aq_hw_caps->hw_features; } @@ -160,30 +172,48 @@ static int aq_nic_update_link_status(struct aq_nic_s *self) return 0; } -static void aq_nic_service_timer_cb(struct timer_list *t) +static irqreturn_t aq_linkstate_threaded_isr(int irq, void *private) { - struct aq_nic_s *self = from_timer(self, t, service_timer); - int ctimer = AQ_CFG_SERVICE_TIMER_INTERVAL; - int err = 0; + struct aq_nic_s *self = private; + + if (!self) + return IRQ_NONE; + + aq_nic_update_link_status(self); + + self->aq_hw_ops->hw_irq_enable(self->aq_hw, + BIT(self->aq_nic_cfg.link_irq_vec)); + return IRQ_HANDLED; +} + +static void aq_nic_service_task(struct work_struct *work) +{ + struct aq_nic_s *self = container_of(work, struct aq_nic_s, + service_task); + int err; if (aq_utils_obj_test(&self->flags, AQ_NIC_FLAGS_IS_NOT_READY)) - goto err_exit; + return; err = aq_nic_update_link_status(self); if (err) - goto err_exit; + return; + mutex_lock(&self->fwreq_mutex); if (self->aq_fw_ops->update_stats) self->aq_fw_ops->update_stats(self->aq_hw); + mutex_unlock(&self->fwreq_mutex); aq_nic_update_ndev_stats(self); +} + +static void aq_nic_service_timer_cb(struct timer_list *t) +{ + struct aq_nic_s *self = from_timer(self, t, service_timer); - /* If no link - use faster timer rate to detect link up asap */ - if (!netif_carrier_ok(self->ndev)) - ctimer = max(ctimer / 2, 1); + mod_timer(&self->service_timer, jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL); -err_exit: - mod_timer(&self->service_timer, jiffies + ctimer); + aq_ndev_schedule_work(&self->service_task); } static void aq_nic_polling_timer_cb(struct timer_list *t) @@ -213,8 +243,10 @@ int aq_nic_ndev_register(struct aq_nic_s *self) if (err) goto err_exit; + mutex_lock(&self->fwreq_mutex); err = self->aq_fw_ops->get_mac_permanent(self->aq_hw, self->ndev->dev_addr); + mutex_unlock(&self->fwreq_mutex); if (err) goto err_exit; @@ -283,7 +315,9 @@ int aq_nic_init(struct aq_nic_s *self) unsigned int i = 0U; self->power_state = AQ_HW_POWER_STATE_D0; + mutex_lock(&self->fwreq_mutex); err = self->aq_hw_ops->hw_reset(self->aq_hw); + mutex_unlock(&self->fwreq_mutex); if (err < 0) goto err_exit; @@ -333,9 +367,11 @@ int aq_nic_start(struct aq_nic_s *self) err = aq_nic_update_interrupt_moderation_settings(self); if (err) goto err_exit; + + INIT_WORK(&self->service_task, aq_nic_service_task); + timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0); - mod_timer(&self->service_timer, jiffies + - AQ_CFG_SERVICE_TIMER_INTERVAL); + aq_nic_service_timer_cb(&self->service_timer); if (self->aq_nic_cfg.is_polling) { timer_setup(&self->polling_timer, 
aq_nic_polling_timer_cb, 0); @@ -344,13 +380,25 @@ int aq_nic_start(struct aq_nic_s *self) } else { for (i = 0U, aq_vec = self->aq_vec[0]; self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) { - err = aq_pci_func_alloc_irq(self, i, - self->ndev->name, aq_vec, + err = aq_pci_func_alloc_irq(self, i, self->ndev->name, + aq_vec_isr, aq_vec, aq_vec_get_affinity_mask(aq_vec)); if (err < 0) goto err_exit; } + if (self->aq_nic_cfg.link_irq_vec) { + int irqvec = pci_irq_vector(self->pdev, + self->aq_nic_cfg.link_irq_vec); + err = request_threaded_irq(irqvec, NULL, + aq_linkstate_threaded_isr, + IRQF_SHARED, + self->ndev->name, self); + if (err < 0) + goto err_exit; + self->msix_entry_mask |= (1 << self->aq_nic_cfg.link_irq_vec); + } + err = self->aq_hw_ops->hw_irq_enable(self->aq_hw, AQ_CFG_IRQ_MASK); if (err < 0) @@ -652,7 +700,14 @@ void aq_nic_get_stats(struct aq_nic_s *self, u64 *data) unsigned int i = 0U; unsigned int count = 0U; struct aq_vec_s *aq_vec = NULL; - struct aq_stats_s *stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw); + struct aq_stats_s *stats; + + if (self->aq_fw_ops->update_stats) { + mutex_lock(&self->fwreq_mutex); + self->aq_fw_ops->update_stats(self->aq_hw); + mutex_unlock(&self->fwreq_mutex); + } + stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw); if (!stats) goto err_exit; @@ -698,11 +753,12 @@ static void aq_nic_update_ndev_stats(struct aq_nic_s *self) struct net_device *ndev = self->ndev; struct aq_stats_s *stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw); - ndev->stats.rx_packets = stats->uprc + stats->mprc + stats->bprc; - ndev->stats.rx_bytes = stats->ubrc + stats->mbrc + stats->bbrc; + ndev->stats.rx_packets = stats->dma_pkt_rc; + ndev->stats.rx_bytes = stats->dma_oct_rc; ndev->stats.rx_errors = stats->erpr; - ndev->stats.tx_packets = stats->uptc + stats->mptc + stats->bptc; - ndev->stats.tx_bytes = stats->ubtc + stats->mbtc + stats->bbtc; + ndev->stats.rx_dropped = stats->dpc; + ndev->stats.tx_packets = stats->dma_pkt_tc; + ndev->stats.tx_bytes = stats->dma_oct_tc; ndev->stats.tx_errors = stats->erpt; ndev->stats.multicast = stats->mprc; } @@ -839,7 +895,9 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self, self->aq_nic_cfg.is_autoneg = false; } + mutex_lock(&self->fwreq_mutex); err = self->aq_fw_ops->set_link_speed(self->aq_hw, rate); + mutex_unlock(&self->fwreq_mutex); if (err < 0) goto err_exit; @@ -872,6 +930,7 @@ int aq_nic_stop(struct aq_nic_s *self) netif_carrier_off(self->ndev); del_timer_sync(&self->service_timer); + cancel_work_sync(&self->service_task); self->aq_hw_ops->hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK); @@ -899,14 +958,22 @@ void aq_nic_deinit(struct aq_nic_s *self) self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) aq_vec_deinit(aq_vec); - self->aq_fw_ops->deinit(self->aq_hw); + if (likely(self->aq_fw_ops->deinit)) { + mutex_lock(&self->fwreq_mutex); + self->aq_fw_ops->deinit(self->aq_hw); + mutex_unlock(&self->fwreq_mutex); + } if (self->power_state != AQ_HW_POWER_STATE_D0 || - self->aq_hw->aq_nic_cfg->wol) { - self->aq_fw_ops->set_power(self->aq_hw, - self->power_state, - self->ndev->dev_addr); - } + self->aq_hw->aq_nic_cfg->wol) + if (likely(self->aq_fw_ops->set_power)) { + mutex_lock(&self->fwreq_mutex); + self->aq_fw_ops->set_power(self->aq_hw, + self->power_state, + self->ndev->dev_addr); + mutex_unlock(&self->fwreq_mutex); + } + err_exit:; } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h index 8e34c1e49bf2..c03d38ed105d 100644 --- 
a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h @@ -26,11 +26,13 @@ struct aq_nic_cfg_s { u64 features; u32 rxds; /* rx ring size, descriptors # */ u32 txds; /* tx ring size, descriptors # */ - u32 vecs; /* vecs==allocated irqs */ + u32 vecs; /* allocated rx/tx vectors */ + u32 link_irq_vec; u32 irq_type; u32 itr; u16 rx_itr; u16 tx_itr; + u32 rxpageorder; u32 num_rss_queues; u32 mtu; u32 flow_control; @@ -91,6 +93,7 @@ struct aq_nic_s { const struct aq_fw_ops *aq_fw_ops; struct aq_nic_cfg_s aq_nic_cfg; struct timer_list service_timer; + struct work_struct service_task; struct timer_list polling_timer; struct aq_hw_link_status_s link_status; struct { @@ -103,6 +106,8 @@ struct aq_nic_s { struct pci_dev *pdev; unsigned int msix_entry_mask; u32 irqvecs; + /* mutex to serialize FW interface access operations */ + struct mutex fwreq_mutex; struct aq_hw_rx_fltrs_s aq_hw_rx_fltrs; }; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c index 0217ff4669a4..9cb0864d6d8d 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c @@ -20,6 +20,7 @@ #include "hw_atl/hw_atl_a0.h" #include "hw_atl/hw_atl_b0.h" #include "aq_filters.h" +#include "aq_drvinfo.h" static const struct pci_device_id aq_pci_tbl[] = { { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_0001), }, @@ -42,9 +43,6 @@ static const struct pci_device_id aq_pci_tbl[] = { { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111S), }, { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112S), }, - { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111E), }, - { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112E), }, - {} }; @@ -74,9 +72,6 @@ static const struct aq_board_revision_s hw_atl_boards[] = { { AQ_DEVICE_ID_AQC109S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc109s, }, { AQ_DEVICE_ID_AQC111S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc111s, }, { AQ_DEVICE_ID_AQC112S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc112s, }, - - { AQ_DEVICE_ID_AQC111E, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc111e, }, - { AQ_DEVICE_ID_AQC112E, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc112e, }, }; MODULE_DEVICE_TABLE(pci, aq_pci_tbl); @@ -139,26 +134,27 @@ err_exit: } int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i, - char *name, void *aq_vec, cpumask_t *affinity_mask) + char *name, irq_handler_t irq_handler, + void *irq_arg, cpumask_t *affinity_mask) { struct pci_dev *pdev = self->pdev; int err; if (pdev->msix_enabled || pdev->msi_enabled) - err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr, 0, - name, aq_vec); + err = request_irq(pci_irq_vector(pdev, i), irq_handler, 0, + name, irq_arg); else err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr_legacy, - IRQF_SHARED, name, aq_vec); + IRQF_SHARED, name, irq_arg); if (err >= 0) { self->msix_entry_mask |= (1 << i); - self->aq_vec[i] = aq_vec; - if (pdev->msix_enabled) + if (pdev->msix_enabled && affinity_mask) irq_set_affinity_hint(pci_irq_vector(pdev, i), affinity_mask); } + return err; } @@ -166,16 +162,22 @@ void aq_pci_func_free_irqs(struct aq_nic_s *self) { struct pci_dev *pdev = self->pdev; unsigned int i; + void *irq_data; for (i = 32U; i--;) { if (!((1U << i) & self->msix_entry_mask)) continue; - if (i >= AQ_CFG_VECS_MAX) + if (self->aq_nic_cfg.link_irq_vec && + i == self->aq_nic_cfg.link_irq_vec) + irq_data = self; + else if (i < AQ_CFG_VECS_MAX) + irq_data = self->aq_vec[i]; + else continue; if (pdev->msix_enabled) 
irq_set_affinity_hint(pci_irq_vector(pdev, i), NULL); - free_irq(pci_irq_vector(pdev, i), self->aq_vec[i]); + free_irq(pci_irq_vector(pdev, i), irq_data); self->msix_entry_mask &= ~(1U << i); } } @@ -185,7 +187,7 @@ unsigned int aq_pci_func_get_irq_type(struct aq_nic_s *self) if (self->pdev->msix_enabled) return AQ_HW_IRQ_MSIX; if (self->pdev->msi_enabled) - return AQ_HW_IRQ_MSIX; + return AQ_HW_IRQ_MSI; return AQ_HW_IRQ_LEGACY; } @@ -223,6 +225,8 @@ static int aq_pci_probe(struct pci_dev *pdev, SET_NETDEV_DEV(ndev, &pdev->dev); pci_set_drvdata(pdev, self); + mutex_init(&self->fwreq_mutex); + err = aq_pci_probe_get_hw_by_id(pdev, &self->aq_hw_ops, &aq_nic_get_cfg(self)->aq_hw_caps); if (err) @@ -268,6 +272,7 @@ static int aq_pci_probe(struct pci_dev *pdev, numvecs = min((u8)AQ_CFG_VECS_DEF, aq_nic_get_cfg(self)->aq_hw_caps->msix_irqs); numvecs = min(numvecs, num_online_cpus()); + numvecs += AQ_HW_SERVICE_IRQS; /*enable interrupts */ #if !AQ_CFG_FORCE_LEGACY_INT err = pci_alloc_irq_vectors(self->pdev, 1, numvecs, @@ -289,6 +294,8 @@ static int aq_pci_probe(struct pci_dev *pdev, if (err < 0) goto err_register; + aq_drvinfo_init(ndev); + return 0; err_register: @@ -365,4 +372,13 @@ static struct pci_driver aq_pci_ops = { .shutdown = aq_pci_shutdown, }; -module_pci_driver(aq_pci_ops); +int aq_pci_func_register_driver(void) +{ + return pci_register_driver(&aq_pci_ops); +} + +void aq_pci_func_unregister_driver(void) +{ + pci_unregister_driver(&aq_pci_ops); +} + diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h index aeee67bf69fa..670f9a940d65 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h @@ -24,9 +24,12 @@ struct aq_board_revision_s { int aq_pci_func_init(struct pci_dev *pdev); int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i, - char *name, void *aq_vec, - cpumask_t *affinity_mask); + char *name, irq_handler_t irq_handler, + void *irq_arg, cpumask_t *affinity_mask); void aq_pci_func_free_irqs(struct aq_nic_s *self); unsigned int aq_pci_func_get_irq_type(struct aq_nic_s *self); +int aq_pci_func_register_driver(void); +void aq_pci_func_unregister_driver(void); + #endif /* AQ_PCI_FUNC_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index e2ffb159cbe2..350e385528fd 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -12,10 +12,89 @@ #include "aq_ring.h" #include "aq_nic.h" #include "aq_hw.h" +#include "aq_hw_utils.h" #include <linux/netdevice.h> #include <linux/etherdevice.h> +static inline void aq_free_rxpage(struct aq_rxpage *rxpage, struct device *dev) +{ + unsigned int len = PAGE_SIZE << rxpage->order; + + dma_unmap_page(dev, rxpage->daddr, len, DMA_FROM_DEVICE); + + /* Drop the ref for being in the ring. 
*/ + __free_pages(rxpage->page, rxpage->order); + rxpage->page = NULL; +} + +static int aq_get_rxpage(struct aq_rxpage *rxpage, unsigned int order, + struct device *dev) +{ + struct page *page; + dma_addr_t daddr; + int ret = -ENOMEM; + + page = dev_alloc_pages(order); + if (unlikely(!page)) + goto err_exit; + + daddr = dma_map_page(dev, page, 0, PAGE_SIZE << order, + DMA_FROM_DEVICE); + + if (unlikely(dma_mapping_error(dev, daddr))) + goto free_page; + + rxpage->page = page; + rxpage->daddr = daddr; + rxpage->order = order; + rxpage->pg_off = 0; + + return 0; + +free_page: + __free_pages(page, order); + +err_exit: + return ret; +} + +static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf, + int order) +{ + int ret; + + if (rxbuf->rxdata.page) { + /* One means ring is the only user and can reuse */ + if (page_ref_count(rxbuf->rxdata.page) > 1) { + /* Try reuse buffer */ + rxbuf->rxdata.pg_off += AQ_CFG_RX_FRAME_MAX; + if (rxbuf->rxdata.pg_off + AQ_CFG_RX_FRAME_MAX <= + (PAGE_SIZE << order)) { + self->stats.rx.pg_flips++; + } else { + /* Buffer exhausted. We have other users and + * should release this page and realloc + */ + aq_free_rxpage(&rxbuf->rxdata, + aq_nic_get_dev(self->aq_nic)); + self->stats.rx.pg_losts++; + } + } else { + rxbuf->rxdata.pg_off = 0; + self->stats.rx.pg_reuses++; + } + } + + if (!rxbuf->rxdata.page) { + ret = aq_get_rxpage(&rxbuf->rxdata, order, + aq_nic_get_dev(self->aq_nic)); + return ret; + } + + return 0; +} + static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self, struct aq_nic_s *aq_nic) { @@ -81,6 +160,11 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self, self->idx = idx; self->size = aq_nic_cfg->rxds; self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size; + self->page_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE + + (AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 
1 : 0)) - 1; + + if (aq_nic_cfg->rxpageorder > self->page_order) + self->page_order = aq_nic_cfg->rxpageorder; self = aq_ring_alloc(self, aq_nic); if (!self) { @@ -201,22 +285,21 @@ int aq_ring_rx_clean(struct aq_ring_s *self, int budget) { struct net_device *ndev = aq_nic_get_ndev(self->aq_nic); - int err = 0; bool is_rsc_completed = true; + int err = 0; for (; (self->sw_head != self->hw_head) && budget; self->sw_head = aq_ring_next_dx(self, self->sw_head), --budget, ++(*work_done)) { struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; + struct aq_ring_buff_s *buff_ = NULL; struct sk_buff *skb = NULL; unsigned int next_ = 0U; unsigned int i = 0U; - struct aq_ring_buff_s *buff_ = NULL; + u16 hdr_len; - if (buff->is_error) { - __free_pages(buff->page, 0); + if (buff->is_error) continue; - } if (buff->is_cleaned) continue; @@ -246,45 +329,67 @@ int aq_ring_rx_clean(struct aq_ring_s *self, } } + dma_sync_single_range_for_cpu(aq_nic_get_dev(self->aq_nic), + buff->rxdata.daddr, + buff->rxdata.pg_off, + buff->len, DMA_FROM_DEVICE); + /* for single fragment packets use build_skb() */ if (buff->is_eop && buff->len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN) { - skb = build_skb(page_address(buff->page), + skb = build_skb(aq_buf_vaddr(&buff->rxdata), AQ_CFG_RX_FRAME_MAX); if (unlikely(!skb)) { err = -ENOMEM; goto err_exit; } - skb_put(skb, buff->len); + page_ref_inc(buff->rxdata.page); } else { - skb = netdev_alloc_skb(ndev, ETH_HLEN); + skb = napi_alloc_skb(napi, AQ_CFG_RX_HDR_SIZE); if (unlikely(!skb)) { err = -ENOMEM; goto err_exit; } - skb_put(skb, ETH_HLEN); - memcpy(skb->data, page_address(buff->page), ETH_HLEN); - skb_add_rx_frag(skb, 0, buff->page, ETH_HLEN, - buff->len - ETH_HLEN, - SKB_TRUESIZE(buff->len - ETH_HLEN)); + hdr_len = buff->len; + if (hdr_len > AQ_CFG_RX_HDR_SIZE) + hdr_len = eth_get_headlen(skb->dev, + aq_buf_vaddr(&buff->rxdata), + AQ_CFG_RX_HDR_SIZE); + + memcpy(__skb_put(skb, hdr_len), aq_buf_vaddr(&buff->rxdata), + ALIGN(hdr_len, sizeof(long))); + + if (buff->len - hdr_len > 0) { + skb_add_rx_frag(skb, 0, buff->rxdata.page, + buff->rxdata.pg_off + hdr_len, + buff->len - hdr_len, + AQ_CFG_RX_FRAME_MAX); + page_ref_inc(buff->rxdata.page); + } if (!buff->is_eop) { - for (i = 1U, next_ = buff->next, - buff_ = &self->buff_ring[next_]; - true; next_ = buff_->next, - buff_ = &self->buff_ring[next_], ++i) { - skb_add_rx_frag(skb, i, - buff_->page, 0, + buff_ = buff; + i = 1U; + do { + next_ = buff_->next, + buff_ = &self->buff_ring[next_]; + + dma_sync_single_range_for_cpu( + aq_nic_get_dev(self->aq_nic), + buff_->rxdata.daddr, + buff_->rxdata.pg_off, buff_->len, - SKB_TRUESIZE(buff->len - - ETH_HLEN)); + DMA_FROM_DEVICE); + skb_add_rx_frag(skb, i++, + buff_->rxdata.page, + buff_->rxdata.pg_off, + buff_->len, + AQ_CFG_RX_FRAME_MAX); + page_ref_inc(buff_->rxdata.page); buff_->is_cleaned = 1; - - if (buff_->is_eop) - break; - } + } while (!buff_->is_eop); } } @@ -310,12 +415,15 @@ err_exit: int aq_ring_rx_fill(struct aq_ring_s *self) { - unsigned int pages_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE + - (AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 
1 : 0)) - 1; + unsigned int page_order = self->page_order; struct aq_ring_buff_s *buff = NULL; int err = 0; int i = 0; + if (aq_ring_avail_dx(self) < min_t(unsigned int, AQ_CFG_RX_REFILL_THRES, + self->size / 2)) + return err; + for (i = aq_ring_avail_dx(self); i--; self->sw_tail = aq_ring_next_dx(self, self->sw_tail)) { buff = &self->buff_ring[self->sw_tail]; @@ -323,30 +431,15 @@ int aq_ring_rx_fill(struct aq_ring_s *self) buff->flags = 0U; buff->len = AQ_CFG_RX_FRAME_MAX; - buff->page = alloc_pages(GFP_ATOMIC | __GFP_COMP, pages_order); - if (!buff->page) { - err = -ENOMEM; + err = aq_get_rxpages(self, buff, page_order); + if (err) goto err_exit; - } - - buff->pa = dma_map_page(aq_nic_get_dev(self->aq_nic), - buff->page, 0, - AQ_CFG_RX_FRAME_MAX, DMA_FROM_DEVICE); - - if (dma_mapping_error(aq_nic_get_dev(self->aq_nic), buff->pa)) { - err = -ENOMEM; - goto err_exit; - } + buff->pa = aq_buf_daddr(&buff->rxdata); buff = NULL; } err_exit: - if (err < 0) { - if (buff && buff->page) - __free_pages(buff->page, 0); - } - return err; } @@ -359,10 +452,7 @@ void aq_ring_rx_deinit(struct aq_ring_s *self) self->sw_head = aq_ring_next_dx(self, self->sw_head)) { struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; - dma_unmap_page(aq_nic_get_dev(self->aq_nic), buff->pa, - AQ_CFG_RX_FRAME_MAX, DMA_FROM_DEVICE); - - __free_pages(buff->page, 0); + aq_free_rxpage(&buff->rxdata, aq_nic_get_dev(self->aq_nic)); } err_exit:; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h index ac1329f4051d..cfffc301e746 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h @@ -17,6 +17,13 @@ struct page; struct aq_nic_cfg_s; +struct aq_rxpage { + struct page *page; + dma_addr_t daddr; + unsigned int order; + unsigned int pg_off; +}; + /* TxC SOP DX EOP * +----------+----------+----------+----------- * 8bytes|len l3,l4 | pa | pa | pa @@ -31,28 +38,21 @@ struct aq_nic_cfg_s; */ struct __packed aq_ring_buff_s { union { + /* RX/TX */ + dma_addr_t pa; /* RX */ struct { u32 rss_hash; u16 next; u8 is_hash_l4; u8 rsvd1; - struct page *page; + struct aq_rxpage rxdata; }; /* EOP */ struct { dma_addr_t pa_eop; struct sk_buff *skb; }; - /* DX */ - struct { - dma_addr_t pa; - }; - /* SOP */ - struct { - dma_addr_t pa_sop; - u32 len_pkt_sop; - }; /* TxC */ struct { u32 mss; @@ -91,6 +91,9 @@ struct aq_ring_stats_rx_s { u64 bytes; u64 lro_packets; u64 jumbo_packets; + u64 pg_losts; + u64 pg_flips; + u64 pg_reuses; }; struct aq_ring_stats_tx_s { @@ -116,6 +119,7 @@ struct aq_ring_s { unsigned int size; /* descriptors number */ unsigned int dx_size; /* TX or RX descriptor size, */ /* stored here for fater math */ + unsigned int page_order; union aq_ring_stats_s stats; dma_addr_t dx_ring_pa; }; @@ -126,6 +130,16 @@ struct aq_ring_param_s { cpumask_t affinity_mask; }; +static inline void *aq_buf_vaddr(struct aq_rxpage *rxpage) +{ + return page_to_virt(rxpage->page) + rxpage->pg_off; +} + +static inline dma_addr_t aq_buf_daddr(struct aq_rxpage *rxpage) +{ + return rxpage->daddr + rxpage->pg_off; +} + static inline unsigned int aq_ring_next_dx(struct aq_ring_s *self, unsigned int dx) { diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c index d335c334fa56..a2e4ca1782ae 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c @@ -353,6 +353,9 @@ void aq_vec_add_stats(struct aq_vec_s *self, 
stats_rx->errors += rx->errors; stats_rx->jumbo_packets += rx->jumbo_packets; stats_rx->lro_packets += rx->lro_packets; + stats_rx->pg_losts += rx->pg_losts; + stats_rx->pg_flips += rx->pg_flips; + stats_rx->pg_reuses += rx->pg_reuses; stats_tx->packets += tx->packets; stats_tx->bytes += tx->bytes; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c index f6f8338153a2..9fe507fe2d7f 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c @@ -350,10 +350,10 @@ err_exit: static int hw_atl_a0_hw_init(struct aq_hw_s *self, u8 *mac_addr) { static u32 aq_hw_atl_igcr_table_[4][2] = { - { 0x20000000U, 0x20000000U }, /* AQ_IRQ_INVALID */ - { 0x20000080U, 0x20000080U }, /* AQ_IRQ_LEGACY */ - { 0x20000021U, 0x20000025U }, /* AQ_IRQ_MSI */ - { 0x20000022U, 0x20000026U } /* AQ_IRQ_MSIX */ + [AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U }, + [AQ_HW_IRQ_LEGACY] = { 0x20000080U, 0x20000080U }, + [AQ_HW_IRQ_MSI] = { 0x20000021U, 0x20000025U }, + [AQ_HW_IRQ_MSIX] = { 0x20000022U, 0x20000026U }, }; int err = 0; @@ -619,8 +619,6 @@ err_exit: static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self, struct aq_ring_s *ring) { - struct device *ndev = aq_nic_get_dev(ring->aq_nic); - for (; ring->hw_head != ring->sw_tail; ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) { struct aq_ring_buff_s *buff = NULL; @@ -687,8 +685,6 @@ static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self, is_err &= ~0x18U; is_err &= ~0x04U; - dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE); - if (is_err || rxd_wb->type & 0x1000U) { /* status error or DMA error */ buff->is_error = 1U; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index b31dba1b1a55..bfcda12d73de 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -259,7 +259,13 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self, hw_atl_rpo_lro_time_base_divider_set(self, 0x61AU); hw_atl_rpo_lro_inactive_interval_set(self, 0); - hw_atl_rpo_lro_max_coalescing_interval_set(self, 2); + /* the LRO timebase divider is 5 uS (0x61a), + * which is multiplied by 50(0x32) + * to get a maximum coalescing interval of 250 uS, + * which is the default value + */ + hw_atl_rpo_lro_max_coalescing_interval_set(self, 50); + hw_atl_rpo_lro_qsessions_lim_set(self, 1U); @@ -273,6 +279,10 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self, hw_atl_rpo_lro_en_set(self, aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U); + hw_atl_itr_rsc_en_set(self, + aq_nic_cfg->is_lro ? 
0xFFFFFFFFU : 0U); + + hw_atl_itr_rsc_delay_set(self, 1U); } return aq_hw_err_from_flags(self); } @@ -378,10 +388,10 @@ err_exit: static int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr) { static u32 aq_hw_atl_igcr_table_[4][2] = { - { 0x20000000U, 0x20000000U }, /* AQ_IRQ_INVALID */ - { 0x20000080U, 0x20000080U }, /* AQ_IRQ_LEGACY */ - { 0x20000021U, 0x20000025U }, /* AQ_IRQ_MSI */ - { 0x20000022U, 0x20000026U } /* AQ_IRQ_MSIX */ + [AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U }, + [AQ_HW_IRQ_LEGACY] = { 0x20000080U, 0x20000080U }, + [AQ_HW_IRQ_MSI] = { 0x20000021U, 0x20000025U }, + [AQ_HW_IRQ_MSIX] = { 0x20000022U, 0x20000026U }, }; int err = 0; @@ -433,6 +443,11 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr) ((HW_ATL_B0_ERR_INT << 0x18) | (1U << 0x1F)) | ((HW_ATL_B0_ERR_INT << 0x10) | (1U << 0x17)), 0U); + /* Enable link interrupt */ + if (aq_nic_cfg->link_irq_vec) + hw_atl_reg_gen_irq_map_set(self, BIT(7) | + aq_nic_cfg->link_irq_vec, 3U); + hw_atl_b0_hw_offload_set(self, aq_nic_cfg); err_exit: @@ -654,8 +669,6 @@ err_exit: static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, struct aq_ring_s *ring) { - struct device *ndev = aq_nic_get_dev(ring->aq_nic); - for (; ring->hw_head != ring->sw_tail; ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) { struct aq_ring_buff_s *buff = NULL; @@ -697,8 +710,6 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, buff->is_cso_err = 0U; } - dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE); - if ((rx_stat & BIT(0)) || rxd_wb->type & 0x1000U) { /* MAC error or DMA error */ buff->is_error = 1U; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h index 2cc8dacfdc27..b1c0b6850e60 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h @@ -32,9 +32,6 @@ extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc109; #define hw_atl_b0_caps_aqc111s hw_atl_b0_caps_aqc108 #define hw_atl_b0_caps_aqc112s hw_atl_b0_caps_aqc109 -#define hw_atl_b0_caps_aqc111e hw_atl_b0_caps_aqc108 -#define hw_atl_b0_caps_aqc112e hw_atl_b0_caps_aqc109 - extern const struct aq_hw_ops hw_atl_ops_b0; #define hw_atl_ops_b1 hw_atl_ops_b0 diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h index b318eefd36ae..ea98a08d7820 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h @@ -78,7 +78,7 @@ #define HW_ATL_B0_TC_MAX 1U #define HW_ATL_B0_RSS_MAX 8U -#define HW_ATL_B0_LRO_RXD_MAX 2U +#define HW_ATL_B0_LRO_RXD_MAX 16U #define HW_ATL_B0_RS_SLIP_ENABLED 0U /* (256k -1(max pay_len) - 54(header)) */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c index 0722b8e01964..eaab25cd08b3 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c @@ -49,11 +49,6 @@ u32 hw_atl_glb_soft_res_get(struct aq_hw_s *aq_hw) HW_ATL_GLB_SOFT_RES_SHIFT); } -u32 hw_atl_reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw) -{ - return aq_hw_read_reg(aq_hw, HW_ATL_RX_DMA_STAT_COUNTER7_ADR); -} - u32 hw_atl_reg_glb_mif_id_get(struct aq_hw_s *aq_hw) { return aq_hw_read_reg(aq_hw, HW_ATL_GLB_MIF_ID_ADR); @@ -65,44 +60,24 @@ u32 
hw_atl_rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw) return aq_hw_read_reg(aq_hw, HW_ATL_RPB_RX_DMA_DROP_PKT_CNT_ADR); } -u32 hw_atl_stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw) -{ - return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERLSW); -} - -u32 hw_atl_stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw) +u64 hw_atl_stats_rx_dma_good_octet_counter_get(struct aq_hw_s *aq_hw) { - return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERLSW); + return aq_hw_read_reg64(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERLSW); } -u32 hw_atl_stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw) +u64 hw_atl_stats_rx_dma_good_pkt_counter_get(struct aq_hw_s *aq_hw) { - return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERLSW); + return aq_hw_read_reg64(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERLSW); } -u32 hw_atl_stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw) +u64 hw_atl_stats_tx_dma_good_octet_counter_get(struct aq_hw_s *aq_hw) { - return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERLSW); + return aq_hw_read_reg64(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERLSW); } -u32 hw_atl_stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw) +u64 hw_atl_stats_tx_dma_good_pkt_counter_get(struct aq_hw_s *aq_hw) { - return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERMSW); -} - -u32 hw_atl_stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw) -{ - return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERMSW); -} - -u32 hw_atl_stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw) -{ - return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERMSW); -} - -u32 hw_atl_stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw) -{ - return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERMSW); + return aq_hw_read_reg64(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERLSW); } /* interrupt */ @@ -315,6 +290,21 @@ void hw_atl_itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq) HW_ATL_ITR_RES_SHIFT, res_irq); } +/* set RSC interrupt */ +void hw_atl_itr_rsc_en_set(struct aq_hw_s *aq_hw, u32 enable) +{ + aq_hw_write_reg(aq_hw, HW_ATL_ITR_RSC_EN_ADR, enable); +} + +/* set RSC delay */ +void hw_atl_itr_rsc_delay_set(struct aq_hw_s *aq_hw, u32 delay) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_ITR_RSC_DELAY_ADR, + HW_ATL_ITR_RSC_DELAY_MSK, + HW_ATL_ITR_RSC_DELAY_SHIFT, + delay); +} + /* rdm */ void hw_atl_rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca) { diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h index d46351890b16..2eb44e1cff70 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h @@ -40,29 +40,17 @@ u32 hw_atl_glb_soft_res_get(struct aq_hw_s *aq_hw); u32 hw_atl_rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw); -/* get rx dma good octet counter lsw */ -u32 hw_atl_stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw); +/* get rx dma good octet counter */ +u64 hw_atl_stats_rx_dma_good_octet_counter_get(struct aq_hw_s *aq_hw); -/* get rx dma good packet counter lsw */ -u32 hw_atl_stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw); +/* get rx dma good packet counter */ +u64 hw_atl_stats_rx_dma_good_pkt_counter_get(struct aq_hw_s *aq_hw); -/* get tx dma good octet counter lsw */ -u32 hw_atl_stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw); +/* get tx dma good octet 
counter */ +u64 hw_atl_stats_tx_dma_good_octet_counter_get(struct aq_hw_s *aq_hw); -/* get tx dma good packet counter lsw */ -u32 hw_atl_stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw); - -/* get rx dma good octet counter msw */ -u32 hw_atl_stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw); - -/* get rx dma good packet counter msw */ -u32 hw_atl_stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw); - -/* get tx dma good octet counter msw */ -u32 hw_atl_stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw); - -/* get tx dma good packet counter msw */ -u32 hw_atl_stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw); +/* get tx dma good packet counter */ +u64 hw_atl_stats_tx_dma_good_pkt_counter_get(struct aq_hw_s *aq_hw); /* get msm rx errors counter register */ u32 hw_atl_reg_mac_msm_rx_errs_cnt_get(struct aq_hw_s *aq_hw); @@ -82,9 +70,6 @@ u32 hw_atl_reg_mac_msm_rx_bcst_octets_counter1get(struct aq_hw_s *aq_hw); /* get msm rx unicast octets counter register 0 */ u32 hw_atl_reg_mac_msm_rx_ucst_octets_counter0get(struct aq_hw_s *aq_hw); -/* get rx dma statistics counter 7 */ -u32 hw_atl_reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw); - /* get msm tx errors counter register */ u32 hw_atl_reg_mac_msm_tx_errs_cnt_get(struct aq_hw_s *aq_hw); @@ -152,6 +137,12 @@ u32 hw_atl_itr_res_irq_get(struct aq_hw_s *aq_hw); /* set reset interrupt */ void hw_atl_itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq); +/* set RSC interrupt */ +void hw_atl_itr_rsc_en_set(struct aq_hw_s *aq_hw, u32 enable); + +/* set RSC delay */ +void hw_atl_itr_rsc_delay_set(struct aq_hw_s *aq_hw, u32 delay); + /* rdm */ /* set cpu id */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h index fb45bc2d99cf..b64140924a02 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h @@ -58,9 +58,6 @@ /* preprocessor definitions for msm rx unicast octets counter register 0 */ #define HW_ATL_MAC_MSM_RX_UCST_OCTETS_COUNTER0_ADR 0x000001b8u -/* preprocessor definitions for rx dma statistics counter 7 */ -#define HW_ATL_RX_DMA_STAT_COUNTER7_ADR 0x00006818u - /* preprocessor definitions for msm tx unicast frames counter register */ #define HW_ATL_MAC_MSM_TX_UCST_FRM_CNT_ADR 0x00000108u @@ -95,6 +92,19 @@ #define HW_ATL_ITR_RES_MSK 0x80000000 /* lower bit position of bitfield itr_reset */ #define HW_ATL_ITR_RES_SHIFT 31 + +/* register address for bitfield rsc_en */ +#define HW_ATL_ITR_RSC_EN_ADR 0x00002200 + +/* register address for bitfield rsc_delay */ +#define HW_ATL_ITR_RSC_DELAY_ADR 0x00002204 +/* bitmask for bitfield rsc_delay */ +#define HW_ATL_ITR_RSC_DELAY_MSK 0x0000000f +/* width of bitfield rsc_delay */ +#define HW_ATL_ITR_RSC_DELAY_WIDTH 4 +/* lower bit position of bitfield rsc_delay */ +#define HW_ATL_ITR_RSC_DELAY_SHIFT 0 + /* register address for bitfield dca{d}_cpuid[7:0] */ #define HW_ATL_RDM_DCADCPUID_ADR(dca) (0x00006100 + (dca) * 0x4) /* bitmask for bitfield dca{d}_cpuid[7:0] */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c index eb4b99d56081..1208f7ecdd76 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c @@ -545,7 +545,7 @@ void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self, pmbox->stats.ubtc = 
pmbox->stats.uptc * mtu; pmbox->stats.dpc = atomic_read(&self->dpc); } else { - pmbox->stats.dpc = hw_atl_reg_rx_dma_stat_counter7get(self); + pmbox->stats.dpc = hw_atl_rpb_rx_dma_drop_pkt_cnt_get(self); } err_exit:; @@ -763,6 +763,7 @@ static int hw_atl_fw1x_deinit(struct aq_hw_s *self) int hw_atl_utils_update_stats(struct aq_hw_s *self) { struct hw_atl_utils_mbox mbox; + struct aq_stats_s *cs = &self->curr_stats; hw_atl_utils_mpi_read_stats(self, &mbox); @@ -789,10 +790,11 @@ int hw_atl_utils_update_stats(struct aq_hw_s *self) AQ_SDELTA(dpc); } #undef AQ_SDELTA - self->curr_stats.dma_pkt_rc = hw_atl_stats_rx_dma_good_pkt_counterlsw_get(self); - self->curr_stats.dma_pkt_tc = hw_atl_stats_tx_dma_good_pkt_counterlsw_get(self); - self->curr_stats.dma_oct_rc = hw_atl_stats_rx_dma_good_octet_counterlsw_get(self); - self->curr_stats.dma_oct_tc = hw_atl_stats_tx_dma_good_octet_counterlsw_get(self); + + cs->dma_pkt_rc = hw_atl_stats_rx_dma_good_pkt_counter_get(self); + cs->dma_pkt_tc = hw_atl_stats_tx_dma_good_pkt_counter_get(self); + cs->dma_oct_rc = hw_atl_stats_rx_dma_good_octet_counter_get(self); + cs->dma_oct_tc = hw_atl_stats_tx_dma_good_octet_counter_get(self); memcpy(&self->last_stats, &mbox.stats, sizeof(mbox.stats)); @@ -960,6 +962,7 @@ const struct aq_fw_ops aq_fw_1x_ops = { .set_state = hw_atl_utils_mpi_set_state, .update_link_status = hw_atl_utils_mpi_get_link_status, .update_stats = hw_atl_utils_update_stats, + .get_phy_temp = NULL, .set_power = aq_fw1x_set_power, .set_eee_rate = NULL, .get_eee_rate = NULL, diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c index fe6c5658e016..fbc9d6ac841f 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c @@ -38,6 +38,7 @@ #define HW_ATL_FW2X_CTRL_WOL BIT(CTRL_WOL) #define HW_ATL_FW2X_CTRL_LINK_DROP BIT(CTRL_LINK_DROP) #define HW_ATL_FW2X_CTRL_PAUSE BIT(CTRL_PAUSE) +#define HW_ATL_FW2X_CTRL_TEMPERATURE BIT(CTRL_TEMPERATURE) #define HW_ATL_FW2X_CTRL_ASYMMETRIC_PAUSE BIT(CTRL_ASYMMETRIC_PAUSE) #define HW_ATL_FW2X_CTRL_FORCE_RECONNECT BIT(CTRL_FORCE_RECONNECT) @@ -310,6 +311,40 @@ static int aq_fw2x_update_stats(struct aq_hw_s *self) return hw_atl_utils_update_stats(self); } +static int aq_fw2x_get_phy_temp(struct aq_hw_s *self, int *temp) +{ + u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR); + u32 temp_val = mpi_opts & HW_ATL_FW2X_CTRL_TEMPERATURE; + u32 phy_temp_offset; + u32 temp_res; + int err = 0; + u32 val; + + phy_temp_offset = self->mbox_addr + + offsetof(struct hw_atl_utils_mbox, info) + + offsetof(struct hw_aq_info, phy_temperature); + /* Toggle statistics bit for FW to 0x36C.18 (CTRL_TEMPERATURE) */ + mpi_opts = mpi_opts ^ HW_ATL_FW2X_CTRL_TEMPERATURE; + aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts); + /* Wait FW to report back */ + err = readx_poll_timeout_atomic(aq_fw2x_state2_get, self, val, + temp_val != + (val & HW_ATL_FW2X_CTRL_TEMPERATURE), + 1U, 10000U); + err = hw_atl_utils_fw_downld_dwords(self, phy_temp_offset, + &temp_res, 1); + + if (err) + return err; + + /* Convert PHY temperature from 1/256 degree Celsius + * to 1/1000 degree Celsius. 
+ */ + *temp = temp_res * 1000 / 256; + + return 0; +} + static int aq_fw2x_set_sleep_proxy(struct aq_hw_s *self, u8 *mac) { struct hw_atl_utils_fw_rpc *rpc = NULL; @@ -509,6 +544,7 @@ const struct aq_fw_ops aq_fw_2x_ops = { .set_state = aq_fw2x_set_state, .update_link_status = aq_fw2x_update_link_status, .update_stats = aq_fw2x_update_stats, + .get_phy_temp = aq_fw2x_get_phy_temp, .set_power = aq_fw2x_set_power, .set_eee_rate = aq_fw2x_set_eee_rate, .get_eee_rate = aq_fw2x_get_eee_rate, diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c index 6f56276015a4..f62deeb6e941 100644 --- a/drivers/net/ethernet/aurora/nb8800.c +++ b/drivers/net/ethernet/aurora/nb8800.c @@ -404,6 +404,7 @@ static int nb8800_xmit(struct sk_buff *skb, struct net_device *dev) unsigned int dma_len; unsigned int align; unsigned int next; + bool xmit_more; if (atomic_read(&priv->tx_free) <= NB8800_DESC_LOW) { netif_stop_queue(dev); @@ -423,9 +424,10 @@ static int nb8800_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } + xmit_more = netdev_xmit_more(); if (atomic_dec_return(&priv->tx_free) <= NB8800_DESC_LOW) { netif_stop_queue(dev); - skb->xmit_more = 0; + xmit_more = false; } next = priv->tx_next; @@ -450,7 +452,7 @@ static int nb8800_xmit(struct sk_buff *skb, struct net_device *dev) desc->n_addr = priv->tx_bufs[next].dma_desc; desc->config = DESC_BTS(2) | DESC_DS | DESC_EOF | dma_len; - if (!skb->xmit_more) + if (!xmit_more) desc->config |= DESC_EOC; txb->skb = skb; @@ -468,7 +470,7 @@ static int nb8800_xmit(struct sk_buff *skb, struct net_device *dev) priv->tx_next = next; - if (!skb->xmit_more) { + if (!xmit_more) { smp_wmb(); priv->tx_chain->ready = true; priv->tx_chain = NULL; diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index 716bfbba59cf..461b2c0b2ed6 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -196,6 +196,7 @@ config BNXT depends on PCI select FW_LOADER select LIBCRC32C + select NET_DEVLINK ---help--- This driver supports Broadcom NetXtreme-C/E 10/25/40/50 gigabit Ethernet cards. 
To compile this driver as a module, choose M here: diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index bc3ac369cbe3..4e87a303f83e 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -116,15 +116,6 @@ static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv, writel_relaxed(lower_32_bits(addr), d + DESC_ADDR_LO); } -static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv, - struct dma_desc *desc, - unsigned int port) -{ - /* Ports are latched, so write upper address first */ - tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port)); - tdma_writel(priv, desc->addr_lo, TDMA_WRITE_PORT_LO(port)); -} - /* Ethtool operations */ static void bcm_sysport_set_rx_csum(struct net_device *dev, netdev_features_t wanted) @@ -1291,11 +1282,10 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb, struct bcm_sysport_tx_ring *ring; struct bcm_sysport_cb *cb; struct netdev_queue *txq; - struct dma_desc *desc; + u32 len_status, addr_lo; unsigned int skb_len; unsigned long flags; dma_addr_t mapping; - u32 len_status; u16 queue; int ret; @@ -1338,10 +1328,7 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb, dma_unmap_addr_set(cb, dma_addr, mapping); dma_unmap_len_set(cb, dma_len, skb_len); - /* Fetch a descriptor entry from our pool */ - desc = ring->desc_cpu; - - desc->addr_lo = lower_32_bits(mapping); + addr_lo = lower_32_bits(mapping); len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK; len_status |= (skb_len << DESC_LEN_SHIFT); len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) << @@ -1354,16 +1341,9 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb, ring->curr_desc = 0; ring->desc_count--; - /* Ensure write completion of the descriptor status/length - * in DRAM before the System Port WRITE_PORT register latches - * the value - */ - wmb(); - desc->addr_status_len = len_status; - wmb(); - - /* Write this descriptor address to the RING write port */ - tdma_port_write_desc_addr(priv, desc, ring->index); + /* Ports are latched, so write upper address first */ + tdma_writel(priv, len_status, TDMA_WRITE_PORT_HI(ring->index)); + tdma_writel(priv, addr_lo, TDMA_WRITE_PORT_LO(ring->index)); /* Check ring space and update SW control flow */ if (ring->desc_count == 0) @@ -1489,28 +1469,14 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv, unsigned int index) { struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index]; - struct device *kdev = &priv->pdev->dev; size_t size; - void *p; u32 reg; /* Simple descriptors partitioning for now */ size = 256; - /* We just need one DMA descriptor which is DMA-able, since writing to - * the port will allocate a new descriptor in its internal linked-list - */ - p = dma_alloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma, - GFP_KERNEL); - if (!p) { - netif_err(priv, hw, priv->netdev, "DMA alloc failed\n"); - return -ENOMEM; - } - ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL); if (!ring->cbs) { - dma_free_coherent(kdev, sizeof(struct dma_desc), - ring->desc_cpu, ring->desc_dma); netif_err(priv, hw, priv->netdev, "CB allocation failed\n"); return -ENOMEM; } @@ -1523,7 +1489,6 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv, ring->size = size; ring->clean_index = 0; ring->alloc_size = ring->size; - ring->desc_cpu = p; ring->desc_count = ring->size; ring->curr_desc = 0; @@ -1578,8 +1543,8 @@ static int bcm_sysport_init_tx_ring(struct 
bcm_sysport_priv *priv, napi_enable(&ring->napi); netif_dbg(priv, hw, priv->netdev, - "TDMA cfg, size=%d, desc_cpu=%p switch q=%d,port=%d\n", - ring->size, ring->desc_cpu, ring->switch_queue, + "TDMA cfg, size=%d, switch q=%d,port=%d\n", + ring->size, ring->switch_queue, ring->switch_port); return 0; @@ -1589,7 +1554,6 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv, unsigned int index) { struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index]; - struct device *kdev = &priv->pdev->dev; u32 reg; /* Caller should stop the TDMA engine */ @@ -1611,12 +1575,6 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv, kfree(ring->cbs); ring->cbs = NULL; - - if (ring->desc_dma) { - dma_free_coherent(kdev, sizeof(struct dma_desc), - ring->desc_cpu, ring->desc_dma); - ring->desc_dma = 0; - } ring->size = 0; ring->alloc_size = 0; @@ -2274,8 +2232,7 @@ static const struct ethtool_ops bcm_sysport_ethtool_ops = { }; static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb, - struct net_device *sb_dev, - select_queue_fallback_t fallback) + struct net_device *sb_dev) { struct bcm_sysport_priv *priv = netdev_priv(dev); u16 queue = skb_get_queue_mapping(skb); @@ -2283,7 +2240,7 @@ static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb, unsigned int q, port; if (!netdev_uses_dsa(dev)) - return fallback(dev, skb, NULL); + return netdev_pick_tx(dev, skb, NULL); /* DSA tagging layer will have configured the correct queue */ q = BRCM_TAG_GET_QUEUE(queue); @@ -2291,7 +2248,7 @@ static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb, tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues]; if (unlikely(!tx_ring)) - return fallback(dev, skb, NULL); + return netdev_pick_tx(dev, skb, NULL); return tx_ring->index; } @@ -2599,11 +2556,11 @@ static int bcm_sysport_probe(struct platform_device *pdev) priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK; dev_info(&pdev->dev, - "Broadcom SYSTEMPORT%s" REV_FMT - " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n", + "Broadcom SYSTEMPORT%s " REV_FMT + " (irqs: %d, %d, TXQs: %d, RXQs: %d)\n", priv->is_lite ? 
" Lite" : "", (priv->rev >> 8) & 0xff, priv->rev & 0xff, - priv->base, priv->irq0, priv->irq1, txq, rxq); + priv->irq0, priv->irq1, txq, rxq); return 0; diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index 0b192fea9c5d..6f3141c86436 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -516,12 +516,6 @@ struct bcm_rsb { #define TDMA_DEBUG 0x64c -/* Transmit/Receive descriptor */ -struct dma_desc { - u32 addr_status_len; - u32 addr_lo; -}; - /* Number of Receive hardware descriptor words */ #define SP_NUM_HW_RX_DESC_WORDS 1024 #define SP_LT_NUM_HW_RX_DESC_WORDS 256 @@ -530,7 +524,7 @@ struct dma_desc { #define SP_NUM_TX_DESC 1536 #define SP_LT_NUM_TX_DESC 256 -#define WORDS_PER_DESC (sizeof(struct dma_desc) / sizeof(u32)) +#define WORDS_PER_DESC 2 /* Rx/Tx common counter group.*/ struct bcm_sysport_pkt_counters { @@ -718,7 +712,6 @@ struct bcm_sysport_net_dim { struct bcm_sysport_tx_ring { spinlock_t lock; /* Ring lock for tx reclaim/xmit */ struct napi_struct napi; /* NAPI per tx queue */ - dma_addr_t desc_dma; /* DMA cookie */ unsigned int index; /* Ring index */ unsigned int size; /* Ring current size */ unsigned int alloc_size; /* Ring one-time allocated size */ @@ -727,7 +720,6 @@ struct bcm_sysport_tx_ring { unsigned int c_index; /* Last consumer index */ unsigned int clean_index; /* Current clean index */ struct bcm_sysport_cb *cbs; /* Transmit control blocks */ - struct dma_desc *desc_cpu; /* CPU view of the descriptor */ struct bcm_sysport_priv *priv; /* private context backpointer */ unsigned long packets; /* packets statistics */ unsigned long bytes; /* bytes statistics */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index ecb1bd7eb508..6012fe61735e 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -1909,8 +1909,7 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw) } u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, - struct net_device *sb_dev, - select_queue_fallback_t fallback) + struct net_device *sb_dev) { struct bnx2x *bp = netdev_priv(dev); @@ -1932,7 +1931,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, } /* select a non-FCoE queue */ - return fallback(dev, skb, NULL) % + return netdev_pick_tx(dev, skb, NULL) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos); } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index 2462e7aa0c5d..7f8df08a7a4c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -498,8 +498,7 @@ int bnx2x_set_vf_spoofchk(struct net_device *dev, int idx, bool val); /* select_queue callback */ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, - struct net_device *sb_dev, - select_queue_fallback_t fallback); + struct net_device *sb_dev); static inline void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index d9057c8bbeef..78326a6c0aba 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -3024,7 +3024,7 @@ struct afex_stats { #define BCM_5710_FW_MAJOR_VERSION 7 #define BCM_5710_FW_MINOR_VERSION 13 -#define BCM_5710_FW_REVISION_VERSION 1 +#define 
BCM_5710_FW_REVISION_VERSION 11 #define BCM_5710_FW_ENGINEERING_VERSION 0 #define BCM_5710_FW_COMPILE_FLAGS 1 @@ -3639,8 +3639,10 @@ struct client_init_rx_data { #define CLIENT_INIT_RX_DATA_TPA_EN_IPV6_SHIFT 1 #define CLIENT_INIT_RX_DATA_TPA_MODE (0x1<<2) #define CLIENT_INIT_RX_DATA_TPA_MODE_SHIFT 2 -#define CLIENT_INIT_RX_DATA_RESERVED5 (0x1F<<3) -#define CLIENT_INIT_RX_DATA_RESERVED5_SHIFT 3 +#define CLIENT_INIT_RX_DATA_TPA_OVER_VLAN_DISABLE (0x1<<3) +#define CLIENT_INIT_RX_DATA_TPA_OVER_VLAN_DISABLE_SHIFT 3 +#define CLIENT_INIT_RX_DATA_RESERVED5 (0xF<<4) +#define CLIENT_INIT_RX_DATA_RESERVED5_SHIFT 4 u8 vmqueue_mode_en_flg; u8 extra_data_over_sgl_en_flg; u8 cache_line_alignment_log_size; @@ -3831,7 +3833,7 @@ struct eth_classify_cmd_header { */ struct eth_classify_header { u8 rule_cnt; - u8 reserved0; + u8 warning_on_error; __le16 reserved1; __le32 echo; }; @@ -4752,6 +4754,8 @@ struct tpa_update_ramrod_data { __le32 sge_page_base_hi; __le16 sge_pause_thr_low; __le16 sge_pause_thr_high; + u8 tpa_over_vlan_disable; + u8 reserved[7]; }; @@ -4946,7 +4950,7 @@ struct fairness_vars_per_port { u32 upper_bound; u32 fair_threshold; u32 fairness_timeout; - u32 reserved0; + u32 size_thr; }; /* @@ -5415,7 +5419,9 @@ struct function_start_data { u8 sd_vlan_force_pri_val; u8 c2s_pri_tt_valid; u8 c2s_pri_default; - u8 reserved2[6]; + u8 tx_vlan_filtering_enable; + u8 tx_vlan_filtering_use_pvid; + u8 reserved2[4]; struct c2s_pri_trans_table_entry c2s_pri_trans_table; }; @@ -5448,7 +5454,8 @@ struct function_update_data { u8 reserved1; __le16 sd_vlan_tag; __le16 sd_vlan_eth_type; - __le16 reserved0; + u8 tx_vlan_filtering_pvid_change_flg; + u8 reserved0; __le32 reserved2; }; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 626b491f7674..0d6c98a9e07b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -15376,27 +15376,47 @@ static int bnx2x_enable_ptp_packets(struct bnx2x *bp) return 0; } +#define BNX2X_P2P_DETECT_PARAM_MASK 0x5F5 +#define BNX2X_P2P_DETECT_RULE_MASK 0x3DBB +#define BNX2X_PTP_TX_ON_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6AA) +#define BNX2X_PTP_TX_ON_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EEE) +#define BNX2X_PTP_V1_L4_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x7EE) +#define BNX2X_PTP_V1_L4_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3FFE) +#define BNX2X_PTP_V2_L4_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x7EA) +#define BNX2X_PTP_V2_L4_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3FEE) +#define BNX2X_PTP_V2_L2_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6BF) +#define BNX2X_PTP_V2_L2_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EFF) +#define BNX2X_PTP_V2_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6AA) +#define BNX2X_PTP_V2_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EEE) + int bnx2x_configure_ptp_filters(struct bnx2x *bp) { int port = BP_PORT(bp); + u32 param, rule; int rc; if (!bp->hwtstamp_ioctl_called) return 0; + param = port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK : + NIG_REG_P0_TLLH_PTP_PARAM_MASK; + rule = port ? NIG_REG_P1_TLLH_PTP_RULE_MASK : + NIG_REG_P0_TLLH_PTP_RULE_MASK; switch (bp->tx_type) { case HWTSTAMP_TX_ON: bp->flags |= TX_TIMESTAMPING_EN; - REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK : - NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x6AA); - REG_WR(bp, port ? 
NIG_REG_P1_TLLH_PTP_RULE_MASK : - NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3EEE); + REG_WR(bp, param, BNX2X_PTP_TX_ON_PARAM_MASK); + REG_WR(bp, rule, BNX2X_PTP_TX_ON_RULE_MASK); break; case HWTSTAMP_TX_ONESTEP_SYNC: BNX2X_ERR("One-step timestamping is not supported\n"); return -ERANGE; } + param = port ? NIG_REG_P1_LLH_PTP_PARAM_MASK : + NIG_REG_P0_LLH_PTP_PARAM_MASK; + rule = port ? NIG_REG_P1_LLH_PTP_RULE_MASK : + NIG_REG_P0_LLH_PTP_RULE_MASK; switch (bp->rx_filter) { case HWTSTAMP_FILTER_NONE: break; @@ -15410,30 +15430,24 @@ int bnx2x_configure_ptp_filters(struct bnx2x *bp) case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: bp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; /* Initialize PTP detection for UDP/IPv4 events */ - REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK : - NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EE); - REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK : - NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFE); + REG_WR(bp, param, BNX2X_PTP_V1_L4_PARAM_MASK); + REG_WR(bp, rule, BNX2X_PTP_V1_L4_RULE_MASK); break; case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; /* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */ - REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK : - NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EA); - REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK : - NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FEE); + REG_WR(bp, param, BNX2X_PTP_V2_L4_PARAM_MASK); + REG_WR(bp, rule, BNX2X_PTP_V2_L4_RULE_MASK); break; case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; /* Initialize PTP detection L2 events */ - REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK : - NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6BF); - REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK : - NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EFF); + REG_WR(bp, param, BNX2X_PTP_V2_L2_PARAM_MASK); + REG_WR(bp, rule, BNX2X_PTP_V2_L2_RULE_MASK); break; case HWTSTAMP_FILTER_PTP_V2_EVENT: @@ -15441,10 +15455,8 @@ int bnx2x_configure_ptp_filters(struct bnx2x *bp) case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; /* Initialize PTP detection L2, UDP/IPv4 or UDP/IPv6 events */ - REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK : - NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6AA); - REG_WR(bp, port ? 
NIG_REG_P1_LLH_PTP_RULE_MASK : - NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EEE); + REG_WR(bp, param, BNX2X_PTP_V2_PARAM_MASK); + REG_WR(bp, rule, BNX2X_PTP_V2_RULE_MASK); break; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 52ade133b57c..a0de3c368f4a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -551,7 +551,7 @@ normal_tx: prod = NEXT_TX(prod); txr->tx_prod = prod; - if (!skb->xmit_more || netif_xmit_stopped(txq)) + if (!netdev_xmit_more() || netif_xmit_stopped(txq)) bnxt_db_write(bp, &txr->tx_db, prod); tx_done: @@ -559,7 +559,7 @@ tx_done: mmiowb(); if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) { - if (skb->xmit_more && !tx_buf->is_push) + if (netdev_xmit_more() && !tx_buf->is_push) bnxt_db_write(bp, &txr->tx_db, prod); netif_tx_stop_queue(txq); @@ -899,7 +899,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp, DMA_ATTR_WEAK_ORDERING); if (unlikely(!payload)) - payload = eth_get_headlen(data_ptr, len); + payload = eth_get_headlen(bp->dev, data_ptr, len); skb = napi_alloc_skb(&rxr->bnapi->napi, payload); if (!skb) { @@ -10068,23 +10068,6 @@ static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, return rc; } -static int bnxt_get_phys_port_name(struct net_device *dev, char *buf, - size_t len) -{ - struct bnxt *bp = netdev_priv(dev); - int rc; - - /* The PF and it's VF-reps only support the switchdev framework */ - if (!BNXT_PF(bp)) - return -EOPNOTSUPP; - - rc = snprintf(buf, len, "p%d", bp->pf.port_id); - - if (rc >= len) - return -EOPNOTSUPP; - return 0; -} - int bnxt_get_port_parent_id(struct net_device *dev, struct netdev_phys_item_id *ppid) { @@ -10103,6 +10086,13 @@ int bnxt_get_port_parent_id(struct net_device *dev, return 0; } +static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev) +{ + struct bnxt *bp = netdev_priv(dev); + + return &bp->dl_port; +} + static const struct net_device_ops bnxt_netdev_ops = { .ndo_open = bnxt_open, .ndo_start_xmit = bnxt_start_xmit, @@ -10134,8 +10124,7 @@ static const struct net_device_ops bnxt_netdev_ops = { .ndo_bpf = bnxt_xdp, .ndo_bridge_getlink = bnxt_bridge_getlink, .ndo_bridge_setlink = bnxt_bridge_setlink, - .ndo_get_port_parent_id = bnxt_get_port_parent_id, - .ndo_get_phys_port_name = bnxt_get_phys_port_name + .ndo_get_devlink_port = bnxt_get_devlink_port, }; static void bnxt_remove_one(struct pci_dev *pdev) @@ -10459,6 +10448,26 @@ static int bnxt_init_mac_addr(struct bnxt *bp) return rc; } +static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[]) +{ + struct pci_dev *pdev = bp->pdev; + int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN); + u32 dw; + + if (!pos) { + netdev_info(bp->dev, "Unable do read adapter's DSN"); + return -EOPNOTSUPP; + } + + /* DSN (two dw) is at an offset of 4 from the cap pos */ + pos += 4; + pci_read_config_dword(pdev, pos, &dw); + put_unaligned_le32(dw, &dsn[0]); + pci_read_config_dword(pdev, pos + 4, &dw); + put_unaligned_le32(dw, &dsn[4]); + return 0; +} + static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { static int version_printed; @@ -10599,6 +10608,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto init_err_pci_clean; } + /* Read the adapter's DSN to use as the eswitch switch_id */ + rc = bnxt_pcie_dsn_get(bp, bp->switch_id); + if (rc) + goto init_err_pci_clean; + bnxt_hwrm_func_qcfg(bp); bnxt_hwrm_vnic_qcaps(bp); bnxt_hwrm_port_led_qcaps(bp); diff --git 
a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index e1feb97bcd81..549c90d3e465 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -9,6 +9,7 @@ #include <linux/pci.h> #include <linux/netdevice.h> +#include <net/devlink.h> #include "bnxt_hsi.h" #include "bnxt.h" #include "bnxt_vfr.h" @@ -228,6 +229,9 @@ int bnxt_dl_register(struct bnxt *bp) goto err_dl_unreg; } + devlink_port_attrs_set(&bp->dl_port, DEVLINK_PORT_FLAVOUR_PHYSICAL, + bp->pf.port_id, false, 0, + bp->switch_id, sizeof(bp->switch_id)); rc = devlink_port_register(dl, &bp->dl_port, bp->pf.port_id); if (rc) { netdev_err(bp->dev, "devlink_port_register failed"); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c index 2bdd2da9aac7..f760921389a3 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -406,26 +406,6 @@ static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep, dev->min_mtu = ETH_ZLEN; } -static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[]) -{ - struct pci_dev *pdev = bp->pdev; - int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN); - u32 dw; - - if (!pos) { - netdev_info(bp->dev, "Unable do read adapter's DSN"); - return -EOPNOTSUPP; - } - - /* DSN (two dw) is at an offset of 4 from the cap pos */ - pos += 4; - pci_read_config_dword(pdev, pos, &dw); - put_unaligned_le32(dw, &dsn[0]); - pci_read_config_dword(pdev, pos + 4, &dw); - put_unaligned_le32(dw, &dsn[4]); - return 0; -} - static int bnxt_vf_reps_create(struct bnxt *bp) { u16 *cfa_code_map = NULL, num_vfs = pci_num_vf(bp->pdev); @@ -490,11 +470,6 @@ static int bnxt_vf_reps_create(struct bnxt *bp) } } - /* Read the adapter's DSN to use as the eswitch switch_id */ - rc = bnxt_pcie_dsn_get(bp, bp->switch_id); - if (rc) - goto err; - /* publish cfa_code_map only after all VF-reps have been initialized */ bp->cfa_code_map = cfa_code_map; bp->eswitch_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV; diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 510dfc1c236b..57dc3cbff36e 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -4038,15 +4038,14 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe) case L5CM_RAMROD_CMD_ID_CLOSE: { struct iscsi_kcqe *l5kcqe = (struct iscsi_kcqe *) kcqe; - if (l4kcqe->status != 0 || l5kcqe->completion_status != 0) { - netdev_warn(dev->netdev, "RAMROD CLOSE compl with status 0x%x completion status 0x%x\n", - l4kcqe->status, l5kcqe->completion_status); - opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP; - /* Fall through */ - } else { + if (l4kcqe->status == 0 && l5kcqe->completion_status == 0) break; - } + + netdev_warn(dev->netdev, "RAMROD CLOSE compl with status 0x%x completion status 0x%x\n", + l4kcqe->status, l5kcqe->completion_status); + opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP; } + /* Fall through */ case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED: case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: case L4_KCQE_OPCODE_VALUE_RESET_COMP: diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 983245c0867c..4fd973571e4c 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -1665,7 +1665,7 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) if 
(ring->free_bds <= (MAX_SKB_FRAGS + 1)) netif_tx_stop_queue(txq); - if (!skb->xmit_more || netif_xmit_stopped(txq)) + if (!netdev_xmit_more() || netif_xmit_stopped(txq)) /* Packets are ready, update producer index */ bcmgenet_tdma_ring_writel(priv, ring->index, ring->prod_index, TDMA_PROD_INDEX); diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 060a6f386104..664fedf0cd80 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -8156,7 +8156,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) netif_tx_wake_queue(txq); } - if (!skb->xmit_more || netif_xmit_stopped(txq)) { + if (!netdev_xmit_more() || netif_xmit_stopped(txq)) { /* Packets are ready, update Tx producer idx on card. */ tw32_tx_mbox(tnapi->prodmbox, entry); mmiowb(); @@ -12763,9 +12763,6 @@ static int tg3_set_phys_id(struct net_device *dev, { struct tg3 *tp = netdev_priv(dev); - if (!netif_running(tp->dev)) - return -EAGAIN; - switch (state) { case ETHTOOL_ID_ACTIVE: return 1; /* cycle on/off once per second */ diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 3da2795e2486..009ed4c1baf3 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -4362,8 +4362,7 @@ static int __maybe_unused macb_resume(struct device *dev) static int __maybe_unused macb_runtime_suspend(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct net_device *netdev = platform_get_drvdata(pdev); + struct net_device *netdev = dev_get_drvdata(dev); struct macb *bp = netdev_priv(netdev); if (!(device_may_wakeup(&bp->dev->dev))) { @@ -4379,8 +4378,7 @@ static int __maybe_unused macb_runtime_suspend(struct device *dev) static int __maybe_unused macb_runtime_resume(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct net_device *netdev = platform_get_drvdata(pdev); + struct net_device *netdev = dev_get_drvdata(dev); struct macb *bp = netdev_priv(netdev); if (!(device_may_wakeup(&bp->dev->dev))) { diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig index 6650e2a5f171..7612ab6b286d 100644 --- a/drivers/net/ethernet/cavium/Kconfig +++ b/drivers/net/ethernet/cavium/Kconfig @@ -68,6 +68,7 @@ config LIQUIDIO imply PTP_1588_CLOCK select FW_LOADER select LIBCRC32C + select NET_DEVLINK ---help--- This driver supports Cavium LiquidIO Intelligent Server Adapters based on CN66XX, CN68XX and CN23XX chips. 
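The skb->xmit_more conversions in the surrounding hunks (nb8800, bnxt, bcmgenet, tg3, and liquidio below) all have the same shape: the per-skb hint is gone, and drivers now query the stack through netdev_xmit_more(), which is only meaningful from inside ndo_start_xmit. A minimal sketch of the resulting doorbell pattern; struct foo_priv and the foo_*() helpers are hypothetical stand-ins, not code from any of these drivers:

#include <linux/netdevice.h>

struct foo_priv;

/* Hypothetical ring helpers, assumed to exist elsewhere in the driver. */
void foo_post_descriptor(struct foo_priv *priv, struct sk_buff *skb);
void foo_kick_hw(struct foo_priv *priv);

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);
	struct netdev_queue *txq;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	/* Queue the frame into the TX ring. */
	foo_post_descriptor(priv, skb);

	/* Ring the doorbell only when the stack has nothing further queued
	 * behind this skb, or when the queue has been stopped and must
	 * still make forward progress.
	 */
	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
		foo_kick_hw(priv);

	return NETDEV_TX_OK;
}

The netif_xmit_stopped() test mirrors what the bnxt and tg3 hunks above do: even when more frames were promised, a stopped queue has to be flushed now, because no later transmit will arrive to ring the doorbell.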
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index fb6f813cff65..eab805579f96 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -2522,7 +2522,7 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) irh->vlan = skb_vlan_tag_get(skb) & 0xfff; } - xmit_more = skb->xmit_more; + xmit_more = netdev_xmit_more(); if (unlikely(cmdsetup.s.timestamp)) status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more); diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index 54b245797d2e..db0b90555acb 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -1585,7 +1585,7 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) irh->vlan = skb_vlan_tag_get(skb) & VLAN_VID_MASK; } - xmit_more = skb->xmit_more; + xmit_more = netdev_xmit_more(); if (unlikely(cmdsetup.s.timestamp)) status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more); diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index 673c57b8023f..81c281ada63b 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -962,13 +962,13 @@ static void bgx_poll_for_sgmii_link(struct lmac *lmac) lmac->last_duplex = (an_result >> 1) & 0x1; switch (speed) { case 0: - lmac->last_speed = 10; + lmac->last_speed = SPEED_10; break; case 1: - lmac->last_speed = 100; + lmac->last_speed = SPEED_100; break; case 2: - lmac->last_speed = 1000; + lmac->last_speed = SPEED_1000; break; default: lmac->link_up = false; @@ -1012,10 +1012,10 @@ static void bgx_poll_for_link(struct work_struct *work) !(smu_link & SMU_RX_CTL_STATUS)) { lmac->link_up = 1; if (lmac->lmac_type == BGX_MODE_XLAUI) - lmac->last_speed = 40000; + lmac->last_speed = SPEED_40000; else - lmac->last_speed = 10000; - lmac->last_duplex = 1; + lmac->last_speed = SPEED_10000; + lmac->last_duplex = DUPLEX_FULL; } else { lmac->link_up = 0; lmac->last_speed = SPEED_UNKNOWN; @@ -1105,8 +1105,8 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid) } else { /* Default to below link speed and duplex */ lmac->link_up = true; - lmac->last_speed = 1000; - lmac->last_duplex = 1; + lmac->last_speed = SPEED_1000; + lmac->last_duplex = DUPLEX_FULL; bgx_sgmii_change_link_state(lmac); return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c index 0e9182d3f02c..b3e4118a15e7 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c @@ -443,9 +443,9 @@ found: struct l2t_data *t3_init_l2t(unsigned int l2t_capacity) { struct l2t_data *d; - int i, size = sizeof(*d) + l2t_capacity * sizeof(struct l2t_entry); + int i; - d = kvzalloc(size, GFP_KERNEL); + d = kvzalloc(struct_size(d, l2tab, l2t_capacity), GFP_KERNEL); if (!d) return NULL; diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h index c2fd323c4078..ea75f275023f 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h @@ -75,8 +75,8 @@ struct l2t_data { struct l2t_entry *rover; /* starting point for next allocation */ atomic_t nfree; /* number of free entries */ rwlock_t lock; - struct l2t_entry l2tab[0]; struct rcu_head rcu_head; /* to 
handle rcu cleanup */ + struct l2t_entry l2tab[]; }; typedef void (*arp_failure_handler_func)(struct t3cdev * dev, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 956219c178e1..a8fe0808823d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -1575,9 +1575,11 @@ int t4_slow_intr_handler(struct adapter *adapter); int t4_wait_dev_ready(void __iomem *regs); +fw_port_cap32_t t4_link_acaps(struct adapter *adapter, unsigned int port, + struct link_config *lc); int t4_link_l1cfg_core(struct adapter *adap, unsigned int mbox, unsigned int port, struct link_config *lc, - bool sleep_ok, int timeout); + u8 sleep_ok, int timeout); static inline int t4_link_l1cfg(struct adapter *adapter, unsigned int mbox, unsigned int port, struct link_config *lc) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index bec4711005cc..9e589302af90 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -442,7 +442,7 @@ static unsigned int speed_to_fw_caps(int speed) * Link Mode Mask. */ static void fw_caps_to_lmm(enum fw_port_type port_type, - unsigned int fw_caps, + fw_port_cap32_t fw_caps, unsigned long *link_mode_mask) { #define SET_LMM(__lmm_name) \ @@ -632,7 +632,10 @@ static int get_link_ksettings(struct net_device *dev, fw_caps_to_lmm(pi->port_type, pi->link_cfg.pcaps, link_ksettings->link_modes.supported); - fw_caps_to_lmm(pi->port_type, pi->link_cfg.acaps, + fw_caps_to_lmm(pi->port_type, + t4_link_acaps(pi->adapter, + pi->lport, + &pi->link_cfg), link_ksettings->link_modes.advertising); fw_caps_to_lmm(pi->port_type, pi->link_cfg.lpacaps, link_ksettings->link_modes.lp_advertising); @@ -642,22 +645,6 @@ static int get_link_ksettings(struct net_device *dev, : SPEED_UNKNOWN); base->duplex = DUPLEX_FULL; - if (pi->link_cfg.fc & PAUSE_RX) { - if (pi->link_cfg.fc & PAUSE_TX) { - ethtool_link_ksettings_add_link_mode(link_ksettings, - advertising, - Pause); - } else { - ethtool_link_ksettings_add_link_mode(link_ksettings, - advertising, - Asym_Pause); - } - } else if (pi->link_cfg.fc & PAUSE_TX) { - ethtool_link_ksettings_add_link_mode(link_ksettings, - advertising, - Asym_Pause); - } - base->autoneg = pi->link_cfg.autoneg; if (pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG) ethtool_link_ksettings_add_link_mode(link_ksettings, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c index 5afb43000049..4107007b6ec4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c @@ -524,8 +524,7 @@ static int del_filter_wr(struct adapter *adapter, int fidx) return -ENOMEM; fwr = __skb_put(skb, len); - t4_mk_filtdelwr(f->tid, fwr, (adapter->flags & CXGB4_SHUTTING_DOWN) ? -1 - : adapter->sge.fw_evtq.abs_id); + t4_mk_filtdelwr(f->tid, fwr, adapter->sge.fw_evtq.abs_id); /* Mark the filter as "pending" and ship off the Filter Work Request. * When we get the Work Request Reply we'll clear the pending status. 
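The l2t.c and l2t.h hunks above fix a layout hazard along with an open-coded size computation: with the zero-length array l2tab[0] declared before rcu_head, table entries and the RCU callback head shared storage, so the array becomes a C99 flexible array member placed last and the allocation switches to struct_size(). A standalone sketch of the same pattern; struct foo is a hypothetical stand-in:

#include <linux/mm.h>
#include <linux/overflow.h>
#include <linux/rcupdate.h>

struct foo {
	unsigned int nelem;
	struct rcu_head rcu_head;	/* fixed members first ... */
	unsigned long tab[];		/* ... flexible array member last */
};

static struct foo *foo_alloc(unsigned int n)
{
	struct foo *f;

	/* struct_size() computes sizeof(*f) + n * sizeof(f->tab[0]) with
	 * overflow checking, saturating to SIZE_MAX so an absurd n makes
	 * kvzalloc() fail instead of returning an undersized buffer.
	 */
	f = kvzalloc(struct_size(f, tab, n), GFP_KERNEL);
	if (f)
		f->nelem = n;
	return f;
}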
@@ -744,16 +743,40 @@ void clear_filter(struct adapter *adap, struct filter_entry *f) void clear_all_filters(struct adapter *adapter) { + struct net_device *dev = adapter->port[0]; unsigned int i; if (adapter->tids.ftid_tab) { struct filter_entry *f = &adapter->tids.ftid_tab[0]; unsigned int max_ftid = adapter->tids.nftids + adapter->tids.nsftids; - + /* Clear all TCAM filters */ for (i = 0; i < max_ftid; i++, f++) if (f->valid || f->pending) - clear_filter(adapter, f); + cxgb4_del_filter(dev, i, &f->fs); + } + + /* Clear all hash filters */ + if (is_hashfilter(adapter) && adapter->tids.tid_tab) { + struct filter_entry *f; + unsigned int sb; + + for (i = adapter->tids.hash_base; + i <= adapter->tids.ntids; i++) { + f = (struct filter_entry *) + adapter->tids.tid_tab[i]; + + if (f && (f->valid || f->pending)) + cxgb4_del_filter(dev, i, &f->fs); + } + + sb = t4_read_reg(adapter, LE_DB_SRVR_START_INDEX_A); + for (i = 0; i < sb; i++) { + f = (struct filter_entry *)adapter->tids.tid_tab[i]; + + if (f && (f->valid || f->pending)) + cxgb4_del_filter(dev, i, &f->fs); + } } } @@ -1568,9 +1591,8 @@ int cxgb4_del_filter(struct net_device *dev, int filter_id, struct filter_ctx ctx; int ret; - /* If we are shutting down the adapter do not wait for completion */ if (netdev2adap(dev)->flags & CXGB4_SHUTTING_DOWN) - return __cxgb4_del_filter(dev, filter_id, fs, NULL); + return 0; init_completion(&ctx.completion); @@ -1722,12 +1744,13 @@ void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl) break; default: - dev_err(adap->pdev_dev, "%s: filter creation PROBLEM; status = %u\n", - __func__, status); + if (status != CPL_ERR_TCAM_FULL) + dev_err(adap->pdev_dev, "%s: filter creation PROBLEM; status = %u\n", + __func__, status); if (ctx) { if (status == CPL_ERR_TCAM_FULL) - ctx->result = -EAGAIN; + ctx->result = -ENOSPC; else ctx->result = -EINVAL; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 89179e316687..7487852e6afa 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -979,8 +979,7 @@ freeout: } static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb, - struct net_device *sb_dev, - select_queue_fallback_t fallback) + struct net_device *sb_dev) { int txq; @@ -1022,7 +1021,7 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb, return txq; } - return fallback(dev, skb, NULL) % dev->real_num_tx_queues; + return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues; } static int closest_timer(const struct sge *s, int time) @@ -6025,6 +6024,11 @@ static void remove_one(struct pci_dev *pdev) return; } + /* If we allocated filters, free up state associated with any + * valid filters ... + */ + clear_all_filters(adapter); + adapter->flags |= CXGB4_SHUTTING_DOWN; if (adapter->pf == 4) { @@ -6055,11 +6059,6 @@ static void remove_one(struct pci_dev *pdev) if (IS_REACHABLE(CONFIG_THERMAL)) cxgb4_thermal_remove(adapter); - /* If we allocated filters, free up state associated with any - * valid filters ... 
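Taken together, the filter hunks above and the remove_one() hunks around this point invert the old shutdown shortcut: cxgb4_del_filter() now returns 0 without touching hardware once CXGB4_SHUTTING_DOWN is set, so filter cleanup has to run before the flag is raised. A condensed sketch of the resulting ordering in the PCI remove path, with the unrelated teardown steps elided:

static void remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	/* Delete TCAM and hash filters while deletion still reaches
	 * the hardware; after CXGB4_SHUTTING_DOWN is set below,
	 * cxgb4_del_filter() becomes a no-op returning 0.
	 */
	clear_all_filters(adapter);

	adapter->flags |= CXGB4_SHUTTING_DOWN;

	/* ... unregister netdevs, tear down queues, free resources ... */
}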
- */ - clear_all_filters(adapter); - if (adapter->flags & CXGB4_FULL_INIT_DONE) cxgb_down(adapter); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index 82a8d1970060..6e2d80008a79 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -687,11 +687,8 @@ int cxgb4_tc_flower_replace(struct net_device *dev, ret = ctx.result; /* Check if hw returned error for filter creation */ - if (ret) { - netdev_err(dev, "%s: filter creation err %d\n", - __func__, ret); + if (ret) goto free_entry; - } ch_flower->tc_flower_cookie = cls->cookie; ch_flower->filter_id = ctx.tid; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index a3544041ad32..f9b70be59792 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -3964,6 +3964,14 @@ void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf) } } +/* The ADVERT_MASK is used to mask out all of the Advertised Firmware Port + * Capabilities which we control with separate controls -- see, for instance, + * Pause Frames and Forward Error Correction. In order to determine what the + * full set of Advertised Port Capabilities are, the base Advertised Port + * Capabilities (masked by ADVERT_MASK) must be combined with the Advertised + * Port Capabilities associated with those other controls. See + * t4_link_acaps() for how this is done. + */ #define ADVERT_MASK (FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_M) | \ FW_PORT_CAP32_ANEG) @@ -4061,6 +4069,9 @@ static inline enum cc_pause fwcap_to_cc_pause(fw_port_cap32_t fw_pause) /* Translate Common Code Pause specification into Firmware Port Capabilities */ static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause) { + /* Translate orthogonal RX/TX Pause Controls for L1 Configure + * commands, etc. + */ fw_port_cap32_t fw_pause = 0; if (cc_pause & PAUSE_RX) @@ -4070,6 +4081,19 @@ static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause) if (!(cc_pause & PAUSE_AUTONEG)) fw_pause |= FW_PORT_CAP32_FORCE_PAUSE; + /* Translate orthogonal Pause controls into IEEE 802.3 Pause, + * Asymetrical Pause for use in reporting to upper layer OS code, etc. + * Note that these bits are ignored in L1 Configure commands. + */ + if (cc_pause & PAUSE_RX) { + if (cc_pause & PAUSE_TX) + fw_pause |= FW_PORT_CAP32_802_3_PAUSE; + else + fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR; + } else if (cc_pause & PAUSE_TX) { + fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR; + } + return fw_pause; } @@ -4100,31 +4124,22 @@ static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec) } /** - * t4_link_l1cfg - apply link configuration to MAC/PHY + * t4_link_acaps - compute Link Advertised Port Capabilities * @adapter: the adapter - * @mbox: the Firmware Mailbox to use * @port: the Port ID * @lc: the Port's Link Configuration - * @sleep_ok: if true we may sleep while awaiting command completion - * @timeout: time to wait for command to finish before timing out - * (negative implies @sleep_ok=false) * - * Set up a port's MAC and PHY according to a desired link configuration. - * - If the PHY can auto-negotiate first decide what to advertise, then - * enable/disable auto-negotiation as desired, and reset. - * - If the PHY does not auto-negotiate just reset it. - * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC, - * otherwise do it later based on the outcome of auto-negotiation. 
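The cc_to_fwcap_pause() hunk above centralizes a derivation that the ethtool hunks in this series delete from get_link_ksettings() in both cxgb4 and cxgb4vf: folding the driver's orthogonal RX/TX pause enables into the IEEE 802.3 Pause/Asym_Pause advertisement bits. A standalone restatement of the mapping; the PAUSE_* and CAP_* values are stand-ins for enum cc_pause and the FW_PORT_CAP32_802_3_* bits:

#include <stdint.h>

#define PAUSE_RX		(1U << 0)
#define PAUSE_TX		(1U << 1)

#define CAP_802_3_PAUSE		(1U << 0)	/* symmetric pause */
#define CAP_802_3_ASM_DIR	(1U << 1)	/* asymmetric direction */

static uint32_t cc_pause_to_ieee(unsigned int cc_pause)
{
	uint32_t caps = 0;

	/* RX+TX advertises symmetric Pause; RX-only or TX-only
	 * advertises only the asymmetric-direction bit, matching the
	 * blocks removed from the two ethtool handlers.
	 */
	if (cc_pause & PAUSE_RX)
		caps |= (cc_pause & PAUSE_TX) ? CAP_802_3_PAUSE
					      : CAP_802_3_ASM_DIR;
	else if (cc_pause & PAUSE_TX)
		caps |= CAP_802_3_ASM_DIR;

	return caps;
}

As the new comment notes, firmware ignores these two bits in L1 Configure commands; they exist so upper layers can report the advertisement without recomputing it.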
+ * Synthesize the Advertised Port Capabilities we'll be using based on + * the base Advertised Port Capabilities (which have been filtered by + * ADVERT_MASK) plus the individual controls for things like Pause + * Frames, Forward Error Correction, MDI, etc. */ -int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox, - unsigned int port, struct link_config *lc, - bool sleep_ok, int timeout) +fw_port_cap32_t t4_link_acaps(struct adapter *adapter, unsigned int port, + struct link_config *lc) { - unsigned int fw_caps = adapter->params.fw_caps_support; - fw_port_cap32_t fw_fc, cc_fec, fw_fec, rcap; - struct fw_port_cmd cmd; + fw_port_cap32_t fw_fc, fw_fec, acaps; unsigned int fw_mdi; - int ret; + char cc_fec; fw_mdi = (FW_PORT_CAP32_MDI_V(FW_PORT_CAP32_MDI_AUTO) & lc->pcaps); @@ -4151,18 +4166,15 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox, * init_link_config(). */ if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) { - if (lc->autoneg == AUTONEG_ENABLE) - return -EINVAL; - - rcap = lc->acaps | fw_fc | fw_fec; + acaps = lc->acaps | fw_fc | fw_fec; lc->fc = lc->requested_fc & ~PAUSE_AUTONEG; lc->fec = cc_fec; } else if (lc->autoneg == AUTONEG_DISABLE) { - rcap = lc->speed_caps | fw_fc | fw_fec | fw_mdi; + acaps = lc->speed_caps | fw_fc | fw_fec | fw_mdi; lc->fc = lc->requested_fc & ~PAUSE_AUTONEG; lc->fec = cc_fec; } else { - rcap = lc->acaps | fw_fc | fw_fec | fw_mdi; + acaps = lc->acaps | fw_fc | fw_fec | fw_mdi; } /* Some Requested Port Capabilities are trivially wrong if they exceed @@ -4173,15 +4185,50 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox, * we need to exclude this from this check in order to maintain * compatibility ... */ - if ((rcap & ~lc->pcaps) & ~FW_PORT_CAP32_FORCE_PAUSE) { - dev_err(adapter->pdev_dev, - "Requested Port Capabilities %#x exceed Physical Port Capabilities %#x\n", - rcap, lc->pcaps); + if ((acaps & ~lc->pcaps) & ~FW_PORT_CAP32_FORCE_PAUSE) { + dev_err(adapter->pdev_dev, "Requested Port Capabilities %#x exceed Physical Port Capabilities %#x\n", + acaps, lc->pcaps); + return -EINVAL; + } + + return acaps; +} + +/** + * t4_link_l1cfg_core - apply link configuration to MAC/PHY + * @adapter: the adapter + * @mbox: the Firmware Mailbox to use + * @port: the Port ID + * @lc: the Port's Link Configuration + * @sleep_ok: if true we may sleep while awaiting command completion + * @timeout: time to wait for command to finish before timing out + * (negative implies @sleep_ok=false) + * + * Set up a port's MAC and PHY according to a desired link configuration. + * - If the PHY can auto-negotiate first decide what to advertise, then + * enable/disable auto-negotiation as desired, and reset. + * - If the PHY does not auto-negotiate just reset it. + * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC, + * otherwise do it later based on the outcome of auto-negotiation. + */ +int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox, + unsigned int port, struct link_config *lc, + u8 sleep_ok, int timeout) +{ + unsigned int fw_caps = adapter->params.fw_caps_support; + struct fw_port_cmd cmd; + fw_port_cap32_t rcap; + int ret; + + if (!(lc->pcaps & FW_PORT_CAP32_ANEG) && + lc->autoneg == AUTONEG_ENABLE) { return -EINVAL; } - /* And send that on to the Firmware ... + /* Compute our Requested Port Capabilities and send that on to the + * Firmware. 
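With the synthesis factored out, t4_link_acaps() serves both callers: t4_link_l1cfg_core() below uses it to build the Requested Port Capabilities for the firmware, and the get_link_ksettings() hunk earlier in this section calls it to fill the ethtool advertising mask instead of reading lc->acaps directly. A usage sketch abbreviated from that ethtool hunk, with error handling and the other link-mode masks elided:

static int get_link_ksettings(struct net_device *dev,
			      struct ethtool_link_ksettings *link_ksettings)
{
	struct port_info *pi = netdev_priv(dev);

	/* advertising = base acaps (filtered by ADVERT_MASK) plus the
	 * separately controlled Pause/FEC/MDI capabilities.
	 */
	fw_caps_to_lmm(pi->port_type,
		       t4_link_acaps(pi->adapter, pi->lport, &pi->link_cfg),
		       link_ksettings->link_modes.advertising);
	return 0;
}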
*/ + rcap = t4_link_acaps(adapter, port, lc); memset(&cmd, 0, sizeof(cmd)); cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) | FW_CMD_REQUEST_F | FW_CMD_EXEC_F | @@ -4211,7 +4258,7 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox, rcap, -ret); return ret; } - return ret; + return 0; } /** diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h index 9125ddd89dd1..a02b1dff403e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h @@ -36,8 +36,8 @@ #define __T4FW_VERSION_H__ #define T4FW_VERSION_MAJOR 0x01 -#define T4FW_VERSION_MINOR 0x16 -#define T4FW_VERSION_MICRO 0x09 +#define T4FW_VERSION_MINOR 0x17 +#define T4FW_VERSION_MICRO 0x03 #define T4FW_VERSION_BUILD 0x00 #define T4FW_MIN_VERSION_MAJOR 0x01 @@ -45,8 +45,8 @@ #define T4FW_MIN_VERSION_MICRO 0x00 #define T5FW_VERSION_MAJOR 0x01 -#define T5FW_VERSION_MINOR 0x16 -#define T5FW_VERSION_MICRO 0x09 +#define T5FW_VERSION_MINOR 0x17 +#define T5FW_VERSION_MICRO 0x03 #define T5FW_VERSION_BUILD 0x00 #define T5FW_MIN_VERSION_MAJOR 0x00 @@ -54,8 +54,8 @@ #define T5FW_MIN_VERSION_MICRO 0x00 #define T6FW_VERSION_MAJOR 0x01 -#define T6FW_VERSION_MINOR 0x16 -#define T6FW_VERSION_MICRO 0x09 +#define T6FW_VERSION_MINOR 0x17 +#define T6FW_VERSION_MICRO 0x03 #define T6FW_VERSION_BUILD 0x00 #define T6FW_MIN_VERSION_MAJOR 0x00 diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index adc4d481815b..6d4cf3d0b2f0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -518,8 +518,8 @@ static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp, break; } cpl = (void *)p; - /*FALLTHROUGH*/ } + /* Fall through */ case CPL_SGE_EGR_UPDATE: { /* @@ -1479,22 +1479,6 @@ static int cxgb4vf_get_link_ksettings(struct net_device *dev, base->duplex = DUPLEX_UNKNOWN; } - if (pi->link_cfg.fc & PAUSE_RX) { - if (pi->link_cfg.fc & PAUSE_TX) { - ethtool_link_ksettings_add_link_mode(link_ksettings, - advertising, - Pause); - } else { - ethtool_link_ksettings_add_link_mode(link_ksettings, - advertising, - Asym_Pause); - } - } else if (pi->link_cfg.fc & PAUSE_TX) { - ethtool_link_ksettings_add_link_mode(link_ksettings, - advertising, - Asym_Pause); - } - base->autoneg = pi->link_cfg.autoneg; if (pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG) ethtool_link_ksettings_add_link_mode(link_ksettings, diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index 84dff74ca9cd..8a389d617a23 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -313,7 +313,17 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, return ret; } +/* In the Physical Function Driver Common Code, the ADVERT_MASK is used to + * mask out bits in the Advertised Port Capabilities which are managed via + * separate controls, like Pause Frames and Forward Error Correction. In the + * Virtual Function Common Code, since we never perform L1 Configuration on + * the Link, the only things we really need to filter out are things which + * we decode and report separately like Speed. 
+ */ #define ADVERT_MASK (FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_M) | \ + FW_PORT_CAP32_802_3_PAUSE | \ + FW_PORT_CAP32_802_3_ASM_DIR | \ + FW_PORT_CAP32_FEC_V(FW_PORT_CAP32_FEC_M) | \ FW_PORT_CAP32_ANEG) /** diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 733d9172425b..acb2856936d2 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -897,7 +897,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb, if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS) netif_tx_stop_queue(txq); skb_tx_timestamp(skb); - if (!skb->xmit_more || netif_xmit_stopped(txq)) + if (!netdev_xmit_more() || netif_xmit_stopped(txq)) vnic_wq_doorbell(wq); spin_unlock(&enic->wq_lock[txq_map]); diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index 949103db8a8a..9003eb6716cd 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -1235,8 +1235,6 @@ static int gmac_start_xmit(struct sk_buff *skb, struct net_device *netdev) int txq_num, nfrags; union dma_rwptr rw; - SKB_FRAG_ASSERT(skb); - if (skb->len >= 0x10000) goto out_drop_free; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 3c7c04406a2b..e2f9fbced174 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1376,7 +1376,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev) u16 q_idx = skb_get_queue_mapping(skb); struct be_tx_obj *txo = &adapter->tx_obj[q_idx]; struct be_wrb_params wrb_params = { 0 }; - bool flush = !skb->xmit_more; + bool flush = !netdev_xmit_more(); u16 wrb_cnt; skb = be_xmit_workarounds(adapter, skb, &wrb_params); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index dc339dc1adb2..63b1ecc18c26 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -435,7 +435,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, percpu_stats->rx_packets++; percpu_stats->rx_bytes += dpaa2_fd_get_len(fd); - napi_gro_receive(&ch->napi, skb); + list_add_tail(&skb->list, ch->rx_list); return; @@ -1113,12 +1113,16 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget) struct dpaa2_eth_fq *fq, *txc_fq = NULL; struct netdev_queue *nq; int store_cleaned, work_done; + struct list_head rx_list; int err; ch = container_of(napi, struct dpaa2_eth_channel, napi); ch->xdp.res = 0; priv = ch->priv; + INIT_LIST_HEAD(&rx_list); + ch->rx_list = &rx_list; + do { err = pull_channel(ch); if (unlikely(err)) @@ -1162,6 +1166,8 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget) work_done = max(rx_cleaned, 1); out: + netif_receive_skb_list(ch->rx_list); + if (txc_fq && txc_fq->dq_frames) { nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid); netdev_tx_completed_queue(nq, txc_fq->dq_frames, @@ -2565,10 +2571,12 @@ static const struct dpaa2_eth_dist_fields dist_fields[] = { .rxnfc_field = RXH_L2DA, .cls_prot = NET_PROT_ETH, .cls_field = NH_FLD_ETH_DA, + .id = DPAA2_ETH_DIST_ETHDST, .size = 6, }, { .cls_prot = NET_PROT_ETH, .cls_field = NH_FLD_ETH_SA, + .id = DPAA2_ETH_DIST_ETHSRC, .size = 6, }, { /* This is the last ethertype field parsed: @@ -2577,28 +2585,33 @@ static const struct dpaa2_eth_dist_fields dist_fields[] = { */ .cls_prot = NET_PROT_ETH, .cls_field = 
NH_FLD_ETH_TYPE, + .id = DPAA2_ETH_DIST_ETHTYPE, .size = 2, }, { /* VLAN header */ .rxnfc_field = RXH_VLAN, .cls_prot = NET_PROT_VLAN, .cls_field = NH_FLD_VLAN_TCI, + .id = DPAA2_ETH_DIST_VLAN, .size = 2, }, { /* IP header */ .rxnfc_field = RXH_IP_SRC, .cls_prot = NET_PROT_IP, .cls_field = NH_FLD_IP_SRC, + .id = DPAA2_ETH_DIST_IPSRC, .size = 4, }, { .rxnfc_field = RXH_IP_DST, .cls_prot = NET_PROT_IP, .cls_field = NH_FLD_IP_DST, + .id = DPAA2_ETH_DIST_IPDST, .size = 4, }, { .rxnfc_field = RXH_L3_PROTO, .cls_prot = NET_PROT_IP, .cls_field = NH_FLD_IP_PROTO, + .id = DPAA2_ETH_DIST_IPPROTO, .size = 1, }, { /* Using UDP ports, this is functionally equivalent to raw @@ -2607,11 +2620,13 @@ static const struct dpaa2_eth_dist_fields dist_fields[] = { .rxnfc_field = RXH_L4_B_0_1, .cls_prot = NET_PROT_UDP, .cls_field = NH_FLD_UDP_PORT_SRC, + .id = DPAA2_ETH_DIST_L4SRC, .size = 2, }, { .rxnfc_field = RXH_L4_B_2_3, .cls_prot = NET_PROT_UDP, .cls_field = NH_FLD_UDP_PORT_DST, + .id = DPAA2_ETH_DIST_L4DST, .size = 2, }, }; @@ -2677,12 +2692,15 @@ static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key) } /* Size of the Rx flow classification key */ -int dpaa2_eth_cls_key_size(void) +int dpaa2_eth_cls_key_size(u64 fields) { int i, size = 0; - for (i = 0; i < ARRAY_SIZE(dist_fields); i++) + for (i = 0; i < ARRAY_SIZE(dist_fields); i++) { + if (!(fields & dist_fields[i].id)) + continue; size += dist_fields[i].size; + } return size; } @@ -2703,6 +2721,24 @@ int dpaa2_eth_cls_fld_off(int prot, int field) return 0; } +/* Prune unused fields from the classification rule. + * Used when masking is not supported + */ +void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields) +{ + int off = 0, new_off = 0; + int i, size; + + for (i = 0; i < ARRAY_SIZE(dist_fields); i++) { + size = dist_fields[i].size; + if (dist_fields[i].id & fields) { + memcpy(key_mem + new_off, key_mem + off, size); + new_off += size; + } + off += size; + } +} + /* Set Rx distribution (hash or flow classification) key * flags is a combination of RXH_ bits */ @@ -2724,14 +2760,13 @@ static int dpaa2_eth_set_dist_key(struct net_device *net_dev, struct dpkg_extract *key = &cls_cfg.extracts[cls_cfg.num_extracts]; - /* For Rx hashing key we set only the selected fields. - * For Rx flow classification key we set all supported fields + /* For both Rx hashing and classification keys + * we set only the selected fields. 
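Without key masking support in hardware, rule buffers are still built against the full key layout and then compacted so that only the fields actually present in the programmed key remain. A standalone restatement of the dpaa2_eth_cls_trim_rule() walk above; struct field is a stand-in for struct dpaa2_eth_dist_fields:

#include <stdint.h>
#include <string.h>

struct field {
	uint64_t id;	/* a DPAA2_ETH_DIST_* style bit */
	int size;	/* bytes this field occupies in the full key */
};

static void trim_key(uint8_t *key, uint64_t fields,
		     const struct field *tbl, int n)
{
	int off = 0, new_off = 0, i;

	for (i = 0; i < n; i++) {
		if (tbl[i].id & fields) {
			/* memmove, since once earlier fields have been
			 * dropped the source and destination ranges of a
			 * kept field can overlap.
			 */
			memmove(key + new_off, key + off, tbl[i].size);
			new_off += tbl[i].size;
		}
		off += tbl[i].size;
	}
}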
*/ - if (type == DPAA2_ETH_RX_DIST_HASH) { - if (!(flags & dist_fields[i].rxnfc_field)) - continue; + if (!(flags & dist_fields[i].id)) + continue; + if (type == DPAA2_ETH_RX_DIST_HASH) rx_hash_fields |= dist_fields[i].rxnfc_field; - } if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) { dev_err(dev, "error adding key extraction rule, too many rules?\n"); @@ -2786,16 +2821,28 @@ free_key: int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags) { struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + u64 key = 0; + int i; if (!dpaa2_eth_hash_enabled(priv)) return -EOPNOTSUPP; - return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, flags); + for (i = 0; i < ARRAY_SIZE(dist_fields); i++) + if (dist_fields[i].rxnfc_field & flags) + key |= dist_fields[i].id; + + return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, key); +} + +int dpaa2_eth_set_cls(struct net_device *net_dev, u64 flags) +{ + return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_CLS, flags); } -static int dpaa2_eth_set_cls(struct dpaa2_eth_priv *priv) +static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv *priv) { struct device *dev = priv->net_dev->dev.parent; + int err; /* Check if we actually support Rx flow classification */ if (dpaa2_eth_has_legacy_dist(priv)) { @@ -2803,8 +2850,7 @@ static int dpaa2_eth_set_cls(struct dpaa2_eth_priv *priv) return -EOPNOTSUPP; } - if (priv->dpni_attrs.options & DPNI_OPT_NO_FS || - !(priv->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)) { + if (!dpaa2_eth_fs_enabled(priv)) { dev_dbg(dev, "Rx cls disabled in DPNI options\n"); return -EOPNOTSUPP; } @@ -2814,9 +2860,21 @@ static int dpaa2_eth_set_cls(struct dpaa2_eth_priv *priv) return -EOPNOTSUPP; } + /* If there is no support for masking in the classification table, + * we don't set a default key, as it will depend on the rules + * added by the user at runtime. 
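The deferral described here is implemented in the do_cls_rule() hunk further below: without DPNI_OPT_HAS_KEY_MASKING, the first rule added at runtime fixes the key composition, and subsequent rules must use exactly the same set of header fields. A condensed sketch of that policy:

/* Condensed from the do_cls_rule() hunk further below. */
if (!dpaa2_eth_fs_mask_enabled(priv)) {
	if (!priv->rx_cls_fields) {
		/* First rule: program the FS key from its fields */
		err = dpaa2_eth_set_cls(net_dev, fields);
		if (err)
			goto free_mem;
		priv->rx_cls_fields = fields;
	} else if (priv->rx_cls_fields != fields) {
		/* Later rules must reuse the same field set */
		err = -EOPNOTSUPP;
		goto free_mem;
	}

	dpaa2_eth_cls_trim_rule(key_buf, fields);
	rule_cfg.key_size = dpaa2_eth_cls_key_size(fields);
}

The matching release is in update_cls_rule(): when the last rule is deleted, rx_cls_fields is reset to 0 so the next rule may establish a new composition.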
+ */ + if (!dpaa2_eth_fs_mask_enabled(priv)) + goto out; + + err = dpaa2_eth_set_cls(priv->net_dev, DPAA2_ETH_DIST_ALL); + if (err) + return err; + +out: priv->rx_cls_enabled = 1; - return dpaa2_eth_set_dist_key(priv->net_dev, DPAA2_ETH_RX_DIST_CLS, 0); + return 0; } /* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs, @@ -2851,7 +2909,7 @@ static int bind_dpni(struct dpaa2_eth_priv *priv) /* Configure the flow classification key; it includes all * supported header fields and cannot be modified at runtime */ - err = dpaa2_eth_set_cls(priv); + err = dpaa2_eth_set_default_cls(priv); if (err && err != -EOPNOTSUPP) dev_err(dev, "Failed to configure Rx classification key\n"); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h index 7879622aa3e6..5fb8f5c0dc9f 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h @@ -334,6 +334,7 @@ struct dpaa2_eth_channel { struct dpaa2_eth_ch_stats stats; struct dpaa2_eth_ch_xdp xdp; struct xdp_rxq_info xdp_rxq; + struct list_head *rx_list; }; struct dpaa2_eth_dist_fields { @@ -341,6 +342,7 @@ struct dpaa2_eth_dist_fields { enum net_prot cls_prot; int cls_field; int size; + u64 id; }; struct dpaa2_eth_cls_rule { @@ -393,6 +395,7 @@ struct dpaa2_eth_priv { /* enabled ethtool hashing bits */ u64 rx_hash_fields; + u64 rx_cls_fields; struct dpaa2_eth_cls_rule *cls_rules; u8 rx_cls_enabled; struct bpf_prog *xdp_prog; @@ -436,6 +439,12 @@ static inline int dpaa2_eth_cmp_dpni_ver(struct dpaa2_eth_priv *priv, (dpaa2_eth_cmp_dpni_ver((priv), DPNI_RX_DIST_KEY_VER_MAJOR, \ DPNI_RX_DIST_KEY_VER_MINOR) < 0) +#define dpaa2_eth_fs_enabled(priv) \ + (!((priv)->dpni_attrs.options & DPNI_OPT_NO_FS)) + +#define dpaa2_eth_fs_mask_enabled(priv) \ + ((priv)->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING) + #define dpaa2_eth_fs_count(priv) \ ((priv)->dpni_attrs.fs_entries) @@ -448,6 +457,18 @@ enum dpaa2_eth_rx_dist { DPAA2_ETH_RX_DIST_CLS }; +/* Unique IDs for the supported Rx classification header fields */ +#define DPAA2_ETH_DIST_ETHDST BIT(0) +#define DPAA2_ETH_DIST_ETHSRC BIT(1) +#define DPAA2_ETH_DIST_ETHTYPE BIT(2) +#define DPAA2_ETH_DIST_VLAN BIT(3) +#define DPAA2_ETH_DIST_IPSRC BIT(4) +#define DPAA2_ETH_DIST_IPDST BIT(5) +#define DPAA2_ETH_DIST_IPPROTO BIT(6) +#define DPAA2_ETH_DIST_L4SRC BIT(7) +#define DPAA2_ETH_DIST_L4DST BIT(8) +#define DPAA2_ETH_DIST_ALL (~0U) + static inline unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv, struct sk_buff *skb) @@ -482,7 +503,9 @@ static inline unsigned int dpaa2_eth_rx_head_room(struct dpaa2_eth_priv *priv) } int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags); -int dpaa2_eth_cls_key_size(void); +int dpaa2_eth_set_cls(struct net_device *net_dev, u64 key); +int dpaa2_eth_cls_key_size(u64 key); int dpaa2_eth_cls_fld_off(int prot, int field); +void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields); #endif /* __DPAA2_H */ diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c index 591dfcf76adb..76bd8d2872cc 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c @@ -264,7 +264,7 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev, } static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask, - void *key, void *mask) + void *key, void *mask, u64 *fields) { int off; @@ -272,18 +272,21 @@ static int 
prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask, off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE); *(__be16 *)(key + off) = eth_value->h_proto; *(__be16 *)(mask + off) = eth_mask->h_proto; + *fields |= DPAA2_ETH_DIST_ETHTYPE; } if (!is_zero_ether_addr(eth_mask->h_source)) { off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_SA); ether_addr_copy(key + off, eth_value->h_source); ether_addr_copy(mask + off, eth_mask->h_source); + *fields |= DPAA2_ETH_DIST_ETHSRC; } if (!is_zero_ether_addr(eth_mask->h_dest)) { off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA); ether_addr_copy(key + off, eth_value->h_dest); ether_addr_copy(mask + off, eth_mask->h_dest); + *fields |= DPAA2_ETH_DIST_ETHDST; } return 0; @@ -291,7 +294,7 @@ static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask, static int prep_uip_rule(struct ethtool_usrip4_spec *uip_value, struct ethtool_usrip4_spec *uip_mask, - void *key, void *mask) + void *key, void *mask, u64 *fields) { int off; u32 tmp_value, tmp_mask; @@ -303,18 +306,21 @@ static int prep_uip_rule(struct ethtool_usrip4_spec *uip_value, off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC); *(__be32 *)(key + off) = uip_value->ip4src; *(__be32 *)(mask + off) = uip_mask->ip4src; + *fields |= DPAA2_ETH_DIST_IPSRC; } if (uip_mask->ip4dst) { off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST); *(__be32 *)(key + off) = uip_value->ip4dst; *(__be32 *)(mask + off) = uip_mask->ip4dst; + *fields |= DPAA2_ETH_DIST_IPDST; } if (uip_mask->proto) { off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO); *(u8 *)(key + off) = uip_value->proto; *(u8 *)(mask + off) = uip_mask->proto; + *fields |= DPAA2_ETH_DIST_IPPROTO; } if (uip_mask->l4_4_bytes) { @@ -324,23 +330,26 @@ static int prep_uip_rule(struct ethtool_usrip4_spec *uip_value, off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC); *(__be16 *)(key + off) = htons(tmp_value >> 16); *(__be16 *)(mask + off) = htons(tmp_mask >> 16); + *fields |= DPAA2_ETH_DIST_L4SRC; off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST); *(__be16 *)(key + off) = htons(tmp_value & 0xFFFF); *(__be16 *)(mask + off) = htons(tmp_mask & 0xFFFF); + *fields |= DPAA2_ETH_DIST_L4DST; } /* Only apply the rule for IPv4 frames */ off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE); *(__be16 *)(key + off) = htons(ETH_P_IP); *(__be16 *)(mask + off) = htons(0xFFFF); + *fields |= DPAA2_ETH_DIST_ETHTYPE; return 0; } static int prep_l4_rule(struct ethtool_tcpip4_spec *l4_value, struct ethtool_tcpip4_spec *l4_mask, - void *key, void *mask, u8 l4_proto) + void *key, void *mask, u8 l4_proto, u64 *fields) { int off; @@ -351,41 +360,47 @@ static int prep_l4_rule(struct ethtool_tcpip4_spec *l4_value, off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC); *(__be32 *)(key + off) = l4_value->ip4src; *(__be32 *)(mask + off) = l4_mask->ip4src; + *fields |= DPAA2_ETH_DIST_IPSRC; } if (l4_mask->ip4dst) { off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST); *(__be32 *)(key + off) = l4_value->ip4dst; *(__be32 *)(mask + off) = l4_mask->ip4dst; + *fields |= DPAA2_ETH_DIST_IPDST; } if (l4_mask->psrc) { off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC); *(__be16 *)(key + off) = l4_value->psrc; *(__be16 *)(mask + off) = l4_mask->psrc; + *fields |= DPAA2_ETH_DIST_L4SRC; } if (l4_mask->pdst) { off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST); *(__be16 *)(key + off) = l4_value->pdst; *(__be16 *)(mask + off) = l4_mask->pdst; + *fields |= DPAA2_ETH_DIST_L4DST; } /* Only apply the 
rule for IPv4 frames with the specified L4 proto */ off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE); *(__be16 *)(key + off) = htons(ETH_P_IP); *(__be16 *)(mask + off) = htons(0xFFFF); + *fields |= DPAA2_ETH_DIST_ETHTYPE; off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO); *(u8 *)(key + off) = l4_proto; *(u8 *)(mask + off) = 0xFF; + *fields |= DPAA2_ETH_DIST_IPPROTO; return 0; } static int prep_ext_rule(struct ethtool_flow_ext *ext_value, struct ethtool_flow_ext *ext_mask, - void *key, void *mask) + void *key, void *mask, u64 *fields) { int off; @@ -396,6 +411,7 @@ static int prep_ext_rule(struct ethtool_flow_ext *ext_value, off = dpaa2_eth_cls_fld_off(NET_PROT_VLAN, NH_FLD_VLAN_TCI); *(__be16 *)(key + off) = ext_value->vlan_tci; *(__be16 *)(mask + off) = ext_mask->vlan_tci; + *fields |= DPAA2_ETH_DIST_VLAN; } return 0; @@ -403,7 +419,7 @@ static int prep_ext_rule(struct ethtool_flow_ext *ext_value, static int prep_mac_ext_rule(struct ethtool_flow_ext *ext_value, struct ethtool_flow_ext *ext_mask, - void *key, void *mask) + void *key, void *mask, u64 *fields) { int off; @@ -411,36 +427,38 @@ static int prep_mac_ext_rule(struct ethtool_flow_ext *ext_value, off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA); ether_addr_copy(key + off, ext_value->h_dest); ether_addr_copy(mask + off, ext_mask->h_dest); + *fields |= DPAA2_ETH_DIST_ETHDST; } return 0; } -static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask) +static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask, + u64 *fields) { int err; switch (fs->flow_type & 0xFF) { case ETHER_FLOW: err = prep_eth_rule(&fs->h_u.ether_spec, &fs->m_u.ether_spec, - key, mask); + key, mask, fields); break; case IP_USER_FLOW: err = prep_uip_rule(&fs->h_u.usr_ip4_spec, - &fs->m_u.usr_ip4_spec, key, mask); + &fs->m_u.usr_ip4_spec, key, mask, fields); break; case TCP_V4_FLOW: err = prep_l4_rule(&fs->h_u.tcp_ip4_spec, &fs->m_u.tcp_ip4_spec, - key, mask, IPPROTO_TCP); + key, mask, IPPROTO_TCP, fields); break; case UDP_V4_FLOW: err = prep_l4_rule(&fs->h_u.udp_ip4_spec, &fs->m_u.udp_ip4_spec, - key, mask, IPPROTO_UDP); + key, mask, IPPROTO_UDP, fields); break; case SCTP_V4_FLOW: err = prep_l4_rule(&fs->h_u.sctp_ip4_spec, &fs->m_u.sctp_ip4_spec, key, mask, - IPPROTO_SCTP); + IPPROTO_SCTP, fields); break; default: return -EOPNOTSUPP; @@ -450,13 +468,14 @@ static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask) return err; if (fs->flow_type & FLOW_EXT) { - err = prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask); + err = prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask, fields); if (err) return err; } if (fs->flow_type & FLOW_MAC_EXT) { - err = prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key, mask); + err = prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key, mask, + fields); if (err) return err; } @@ -473,6 +492,7 @@ static int do_cls_rule(struct net_device *net_dev, struct dpni_rule_cfg rule_cfg = { 0 }; struct dpni_fs_action_cfg fs_act = { 0 }; dma_addr_t key_iova; + u64 fields = 0; void *key_buf; int err; @@ -480,7 +500,7 @@ static int do_cls_rule(struct net_device *net_dev, fs->ring_cookie >= dpaa2_eth_queue_count(priv)) return -EINVAL; - rule_cfg.key_size = dpaa2_eth_cls_key_size(); + rule_cfg.key_size = dpaa2_eth_cls_key_size(DPAA2_ETH_DIST_ALL); /* allocate twice the key size, for the actual key and for mask */ key_buf = kzalloc(rule_cfg.key_size * 2, GFP_KERNEL); @@ -488,10 +508,36 @@ static int do_cls_rule(struct net_device *net_dev, return -ENOMEM; /* Fill the key and mask 
memory areas */ - err = prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size); + err = prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size, &fields); if (err) goto free_mem; + if (!dpaa2_eth_fs_mask_enabled(priv)) { + /* Masking allows us to configure a maximal key during init and + * use it for all flow steering rules. Without it, we include + * in the key only the fields actually used, so we need to + * extract the others from the final key buffer. + * + * Program the FS key if needed, or return error if previously + * set key can't be used for the current rule. User needs to + * delete existing rules in this case to allow for the new one. + */ + if (!priv->rx_cls_fields) { + err = dpaa2_eth_set_cls(net_dev, fields); + if (err) + goto free_mem; + + priv->rx_cls_fields = fields; + } else if (priv->rx_cls_fields != fields) { + netdev_err(net_dev, "No support for multiple FS keys, need to delete existing rules\n"); + err = -EOPNOTSUPP; + goto free_mem; + } + + dpaa2_eth_cls_trim_rule(key_buf, fields); + rule_cfg.key_size = dpaa2_eth_cls_key_size(fields); + } + key_iova = dma_map_single(dev, key_buf, rule_cfg.key_size * 2, DMA_TO_DEVICE); if (dma_mapping_error(dev, key_iova)) { @@ -500,7 +546,8 @@ static int do_cls_rule(struct net_device *net_dev, } rule_cfg.key_iova = key_iova; - rule_cfg.mask_iova = key_iova + rule_cfg.key_size; + if (dpaa2_eth_fs_mask_enabled(priv)) + rule_cfg.mask_iova = key_iova + rule_cfg.key_size; if (add) { if (fs->ring_cookie == RX_CLS_FLOW_DISC) @@ -522,6 +569,17 @@ free_mem: return err; } +static int num_rules(struct dpaa2_eth_priv *priv) +{ + int i, rules = 0; + + for (i = 0; i < dpaa2_eth_fs_count(priv); i++) + if (priv->cls_rules[i].in_use) + rules++; + + return rules; +} + static int update_cls_rule(struct net_device *net_dev, struct ethtool_rx_flow_spec *new_fs, int location) @@ -545,6 +603,9 @@ static int update_cls_rule(struct net_device *net_dev, return err; rule->in_use = 0; + + if (!dpaa2_eth_fs_mask_enabled(priv) && !num_rules(priv)) + priv->rx_cls_fields = 0; } /* If no new entry to add, return here */ @@ -581,9 +642,7 @@ static int dpaa2_eth_get_rxnfc(struct net_device *net_dev, break; case ETHTOOL_GRXCLSRLCNT: rxnfc->rule_cnt = 0; - for (i = 0; i < max_rules; i++) - if (priv->cls_rules[i].in_use) - rxnfc->rule_cnt++; + rxnfc->rule_cnt = num_rules(priv); rxnfc->data = max_rules; break; case ETHTOOL_GRXCLSRULE: diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index 61eea6ac846f..e05d2095d09b 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -2769,7 +2769,7 @@ static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port) struct hns_mac_cb *mac_cb; u8 addr[ETH_ALEN] = {0}; u8 port_num; - u16 mskid; + int mskid; /* promisc use vague table match with vlanid = 0 & macaddr = 0 */ hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 4cd86ba1f050..65b985acae38 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -598,7 +598,7 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data, } else { ring->stats.seg_pkt_cnt++; - pull_len = eth_get_headlen(va, HNS_RX_HEAD_SIZE); + pull_len = eth_get_headlen(ndev, va, HNS_RX_HEAD_SIZE); memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long))); @@ -1962,8 +1962,7 
@@ static void hns_nic_get_stats64(struct net_device *ndev, static u16 hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb, - struct net_device *sb_dev, - select_queue_fallback_t fallback) + struct net_device *sb_dev) { struct ethhdr *eth_hdr = (struct ethhdr *)skb->data; struct hns_nic_priv *priv = netdev_priv(ndev); @@ -1973,7 +1972,7 @@ hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb, is_multicast_ether_addr(eth_hdr->h_dest)) return 0; else - return fallback(ndev, skb, NULL); + return netdev_pick_tx(ndev, skb, NULL); } static const struct net_device_ops hns_nic_netdev_ops = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index 299b277bc7ae..83e19c6b974e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -43,6 +43,8 @@ enum HCLGE_MBX_OPCODE { HCLGE_MBX_GET_QID_IN_PF, /* (VF -> PF) get queue id in pf */ HCLGE_MBX_LINK_STAT_MODE, /* (PF -> VF) link mode has changed */ HCLGE_MBX_GET_LINK_MODE, /* (VF -> PF) get the link mode of pf */ + HLCGE_MBX_PUSH_VLAN_INFO, /* (PF -> VF) push port base vlan */ + HCLGE_MBX_GET_MEDIA_TYPE, /* (VF -> PF) get media type */ HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf reset status */ }; @@ -62,6 +64,8 @@ enum hclge_mbx_vlan_cfg_subcode { HCLGE_MBX_VLAN_FILTER = 0, /* set vlan filter */ HCLGE_MBX_VLAN_TX_OFF_CFG, /* set tx side vlan offload */ HCLGE_MBX_VLAN_RX_OFF_CFG, /* set rx side vlan offload */ + HCLGE_MBX_PORT_BASE_VLAN_CFG, /* set port based vlan configuration */ + HCLGE_MBX_GET_PORT_BASE_VLAN_STATE, /* get port based vlan state */ }; #define HCLGE_MBX_MAX_MSG_SIZE 16 @@ -80,12 +84,15 @@ struct hclgevf_mbx_resp_status { struct hclge_mbx_vf_to_pf_cmd { u8 rsv; u8 mbx_src_vfid; /* Auto filled by IMP */ - u8 rsv1[2]; + u8 mbx_need_resp; + u8 rsv1[1]; u8 msg_len; u8 rsv2[3]; u8 msg[HCLGE_MBX_MAX_MSG_SIZE]; }; +#define HCLGE_MBX_NEED_RESP_BIT BIT(0) + struct hclge_mbx_pf_to_vf_cmd { u8 dest_vfid; u8 rsv[3]; @@ -107,7 +114,7 @@ struct hclgevf_mbx_arq_ring { struct hclgevf_dev *hdev; u32 head; u32 tail; - u32 count; + atomic_t count; u16 msg_q[HCLGE_MBX_MAX_ARQ_MSG_NUM][HCLGE_MBX_MAX_ARQ_MSG_SIZE]; }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c index 17ab4f4af6ad..fa8b8506b120 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c @@ -76,8 +76,8 @@ static int hnae3_get_client_init_flag(struct hnae3_client *client, return inited; } -static int hnae3_match_n_instantiate(struct hnae3_client *client, - struct hnae3_ae_dev *ae_dev, bool is_reg) +static int hnae3_init_client_instance(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev) { int ret; @@ -87,23 +87,27 @@ static int hnae3_match_n_instantiate(struct hnae3_client *client, return 0; } - /* now, (un-)instantiate client by calling lower layer */ - if (is_reg) { - ret = ae_dev->ops->init_client_instance(client, ae_dev); - if (ret) - dev_err(&ae_dev->pdev->dev, - "fail to instantiate client, ret = %d\n", ret); + ret = ae_dev->ops->init_client_instance(client, ae_dev); + if (ret) + dev_err(&ae_dev->pdev->dev, + "fail to instantiate client, ret = %d\n", ret); - return ret; - } + return ret; +} + +static void hnae3_uninit_client_instance(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev) +{ + /* check if this client matches the type of ae_dev */ + if (!(hnae3_client_match(client->type, ae_dev->dev_type) && + 
hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) + return; if (hnae3_get_client_init_flag(client, ae_dev)) { ae_dev->ops->uninit_client_instance(client, ae_dev); hnae3_set_client_init_flag(client, ae_dev, 0); } - - return 0; } int hnae3_register_client(struct hnae3_client *client) @@ -129,7 +133,7 @@ int hnae3_register_client(struct hnae3_client *client) /* if the client could not be initialized on current port, for * any error reasons, move on to next available port */ - ret = hnae3_match_n_instantiate(client, ae_dev, true); + ret = hnae3_init_client_instance(client, ae_dev); if (ret) dev_err(&ae_dev->pdev->dev, "match and instantiation failed for port, ret = %d\n", @@ -153,7 +157,7 @@ void hnae3_unregister_client(struct hnae3_client *client) mutex_lock(&hnae3_common_lock); /* un-initialize the client on every matched port */ list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) { - hnae3_match_n_instantiate(client, ae_dev, false); + hnae3_uninit_client_instance(client, ae_dev); } list_del(&client->node); @@ -205,7 +209,7 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo) * initialize the figure out client instance */ list_for_each_entry(client, &hnae3_client_list, node) { - ret = hnae3_match_n_instantiate(client, ae_dev, true); + ret = hnae3_init_client_instance(client, ae_dev); if (ret) dev_err(&ae_dev->pdev->dev, "match and instantiation failed, ret = %d\n", @@ -243,7 +247,7 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo) * un-initialize the figure out client instance */ list_for_each_entry(client, &hnae3_client_list, node) - hnae3_match_n_instantiate(client, ae_dev, false); + hnae3_uninit_client_instance(client, ae_dev); ae_algo->ops->uninit_ae_dev(ae_dev); hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0); @@ -301,7 +305,7 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) * initialize the figure out client instance */ list_for_each_entry(client, &hnae3_client_list, node) { - ret = hnae3_match_n_instantiate(client, ae_dev, true); + ret = hnae3_init_client_instance(client, ae_dev); if (ret) dev_err(&ae_dev->pdev->dev, "match and instantiation failed, ret = %d\n", @@ -343,7 +347,7 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev) continue; list_for_each_entry(client, &hnae3_client_list, node) - hnae3_match_n_instantiate(client, ae_dev, false); + hnae3_uninit_client_instance(client, ae_dev); ae_algo->ops->uninit_ae_dev(ae_dev); hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0); diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 38b430f11fc1..dce68d3d7907 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -147,6 +147,13 @@ enum hnae3_flr_state { HNAE3_FLR_DONE, }; +enum hnae3_port_base_vlan_state { + HNAE3_PORT_BASE_VLAN_DISABLE, + HNAE3_PORT_BASE_VLAN_ENABLE, + HNAE3_PORT_BASE_VLAN_MODIFY, + HNAE3_PORT_BASE_VLAN_NOCHANGE, +}; + struct hnae3_vector_info { u8 __iomem *io_addr; int vector; @@ -385,7 +392,8 @@ struct hnae3_ae_ops { void (*update_stats)(struct hnae3_handle *handle, struct net_device_stats *net_stats); void (*get_stats)(struct hnae3_handle *handle, u64 *data); - + void (*get_mac_pause_stats)(struct hnae3_handle *handle, u64 *tx_cnt, + u64 *rx_cnt); void (*get_strings)(struct hnae3_handle *handle, u32 stringset, u8 *data); int (*get_sset_count)(struct hnae3_handle *handle, int stringset); @@ -578,8 +586,13 @@ struct hnae3_handle { u32 numa_node_mask; /* for multi-chip support */ + enum hnae3_port_base_vlan_state 
port_base_vlan_state; + u8 netdev_flags; struct dentry *hnae3_dbgfs; + + /* Network interface message level enabled bits */ + u32 msg_enable; }; #define hnae3_set_field(origin, mask, shift, val) \ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c index 0de543faa5b1..fc4917ac44be 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@ -239,6 +239,10 @@ static void hns3_dbg_help(struct hnae3_handle *h) dev_info(&h->pdev->dev, "queue info [number]\n"); dev_info(&h->pdev->dev, "queue map\n"); dev_info(&h->pdev->dev, "bd info [q_num] <bd index>\n"); + + if (!hns3_is_phys_func(h->pdev)) + return; + dev_info(&h->pdev->dev, "dump fd tcam\n"); dev_info(&h->pdev->dev, "dump tc\n"); dev_info(&h->pdev->dev, "dump tm map [q_num]\n"); @@ -247,6 +251,9 @@ static void hns3_dbg_help(struct hnae3_handle *h) dev_info(&h->pdev->dev, "dump qos pri map\n"); dev_info(&h->pdev->dev, "dump qos buf cfg\n"); dev_info(&h->pdev->dev, "dump mng tbl\n"); + dev_info(&h->pdev->dev, "dump reset info\n"); + dev_info(&h->pdev->dev, "dump ncl_config <offset> <length>(in hex)\n"); + dev_info(&h->pdev->dev, "dump mac tnl status\n"); memset(printf_buf, 0, HNS3_DBG_BUF_LEN); strncat(printf_buf, "dump reg [[bios common] [ssu <prt_id>]", @@ -341,6 +348,8 @@ static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer, ret = hns3_dbg_bd_info(handle, cmd_buf); else if (handle->ae_algo->ops->dbg_run_cmd) ret = handle->ae_algo->ops->dbg_run_cmd(handle, cmd_buf); + else + ret = -EOPNOTSUPP; if (ret) hns3_dbg_help(handle); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 162cb9afa0e7..96272e632afc 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -35,6 +35,13 @@ static const char hns3_driver_string[] = static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation."; static struct hnae3_client client; +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, " Network interface message level setting"); + +#define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \ + NETIF_MSG_IFDOWN | NETIF_MSG_IFUP) + /* hns3_pci_tbl - PCI Device ID Table * * Last entry must be all 0s @@ -827,12 +834,12 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto, */ static bool hns3_tunnel_csum_bug(struct sk_buff *skb) { -#define IANA_VXLAN_PORT 4789 union l4_hdr_info l4; l4.hdr = skb_transport_header(skb); - if (!(!skb->encapsulation && l4.udp->dest == htons(IANA_VXLAN_PORT))) + if (!(!skb->encapsulation && + l4.udp->dest == htons(IANA_VXLAN_UDP_PORT))) return false; skb_checksum_help(skb); @@ -963,6 +970,16 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb, { #define HNS3_TX_VLAN_PRIO_SHIFT 13 + struct hnae3_handle *handle = tx_ring->tqp->handle; + + /* Since HW limitation, if port based insert VLAN enabled, only one VLAN + * header is allowed in skb, otherwise it will cause RAS error. + */ + if (unlikely(skb_vlan_tagged_multi(skb) && + handle->port_base_vlan_state == + HNAE3_PORT_BASE_VLAN_ENABLE)) + return -EINVAL; + if (skb->protocol == htons(ETH_P_8021Q) && !(tx_ring->tqp->handle->kinfo.netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { @@ -984,8 +1001,16 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb, * and use inner_vtag in one tag case. 
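The hns3_enet.c hunk above adds the conventional message-level plumbing: a module parameter defaulting to -1 plus a driver default bitmap, alongside the new msg_enable member in hnae3_handle. The usual way to combine the two is netif_msg_init(); its exact placement in hns3 is not shown in these hunks, so the sketch below is an assumption:

static int debug = -1;			/* -1 selects the default bitmap */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Network interface message level setting");

#define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			   NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)

/* At probe time (placement here is this sketch's assumption): */
handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL);

/* Verbose paths then gate on the resolved bits: */
if (handle->msg_enable & NETIF_MSG_IFUP)
	netdev_info(netdev, "net open\n");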
*/ if (skb->protocol == htons(ETH_P_8021Q)) { - hns3_set_field(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1); - *out_vtag = vlan_tag; + if (handle->port_base_vlan_state == + HNAE3_PORT_BASE_VLAN_DISABLE){ + hns3_set_field(*out_vlan_flag, + HNS3_TXD_OVLAN_B, 1); + *out_vtag = vlan_tag; + } else { + hns3_set_field(*inner_vlan_flag, + HNS3_TXD_VLAN_B, 1); + *inner_vtag = vlan_tag; + } } else { hns3_set_field(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1); *inner_vtag = vlan_tag; @@ -1012,7 +1037,6 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; struct hns3_desc *desc = &ring->desc[ring->next_to_use]; struct device *dev = ring_to_dev(ring); - u16 bdtp_fe_sc_vld_ra_ri = 0; struct skb_frag_struct *frag; unsigned int frag_buf_num; int k, sizeoflast; @@ -1080,12 +1104,30 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, desc_cb->length = size; + if (likely(size <= HNS3_MAX_BD_SIZE)) { + u16 bdtp_fe_sc_vld_ra_ri = 0; + + desc_cb->priv = priv; + desc_cb->dma = dma; + desc_cb->type = type; + desc->addr = cpu_to_le64(dma); + desc->tx.send_size = cpu_to_le16(size); + hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end); + desc->tx.bdtp_fe_sc_vld_ra_ri = + cpu_to_le16(bdtp_fe_sc_vld_ra_ri); + + ring_ptr_move_fw(ring, next_to_use); + return 0; + } + frag_buf_num = hns3_tx_bd_count(size); sizeoflast = size & HNS3_TX_LAST_SIZE_M; sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE; /* When frag size is bigger than hardware limit, split this frag */ for (k = 0; k < frag_buf_num; k++) { + u16 bdtp_fe_sc_vld_ra_ri = 0; + /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */ desc_cb->priv = priv; desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k; @@ -1574,6 +1616,9 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu) struct hnae3_handle *h = hns3_get_handle(netdev); int ret; + if (hns3_nic_resetting(netdev)) + return -EBUSY; + if (!h->ae_algo->ops->set_mtu) return -EOPNOTSUPP; @@ -1590,13 +1635,19 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu) static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) { struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = hns3_get_handle(ndev); struct hns3_enet_ring *tx_ring = NULL; + struct napi_struct *napi; int timeout_queue = 0; int hw_head, hw_tail; + int fbd_num, fbd_oft; + int ebd_num, ebd_oft; + int bd_num, bd_err; + int ring_en, tc; int i; /* Find the stopped queue the same way the stack does */ - for (i = 0; i < ndev->real_num_tx_queues; i++) { + for (i = 0; i < ndev->num_tx_queues; i++) { struct netdev_queue *q; unsigned long trans_start; @@ -1617,21 +1668,66 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) return false; } + priv->tx_timeout_count++; + tx_ring = priv->ring_data[timeout_queue].ring; + napi = &tx_ring->tqp_vector->napi; + + netdev_info(ndev, + "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, napi state: %lu\n", + priv->tx_timeout_count, timeout_queue, tx_ring->next_to_use, + tx_ring->next_to_clean, napi->state); + + netdev_info(ndev, + "tx_pkts: %llu, tx_bytes: %llu, io_err_cnt: %llu, sw_err_cnt: %llu\n", + tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes, + tx_ring->stats.io_err_cnt, tx_ring->stats.sw_err_cnt); + + netdev_info(ndev, + "seg_pkt_cnt: %llu, tx_err_cnt: %llu, restart_queue: %llu, tx_busy: %llu\n", + tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_err_cnt, + tx_ring->stats.restart_queue, tx_ring->stats.tx_busy); + + /* When mac 
received many pause frames continuous, it's unable to send + * packets, which may cause tx timeout + */ + if (h->ae_algo->ops->update_stats && + h->ae_algo->ops->get_mac_pause_stats) { + u64 tx_pause_cnt, rx_pause_cnt; + + h->ae_algo->ops->update_stats(h, &ndev->stats); + h->ae_algo->ops->get_mac_pause_stats(h, &tx_pause_cnt, + &rx_pause_cnt); + netdev_info(ndev, "tx_pause_cnt: %llu, rx_pause_cnt: %llu\n", + tx_pause_cnt, rx_pause_cnt); + } hw_head = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG); hw_tail = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG); + fbd_num = readl_relaxed(tx_ring->tqp->io_base + + HNS3_RING_TX_RING_FBDNUM_REG); + fbd_oft = readl_relaxed(tx_ring->tqp->io_base + + HNS3_RING_TX_RING_OFFSET_REG); + ebd_num = readl_relaxed(tx_ring->tqp->io_base + + HNS3_RING_TX_RING_EBDNUM_REG); + ebd_oft = readl_relaxed(tx_ring->tqp->io_base + + HNS3_RING_TX_RING_EBD_OFFSET_REG); + bd_num = readl_relaxed(tx_ring->tqp->io_base + + HNS3_RING_TX_RING_BD_NUM_REG); + bd_err = readl_relaxed(tx_ring->tqp->io_base + + HNS3_RING_TX_RING_BD_ERR_REG); + ring_en = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_EN_REG); + tc = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_TX_RING_TC_REG); + netdev_info(ndev, - "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, INT: 0x%x\n", - priv->tx_timeout_count, - timeout_queue, - tx_ring->next_to_use, - tx_ring->next_to_clean, - hw_head, - hw_tail, + "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n", + bd_num, hw_head, hw_tail, bd_err, readl(tx_ring->tqp_vector->mask_addr)); + netdev_info(ndev, + "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n", + ring_en, tc, fbd_num, fbd_oft, ebd_num, ebd_oft); return true; } @@ -1644,8 +1740,6 @@ static void hns3_nic_net_timeout(struct net_device *ndev) if (!hns3_get_tx_timeo_queue_info(ndev)) return; - priv->tx_timeout_count++; - /* request the reset, and let the hclge to determine * which reset level should be done */ @@ -1670,7 +1764,7 @@ static const struct net_device_ops hns3_nic_netdev_ops = { .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan, }; -static bool hns3_is_phys_func(struct pci_dev *pdev) +bool hns3_is_phys_func(struct pci_dev *pdev) { u32 dev_id = pdev->device; @@ -2120,14 +2214,22 @@ static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i) static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes, int *pkts) { - struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean]; + int ntc = ring->next_to_clean; + struct hns3_desc_cb *desc_cb; + desc_cb = &ring->desc_cb[ntc]; (*pkts) += (desc_cb->type == DESC_TYPE_SKB); (*bytes) += desc_cb->length; /* desc_cb will be cleaned, after hnae3_free_buffer_detach*/ - hns3_free_buffer_detach(ring, ring->next_to_clean); + hns3_free_buffer_detach(ring, ntc); - ring_ptr_move_fw(ring, next_to_clean); + if (++ntc == ring->desc_num) + ntc = 0; + + /* This smp_store_release() pairs with smp_load_acquire() in + * ring_space called by hns3_nic_net_xmit. 
+ */ + smp_store_release(&ring->next_to_clean, ntc); } static int is_valid_clean_head(struct hns3_enet_ring *ring, int h) @@ -2293,17 +2395,50 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i, } } +static int hns3_gro_complete(struct sk_buff *skb) +{ + __be16 type = skb->protocol; + struct tcphdr *th; + int depth = 0; + + while (type == htons(ETH_P_8021Q)) { + struct vlan_hdr *vh; + + if ((depth + VLAN_HLEN) > skb_headlen(skb)) + return -EFAULT; + + vh = (struct vlan_hdr *)(skb->data + depth); + type = vh->h_vlan_encapsulated_proto; + depth += VLAN_HLEN; + } + + if (type == htons(ETH_P_IP)) { + depth += sizeof(struct iphdr); + } else if (type == htons(ETH_P_IPV6)) { + depth += sizeof(struct ipv6hdr); + } else { + netdev_err(skb->dev, + "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n", + be16_to_cpu(type), depth); + return -EFAULT; + } + + th = (struct tcphdr *)(skb->data + depth); + skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count; + if (th->cwr) + skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; + + skb->ip_summed = CHECKSUM_UNNECESSARY; + + return 0; +} + static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, - struct hns3_desc *desc) + u32 l234info, u32 bd_base_info) { struct net_device *netdev = ring->tqp->handle->kinfo.netdev; int l3_type, l4_type; - u32 bd_base_info; int ol4_type; - u32 l234info; - - bd_base_info = le32_to_cpu(desc->rx.bd_base_info); - l234info = le32_to_cpu(desc->rx.l234_info); skb->ip_summed = CHECKSUM_NONE; @@ -2312,12 +2447,6 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, if (!(netdev->features & NETIF_F_RXCSUM)) return; - /* We MUST enable hardware checksum before enabling hardware GRO */ - if (skb_shinfo(skb)->gso_size) { - skb->ip_summed = CHECKSUM_UNNECESSARY; - return; - } - /* check if hardware has done checksum */ if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B))) return; @@ -2370,6 +2499,7 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring, struct hns3_desc *desc, u32 l234info, u16 *vlan_tag) { + struct hnae3_handle *handle = ring->tqp->handle; struct pci_dev *pdev = ring->tqp->handle->pdev; if (pdev->revision == 0x20) { @@ -2382,15 +2512,36 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring, #define HNS3_STRP_OUTER_VLAN 0x1 #define HNS3_STRP_INNER_VLAN 0x2 +#define HNS3_STRP_BOTH 0x3 + /* Hardware always insert VLAN tag into RX descriptor when + * remove the tag from packet, driver needs to determine + * reporting which tag to stack. 
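The reclaim hunk above replaces a plain index move with an explicit release store of next_to_clean, pairing with an acquire load on the producer side so the xmit path never computes free space from a cleaned index before the buffer-release writes that justify it are visible. A self-contained sketch of the pairing; struct ring, free_buffer() and the free-space formula are stand-ins, not the hns3 definitions:

struct ring {
	int next_to_use;	/* producer index */
	int next_to_clean;	/* consumer index, published with release */
	int desc_num;
};

static void free_buffer(struct ring *r, int i)
{
	/* detach DMA mapping, free the skb: elided */
}

/* Producer side (xmit path): acquire-load the consumer index before
 * computing how many descriptors are free.
 */
static int ring_space(struct ring *r)
{
	int ntc = smp_load_acquire(&r->next_to_clean);

	return (ntc - r->next_to_use - 1 + r->desc_num) % r->desc_num;
}

/* Consumer side (TX completion): release the buffer first, then
 * publish the advanced index with release semantics.
 */
static void reclaim_one(struct ring *r)
{
	int ntc = r->next_to_clean;

	free_buffer(r, ntc);
	if (++ntc == r->desc_num)
		ntc = 0;
	smp_store_release(&r->next_to_clean, ntc);
}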
static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, - struct hns3_desc *desc) + u32 l234info, u32 bd_base_info) { struct net_device *netdev = ring->tqp->handle->kinfo.netdev; int l3_type, l4_type; - u32 bd_base_info; int ol4_type; - u32 l234info; - - bd_base_info = le32_to_cpu(desc->rx.bd_base_info); - l234info = le32_to_cpu(desc->rx.l234_info); skb->ip_summed = CHECKSUM_NONE; @@ -2312,12 +2447,6 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, if (!(netdev->features & NETIF_F_RXCSUM)) return; - /* We MUST enable hardware checksum before enabling hardware GRO */ - if (skb_shinfo(skb)->gso_size) { - skb->ip_summed = CHECKSUM_UNNECESSARY; - return; - } - /* check if hardware has done checksum */ if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B))) return; @@ -2370,6 +2499,7 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring, struct hns3_desc *desc, u32 l234info, u16 *vlan_tag) { + struct hnae3_handle *handle = ring->tqp->handle; struct pci_dev *pdev = ring->tqp->handle->pdev; if (pdev->revision == 0x20) { @@ -2382,15 +2512,36 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring, #define HNS3_STRP_OUTER_VLAN 0x1 #define HNS3_STRP_INNER_VLAN 0x2 +#define HNS3_STRP_BOTH 0x3 + /* Hardware always inserts a VLAN tag into the RX descriptor when + * it strips the tag from the packet, so the driver needs to + * determine which tag to report to the stack. + */ switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M, HNS3_RXD_STRP_TAGP_S)) { case HNS3_STRP_OUTER_VLAN: + if (handle->port_base_vlan_state != + HNAE3_PORT_BASE_VLAN_DISABLE) + return false; + *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); return true; case HNS3_STRP_INNER_VLAN: + if (handle->port_base_vlan_state != + HNAE3_PORT_BASE_VLAN_DISABLE) + return false; + *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); return true; + case HNS3_STRP_BOTH: + if (handle->port_base_vlan_state == + HNAE3_PORT_BASE_VLAN_DISABLE) + *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); + else + *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); + + return true; default: return false; } @@ -2437,7 +2588,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, int length, ring->stats.seg_pkt_cnt++; u64_stats_update_end(&ring->syncp); - ring->pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE); + ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE); __skb_put(skb, ring->pull_len); hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len, desc_cb); @@ -2512,8 +2663,9 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc, return 0; } -static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info, - u32 bd_base_info) +static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring, + struct sk_buff *skb, u32 l234info, + u32 bd_base_info) { u16 gro_count; u32 l3_type; @@ -2521,12 +2673,11 @@ static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info, gro_count = hnae3_get_field(l234info, HNS3_RXD_GRO_COUNT_M, HNS3_RXD_GRO_COUNT_S); /* if there is no HW GRO, do not set gro params */ - if (!gro_count) - return; + if (!gro_count) { + hns3_rx_checksum(ring, skb, l234info, bd_base_info); + return 0; + } - /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count - * to skb_shinfo(skb)->gso_segs - */ NAPI_GRO_CB(skb)->count = gro_count; l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S); if (l3_type == HNS3_L3_TYPE_IPV4) skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; else if (l3_type == HNS3_L3_TYPE_IPV6) skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; else - return; + return -EFAULT; skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info, HNS3_RXD_GRO_SIZE_M, HNS3_RXD_GRO_SIZE_S); - if (skb_shinfo(skb)->gso_size) - tcp_gro_complete(skb); + + return hns3_gro_complete(skb); } static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring, - struct sk_buff *skb) + struct sk_buff *skb, u32 rss_hash) { struct hnae3_handle *handle = ring->tqp->handle; enum pkt_hash_types rss_type; - struct hns3_desc *desc; - int last_bd; - - /* When driver handle the rss type, ring->next_to_clean indicates the - * first descriptor of next packet, need -1 here.
- */ - last_bd = (ring->next_to_clean - 1 + ring->desc_num) % ring->desc_num; - desc = &ring->desc[last_bd]; - if (le32_to_cpu(desc->rx.rss_hash)) + if (rss_hash) rss_type = handle->kinfo.rss_type; else rss_type = PKT_HASH_TYPE_NONE; - skb_set_hash(skb, le32_to_cpu(desc->rx.rss_hash), rss_type); + skb_set_hash(skb, rss_hash, rss_type); } -static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, - struct sk_buff **out_skb) +static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb) { struct net_device *netdev = ring->tqp->handle->kinfo.netdev; enum hns3_pkt_l2t_type l2_frame_type; + u32 bd_base_info, l234info; + struct hns3_desc *desc; + unsigned int len; + int pre_ntc, ret; + + /* The bdinfo handled below is only valid on the last BD of the + * current packet, and ring->next_to_clean indicates the first + * descriptor of the next packet, so we need -1 below. + */ + pre_ntc = ring->next_to_clean ? (ring->next_to_clean - 1) : + (ring->desc_num - 1); + desc = &ring->desc[pre_ntc]; + bd_base_info = le32_to_cpu(desc->rx.bd_base_info); + l234info = le32_to_cpu(desc->rx.l234_info); + + /* Based on the hardware strategy, the offloaded tag is stored in + * ot_vlan_tag in the two-layer tag case, and in vlan_tag in the + * one-layer tag case. + */ + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { + u16 vlan_tag; + + if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + vlan_tag); + } + + if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) { + u64_stats_update_begin(&ring->syncp); + ring->stats.non_vld_descs++; + u64_stats_update_end(&ring->syncp); + + return -EINVAL; + } + + if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) | + BIT(HNS3_RXD_L2E_B))))) { + u64_stats_update_begin(&ring->syncp); + if (l234info & BIT(HNS3_RXD_L2E_B)) + ring->stats.l2_err++; + else + ring->stats.err_pkt_len++; + u64_stats_update_end(&ring->syncp); + + return -EFAULT; + } + + len = skb->len; + + /* set skb->protocol for the IP stack */ + skb->protocol = eth_type_trans(skb, netdev); + + /* This is needed in order to enable forwarding support */ + ret = hns3_set_gro_and_checksum(ring, skb, l234info, bd_base_info); + if (unlikely(ret)) { + u64_stats_update_begin(&ring->syncp); + ring->stats.rx_err_cnt++; + u64_stats_update_end(&ring->syncp); + return ret; + } + + l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M, + HNS3_RXD_DMAC_S); + + u64_stats_update_begin(&ring->syncp); + ring->stats.rx_pkts++; + ring->stats.rx_bytes += len; + + if (l2_frame_type == HNS3_L2_TYPE_MULTICAST) + ring->stats.rx_multicast++; + + u64_stats_update_end(&ring->syncp); + + ring->tqp_vector->rx_group.total_bytes += len; + + hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash)); + return 0; +} + +static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, + struct sk_buff **out_skb) +{ struct sk_buff *skb = ring->skb; struct hns3_desc_cb *desc_cb; struct hns3_desc *desc; u32 bd_base_info; - u32 l234info; int length; int ret; @@ -2636,64 +2859,13 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, ALIGN(ring->pull_len, sizeof(long))); } - l234info = le32_to_cpu(desc->rx.l234_info); - bd_base_info = le32_to_cpu(desc->rx.bd_base_info); - - /* Based on hw strategy, the tag offloaded will be stored at - * ot_vlan_tag in two layer tag case, and stored at vlan_tag - * in one layer tag case.
- */ - if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { - u16 vlan_tag; - - if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag)) - __vlan_hwaccel_put_tag(skb, - htons(ETH_P_8021Q), - vlan_tag); - } - - if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) { - u64_stats_update_begin(&ring->syncp); - ring->stats.non_vld_descs++; - u64_stats_update_end(&ring->syncp); - + ret = hns3_handle_bdinfo(ring, skb); + if (unlikely(ret)) { dev_kfree_skb_any(skb); - return -EINVAL; - } - - if (unlikely((!desc->rx.pkt_len) || - (l234info & (BIT(HNS3_RXD_TRUNCAT_B) | - BIT(HNS3_RXD_L2E_B))))) { - u64_stats_update_begin(&ring->syncp); - if (l234info & BIT(HNS3_RXD_L2E_B)) - ring->stats.l2_err++; - else - ring->stats.err_pkt_len++; - u64_stats_update_end(&ring->syncp); - - dev_kfree_skb_any(skb); - return -EFAULT; + return ret; } - - l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M, - HNS3_RXD_DMAC_S); - u64_stats_update_begin(&ring->syncp); - if (l2_frame_type == HNS3_L2_TYPE_MULTICAST) - ring->stats.rx_multicast++; - - ring->stats.rx_pkts++; - ring->stats.rx_bytes += skb->len; - u64_stats_update_end(&ring->syncp); - - ring->tqp_vector->rx_group.total_bytes += skb->len; - - /* This is needed in order to enable forwarding support */ - hns3_set_gro_param(skb, l234info, bd_base_info); - - hns3_rx_checksum(ring, skb, desc); *out_skb = skb; - hns3_set_rx_skb_rss_type(ring, skb); return 0; } @@ -2703,9 +2875,8 @@ int hns3_clean_rx_ring( void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *)) { #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16 - struct net_device *netdev = ring->tqp->handle->kinfo.netdev; int recv_pkts, recv_bds, clean_count, err; - int unused_count = hns3_desc_unused(ring) - ring->pending_buf; + int unused_count = hns3_desc_unused(ring); struct sk_buff *skb = ring->skb; int num; @@ -2714,6 +2885,7 @@ int hns3_clean_rx_ring( recv_pkts = 0, recv_bds = 0, clean_count = 0; num -= unused_count; + unused_count -= ring->pending_buf; while (recv_pkts < budget && recv_bds < num) { /* Reuse or realloc buffers */ @@ -2740,8 +2912,6 @@ int hns3_clean_rx_ring( continue; } - /* Do update ip stack process */ - skb->protocol = eth_type_trans(skb, netdev); rx_fn(ring, skb); recv_bds += ring->pending_buf; clean_count += ring->pending_buf; @@ -2891,7 +3061,7 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget) struct hns3_enet_tqp_vector *tqp_vector = container_of(napi, struct hns3_enet_tqp_vector, napi); bool clean_complete = true; - int rx_budget; + int rx_budget = budget; if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) { napi_complete(napi); @@ -2905,7 +3075,8 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget) hns3_clean_tx_ring(ring); /* make sure rx ring budget not smaller than 1 */ - rx_budget = max(budget / tqp_vector->num_tqps, 1); + if (tqp_vector->num_tqps > 1) + rx_budget = max(budget / tqp_vector->num_tqps, 1); hns3_for_each_ring(ring, tqp_vector->rx_group) { int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget, @@ -3316,6 +3487,7 @@ err: } devm_kfree(&pdev->dev, priv->ring_data); + priv->ring_data = NULL; return ret; } @@ -3324,12 +3496,16 @@ static void hns3_put_ring_config(struct hns3_nic_priv *priv) struct hnae3_handle *h = priv->ae_handle; int i; + if (!priv->ring_data) + return; + for (i = 0; i < h->kinfo.num_tqps; i++) { devm_kfree(priv->dev, priv->ring_data[i].ring); devm_kfree(priv->dev, priv->ring_data[i + h->kinfo.num_tqps].ring); } devm_kfree(priv->dev, priv->ring_data); + priv->ring_data = NULL; } static int 
hns3_alloc_ring_memory(struct hns3_enet_ring *ring) @@ -3584,6 +3760,21 @@ static void hns3_client_stop(struct hnae3_handle *handle) handle->ae_algo->ops->client_stop(handle); } +static void hns3_info_show(struct hns3_nic_priv *priv) +{ + struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo; + + dev_info(priv->dev, "MAC address: %pM\n", priv->netdev->dev_addr); + dev_info(priv->dev, "Task queue pairs numbers: %d\n", kinfo->num_tqps); + dev_info(priv->dev, "RSS size: %d\n", kinfo->rss_size); + dev_info(priv->dev, "Allocated RSS size: %d\n", kinfo->req_rss_size); + dev_info(priv->dev, "RX buffer length: %d\n", kinfo->rx_buf_len); + dev_info(priv->dev, "Desc num per TX queue: %d\n", kinfo->num_tx_desc); + dev_info(priv->dev, "Desc num per RX queue: %d\n", kinfo->num_rx_desc); + dev_info(priv->dev, "Total number of enabled TCs: %d\n", kinfo->num_tc); + dev_info(priv->dev, "Max mtu size: %d\n", priv->netdev->max_mtu); +} + static int hns3_client_init(struct hnae3_handle *handle) { struct pci_dev *pdev = handle->pdev; @@ -3605,6 +3796,8 @@ static int hns3_client_init(struct hnae3_handle *handle) priv->tx_timeout_count = 0; set_bit(HNS3_NIC_STATE_DOWN, &priv->state); + handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL); + handle->kinfo.netdev = netdev; handle->priv = (void *)priv; @@ -3671,6 +3864,9 @@ static int hns3_client_init(struct hnae3_handle *handle) set_bit(HNS3_NIC_STATE_INITED, &priv->state); + if (netif_msg_drv(handle)) + hns3_info_show(priv); + return ret; out_client_start: @@ -3697,13 +3893,13 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) struct hns3_nic_priv *priv = netdev_priv(netdev); int ret; - hns3_client_stop(handle); - hns3_remove_hw_addr(netdev); if (netdev->reg_state != NETREG_UNINITIALIZED) unregister_netdev(netdev); + hns3_client_stop(handle); + if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) { netdev_warn(netdev, "already uninitialized\n"); goto out_netdev_free; @@ -3729,8 +3925,6 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) hns3_dbg_uninit(handle); - priv->ring_data = NULL; - out_netdev_free: free_netdev(netdev); } @@ -3745,11 +3939,13 @@ static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup) if (linkup) { netif_carrier_on(netdev); netif_tx_wake_all_queues(netdev); - netdev_info(netdev, "link up\n"); + if (netif_msg_link(handle)) + netdev_info(netdev, "link up\n"); } else { netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); - netdev_info(netdev, "link down\n"); + if (netif_msg_link(handle)) + netdev_info(netdev, "link down\n"); } } @@ -3773,12 +3969,13 @@ static int hns3_recover_hw_addr(struct net_device *ndev) struct netdev_hw_addr *ha, *tmp; int ret = 0; + netif_addr_lock_bh(ndev); /* go through and sync uc_addr entries to the device */ list = &ndev->uc; list_for_each_entry_safe(ha, tmp, &list->list, list) { ret = hns3_nic_uc_sync(ndev, ha->addr); if (ret) - return ret; + goto out; } /* go through and sync mc_addr entries to the device */ @@ -3786,9 +3983,11 @@ static int hns3_recover_hw_addr(struct net_device *ndev) list_for_each_entry_safe(ha, tmp, &list->list, list) { ret = hns3_nic_mc_sync(ndev, ha->addr); if (ret) - return ret; + goto out; } +out: + netif_addr_unlock_bh(ndev); return ret; } @@ -3799,6 +3998,7 @@ static void hns3_remove_hw_addr(struct net_device *netdev) hns3_nic_uc_unsync(netdev, netdev->dev_addr); + netif_addr_lock_bh(netdev); /* go through and unsync uc_addr entries to the device */ list = &netdev->uc; 
list_for_each_entry_safe(ha, tmp, &list->list, list) @@ -3809,6 +4009,8 @@ static void hns3_remove_hw_addr(struct net_device *netdev) list_for_each_entry_safe(ha, tmp, &list->list, list) if (ha->refcount > 1) hns3_nic_mc_unsync(netdev, ha->addr); + + netif_addr_unlock_bh(netdev); } static void hns3_clear_tx_ring(struct hns3_enet_ring *ring) @@ -3850,6 +4052,13 @@ static int hns3_clear_rx_ring(struct hns3_enet_ring *ring) ring_ptr_move_fw(ring, next_to_use); } + /* Free the pending skb in rx ring */ + if (ring->skb) { + dev_kfree_skb_any(ring->skb); + ring->skb = NULL; + ring->pending_buf = 0; + } + return 0; } @@ -4048,18 +4257,24 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) if (ret) goto err_uninit_vector; + ret = hns3_client_start(handle); + if (ret) { + dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret); + goto err_uninit_ring; + } + set_bit(HNS3_NIC_STATE_INITED, &priv->state); return ret; +err_uninit_ring: + hns3_uninit_all_ring(priv); err_uninit_vector: hns3_nic_uninit_vector_data(priv); - priv->ring_data = NULL; err_dealloc_vector: hns3_nic_dealloc_vector_data(priv); err_put_ring: hns3_put_ring_config(priv); - priv->ring_data = NULL; return ret; } @@ -4101,7 +4316,7 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) struct hns3_nic_priv *priv = netdev_priv(netdev); int ret; - if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) { + if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) { netdev_warn(netdev, "already uninitialized\n"); return 0; } @@ -4121,9 +4336,6 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) netdev_err(netdev, "uninit ring error\n"); hns3_put_ring_config(priv); - priv->ring_data = NULL; - - clear_bit(HNS3_NIC_STATE_INITED, &priv->state); return ret; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 75669cd0c311..2b4f5ea3fddf 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -42,8 +42,10 @@ enum hns3_nic_state { #define HNS3_RING_TX_RING_HEAD_REG 0x0005C #define HNS3_RING_TX_RING_FBDNUM_REG 0x00060 #define HNS3_RING_TX_RING_OFFSET_REG 0x00064 +#define HNS3_RING_TX_RING_EBDNUM_REG 0x00068 #define HNS3_RING_TX_RING_PKTNUM_RECORD_REG 0x0006C - +#define HNS3_RING_TX_RING_EBD_OFFSET_REG 0x00070 +#define HNS3_RING_TX_RING_BD_ERR_REG 0x00074 #define HNS3_RING_PREFETCH_EN_REG 0x0007C #define HNS3_RING_CFG_VF_NUM_REG 0x00080 #define HNS3_RING_ASID_REG 0x0008C @@ -577,18 +579,16 @@ union l4_hdr_info { unsigned char *hdr; }; -/* the distance between [begin, end) in a ring buffer - * note: there is a unuse slot between the begin and the end - */ -static inline int ring_dist(struct hns3_enet_ring *ring, int begin, int end) -{ - return (end - begin + ring->desc_num) % ring->desc_num; -} - static inline int ring_space(struct hns3_enet_ring *ring) { - return ring->desc_num - - ring_dist(ring, ring->next_to_clean, ring->next_to_use) - 1; + /* This smp_load_acquire() pairs with smp_store_release() in + * hns3_nic_reclaim_one_desc called by hns3_clean_tx_ring. + */ + int begin = smp_load_acquire(&ring->next_to_clean); + int end = READ_ONCE(ring->next_to_use); + + return ((end >= begin) ? 
(ring->desc_num - end + begin) : + (begin - end)) - 1; } static inline int is_ring_empty(struct hns3_enet_ring *ring) @@ -666,6 +666,7 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv); int hns3_uninit_all_ring(struct hns3_nic_priv *priv); int hns3_nic_reset_all_ring(struct hnae3_handle *h); netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev); +bool hns3_is_phys_func(struct pci_dev *pdev); int hns3_clean_rx_ring( struct hns3_enet_ring *ring, int budget, void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *)); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index 359d4731fb2d..3ae11243a558 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -483,6 +483,11 @@ static void hns3_get_stats(struct net_device *netdev, struct hnae3_handle *h = hns3_get_handle(netdev); u64 *p = data; + if (hns3_nic_resetting(netdev)) { + netdev_err(netdev, "dev resetting, could not get stats\n"); + return; + } + if (!h->ae_algo->ops->get_stats || !h->ae_algo->ops->update_stats) { netdev_err(netdev, "could not get any statistics\n"); return; @@ -648,6 +653,10 @@ static int hns3_get_link_ksettings(struct net_device *netdev, static int hns3_set_link_ksettings(struct net_device *netdev, const struct ethtool_link_ksettings *cmd) { + /* Chip doesn't support this mode. */ + if (cmd->base.speed == SPEED_1000 && cmd->base.duplex == DUPLEX_HALF) + return -EINVAL; + /* Only support ksettings_set for netdev with phy attached for now */ if (netdev->phydev) return phy_ethtool_ksettings_set(netdev->phydev, cmd); @@ -1101,6 +1110,20 @@ static int hns3_set_phys_id(struct net_device *netdev, return h->ae_algo->ops->set_led_id(h, state); } +static u32 hns3_get_msglevel(struct net_device *netdev) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + return h->msg_enable; +} + +static void hns3_set_msglevel(struct net_device *netdev, u32 msg_level) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + h->msg_enable = msg_level; +} + static const struct ethtool_ops hns3vf_ethtool_ops = { .get_drvinfo = hns3_get_drvinfo, .get_ringparam = hns3_get_ringparam, @@ -1121,6 +1144,8 @@ static const struct ethtool_ops hns3vf_ethtool_ops = { .get_regs_len = hns3_get_regs_len, .get_regs = hns3_get_regs, .get_link = hns3_get_link, + .get_msglevel = hns3_get_msglevel, + .set_msglevel = hns3_set_msglevel, }; static const struct ethtool_ops hns3_ethtool_ops = { @@ -1150,6 +1175,8 @@ static const struct ethtool_ops hns3_ethtool_ops = { .get_regs_len = hns3_get_regs_len, .get_regs = hns3_get_regs, .set_phys_id = hns3_set_phys_id, + .get_msglevel = hns3_get_msglevel, + .set_msglevel = hns3_set_msglevel, }; void hns3_ethtool_set_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index 3a093a92eac5..fbd904e3077c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -355,7 +355,7 @@ int hclge_cmd_init(struct hclge_dev *hdev) int ret; spin_lock_bh(&hdev->hw.cmq.csq.lock); - spin_lock_bh(&hdev->hw.cmq.crq.lock); + spin_lock(&hdev->hw.cmq.crq.lock); hdev->hw.cmq.csq.next_to_clean = 0; hdev->hw.cmq.csq.next_to_use = 0; @@ -364,7 +364,7 @@ int hclge_cmd_init(struct hclge_dev *hdev) hclge_cmd_init_regs(&hdev->hw); - spin_unlock_bh(&hdev->hw.cmq.crq.lock); + spin_unlock(&hdev->hw.cmq.crq.lock); 
spin_unlock_bh(&hdev->hw.cmq.csq.lock); clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); @@ -373,21 +373,26 @@ * reset may happen when lower level reset is being processed. */ if (hclge_is_reset_pending(hdev)) { - set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); - return -EBUSY; + ret = -EBUSY; + goto err_cmd_init; } ret = hclge_cmd_query_firmware_version(&hdev->hw, &version); if (ret) { dev_err(&hdev->pdev->dev, "firmware version query failed %d\n", ret); - return ret; + goto err_cmd_init; } hdev->fw_version = version; dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version); return 0; + +err_cmd_init: + set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); + + return ret; } static void hclge_cmd_uninit_regs(struct hclge_hw *hw) @@ -411,7 +416,7 @@ static void hclge_destroy_queue(struct hclge_cmq_ring *ring) spin_unlock(&ring->lock); } -void hclge_destroy_cmd_queue(struct hclge_hw *hw) +static void hclge_destroy_cmd_queue(struct hclge_hw *hw) { hclge_destroy_queue(&hw->cmq.csq); hclge_destroy_queue(&hw->cmq.crq); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 3714733c96d9..d01f93eee845 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -109,6 +109,9 @@ enum hclge_opcode_type { HCLGE_OPC_QUERY_LINK_STATUS = 0x0307, HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308, HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309, + HCLGE_OPC_QUERY_MAC_TNL_INT = 0x0310, + HCLGE_OPC_MAC_TNL_INT_EN = 0x0311, + HCLGE_OPC_CLEAR_MAC_TNL_INT = 0x0312, HCLGE_OPC_SERDES_LOOPBACK = 0x0315, /* PFC/Pause commands */ @@ -237,6 +240,9 @@ enum hclge_opcode_type { /* Led command */ HCLGE_OPC_LED_STATUS_CFG = 0xB000, + /* NCL config command */ + HCLGE_OPC_QUERY_NCL_CONFIG = 0x7011, + /* SFP command */ HCLGE_OPC_SFP_GET_SPEED = 0x7104,
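Aside: two idioms in the hclge_cmd.c hunk above are worth a note. First, the lock nesting: the outer csq lock is taken with spin_lock_bh(), which already disables bottom halves, so the nested crq lock can be a plain spin_lock(); taking it with the _bh variant again was redundant. Second, the rework funnels every failure path through a single err_cmd_init label, so HCLGE_STATE_CMD_DISABLE is set in exactly one place instead of before each early return. A runnable, self-contained C sketch of that goto-unwind idiom follows; reset_pending, query_fw_version() and cmd_init() are hypothetical stand-ins, not the driver's API:

#include <stdbool.h>
#include <stdio.h>

static bool reset_pending = true;	/* stand-in for hclge_is_reset_pending() */

/* stand-in for hclge_cmd_query_firmware_version() */
static int query_fw_version(unsigned int *ver)
{
	*ver = 0x10203;
	return 0;
}

static int cmd_init(unsigned int *fw_version, bool *cmd_disabled)
{
	int ret;

	if (reset_pending) {
		ret = -16;	/* stands in for -EBUSY */
		goto err_cmd_init;
	}

	ret = query_fw_version(fw_version);
	if (ret) {
		fprintf(stderr, "firmware version query failed %d\n", ret);
		goto err_cmd_init;
	}

	return 0;

err_cmd_init:
	/* the single place that flags the command queue as disabled */
	*cmd_disabled = true;
	return ret;
}

int main(void)
{
	unsigned int ver = 0;
	bool disabled = false;
	int ret = cmd_init(&ver, &disabled);

	printf("cmd_init: %d, disabled: %d\n", ret, disabled);
	return 0;
}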
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c index 1192cf6f2321..a9ffb57c4607 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c @@ -901,6 +901,109 @@ static void hclge_dbg_fd_tcam(struct hclge_dev *hdev) } } +static void hclge_dbg_dump_rst_info(struct hclge_dev *hdev) +{ + dev_info(&hdev->pdev->dev, "PF reset count: %d\n", + hdev->rst_stats.pf_rst_cnt); + dev_info(&hdev->pdev->dev, "FLR reset count: %d\n", + hdev->rst_stats.flr_rst_cnt); + dev_info(&hdev->pdev->dev, "CORE reset count: %d\n", + hdev->rst_stats.core_rst_cnt); + dev_info(&hdev->pdev->dev, "GLOBAL reset count: %d\n", + hdev->rst_stats.global_rst_cnt); + dev_info(&hdev->pdev->dev, "IMP reset count: %d\n", + hdev->rst_stats.imp_rst_cnt); + dev_info(&hdev->pdev->dev, "reset done count: %d\n", + hdev->rst_stats.reset_done_cnt); + dev_info(&hdev->pdev->dev, "HW reset done count: %d\n", + hdev->rst_stats.hw_reset_done_cnt); + dev_info(&hdev->pdev->dev, "reset count: %d\n", + hdev->rst_stats.reset_cnt); +} + +/* hclge_dbg_dump_ncl_config: print specified range of NCL_CONFIG file + * @hdev: pointer to struct hclge_dev + * @cmd_buf: string that contains offset and length + */ +static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, char *cmd_buf) +{ +#define HCLGE_MAX_NCL_CONFIG_OFFSET 4096 +#define HCLGE_MAX_NCL_CONFIG_LENGTH (20 + 24 * 4) +#define HCLGE_CMD_DATA_NUM 6 + + struct hclge_desc desc[5]; + u32 byte_offset; + int bd_num = 5; + int offset; + int length; + int data0; + int ret; + int i; + int j; + + ret = sscanf(cmd_buf, "%x %x", &offset, &length); + if (ret != 2 || offset < 0 || length <= 0) { + dev_err(&hdev->pdev->dev, "Invalid offset or length.\n"); + return; + } + if (offset >= HCLGE_MAX_NCL_CONFIG_OFFSET || + length > HCLGE_MAX_NCL_CONFIG_OFFSET - offset) { + dev_err(&hdev->pdev->dev, "Offset or length out of range.\n"); + return; + } + + dev_info(&hdev->pdev->dev, "offset | data\n"); + + while (length > 0) { + data0 = offset; + if (length >= HCLGE_MAX_NCL_CONFIG_LENGTH) + data0 |= HCLGE_MAX_NCL_CONFIG_LENGTH << 16; + else + data0 |= length << 16; + ret = hclge_dbg_cmd_send(hdev, desc, data0, bd_num, + HCLGE_OPC_QUERY_NCL_CONFIG); + if (ret) + return; + + byte_offset = offset; + for (i = 0; i < bd_num; i++) { + for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) { + if (i == 0 && j == 0) + continue; + + dev_info(&hdev->pdev->dev, "0x%04x | 0x%08x\n", + byte_offset, + le32_to_cpu(desc[i].data[j])); + byte_offset += sizeof(u32); + length -= sizeof(u32); + if (length <= 0) + return; + } + } + offset += HCLGE_MAX_NCL_CONFIG_LENGTH; + } +} + +/* hclge_dbg_dump_mac_tnl_status: print message about mac tnl interrupt + * @hdev: pointer to struct hclge_dev + */ +static void hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev) +{ +#define HCLGE_BILLION_NANO_SECONDS 1000000000 + + struct hclge_mac_tnl_stats stats; + unsigned long rem_nsec; + + dev_info(&hdev->pdev->dev, "Recently generated mac tnl interrupts:\n"); + + while (kfifo_get(&hdev->mac_tnl_log, &stats)) { + rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS); + dev_info(&hdev->pdev->dev, "[%07lu.%03lu]status = 0x%x\n", + (unsigned long)stats.time, rem_nsec / 1000, + stats.status); + } +} + int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf) { struct hclge_vport *vport = hclge_get_vport(handle); @@ -924,6 +1027,13 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf) hclge_dbg_dump_mng_table(hdev); } else if (strncmp(cmd_buf, "dump reg", 8) == 0) { hclge_dbg_dump_reg_cmd(hdev, cmd_buf); + } else if (strncmp(cmd_buf, "dump reset info", 15) == 0) { + hclge_dbg_dump_rst_info(hdev); + } else if (strncmp(cmd_buf, "dump ncl_config", 15) == 0) { + hclge_dbg_dump_ncl_config(hdev, + &cmd_buf[sizeof("dump ncl_config")]); + } else if (strncmp(cmd_buf, "dump mac tnl status", 19) == 0) { + hclge_dbg_dump_mac_tnl_status(hdev); } else { dev_info(&hdev->pdev->dev, "unknown command\n"); return -EINVAL; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c index 1f52d11f77b5..4ac80634c984 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c @@ -4,287 +4,468 @@ #include "hclge_err.h" static const struct hclge_hw_error hclge_imp_tcm_ecc_int[] = { - { .int_msk = BIT(1), .msg = "imp_itcm0_ecc_mbit_err" }, - { .int_msk = BIT(3), .msg = "imp_itcm1_ecc_mbit_err" }, - { .int_msk = BIT(5), .msg = "imp_itcm2_ecc_mbit_err" }, - { .int_msk = BIT(7), .msg = "imp_itcm3_ecc_mbit_err" }, - { .int_msk = BIT(9), .msg = "imp_dtcm0_mem0_ecc_mbit_err" }, - { .int_msk = BIT(11), .msg = "imp_dtcm0_mem1_ecc_mbit_err" }, - { .int_msk = BIT(13), .msg = "imp_dtcm1_mem0_ecc_mbit_err" }, - { .int_msk = BIT(15), .msg = "imp_dtcm1_mem1_ecc_mbit_err" }, - { .int_msk = BIT(17), .msg = "imp_itcm4_ecc_mbit_err" }, + { .int_msk = BIT(1), .msg = "imp_itcm0_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(3), .msg = "imp_itcm1_ecc_mbit_err", +
.reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(5), .msg = "imp_itcm2_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(7), .msg = "imp_itcm3_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(9), .msg = "imp_dtcm0_mem0_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(11), .msg = "imp_dtcm0_mem1_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(13), .msg = "imp_dtcm1_mem0_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(15), .msg = "imp_dtcm1_mem1_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(17), .msg = "imp_itcm4_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_cmdq_nic_mem_ecc_int[] = { - { .int_msk = BIT(1), .msg = "cmdq_nic_rx_depth_ecc_mbit_err" }, - { .int_msk = BIT(3), .msg = "cmdq_nic_tx_depth_ecc_mbit_err" }, - { .int_msk = BIT(5), .msg = "cmdq_nic_rx_tail_ecc_mbit_err" }, - { .int_msk = BIT(7), .msg = "cmdq_nic_tx_tail_ecc_mbit_err" }, - { .int_msk = BIT(9), .msg = "cmdq_nic_rx_head_ecc_mbit_err" }, - { .int_msk = BIT(11), .msg = "cmdq_nic_tx_head_ecc_mbit_err" }, - { .int_msk = BIT(13), .msg = "cmdq_nic_rx_addr_ecc_mbit_err" }, - { .int_msk = BIT(15), .msg = "cmdq_nic_tx_addr_ecc_mbit_err" }, - { .int_msk = BIT(17), .msg = "cmdq_rocee_rx_depth_ecc_mbit_err" }, - { .int_msk = BIT(19), .msg = "cmdq_rocee_tx_depth_ecc_mbit_err" }, - { .int_msk = BIT(21), .msg = "cmdq_rocee_rx_tail_ecc_mbit_err" }, - { .int_msk = BIT(23), .msg = "cmdq_rocee_tx_tail_ecc_mbit_err" }, - { .int_msk = BIT(25), .msg = "cmdq_rocee_rx_head_ecc_mbit_err" }, - { .int_msk = BIT(27), .msg = "cmdq_rocee_tx_head_ecc_mbit_err" }, - { .int_msk = BIT(29), .msg = "cmdq_rocee_rx_addr_ecc_mbit_err" }, - { .int_msk = BIT(31), .msg = "cmdq_rocee_tx_addr_ecc_mbit_err" }, + { .int_msk = BIT(1), .msg = "cmdq_nic_rx_depth_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(3), .msg = "cmdq_nic_tx_depth_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(5), .msg = "cmdq_nic_rx_tail_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(7), .msg = "cmdq_nic_tx_tail_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(9), .msg = "cmdq_nic_rx_head_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(11), .msg = "cmdq_nic_tx_head_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(13), .msg = "cmdq_nic_rx_addr_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(15), .msg = "cmdq_nic_tx_addr_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(17), .msg = "cmdq_rocee_rx_depth_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(19), .msg = "cmdq_rocee_tx_depth_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(21), .msg = "cmdq_rocee_rx_tail_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(23), .msg = "cmdq_rocee_tx_tail_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(25), .msg = "cmdq_rocee_rx_head_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(27), .msg = "cmdq_rocee_tx_head_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(29), .msg = "cmdq_rocee_rx_addr_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(31), .msg = "cmdq_rocee_tx_addr_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, { /* sentinel */ } }; static const struct 
hclge_hw_error hclge_tqp_int_ecc_int[] = { - { .int_msk = BIT(6), .msg = "tqp_int_cfg_even_ecc_mbit_err" }, - { .int_msk = BIT(7), .msg = "tqp_int_cfg_odd_ecc_mbit_err" }, - { .int_msk = BIT(8), .msg = "tqp_int_ctrl_even_ecc_mbit_err" }, - { .int_msk = BIT(9), .msg = "tqp_int_ctrl_odd_ecc_mbit_err" }, - { .int_msk = BIT(10), .msg = "tx_que_scan_int_ecc_mbit_err" }, - { .int_msk = BIT(11), .msg = "rx_que_scan_int_ecc_mbit_err" }, + { .int_msk = BIT(6), .msg = "tqp_int_cfg_even_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(7), .msg = "tqp_int_cfg_odd_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(8), .msg = "tqp_int_ctrl_even_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(9), .msg = "tqp_int_ctrl_odd_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(10), .msg = "tx_que_scan_int_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(11), .msg = "rx_que_scan_int_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_msix_sram_ecc_int[] = { - { .int_msk = BIT(1), .msg = "msix_nic_ecc_mbit_err" }, - { .int_msk = BIT(3), .msg = "msix_rocee_ecc_mbit_err" }, + { .int_msk = BIT(1), .msg = "msix_nic_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(3), .msg = "msix_rocee_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_igu_int[] = { - { .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err" }, - { .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err" }, + { .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err", + .reset_level = HNAE3_CORE_RESET }, + { .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err", + .reset_level = HNAE3_CORE_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_igu_egu_tnl_int[] = { - { .int_msk = BIT(0), .msg = "rx_buf_overflow" }, - { .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow" }, - { .int_msk = BIT(2), .msg = "rx_stp_fifo_undeflow" }, - { .int_msk = BIT(3), .msg = "tx_buf_overflow" }, - { .int_msk = BIT(4), .msg = "tx_buf_underrun" }, - { .int_msk = BIT(5), .msg = "rx_stp_buf_overflow" }, + { .int_msk = BIT(0), .msg = "rx_buf_overflow", + .reset_level = HNAE3_CORE_RESET }, + { .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow", + .reset_level = HNAE3_CORE_RESET }, + { .int_msk = BIT(2), .msg = "rx_stp_fifo_undeflow", + .reset_level = HNAE3_CORE_RESET }, + { .int_msk = BIT(3), .msg = "tx_buf_overflow", + .reset_level = HNAE3_CORE_RESET }, + { .int_msk = BIT(4), .msg = "tx_buf_underrun", + .reset_level = HNAE3_CORE_RESET }, + { .int_msk = BIT(5), .msg = "rx_stp_buf_overflow", + .reset_level = HNAE3_CORE_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_ncsi_err_int[] = { - { .int_msk = BIT(1), .msg = "ncsi_tx_ecc_mbit_err" }, + { .int_msk = BIT(1), .msg = "ncsi_tx_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st1[] = { - { .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_mbit_err" }, - { .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_mbit_err" }, - { .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_mbit_err" }, - { .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_mbit_err" }, - { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err" }, - { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err" }, - { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_err" }, - { .int_msk = BIT(7), .msg = 
"rss_tc_mode_mem_ecc_mbit_err" }, - { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err" }, - { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err" }, - { .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_mbit_err" }, - { .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_mbit_err" }, - { .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_mbit_err" }, - { .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_mbit_err" }, - { .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_mbit_err" }, - { .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_mbit_err" }, - { .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_mbit_err" }, - { .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_mbit_err" }, - { .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_m1bit_err" }, - { .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_mbit_err" }, - { .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_mbit_err" }, - { .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_mbit_err" }, - { .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_mbit_err" }, - { .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_mbit_err" }, - { .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_mbit_err" }, - { .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_mbit_err" }, - { .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_mbit_err" }, - { .int_msk = BIT(27), - .msg = "flow_director_ad_mem0_ecc_mbit_err" }, - { .int_msk = BIT(28), - .msg = "flow_director_ad_mem1_ecc_mbit_err" }, - { .int_msk = BIT(29), - .msg = "rx_vlan_tag_memory_ecc_mbit_err" }, - { .int_msk = BIT(30), - .msg = "Tx_UP_mapping_config_mem_ecc_mbit_err" }, + { .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_m1bit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(20), .msg = 
"rss_idt_mem12_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(27), .msg = "flow_director_ad_mem0_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(28), .msg = "flow_director_ad_mem1_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(29), .msg = "rx_vlan_tag_memory_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(30), .msg = "Tx_UP_mapping_config_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_ppp_pf_abnormal_int[] = { - { .int_msk = BIT(0), .msg = "tx_vlan_tag_err" }, - { .int_msk = BIT(1), .msg = "rss_list_tc_unassigned_queue_err" }, + { .int_msk = BIT(0), .msg = "tx_vlan_tag_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(1), .msg = "rss_list_tc_unassigned_queue_err", + .reset_level = HNAE3_NONE_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st3[] = { - { .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_mbit_err" }, - { .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_mbit_err" }, - { .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_mbit_err" }, - { .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_mbit_err" }, - { .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_mbit_err" }, - { .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_mbit_err" }, + { .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_tm_sch_rint[] = { - { .int_msk = BIT(1), .msg = "tm_sch_ecc_mbit_err" }, - { .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_err" }, - { .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_err" }, - { .int_msk = BIT(4), .msg = "tm_sch_pg_pshap_sub_fifo_wr_err" }, - { .int_msk = BIT(5), .msg = "tm_sch_pg_pshap_sub_fifo_rd_err" }, - { .int_msk = BIT(6), .msg = "tm_sch_pg_cshap_sub_fifo_wr_err" }, - { .int_msk = BIT(7), .msg = "tm_sch_pg_cshap_sub_fifo_rd_err" }, - { .int_msk = BIT(8), .msg = "tm_sch_pri_pshap_sub_fifo_wr_err" }, - { .int_msk = BIT(9), .msg = "tm_sch_pri_pshap_sub_fifo_rd_err" }, - { .int_msk = BIT(10), .msg = "tm_sch_pri_cshap_sub_fifo_wr_err" }, - { .int_msk = BIT(11), .msg = "tm_sch_pri_cshap_sub_fifo_rd_err" }, - { .int_msk = BIT(12), - .msg = "tm_sch_port_shap_offset_fifo_wr_err" }, - { .int_msk = BIT(13), - .msg = "tm_sch_port_shap_offset_fifo_rd_err" }, - { 
.int_msk = BIT(14), - .msg = "tm_sch_pg_pshap_offset_fifo_wr_err" }, - { .int_msk = BIT(15), - .msg = "tm_sch_pg_pshap_offset_fifo_rd_err" }, - { .int_msk = BIT(16), - .msg = "tm_sch_pg_cshap_offset_fifo_wr_err" }, - { .int_msk = BIT(17), - .msg = "tm_sch_pg_cshap_offset_fifo_rd_err" }, - { .int_msk = BIT(18), - .msg = "tm_sch_pri_pshap_offset_fifo_wr_err" }, - { .int_msk = BIT(19), - .msg = "tm_sch_pri_pshap_offset_fifo_rd_err" }, - { .int_msk = BIT(20), - .msg = "tm_sch_pri_cshap_offset_fifo_wr_err" }, - { .int_msk = BIT(21), - .msg = "tm_sch_pri_cshap_offset_fifo_rd_err" }, - { .int_msk = BIT(22), .msg = "tm_sch_rq_fifo_wr_err" }, - { .int_msk = BIT(23), .msg = "tm_sch_rq_fifo_rd_err" }, - { .int_msk = BIT(24), .msg = "tm_sch_nq_fifo_wr_err" }, - { .int_msk = BIT(25), .msg = "tm_sch_nq_fifo_rd_err" }, - { .int_msk = BIT(26), .msg = "tm_sch_roce_up_fifo_wr_err" }, - { .int_msk = BIT(27), .msg = "tm_sch_roce_up_fifo_rd_err" }, - { .int_msk = BIT(28), .msg = "tm_sch_rcb_byte_fifo_wr_err" }, - { .int_msk = BIT(29), .msg = "tm_sch_rcb_byte_fifo_rd_err" }, - { .int_msk = BIT(30), .msg = "tm_sch_ssu_byte_fifo_wr_err" }, - { .int_msk = BIT(31), .msg = "tm_sch_ssu_byte_fifo_rd_err" }, + { .int_msk = BIT(1), .msg = "tm_sch_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(4), .msg = "tm_sch_pg_pshap_sub_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(5), .msg = "tm_sch_pg_pshap_sub_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(6), .msg = "tm_sch_pg_cshap_sub_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(7), .msg = "tm_sch_pg_cshap_sub_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(8), .msg = "tm_sch_pri_pshap_sub_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(9), .msg = "tm_sch_pri_pshap_sub_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(10), .msg = "tm_sch_pri_cshap_sub_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(11), .msg = "tm_sch_pri_cshap_sub_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(12), .msg = "tm_sch_port_shap_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(13), .msg = "tm_sch_port_shap_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(14), .msg = "tm_sch_pg_pshap_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(15), .msg = "tm_sch_pg_pshap_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(16), .msg = "tm_sch_pg_cshap_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(17), .msg = "tm_sch_pg_cshap_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(18), .msg = "tm_sch_pri_pshap_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(19), .msg = "tm_sch_pri_pshap_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(20), .msg = "tm_sch_pri_cshap_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(21), .msg = "tm_sch_pri_cshap_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(22), .msg = "tm_sch_rq_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(23), .msg = 
"tm_sch_rq_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(24), .msg = "tm_sch_nq_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(25), .msg = "tm_sch_nq_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(26), .msg = "tm_sch_roce_up_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(27), .msg = "tm_sch_roce_up_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(28), .msg = "tm_sch_rcb_byte_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(29), .msg = "tm_sch_rcb_byte_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(30), .msg = "tm_sch_ssu_byte_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(31), .msg = "tm_sch_ssu_byte_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_qcn_fifo_rint[] = { - { .int_msk = BIT(0), .msg = "qcn_shap_gp0_sch_fifo_rd_err" }, - { .int_msk = BIT(1), .msg = "qcn_shap_gp0_sch_fifo_wr_err" }, - { .int_msk = BIT(2), .msg = "qcn_shap_gp1_sch_fifo_rd_err" }, - { .int_msk = BIT(3), .msg = "qcn_shap_gp1_sch_fifo_wr_err" }, - { .int_msk = BIT(4), .msg = "qcn_shap_gp2_sch_fifo_rd_err" }, - { .int_msk = BIT(5), .msg = "qcn_shap_gp2_sch_fifo_wr_err" }, - { .int_msk = BIT(6), .msg = "qcn_shap_gp3_sch_fifo_rd_err" }, - { .int_msk = BIT(7), .msg = "qcn_shap_gp3_sch_fifo_wr_err" }, - { .int_msk = BIT(8), .msg = "qcn_shap_gp0_offset_fifo_rd_err" }, - { .int_msk = BIT(9), .msg = "qcn_shap_gp0_offset_fifo_wr_err" }, - { .int_msk = BIT(10), .msg = "qcn_shap_gp1_offset_fifo_rd_err" }, - { .int_msk = BIT(11), .msg = "qcn_shap_gp1_offset_fifo_wr_err" }, - { .int_msk = BIT(12), .msg = "qcn_shap_gp2_offset_fifo_rd_err" }, - { .int_msk = BIT(13), .msg = "qcn_shap_gp2_offset_fifo_wr_err" }, - { .int_msk = BIT(14), .msg = "qcn_shap_gp3_offset_fifo_rd_err" }, - { .int_msk = BIT(15), .msg = "qcn_shap_gp3_offset_fifo_wr_err" }, - { .int_msk = BIT(16), .msg = "qcn_byte_info_fifo_rd_err" }, - { .int_msk = BIT(17), .msg = "qcn_byte_info_fifo_wr_err" }, + { .int_msk = BIT(0), .msg = "qcn_shap_gp0_sch_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(1), .msg = "qcn_shap_gp0_sch_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(2), .msg = "qcn_shap_gp1_sch_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(3), .msg = "qcn_shap_gp1_sch_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(4), .msg = "qcn_shap_gp2_sch_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(5), .msg = "qcn_shap_gp2_sch_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(6), .msg = "qcn_shap_gp3_sch_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(7), .msg = "qcn_shap_gp3_sch_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(8), .msg = "qcn_shap_gp0_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(9), .msg = "qcn_shap_gp0_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(10), .msg = "qcn_shap_gp1_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(11), .msg = "qcn_shap_gp1_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(12), .msg = "qcn_shap_gp2_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(13), .msg = "qcn_shap_gp2_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, 
+ { .int_msk = BIT(14), .msg = "qcn_shap_gp3_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(15), .msg = "qcn_shap_gp3_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(16), .msg = "qcn_byte_info_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(17), .msg = "qcn_byte_info_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_qcn_ecc_rint[] = { - { .int_msk = BIT(1), .msg = "qcn_byte_mem_ecc_mbit_err" }, - { .int_msk = BIT(3), .msg = "qcn_time_mem_ecc_mbit_err" }, - { .int_msk = BIT(5), .msg = "qcn_fb_mem_ecc_mbit_err" }, - { .int_msk = BIT(7), .msg = "qcn_link_mem_ecc_mbit_err" }, - { .int_msk = BIT(9), .msg = "qcn_rate_mem_ecc_mbit_err" }, - { .int_msk = BIT(11), .msg = "qcn_tmplt_mem_ecc_mbit_err" }, - { .int_msk = BIT(13), .msg = "qcn_shap_cfg_mem_ecc_mbit_err" }, - { .int_msk = BIT(15), .msg = "qcn_gp0_barrel_mem_ecc_mbit_err" }, - { .int_msk = BIT(17), .msg = "qcn_gp1_barrel_mem_ecc_mbit_err" }, - { .int_msk = BIT(19), .msg = "qcn_gp2_barrel_mem_ecc_mbit_err" }, - { .int_msk = BIT(21), .msg = "qcn_gp3_barral_mem_ecc_mbit_err" }, + { .int_msk = BIT(1), .msg = "qcn_byte_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(3), .msg = "qcn_time_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(5), .msg = "qcn_fb_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(7), .msg = "qcn_link_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(9), .msg = "qcn_rate_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(11), .msg = "qcn_tmplt_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(13), .msg = "qcn_shap_cfg_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(15), .msg = "qcn_gp0_barrel_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(17), .msg = "qcn_gp1_barrel_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(19), .msg = "qcn_gp2_barrel_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(21), .msg = "qcn_gp3_barral_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_mac_afifo_tnl_int[] = { - { .int_msk = BIT(0), .msg = "egu_cge_afifo_ecc_1bit_err" }, - { .int_msk = BIT(1), .msg = "egu_cge_afifo_ecc_mbit_err" }, - { .int_msk = BIT(2), .msg = "egu_lge_afifo_ecc_1bit_err" }, - { .int_msk = BIT(3), .msg = "egu_lge_afifo_ecc_mbit_err" }, - { .int_msk = BIT(4), .msg = "cge_igu_afifo_ecc_1bit_err" }, - { .int_msk = BIT(5), .msg = "cge_igu_afifo_ecc_mbit_err" }, - { .int_msk = BIT(6), .msg = "lge_igu_afifo_ecc_1bit_err" }, - { .int_msk = BIT(7), .msg = "lge_igu_afifo_ecc_mbit_err" }, - { .int_msk = BIT(8), .msg = "cge_igu_afifo_overflow_err" }, - { .int_msk = BIT(9), .msg = "lge_igu_afifo_overflow_err" }, - { .int_msk = BIT(10), .msg = "egu_cge_afifo_underrun_err" }, - { .int_msk = BIT(11), .msg = "egu_lge_afifo_underrun_err" }, - { .int_msk = BIT(12), .msg = "egu_ge_afifo_underrun_err" }, - { .int_msk = BIT(13), .msg = "ge_igu_afifo_overflow_err" }, + { .int_msk = BIT(0), .msg = "egu_cge_afifo_ecc_1bit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(1), .msg = "egu_cge_afifo_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(2), .msg = "egu_lge_afifo_ecc_1bit_err", + .reset_level = HNAE3_NONE_RESET }, 
+ { .int_msk = BIT(3), .msg = "egu_lge_afifo_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(4), .msg = "cge_igu_afifo_ecc_1bit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(5), .msg = "cge_igu_afifo_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(6), .msg = "lge_igu_afifo_ecc_1bit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(7), .msg = "lge_igu_afifo_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(8), .msg = "cge_igu_afifo_overflow_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(9), .msg = "lge_igu_afifo_overflow_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(10), .msg = "egu_cge_afifo_underrun_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(11), .msg = "egu_lge_afifo_underrun_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(12), .msg = "egu_ge_afifo_underrun_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(13), .msg = "ge_igu_afifo_overflow_err", + .reset_level = HNAE3_GLOBAL_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st2[] = { - { .int_msk = BIT(13), .msg = "rpu_rx_pkt_bit32_ecc_mbit_err" }, - { .int_msk = BIT(14), .msg = "rpu_rx_pkt_bit33_ecc_mbit_err" }, - { .int_msk = BIT(15), .msg = "rpu_rx_pkt_bit34_ecc_mbit_err" }, - { .int_msk = BIT(16), .msg = "rpu_rx_pkt_bit35_ecc_mbit_err" }, - { .int_msk = BIT(17), .msg = "rcb_tx_ring_ecc_mbit_err" }, - { .int_msk = BIT(18), .msg = "rcb_rx_ring_ecc_mbit_err" }, - { .int_msk = BIT(19), .msg = "rcb_tx_fbd_ecc_mbit_err" }, - { .int_msk = BIT(20), .msg = "rcb_rx_ebd_ecc_mbit_err" }, - { .int_msk = BIT(21), .msg = "rcb_tso_info_ecc_mbit_err" }, - { .int_msk = BIT(22), .msg = "rcb_tx_int_info_ecc_mbit_err" }, - { .int_msk = BIT(23), .msg = "rcb_rx_int_info_ecc_mbit_err" }, - { .int_msk = BIT(24), .msg = "tpu_tx_pkt_0_ecc_mbit_err" }, - { .int_msk = BIT(25), .msg = "tpu_tx_pkt_1_ecc_mbit_err" }, - { .int_msk = BIT(26), .msg = "rd_bus_err" }, - { .int_msk = BIT(27), .msg = "wr_bus_err" }, - { .int_msk = BIT(28), .msg = "reg_search_miss" }, - { .int_msk = BIT(29), .msg = "rx_q_search_miss" }, - { .int_msk = BIT(30), .msg = "ooo_ecc_err_detect" }, - { .int_msk = BIT(31), .msg = "ooo_ecc_err_multpl" }, + { .int_msk = BIT(13), .msg = "rpu_rx_pkt_bit32_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(14), .msg = "rpu_rx_pkt_bit33_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(15), .msg = "rpu_rx_pkt_bit34_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(16), .msg = "rpu_rx_pkt_bit35_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(17), .msg = "rcb_tx_ring_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(18), .msg = "rcb_rx_ring_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(19), .msg = "rcb_tx_fbd_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(20), .msg = "rcb_rx_ebd_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(21), .msg = "rcb_tso_info_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(22), .msg = "rcb_tx_int_info_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(23), .msg = "rcb_rx_int_info_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(24), .msg = "tpu_tx_pkt_0_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(25), .msg = 
"tpu_tx_pkt_1_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(26), .msg = "rd_bus_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(27), .msg = "wr_bus_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(28), .msg = "reg_search_miss", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(29), .msg = "rx_q_search_miss", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(30), .msg = "ooo_ecc_err_detect", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(31), .msg = "ooo_ecc_err_multpl", + .reset_level = HNAE3_GLOBAL_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st3[] = { - { .int_msk = BIT(4), .msg = "gro_bd_ecc_mbit_err" }, - { .int_msk = BIT(5), .msg = "gro_context_ecc_mbit_err" }, - { .int_msk = BIT(6), .msg = "rx_stash_cfg_ecc_mbit_err" }, - { .int_msk = BIT(7), .msg = "axi_rd_fbd_ecc_mbit_err" }, + { .int_msk = BIT(4), .msg = "gro_bd_ecc_mbit_err", + .reset_level = HNAE3_CORE_RESET }, + { .int_msk = BIT(5), .msg = "gro_context_ecc_mbit_err", + .reset_level = HNAE3_CORE_RESET }, + { .int_msk = BIT(6), .msg = "rx_stash_cfg_ecc_mbit_err", + .reset_level = HNAE3_CORE_RESET }, + { .int_msk = BIT(7), .msg = "axi_rd_fbd_ecc_mbit_err", + .reset_level = HNAE3_CORE_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_ppu_pf_abnormal_int[] = { - { .int_msk = BIT(0), .msg = "over_8bd_no_fe" }, - { .int_msk = BIT(1), .msg = "tso_mss_cmp_min_err" }, - { .int_msk = BIT(2), .msg = "tso_mss_cmp_max_err" }, - { .int_msk = BIT(3), .msg = "tx_rd_fbd_poison" }, - { .int_msk = BIT(4), .msg = "rx_rd_ebd_poison" }, - { .int_msk = BIT(5), .msg = "buf_wait_timeout" }, + { .int_msk = BIT(0), .msg = "over_8bd_no_fe", + .reset_level = HNAE3_FUNC_RESET }, + { .int_msk = BIT(1), .msg = "tso_mss_cmp_min_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(2), .msg = "tso_mss_cmp_max_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(3), .msg = "tx_rd_fbd_poison", + .reset_level = HNAE3_FUNC_RESET }, + { .int_msk = BIT(4), .msg = "rx_rd_ebd_poison", + .reset_level = HNAE3_FUNC_RESET }, + { .int_msk = BIT(5), .msg = "buf_wait_timeout", + .reset_level = HNAE3_NONE_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_ssu_com_err_int[] = { - { .int_msk = BIT(0), .msg = "buf_sum_err" }, - { .int_msk = BIT(1), .msg = "ppp_mb_num_err" }, - { .int_msk = BIT(2), .msg = "ppp_mbid_err" }, - { .int_msk = BIT(3), .msg = "ppp_rlt_mac_err" }, - { .int_msk = BIT(4), .msg = "ppp_rlt_host_err" }, - { .int_msk = BIT(5), .msg = "cks_edit_position_err" }, - { .int_msk = BIT(6), .msg = "cks_edit_condition_err" }, - { .int_msk = BIT(7), .msg = "vlan_edit_condition_err" }, - { .int_msk = BIT(8), .msg = "vlan_num_ot_err" }, - { .int_msk = BIT(9), .msg = "vlan_num_in_err" }, + { .int_msk = BIT(0), .msg = "buf_sum_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(1), .msg = "ppp_mb_num_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(2), .msg = "ppp_mbid_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(3), .msg = "ppp_rlt_mac_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(4), .msg = "ppp_rlt_host_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(5), .msg = "cks_edit_position_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(6), .msg = "cks_edit_condition_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(7), .msg = "vlan_edit_condition_err", + .reset_level = HNAE3_GLOBAL_RESET }, + 
{ .int_msk = BIT(8), .msg = "vlan_num_ot_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(9), .msg = "vlan_num_in_err", + .reset_level = HNAE3_GLOBAL_RESET }, { /* sentinel */ } }; #define HCLGE_SSU_MEM_ECC_ERR(x) \ - { .int_msk = BIT(x), .msg = "ssu_mem" #x "_ecc_mbit_err" } + { .int_msk = BIT(x), .msg = "ssu_mem" #x "_ecc_mbit_err", \ + .reset_level = HNAE3_GLOBAL_RESET } static const struct hclge_hw_error hclge_ssu_mem_ecc_err_int[] = { HCLGE_SSU_MEM_ECC_ERR(0), @@ -323,62 +504,106 @@ static const struct hclge_hw_error hclge_ssu_mem_ecc_err_int[] = { }; static const struct hclge_hw_error hclge_ssu_port_based_err_int[] = { - { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port" }, - { .int_msk = BIT(1), .msg = "tpu_pkt_without_key_port" }, - { .int_msk = BIT(2), .msg = "igu_pkt_without_key_port" }, - { .int_msk = BIT(3), .msg = "roc_eof_mis_match_port" }, - { .int_msk = BIT(4), .msg = "tpu_eof_mis_match_port" }, - { .int_msk = BIT(5), .msg = "igu_eof_mis_match_port" }, - { .int_msk = BIT(6), .msg = "roc_sof_mis_match_port" }, - { .int_msk = BIT(7), .msg = "tpu_sof_mis_match_port" }, - { .int_msk = BIT(8), .msg = "igu_sof_mis_match_port" }, - { .int_msk = BIT(11), .msg = "ets_rd_int_rx_port" }, - { .int_msk = BIT(12), .msg = "ets_wr_int_rx_port" }, - { .int_msk = BIT(13), .msg = "ets_rd_int_tx_port" }, - { .int_msk = BIT(14), .msg = "ets_wr_int_tx_port" }, + { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(1), .msg = "tpu_pkt_without_key_port", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(2), .msg = "igu_pkt_without_key_port", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(3), .msg = "roc_eof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(4), .msg = "tpu_eof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(5), .msg = "igu_eof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(6), .msg = "roc_sof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(7), .msg = "tpu_sof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(8), .msg = "igu_sof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(11), .msg = "ets_rd_int_rx_port", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(12), .msg = "ets_wr_int_rx_port", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(13), .msg = "ets_rd_int_tx_port", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(14), .msg = "ets_wr_int_tx_port", + .reset_level = HNAE3_GLOBAL_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_ssu_fifo_overflow_int[] = { - { .int_msk = BIT(0), .msg = "ig_mac_inf_int" }, - { .int_msk = BIT(1), .msg = "ig_host_inf_int" }, - { .int_msk = BIT(2), .msg = "ig_roc_buf_int" }, - { .int_msk = BIT(3), .msg = "ig_host_data_fifo_int" }, - { .int_msk = BIT(4), .msg = "ig_host_key_fifo_int" }, - { .int_msk = BIT(5), .msg = "tx_qcn_fifo_int" }, - { .int_msk = BIT(6), .msg = "rx_qcn_fifo_int" }, - { .int_msk = BIT(7), .msg = "tx_pf_rd_fifo_int" }, - { .int_msk = BIT(8), .msg = "rx_pf_rd_fifo_int" }, - { .int_msk = BIT(9), .msg = "qm_eof_fifo_int" }, - { .int_msk = BIT(10), .msg = "mb_rlt_fifo_int" }, - { .int_msk = BIT(11), .msg = "dup_uncopy_fifo_int" }, - { .int_msk = BIT(12), .msg = "dup_cnt_rd_fifo_int" }, - { .int_msk = BIT(13), .msg = "dup_cnt_drop_fifo_int" }, - { .int_msk = BIT(14), .msg = "dup_cnt_wrb_fifo_int" }, - { .int_msk = BIT(15), .msg = 
"host_cmd_fifo_int" }, - { .int_msk = BIT(16), .msg = "mac_cmd_fifo_int" }, - { .int_msk = BIT(17), .msg = "host_cmd_bitmap_empty_int" }, - { .int_msk = BIT(18), .msg = "mac_cmd_bitmap_empty_int" }, - { .int_msk = BIT(19), .msg = "dup_bitmap_empty_int" }, - { .int_msk = BIT(20), .msg = "out_queue_bitmap_empty_int" }, - { .int_msk = BIT(21), .msg = "bank2_bitmap_empty_int" }, - { .int_msk = BIT(22), .msg = "bank1_bitmap_empty_int" }, - { .int_msk = BIT(23), .msg = "bank0_bitmap_empty_int" }, + { .int_msk = BIT(0), .msg = "ig_mac_inf_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(1), .msg = "ig_host_inf_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(2), .msg = "ig_roc_buf_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(3), .msg = "ig_host_data_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(4), .msg = "ig_host_key_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(5), .msg = "tx_qcn_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(6), .msg = "rx_qcn_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(7), .msg = "tx_pf_rd_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(8), .msg = "rx_pf_rd_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(9), .msg = "qm_eof_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(10), .msg = "mb_rlt_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(11), .msg = "dup_uncopy_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(12), .msg = "dup_cnt_rd_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(13), .msg = "dup_cnt_drop_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(14), .msg = "dup_cnt_wrb_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(15), .msg = "host_cmd_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(16), .msg = "mac_cmd_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(17), .msg = "host_cmd_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(18), .msg = "mac_cmd_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(19), .msg = "dup_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(20), .msg = "out_queue_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(21), .msg = "bank2_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(22), .msg = "bank1_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(23), .msg = "bank0_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_ssu_ets_tcg_int[] = { - { .int_msk = BIT(0), .msg = "ets_rd_int_rx_tcg" }, - { .int_msk = BIT(1), .msg = "ets_wr_int_rx_tcg" }, - { .int_msk = BIT(2), .msg = "ets_rd_int_tx_tcg" }, - { .int_msk = BIT(3), .msg = "ets_wr_int_tx_tcg" }, + { .int_msk = BIT(0), .msg = "ets_rd_int_rx_tcg", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(1), .msg = "ets_wr_int_rx_tcg", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(2), .msg = "ets_rd_int_tx_tcg", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(3), .msg = "ets_wr_int_tx_tcg", + .reset_level = HNAE3_GLOBAL_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_ssu_port_based_pf_int[] = { - { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port" }, - { .int_msk = BIT(9), .msg = 
"low_water_line_err_port" }, - { .int_msk = BIT(10), .msg = "hi_water_line_err_port" }, + { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(9), .msg = "low_water_line_err_port", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(10), .msg = "hi_water_line_err_port", + .reset_level = HNAE3_GLOBAL_RESET }, { /* sentinel */ } }; @@ -406,16 +631,29 @@ static const struct hclge_hw_error hclge_rocee_qmm_ovf_err_int[] = { { /* sentinel */ } }; -static void hclge_log_error(struct device *dev, char *reg, - const struct hclge_hw_error *err, - u32 err_sts) +static enum hnae3_reset_type hclge_log_error(struct device *dev, char *reg, + const struct hclge_hw_error *err, + u32 err_sts) { + enum hnae3_reset_type reset_level = HNAE3_FUNC_RESET; + bool need_reset = false; + while (err->msg) { - if (err->int_msk & err_sts) + if (err->int_msk & err_sts) { dev_warn(dev, "%s %s found [error status=0x%x]\n", reg, err->msg, err_sts); + if (err->reset_level != HNAE3_NONE_RESET && + err->reset_level >= reset_level) { + reset_level = err->reset_level; + need_reset = true; + } + } err++; } + if (need_reset) + return reset_level; + else + return HNAE3_NONE_RESET; } /* hclge_cmd_query_error: read the error information @@ -454,6 +692,16 @@ static int hclge_cmd_query_error(struct hclge_dev *hdev, return ret; } +static int hclge_clear_mac_tnl_int(struct hclge_dev *hdev) +{ + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_MAC_TNL_INT, false); + desc.data[0] = cpu_to_le32(HCLGE_MAC_TNL_INT_CLR); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + static int hclge_config_common_hw_err_int(struct hclge_dev *hdev, bool en) { struct device *dev = &hdev->pdev->dev; @@ -673,6 +921,21 @@ static int hclge_config_mac_err_int(struct hclge_dev *hdev, bool en) return ret; } +int hclge_config_mac_tnl_int(struct hclge_dev *hdev, bool en) +{ + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_TNL_INT_EN, false); + if (en) + desc.data[0] = cpu_to_le32(HCLGE_MAC_TNL_INT_EN); + else + desc.data[0] = 0; + + desc.data[1] = cpu_to_le32(HCLGE_MAC_TNL_INT_EN_MASK); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd, bool en) { @@ -826,6 +1089,7 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev, int num) { struct hnae3_ae_dev *ae_dev = hdev->ae_dev; + enum hnae3_reset_type reset_level; struct device *dev = &hdev->pdev->dev; __le32 *desc_data; u32 status; @@ -845,78 +1109,94 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev, /* log HNS common errors */ status = le32_to_cpu(desc[0].data[0]); if (status) { - hclge_log_error(dev, "IMP_TCM_ECC_INT_STS", - &hclge_imp_tcm_ecc_int[0], status); - HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET); + reset_level = hclge_log_error(dev, "IMP_TCM_ECC_INT_STS", + &hclge_imp_tcm_ecc_int[0], + status); + HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level); } status = le32_to_cpu(desc[0].data[1]); if (status) { - hclge_log_error(dev, "CMDQ_MEM_ECC_INT_STS", - &hclge_cmdq_nic_mem_ecc_int[0], status); - HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET); + reset_level = hclge_log_error(dev, "CMDQ_MEM_ECC_INT_STS", + &hclge_cmdq_nic_mem_ecc_int[0], + status); + HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level); } if ((le32_to_cpu(desc[0].data[2])) & BIT(0)) { dev_warn(dev, "imp_rd_data_poison_err found\n"); - HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET); + 
HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_NONE_RESET); } status = le32_to_cpu(desc[0].data[3]); if (status) { - hclge_log_error(dev, "TQP_INT_ECC_INT_STS", - &hclge_tqp_int_ecc_int[0], status); - HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); + reset_level = hclge_log_error(dev, "TQP_INT_ECC_INT_STS", + &hclge_tqp_int_ecc_int[0], + status); + HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level); } status = le32_to_cpu(desc[0].data[4]); if (status) { - hclge_log_error(dev, "MSIX_ECC_INT_STS", - &hclge_msix_sram_ecc_int[0], status); - HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); + reset_level = hclge_log_error(dev, "MSIX_ECC_INT_STS", + &hclge_msix_sram_ecc_int[0], + status); + HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level); } /* log SSU(Storage Switch Unit) errors */ desc_data = (__le32 *)&desc[2]; status = le32_to_cpu(*(desc_data + 2)); if (status) { - hclge_log_error(dev, "SSU_ECC_MULTI_BIT_INT_0", - &hclge_ssu_mem_ecc_err_int[0], status); - HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); + reset_level = hclge_log_error(dev, "SSU_ECC_MULTI_BIT_INT_0", + &hclge_ssu_mem_ecc_err_int[0], + status); + HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level); } status = le32_to_cpu(*(desc_data + 3)) & BIT(0); if (status) { dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_mem32_ecc_mbit_err found [error status=0x%x]\n", status); - HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); + HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET); } status = le32_to_cpu(*(desc_data + 4)) & HCLGE_SSU_COMMON_ERR_INT_MASK; if (status) { - hclge_log_error(dev, "SSU_COMMON_ERR_INT", - &hclge_ssu_com_err_int[0], status); - HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET); + reset_level = hclge_log_error(dev, "SSU_COMMON_ERR_INT", + &hclge_ssu_com_err_int[0], + status); + HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level); } /* log IGU(Ingress Unit) errors */ desc_data = (__le32 *)&desc[3]; status = le32_to_cpu(*desc_data) & HCLGE_IGU_INT_MASK; - if (status) - hclge_log_error(dev, "IGU_INT_STS", - &hclge_igu_int[0], status); + if (status) { + reset_level = hclge_log_error(dev, "IGU_INT_STS", + &hclge_igu_int[0], status); + HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level); + } /* log PPP(Programmable Packet Process) errors */ desc_data = (__le32 *)&desc[4]; status = le32_to_cpu(*(desc_data + 1)); - if (status) - hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST1", - &hclge_ppp_mpf_abnormal_int_st1[0], status); + if (status) { + reset_level = + hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST1", + &hclge_ppp_mpf_abnormal_int_st1[0], + status); + HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level); + } status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPP_MPF_INT_ST3_MASK; - if (status) - hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST3", - &hclge_ppp_mpf_abnormal_int_st3[0], status); + if (status) { + reset_level = + hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST3", + &hclge_ppp_mpf_abnormal_int_st3[0], + status); + HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level); + } /* log PPU(RCB) errors */ desc_data = (__le32 *)&desc[5]; @@ -924,55 +1204,60 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev, if (status) { dev_warn(dev, "PPU_MPF_ABNORMAL_INT_ST1 %s found\n", "rpu_rx_pkt_ecc_mbit_err"); - HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); + HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET); } status = le32_to_cpu(*(desc_data + 2)); if (status) { - hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2", - &hclge_ppu_mpf_abnormal_int_st2[0], status); - HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); + reset_level = + hclge_log_error(dev, 
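The rework running through these hunks funnels every status word through the new hclge_log_error(), which walks a sentinel-terminated {mask, message, reset_level} table and hands back the most severe reset level among the bits that hit, so callers stop hard-coding HNAE3_*_RESET. A minimal standalone sketch of that table-walk pattern (the enum and all names below are illustrative, not the driver's):

#include <stdio.h>
#include <stdint.h>

/* Illustrative severity scale; the real driver uses enum hnae3_reset_type. */
enum reset_level { RESET_NONE, RESET_FUNC, RESET_GLOBAL };

struct hw_error {
	uint32_t mask;
	const char *msg;	/* NULL marks the sentinel entry */
	enum reset_level level;
};

/* Log every error bit that is set and return the worst reset level seen. */
static enum reset_level log_errors(const struct hw_error *tbl, uint32_t sts)
{
	enum reset_level worst = RESET_NONE;

	for (; tbl->msg; tbl++) {
		if (!(tbl->mask & sts))
			continue;
		printf("%s found [status=0x%x]\n", tbl->msg, (unsigned)sts);
		if (tbl->level > worst)
			worst = tbl->level;
	}
	return worst;
}

int main(void)
{
	static const struct hw_error tbl[] = {
		{ 1u << 0, "buf_sum_err",  RESET_NONE },
		{ 1u << 2, "ppp_mbid_err", RESET_GLOBAL },
		{ 0, NULL, RESET_NONE }	/* sentinel */
	};

	printf("worst level = %d\n", log_errors(tbl, 0x5));
	return 0;
}

Keeping the severity in the table is what lets the per-entry .reset_level additions above replace dozens of per-call-site reset requests with a one-line table change.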
"PPU_MPF_ABNORMAL_INT_ST2", + &hclge_ppu_mpf_abnormal_int_st2[0], + status); + HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level); } status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPU_MPF_INT_ST3_MASK; if (status) { - hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST3", - &hclge_ppu_mpf_abnormal_int_st3[0], status); - HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); + reset_level = + hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST3", + &hclge_ppu_mpf_abnormal_int_st3[0], + status); + HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level); } /* log TM(Traffic Manager) errors */ desc_data = (__le32 *)&desc[6]; status = le32_to_cpu(*desc_data); if (status) { - hclge_log_error(dev, "TM_SCH_RINT", - &hclge_tm_sch_rint[0], status); - HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); + reset_level = hclge_log_error(dev, "TM_SCH_RINT", + &hclge_tm_sch_rint[0], status); + HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level); } /* log QCN(Quantized Congestion Control) errors */ desc_data = (__le32 *)&desc[7]; status = le32_to_cpu(*desc_data) & HCLGE_QCN_FIFO_INT_MASK; if (status) { - hclge_log_error(dev, "QCN_FIFO_RINT", - &hclge_qcn_fifo_rint[0], status); - HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); + reset_level = hclge_log_error(dev, "QCN_FIFO_RINT", + &hclge_qcn_fifo_rint[0], status); + HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level); } status = le32_to_cpu(*(desc_data + 1)) & HCLGE_QCN_ECC_INT_MASK; if (status) { - hclge_log_error(dev, "QCN_ECC_RINT", - &hclge_qcn_ecc_rint[0], status); - HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); + reset_level = hclge_log_error(dev, "QCN_ECC_RINT", + &hclge_qcn_ecc_rint[0], + status); + HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level); } /* log NCSI errors */ desc_data = (__le32 *)&desc[9]; status = le32_to_cpu(*desc_data) & HCLGE_NCSI_ECC_INT_MASK; if (status) { - hclge_log_error(dev, "NCSI_ECC_INT_RPT", - &hclge_ncsi_err_int[0], status); - HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); + reset_level = hclge_log_error(dev, "NCSI_ECC_INT_RPT", + &hclge_ncsi_err_int[0], status); + HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level); } /* clear all main PF RAS errors */ @@ -1000,6 +1285,7 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev, { struct hnae3_ae_dev *ae_dev = hdev->ae_dev; struct device *dev = &hdev->pdev->dev; + enum hnae3_reset_type reset_level; __le32 *desc_data; u32 status; int ret; @@ -1018,38 +1304,47 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev, /* log SSU(Storage Switch Unit) errors */ status = le32_to_cpu(desc[0].data[0]); if (status) { - hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT", - &hclge_ssu_port_based_err_int[0], status); - HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET); + reset_level = hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT", + &hclge_ssu_port_based_err_int[0], + status); + HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level); } status = le32_to_cpu(desc[0].data[1]); if (status) { - hclge_log_error(dev, "SSU_FIFO_OVERFLOW_INT", - &hclge_ssu_fifo_overflow_int[0], status); - HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET); + reset_level = hclge_log_error(dev, "SSU_FIFO_OVERFLOW_INT", + &hclge_ssu_fifo_overflow_int[0], + status); + HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level); } status = le32_to_cpu(desc[0].data[2]); if (status) { - hclge_log_error(dev, "SSU_ETS_TCG_INT", - &hclge_ssu_ets_tcg_int[0], status); - HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET); + reset_level = hclge_log_error(dev, "SSU_ETS_TCG_INT", + &hclge_ssu_ets_tcg_int[0], + status); + HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level); } /* log 
IGU(Ingress Unit) EGU(Egress Unit) TNL errors */ desc_data = (__le32 *)&desc[1]; status = le32_to_cpu(*desc_data) & HCLGE_IGU_EGU_TNL_INT_MASK; - if (status) - hclge_log_error(dev, "IGU_EGU_TNL_INT_STS", - &hclge_igu_egu_tnl_int[0], status); + if (status) { + reset_level = hclge_log_error(dev, "IGU_EGU_TNL_INT_STS", + &hclge_igu_egu_tnl_int[0], + status); + HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level); + } /* log PPU(RCB) errors */ desc_data = (__le32 *)&desc[3]; status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_RAS_MASK; - if (status) - hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST0", - &hclge_ppu_pf_abnormal_int[0], status); + if (status) { + reset_level = hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST0", + &hclge_ppu_pf_abnormal_int[0], + status); + HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level); + } /* clear all PF RAS errors */ hclge_cmd_reuse_desc(&desc[0], false); @@ -1341,16 +1636,15 @@ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev) int hclge_handle_hw_msix_error(struct hclge_dev *hdev, unsigned long *reset_requests) { + struct hclge_mac_tnl_stats mac_tnl_stats; struct device *dev = &hdev->pdev->dev; u32 mpf_bd_num, pf_bd_num, bd_num; + enum hnae3_reset_type reset_level; struct hclge_desc desc_bd; struct hclge_desc *desc; __le32 *desc_data; - int ret = 0; u32 status; - - /* set default handling */ - set_bit(HNAE3_FUNC_RESET, reset_requests); + int ret; /* query the number of bds for the MSIx int status */ hclge_cmd_setup_basic_desc(&desc_bd, HCLGE_QUERY_MSIX_INT_STS_BD_NUM, @@ -1359,8 +1653,6 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev, if (ret) { dev_err(dev, "fail(%d) to query msix int status bd num\n", ret); - /* reset everything for now */ - set_bit(HNAE3_GLOBAL_RESET, reset_requests); return ret; } @@ -1381,8 +1673,6 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev, if (ret) { dev_err(dev, "query all mpf msix int cmd failed (%d)\n", ret); - /* reset everything for now */ - set_bit(HNAE3_GLOBAL_RESET, reset_requests); goto msi_error; } @@ -1390,9 +1680,10 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev, desc_data = (__le32 *)&desc[1]; status = le32_to_cpu(*desc_data); if (status) { - hclge_log_error(dev, "MAC_AFIFO_TNL_INT_R", - &hclge_mac_afifo_tnl_int[0], status); - set_bit(HNAE3_GLOBAL_RESET, reset_requests); + reset_level = hclge_log_error(dev, "MAC_AFIFO_TNL_INT_R", + &hclge_mac_afifo_tnl_int[0], + status); + set_bit(reset_level, reset_requests); } /* log PPU(RCB) MPF errors */ @@ -1400,9 +1691,11 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev, status = le32_to_cpu(*(desc_data + 2)) & HCLGE_PPU_MPF_INT_ST2_MSIX_MASK; if (status) { - hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2", - &hclge_ppu_mpf_abnormal_int_st2[0], status); - set_bit(HNAE3_CORE_RESET, reset_requests); + reset_level = + hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2", + &hclge_ppu_mpf_abnormal_int_st2[0], + status); + set_bit(reset_level, reset_requests); } /* clear all main PF MSIx errors */ @@ -1413,8 +1706,6 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev, if (ret) { dev_err(dev, "clear all mpf msix int cmd failed (%d)\n", ret); - /* reset everything for now */ - set_bit(HNAE3_GLOBAL_RESET, reset_requests); goto msi_error; } @@ -1428,32 +1719,37 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev, if (ret) { dev_err(dev, "query all pf msix int cmd failed (%d)\n", ret); - /* reset everything for now */ - set_bit(HNAE3_GLOBAL_RESET, reset_requests); goto msi_error; } /* log SSU PF errors */ status = le32_to_cpu(desc[0].data[0]) 
& HCLGE_SSU_PORT_INT_MSIX_MASK; if (status) { - hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT", - &hclge_ssu_port_based_pf_int[0], status); - set_bit(HNAE3_GLOBAL_RESET, reset_requests); + reset_level = hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT", + &hclge_ssu_port_based_pf_int[0], + status); + set_bit(reset_level, reset_requests); } /* read and log PPP PF errors */ desc_data = (__le32 *)&desc[2]; status = le32_to_cpu(*desc_data); - if (status) - hclge_log_error(dev, "PPP_PF_ABNORMAL_INT_ST0", - &hclge_ppp_pf_abnormal_int[0], status); + if (status) { + reset_level = hclge_log_error(dev, "PPP_PF_ABNORMAL_INT_ST0", + &hclge_ppp_pf_abnormal_int[0], + status); + set_bit(reset_level, reset_requests); + } /* log PPU(RCB) PF errors */ desc_data = (__le32 *)&desc[3]; status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_MSIX_MASK; - if (status) - hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST", - &hclge_ppu_pf_abnormal_int[0], status); + if (status) { + reset_level = hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST", + &hclge_ppu_pf_abnormal_int[0], + status); + set_bit(reset_level, reset_requests); + } /* clear all PF MSIx errors */ hclge_cmd_reuse_desc(&desc[0], false); @@ -1463,8 +1759,31 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev, if (ret) { dev_err(dev, "clear all pf msix int cmd failed (%d)\n", ret); - /* reset everything for now */ - set_bit(HNAE3_GLOBAL_RESET, reset_requests); + } + + /* query and clear mac tnl interruptions */ + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_MAC_TNL_INT, + true); + ret = hclge_cmd_send(&hdev->hw, &desc[0], 1); + if (ret) { + dev_err(dev, "query mac tnl int cmd failed (%d)\n", ret); + goto msi_error; + } + + status = le32_to_cpu(desc->data[0]); + if (status) { + /* When mac tnl interrupt occurs, we record current time and + * register status here in a fifo, then clear the status. So + * that if link status changes suddenly at some time, we can + * query them by debugfs. 
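The comment here describes a record-then-inspect scheme: each MAC tunnel interrupt snapshots (timestamp, status) into a bounded kfifo that debugfs can drain later to correlate sudden link flaps. A self-contained userspace analogue of that bounded log (the kernel side uses DECLARE_KFIFO/INIT_KFIFO/kfifo_put; everything below is an illustrative stand-in):

#include <stdio.h>
#include <stdint.h>
#include <time.h>

#define LOG_SIZE 8	/* mirrors HCLGE_MAC_TNL_LOG_SIZE */

struct tnl_record { uint64_t time_ns; uint32_t status; };

struct tnl_log {
	struct tnl_record buf[LOG_SIZE];
	unsigned int head, tail;	/* head == tail means empty */
};

/* Append one record; like kfifo_put(), fail when full instead of overwriting. */
static int tnl_log_put(struct tnl_log *log, uint32_t status)
{
	unsigned int next = (log->head + 1) % LOG_SIZE;
	struct timespec ts;

	if (next == log->tail)
		return 0;	/* full, drop the new record */
	clock_gettime(CLOCK_MONOTONIC, &ts);
	log->buf[log->head].time_ns = ts.tv_sec * 1000000000ull + ts.tv_nsec;
	log->buf[log->head].status = status;
	log->head = next;
	return 1;
}

/* Drain and print: the debugfs-style readback. */
static void tnl_log_dump(struct tnl_log *log)
{
	while (log->tail != log->head) {
		struct tnl_record *r = &log->buf[log->tail];

		printf("t=%llu status=0x%x\n",
		       (unsigned long long)r->time_ns, (unsigned)r->status);
		log->tail = (log->tail + 1) % LOG_SIZE;
	}
}

int main(void)
{
	struct tnl_log log = { 0 };

	tnl_log_put(&log, 0x3);
	tnl_log_put(&log, 0x80);
	tnl_log_dump(&log);
	return 0;
}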
+ */ + mac_tnl_stats.time = local_clock(); + mac_tnl_stats.status = status; + kfifo_put(&hdev->mac_tnl_log, mac_tnl_stats); + ret = hclge_clear_mac_tnl_int(hdev); + if (ret) + dev_err(dev, "clear mac tnl int failed (%d)\n", ret); + set_bit(HNAE3_NONE_RESET, reset_requests); } msi_error: diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h index fc068280d391..9645590c9294 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h @@ -47,6 +47,9 @@ #define HCLGE_NCSI_ERR_INT_TYPE 0x9 #define HCLGE_MAC_COMMON_ERR_INT_EN 0x107FF #define HCLGE_MAC_COMMON_ERR_INT_EN_MASK 0x107FF +#define HCLGE_MAC_TNL_INT_EN GENMASK(7, 0) +#define HCLGE_MAC_TNL_INT_EN_MASK GENMASK(7, 0) +#define HCLGE_MAC_TNL_INT_CLR GENMASK(7, 0) #define HCLGE_PPU_MPF_ABNORMAL_INT0_EN GENMASK(31, 0) #define HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK GENMASK(31, 0) #define HCLGE_PPU_MPF_ABNORMAL_INT1_EN GENMASK(31, 0) @@ -112,8 +115,10 @@ struct hclge_hw_blk { struct hclge_hw_error { u32 int_msk; const char *msg; + enum hnae3_reset_type reset_level; }; +int hclge_config_mac_tnl_int(struct hclge_dev *hdev, bool en); int hclge_hw_error_set_state(struct hclge_dev *hdev, bool state); pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev); int hclge_handle_hw_msix_error(struct hclge_dev *hdev, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index deda606c51e7..effe89fa10dd 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -12,6 +12,7 @@ #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/if_vlan.h> +#include <linux/crash_dump.h> #include <net/rtnetlink.h> #include "hclge_cmd.h" #include "hclge_dcb.h" @@ -31,6 +32,7 @@ static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps); static int hclge_init_vlan_config(struct hclge_dev *hdev); static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev); +static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle); static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size, u16 *allocated_size, bool is_alloc); @@ -697,6 +699,16 @@ static void hclge_get_stats(struct hnae3_handle *handle, u64 *data) p = hclge_tqps_get_stats(handle, p); } +static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt, + u64 *rx_cnt) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + *tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num; + *rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num; +} + static int hclge_parse_func_status(struct hclge_dev *hdev, struct hclge_func_status_cmd *status) { @@ -1015,6 +1027,23 @@ static int hclge_get_cap(struct hclge_dev *hdev) return ret; } +static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev) +{ +#define HCLGE_MIN_TX_DESC 64 +#define HCLGE_MIN_RX_DESC 64 + + if (!is_kdump_kernel()) + return; + + dev_info(&hdev->pdev->dev, + "Running kdump kernel. 
Using minimal resources\n"); + + /* minimal queue pairs equals to the number of vports */ + hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1; + hdev->num_tx_desc = HCLGE_MIN_TX_DESC; + hdev->num_rx_desc = HCLGE_MIN_RX_DESC; +} + static int hclge_configure(struct hclge_dev *hdev) { struct hclge_cfg cfg; @@ -1074,6 +1103,8 @@ static int hclge_configure(struct hclge_dev *hdev) hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE; + hclge_init_kdump_kernel_config(hdev); + return ret; } @@ -1337,6 +1368,8 @@ static int hclge_alloc_vport(struct hclge_dev *hdev) vport->back = hdev; vport->vport_id = i; vport->mps = HCLGE_MAC_DEFAULT_FRAME; + vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE; + vport->rxvlan_cfg.rx_vlan_offload_en = true; INIT_LIST_HEAD(&vport->vlan_list); INIT_LIST_HEAD(&vport->uc_mac_list); INIT_LIST_HEAD(&vport->mc_mac_list); @@ -1399,7 +1432,7 @@ static int hclge_tx_buffer_alloc(struct hclge_dev *hdev, return ret; } -static int hclge_get_tc_num(struct hclge_dev *hdev) +static u32 hclge_get_tc_num(struct hclge_dev *hdev) { int i, cnt = 0; @@ -1409,17 +1442,6 @@ static int hclge_get_tc_num(struct hclge_dev *hdev) return cnt; } -static int hclge_get_pfc_enalbe_num(struct hclge_dev *hdev) -{ - int i, cnt = 0; - - for (i = 0; i < HCLGE_MAX_TC_NUM; i++) - if (hdev->hw_tc_map & BIT(i) && - hdev->tm_info.hw_pfc_map & BIT(i)) - cnt++; - return cnt; -} - /* Get the number of pfc enabled TCs, which have private buffer */ static int hclge_get_pfc_priv_num(struct hclge_dev *hdev, struct hclge_pkt_buf_alloc *buf_alloc) @@ -1483,14 +1505,12 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, struct hclge_pkt_buf_alloc *buf_alloc, u32 rx_all) { - u32 shared_buf_min, shared_buf_tc, shared_std; - int tc_num, pfc_enable_num; + u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd; + u32 tc_num = hclge_get_tc_num(hdev); u32 shared_buf, aligned_mps; u32 rx_priv; int i; - tc_num = hclge_get_tc_num(hdev); - pfc_enable_num = hclge_get_pfc_enalbe_num(hdev); aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT); if (hnae3_dev_dcb_supported(hdev)) @@ -1499,9 +1519,7 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF + hdev->dv_buf_size; - shared_buf_tc = pfc_enable_num * aligned_mps + - (tc_num - pfc_enable_num) * aligned_mps / 2 + - aligned_mps; + shared_buf_tc = tc_num * aligned_mps + aligned_mps; shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc), HCLGE_BUF_SIZE_UNIT); @@ -1518,19 +1536,26 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, } else { buf_alloc->s_buf.self.high = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF; - buf_alloc->s_buf.self.low = - roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT); + buf_alloc->s_buf.self.low = aligned_mps; + } + + if (hnae3_dev_dcb_supported(hdev)) { + if (tc_num) + hi_thrd = (shared_buf - hdev->dv_buf_size) / tc_num; + else + hi_thrd = shared_buf - hdev->dv_buf_size; + + hi_thrd = max_t(u32, hi_thrd, 2 * aligned_mps); + hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT); + lo_thrd = hi_thrd - aligned_mps / 2; + } else { + hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF; + lo_thrd = aligned_mps; } for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { - if ((hdev->hw_tc_map & BIT(i)) && - (hdev->tm_info.hw_pfc_map & BIT(i))) { - buf_alloc->s_buf.tc_thrd[i].low = aligned_mps; - buf_alloc->s_buf.tc_thrd[i].high = 2 * aligned_mps; - } else { - buf_alloc->s_buf.tc_thrd[i].low = 0; - buf_alloc->s_buf.tc_thrd[i].high = aligned_mps; - } + 
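The buffer rework in this hunk drops the per-TC PFC special cases in favor of one uniform waterline pair: hi_thrd = rounddown(max((shared - dv) / tc_num, 2 * mps), BUF_UNIT) and lo_thrd = hi_thrd - mps / 2. A small standalone check of that arithmetic (the constants in main() are made up for the demo):

#include <stdio.h>
#include <stdint.h>

#define BUF_UNIT 256u	/* stand-in for HCLGE_BUF_SIZE_UNIT */

static uint32_t rounddown_to(uint32_t v, uint32_t unit)
{
	return v - (v % unit);
}

/* Compute the shared-buffer high/low waterlines the DCB path uses. */
static void calc_thresholds(uint32_t shared_buf, uint32_t dv_buf,
			    uint32_t tc_num, uint32_t aligned_mps,
			    uint32_t *hi, uint32_t *lo)
{
	uint32_t h = tc_num ? (shared_buf - dv_buf) / tc_num
			    : shared_buf - dv_buf;

	if (h < 2 * aligned_mps)
		h = 2 * aligned_mps;		/* never below two frames */
	h = rounddown_to(h, BUF_UNIT);		/* hardware granularity */
	*hi = h;
	*lo = h - aligned_mps / 2;		/* half a frame of hysteresis */
}

int main(void)
{
	uint32_t hi, lo;

	calc_thresholds(65536, 8192, 4, 1536, &hi, &lo);
	printf("hi=%u lo=%u\n", hi, lo);	/* prints hi=14336 lo=13568 */
	return 0;
}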
buf_alloc->s_buf.tc_thrd[i].low = lo_thrd; + buf_alloc->s_buf.tc_thrd[i].high = hi_thrd; } return true; @@ -2143,7 +2168,8 @@ static int hclge_mac_init(struct hclge_dev *hdev) static void hclge_mbx_task_schedule(struct hclge_dev *hdev) { - if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) + if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) && + !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) schedule_work(&hdev->mbx_service_task); } @@ -2222,6 +2248,7 @@ static void hclge_update_link_status(struct hclge_dev *hdev) for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { handle = &hdev->vport[i].nic; client->ops->link_status_change(handle, state); + hclge_config_mac_tnl_int(hdev, state); rhandle = &hdev->vport[i].roce; if (rclient && rclient->ops->link_status_change) rclient->ops->link_status_change(rhandle, @@ -2344,6 +2371,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) set_bit(HNAE3_IMP_RESET, &hdev->reset_pending); set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); + hdev->rst_stats.imp_rst_cnt++; return HCLGE_VECTOR0_EVENT_RST; } @@ -2352,6 +2380,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending); *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); + hdev->rst_stats.global_rst_cnt++; return HCLGE_VECTOR0_EVENT_RST; } @@ -2360,12 +2389,16 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); set_bit(HNAE3_CORE_RESET, &hdev->reset_pending); *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B); + hdev->rst_stats.core_rst_cnt++; return HCLGE_VECTOR0_EVENT_RST; } /* check for vector0 msix event source */ - if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) + if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) { + dev_dbg(&hdev->pdev->dev, "received event 0x%x\n", + msix_src_reg); return HCLGE_VECTOR0_EVENT_ERR; + } /* check for vector0 mailbox(=CMDQ RX) event source */ if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) { @@ -2374,6 +2407,9 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) return HCLGE_VECTOR0_EVENT_MBX; } + /* print other vector0 event source */ + dev_dbg(&hdev->pdev->dev, "cmdq_src_reg:0x%x, msix_src_reg:0x%x\n", + cmdq_src_reg, msix_src_reg); return HCLGE_VECTOR0_EVENT_OTHER; } @@ -2657,7 +2693,7 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset) return ret; } - if (!reset) + if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) continue; /* Inform VF to process the reset. 
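hclge_mbx_task_schedule() above now checks HCLGE_STATE_CMD_DISABLE before the usual test_and_set_bit() gate, so mailbox work is queued at most once and never while the command queue is being torn down. The same at-most-once gate in portable C11 (all names below are invented for the sketch):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool cmd_disabled;
static atomic_flag mbx_scheduled = ATOMIC_FLAG_INIT;

static void schedule_mbx_work(void)
{
	puts("mailbox work queued");
}

/* Queue the handler once; later calls are no-ops until it completes. */
static void mbx_task_schedule(void)
{
	if (atomic_load(&cmd_disabled))
		return;				/* command queue is down */
	if (atomic_flag_test_and_set(&mbx_scheduled))
		return;				/* already pending */
	schedule_mbx_work();
}

/* The worker clears the flag when done, re-arming the gate. */
static void mbx_work_done(void)
{
	atomic_flag_clear(&mbx_scheduled);
}

int main(void)
{
	mbx_task_schedule();	/* queues */
	mbx_task_schedule();	/* suppressed: already pending */
	mbx_work_done();
	mbx_task_schedule();	/* queues again */
	return 0;
}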
@@ -2694,9 +2730,18 @@ int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id) static void hclge_do_reset(struct hclge_dev *hdev) { + struct hnae3_handle *handle = &hdev->vport[0].nic; struct pci_dev *pdev = hdev->pdev; u32 val; + if (hclge_get_hw_reset_stat(handle)) { + dev_info(&pdev->dev, "Hardware reset not finish\n"); + dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n", + hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING), + hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG)); + return; + } + switch (hdev->reset_type) { case HNAE3_GLOBAL_RESET: val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); @@ -2775,6 +2820,10 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev, clear_bit(HNAE3_FLR_RESET, addr); } + if (hdev->reset_type != HNAE3_NONE_RESET && + rst_level < hdev->reset_type) + return HNAE3_NONE_RESET; + return rst_level; } @@ -2844,6 +2893,7 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev) * after hclge_cmd_init is called. */ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); + hdev->rst_stats.pf_rst_cnt++; break; case HNAE3_FLR_RESET: /* There is no mechanism for PF to know if VF has stopped IO @@ -2852,6 +2902,7 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev) msleep(100); set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); set_bit(HNAE3_FLR_DOWN, &hdev->flr_state); + hdev->rst_stats.flr_rst_cnt++; break; case HNAE3_IMP_RESET: reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); @@ -2932,7 +2983,7 @@ static void hclge_reset(struct hclge_dev *hdev) * know if device is undergoing reset */ ae_dev->reset_type = hdev->reset_type; - hdev->reset_count++; + hdev->rst_stats.reset_cnt++; /* perform reset of the stack & ae device for a client */ ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT); if (ret) @@ -2958,6 +3009,8 @@ static void hclge_reset(struct hclge_dev *hdev) goto err_reset; } + hdev->rst_stats.hw_reset_done_cnt++; + ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT); if (ret) goto err_reset; @@ -3001,7 +3054,9 @@ static void hclge_reset(struct hclge_dev *hdev) hdev->last_reset_time = jiffies; hdev->reset_fail_cnt = 0; + hdev->rst_stats.reset_done_cnt++; ae_dev->reset_type = HNAE3_NONE_RESET; + del_timer(&hdev->reset_timer); return; @@ -5194,7 +5249,7 @@ static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle) struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - return hdev->reset_count; + return hdev->rst_stats.hw_reset_done_cnt; } static void hclge_enable_fd(struct hnae3_handle *handle, bool enable) @@ -5282,8 +5337,8 @@ static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en, #define HCLGE_SERDES_RETRY_MS 10 #define HCLGE_SERDES_RETRY_NUM 100 -#define HCLGE_MAC_LINK_STATUS_MS 20 -#define HCLGE_MAC_LINK_STATUS_NUM 10 +#define HCLGE_MAC_LINK_STATUS_MS 10 +#define HCLGE_MAC_LINK_STATUS_NUM 100 #define HCLGE_MAC_LINK_STATUS_DOWN 0 #define HCLGE_MAC_LINK_STATUS_UP 1 @@ -5942,8 +5997,11 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport, } /* check if we just hit the duplicate */ - if (!ret) - ret = -EINVAL; + if (!ret) { + dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n", + vport->vport_id, addr); + return 0; + } dev_err(&hdev->pdev->dev, "PF failed to add unicast entry(%pM) in the MAC table\n", @@ -6293,7 +6351,8 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p, return -EINVAL; } - if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr)) + if ((!is_first || is_kdump_kernel()) && + 
hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr)) dev_warn(&hdev->pdev->dev, "remove old uc mac address fail.\n"); @@ -6543,30 +6602,6 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, return ret; } -int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, - u16 vlan_id, bool is_kill) -{ - struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_dev *hdev = vport->back; - - return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id, - 0, is_kill); -} - -static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, - u16 vlan, u8 qos, __be16 proto) -{ - struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_dev *hdev = vport->back; - - if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7)) - return -EINVAL; - if (proto != htons(ETH_P_8021Q)) - return -EPROTONOSUPPORT; - - return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false); -} - static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport) { struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg; @@ -6640,6 +6675,52 @@ static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport) return status; } +static int hclge_vlan_offload_cfg(struct hclge_vport *vport, + u16 port_base_vlan_state, + u16 vlan_tag) +{ + int ret; + + if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) { + vport->txvlan_cfg.accept_tag1 = true; + vport->txvlan_cfg.insert_tag1_en = false; + vport->txvlan_cfg.default_tag1 = 0; + } else { + vport->txvlan_cfg.accept_tag1 = false; + vport->txvlan_cfg.insert_tag1_en = true; + vport->txvlan_cfg.default_tag1 = vlan_tag; + } + + vport->txvlan_cfg.accept_untag1 = true; + + /* accept_tag2 and accept_untag2 are not supported on + * pdev revision(0x20), new revision support them, + * this two fields can not be configured by user. + */ + vport->txvlan_cfg.accept_tag2 = true; + vport->txvlan_cfg.accept_untag2 = true; + vport->txvlan_cfg.insert_tag2_en = false; + vport->txvlan_cfg.default_tag2 = 0; + + if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) { + vport->rxvlan_cfg.strip_tag1_en = false; + vport->rxvlan_cfg.strip_tag2_en = + vport->rxvlan_cfg.rx_vlan_offload_en; + } else { + vport->rxvlan_cfg.strip_tag1_en = + vport->rxvlan_cfg.rx_vlan_offload_en; + vport->rxvlan_cfg.strip_tag2_en = true; + } + vport->rxvlan_cfg.vlan1_vlan_prionly = false; + vport->rxvlan_cfg.vlan2_vlan_prionly = false; + + ret = hclge_set_vlan_tx_offload_cfg(vport); + if (ret) + return ret; + + return hclge_set_vlan_rx_offload_cfg(vport); +} + static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev) { struct hclge_rx_vlan_type_cfg_cmd *rx_req; @@ -6730,34 +6811,14 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev) return ret; for (i = 0; i < hdev->num_alloc_vport; i++) { - vport = &hdev->vport[i]; - vport->txvlan_cfg.accept_tag1 = true; - vport->txvlan_cfg.accept_untag1 = true; - - /* accept_tag2 and accept_untag2 are not supported on - * pdev revision(0x20), new revision support them. The - * value of this two fields will not return error when driver - * send command to fireware in revision(0x20). - * This two fields can not configured by user. 
- */ - vport->txvlan_cfg.accept_tag2 = true; - vport->txvlan_cfg.accept_untag2 = true; + u16 vlan_tag; - vport->txvlan_cfg.insert_tag1_en = false; - vport->txvlan_cfg.insert_tag2_en = false; - vport->txvlan_cfg.default_tag1 = 0; - vport->txvlan_cfg.default_tag2 = 0; - - ret = hclge_set_vlan_tx_offload_cfg(vport); - if (ret) - return ret; - - vport->rxvlan_cfg.strip_tag1_en = false; - vport->rxvlan_cfg.strip_tag2_en = true; - vport->rxvlan_cfg.vlan1_vlan_prionly = false; - vport->rxvlan_cfg.vlan2_vlan_prionly = false; + vport = &hdev->vport[i]; + vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag; - ret = hclge_set_vlan_rx_offload_cfg(vport); + ret = hclge_vlan_offload_cfg(vport, + vport->port_base_vlan_cfg.state, + vlan_tag); if (ret) return ret; } @@ -6765,7 +6826,8 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev) return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); } -void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id) +static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, + bool writen_to_tbl) { struct hclge_vport_vlan_cfg *vlan; @@ -6777,14 +6839,38 @@ void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id) if (!vlan) return; - vlan->hd_tbl_status = true; + vlan->hd_tbl_status = writen_to_tbl; vlan->vlan_id = vlan_id; list_add_tail(&vlan->node, &vport->vlan_list); } -void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, - bool is_write_tbl) +static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport) +{ + struct hclge_vport_vlan_cfg *vlan, *tmp; + struct hclge_dev *hdev = vport->back; + int ret; + + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { + if (!vlan->hd_tbl_status) { + ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), + vport->vport_id, + vlan->vlan_id, 0, false); + if (ret) { + dev_err(&hdev->pdev->dev, + "restore vport vlan list failed, ret=%d\n", + ret); + return ret; + } + } + vlan->hd_tbl_status = true; + } + + return 0; +} + +static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, + bool is_write_tbl) { struct hclge_vport_vlan_cfg *vlan, *tmp; struct hclge_dev *hdev = vport->back; @@ -6847,14 +6933,203 @@ int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) { struct hclge_vport *vport = hclge_get_vport(handle); - vport->rxvlan_cfg.strip_tag1_en = false; - vport->rxvlan_cfg.strip_tag2_en = enable; + if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) { + vport->rxvlan_cfg.strip_tag1_en = false; + vport->rxvlan_cfg.strip_tag2_en = enable; + } else { + vport->rxvlan_cfg.strip_tag1_en = enable; + vport->rxvlan_cfg.strip_tag2_en = true; + } vport->rxvlan_cfg.vlan1_vlan_prionly = false; vport->rxvlan_cfg.vlan2_vlan_prionly = false; + vport->rxvlan_cfg.rx_vlan_offload_en = enable; return hclge_set_vlan_rx_offload_cfg(vport); } +static int hclge_update_vlan_filter_entries(struct hclge_vport *vport, + u16 port_base_vlan_state, + struct hclge_vlan_info *new_info, + struct hclge_vlan_info *old_info) +{ + struct hclge_dev *hdev = vport->back; + int ret; + + if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) { + hclge_rm_vport_all_vlan_table(vport, false); + return hclge_set_vlan_filter_hw(hdev, + htons(new_info->vlan_proto), + vport->vport_id, + new_info->vlan_tag, + new_info->qos, false); + } + + ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto), + vport->vport_id, old_info->vlan_tag, + old_info->qos, true); + if (ret) + return ret; + + return 
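hclge_vlan_offload_cfg() above picks one of two tx/rx tag policies from the port-based VLAN state: with it disabled, the guest's tag1 passes through and only the outer tag is stripped when rx offload is on; with it enabled, the port VLAN is inserted on tx and always stripped on rx. The same selection as a standalone function (the struct and field names are simplified stand-ins):

#include <stdbool.h>
#include <stdio.h>

struct vlan_offload {
	bool tx_accept_tag1, tx_insert_tag1;
	unsigned short tx_default_tag1;
	bool rx_strip_tag1, rx_strip_tag2;	/* tag1 inner, tag2 outer */
};

static void vlan_offload_cfg(struct vlan_offload *cfg, bool port_vlan_on,
			     unsigned short tag, bool rx_offload_en)
{
	if (!port_vlan_on) {
		/* Guest tags pass through; strip the outer tag only when
		 * rx offload is enabled.
		 */
		cfg->tx_accept_tag1 = true;
		cfg->tx_insert_tag1 = false;
		cfg->tx_default_tag1 = 0;
		cfg->rx_strip_tag1 = false;
		cfg->rx_strip_tag2 = rx_offload_en;
	} else {
		/* Port VLAN owns the outer tag: insert it on tx, always
		 * strip it on rx, strip the inner tag per offload setting.
		 */
		cfg->tx_accept_tag1 = false;
		cfg->tx_insert_tag1 = true;
		cfg->tx_default_tag1 = tag;
		cfg->rx_strip_tag1 = rx_offload_en;
		cfg->rx_strip_tag2 = true;
	}
}

int main(void)
{
	struct vlan_offload cfg;

	vlan_offload_cfg(&cfg, true, 100, true);
	printf("insert=%d default=%u strip1=%d strip2=%d\n",
	       cfg.tx_insert_tag1, (unsigned)cfg.tx_default_tag1,
	       cfg.rx_strip_tag1, cfg.rx_strip_tag2);
	return 0;
}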
hclge_add_vport_all_vlan_table(vport); +} + +int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state, + struct hclge_vlan_info *vlan_info) +{ + struct hnae3_handle *nic = &vport->nic; + struct hclge_vlan_info *old_vlan_info; + struct hclge_dev *hdev = vport->back; + int ret; + + old_vlan_info = &vport->port_base_vlan_cfg.vlan_info; + + ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag); + if (ret) + return ret; + + if (state == HNAE3_PORT_BASE_VLAN_MODIFY) { + /* add new VLAN tag */ + ret = hclge_set_vlan_filter_hw(hdev, + htons(vlan_info->vlan_proto), + vport->vport_id, + vlan_info->vlan_tag, + vlan_info->qos, false); + if (ret) + return ret; + + /* remove old VLAN tag */ + ret = hclge_set_vlan_filter_hw(hdev, + htons(old_vlan_info->vlan_proto), + vport->vport_id, + old_vlan_info->vlan_tag, + old_vlan_info->qos, true); + if (ret) + return ret; + + goto update; + } + + ret = hclge_update_vlan_filter_entries(vport, state, vlan_info, + old_vlan_info); + if (ret) + return ret; + + /* update state only when disable/enable port based VLAN */ + vport->port_base_vlan_cfg.state = state; + if (state == HNAE3_PORT_BASE_VLAN_DISABLE) + nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE; + else + nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE; + +update: + vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag; + vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos; + vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto; + + return 0; +} + +static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport, + enum hnae3_port_base_vlan_state state, + u16 vlan) +{ + if (state == HNAE3_PORT_BASE_VLAN_DISABLE) { + if (!vlan) + return HNAE3_PORT_BASE_VLAN_NOCHANGE; + else + return HNAE3_PORT_BASE_VLAN_ENABLE; + } else { + if (!vlan) + return HNAE3_PORT_BASE_VLAN_DISABLE; + else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan) + return HNAE3_PORT_BASE_VLAN_NOCHANGE; + else + return HNAE3_PORT_BASE_VLAN_MODIFY; + } +} + +static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, + u16 vlan, u8 qos, __be16 proto) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_vlan_info vlan_info; + u16 state; + int ret; + + if (hdev->pdev->revision == 0x20) + return -EOPNOTSUPP; + + /* qos is a 3 bits value, so can not be bigger than 7 */ + if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7) + return -EINVAL; + if (proto != htons(ETH_P_8021Q)) + return -EPROTONOSUPPORT; + + vport = &hdev->vport[vfid]; + state = hclge_get_port_base_vlan_state(vport, + vport->port_base_vlan_cfg.state, + vlan); + if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE) + return 0; + + vlan_info.vlan_tag = vlan; + vlan_info.qos = qos; + vlan_info.vlan_proto = ntohs(proto); + + /* update port based VLAN for PF */ + if (!vfid) { + hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); + ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info); + hclge_notify_client(hdev, HNAE3_UP_CLIENT); + + return ret; + } + + if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) { + return hclge_update_port_base_vlan_cfg(vport, state, + &vlan_info); + } else { + ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0], + (u8)vfid, state, + vlan, qos, + ntohs(proto)); + return ret; + } +} + +int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, + u16 vlan_id, bool is_kill) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + bool 
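hclge_get_port_base_vlan_state() above reduces to a four-way decision on (enabled?, requested tag): a zero tag while disabled is a no-op, a tag while disabled enables, a zero tag while enabled disables, and a different tag while enabled modifies. The same table as a standalone helper, handy for spot-checking (the enum names are illustrative):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

enum pvlan_action { PVLAN_NOCHANGE, PVLAN_ENABLE, PVLAN_DISABLE, PVLAN_MODIFY };

/* Decide what a newly requested tag means given the current config. */
static enum pvlan_action pvlan_next(bool enabled, uint16_t cur_tag,
				    uint16_t new_tag)
{
	if (!enabled)
		return new_tag ? PVLAN_ENABLE : PVLAN_NOCHANGE;
	if (!new_tag)
		return PVLAN_DISABLE;
	return new_tag == cur_tag ? PVLAN_NOCHANGE : PVLAN_MODIFY;
}

int main(void)
{
	printf("%d\n", pvlan_next(false, 0, 0));	/* 0: NOCHANGE */
	printf("%d\n", pvlan_next(false, 0, 10));	/* 1: ENABLE   */
	printf("%d\n", pvlan_next(true, 10, 0));	/* 2: DISABLE  */
	printf("%d\n", pvlan_next(true, 10, 20));	/* 3: MODIFY   */
	return 0;
}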
writen_to_tbl = false; + int ret = 0; + + /* when port based VLAN enabled, we use port based VLAN as the VLAN + * filter entry. In this case, we don't update VLAN filter table + * when user add new VLAN or remove exist VLAN, just update the vport + * VLAN list. The VLAN id in VLAN list won't be writen in VLAN filter + * table until port based VLAN disabled + */ + if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) { + ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, + vlan_id, 0, is_kill); + writen_to_tbl = true; + } + + if (ret) + return ret; + + if (is_kill) + hclge_rm_vport_vlan_table(vport, vlan_id, false); + else + hclge_add_vport_vlan_table(vport, vlan_id, + writen_to_tbl); + + return 0; +} + static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps) { struct hclge_config_max_frm_size_cmd *req; @@ -7280,6 +7555,32 @@ static void hclge_get_mdix_mode(struct hnae3_handle *handle, *tp_mdix = ETH_TP_MDI; } +static void hclge_info_show(struct hclge_dev *hdev) +{ + struct device *dev = &hdev->pdev->dev; + + dev_info(dev, "PF info begin:\n"); + + dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps); + dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc); + dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc); + dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport); + dev_info(dev, "Numbers of vmdp vports: %d\n", hdev->num_vmdq_vport); + dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs); + dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map); + dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size); + dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size); + dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size); + dev_info(dev, "This is %s PF\n", + hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main"); + dev_info(dev, "DCB %s\n", + hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable"); + dev_info(dev, "MQPRIO %s\n", + hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? 
"enable" : "disable"); + + dev_info(dev, "PF info end.\n"); +} + static int hclge_init_client_instance(struct hnae3_client *client, struct hnae3_ae_dev *ae_dev) { @@ -7301,6 +7602,9 @@ static int hclge_init_client_instance(struct hnae3_client *client, hnae3_set_client_init_flag(client, ae_dev, 1); + if (netif_msg_drv(&hdev->vport->nic)) + hclge_info_show(hdev); + if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) { struct hnae3_client *rc = hdev->roce_client; @@ -7660,6 +7964,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) goto err_mdiobus_unreg; } + INIT_KFIFO(hdev->mac_tnl_log); + hclge_dcb_ops_set(hdev); timer_setup(&hdev->service_timer, hclge_service_timer, 0); @@ -7708,7 +8014,7 @@ static void hclge_reset_vport_state(struct hclge_dev *hdev) int i; for (i = 0; i < hdev->num_alloc_vport; i++) { - hclge_vport_start(vport); + hclge_vport_stop(vport); vport++; } } @@ -7813,6 +8119,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) hclge_enable_vector(&hdev->misc_vector, false); synchronize_irq(hdev->misc_vector.vector_irq); + hclge_config_mac_tnl_int(hdev, false); hclge_hw_error_set_state(hdev, false); hclge_cmd_uninit(hdev); hclge_misc_irq_uninit(hdev); @@ -8258,6 +8565,7 @@ static const struct hnae3_ae_ops hclge_ops = { .set_mtu = hclge_set_mtu, .reset_queue = hclge_reset_tqp, .get_stats = hclge_get_stats, + .get_mac_pause_stats = hclge_get_mac_pause_stat, .update_stats = hclge_update_stats, .get_strings = hclge_get_strings, .get_sset_count = hclge_get_sset_count, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index b57ac4beb313..4aba6248965d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -7,6 +7,7 @@ #include <linux/types.h> #include <linux/phy.h> #include <linux/if_vlan.h> +#include <linux/kfifo.h> #include "hclge_cmd.h" #include "hnae3.h" @@ -649,6 +650,23 @@ struct hclge_vport_vlan_cfg { u16 vlan_id; }; +struct hclge_rst_stats { + u32 reset_done_cnt; /* the number of reset has completed */ + u32 hw_reset_done_cnt; /* the number of HW reset has completed */ + u32 pf_rst_cnt; /* the number of PF reset */ + u32 flr_rst_cnt; /* the number of FLR */ + u32 core_rst_cnt; /* the number of CORE reset */ + u32 global_rst_cnt; /* the number of GLOBAL */ + u32 imp_rst_cnt; /* the number of IMP reset */ + u32 reset_cnt; /* the number of reset */ +}; + +/* time and register status when mac tunnel interruption occur */ +struct hclge_mac_tnl_stats { + u64 time; + u32 status; +}; + /* For each bit of TCAM entry, it uses a pair of 'x' and * 'y' to indicate which value to match, like below: * ---------------------------------- @@ -675,6 +693,7 @@ struct hclge_vport_vlan_cfg { (y) = (_k_ ^ ~_v_) & (_k_); \ } while (0) +#define HCLGE_MAC_TNL_LOG_SIZE 8 #define HCLGE_VPORT_NUM 256 struct hclge_dev { struct pci_dev *pdev; @@ -691,7 +710,7 @@ struct hclge_dev { unsigned long default_reset_request; unsigned long reset_request; /* reset has been requested */ unsigned long reset_pending; /* client rst is pending to be served */ - unsigned long reset_count; /* the number of reset has been done */ + struct hclge_rst_stats rst_stats; u32 reset_fail_cnt; u32 fw_version; u16 num_vmdq_vport; /* Num vmdq vport this PF has set up */ @@ -791,6 +810,9 @@ struct hclge_dev { struct mutex umv_mutex; /* protect share_umv_size */ struct mutex vport_cfg_mutex; /* Protect stored vf table */ + + DECLARE_KFIFO(mac_tnl_log, struct 
hclge_mac_tnl_stats, + HCLGE_MAC_TNL_LOG_SIZE); }; /* VPort level vlan tag configuration for TX direction */ @@ -807,10 +829,11 @@ struct hclge_tx_vtag_cfg { /* VPort level vlan tag configuration for RX direction */ struct hclge_rx_vtag_cfg { - bool strip_tag1_en; /* Whether strip inner vlan tag */ - bool strip_tag2_en; /* Whether strip outer vlan tag */ - bool vlan1_vlan_prionly;/* Inner VLAN Tag up to descriptor Enable */ - bool vlan2_vlan_prionly;/* Outer VLAN Tag up to descriptor Enable */ + u8 rx_vlan_offload_en; /* Whether enable rx vlan offload */ + u8 strip_tag1_en; /* Whether strip inner vlan tag */ + u8 strip_tag2_en; /* Whether strip outer vlan tag */ + u8 vlan1_vlan_prionly; /* Inner VLAN Tag up to descriptor Enable */ + u8 vlan2_vlan_prionly; /* Outer VLAN Tag up to descriptor Enable */ }; struct hclge_rss_tuple_cfg { @@ -829,6 +852,17 @@ enum HCLGE_VPORT_STATE { HCLGE_VPORT_STATE_MAX }; +struct hclge_vlan_info { + u16 vlan_proto; /* so far support 802.1Q only */ + u16 qos; + u16 vlan_tag; +}; + +struct hclge_port_base_vlan_config { + u16 state; + struct hclge_vlan_info vlan_info; +}; + struct hclge_vport { u16 alloc_tqps; /* Allocated Tx/Rx queues */ @@ -842,9 +876,10 @@ struct hclge_vport { u16 alloc_rss_size; u16 qs_offset; - u16 bw_limit; /* VSI BW Limit (0 = disabled) */ + u32 bw_limit; /* VSI BW Limit (0 = disabled) */ u8 dwrr; + struct hclge_port_base_vlan_config port_base_vlan_cfg; struct hclge_tx_vtag_cfg txvlan_cfg; struct hclge_rx_vtag_cfg rxvlan_cfg; @@ -924,9 +959,11 @@ void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr, void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list, enum HCLGE_MAC_ADDR_TYPE mac_type); void hclge_uninit_vport_mac_table(struct hclge_dev *hdev); -void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id); -void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, - bool is_write_tbl); void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list); void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev); +int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state, + struct hclge_vlan_info *vlan_info); +int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid, + u16 state, u16 vlan_tag, u16 qos, + u16 vlan_proto); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index 306a23e486de..fe48c5634a87 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -212,8 +212,7 @@ static int hclge_set_vf_promisc_mode(struct hclge_vport *vport, } static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport, - struct hclge_mbx_vf_to_pf_cmd *mbx_req, - bool gen_resp) + struct hclge_mbx_vf_to_pf_cmd *mbx_req) { const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]); struct hclge_dev *hdev = vport->back; @@ -249,7 +248,7 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport, return -EIO; } - if (gen_resp) + if (mbx_req->mbx_need_resp & HCLGE_MBX_NEED_RESP_BIT) hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0); return 0; @@ -289,9 +288,25 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport, return 0; } +int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid, + u16 state, u16 vlan_tag, u16 qos, + u16 vlan_proto) +{ +#define MSG_DATA_SIZE 8 + + u8 msg_data[MSG_DATA_SIZE]; + + memcpy(&msg_data[0], &state, sizeof(u16)); + memcpy(&msg_data[2], 
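hclge_push_vf_port_base_vlan_info() in the nearby mailbox hunk serializes four u16 fields into a fixed 8-byte payload with memcpy at offsets 0/2/4/6 (state, proto, qos, tag); PF and VF run on the same host, so no byte-order conversion is applied. A standalone pack/unpack pair in the same style (the struct is an illustrative stand-in):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define MSG_DATA_SIZE 8

struct vlan_push { uint16_t state, proto, qos, tag; };

/* Lay the fields out exactly as the mailbox message expects. */
static void pack_vlan_msg(uint8_t msg[MSG_DATA_SIZE],
			  const struct vlan_push *v)
{
	memcpy(&msg[0], &v->state, sizeof(uint16_t));
	memcpy(&msg[2], &v->proto, sizeof(uint16_t));
	memcpy(&msg[4], &v->qos,   sizeof(uint16_t));
	memcpy(&msg[6], &v->tag,   sizeof(uint16_t));
}

/* The receiving side reverses the copies at the same offsets. */
static void unpack_vlan_msg(const uint8_t msg[MSG_DATA_SIZE],
			    struct vlan_push *v)
{
	memcpy(&v->state, &msg[0], sizeof(uint16_t));
	memcpy(&v->proto, &msg[2], sizeof(uint16_t));
	memcpy(&v->qos,   &msg[4], sizeof(uint16_t));
	memcpy(&v->tag,   &msg[6], sizeof(uint16_t));
}

int main(void)
{
	struct vlan_push in = { 1, 0x8100, 3, 100 }, out;
	uint8_t msg[MSG_DATA_SIZE];

	pack_vlan_msg(msg, &in);
	unpack_vlan_msg(msg, &out);
	printf("state=%u proto=0x%x qos=%u tag=%u\n",
	       out.state, out.proto, out.qos, out.tag);
	return 0;
}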
&vlan_proto, sizeof(u16)); + memcpy(&msg_data[4], &qos, sizeof(u16)); + memcpy(&msg_data[6], &vlan_tag, sizeof(u16)); + + return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data), + HLCGE_MBX_PUSH_VLAN_INFO, vfid); +} + static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport, - struct hclge_mbx_vf_to_pf_cmd *mbx_req, - bool gen_resp) + struct hclge_mbx_vf_to_pf_cmd *mbx_req) { int status = 0; @@ -305,19 +320,27 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport, memcpy(&proto, &mbx_req->msg[5], sizeof(proto)); status = hclge_set_vlan_filter(handle, cpu_to_be16(proto), vlan, is_kill); - if (!status) - is_kill ? hclge_rm_vport_vlan_table(vport, vlan, false) - : hclge_add_vport_vlan_table(vport, vlan); } else if (mbx_req->msg[1] == HCLGE_MBX_VLAN_RX_OFF_CFG) { struct hnae3_handle *handle = &vport->nic; bool en = mbx_req->msg[2] ? true : false; status = hclge_en_hw_strip_rxvtag(handle, en); + } else if (mbx_req->msg[1] == HCLGE_MBX_PORT_BASE_VLAN_CFG) { + struct hclge_vlan_info *vlan_info; + u16 *state; + + state = (u16 *)&mbx_req->msg[2]; + vlan_info = (struct hclge_vlan_info *)&mbx_req->msg[4]; + status = hclge_update_port_base_vlan_cfg(vport, *state, + vlan_info); + } else if (mbx_req->msg[1] == HCLGE_MBX_GET_PORT_BASE_VLAN_STATE) { + u8 state; + + state = vport->port_base_vlan_cfg.state; + status = hclge_gen_resp_to_vf(vport, mbx_req, 0, &state, + sizeof(u8)); } - if (gen_resp) - status = hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0); - return status; } @@ -385,24 +408,32 @@ static int hclge_get_vf_queue_depth(struct hclge_vport *vport, HCLGE_TQPS_DEPTH_INFO_LEN); } +static int hclge_get_vf_media_type(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req) +{ + struct hclge_dev *hdev = vport->back; + u8 resp_data; + + resp_data = hdev->hw.mac.media_type; + return hclge_gen_resp_to_vf(vport, mbx_req, 0, &resp_data, + sizeof(resp_data)); +} + static int hclge_get_link_info(struct hclge_vport *vport, struct hclge_mbx_vf_to_pf_cmd *mbx_req) { struct hclge_dev *hdev = vport->back; u16 link_status; - u8 msg_data[10]; - u16 media_type; + u8 msg_data[8]; u8 dest_vfid; u16 duplex; /* mac.link can only be 0 or 1 */ link_status = (u16)hdev->hw.mac.link; duplex = hdev->hw.mac.duplex; - media_type = hdev->hw.mac.media_type; memcpy(&msg_data[0], &link_status, sizeof(u16)); memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32)); memcpy(&msg_data[6], &duplex, sizeof(u16)); - memcpy(&msg_data[8], &media_type, sizeof(u16)); dest_vfid = mbx_req->mbx_src_vfid; /* send this requested info to VF */ @@ -565,7 +596,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev) ret); break; case HCLGE_MBX_SET_UNICAST: - ret = hclge_set_vf_uc_mac_addr(vport, req, true); + ret = hclge_set_vf_uc_mac_addr(vport, req); if (ret) dev_err(&hdev->pdev->dev, "PF fail(%d) to set VF UC MAC Addr\n", @@ -579,7 +610,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev) ret); break; case HCLGE_MBX_SET_VLAN: - ret = hclge_set_vf_vlan_cfg(vport, req, false); + ret = hclge_set_vf_vlan_cfg(vport, req); if (ret) dev_err(&hdev->pdev->dev, "PF failed(%d) to config VF's VLAN\n", @@ -662,6 +693,13 @@ void hclge_mbx_handler(struct hclge_dev *hdev) hclge_rm_vport_all_vlan_table(vport, true); mutex_unlock(&hdev->vport_cfg_mutex); break; + case HCLGE_MBX_GET_MEDIA_TYPE: + ret = hclge_get_vf_media_type(vport, req); + if (ret) + dev_err(&hdev->pdev->dev, + "PF fail(%d) to media type for VF\n", + ret); + break; default: dev_err(&hdev->pdev->dev, "un-supported mailbox message, code = %d\n", diff --git 
a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c index 48eda2c6fdae..1e8134892d77 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c @@ -3,6 +3,7 @@ #include <linux/etherdevice.h> #include <linux/kernel.h> +#include <linux/marvell_phy.h> #include "hclge_cmd.h" #include "hclge_main.h" @@ -121,12 +122,18 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum) int hclge_mac_mdio_config(struct hclge_dev *hdev) { +#define PHY_INEXISTENT 255 + struct hclge_mac *mac = &hdev->hw.mac; struct phy_device *phydev; struct mii_bus *mdio_bus; int ret; - if (hdev->hw.mac.phy_addr >= PHY_MAX_ADDR) { + if (hdev->hw.mac.phy_addr == PHY_INEXISTENT) { + dev_info(&hdev->pdev->dev, + "no phy device is connected to mdio bus\n"); + return 0; + } else if (hdev->hw.mac.phy_addr >= PHY_MAX_ADDR) { dev_err(&hdev->pdev->dev, "phy_addr(%d) is too large.\n", hdev->hw.mac.phy_addr); return -EINVAL; @@ -203,6 +210,8 @@ int hclge_mac_connect_phy(struct hnae3_handle *handle) linkmode_clear_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported); + phydev->dev_flags |= MARVELL_PHY_LED0_LINK_LED1_ACTIVE; + ret = phy_connect_direct(netdev, phydev, hclge_mac_adjust_link, PHY_INTERFACE_MODE_SGMII); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index aafc69f4bfdd..a7bbb6d3091a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -1331,8 +1331,11 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init) ret = hclge_pfc_setup_hw(hdev); if (init && ret == -EOPNOTSUPP) dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n"); - else + else if (ret) { + dev_err(&hdev->pdev->dev, "config pfc failed! 
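
The hclge_mdio.c hunk above now distinguishes three cases for the firmware-reported PHY address: 255 means no PHY is wired to the MDIO bus (not an error, MDIO setup is simply skipped), anything else at or above PHY_MAX_ADDR is rejected, and the rest is probed. A compact restatement of that decision, assuming the usual PHY_MAX_ADDR value of 32; the function itself is a sketch, not driver code:

#define PHY_INEXISTENT	255	/* firmware marker: no PHY on the bus */
#define PHY_MAX_ADDR	32	/* valid MDIO addresses are 0..31 */

/* 0: nothing to probe; -1: invalid configuration; 1: probe this address */
static int classify_phy_addr(unsigned int phy_addr)
{
	if (phy_addr == PHY_INEXISTENT)
		return 0;
	if (phy_addr >= PHY_MAX_ADDR)
		return -1;
	return 1;
}
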
ret = %d\n", + ret); return ret; + } return hclge_tm_bp_setup(hdev); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c index 9441b453d38d..71f356fc2446 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c @@ -27,26 +27,39 @@ static int hclgevf_ring_space(struct hclgevf_cmq_ring *ring) return ring->desc_num - used - 1; } +static int hclgevf_is_valid_csq_clean_head(struct hclgevf_cmq_ring *ring, + int head) +{ + int ntu = ring->next_to_use; + int ntc = ring->next_to_clean; + + if (ntu > ntc) + return head >= ntc && head <= ntu; + + return head >= ntc || head <= ntu; +} + static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw) { + struct hclgevf_dev *hdev = container_of(hw, struct hclgevf_dev, hw); struct hclgevf_cmq_ring *csq = &hw->cmq.csq; - u16 ntc = csq->next_to_clean; - struct hclgevf_desc *desc; int clean = 0; u32 head; - desc = &csq->desc[ntc]; head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG); - while (head != ntc) { - memset(desc, 0, sizeof(*desc)); - ntc++; - if (ntc == csq->desc_num) - ntc = 0; - desc = &csq->desc[ntc]; - clean++; + rmb(); /* Make sure head is ready before touch any data */ + + if (!hclgevf_is_valid_csq_clean_head(csq, head)) { + dev_warn(&hdev->pdev->dev, "wrong cmd head (%d, %d-%d)\n", head, + csq->next_to_use, csq->next_to_clean); + dev_warn(&hdev->pdev->dev, + "Disabling any further commands to IMP firmware\n"); + set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); + return -EIO; } - csq->next_to_clean = ntc; + clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num; + csq->next_to_clean = head; return clean; } @@ -321,13 +334,13 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev) int ret; spin_lock_bh(&hdev->hw.cmq.csq.lock); - spin_lock_bh(&hdev->hw.cmq.crq.lock); + spin_lock(&hdev->hw.cmq.crq.lock); /* initialize the pointers of async rx queue of mailbox */ hdev->arq.hdev = hdev; hdev->arq.head = 0; hdev->arq.tail = 0; - hdev->arq.count = 0; + atomic_set(&hdev->arq.count, 0); hdev->hw.cmq.csq.next_to_clean = 0; hdev->hw.cmq.csq.next_to_use = 0; hdev->hw.cmq.crq.next_to_clean = 0; @@ -335,7 +348,7 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev) hclgevf_cmd_init_regs(&hdev->hw); - spin_unlock_bh(&hdev->hw.cmq.crq.lock); + spin_unlock(&hdev->hw.cmq.crq.lock); spin_unlock_bh(&hdev->hw.cmq.csq.lock); clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); @@ -344,8 +357,8 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev) * reset may happen when lower level reset is being processed. 
*/ if (hclgevf_is_reset_pending(hdev)) { - set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); - return -EBUSY; + ret = -EBUSY; + goto err_cmd_init; } /* get firmware version */ @@ -353,13 +366,18 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev) if (ret) { dev_err(&hdev->pdev->dev, "failed(%d) to query firmware version\n", ret); - return ret; + goto err_cmd_init; } hdev->fw_version = version; dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version); return 0; + +err_cmd_init: + set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); + + return ret; } static void hclgevf_cmd_uninit_regs(struct hclgevf_hw *hw) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 8bc28e6f465f..6ce5b036fbf4 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -245,6 +245,27 @@ static int hclgevf_get_tc_info(struct hclgevf_dev *hdev) return 0; } +static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev) +{ + struct hnae3_handle *nic = &hdev->nic; + u8 resp_msg; + int ret; + + ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN, + HCLGE_MBX_GET_PORT_BASE_VLAN_STATE, + NULL, 0, true, &resp_msg, sizeof(u8)); + if (ret) { + dev_err(&hdev->pdev->dev, + "VF request to get port based vlan state failed %d", + ret); + return ret; + } + + nic->port_base_vlan_state = resp_msg; + + return 0; +} + static int hclgevf_get_queue_info(struct hclgevf_dev *hdev) { #define HCLGEVF_TQPS_RSS_INFO_LEN 6 @@ -307,6 +328,25 @@ static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id) return qid_in_pf; } +static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev) +{ + u8 resp_msg; + int ret; + + ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_MEDIA_TYPE, 0, NULL, 0, + true, &resp_msg, sizeof(resp_msg)); + if (ret) { + dev_err(&hdev->pdev->dev, + "VF request to get the pf port media type failed %d", + ret); + return ret; + } + + hdev->hw.mac.media_type = resp_msg; + + return 0; +} + static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev) { struct hclgevf_tqp *tqp; @@ -404,7 +444,7 @@ void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state) } } -void hclgevf_update_link_mode(struct hclgevf_dev *hdev) +static void hclgevf_update_link_mode(struct hclgevf_dev *hdev) { #define HCLGEVF_ADVERTISING 0 #define HCLGEVF_SUPPORTED 1 @@ -1375,9 +1415,11 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev) case HNAE3_VF_FUNC_RESET: ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL, 0, true, NULL, sizeof(u8)); + hdev->rst_stats.vf_func_rst_cnt++; break; case HNAE3_FLR_RESET: set_bit(HNAE3_FLR_DOWN, &hdev->flr_state); + hdev->rst_stats.flr_rst_cnt++; break; default: break; @@ -1400,7 +1442,7 @@ static int hclgevf_reset(struct hclgevf_dev *hdev) * know if device is undergoing reset */ ae_dev->reset_type = hdev->reset_type; - hdev->reset_count++; + hdev->rst_stats.rst_cnt++; rtnl_lock(); /* bring down the nic to stop any ongoing TX/RX */ @@ -1426,6 +1468,8 @@ static int hclgevf_reset(struct hclgevf_dev *hdev) goto err_reset; } + hdev->rst_stats.hw_rst_done_cnt++; + rtnl_lock(); /* now, re-initialize the nic client and ae device*/ @@ -1444,6 +1488,7 @@ static int hclgevf_reset(struct hclgevf_dev *hdev) hdev->last_reset_time = jiffies; ae_dev->reset_type = HNAE3_NONE_RESET; + hdev->rst_stats.rst_done_cnt++; return ret; err_reset_lock: @@ -1455,6 +1500,8 @@ err_reset: */ hclgevf_cmd_init(hdev); 
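
The rewritten hclgevf_cmd_csq_clean() above stops walking the ring one descriptor at a time: it first checks that the hardware-reported head lies between next_to_clean and next_to_use (allowing for wrap-around) and then derives the freed count with modular arithmetic. The same two pieces of ring logic, extracted into a stand-alone, testable sketch:

#include <assert.h>
#include <stdbool.h>

/* Same predicate as hclgevf_is_valid_csq_clean_head(): on a ring, head is
 * sane only if it sits in the [next_to_clean, next_to_use] window.
 */
static bool csq_head_is_valid(int head, int ntc, int ntu)
{
	if (ntu > ntc)
		return head >= ntc && head <= ntu;
	return head >= ntc || head <= ntu;	/* window wraps past 0 */
}

/* descriptors freed when next_to_clean jumps straight to head */
static int csq_clean_count(int head, int ntc, int desc_num)
{
	return (head - ntc + desc_num) % desc_num;
}

int main(void)
{
	/* wrapped window on a 16-entry ring: ntc = 14, ntu = 3 */
	assert(csq_head_is_valid(15, 14, 3) && csq_head_is_valid(2, 14, 3));
	assert(!csq_head_is_valid(8, 14, 3));
	assert(csq_clean_count(2, 14, 16) == 4);	/* slots 14,15,0,1 */
	return 0;
}
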
dev_err(&hdev->pdev->dev, "failed to reset VF\n"); + if (hclgevf_is_reset_pending(hdev)) + hclgevf_reset_task_schedule(hdev); return ret; } @@ -1564,8 +1611,7 @@ static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev) void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev) { - if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) && - !test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) { + if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state)) { set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state); schedule_work(&hdev->rst_service_task); } @@ -1603,6 +1649,7 @@ static void hclgevf_service_timer(struct timer_list *t) mod_timer(&hdev->service_timer, jiffies + 5 * HZ); + hdev->stats_timer++; hclgevf_task_schedule(hdev); } @@ -1711,7 +1758,7 @@ static void hclgevf_keep_alive_task(struct work_struct *work) hdev = container_of(work, struct hclgevf_dev, keep_alive_task); - if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) + if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) return; ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL, @@ -1723,9 +1770,16 @@ static void hclgevf_keep_alive_task(struct work_struct *work) static void hclgevf_service_task(struct work_struct *work) { + struct hnae3_handle *handle; struct hclgevf_dev *hdev; hdev = container_of(work, struct hclgevf_dev, service_task); + handle = &hdev->nic; + + if (hdev->stats_timer >= HCLGEVF_STATS_TIMER_INTERVAL) { + hclgevf_tqps_update_stats(handle); + hdev->stats_timer = 0; + } /* request the link status from the PF. PF would be able to tell VF * about such updates in future so we might remove this later @@ -1762,6 +1816,7 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev, set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RST_INT_B); *clearval = cmdq_src_reg; + hdev->rst_stats.vf_rst_cnt++; return HCLGEVF_VECTOR0_EVENT_RST; } @@ -1814,6 +1869,11 @@ static int hclgevf_configure(struct hclgevf_dev *hdev) { int ret; + /* get current port based vlan state from PF */ + ret = hclgevf_get_port_base_vlan_filter_state(hdev); + if (ret) + return ret; + /* get queue configuration from PF */ ret = hclgevf_get_queue_info(hdev); if (ret) @@ -1824,6 +1884,10 @@ static int hclgevf_configure(struct hclgevf_dev *hdev) if (ret) return ret; + ret = hclgevf_get_pf_media_type(hdev); + if (ret) + return ret; + /* get tc configuration from PF */ return hclgevf_get_tc_info(hdev); } @@ -1986,8 +2050,10 @@ static void hclgevf_ae_stop(struct hnae3_handle *handle) set_bit(HCLGEVF_STATE_DOWN, &hdev->state); - for (i = 0; i < handle->kinfo.num_tqps; i++) - hclgevf_reset_tqp(handle, i); + if (hdev->reset_type != HNAE3_VF_RESET) + for (i = 0; i < handle->kinfo.num_tqps; i++) + if (hclgevf_reset_tqp(handle, i)) + break; /* reset tqp stats */ hclgevf_reset_tqp_stats(handle); @@ -2007,9 +2073,15 @@ static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive) static int hclgevf_client_start(struct hnae3_handle *handle) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + int ret; + + ret = hclgevf_set_alive(handle, true); + if (ret) + return ret; mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ); - return hclgevf_set_alive(handle, true); + + return 0; } static void hclgevf_client_stop(struct hnae3_handle *handle) @@ -2051,6 +2123,10 @@ static void hclgevf_state_uninit(struct hclgevf_dev *hdev) { set_bit(HCLGEVF_STATE_DOWN, &hdev->state); + if (hdev->keep_alive_timer.function) + del_timer_sync(&hdev->keep_alive_timer); + if 
(hdev->keep_alive_task.func) + cancel_work_sync(&hdev->keep_alive_task); if (hdev->service_timer.function) del_timer_sync(&hdev->service_timer); if (hdev->service_task.func) @@ -2155,6 +2231,23 @@ static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev) hclgevf_free_vector(hdev, 0); } +static void hclgevf_info_show(struct hclgevf_dev *hdev) +{ + struct device *dev = &hdev->pdev->dev; + + dev_info(dev, "VF info begin:\n"); + + dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps); + dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc); + dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc); + dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport); + dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map); + dev_info(dev, "PF media type of this VF: %d\n", + hdev->hw.mac.media_type); + + dev_info(dev, "VF info end.\n"); +} + static int hclgevf_init_client_instance(struct hnae3_client *client, struct hnae3_ae_dev *ae_dev) { @@ -2172,6 +2265,9 @@ static int hclgevf_init_client_instance(struct hnae3_client *client, hnae3_set_client_init_flag(client, ae_dev, 1); + if (netif_msg_drv(&hdev->nic)) + hclgevf_info_show(hdev); + if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) { struct hnae3_client *rc = hdev->roce_client; @@ -2677,7 +2773,7 @@ static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - return hdev->reset_count; + return hdev->rst_stats.hw_rst_done_cnt; } static void hclgevf_get_link_mode(struct hnae3_handle *handle, @@ -2756,6 +2852,31 @@ static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version, } } +void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state, + u8 *port_base_vlan_info, u8 data_size) +{ + struct hnae3_handle *nic = &hdev->nic; + + rtnl_lock(); + hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); + rtnl_unlock(); + + /* send msg to PF and wait update port based vlan info */ + hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN, + HCLGE_MBX_PORT_BASE_VLAN_CFG, + port_base_vlan_info, data_size, + false, NULL, 0); + + if (state == HNAE3_PORT_BASE_VLAN_DISABLE) + nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE; + else + nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE; + + rtnl_lock(); + hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); + rtnl_unlock(); +} + static const struct hnae3_ae_ops hclgevf_ops = { .init_ae_dev = hclgevf_init_ae_dev, .uninit_ae_dev = hclgevf_uninit_ae_dev, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index c128863ee7d0..ee3a6cbe87d3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -116,6 +116,8 @@ #define HCLGEVF_S_IP_BIT BIT(3) #define HCLGEVF_V_TAG_BIT BIT(4) +#define HCLGEVF_STATS_TIMER_INTERVAL (36) + enum hclgevf_evt_cause { HCLGEVF_VECTOR0_EVENT_RST, HCLGEVF_VECTOR0_EVENT_MBX, @@ -210,6 +212,15 @@ struct hclgevf_misc_vector { int vector_irq; }; +struct hclgevf_rst_stats { + u32 rst_cnt; /* the number of reset */ + u32 vf_func_rst_cnt; /* the number of VF function reset */ + u32 flr_rst_cnt; /* the number of FLR */ + u32 vf_rst_cnt; /* the number of VF reset */ + u32 rst_done_cnt; /* the number of reset completed */ + u32 hw_rst_done_cnt; /* the number of HW reset completed */ +}; + struct hclgevf_dev { struct pci_dev *pdev; struct hnae3_ae_dev *ae_dev; @@ -227,7 +238,7 @@ struct hclgevf_dev { #define 
HCLGEVF_RESET_REQUESTED 0 #define HCLGEVF_RESET_PENDING 1 unsigned long reset_state; /* requested, pending */ - unsigned long reset_count; /* the number of reset has been done */ + struct hclgevf_rst_stats rst_stats; u32 reset_attempts; u32 fw_version; @@ -272,6 +283,7 @@ struct hclgevf_dev { struct hnae3_client *nic_client; struct hnae3_client *roce_client; u32 flag; + u32 stats_timer; }; static inline bool hclgevf_is_reset_pending(struct hclgevf_dev *hdev) @@ -290,4 +302,6 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, u8 duplex); void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev); void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev); +void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state, + u8 *port_base_vlan_info, u8 data_size); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c index 7dc3c9f79169..30f2e9352cf3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c @@ -49,8 +49,8 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1, if (i >= HCLGEVF_MAX_TRY_TIMES) { dev_err(&hdev->pdev->dev, - "VF could not get mbx resp(=%d) from PF in %d tries\n", - hdev->mbx_resp.received_resp, i); + "VF could not get mbx(%d,%d) resp(=%d) from PF in %d tries\n", + code0, code1, hdev->mbx_resp.received_resp, i); return -EIO; } @@ -68,8 +68,11 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1, if (!(r_code0 == code0 && r_code1 == code1 && !mbx_resp->resp_status)) { dev_err(&hdev->pdev->dev, - "VF could not match resp code(code0=%d,code1=%d), %d", + "VF could not match resp code(code0=%d,code1=%d), %d\n", code0, code1, mbx_resp->resp_status); + dev_err(&hdev->pdev->dev, + "VF could not match resp r_code(r_code0=%d,r_code1=%d)\n", + r_code0, r_code1); return -EIO; } @@ -95,6 +98,8 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode, } hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false); + req->mbx_need_resp |= need_resp ? HCLGE_MBX_NEED_RESP_BIT : + ~HCLGE_MBX_NEED_RESP_BIT; req->msg[0] = code; req->msg[1] = subcode; memcpy(&req->msg[2], msg_data, msg_len); @@ -198,6 +203,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) case HCLGE_MBX_LINK_STAT_CHANGE: case HCLGE_MBX_ASSERTING_RESET: case HCLGE_MBX_LINK_STAT_MODE: + case HLCGE_MBX_PUSH_VLAN_INFO: /* set this mbx event as pending. This is required as we * might loose interrupt event when mbx task is busy * handling. 
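
The hunks around here convert hdev->arq.count to an atomic_t: the fill level of the async receive queue is read and updated both where mailbox events are queued and where they are drained, so a plain integer could lose updates. A user-space model of the same bounded-queue accounting using C11 atomics; the constant stands in for HCLGE_MBX_MAX_ARQ_MSG_NUM and the ring bookkeeping itself is elided:

#include <stdatomic.h>
#include <stdbool.h>

#define ARQ_MAX_MSG_NUM 1024	/* stand-in for HCLGE_MBX_MAX_ARQ_MSG_NUM */

static atomic_int arq_count;

/* producer: refuse (and drop the message) when the queue is full */
static bool arq_try_produce(void)
{
	if (atomic_load(&arq_count) >= ARQ_MAX_MSG_NUM)
		return false;
	/* ... copy the message in, advance the tail pointer ... */
	atomic_fetch_add(&arq_count, 1);
	return true;
}

/* consumer: called once per message taken off the head */
static void arq_consume_one(void)
{
	/* ... handle the message, advance the head pointer ... */
	atomic_fetch_sub(&arq_count, 1);
}
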
This shall be cleared when mbx task just @@ -208,7 +214,8 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) /* we will drop the async msg if we find ARQ as full * and continue with next message */ - if (hdev->arq.count >= HCLGE_MBX_MAX_ARQ_MSG_NUM) { + if (atomic_read(&hdev->arq.count) >= + HCLGE_MBX_MAX_ARQ_MSG_NUM) { dev_warn(&hdev->pdev->dev, "Async Q full, dropping msg(%d)\n", req->msg[1]); @@ -220,7 +227,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) memcpy(&msg_q[0], req->msg, HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16)); hclge_mbx_tail_ptr_move_arq(hdev->arq); - hdev->arq.count++; + atomic_inc(&hdev->arq.count); hclgevf_mbx_task_schedule(hdev); @@ -243,8 +250,8 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) { enum hnae3_reset_type reset_type; - u16 link_status; - u16 *msg_q; + u16 link_status, state; + u16 *msg_q, *vlan_info; u8 duplex; u32 speed; u32 tail; @@ -272,7 +279,6 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) link_status = le16_to_cpu(msg_q[1]); memcpy(&speed, &msg_q[2], sizeof(speed)); duplex = (u8)le16_to_cpu(msg_q[4]); - hdev->hw.mac.media_type = (u8)le16_to_cpu(msg_q[5]); /* update upper layer with new link link status */ hclgevf_update_link_status(hdev, link_status); @@ -300,6 +306,12 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) hclgevf_reset_task_schedule(hdev); break; + case HLCGE_MBX_PUSH_VLAN_INFO: + state = le16_to_cpu(msg_q[1]); + vlan_info = &msg_q[1]; + hclgevf_update_port_base_vlan_info(hdev, state, + (u8 *)vlan_info, 8); + break; default: dev_err(&hdev->pdev->dev, "fetched unsupported(%d) message from arq\n", @@ -308,7 +320,7 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) } hclge_mbx_head_ptr_move_arq(hdev->arq); - hdev->arq.count--; + atomic_dec(&hdev->arq.count); msg_q = hdev->arq.msg_q[hdev->arq.head]; } } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c index e17bf33eba0c..0fbe8046824b 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c @@ -518,7 +518,7 @@ process_sq_wqe: flush_skbs: netdev_txq = netdev_get_tx_queue(netdev, q_id); - if ((!skb->xmit_more) || (netif_xmit_stopped(netdev_txq))) + if ((!netdev_xmit_more()) || (netif_xmit_stopped(netdev_txq))) hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0); return err; diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index 90b62c1412c8..707c8ba120c2 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -1463,7 +1463,7 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr, memset(pr, 0, sizeof(struct ehea_port_res)); - pr->tx_bytes = rx_bytes; + pr->tx_bytes = tx_bytes; pr->tx_packets = tx_packets; pr->rx_bytes = rx_bytes; pr->rx_packets = rx_packets; diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c index 5e4e37132bf2..77ce17383aba 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c @@ -123,8 +123,7 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, int nr_of_cqe, u64 eq_handle, u32 cq_token) { struct ehea_cq *cq; - struct h_epa epa; - u64 *cq_handle_ref, hret, rpage; + u64 hret, rpage; u32 counter; int ret; void *vpage; @@ -139,8 +138,6 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, cq->adapter = adapter; - cq_handle_ref = 
&cq->fw_handle; - hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr, &cq->fw_handle, &cq->epas); if (hret != H_SUCCESS) { @@ -188,7 +185,6 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, } hw_qeit_reset(&cq->hw_queue); - epa = cq->epas.kernel; ehea_reset_cq_ep(cq); ehea_reset_cq_n1(cq); diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index dd71d5db7274..d86b0e5895a6 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -93,7 +93,7 @@ struct ibmveth_stat { #define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat) #define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off)) -struct ibmveth_stat ibmveth_stats[] = { +static struct ibmveth_stat ibmveth_stats[] = { { "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) }, { "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) }, { "replenish_add_buff_failure", diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 3dfb2d131eb7..b398d6c94dbd 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -120,6 +120,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *); static void release_crq_queue(struct ibmvnic_adapter *); static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p); static int init_crq_queue(struct ibmvnic_adapter *adapter); +static int send_query_phys_parms(struct ibmvnic_adapter *adapter); struct ibmvnic_stat { char name[ETH_GSTRING_LEN]; @@ -1968,13 +1969,11 @@ static void __ibmvnic_reset(struct work_struct *work) { struct ibmvnic_rwi *rwi; struct ibmvnic_adapter *adapter; - struct net_device *netdev; bool we_lock_rtnl = false; u32 reset_state; int rc = 0; adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset); - netdev = adapter->netdev; /* netif_set_real_num_xx_queues needs to take rtnl lock here * unless wait_for_reset is set, in which case the rtnl lock @@ -2279,23 +2278,20 @@ static const struct net_device_ops ibmvnic_netdev_ops = { static int ibmvnic_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { - u32 supported, advertising; + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + int rc; - supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | - SUPPORTED_FIBRE); - advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | - ADVERTISED_FIBRE); - cmd->base.speed = SPEED_1000; - cmd->base.duplex = DUPLEX_FULL; + rc = send_query_phys_parms(adapter); + if (rc) { + adapter->speed = SPEED_UNKNOWN; + adapter->duplex = DUPLEX_UNKNOWN; + } + cmd->base.speed = adapter->speed; + cmd->base.duplex = adapter->duplex; cmd->base.port = PORT_FIBRE; cmd->base.phy_address = 0; cmd->base.autoneg = AUTONEG_ENABLE; - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, - supported); - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, - advertising); - return 0; } @@ -2923,8 +2919,10 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter) goto req_tx_irq_failed; } + snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d", + adapter->vdev->unit_address, i); rc = request_irq(scrq->irq, ibmvnic_interrupt_tx, - 0, "ibmvnic_tx", scrq); + 0, scrq->name, scrq); if (rc) { dev_err(dev, "Couldn't register tx irq 0x%x. 
rc=%d\n", @@ -2944,8 +2942,10 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter) dev_err(dev, "Error mapping irq\n"); goto req_rx_irq_failed; } + snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d", + adapter->vdev->unit_address, i); rc = request_irq(scrq->irq, ibmvnic_interrupt_rx, - 0, "ibmvnic_rx", scrq); + 0, scrq->name, scrq); if (rc) { dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n", scrq->irq, rc); @@ -4297,6 +4297,73 @@ out: } } +static int send_query_phys_parms(struct ibmvnic_adapter *adapter) +{ + union ibmvnic_crq crq; + int rc; + + memset(&crq, 0, sizeof(crq)); + crq.query_phys_parms.first = IBMVNIC_CRQ_CMD; + crq.query_phys_parms.cmd = QUERY_PHYS_PARMS; + init_completion(&adapter->fw_done); + rc = ibmvnic_send_crq(adapter, &crq); + if (rc) + return rc; + wait_for_completion(&adapter->fw_done); + return adapter->fw_done_rc ? -EIO : 0; +} + +static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq, + struct ibmvnic_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int rc; + + rc = crq->query_phys_parms_rsp.rc.code; + if (rc) { + netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc); + return rc; + } + switch (cpu_to_be32(crq->query_phys_parms_rsp.speed)) { + case IBMVNIC_10MBPS: + adapter->speed = SPEED_10; + break; + case IBMVNIC_100MBPS: + adapter->speed = SPEED_100; + break; + case IBMVNIC_1GBPS: + adapter->speed = SPEED_1000; + break; + case IBMVNIC_10GBP: + adapter->speed = SPEED_10000; + break; + case IBMVNIC_25GBPS: + adapter->speed = SPEED_25000; + break; + case IBMVNIC_40GBPS: + adapter->speed = SPEED_40000; + break; + case IBMVNIC_50GBPS: + adapter->speed = SPEED_50000; + break; + case IBMVNIC_100GBPS: + adapter->speed = SPEED_100000; + break; + default: + netdev_warn(netdev, "Unknown speed 0x%08x\n", + cpu_to_be32(crq->query_phys_parms_rsp.speed)); + adapter->speed = SPEED_UNKNOWN; + } + if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX) + adapter->duplex = DUPLEX_FULL; + else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX) + adapter->duplex = DUPLEX_HALF; + else + adapter->duplex = DUPLEX_UNKNOWN; + + return rc; +} + static void ibmvnic_handle_crq(union ibmvnic_crq *crq, struct ibmvnic_adapter *adapter) { @@ -4445,6 +4512,10 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, case GET_VPD_RSP: handle_vpd_rsp(crq, adapter); break; + case QUERY_PHYS_PARMS_RSP: + adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter); + complete(&adapter->fw_done); + break; default: netdev_err(netdev, "Got an invalid cmd type 0x%02x\n", gen_crq->cmd); @@ -4600,8 +4671,9 @@ static int init_crq_queue(struct ibmvnic_adapter *adapter) (unsigned long)adapter); netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq); - rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME, - adapter); + snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x", + adapter->vdev->unit_address); + rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter); if (rc) { dev_err(dev, "Couldn't register irq 0x%x. 
rc=%d\n", vdev->irq, rc); diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index f2018dbebfa5..cffdac372a33 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -377,11 +377,16 @@ struct ibmvnic_phys_parms { u8 flags2; #define IBMVNIC_LOGICAL_LNK_ACTIVE 0x80 __be32 speed; -#define IBMVNIC_AUTONEG 0x80 -#define IBMVNIC_10MBPS 0x40 -#define IBMVNIC_100MBPS 0x20 -#define IBMVNIC_1GBPS 0x10 -#define IBMVNIC_10GBPS 0x08 +#define IBMVNIC_AUTONEG 0x80000000 +#define IBMVNIC_10MBPS 0x40000000 +#define IBMVNIC_100MBPS 0x20000000 +#define IBMVNIC_1GBPS 0x10000000 +#define IBMVNIC_10GBP 0x08000000 +#define IBMVNIC_40GBPS 0x04000000 +#define IBMVNIC_100GBPS 0x02000000 +#define IBMVNIC_25GBPS 0x01000000 +#define IBMVNIC_50GBPS 0x00800000 +#define IBMVNIC_200GBPS 0x00400000 __be32 mtu; struct ibmvnic_rc rc; } __packed __aligned(8); @@ -850,6 +855,7 @@ struct ibmvnic_crq_queue { dma_addr_t msg_token; spinlock_t lock; bool active; + char name[32]; }; union sub_crq { @@ -876,6 +882,7 @@ struct ibmvnic_sub_crq_queue { struct sk_buff *rx_skb_top; struct ibmvnic_adapter *adapter; atomic_t used; + char name[32]; }; struct ibmvnic_long_term_buff { @@ -999,6 +1006,9 @@ struct ibmvnic_adapter { int phys_link_state; int logical_link_state; + u32 speed; + u8 duplex; + /* login data */ struct ibmvnic_login_buffer *login_buf; dma_addr_t login_buf_token; diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 0fd268070fb4..a65d5a9ba7db 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -2797,7 +2797,7 @@ static int e100_set_features(struct net_device *netdev, netdev->features = features; e100_exec_cb(nic, NULL, e100_configure); - return 0; + return 1; } static const struct net_device_ops e100_netdev_ops = { diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 8fe9af0e2ab7..6f72ab139fd9 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -820,7 +820,7 @@ static int e1000_set_features(struct net_device *netdev, else e1000_reset(adapter); - return 0; + return 1; } static const struct net_device_ops e1000_netdev_ops = { @@ -3267,7 +3267,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, /* Make sure there is space in the ring for the next send. 
*/ e1000_maybe_stop_tx(netdev, tx_ring, desc_needed); - if (!skb->xmit_more || + if (!netdev_xmit_more() || netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) { writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt); /* we need this if more than one processor can write to diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 7acc61e4f645..a8fa4a1628f5 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -5897,7 +5897,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, DIV_ROUND_UP(PAGE_SIZE, adapter->tx_fifo_limit) + 2)); - if (!skb->xmit_more || + if (!netdev_xmit_more() || netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) { if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) e1000e_update_tdt_wa(tx_ring, @@ -7003,7 +7003,7 @@ static int e1000_set_features(struct net_device *netdev, else e1000e_reset(adapter); - return 0; + return 1; } static const struct net_device_ops e1000e_netdev_ops = { @@ -7350,7 +7350,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP); - if (pci_dev_run_wake(pdev)) + if (pci_dev_run_wake(pdev) && hw->mac.type < e1000_pch_cnp) pm_runtime_put_noidle(&pdev->dev); return 0; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index ecef949f3baa..b4d970e44163 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -280,7 +280,7 @@ static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer, /* we need the header to contain the greater of either ETH_HLEN or * 60 bytes if the skb->len is less than 60 for skb_pad. 
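
The xmit_more conversions in the e1000/e1000e hunks above (and the matching hinic and fm10k conversions nearby) keep the same doorbell-batching idiom while moving the "more packets are coming" hint from skb->xmit_more to the netdev_xmit_more() helper: the tail register is written only when the batch ends or the queue has been stopped, since a stopped queue gets no further chance to flush. Condensed to its skeleton, with write_tail() standing in for the device-specific MMIO write:

#include <stdbool.h>

/* stand-in for the device-specific MMIO tail/doorbell write */
static void write_tail(unsigned int tail) { (void)tail; }

static void maybe_ring_doorbell(bool xmit_more, bool queue_stopped,
				unsigned int tail)
{
	/* batch doorbells: ring only at the end of a train of packets,
	 * or immediately if the queue stalled and must be flushed */
	if (!xmit_more || queue_stopped)
		write_tail(tail);
}
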
*/ - pull_len = eth_get_headlen(va, FM10K_RX_HDR_LEN); + pull_len = eth_get_headlen(skb->dev, va, FM10K_RX_HDR_LEN); /* align pull length to size of long to optimize memcpy performance */ memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long))); @@ -1037,7 +1037,7 @@ static void fm10k_tx_map(struct fm10k_ring *tx_ring, fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED); /* notify HW of packet */ - if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { writel(i, tx_ring->tail); /* we need this if more than one processor can write to our tail diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile index 50590e8d1fd1..2f21b3e89fd0 100644 --- a/drivers/net/ethernet/intel/i40e/Makefile +++ b/drivers/net/ethernet/intel/i40e/Makefile @@ -21,6 +21,7 @@ i40e-objs := i40e_main.o \ i40e_diag.o \ i40e_txrx.o \ i40e_ptp.o \ + i40e_ddp.o \ i40e_client.o \ i40e_virtchnl_pf.o \ i40e_xsk.o diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index d3cc3427caad..c4afb852cb57 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -321,6 +321,29 @@ struct i40e_udp_port_config { u8 filter_index; }; +#define I40_DDP_FLASH_REGION 100 +#define I40E_PROFILE_INFO_SIZE 48 +#define I40E_MAX_PROFILE_NUM 16 +#define I40E_PROFILE_LIST_SIZE \ + (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4) +#define I40E_DDP_PROFILE_PATH "intel/i40e/ddp/" +#define I40E_DDP_PROFILE_NAME_MAX 64 + +int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size, + bool is_add); +int i40e_ddp_flash(struct net_device *netdev, struct ethtool_flash *flash); + +struct i40e_ddp_profile_list { + u32 p_count; + struct i40e_profile_info p_info[0]; +}; + +struct i40e_ddp_old_profile_list { + struct list_head list; + size_t old_ddp_size; + u8 old_ddp_buf[0]; +}; + /* macros related to FLX_PIT */ #define I40E_FLEX_SET_FSIZE(fsize) (((fsize) << \ I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) & \ @@ -589,6 +612,8 @@ struct i40e_pf { struct sk_buff *ptp_tx_skb; unsigned long ptp_tx_start; struct hwtstamp_config tstamp_config; + struct timespec64 ptp_prev_hw_time; + ktime_t ptp_reset_start; struct mutex tmreg_lock; /* Used to protect the SYSTIME registers. 
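
The new DDP bookkeeping in i40e.h above sizes the firmware's profile-list reply as a 4-byte count followed by up to 16 records of 48 bytes each, and overlays flexible-array structs on that raw buffer. A stand-alone check of the arithmetic plus a layout mirror (the kernel header spells the flexible member as p_info[0]):

#include <assert.h>
#include <stdint.h>

#define I40E_PROFILE_INFO_SIZE 48
#define I40E_MAX_PROFILE_NUM 16
#define I40E_PROFILE_LIST_SIZE \
	(I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4)

struct ddp_profile_list {	/* layout mirror of i40e_ddp_profile_list */
	uint32_t p_count;	/* number of valid records that follow */
	uint8_t p_info[];	/* p_count records of 48 bytes each */
};

int main(void)
{
	/* 16 * 48 B of records + 4 B count word = 772 B reply buffer */
	assert(I40E_PROFILE_LIST_SIZE == 772);
	return 0;
}
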
*/ u32 ptp_adj_mult; u32 tx_hwtstamp_timeouts; @@ -610,6 +635,8 @@ struct i40e_pf { u16 override_q_count; u16 last_sw_conf_flags; u16 last_sw_conf_valid_flags; + /* List to keep previous DDP profiles to be rolled back in the future */ + struct list_head ddp_old_prof; }; /** @@ -1083,6 +1110,8 @@ void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index); void i40e_ptp_set_increment(struct i40e_pf *pf); int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr); int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr); +void i40e_ptp_save_hw_time(struct i40e_pf *pf); +void i40e_ptp_restore_hw_time(struct i40e_pf *pf); void i40e_ptp_init(struct i40e_pf *pf); void i40e_ptp_stop(struct i40e_pf *pf); int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi); diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 7ab61f6ebb5f..45f6adc8ff2f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -749,7 +749,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, if (val >= hw->aq.num_asq_entries) { i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: head overrun at %d\n", val); - status = I40E_ERR_QUEUE_EMPTY; + status = I40E_ERR_ADMIN_QUEUE_FULL; goto asq_send_command_error; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 11506102471c..522058a7d4be 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -11,8 +11,8 @@ */ #define I40E_FW_API_VERSION_MAJOR 0x0001 -#define I40E_FW_API_VERSION_MINOR_X722 0x0006 -#define I40E_FW_API_VERSION_MINOR_X710 0x0007 +#define I40E_FW_API_VERSION_MINOR_X722 0x0008 +#define I40E_FW_API_VERSION_MINOR_X710 0x0008 #define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? 
\ I40E_FW_API_VERSION_MINOR_X710 : \ diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 97a9b1fb4763..dd6b3b3ac5c6 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1466,7 +1466,6 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx) **/ u32 i40e_led_get(struct i40e_hw *hw) { - u32 current_mode = 0; u32 mode = 0; int i; @@ -1479,21 +1478,6 @@ u32 i40e_led_get(struct i40e_hw *hw) if (!gpio_val) continue; - /* ignore gpio LED src mode entries related to the activity - * LEDs - */ - current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) - >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT); - switch (current_mode) { - case I40E_COMBINED_ACTIVITY: - case I40E_FILTER_ACTIVITY: - case I40E_MAC_ACTIVITY: - case I40E_LINK_ACTIVITY: - continue; - default: - break; - } - mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT; break; @@ -1513,7 +1497,6 @@ u32 i40e_led_get(struct i40e_hw *hw) **/ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink) { - u32 current_mode = 0; int i; if (mode & 0xfffffff0) @@ -1527,22 +1510,6 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink) if (!gpio_val) continue; - - /* ignore gpio LED src mode entries related to the activity - * LEDs - */ - current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) - >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT); - switch (current_mode) { - case I40E_COMBINED_ACTIVITY: - case I40E_FILTER_ACTIVITY: - case I40E_MAC_ACTIVITY: - case I40E_LINK_ACTIVITY: - continue; - default: - break; - } - gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK; /* this & is a bit of paranoia, but serves as a range check */ gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) & @@ -5448,6 +5415,163 @@ i40e_find_segment_in_package(u32 segment_type, return NULL; } +/* Get section table in profile */ +#define I40E_SECTION_TABLE(profile, sec_tbl) \ + do { \ + struct i40e_profile_segment *p = (profile); \ + u32 count; \ + u32 *nvm; \ + count = p->device_table_count; \ + nvm = (u32 *)&p->device_table[count]; \ + sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; \ + } while (0) + +/* Get section header in profile */ +#define I40E_SECTION_HEADER(profile, offset) \ + (struct i40e_profile_section_header *)((u8 *)(profile) + (offset)) + +/** + * i40e_find_section_in_profile + * @section_type: the section type to search for (i.e., SECTION_TYPE_NOTE) + * @profile: pointer to the i40e segment header to be searched + * + * This function searches i40e segment for a particular section type. On + * success it returns a pointer to the section header, otherwise it will + * return NULL. 
+ **/ +struct i40e_profile_section_header * +i40e_find_section_in_profile(u32 section_type, + struct i40e_profile_segment *profile) +{ + struct i40e_profile_section_header *sec; + struct i40e_section_table *sec_tbl; + u32 sec_off; + u32 i; + + if (profile->header.type != SEGMENT_TYPE_I40E) + return NULL; + + I40E_SECTION_TABLE(profile, sec_tbl); + + for (i = 0; i < sec_tbl->section_count; i++) { + sec_off = sec_tbl->section_offset[i]; + sec = I40E_SECTION_HEADER(profile, sec_off); + if (sec->section.type == section_type) + return sec; + } + + return NULL; +} + +/** + * i40e_ddp_exec_aq_section - Execute generic AQ for DDP + * @hw: pointer to the hw struct + * @aq: command buffer containing all data to execute AQ + **/ +static enum +i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw, + struct i40e_profile_aq_section *aq) +{ + i40e_status status; + struct i40e_aq_desc desc; + u8 *msg = NULL; + u16 msglen; + + i40e_fill_default_direct_cmd_desc(&desc, aq->opcode); + desc.flags |= cpu_to_le16(aq->flags); + memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw)); + + msglen = aq->datalen; + if (msglen) { + desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | + I40E_AQ_FLAG_RD)); + if (msglen > I40E_AQ_LARGE_BUF) + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + desc.datalen = cpu_to_le16(msglen); + msg = &aq->data[0]; + } + + status = i40e_asq_send_command(hw, &desc, msg, msglen, NULL); + + if (status) { + i40e_debug(hw, I40E_DEBUG_PACKAGE, + "unable to exec DDP AQ opcode %u, error %d\n", + aq->opcode, status); + return status; + } + + /* copy returned desc to aq_buf */ + memcpy(aq->param, desc.params.raw, sizeof(desc.params.raw)); + + return 0; +} + +/** + * i40e_validate_profile + * @hw: pointer to the hardware structure + * @profile: pointer to the profile segment of the package to be validated + * @track_id: package tracking id + * @rollback: flag if the profile is for rollback. + * + * Validates supported devices and profile's sections. 
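
The I40E_SECTION_TABLE macro above compresses some pointer arithmetic: the section table sits behind the device table and a u32-counted NVM word array, and each section is then addressed by a byte offset from the start of the segment. The walk that i40e_find_section_in_profile() performs, modeled over a flat buffer with reduced types (all demo_* names are illustrative):

#include <stddef.h>
#include <stdint.h>

struct demo_section_header {	/* reduced i40e_profile_section_header */
	uint32_t type;
	uint32_t size;
};

/* scan `count` byte offsets (relative to the segment start) and return
 * the first section of the wanted type, or NULL if absent */
static struct demo_section_header *
demo_find_section(uint8_t *segment, const uint32_t *section_offset,
		  uint32_t count, uint32_t wanted_type)
{
	for (uint32_t i = 0; i < count; i++) {
		struct demo_section_header *sec = (struct demo_section_header *)
			(segment + section_offset[i]);
		if (sec->type == wanted_type)
			return sec;
	}
	return NULL;
}
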
+ */ +static enum i40e_status_code +i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, + u32 track_id, bool rollback) +{ + struct i40e_profile_section_header *sec = NULL; + i40e_status status = 0; + struct i40e_section_table *sec_tbl; + u32 vendor_dev_id; + u32 dev_cnt; + u32 sec_off; + u32 i; + + if (track_id == I40E_DDP_TRACKID_INVALID) { + i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n"); + return I40E_NOT_SUPPORTED; + } + + dev_cnt = profile->device_table_count; + for (i = 0; i < dev_cnt; i++) { + vendor_dev_id = profile->device_table[i].vendor_dev_id; + if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL && + hw->device_id == (vendor_dev_id & 0xFFFF)) + break; + } + if (dev_cnt && i == dev_cnt) { + i40e_debug(hw, I40E_DEBUG_PACKAGE, + "Device doesn't support DDP\n"); + return I40E_ERR_DEVICE_NOT_SUPPORTED; + } + + I40E_SECTION_TABLE(profile, sec_tbl); + + /* Validate sections types */ + for (i = 0; i < sec_tbl->section_count; i++) { + sec_off = sec_tbl->section_offset[i]; + sec = I40E_SECTION_HEADER(profile, sec_off); + if (rollback) { + if (sec->section.type == SECTION_TYPE_MMIO || + sec->section.type == SECTION_TYPE_AQ || + sec->section.type == SECTION_TYPE_RB_AQ) { + i40e_debug(hw, I40E_DEBUG_PACKAGE, + "Not a roll-back package\n"); + return I40E_NOT_SUPPORTED; + } + } else { + if (sec->section.type == SECTION_TYPE_RB_AQ || + sec->section.type == SECTION_TYPE_RB_MMIO) { + i40e_debug(hw, I40E_DEBUG_PACKAGE, + "Not an original package\n"); + return I40E_NOT_SUPPORTED; + } + } + } + + return status; +} + /** * i40e_write_profile * @hw: pointer to the hardware structure @@ -5463,47 +5587,99 @@ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, i40e_status status = 0; struct i40e_section_table *sec_tbl; struct i40e_profile_section_header *sec = NULL; - u32 dev_cnt; - u32 vendor_dev_id; - u32 *nvm; + struct i40e_profile_aq_section *ddp_aq; u32 section_size = 0; u32 offset = 0, info = 0; + u32 sec_off; u32 i; - dev_cnt = profile->device_table_count; + status = i40e_validate_profile(hw, profile, track_id, false); + if (status) + return status; - for (i = 0; i < dev_cnt; i++) { - vendor_dev_id = profile->device_table[i].vendor_dev_id; - if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL) - if (hw->device_id == (vendor_dev_id & 0xFFFF)) + I40E_SECTION_TABLE(profile, sec_tbl); + + for (i = 0; i < sec_tbl->section_count; i++) { + sec_off = sec_tbl->section_offset[i]; + sec = I40E_SECTION_HEADER(profile, sec_off); + /* Process generic admin command */ + if (sec->section.type == SECTION_TYPE_AQ) { + ddp_aq = (struct i40e_profile_aq_section *)&sec[1]; + status = i40e_ddp_exec_aq_section(hw, ddp_aq); + if (status) { + i40e_debug(hw, I40E_DEBUG_PACKAGE, + "Failed to execute aq: section %d, opcode %u\n", + i, ddp_aq->opcode); break; + } + sec->section.type = SECTION_TYPE_RB_AQ; + } + + /* Skip any non-mmio sections */ + if (sec->section.type != SECTION_TYPE_MMIO) + continue; + + section_size = sec->section.size + + sizeof(struct i40e_profile_section_header); + + /* Write MMIO section */ + status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size, + track_id, &offset, &info, NULL); + if (status) { + i40e_debug(hw, I40E_DEBUG_PACKAGE, + "Failed to write profile: section %d, offset %d, info %d\n", + i, offset, info); + break; + } } - if (i == dev_cnt) { - i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support DDP"); - return I40E_ERR_DEVICE_NOT_SUPPORTED; - } + return status; +} + +/** + * i40e_rollback_profile + * @hw: pointer to the 
hardware structure + * @profile: pointer to the profile segment of the package to be removed + * @track_id: package tracking id + * + * Rolls back previously loaded package. + */ +enum i40e_status_code +i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, + u32 track_id) +{ + struct i40e_profile_section_header *sec = NULL; + i40e_status status = 0; + struct i40e_section_table *sec_tbl; + u32 offset = 0, info = 0; + u32 section_size = 0; + u32 sec_off; + int i; + + status = i40e_validate_profile(hw, profile, track_id, true); + if (status) + return status; - nvm = (u32 *)&profile->device_table[dev_cnt]; - sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; + I40E_SECTION_TABLE(profile, sec_tbl); - for (i = 0; i < sec_tbl->section_count; i++) { - sec = (struct i40e_profile_section_header *)((u8 *)profile + - sec_tbl->section_offset[i]); + /* For rollback write sections in reverse */ + for (i = sec_tbl->section_count - 1; i >= 0; i--) { + sec_off = sec_tbl->section_offset[i]; + sec = I40E_SECTION_HEADER(profile, sec_off); - /* Skip 'AQ', 'note' and 'name' sections */ - if (sec->section.type != SECTION_TYPE_MMIO) + /* Skip any non-rollback sections */ + if (sec->section.type != SECTION_TYPE_RB_MMIO) continue; section_size = sec->section.size + sizeof(struct i40e_profile_section_header); - /* Write profile */ + /* Write roll-back MMIO section */ status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size, track_id, &offset, &info, NULL); if (status) { i40e_debug(hw, I40E_DEBUG_PACKAGE, - "Failed to write profile: offset %d, info %d", - offset, info); + "Failed to write profile: section %d, offset %d, info %d\n", + i, offset, info); break; } } diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c index 56bff8faf371..292eeb3def10 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c @@ -863,22 +863,23 @@ out: /** * i40e_init_dcb * @hw: pointer to the hw struct + * @enable_mib_change: enable mib change event * * Update DCB configuration from the Firmware **/ -i40e_status i40e_init_dcb(struct i40e_hw *hw) +i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change) { i40e_status ret = 0; struct i40e_lldp_variables lldp_cfg; u8 adminstatus = 0; if (!hw->func_caps.dcb) - return ret; + return I40E_NOT_SUPPORTED; /* Read LLDP NVM area */ ret = i40e_read_lldp_cfg(hw, &lldp_cfg); if (ret) - return ret; + return I40E_ERR_NOT_READY; /* Get the LLDP AdminStatus for the current port */ adminstatus = lldp_cfg.adminstatus >> (hw->port * 4); @@ -887,7 +888,7 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw) /* LLDP agent disabled */ if (!adminstatus) { hw->dcbx_status = I40E_DCBX_STATUS_DISABLED; - return ret; + return I40E_ERR_NOT_READY; } /* Get DCBX status */ @@ -896,26 +897,19 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw) return ret; /* Check the DCBX Status */ - switch (hw->dcbx_status) { - case I40E_DCBX_STATUS_DONE: - case I40E_DCBX_STATUS_IN_PROGRESS: + if (hw->dcbx_status == I40E_DCBX_STATUS_DONE || + hw->dcbx_status == I40E_DCBX_STATUS_IN_PROGRESS) { /* Get current DCBX configuration */ ret = i40e_get_dcb_config(hw); if (ret) return ret; - break; - case I40E_DCBX_STATUS_DISABLED: - return ret; - case I40E_DCBX_STATUS_NOT_STARTED: - case I40E_DCBX_STATUS_MULTIPLE_PEERS: - default: - break; + } else if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) { + return I40E_ERR_NOT_READY; } /* Configure the LLDP MIB change event */ - ret = 
i40e_aq_cfg_lldp_mib_change_event(hw, true, NULL); - if (ret) - return ret; + if (enable_mib_change) + ret = i40e_aq_cfg_lldp_mib_change_event(hw, true, NULL); return ret; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h index 2b748a60a843..ddb48ae7cce4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h @@ -124,5 +124,5 @@ i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type, u8 bridgetype, struct i40e_dcbx_config *dcbcfg); i40e_status i40e_get_dcb_config(struct i40e_hw *hw); -i40e_status i40e_init_dcb(struct i40e_hw *hw); +i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change); #endif /* _I40E_DCB_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_ddp.c b/drivers/net/ethernet/intel/i40e/i40e_ddp.c new file mode 100644 index 000000000000..5e08f100c413 --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_ddp.c @@ -0,0 +1,481 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2013 - 2018 Intel Corporation. */ + +#include "i40e.h" + +#include <linux/firmware.h> + +/** + * i40e_ddp_profiles_eq - checks if DDP profiles are equivalent + * @a: new profile info + * @b: old profile info + * + * checks if DDP profiles are equivalent. + * Returns true if profiles are the same. + **/ +static bool i40e_ddp_profiles_eq(struct i40e_profile_info *a, + struct i40e_profile_info *b) +{ + return a->track_id == b->track_id && + !memcmp(&a->version, &b->version, sizeof(a->version)) && + !memcmp(&a->name, &b->name, I40E_DDP_NAME_SIZE); +} + +/** + * i40e_ddp_does_profile_exist - checks if DDP profile loaded already + * @hw: HW data structure + * @pinfo: DDP profile information structure + * + * checks if DDP profile loaded already. + * Returns >0 if the profile exists. + * Returns 0 if the profile is absent. + * Returns <0 if error. + **/ +static int i40e_ddp_does_profile_exist(struct i40e_hw *hw, + struct i40e_profile_info *pinfo) +{ + struct i40e_ddp_profile_list *profile_list; + u8 buff[I40E_PROFILE_LIST_SIZE]; + i40e_status status; + int i; + + status = i40e_aq_get_ddp_list(hw, buff, I40E_PROFILE_LIST_SIZE, 0, + NULL); + if (status) + return -1; + + profile_list = (struct i40e_ddp_profile_list *)buff; + for (i = 0; i < profile_list->p_count; i++) { + if (i40e_ddp_profiles_eq(pinfo, &profile_list->p_info[i])) + return 1; + } + return 0; +} + +/** + * i40e_ddp_profiles_overlap - checks if DDP profiles overlap. + * @new: new profile info + * @old: old profile info + * + * checks if DDP profiles overlap. + * Returns true if the profiles overlap. + **/ +static bool i40e_ddp_profiles_overlap(struct i40e_profile_info *new, + struct i40e_profile_info *old) +{ + unsigned int group_id_old = (u8)((old->track_id & 0x00FF0000) >> 16); + unsigned int group_id_new = (u8)((new->track_id & 0x00FF0000) >> 16); + + /* 0x00 group must be only the first */ + if (group_id_new == 0) + return true; + /* 0xFF group is compatible with anything else */ + if (group_id_new == 0xFF || group_id_old == 0xFF) + return false; + /* otherwise only profiles from the same group are compatible */ + return group_id_old != group_id_new; +} + +/** + * i40e_ddp_does_profile_overlap - checks if DDP overlaps with existing one. + * @hw: HW data structure + * @pinfo: DDP profile information structure + * + * checks if DDP profile overlaps with existing one. + * Returns >0 if the profile overlaps. + * Returns 0 if the profile is ok. + * Returns <0 if error.
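
The overlap test above reduces to a three-way rule on the group id carried in bits 23:16 of track_id: group 0x00 may only ever be loaded first, group 0xFF coexists with anything, and otherwise only profiles from the same group may be combined. The same predicate in stand-alone form with a few spot checks:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static uint8_t ddp_group(uint32_t track_id)
{
	return (uint8_t)((track_id & 0x00FF0000) >> 16);
}

/* true means the new profile conflicts with an already-loaded one */
static bool ddp_overlap(uint32_t new_id, uint32_t old_id)
{
	uint8_t gnew = ddp_group(new_id), gold = ddp_group(old_id);

	if (gnew == 0x00)			/* 0x00 must be the only one */
		return true;
	if (gnew == 0xFF || gold == 0xFF)	/* wildcard group */
		return false;
	return gnew != gold;			/* distinct groups collide */
}

int main(void)
{
	assert(ddp_overlap(0x00000001, 0x00010000));	/* new group 0x00 */
	assert(!ddp_overlap(0x00FF0000, 0x00010000));	/* 0xFF wildcard */
	assert(!ddp_overlap(0x00010002, 0x00010003));	/* same group */
	assert(ddp_overlap(0x00020000, 0x00010000));	/* different groups */
	return 0;
}
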
+ **/ +static int i40e_ddp_does_profile_overlap(struct i40e_hw *hw, + struct i40e_profile_info *pinfo) +{ + struct i40e_ddp_profile_list *profile_list; + u8 buff[I40E_PROFILE_LIST_SIZE]; + i40e_status status; + int i; + + status = i40e_aq_get_ddp_list(hw, buff, I40E_PROFILE_LIST_SIZE, 0, + NULL); + if (status) + return -EIO; + + profile_list = (struct i40e_ddp_profile_list *)buff; + for (i = 0; i < profile_list->p_count; i++) { + if (i40e_ddp_profiles_overlap(pinfo, + &profile_list->p_info[i])) + return 1; + } + return 0; +} + +/** + * i40e_add_pinfo + * @hw: pointer to the hardware structure + * @profile: pointer to the profile segment of the package + * @profile_info_sec: buffer for information section + * @track_id: package tracking id + * + * Register a profile to the list of loaded profiles. + */ +static enum i40e_status_code +i40e_add_pinfo(struct i40e_hw *hw, struct i40e_profile_segment *profile, + u8 *profile_info_sec, u32 track_id) +{ + struct i40e_profile_section_header *sec; + struct i40e_profile_info *pinfo; + i40e_status status; + u32 offset = 0, info = 0; + + sec = (struct i40e_profile_section_header *)profile_info_sec; + sec->tbl_size = 1; + sec->data_end = sizeof(struct i40e_profile_section_header) + + sizeof(struct i40e_profile_info); + sec->section.type = SECTION_TYPE_INFO; + sec->section.offset = sizeof(struct i40e_profile_section_header); + sec->section.size = sizeof(struct i40e_profile_info); + pinfo = (struct i40e_profile_info *)(profile_info_sec + + sec->section.offset); + pinfo->track_id = track_id; + pinfo->version = profile->version; + pinfo->op = I40E_DDP_ADD_TRACKID; + + /* Clear reserved field */ + memset(pinfo->reserved, 0, sizeof(pinfo->reserved)); + memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE); + + status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end, + track_id, &offset, &info, NULL); + return status; +} + +/** + * i40e_del_pinfo - delete DDP profile info from NIC + * @hw: HW data structure + * @profile: DDP profile segment to be deleted + * @profile_info_sec: DDP profile section header + * @track_id: track ID of the profile for deletion + * + * Removes DDP profile from the NIC. 
+ **/ +static enum i40e_status_code +i40e_del_pinfo(struct i40e_hw *hw, struct i40e_profile_segment *profile, + u8 *profile_info_sec, u32 track_id) +{ + struct i40e_profile_section_header *sec; + struct i40e_profile_info *pinfo; + i40e_status status; + u32 offset = 0, info = 0; + + sec = (struct i40e_profile_section_header *)profile_info_sec; + sec->tbl_size = 1; + sec->data_end = sizeof(struct i40e_profile_section_header) + + sizeof(struct i40e_profile_info); + sec->section.type = SECTION_TYPE_INFO; + sec->section.offset = sizeof(struct i40e_profile_section_header); + sec->section.size = sizeof(struct i40e_profile_info); + pinfo = (struct i40e_profile_info *)(profile_info_sec + + sec->section.offset); + pinfo->track_id = track_id; + pinfo->version = profile->version; + pinfo->op = I40E_DDP_REMOVE_TRACKID; + + /* Clear reserved field */ + memset(pinfo->reserved, 0, sizeof(pinfo->reserved)); + memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE); + + status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end, + track_id, &offset, &info, NULL); + return status; +} + +/** + * i40e_ddp_is_pkg_hdr_valid - performs basic pkg header integrity checks + * @netdev: net device structure (for logging purposes) + * @pkg_hdr: pointer to package header + * @size_huge: size of the whole DDP profile package in size_t + * + * Checks correctness of pkg header: Version, size too big/small, and + * all segment offsets alignment and boundaries. This function lets + * reject non DDP profile file to be loaded by administrator mistake. + **/ +static bool i40e_ddp_is_pkg_hdr_valid(struct net_device *netdev, + struct i40e_package_header *pkg_hdr, + size_t size_huge) +{ + u32 size = 0xFFFFFFFFU & size_huge; + u32 pkg_hdr_size; + u32 segment; + + if (!pkg_hdr) + return false; + + if (pkg_hdr->version.major > 0) { + struct i40e_ddp_version ver = pkg_hdr->version; + + netdev_err(netdev, "Unsupported DDP profile version %u.%u.%u.%u", + ver.major, ver.minor, ver.update, ver.draft); + return false; + } + if (size_huge > size) { + netdev_err(netdev, "Invalid DDP profile - size is bigger than 4G"); + return false; + } + if (size < (sizeof(struct i40e_package_header) + + sizeof(struct i40e_metadata_segment) + sizeof(u32) * 2)) { + netdev_err(netdev, "Invalid DDP profile - size is too small."); + return false; + } + + pkg_hdr_size = sizeof(u32) * (pkg_hdr->segment_count + 2U); + if (size < pkg_hdr_size) { + netdev_err(netdev, "Invalid DDP profile - too many segments"); + return false; + } + for (segment = 0; segment < pkg_hdr->segment_count; ++segment) { + u32 offset = pkg_hdr->segment_offset[segment]; + + if (0xFU & offset) { + netdev_err(netdev, + "Invalid DDP profile %u segment alignment", + segment); + return false; + } + if (pkg_hdr_size > offset || offset >= size) { + netdev_err(netdev, + "Invalid DDP profile %u segment offset", + segment); + return false; + } + } + + return true; +} + +/** + * i40e_ddp_load - performs DDP loading + * @netdev: net device structure + * @data: buffer containing recipe file + * @size: size of the buffer + * @is_add: true when loading profile, false when rolling back the previous one + * + * Checks correctness and loads DDP profile to the NIC. The function is + * also used for rolling back previously loaded profile. 
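
i40e_ddp_is_pkg_hdr_valid() above is a checklist of untrusted-input checks applied before anything in the user-supplied file is dereferenced: supported version, minimum size, header size versus segment count, and, per segment, a 16-byte-aligned offset that lands past the header but inside the file. That last two-sided bound is worth spelling out on its own; a sketch:

#include <stdbool.h>
#include <stdint.h>

/* a segment offset is acceptable iff it is 16-byte aligned, starts past
 * the package header, and stays inside the file */
static bool segment_offset_ok(uint32_t offset, uint32_t pkg_hdr_size,
			      uint32_t size)
{
	if (offset & 0xFU)
		return false;
	return offset >= pkg_hdr_size && offset < size;
}
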
+ **/ +int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size, + bool is_add) +{ + u8 profile_info_sec[sizeof(struct i40e_profile_section_header) + + sizeof(struct i40e_profile_info)]; + struct i40e_metadata_segment *metadata_hdr; + struct i40e_profile_segment *profile_hdr; + struct i40e_profile_info pinfo; + struct i40e_package_header *pkg_hdr; + i40e_status status; + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + u32 track_id; + int istatus; + + pkg_hdr = (struct i40e_package_header *)data; + if (!i40e_ddp_is_pkg_hdr_valid(netdev, pkg_hdr, size)) + return -EINVAL; + + if (size < (sizeof(struct i40e_package_header) + + sizeof(struct i40e_metadata_segment) + sizeof(u32) * 2)) { + netdev_err(netdev, "Invalid DDP recipe size."); + return -EINVAL; + } + + /* Find beginning of segment data in buffer */ + metadata_hdr = (struct i40e_metadata_segment *) + i40e_find_segment_in_package(SEGMENT_TYPE_METADATA, pkg_hdr); + if (!metadata_hdr) { + netdev_err(netdev, "Failed to find metadata segment in DDP recipe."); + return -EINVAL; + } + + track_id = metadata_hdr->track_id; + profile_hdr = (struct i40e_profile_segment *) + i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr); + if (!profile_hdr) { + netdev_err(netdev, "Failed to find profile segment in DDP recipe."); + return -EINVAL; + } + + pinfo.track_id = track_id; + pinfo.version = profile_hdr->version; + if (is_add) + pinfo.op = I40E_DDP_ADD_TRACKID; + else + pinfo.op = I40E_DDP_REMOVE_TRACKID; + + memcpy(pinfo.name, profile_hdr->name, I40E_DDP_NAME_SIZE); + + /* Check if profile data already exists*/ + istatus = i40e_ddp_does_profile_exist(&pf->hw, &pinfo); + if (istatus < 0) { + netdev_err(netdev, "Failed to fetch loaded profiles."); + return istatus; + } + if (is_add) { + if (istatus > 0) { + netdev_err(netdev, "DDP profile already loaded."); + return -EINVAL; + } + istatus = i40e_ddp_does_profile_overlap(&pf->hw, &pinfo); + if (istatus < 0) { + netdev_err(netdev, "Failed to fetch loaded profiles."); + return istatus; + } + if (istatus > 0) { + netdev_err(netdev, "DDP profile overlaps with existing one."); + return -EINVAL; + } + } else { + if (istatus == 0) { + netdev_err(netdev, + "DDP profile for deletion does not exist."); + return -EINVAL; + } + } + + /* Load profile data */ + if (is_add) { + status = i40e_write_profile(&pf->hw, profile_hdr, track_id); + if (status) { + if (status == I40E_ERR_DEVICE_NOT_SUPPORTED) { + netdev_err(netdev, + "Profile is not supported by the device."); + return -EPERM; + } + netdev_err(netdev, "Failed to write DDP profile."); + return -EIO; + } + } else { + status = i40e_rollback_profile(&pf->hw, profile_hdr, track_id); + if (status) { + netdev_err(netdev, "Failed to remove DDP profile."); + return -EIO; + } + } + + /* Add/remove profile to/from profile list in FW */ + if (is_add) { + status = i40e_add_pinfo(&pf->hw, profile_hdr, profile_info_sec, + track_id); + if (status) { + netdev_err(netdev, "Failed to add DDP profile info."); + return -EIO; + } + } else { + status = i40e_del_pinfo(&pf->hw, profile_hdr, profile_info_sec, + track_id); + if (status) { + netdev_err(netdev, "Failed to restore DDP profile info."); + return -EIO; + } + } + + return 0; +} + +/** + * i40e_ddp_restore - restore previously loaded profile and remove from list + * @pf: PF data struct + * + * Restores previously loaded profile stored on the list in driver memory. + * After rolling back removes entry from the list. 
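+ *
+ * The entries consumed here are queued up by i40e_ddp_flash() after each
+ * successful load, roughly:
+ *
+ *	memcpy(list_entry->old_ddp_buf, ddp_config->data, ddp_config->size);
+ *	list_entry->old_ddp_size = ddp_config->size;
+ *	list_add(&list_entry->list, &pf->ddp_old_prof);
+ *
+ * Userspace requests the rollback by passing "-" instead of a file name
+ * to the ethtool flash interface, e.g. "ethtool -f <iface> - <region>",
+ * with the region number matching I40_DDP_FLASH_REGION.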
+ **/ +static int i40e_ddp_restore(struct i40e_pf *pf) +{ + struct i40e_ddp_old_profile_list *entry; + struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev; + int status = 0; + + if (!list_empty(&pf->ddp_old_prof)) { + entry = list_first_entry(&pf->ddp_old_prof, + struct i40e_ddp_old_profile_list, + list); + status = i40e_ddp_load(netdev, entry->old_ddp_buf, + entry->old_ddp_size, false); + list_del(&entry->list); + kfree(entry); + } + return status; +} + +/** + * i40e_ddp_flash - callback function for ethtool flash feature + * @netdev: net device structure + * @flash: kernel flash structure + * + * Ethtool callback function used for loading and unloading DDP profiles. + **/ +int i40e_ddp_flash(struct net_device *netdev, struct ethtool_flash *flash) +{ + const struct firmware *ddp_config; + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + int status = 0; + + /* Check for valid region first */ + if (flash->region != I40_DDP_FLASH_REGION) { + netdev_err(netdev, "Requested firmware region is not recognized by this driver."); + return -EINVAL; + } + if (pf->hw.bus.func != 0) { + netdev_err(netdev, "Any DDP operation is allowed only on Phy0 NIC interface"); + return -EINVAL; + } + + /* If the user supplied "-" instead of file name rollback previously + * stored profile. + */ + if (strncmp(flash->data, "-", 2) != 0) { + struct i40e_ddp_old_profile_list *list_entry; + char profile_name[sizeof(I40E_DDP_PROFILE_PATH) + + I40E_DDP_PROFILE_NAME_MAX]; + + profile_name[sizeof(profile_name) - 1] = 0; + strncpy(profile_name, I40E_DDP_PROFILE_PATH, + sizeof(profile_name) - 1); + strncat(profile_name, flash->data, I40E_DDP_PROFILE_NAME_MAX); + /* Load DDP recipe. */ + status = request_firmware(&ddp_config, profile_name, + &netdev->dev); + if (status) { + netdev_err(netdev, "DDP recipe file request failed."); + return status; + } + + status = i40e_ddp_load(netdev, ddp_config->data, + ddp_config->size, true); + + if (!status) { + list_entry = + kzalloc(sizeof(struct i40e_ddp_old_profile_list) + + ddp_config->size, GFP_KERNEL); + if (!list_entry) { + netdev_info(netdev, "Failed to allocate memory for previous DDP profile data."); + netdev_info(netdev, "New profile loaded but roll-back will be impossible."); + } else { + memcpy(list_entry->old_ddp_buf, + ddp_config->data, ddp_config->size); + list_entry->old_ddp_size = ddp_config->size; + list_add(&list_entry->list, &pf->ddp_old_prof); + } + } + + release_firmware(ddp_config); + } else { + if (!list_empty(&pf->ddp_old_prof)) { + status = i40e_ddp_restore(pf); + } else { + netdev_warn(netdev, "There is no DDP profile to restore."); + status = -ENOENT; + } + } + return status; +} diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 7874d0ec7fb0..9eaea1bee4a1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -535,9 +535,12 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, ethtool_link_ksettings_add_link_mode(ks, advertising, 1000baseT_Full); } - if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4) + if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4) { ethtool_link_ksettings_add_link_mode(ks, supported, 40000baseSR4_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseSR4_Full); + } if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4) ethtool_link_ksettings_add_link_mode(ks, supported, 40000baseLR4_Full); @@ -724,6 +727,8 @@ static void 
i40e_get_settings_link_up(struct i40e_hw *hw, case I40E_PHY_TYPE_40GBASE_SR4: ethtool_link_ksettings_add_link_mode(ks, supported, 40000baseSR4_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseSR4_Full); break; case I40E_PHY_TYPE_40GBASE_LR4: ethtool_link_ksettings_add_link_mode(ks, supported, @@ -5171,6 +5176,7 @@ static const struct ethtool_ops i40e_ethtool_ops = { .set_link_ksettings = i40e_set_link_ksettings, .get_fecparam = i40e_get_fec_param, .set_fecparam = i40e_set_fec_param, + .flash_device = i40e_ddp_flash, }; void i40e_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index b1c265012c8a..65c2b9d2652b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -2107,11 +2107,22 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name, fcnt = i40e_update_filter_state(num_add, list, add_head); if (fcnt != num_add) { - set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); - dev_warn(&vsi->back->pdev->dev, - "Error %s adding RX filters on %s, promiscuous mode forced on\n", - i40e_aq_str(hw, aq_err), - vsi_name); + if (vsi->type == I40E_VSI_MAIN) { + set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); + dev_warn(&vsi->back->pdev->dev, + "Error %s adding RX filters on %s, promiscuous mode forced on\n", + i40e_aq_str(hw, aq_err), vsi_name); + } else if (vsi->type == I40E_VSI_SRIOV || + vsi->type == I40E_VSI_VMDQ1 || + vsi->type == I40E_VSI_VMDQ2) { + dev_warn(&vsi->back->pdev->dev, + "Error %s adding RX filters on %s, please set promiscuous on manually for %s\n", + i40e_aq_str(hw, aq_err), vsi_name, vsi_name); + } else { + dev_warn(&vsi->back->pdev->dev, + "Error %s adding RX filters on %s, incorrect VSI type: %i.\n", + i40e_aq_str(hw, aq_err), vsi_name, vsi->type); + } } } @@ -2654,6 +2665,10 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi) struct i40e_vsi_context ctxt; i40e_status ret; + /* Don't modify stripping options if a port VLAN is active */ + if (vsi->info.pvid) + return; + if ((vsi->info.valid_sections & cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) && ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0)) @@ -2684,6 +2699,10 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi) struct i40e_vsi_context ctxt; i40e_status ret; + /* Don't modify stripping options if a port VLAN is active */ + if (vsi->info.pvid) + return; + if ((vsi->info.valid_sections & cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) && ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) == @@ -6403,7 +6422,7 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf) goto out; /* Get the initial DCB configuration */ - err = i40e_init_dcb(hw); + err = i40e_init_dcb(hw, true); if (!err) { /* Device/Function is not DCBX capable */ if ((!hw->func_caps.dcb) || @@ -6846,10 +6865,12 @@ static int i40e_setup_tc(struct net_device *netdev, void *type_data) struct i40e_pf *pf = vsi->back; u8 enabled_tc = 0, num_tc, hw; bool need_reset = false; + int old_queue_pairs; int ret = -EINVAL; u16 mode; int i; + old_queue_pairs = vsi->num_queue_pairs; num_tc = mqprio_qopt->qopt.num_tc; hw = mqprio_qopt->qopt.hw; mode = mqprio_qopt->mode; @@ -6950,6 +6971,7 @@ config_tc: } ret = i40e_configure_queue_channels(vsi); if (ret) { + vsi->num_queue_pairs = old_queue_pairs; netdev_info(netdev, "Failed configuring queue channels\n"); need_reset = true; @@ -9290,6 +9312,11 @@ static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired) 
dev_warn(&pf->pdev->dev, "shutdown_lan_hmc failed: %d\n", ret); } + + /* Save the current PTP time so that we can restore the time after the + * reset completes. + */ + i40e_ptp_save_hw_time(pf); } /** @@ -13984,6 +14011,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_LIST_HEAD(&pf->l3_flex_pit_list); INIT_LIST_HEAD(&pf->l4_flex_pit_list); + INIT_LIST_HEAD(&pf->ddp_old_prof); /* set up the locks for the AQ, do this only once in probe * and destroy them only once in remove @@ -14042,7 +14070,11 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) { if (err == I40E_ERR_FIRMWARE_API_VERSION) dev_info(&pdev->dev, - "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n"); + "The driver for the device stopped because the NVM image v%u.%u is newer than expected v%u.%u. You must install the most recent version of the network driver.\n", + hw->aq.api_maj_ver, + hw->aq.api_min_ver, + I40E_FW_API_VERSION_MAJOR, + I40E_FW_MINOR_VERSION(hw)); else dev_info(&pdev->dev, "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n"); @@ -14060,10 +14092,18 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) dev_info(&pdev->dev, - "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n"); + "The driver for the device detected a newer version of the NVM image v%u.%u than expected v%u.%u. Please install the most recent version of the network driver.\n", + hw->aq.api_maj_ver, + hw->aq.api_min_ver, + I40E_FW_API_VERSION_MAJOR, + I40E_FW_MINOR_VERSION(hw)); else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4) dev_info(&pdev->dev, - "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n"); + "The driver for the device detected an older version of the NVM image v%u.%u than expected v%u.%u. 
Please update the NVM image.\n", + hw->aq.api_maj_ver, + hw->aq.api_min_ver, + I40E_FW_API_VERSION_MAJOR, + I40E_FW_MINOR_VERSION(hw)); i40e_verify_eeprom(pf); diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index e08d754824b1..663c8bf4d3d8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -429,10 +429,16 @@ i40e_status i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff, struct i40e_generic_seg_header * i40e_find_segment_in_package(u32 segment_type, struct i40e_package_header *pkg_header); +struct i40e_profile_section_header * +i40e_find_section_in_profile(u32 section_type, + struct i40e_profile_segment *profile); enum i40e_status_code i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg, u32 track_id); enum i40e_status_code +i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg, + u32 track_id); +enum i40e_status_code i40e_add_pinfo_to_list(struct i40e_hw *hw, struct i40e_profile_segment *profile, u8 *profile_info_sec, u32 track_id); diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 31575c0bb884..439c35f0c581 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -725,16 +725,68 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf) pf->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; pf->tstamp_config.tx_type = HWTSTAMP_TX_OFF; + /* Set the previous "reset" time to the current Kernel clock time */ + pf->ptp_prev_hw_time = ktime_to_timespec64(ktime_get_real()); + pf->ptp_reset_start = ktime_get(); + return 0; } /** + * i40e_ptp_save_hw_time - Save the current PTP time as ptp_prev_hw_time + * @pf: Board private structure + * + * Read the current PTP time and save it into pf->ptp_prev_hw_time. This should + * be called at the end of preparing to reset, just before hardware reset + * occurs, in order to preserve the PTP time as close as possible across + * resets. + */ +void i40e_ptp_save_hw_time(struct i40e_pf *pf) +{ + /* don't try to access the PTP clock if it's not enabled */ + if (!(pf->flags & I40E_FLAG_PTP)) + return; + + i40e_ptp_gettimex(&pf->ptp_caps, &pf->ptp_prev_hw_time, NULL); + /* Get a monotonic starting time for this reset */ + pf->ptp_reset_start = ktime_get(); +} + +/** + * i40e_ptp_restore_hw_time - Restore the ptp_prev_hw_time + delta to PTP regs + * @pf: Board private structure + * + * Restore the PTP hardware clock registers. We previously cached the PTP + * hardware time as pf->ptp_prev_hw_time. To be as accurate as possible, + * update this value based on the time delta since the time was saved, using + * CLOCK_MONOTONIC (via ktime_get()) to calculate the time difference. + * + * This ensures that the hardware clock is restored to nearly what it should + * have been if a reset had not occurred. + */ +void i40e_ptp_restore_hw_time(struct i40e_pf *pf) +{ + ktime_t delta = ktime_sub(ktime_get(), pf->ptp_reset_start); + + /* Update the previous HW time with the ktime delta */ + timespec64_add_ns(&pf->ptp_prev_hw_time, ktime_to_ns(delta)); + + /* Restore the hardware clock registers */ + i40e_ptp_settime(&pf->ptp_caps, &pf->ptp_prev_hw_time); +} + +/** * i40e_ptp_init - Initialize the 1588 support after device probe or reset * @pf: Board private structure * * This function sets device up for 1588 support. The first time it is run, it * will create a PHC clock device. 
It does not create a clock device if one * already exists. It also reconfigures the device after a reset. + * + * The first time a clock is created, i40e_ptp_create_clock will set + * pf->ptp_prev_hw_time to the current system time. During resets, it is + * expected that this timespec will be set to the last known PTP clock time, + * in order to preserve the clock time as close as possible across a reset. **/ void i40e_ptp_init(struct i40e_pf *pf) { @@ -766,7 +818,6 @@ void i40e_ptp_init(struct i40e_pf *pf) dev_err(&pf->pdev->dev, "%s: ptp_clock_register failed\n", __func__); } else if (pf->ptp_clock) { - struct timespec64 ts; u32 regval; if (pf->hw.debug_mask & I40E_DEBUG_LAN) @@ -787,9 +838,8 @@ void i40e_ptp_init(struct i40e_pf *pf) /* reset timestamping mode */ i40e_ptp_set_timestamp_mode(pf, &pf->tstamp_config); - /* Set the clock value. */ - ts = ktime_to_timespec64(ktime_get_real()); - i40e_ptp_settime(&pf->ptp_caps, &ts); + /* Restore the clock time based on last known value */ + i40e_ptp_restore_hw_time(pf); } } diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 6c97667d20ef..e1931701cd7e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2035,7 +2035,8 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring, /* Determine available headroom for copy */ headlen = size; if (headlen > I40E_RX_HDR_SIZE) - headlen = eth_get_headlen(xdp->data, I40E_RX_HDR_SIZE); + headlen = eth_get_headlen(skb->dev, xdp->data, + I40E_RX_HDR_SIZE); /* align pull length to size of long to optimize memcpy performance */ memcpy(__skb_put(skb, headlen), xdp->data, @@ -3469,7 +3470,7 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, first->next_to_watch = tx_desc; /* notify HW of packet */ - if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { writel(i, tx_ring->tail); /* we need this if more than one processor can write to our tail diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index 2781ab91ca82..79420bcc7414 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -1527,6 +1527,8 @@ struct i40e_generic_seg_header { struct i40e_metadata_segment { struct i40e_generic_seg_header header; struct i40e_ddp_version version; +#define I40E_DDP_TRACKID_RDONLY 0 +#define I40E_DDP_TRACKID_INVALID 0xFFFFFFFF u32 track_id; char name[I40E_DDP_NAME_SIZE]; }; @@ -1555,15 +1557,36 @@ struct i40e_profile_section_header { struct { #define SECTION_TYPE_INFO 0x00000010 #define SECTION_TYPE_MMIO 0x00000800 +#define SECTION_TYPE_RB_MMIO 0x00001800 #define SECTION_TYPE_AQ 0x00000801 +#define SECTION_TYPE_RB_AQ 0x00001801 #define SECTION_TYPE_NOTE 0x80000000 #define SECTION_TYPE_NAME 0x80000001 +#define SECTION_TYPE_PROTO 0x80000002 +#define SECTION_TYPE_PCTYPE 0x80000003 +#define SECTION_TYPE_PTYPE 0x80000004 u32 type; u32 offset; u32 size; } section; }; +struct i40e_profile_tlv_section_record { + u8 rtype; + u8 type; + u16 len; + u8 data[12]; +}; + +/* Generic AQ section in proflie */ +struct i40e_profile_aq_section { + u16 opcode; + u16 flags; + u8 param[16]; + u16 datalen; + u8 data[1]; +}; + struct i40e_profile_info { u32 track_id; struct i40e_ddp_version version; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c 
b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 831d52bc3c9a..71cd159e7902 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -2454,8 +2454,10 @@ error_param: (u8 *)&stats, sizeof(stats)); } -/* If the VF is not trusted restrict the number of MAC/VLAN it can program */ -#define I40E_VC_MAX_MAC_ADDR_PER_VF 12 +/* If the VF is not trusted restrict the number of MAC/VLAN it can program + * MAC filters: 16 for multicast, 1 for MAC, 1 for broadcast + */ +#define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1) #define I40E_VC_MAX_VLAN_PER_VF 8 /** diff --git a/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h index af4f94a6541e..e5ae4a1c0cff 100644 --- a/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h @@ -14,7 +14,7 @@ #define I40E_FW_API_VERSION_MAJOR 0x0001 #define I40E_FW_API_VERSION_MINOR_X722 0x0005 -#define I40E_FW_API_VERSION_MINOR_X710 0x0007 +#define I40E_FW_API_VERSION_MINOR_X710 0x0008 #define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \ I40E_FW_API_VERSION_MINOR_X710 : \ diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c index 9b4d7cec2e18..cf8be63a8a4f 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c @@ -1315,7 +1315,7 @@ static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring, /* Determine available headroom for copy */ headlen = size; if (headlen > IAVF_RX_HDR_SIZE) - headlen = eth_get_headlen(va, IAVF_RX_HDR_SIZE); + headlen = eth_get_headlen(skb->dev, va, IAVF_RX_HDR_SIZE); /* align pull length to size of long to optimize memcpy performance */ memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); @@ -2358,7 +2358,7 @@ static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb, first->next_to_watch = tx_desc; /* notify HW of packet */ - if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { writel(i, tx_ring->tail); /* we need this if more than one processor can write to our tail diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile index e5d6f684437e..2d140ba83781 100644 --- a/drivers/net/ethernet/intel/ice/Makefile +++ b/drivers/net/ethernet/intel/ice/Makefile @@ -17,3 +17,4 @@ ice-y := ice_main.o \ ice_txrx.o \ ice_ethtool.o ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o +ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_lib.o diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 89440775aea1..878a75182d6d 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -34,6 +34,7 @@ #include "ice_devids.h" #include "ice_type.h" #include "ice_txrx.h" +#include "ice_dcb.h" #include "ice_switch.h" #include "ice_common.h" #include "ice_sched.h" @@ -42,10 +43,21 @@ extern const char ice_drv_ver[]; #define ICE_BAR0 0 -#define ICE_DFLT_NUM_DESC 128 #define ICE_REQ_DESC_MULTIPLE 32 #define ICE_MIN_NUM_DESC ICE_REQ_DESC_MULTIPLE #define ICE_MAX_NUM_DESC 8160 +/* set default number of Rx/Tx descriptors to the minimum between + * ICE_MAX_NUM_DESC and the number of descriptors to fill up an entire page + */ +#define ICE_DFLT_NUM_RX_DESC min_t(u16, ICE_MAX_NUM_DESC, \ + ALIGN(PAGE_SIZE / \ + sizeof(union ice_32byte_rx_desc), \ + 
ICE_REQ_DESC_MULTIPLE)) +#define ICE_DFLT_NUM_TX_DESC min_t(u16, ICE_MAX_NUM_DESC, \ + ALIGN(PAGE_SIZE / \ + sizeof(struct ice_tx_desc), \ + ICE_REQ_DESC_MULTIPLE)) + #define ICE_DFLT_TRAFFIC_CLASS BIT(0) #define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16) #define ICE_ETHTOOL_FWVER_LEN 32 @@ -114,6 +126,23 @@ extern const char ice_drv_ver[]; #define ice_for_each_q_vector(vsi, i) \ for ((i) = 0; (i) < (vsi)->num_q_vectors; (i)++) +#define ICE_UCAST_PROMISC_BITS (ICE_PROMISC_UCAST_TX | ICE_PROMISC_MCAST_TX | \ + ICE_PROMISC_UCAST_RX | ICE_PROMISC_MCAST_RX) + +#define ICE_UCAST_VLAN_PROMISC_BITS (ICE_PROMISC_UCAST_TX | \ + ICE_PROMISC_MCAST_TX | \ + ICE_PROMISC_UCAST_RX | \ + ICE_PROMISC_MCAST_RX | \ + ICE_PROMISC_VLAN_TX | \ + ICE_PROMISC_VLAN_RX) + +#define ICE_MCAST_PROMISC_BITS (ICE_PROMISC_MCAST_TX | ICE_PROMISC_MCAST_RX) + +#define ICE_MCAST_VLAN_PROMISC_BITS (ICE_PROMISC_MCAST_TX | \ + ICE_PROMISC_MCAST_RX | \ + ICE_PROMISC_VLAN_TX | \ + ICE_PROMISC_VLAN_RX) + struct ice_tc_info { u16 qoffset; u16 qcount_tx; @@ -123,7 +152,7 @@ struct ice_tc_info { struct ice_tc_cfg { u8 numtc; /* Total number of enabled TCs */ - u8 ena_tc; /* TX map */ + u8 ena_tc; /* Tx map */ struct ice_tc_info tc_info[ICE_MAX_TRAFFIC_CLASS]; }; @@ -134,7 +163,7 @@ struct ice_res_tracker { }; struct ice_qs_cfg { - struct mutex *qs_mutex; /* will be assgined to &pf->avail_q_mutex */ + struct mutex *qs_mutex; /* will be assigned to &pf->avail_q_mutex */ unsigned long *pf_map; unsigned long pf_map_size; unsigned int q_count; @@ -247,6 +276,7 @@ struct ice_vsi { u8 irqs_ready; u8 current_isup; /* Sync 'link up' logging */ u8 stat_offsets_loaded; + u8 vlan_ena; /* queue information */ u8 tx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */ @@ -257,26 +287,33 @@ struct ice_vsi { u16 num_txq; /* Used Tx queues */ u16 alloc_rxq; /* Allocated Rx queues */ u16 num_rxq; /* Used Rx queues */ - u16 num_desc; + u16 num_rx_desc; + u16 num_tx_desc; struct ice_tc_cfg tc_cfg; } ____cacheline_internodealigned_in_smp; /* struct that defines an interrupt vector */ struct ice_q_vector { struct ice_vsi *vsi; - cpumask_t affinity_mask; - struct napi_struct napi; - struct ice_ring_container rx; - struct ice_ring_container tx; - struct irq_affinity_notify affinity_notify; + u16 v_idx; /* index in the vsi->q_vector array. 
*/ - u8 num_ring_tx; /* total number of Tx rings in vector */ u8 num_ring_rx; /* total number of Rx rings in vector */ - char name[ICE_INT_NAME_STR_LEN]; + u8 num_ring_tx; /* total number of Tx rings in vector */ + u8 itr_countdown; /* when 0 should adjust adaptive ITR */ /* in usecs, need to use ice_intrl_to_usecs_reg() before writing this * value to the device */ u8 intrl; + + struct napi_struct napi; + + struct ice_ring_container rx; + struct ice_ring_container tx; + + cpumask_t affinity_mask; + struct irq_affinity_notify affinity_notify; + + char name[ICE_INT_NAME_STR_LEN]; } ____cacheline_internodealigned_in_smp; enum ice_pf_flags { @@ -285,7 +322,11 @@ enum ice_pf_flags { ICE_FLAG_RSS_ENA, ICE_FLAG_SRIOV_ENA, ICE_FLAG_SRIOV_CAPABLE, + ICE_FLAG_DCB_CAPABLE, + ICE_FLAG_DCB_ENA, ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, + ICE_FLAG_DISABLE_FW_LLDP, + ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */ ICE_PF_FLAGS_NBITS /* must be last */ }; @@ -324,8 +365,8 @@ struct ice_pf { u32 hw_oicr_idx; /* Other interrupt cause vector HW index */ u32 num_avail_hw_msix; /* remaining HW MSIX vectors left unclaimed */ u32 num_lan_msix; /* Total MSIX vectors for base driver */ - u16 num_lan_tx; /* num lan Tx queues setup */ - u16 num_lan_rx; /* num lan Rx queues setup */ + u16 num_lan_tx; /* num LAN Tx queues setup */ + u16 num_lan_rx; /* num LAN Rx queues setup */ u16 q_left_tx; /* remaining num Tx queues left unclaimed */ u16 q_left_rx; /* remaining num Rx queues left unclaimed */ u16 next_vsi; /* Next free slot in pf->vsi[] - 0-based! */ @@ -339,6 +380,9 @@ struct ice_pf { struct ice_hw_port_stats stats_prev; struct ice_hw hw; u8 stat_prev_loaded; /* has previous stats been loaded */ +#ifdef CONFIG_DCB + u16 dcbx_cap; +#endif /* CONFIG_DCB */ u32 tx_timeout_count; unsigned long tx_timeout_last_recovery; u32 tx_timeout_recovery_level; @@ -351,12 +395,13 @@ struct ice_netdev_priv { /** * ice_irq_dynamic_ena - Enable default interrupt generation settings - * @hw: pointer to hw struct - * @vsi: pointer to vsi struct, can be NULL + * @hw: pointer to HW struct + * @vsi: pointer to VSI struct, can be NULL * @q_vector: pointer to q_vector, can be NULL */ -static inline void ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi, - struct ice_q_vector *q_vector) +static inline void +ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi, + struct ice_q_vector *q_vector) { u32 vector = (vsi && q_vector) ? 
vsi->hw_base_vector + q_vector->v_idx : ((struct ice_pf *)hw->back)->hw_oicr_idx; @@ -374,12 +419,6 @@ static inline void ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi, wr32(hw, GLINT_DYN_CTL(vector), val); } -static inline void ice_vsi_set_tc_cfg(struct ice_vsi *vsi) -{ - vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS; - vsi->tc_cfg.numtc = 1; -} - void ice_set_ethtool_ops(struct net_device *netdev); int ice_up(struct ice_vsi *vsi); int ice_down(struct ice_vsi *vsi); @@ -388,5 +427,9 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size); void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size); void ice_print_link_msg(struct ice_vsi *vsi, bool isup); void ice_napi_del(struct ice_vsi *vsi); +#ifdef CONFIG_DCB +int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked); +void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked); +#endif /* CONFIG_DCB */ #endif /* _ICE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 242c78469181..583f92d4db4c 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -62,7 +62,7 @@ struct ice_aqc_req_res { #define ICE_AQ_RES_NVM_WRITE_DFLT_TIMEOUT_MS 180000 #define ICE_AQ_RES_CHNG_LOCK_DFLT_TIMEOUT_MS 1000 #define ICE_AQ_RES_GLBL_LOCK_DFLT_TIMEOUT_MS 3000 - /* For SDP: pin id of the SDP */ + /* For SDP: pin ID of the SDP */ __le32 res_number; /* Status is only used for ICE_AQC_RES_ID_GLBL_LOCK */ __le16 status; @@ -747,6 +747,32 @@ struct ice_aqc_delete_elem { __le32 teid[1]; }; +/* Query Port ETS (indirect 0x040E) + * + * This indirect command is used to query port TC node configuration. + */ +struct ice_aqc_query_port_ets { + __le32 port_teid; + __le32 reserved; + __le32 addr_high; + __le32 addr_low; +}; + +struct ice_aqc_port_ets_elem { + u8 tc_valid_bits; + u8 reserved[3]; + /* 3 bits for UP per TC 0-7, 4th byte reserved */ + __le32 up2tc; + u8 tc_bw_share[8]; + __le32 port_eir_prof_id; + __le32 port_cir_prof_id; + /* 3 bits per Node priority to TC 0-7, 4th byte reserved */ + __le32 tc_node_prio; +#define ICE_TC_NODE_PRIO_S 0x4 + u8 reserved1[4]; + __le32 tc_node_teid[8]; /* Used for response, reserved in command */ +}; + /* Query Scheduler Resource Allocation (indirect 0x0412) * This indirect command retrieves the scheduler resources allocated by * EMP Firmware to the given PF. 
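A minimal sketch of how the new opcode and command/response structures pair
with the existing admin queue helpers; the wrapper name below is
hypothetical (the real one is expected to live in the DCB files this series
adds), and it assumes the port is addressed by the TEID of its root
scheduler node:

static enum ice_status
query_port_ets_sketch(struct ice_port_info *pi,
		      struct ice_aqc_port_ets_elem *buf, u16 buf_size)
{
	struct ice_aqc_query_port_ets *cmd;
	struct ice_aq_desc desc;

	if (!pi)
		return ICE_ERR_PARAM;

	cmd = &desc.params.port_ets;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_port_ets);
	/* assumption: the port TC tree is addressed via its root node TEID */
	cmd->port_teid = pi->root->info.node_teid;

	/* the indirect response lands in *buf as struct ice_aqc_port_ets_elem */
	return ice_aq_send_cmd(pi->hw, &desc, buf, buf_size, NULL);
}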
@@ -953,8 +979,9 @@ struct ice_aqc_set_phy_cfg_data { __le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */ __le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */ u8 caps; -#define ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY BIT(0) -#define ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY BIT(1) +#define ICE_AQ_PHY_ENA_VALID_MASK ICE_M(0xef, 0) +#define ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY BIT(0) +#define ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY BIT(1) #define ICE_AQ_PHY_ENA_LOW_POWER BIT(2) #define ICE_AQ_PHY_ENA_LINK BIT(3) #define ICE_AQ_PHY_ENA_AUTO_LINK_UPDT BIT(5) @@ -1023,7 +1050,7 @@ struct ice_aqc_get_link_status_data { u8 ext_info; #define ICE_AQ_LINK_PHY_TEMP_ALARM BIT(0) #define ICE_AQ_LINK_EXCESSIVE_ERRORS BIT(1) /* Excessive Link Errors */ - /* Port TX Suspended */ + /* Port Tx Suspended */ #define ICE_AQ_LINK_TX_S 2 #define ICE_AQ_LINK_TX_M (0x03 << ICE_AQ_LINK_TX_S) #define ICE_AQ_LINK_TX_ACTIVE 0 @@ -1119,9 +1146,9 @@ struct ice_aqc_nvm { }; /** - * Send to PF command (indirect 0x0801) id is only used by PF + * Send to PF command (indirect 0x0801) ID is only used by PF * - * Send to VF command (indirect 0x0802) id is only used by PF + * Send to VF command (indirect 0x0802) ID is only used by PF * */ struct ice_aqc_pf_vf_msg { @@ -1131,6 +1158,126 @@ struct ice_aqc_pf_vf_msg { __le32 addr_low; }; +/* Get LLDP MIB (indirect 0x0A00) + * Note: This is also used by the LLDP MIB Change Event (0x0A01) + * as the format is the same. + */ +struct ice_aqc_lldp_get_mib { + u8 type; +#define ICE_AQ_LLDP_MIB_TYPE_S 0 +#define ICE_AQ_LLDP_MIB_TYPE_M (0x3 << ICE_AQ_LLDP_MIB_TYPE_S) +#define ICE_AQ_LLDP_MIB_LOCAL 0 +#define ICE_AQ_LLDP_MIB_REMOTE 1 +#define ICE_AQ_LLDP_MIB_LOCAL_AND_REMOTE 2 +#define ICE_AQ_LLDP_BRID_TYPE_S 2 +#define ICE_AQ_LLDP_BRID_TYPE_M (0x3 << ICE_AQ_LLDP_BRID_TYPE_S) +#define ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID 0 +#define ICE_AQ_LLDP_BRID_TYPE_NON_TPMR 1 +/* Tx pause flags in the 0xA01 event use ICE_AQ_LLDP_TX_* */ +#define ICE_AQ_LLDP_TX_S 0x4 +#define ICE_AQ_LLDP_TX_M (0x03 << ICE_AQ_LLDP_TX_S) +#define ICE_AQ_LLDP_TX_ACTIVE 0 +#define ICE_AQ_LLDP_TX_SUSPENDED 1 +#define ICE_AQ_LLDP_TX_FLUSHED 3 +/* The following bytes are reserved for the Get LLDP MIB command (0x0A00) + * and in the LLDP MIB Change Event (0x0A01). They are valid for the + * Get LLDP MIB (0x0A00) response only. + */ + u8 reserved1; + __le16 local_len; + __le16 remote_len; + u8 reserved2[2]; + __le32 addr_high; + __le32 addr_low; +}; + +/* Configure LLDP MIB Change Event (direct 0x0A01) */ +/* For MIB Change Event use ice_aqc_lldp_get_mib structure above */ +struct ice_aqc_lldp_set_mib_change { + u8 command; +#define ICE_AQ_LLDP_MIB_UPDATE_ENABLE 0x0 +#define ICE_AQ_LLDP_MIB_UPDATE_DIS 0x1 + u8 reserved[15]; +}; + +/* Stop LLDP (direct 0x0A05) */ +struct ice_aqc_lldp_stop { + u8 command; +#define ICE_AQ_LLDP_AGENT_STATE_MASK BIT(0) +#define ICE_AQ_LLDP_AGENT_STOP 0x0 +#define ICE_AQ_LLDP_AGENT_SHUTDOWN ICE_AQ_LLDP_AGENT_STATE_MASK +#define ICE_AQ_LLDP_AGENT_PERSIST_DIS BIT(1) + u8 reserved[15]; +}; + +/* Start LLDP (direct 0x0A06) */ +struct ice_aqc_lldp_start { + u8 command; +#define ICE_AQ_LLDP_AGENT_START BIT(0) +#define ICE_AQ_LLDP_AGENT_PERSIST_ENA BIT(1) + u8 reserved[15]; +}; + +/* Get CEE DCBX Oper Config (0x0A07) + * The command uses the generic descriptor struct and + * returns the struct below as an indirect response. 
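+ *
+ * For example, the operational iSCSI app priority could be extracted
+ * from a response with the masks defined below:
+ *
+ *	prio = (le16_to_cpu(resp->oper_app_prio) &
+ *		ICE_AQC_CEE_APP_ISCSI_M) >> ICE_AQC_CEE_APP_ISCSI_S;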
+ */ +struct ice_aqc_get_cee_dcb_cfg_resp { + u8 oper_num_tc; + u8 oper_prio_tc[4]; + u8 oper_tc_bw[8]; + u8 oper_pfc_en; + __le16 oper_app_prio; +#define ICE_AQC_CEE_APP_FCOE_S 0 +#define ICE_AQC_CEE_APP_FCOE_M (0x7 << ICE_AQC_CEE_APP_FCOE_S) +#define ICE_AQC_CEE_APP_ISCSI_S 3 +#define ICE_AQC_CEE_APP_ISCSI_M (0x7 << ICE_AQC_CEE_APP_ISCSI_S) +#define ICE_AQC_CEE_APP_FIP_S 8 +#define ICE_AQC_CEE_APP_FIP_M (0x7 << ICE_AQC_CEE_APP_FIP_S) + __le32 tlv_status; +#define ICE_AQC_CEE_PG_STATUS_S 0 +#define ICE_AQC_CEE_PG_STATUS_M (0x7 << ICE_AQC_CEE_PG_STATUS_S) +#define ICE_AQC_CEE_PFC_STATUS_S 3 +#define ICE_AQC_CEE_PFC_STATUS_M (0x7 << ICE_AQC_CEE_PFC_STATUS_S) +#define ICE_AQC_CEE_FCOE_STATUS_S 8 +#define ICE_AQC_CEE_FCOE_STATUS_M (0x7 << ICE_AQC_CEE_FCOE_STATUS_S) +#define ICE_AQC_CEE_ISCSI_STATUS_S 11 +#define ICE_AQC_CEE_ISCSI_STATUS_M (0x7 << ICE_AQC_CEE_ISCSI_STATUS_S) +#define ICE_AQC_CEE_FIP_STATUS_S 16 +#define ICE_AQC_CEE_FIP_STATUS_M (0x7 << ICE_AQC_CEE_FIP_STATUS_S) + u8 reserved[12]; +}; + +/* Set Local LLDP MIB (indirect 0x0A08) + * Used to replace the local MIB of a given LLDP agent. e.g. DCBx + */ +struct ice_aqc_lldp_set_local_mib { + u8 type; +#define SET_LOCAL_MIB_TYPE_DCBX_M BIT(0) +#define SET_LOCAL_MIB_TYPE_LOCAL_MIB 0 +#define SET_LOCAL_MIB_TYPE_CEE_M BIT(1) +#define SET_LOCAL_MIB_TYPE_CEE_WILLING 0 +#define SET_LOCAL_MIB_TYPE_CEE_NON_WILLING SET_LOCAL_MIB_TYPE_CEE_M + u8 reserved0; + __le16 length; + u8 reserved1[4]; + __le32 addr_high; + __le32 addr_low; +}; + +/* Stop/Start LLDP Agent (direct 0x0A09) + * Used for stopping/starting specific LLDP agent. e.g. DCBx. + * The same structure is used for the response, with the command field + * being used as the status field. + */ +struct ice_aqc_lldp_stop_start_specific_agent { + u8 command; +#define ICE_AQC_START_STOP_AGENT_M BIT(0) +#define ICE_AQC_START_STOP_AGENT_STOP_DCBX 0 +#define ICE_AQC_START_STOP_AGENT_START_DCBX ICE_AQC_START_STOP_AGENT_M + u8 reserved[15]; +}; + /* Get/Set RSS key (indirect 0x0B04/0x0B02) */ struct ice_aqc_get_set_rss_key { #define ICE_AQC_GSET_RSS_KEY_VSI_VALID BIT(15) @@ -1185,7 +1332,7 @@ struct ice_aqc_get_set_rss_lut { __le32 addr_low; }; -/* Add TX LAN Queues (indirect 0x0C30) */ +/* Add Tx LAN Queues (indirect 0x0C30) */ struct ice_aqc_add_txqs { u8 num_qgrps; u8 reserved[3]; @@ -1194,7 +1341,7 @@ struct ice_aqc_add_txqs { __le32 addr_low; }; -/* This is the descriptor of each queue entry for the Add TX LAN Queues +/* This is the descriptor of each queue entry for the Add Tx LAN Queues * command (0x0C30). Only used within struct ice_aqc_add_tx_qgrp. */ struct ice_aqc_add_txqs_perq { @@ -1206,7 +1353,7 @@ struct ice_aqc_add_txqs_perq { struct ice_aqc_txsched_elem info; }; -/* The format of the command buffer for Add TX LAN Queues (0x0C30) +/* The format of the command buffer for Add Tx LAN Queues (0x0C30) * is an array of the following structs. Please note that the length of * each struct ice_aqc_add_tx_qgrp is variable due * to the variable number of queues in each group! @@ -1218,7 +1365,7 @@ struct ice_aqc_add_tx_qgrp { struct ice_aqc_add_txqs_perq txqs[1]; }; -/* Disable TX LAN Queues (indirect 0x0C31) */ +/* Disable Tx LAN Queues (indirect 0x0C31) */ struct ice_aqc_dis_txqs { u8 cmd_type; #define ICE_AQC_Q_DIS_CMD_S 0 @@ -1240,7 +1387,7 @@ struct ice_aqc_dis_txqs { __le32 addr_low; }; -/* The buffer for Disable TX LAN Queues (indirect 0x0C31) +/* The buffer for Disable Tx LAN Queues (indirect 0x0C31) * contains the following structures, arrayed one after the * other. 
* Note: Since the q_id is 16 bits wide, if the @@ -1387,8 +1534,15 @@ struct ice_aq_desc { struct ice_aqc_get_topo get_topo; struct ice_aqc_sched_elem_cmd sched_elem_cmd; struct ice_aqc_query_txsched_res query_sched_res; + struct ice_aqc_query_port_ets port_ets; struct ice_aqc_nvm nvm; struct ice_aqc_pf_vf_msg virt; + struct ice_aqc_lldp_get_mib lldp_get_mib; + struct ice_aqc_lldp_set_mib_change lldp_set_event; + struct ice_aqc_lldp_stop lldp_stop; + struct ice_aqc_lldp_start lldp_start; + struct ice_aqc_lldp_set_local_mib lldp_set_mib; + struct ice_aqc_lldp_stop_start_specific_agent lldp_agent_ctrl; struct ice_aqc_get_set_rss_lut get_set_rss_lut; struct ice_aqc_get_set_rss_key get_set_rss_key; struct ice_aqc_add_txqs add_txqs; @@ -1421,6 +1575,8 @@ struct ice_aq_desc { /* error codes */ enum ice_aq_err { ICE_AQ_RC_OK = 0, /* Success */ + ICE_AQ_RC_EPERM = 1, /* Operation not permitted */ + ICE_AQ_RC_ENOENT = 2, /* No such element */ ICE_AQ_RC_ENOMEM = 9, /* Out of memory */ ICE_AQ_RC_EBUSY = 12, /* Device or resource busy */ ICE_AQ_RC_EEXIST = 13, /* Object already exists */ @@ -1473,6 +1629,7 @@ enum ice_adminq_opc { ice_aqc_opc_get_sched_elems = 0x0404, ice_aqc_opc_suspend_sched_elems = 0x0409, ice_aqc_opc_resume_sched_elems = 0x040A, + ice_aqc_opc_query_port_ets = 0x040E, ice_aqc_opc_delete_sched_elems = 0x040F, ice_aqc_opc_query_sched_res = 0x0412, @@ -1490,6 +1647,14 @@ enum ice_adminq_opc { /* PF/VF mailbox commands */ ice_mbx_opc_send_msg_to_pf = 0x0801, ice_mbx_opc_send_msg_to_vf = 0x0802, + /* LLDP commands */ + ice_aqc_opc_lldp_get_mib = 0x0A00, + ice_aqc_opc_lldp_set_mib_change = 0x0A01, + ice_aqc_opc_lldp_stop = 0x0A05, + ice_aqc_opc_lldp_start = 0x0A06, + ice_aqc_opc_get_cee_dcb_cfg = 0x0A07, + ice_aqc_opc_lldp_set_local_mib = 0x0A08, + ice_aqc_opc_lldp_stop_start_specific_agent = 0x0A09, /* RSS commands */ ice_aqc_opc_set_rss_key = 0x0B02, @@ -1497,7 +1662,7 @@ enum ice_adminq_opc { ice_aqc_opc_get_rss_key = 0x0B04, ice_aqc_opc_get_rss_lut = 0x0B05, - /* TX queue handling commands/events */ + /* Tx queue handling commands/events */ ice_aqc_opc_add_txqs = 0x0C30, ice_aqc_opc_dis_txqs = 0x0C31, diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 63f003441300..2937c6be1aee 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -31,7 +31,7 @@ * @hw: pointer to the HW structure * * This function sets the MAC type of the adapter based on the - * vendor ID and device ID stored in the hw structure. + * vendor ID and device ID stored in the HW structure. */ static enum ice_status ice_set_mac_type(struct ice_hw *hw) { @@ -77,7 +77,7 @@ enum ice_status ice_clear_pf_cfg(struct ice_hw *hw) /** * ice_aq_manage_mac_read - manage MAC address read command - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * @buf: a virtual buffer to hold the manage MAC read response * @buf_size: Size of the virtual buffer * @cd: pointer to command details structure or NULL @@ -262,7 +262,7 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi) * * Get Link Status (0x607). Returns the link status of the adapter. 
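+ *
+ * This helper is made non-static below so that other parts of the driver
+ * can refresh the cached link state directly; a call looks roughly like:
+ *
+ *	status = ice_aq_get_link_info(pi, true, NULL, NULL);
+ *	if (!status)
+ *		link_up = pi->phy.link_info.link_info & ICE_AQ_LINK_UP;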
 */
-static enum ice_status
+enum ice_status
 ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
 		     struct ice_link_status *link, struct ice_sq_cd *cd)
 {
@@ -331,7 +331,7 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
 	/* flag cleared so calling functions don't call AQ again */
 	pi->phy.get_link_info = false;
 
-	return status;
+	return 0;
 }
 
 /**
@@ -358,22 +358,22 @@ static void ice_init_flex_flags(struct ice_hw *hw, enum ice_rxdid prof_id)
 	 */
 	case ICE_RXDID_FLEX_NIC:
 	case ICE_RXDID_FLEX_NIC_2:
-		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_FRG,
-				   ICE_RXFLG_UDP_GRE, ICE_RXFLG_PKT_DSI,
-				   ICE_RXFLG_FIN, idx++);
+		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_PKT_FRG,
+				   ICE_FLG_UDP_GRE, ICE_FLG_PKT_DSI,
+				   ICE_FLG_FIN, idx++);
 		/* flex flag 1 is not used for flexi-flag programming, skipping
 		 * these four FLG64 bits.
 		 */
-		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_SYN, ICE_RXFLG_RST,
-				   ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx++);
-		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_DSI,
-				   ICE_RXFLG_PKT_DSI, ICE_RXFLG_EVLAN_x8100,
-				   ICE_RXFLG_EVLAN_x9100, idx++);
-		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_VLAN_x8100,
-				   ICE_RXFLG_TNL_VLAN, ICE_RXFLG_TNL_MAC,
-				   ICE_RXFLG_TNL0, idx++);
-		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_TNL1, ICE_RXFLG_TNL2,
-				   ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx);
+		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_SYN, ICE_FLG_RST,
+				   ICE_FLG_PKT_DSI, ICE_FLG_PKT_DSI, idx++);
+		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_PKT_DSI,
+				   ICE_FLG_PKT_DSI, ICE_FLG_EVLAN_x8100,
+				   ICE_FLG_EVLAN_x9100, idx++);
+		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_VLAN_x8100,
+				   ICE_FLG_TNL_VLAN, ICE_FLG_TNL_MAC,
+				   ICE_FLG_TNL0, idx++);
+		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_TNL1, ICE_FLG_TNL2,
+				   ICE_FLG_PKT_DSI, ICE_FLG_PKT_DSI, idx);
 		break;
 
 	default:
@@ -418,7 +418,7 @@ static void ice_init_flex_flds(struct ice_hw *hw, enum ice_rxdid prof_id)
 
 /**
  * ice_init_fltr_mgmt_struct - initializes filter management list and locks
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
  */
 static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
 {
@@ -438,7 +438,7 @@ static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
 
 /**
  * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
  */
 static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
 {
@@ -477,7 +477,7 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
 
 /**
  * ice_cfg_fw_log - configure FW logging
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
  * @enable: enable certain FW logging events if true, disable all if false
  *
  * This function enables/disables the FW logging via Rx CQ events and a UART
@@ -626,7 +626,7 @@ out:
 
 /**
  * ice_output_fw_log
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
  * @desc: pointer to the AQ message descriptor
  * @buf: pointer to the buffer accompanying the AQ message
  *
@@ -642,7 +642,7 @@ void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
 
 /**
  * ice_get_itr_intrl_gran - determine int/intrl granularity
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
  *
  * Determines the itr/intrl granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
@@ -731,7 +731,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw) goto err_unroll_cqinit; } - /* set the back pointer to hw */ + /* set the back pointer to HW */ hw->port_info->hw = hw; /* Initialize port_info struct with switch configuration data */ @@ -988,7 +988,7 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req) * @ice_rxq_ctx: pointer to the rxq context * @rxq_index: the index of the Rx queue * - * Copies rxq context from dense structure to hw register space + * Copies rxq context from dense structure to HW register space */ static enum ice_status ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index) @@ -1001,7 +1001,7 @@ ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index) if (rxq_index > QRX_CTRL_MAX_INDEX) return ICE_ERR_PARAM; - /* Copy each dword separately to hw */ + /* Copy each dword separately to HW */ for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) { wr32(hw, QRX_CONTEXT(i, rxq_index), *((u32 *)(ice_rxq_ctx + (i * sizeof(u32))))); @@ -1045,7 +1045,7 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = { * @rxq_index: the index of the Rx queue * * Converts rxq context from sparse to dense structure and then writes - * it to hw register space + * it to HW register space */ enum ice_status ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx, @@ -1100,8 +1100,9 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = { * * Dumps debug log about control command with descriptor contents. */ -void ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc, - void *buf, u16 buf_len) +void +ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc, void *buf, + u16 buf_len) { struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc; u16 len; @@ -1143,7 +1144,7 @@ void ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc, /** * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * @desc: descriptor describing the command * @buf: buffer to use for indirect commands (NULL for direct commands) * @buf_size: size of buffer for indirect commands (0 for direct commands) @@ -1160,7 +1161,7 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf, /** * ice_aq_get_fw_ver - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * @cd: pointer to command details structure or NULL * * Get the firmware version (0x0001) from the admin queue commands @@ -1194,7 +1195,7 @@ enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd) /** * ice_aq_q_shutdown - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * @unloading: is the driver unloading itself * * Tell the Firmware that we're shutting down the AdminQ and whether @@ -1217,8 +1218,8 @@ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading) /** * ice_aq_req_res - * @hw: pointer to the hw struct - * @res: resource id + * @hw: pointer to the HW struct + * @res: resource ID * @access: access type * @sdp_number: resource number * @timeout: the maximum time in ms that the driver may hold the resource @@ -1303,8 +1304,8 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res, /** * ice_aq_release_res - * @hw: pointer to the hw struct - * @res: resource id + * @hw: pointer to the HW struct + * @res: resource ID * @sdp_number: resource number * @cd: pointer to command details structure or NULL * @@ -1330,7 +1331,7 @@ ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number, 
/** * ice_acquire_res * @hw: pointer to the HW structure - * @res: resource id + * @res: resource ID * @access: access type (read or write) * @timeout: timeout in milliseconds * @@ -1392,7 +1393,7 @@ ice_acquire_res_exit: /** * ice_release_res * @hw: pointer to the HW structure - * @res: resource id + * @res: resource ID * * This function will release a resource using the proper Admin Command. */ @@ -1404,7 +1405,7 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res) status = ice_aq_release_res(hw, res, 0, NULL); /* there are some rare cases when trying to release the resource - * results in an admin Q timeout, so handle them correctly + * results in an admin queue timeout, so handle them correctly */ while ((status == ICE_ERR_AQ_TIMEOUT) && (total_delay < hw->adminq.sq_cmd_timeout)) { @@ -1415,13 +1416,15 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res) } /** - * ice_get_guar_num_vsi - determine number of guar VSI for a PF - * @hw: pointer to the hw structure + * ice_get_num_per_func - determine number of resources per PF + * @hw: pointer to the HW structure + * @max: value to be evenly split between each PF * * Determine the number of valid functions by going through the bitmap returned - * from parsing capabilities and use this to calculate the number of VSI per PF. + * from parsing capabilities and use this to calculate the number of resources + * per PF based on the max value passed in. */ -static u32 ice_get_guar_num_vsi(struct ice_hw *hw) +static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max) { u8 funcs; @@ -1432,12 +1435,12 @@ static u32 ice_get_guar_num_vsi(struct ice_hw *hw) if (!funcs) return 0; - return ICE_MAX_VSI / funcs; + return max / funcs; } /** * ice_parse_caps - parse function/device capabilities - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * @buf: pointer to a buffer containing function/device capability records * @cap_count: number of capability records in the list * @opc: type of capabilities list to parse @@ -1512,7 +1515,8 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count, "HW caps: Dev.VSI cnt = %d\n", dev_p->num_vsi_allocd_to_host); } else if (func_p) { - func_p->guar_num_vsi = ice_get_guar_num_vsi(hw); + func_p->guar_num_vsi = + ice_get_num_per_func(hw, ICE_MAX_VSI); ice_debug(hw, ICE_DBG_INIT, "HW caps: Func.VSI cnt = %d\n", number); @@ -1578,7 +1582,7 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count, /** * ice_aq_discover_caps - query function/device capabilities - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * @buf: a virtual buffer to hold the capabilities * @buf_size: Size of the virtual buffer * @cap_count: cap count needed if AQ err==ENOMEM @@ -1617,8 +1621,8 @@ ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, * @hw: pointer to the hardware structure * @opc: capabilities type to discover - pass in the command opcode */ -static enum ice_status ice_discover_caps(struct ice_hw *hw, - enum ice_adminq_opc opc) +static enum ice_status +ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc) { enum ice_status status; u32 cap_count; @@ -1677,7 +1681,7 @@ enum ice_status ice_get_caps(struct ice_hw *hw) /** * ice_aq_manage_mac_write - manage MAC address write command - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address * @flags: flags to control write behavior * @cd: pointer to command details structure or NULL @@ -1705,7 +1709,7 @@ 
ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, /** * ice_aq_clear_pxe_mode - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * * Tell the firmware that the driver is taking over from PXE (0x0110). */ @@ -1721,7 +1725,7 @@ static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw) /** * ice_clear_pxe_mode - clear pxe operations mode - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * * Make sure all PXE mode settings are cleared, including things * like descriptor fetch/write-back mode. @@ -1737,10 +1741,10 @@ void ice_clear_pxe_mode(struct ice_hw *hw) * @phy_type_low: lower part of phy_type * @phy_type_high: higher part of phy_type * - * This helper function will convert an entry in phy type structure + * This helper function will convert an entry in PHY type structure * [phy_type_low, phy_type_high] to its corresponding link speed. * Note: In the structure of [phy_type_low, phy_type_high], there should - * be one bit set, as this function will convert one phy type to its + * be one bit set, as this function will convert one PHY type to its * speed. * If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned * If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned @@ -1910,7 +1914,7 @@ ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high, /** * ice_aq_set_phy_cfg - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * @lport: logical port number * @cfg: structure with PHY configuration data to be set * @cd: pointer to command details structure or NULL @@ -1929,6 +1933,15 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport, if (!cfg) return ICE_ERR_PARAM; + /* Ensure that only valid bits of cfg->caps can be turned on. */ + if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) { + ice_debug(hw, ICE_DBG_PHY, + "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n", + cfg->caps); + + cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK; + } + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg); desc.params.set_phy.lport_num = lport; desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); @@ -2016,7 +2029,7 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) if (!pcaps) return ICE_ERR_NO_MEMORY; - /* Get the current phy config */ + /* Get the current PHY config */ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, NULL); if (status) { @@ -2027,8 +2040,10 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) /* clear the old pause settings */ cfg.caps = pcaps->caps & ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE | ICE_AQC_PHY_EN_RX_LINK_PAUSE); + /* set the new capabilities */ cfg.caps |= pause_mask; + /* If the capabilities have changed, then set the new config */ if (cfg.caps != pcaps->caps) { int retry_count, retry_max = 10; @@ -2136,6 +2151,32 @@ ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, } /** + * ice_aq_set_event_mask + * @hw: pointer to the HW struct + * @port_num: port number of the physical function + * @mask: event mask to be set + * @cd: pointer to command details structure or NULL + * + * Set event mask (0x0613) + */ +enum ice_status +ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask, + struct ice_sq_cd *cd) +{ + struct ice_aqc_set_event_mask *cmd; + struct ice_aq_desc desc; + + cmd = &desc.params.set_event_mask; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask); + + cmd->lport_num = port_num; + + cmd->event_mask = cpu_to_le16(mask); + return ice_aq_send_cmd(hw, &desc, NULL, 0, 
cd); +} + +/** * ice_aq_set_port_id_led * @pi: pointer to the port information * @is_orig_mode: is this LED set to original mode (by the net-list) @@ -2297,7 +2338,7 @@ ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type, /** * __ice_aq_get_set_rss_key - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * @vsi_id: VSI FW index * @key: pointer to key info struct * @set: set true to set the key, false to get the key @@ -2332,7 +2373,7 @@ ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id, /** * ice_aq_get_rss_key - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * @vsi_handle: software VSI handle * @key: pointer to key info struct * @@ -2351,7 +2392,7 @@ ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle, /** * ice_aq_set_rss_key - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * @vsi_handle: software VSI handle * @keys: pointer to key info struct * @@ -2436,7 +2477,7 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, * @num_qgrps: number of groups in the list * @qg_list: the list of groups to disable * @buf_size: the total size of the qg_list buffer in bytes - * @rst_src: if called due to reset, specifies the RST source + * @rst_src: if called due to reset, specifies the reset source * @vmvf_num: the relative VM or VF number that is undergoing the reset * @cd: pointer to command details structure or NULL * @@ -2476,7 +2517,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, break; case ICE_VF_RESET: cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET; - /* In this case, FW expects vmvf_num to be absolute VF id */ + /* In this case, FW expects vmvf_num to be absolute VF ID */ cmd->vmvf_and_timeout |= cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) & ICE_AQC_Q_DIS_VMVF_NUM_M); @@ -2534,8 +2575,8 @@ do_aq: * @dest_ctx: the context to be written to * @ce_info: a description of the struct to be filled */ -static void ice_write_byte(u8 *src_ctx, u8 *dest_ctx, - const struct ice_ctx_ele *ce_info) +static void +ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) { u8 src_byte, dest_byte, mask; u8 *from, *dest; @@ -2573,8 +2614,8 @@ static void ice_write_byte(u8 *src_ctx, u8 *dest_ctx, * @dest_ctx: the context to be written to * @ce_info: a description of the struct to be filled */ -static void ice_write_word(u8 *src_ctx, u8 *dest_ctx, - const struct ice_ctx_ele *ce_info) +static void +ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) { u16 src_word, mask; __le16 dest_word; @@ -2616,8 +2657,8 @@ static void ice_write_word(u8 *src_ctx, u8 *dest_ctx, * @dest_ctx: the context to be written to * @ce_info: a description of the struct to be filled */ -static void ice_write_dword(u8 *src_ctx, u8 *dest_ctx, - const struct ice_ctx_ele *ce_info) +static void +ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) { u32 src_dword, mask; __le32 dest_dword; @@ -2667,8 +2708,8 @@ static void ice_write_dword(u8 *src_ctx, u8 *dest_ctx, * @dest_ctx: the context to be written to * @ce_info: a description of the struct to be filled */ -static void ice_write_qword(u8 *src_ctx, u8 *dest_ctx, - const struct ice_ctx_ele *ce_info) +static void +ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) { u64 src_qword, mask; __le64 dest_qword; @@ -2753,13 +2794,13 @@ ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) * ice_ena_vsi_txq * @pi: port information structure * @vsi_handle: software VSI handle - * @tc: tc number + 
* @tc: TC number + * @num_qgrps: Number of added queue groups + * @buf: list of queue groups to be added + * @buf_size: size of buffer for indirect command + * @cd: pointer to command details structure or NULL + * - * This function adds one lan q + * This function adds one LAN queue */ enum ice_status ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps, @@ -2803,11 +2844,11 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps, * Bit 5-6. * - Bit 7 is reserved. * Without setting the generic section as valid in valid_sections, the - * Admin Q command will fail with error code ICE_AQ_RC_EINVAL. + * Admin queue command will fail with error code ICE_AQ_RC_EINVAL. */ buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC; - /* add the lan q */ + /* add the LAN queue */ status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd); if (status) { ice_debug(hw, ICE_DBG_SCHED, "enable Q %d failed %d\n", @@ -2819,7 +2860,7 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps, node.node_teid = buf->txqs[0].q_teid; node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF; - /* add a leaf node into schduler tree q layer */ + /* add a leaf node into scheduler tree queue layer */ status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node); ena_txq_exit: @@ -2833,7 +2874,7 @@ ena_txq_exit: * @num_queues: number of queues * @q_ids: pointer to the q_id array * @q_teids: pointer to queue node teids - * @rst_src: if called due to reset, specifies the RST source + * @rst_src: if called due to reset, specifies the reset source * @vmvf_num: the relative VM or VF number that is undergoing the reset * @cd: pointer to command details structure or NULL * @@ -2884,12 +2925,12 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids, } /** - * ice_cfg_vsi_qs - configure the new/exisiting VSI queues + * ice_cfg_vsi_qs - configure the new/existing VSI queues * @pi: port information structure * @vsi_handle: software VSI handle * @tc_bitmap: TC bitmap * @maxqs: max queues array per TC - * @owner: lan or rdma + * @owner: LAN or RDMA * * This function adds/updates the VSI queues per TC. */ @@ -2908,7 +2949,7 @@ ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, mutex_lock(&pi->sched_lock); - for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) { + ice_for_each_traffic_class(i) { /* configuration is possible only if TC node is present */ if (!ice_sched_get_tc_node(pi, i)) continue; @@ -2924,13 +2965,13 @@ ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, } /** - * ice_cfg_vsi_lan - configure VSI lan queues + * ice_cfg_vsi_lan - configure VSI LAN queues * @pi: port information structure * @vsi_handle: software VSI handle * @tc_bitmap: TC bitmap - * @max_lanqs: max lan queues array per TC + * @max_lanqs: max LAN queues array per TC * - * This function adds/updates the VSI lan queues per TC. + * This function adds/updates the VSI LAN queues per TC. */ enum ice_status ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, @@ -2942,7 +2983,7 @@ ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, /** * ice_replay_pre_init - replay pre initialization - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * * Initializes required config data for VSI, FD, ACL, and RSS before replay.
*/ @@ -2966,7 +3007,7 @@ static enum ice_status ice_replay_pre_init(struct ice_hw *hw) /** * ice_replay_vsi - replay VSI configuration - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * @vsi_handle: driver VSI handle * * Restore all VSI configuration after reset. It is required to call this @@ -2993,7 +3034,7 @@ enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle) /** * ice_replay_post - post replay configuration cleanup - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * * Post replay cleanup. */ @@ -3012,8 +3053,9 @@ void ice_replay_post(struct ice_hw *hw) * @prev_stat: ptr to previous loaded stat value * @cur_stat: ptr to current stat value */ -void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg, - bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat) +void +ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg, + bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat) { u64 new_data; @@ -3043,8 +3085,9 @@ void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg, * @prev_stat: ptr to previous loaded stat value * @cur_stat: ptr to current stat value */ -void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, - u64 *prev_stat, u64 *cur_stat) +void +ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, + u64 *prev_stat, u64 *cur_stat) { u32 new_data; @@ -3063,3 +3106,28 @@ void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, /* to manage the potential roll-over */ *cur_stat = (new_data + BIT_ULL(32)) - *prev_stat; } + +/** + * ice_sched_query_elem - query element information from HW + * @hw: pointer to the HW struct + * @node_teid: node TEID to be queried + * @buf: buffer to element information + * + * This function queries HW element information + */ +enum ice_status +ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, + struct ice_aqc_get_elem *buf) +{ + u16 buf_size, num_elem_ret = 0; + enum ice_status status; + + buf_size = sizeof(*buf); + memset(buf, 0, buf_size); + buf->generic[0].node_teid = cpu_to_le32(node_teid); + status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret, + NULL); + if (status || num_elem_ret != 1) + ice_debug(hw, ICE_DBG_SCHED, "query element failed\n"); + return status; +} diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index d7c7c2ed8823..faefc45e4a1e 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -9,8 +9,8 @@ #include "ice_switch.h" #include <linux/avf/virtchnl.h> -void ice_debug_cq(struct ice_hw *hw, u32 mask, void *desc, void *buf, - u16 buf_len); +void +ice_debug_cq(struct ice_hw *hw, u32 mask, void *desc, void *buf, u16 buf_len); enum ice_status ice_init_hw(struct ice_hw *hw); void ice_deinit_hw(struct ice_hw *hw); enum ice_status ice_check_reset(struct ice_hw *hw); @@ -28,8 +28,8 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, enum ice_aq_res_access_type access, u32 timeout); void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res); enum ice_status ice_init_nvm(struct ice_hw *hw); -enum ice_status ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, - u16 *data); +enum ice_status +ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data); enum ice_status ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_aq_desc *desc, void *buf, u16 buf_size, @@ -89,6 +89,12 @@ enum ice_status ice_aq_set_link_restart_an(struct ice_port_info *pi, bool 
ena_link, struct ice_sq_cd *cd); enum ice_status +ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, + struct ice_link_status *link, struct ice_sq_cd *cd); +enum ice_status +ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask, + struct ice_sq_cd *cd); +enum ice_status ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode, struct ice_sq_cd *cd); @@ -106,8 +112,13 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps, enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle); void ice_replay_post(struct ice_hw *hw); void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf); -void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg, - bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat); -void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, - u64 *prev_stat, u64 *cur_stat); +void +ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg, + bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat); +void +ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, + u64 *prev_stat, u64 *cur_stat); +enum ice_status +ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, + struct ice_aqc_get_elem *buf); #endif /* _ICE_COMMON_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c index 2bf5e11f559a..cc8cb5fdcdc1 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.c +++ b/drivers/net/ethernet/intel/ice/ice_controlq.c @@ -51,7 +51,7 @@ static void ice_mailbox_init_regs(struct ice_hw *hw) /** * ice_check_sq_alive - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * @cq: pointer to the specific Control queue * * Returns true if Queue is enabled else false. @@ -287,7 +287,7 @@ ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue * - * Configure base address and length registers for the receive (event q) + * Configure base address and length registers for the receive (event queue) */ static enum ice_status ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) @@ -751,7 +751,7 @@ static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) /** * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ) - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * @cq: pointer to the specific Control queue * * Returns true if the firmware has processed all descriptors on the @@ -767,7 +767,7 @@ static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq) /** * ice_sq_send_cmd - send command to Control Queue (ATQ) - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * @cq: pointer to the specific Control queue * @desc: prefilled descriptor describing the command (non DMA mem) * @buf: buffer to use for indirect commands (or NULL for direct commands) @@ -962,7 +962,7 @@ void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode) /** * ice_clean_rq_elem - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * @cq: pointer to the specific Control queue * @e: event info from the receive descriptor, includes any buffers * @pending: number of events that could be left to process diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c new file mode 100644 index 000000000000..8bbf48e04a1c --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_dcb.c @@ -0,0 +1,1392 @@ +// SPDX-License-Identifier: 
GPL-2.0 +/* Copyright (c) 2019, Intel Corporation. */ + +#include "ice_common.h" +#include "ice_sched.h" +#include "ice_dcb.h" + +/** + * ice_aq_get_lldp_mib + * @hw: pointer to the HW struct + * @bridge_type: type of bridge requested + * @mib_type: Local, Remote or both Local and Remote MIBs + * @buf: pointer to the caller-supplied buffer to store the MIB block + * @buf_size: size of the buffer (in bytes) + * @local_len: length of the returned Local LLDP MIB + * @remote_len: length of the returned Remote LLDP MIB + * @cd: pointer to command details structure or NULL + * + * Requests the complete LLDP MIB (entire packet). (0x0A00) + */ +static enum ice_status +ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf, + u16 buf_size, u16 *local_len, u16 *remote_len, + struct ice_sq_cd *cd) +{ + struct ice_aqc_lldp_get_mib *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + cmd = &desc.params.lldp_get_mib; + + if (buf_size == 0 || !buf) + return ICE_ERR_PARAM; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_get_mib); + + cmd->type = mib_type & ICE_AQ_LLDP_MIB_TYPE_M; + cmd->type |= (bridge_type << ICE_AQ_LLDP_BRID_TYPE_S) & + ICE_AQ_LLDP_BRID_TYPE_M; + + desc.datalen = cpu_to_le16(buf_size); + + status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); + if (!status) { + if (local_len) + *local_len = le16_to_cpu(cmd->local_len); + if (remote_len) + *remote_len = le16_to_cpu(cmd->remote_len); + } + + return status; +} + +/** + * ice_aq_cfg_lldp_mib_change + * @hw: pointer to the HW struct + * @ena_update: Enable or Disable event posting + * @cd: pointer to command details structure or NULL + * + * Enable or Disable posting of an event on ARQ when LLDP MIB + * associated with the interface changes (0x0A01) + */ +enum ice_status +ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update, + struct ice_sq_cd *cd) +{ + struct ice_aqc_lldp_set_mib_change *cmd; + struct ice_aq_desc desc; + + cmd = &desc.params.lldp_set_event; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_mib_change); + + if (!ena_update) + cmd->command |= ICE_AQ_LLDP_MIB_UPDATE_DIS; + + return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); +} + +/** + * ice_aq_stop_lldp + * @hw: pointer to the HW struct + * @shutdown_lldp_agent: True if LLDP Agent needs to be Shutdown + * False if LLDP Agent needs to be Stopped + * @cd: pointer to command details structure or NULL + * + * Stop or Shutdown the embedded LLDP Agent (0x0A05) + */ +enum ice_status +ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, + struct ice_sq_cd *cd) +{ + struct ice_aqc_lldp_stop *cmd; + struct ice_aq_desc desc; + + cmd = &desc.params.lldp_stop; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_stop); + + if (shutdown_lldp_agent) + cmd->command |= ICE_AQ_LLDP_AGENT_SHUTDOWN; + + return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); +} + +/** + * ice_aq_start_lldp + * @hw: pointer to the HW struct + * @cd: pointer to command details structure or NULL + * + * Start the embedded LLDP Agent on all ports. 
(0x0A06) + */ +enum ice_status ice_aq_start_lldp(struct ice_hw *hw, struct ice_sq_cd *cd) +{ + struct ice_aqc_lldp_start *cmd; + struct ice_aq_desc desc; + + cmd = &desc.params.lldp_start; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_start); + + cmd->command = ICE_AQ_LLDP_AGENT_START; + + return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); +} + +/** + * ice_aq_set_lldp_mib - Set the LLDP MIB + * @hw: pointer to the HW struct + * @mib_type: Local, Remote or both Local and Remote MIBs + * @buf: pointer to the caller-supplied buffer to store the MIB block + * @buf_size: size of the buffer (in bytes) + * @cd: pointer to command details structure or NULL + * + * Set the LLDP MIB. (0x0A08) + */ +static enum ice_status +ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, + struct ice_sq_cd *cd) +{ + struct ice_aqc_lldp_set_local_mib *cmd; + struct ice_aq_desc desc; + + cmd = &desc.params.lldp_set_mib; + + if (buf_size == 0 || !buf) + return ICE_ERR_PARAM; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib); + + desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD); + desc.datalen = cpu_to_le16(buf_size); + + cmd->type = mib_type; + cmd->length = cpu_to_le16(buf_size); + + return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); +} + +/** + * ice_get_dcbx_status + * @hw: pointer to the HW struct + * + * Get the DCBX status from the Firmware + */ +u8 ice_get_dcbx_status(struct ice_hw *hw) +{ + u32 reg; + + reg = rd32(hw, PRTDCB_GENS); + return (u8)((reg & PRTDCB_GENS_DCBX_STATUS_M) >> + PRTDCB_GENS_DCBX_STATUS_S); +} + +/** + * ice_parse_ieee_ets_common_tlv + * @buf: Data buffer to be parsed for ETS CFG/REC data + * @ets_cfg: Container to store parsed data + * + * Parses the common data of IEEE 802.1Qaz ETS CFG/REC TLV + */ +static void +ice_parse_ieee_ets_common_tlv(u8 *buf, struct ice_dcb_ets_cfg *ets_cfg) +{ + u8 offset = 0; + int i; + + /* Priority Assignment Table (4 octets) + * Octets:| 1 | 2 | 3 | 4 | + * ----------------------------------------- + * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7| + * ----------------------------------------- + * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0| + * ----------------------------------------- + */ + for (i = 0; i < 4; i++) { + ets_cfg->prio_table[i * 2] = + ((buf[offset] & ICE_IEEE_ETS_PRIO_1_M) >> + ICE_IEEE_ETS_PRIO_1_S); + ets_cfg->prio_table[i * 2 + 1] = + ((buf[offset] & ICE_IEEE_ETS_PRIO_0_M) >> + ICE_IEEE_ETS_PRIO_0_S); + offset++; + } + + /* TC Bandwidth Table (8 octets) + * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | + * --------------------------------- + * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| + * --------------------------------- + * + * TSA Assignment Table (8 octets) + * Octets:| 9 | 10| 11| 12| 13| 14| 15| 16| + * --------------------------------- + * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| + * --------------------------------- + */ + ice_for_each_traffic_class(i) { + ets_cfg->tcbwtable[i] = buf[offset]; + ets_cfg->tsatable[i] = buf[ICE_MAX_TRAFFIC_CLASS + offset++]; + } +} + +/** + * ice_parse_ieee_etscfg_tlv + * @tlv: IEEE 802.1Qaz ETS CFG TLV + * @dcbcfg: Local store to update ETS CFG data + * + * Parses IEEE 802.1Qaz ETS CFG TLV + */ +static void +ice_parse_ieee_etscfg_tlv(struct ice_lldp_org_tlv *tlv, + struct ice_dcbx_cfg *dcbcfg) +{ + struct ice_dcb_ets_cfg *etscfg; + u8 *buf = tlv->tlvinfo; + + /* First Octet post subtype + * -------------------------- + * |will-|CBS | Re- | Max | + * |ing | |served| TCs | + * -------------------------- + * |1bit | 1bit|3 bits|3bits| + */ + etscfg = &dcbcfg->etscfg; + 
etscfg->willing = ((buf[0] & ICE_IEEE_ETS_WILLING_M) >> + ICE_IEEE_ETS_WILLING_S); + etscfg->cbs = ((buf[0] & ICE_IEEE_ETS_CBS_M) >> ICE_IEEE_ETS_CBS_S); + etscfg->maxtcs = ((buf[0] & ICE_IEEE_ETS_MAXTC_M) >> + ICE_IEEE_ETS_MAXTC_S); + + /* Begin parsing at Priority Assignment Table (offset 1 in buf) */ + ice_parse_ieee_ets_common_tlv(&buf[1], etscfg); +} + +/** + * ice_parse_ieee_etsrec_tlv + * @tlv: IEEE 802.1Qaz ETS REC TLV + * @dcbcfg: Local store to update ETS REC data + * + * Parses IEEE 802.1Qaz ETS REC TLV + */ +static void +ice_parse_ieee_etsrec_tlv(struct ice_lldp_org_tlv *tlv, + struct ice_dcbx_cfg *dcbcfg) +{ + u8 *buf = tlv->tlvinfo; + + /* Begin parsing at Priority Assignment Table (offset 1 in buf) */ + ice_parse_ieee_ets_common_tlv(&buf[1], &dcbcfg->etsrec); +} + +/** + * ice_parse_ieee_pfccfg_tlv + * @tlv: IEEE 802.1Qaz PFC CFG TLV + * @dcbcfg: Local store to update PFC CFG data + * + * Parses IEEE 802.1Qaz PFC CFG TLV + */ +static void +ice_parse_ieee_pfccfg_tlv(struct ice_lldp_org_tlv *tlv, + struct ice_dcbx_cfg *dcbcfg) +{ + u8 *buf = tlv->tlvinfo; + + /* ---------------------------------------- + * |will-|MBC | Re- | PFC | PFC Enable | + * |ing | |served| cap | | + * ----------------------------------------- + * |1bit | 1bit|2 bits|4bits| 1 octet | + */ + dcbcfg->pfc.willing = ((buf[0] & ICE_IEEE_PFC_WILLING_M) >> + ICE_IEEE_PFC_WILLING_S); + dcbcfg->pfc.mbc = ((buf[0] & ICE_IEEE_PFC_MBC_M) >> ICE_IEEE_PFC_MBC_S); + dcbcfg->pfc.pfccap = ((buf[0] & ICE_IEEE_PFC_CAP_M) >> + ICE_IEEE_PFC_CAP_S); + dcbcfg->pfc.pfcena = buf[1]; +} + +/** + * ice_parse_ieee_app_tlv + * @tlv: IEEE 802.1Qaz APP TLV + * @dcbcfg: Local store to update APP PRIO data + * + * Parses IEEE 802.1Qaz APP PRIO TLV + */ +static void +ice_parse_ieee_app_tlv(struct ice_lldp_org_tlv *tlv, + struct ice_dcbx_cfg *dcbcfg) +{ + u16 offset = 0; + u16 typelen; + int i = 0; + u16 len; + u8 *buf; + + typelen = ntohs(tlv->typelen); + len = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S); + buf = tlv->tlvinfo; + + /* Removing sizeof(ouisubtype) and reserved byte from len. + * Remaining len div 3 is number of APP TLVs. 
+ */ + len -= (sizeof(tlv->ouisubtype) + 1); + + /* Move offset to App Priority Table */ + offset++; + + /* Application Priority Table (3 octets) + * Octets:| 1 | 2 | 3 | + * ----------------------------------------- + * |Priority|Rsrvd| Sel | Protocol ID | + * ----------------------------------------- + * Bits:|23 21|20 19|18 16|15 0| + * ----------------------------------------- + */ + while (offset < len) { + dcbcfg->app[i].priority = ((buf[offset] & + ICE_IEEE_APP_PRIO_M) >> + ICE_IEEE_APP_PRIO_S); + dcbcfg->app[i].selector = ((buf[offset] & + ICE_IEEE_APP_SEL_M) >> + ICE_IEEE_APP_SEL_S); + dcbcfg->app[i].prot_id = (buf[offset + 1] << 0x8) | + buf[offset + 2]; + /* Move to next app */ + offset += 3; + i++; + if (i >= ICE_DCBX_MAX_APPS) + break; + } + + dcbcfg->numapps = i; +} + +/** + * ice_parse_ieee_tlv + * @tlv: IEEE 802.1Qaz TLV + * @dcbcfg: Local store to update ETS REC data + * + * Get the TLV subtype and send it to parsing function + * based on the subtype value + */ +static void +ice_parse_ieee_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) +{ + u32 ouisubtype; + u8 subtype; + + ouisubtype = ntohl(tlv->ouisubtype); + subtype = (u8)((ouisubtype & ICE_LLDP_TLV_SUBTYPE_M) >> + ICE_LLDP_TLV_SUBTYPE_S); + switch (subtype) { + case ICE_IEEE_SUBTYPE_ETS_CFG: + ice_parse_ieee_etscfg_tlv(tlv, dcbcfg); + break; + case ICE_IEEE_SUBTYPE_ETS_REC: + ice_parse_ieee_etsrec_tlv(tlv, dcbcfg); + break; + case ICE_IEEE_SUBTYPE_PFC_CFG: + ice_parse_ieee_pfccfg_tlv(tlv, dcbcfg); + break; + case ICE_IEEE_SUBTYPE_APP_PRI: + ice_parse_ieee_app_tlv(tlv, dcbcfg); + break; + default: + break; + } +} + +/** + * ice_parse_cee_pgcfg_tlv + * @tlv: CEE DCBX PG CFG TLV + * @dcbcfg: Local store to update ETS CFG data + * + * Parses CEE DCBX PG CFG TLV + */ +static void +ice_parse_cee_pgcfg_tlv(struct ice_cee_feat_tlv *tlv, + struct ice_dcbx_cfg *dcbcfg) +{ + struct ice_dcb_ets_cfg *etscfg; + u8 *buf = tlv->tlvinfo; + u16 offset = 0; + int i; + + etscfg = &dcbcfg->etscfg; + + if (tlv->en_will_err & ICE_CEE_FEAT_TLV_WILLING_M) + etscfg->willing = 1; + + etscfg->cbs = 0; + /* Priority Group Table (4 octets) + * Octets:| 1 | 2 | 3 | 4 | + * ----------------------------------------- + * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7| + * ----------------------------------------- + * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0| + * ----------------------------------------- + */ + for (i = 0; i < 4; i++) { + etscfg->prio_table[i * 2] = + ((buf[offset] & ICE_CEE_PGID_PRIO_1_M) >> + ICE_CEE_PGID_PRIO_1_S); + etscfg->prio_table[i * 2 + 1] = + ((buf[offset] & ICE_CEE_PGID_PRIO_0_M) >> + ICE_CEE_PGID_PRIO_0_S); + offset++; + } + + /* PG Percentage Table (8 octets) + * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | + * --------------------------------- + * |pg0|pg1|pg2|pg3|pg4|pg5|pg6|pg7| + * --------------------------------- + */ + ice_for_each_traffic_class(i) + etscfg->tcbwtable[i] = buf[offset++]; + + /* Number of TCs supported (1 octet) */ + etscfg->maxtcs = buf[offset]; +} + +/** + * ice_parse_cee_pfccfg_tlv + * @tlv: CEE DCBX PFC CFG TLV + * @dcbcfg: Local store to update PFC CFG data + * + * Parses CEE DCBX PFC CFG TLV + */ +static void +ice_parse_cee_pfccfg_tlv(struct ice_cee_feat_tlv *tlv, + struct ice_dcbx_cfg *dcbcfg) +{ + u8 *buf = tlv->tlvinfo; + + if (tlv->en_will_err & ICE_CEE_FEAT_TLV_WILLING_M) + dcbcfg->pfc.willing = 1; + + /* ------------------------ + * | PFC Enable | PFC TCs | + * ------------------------ + * | 1 octet | 1 octet | + */ + dcbcfg->pfc.pfcena = buf[0]; + dcbcfg->pfc.pfccap = buf[1]; +} + 
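The ETS and PG parsers above all rely on the same nibble layout: each octet of the Priority Assignment Table carries two entries, the even-indexed priority in the high nibble and the odd-indexed one in the low nibble. Here is a minimal standalone sketch of that unpacking, using illustrative mask values that mirror the ICE_IEEE_ETS_PRIO_0/1 defines; this is an editor's illustration, not part of the patch:

#include <stdio.h>
#include <stdint.h>

/* Illustrative masks mirroring ICE_IEEE_ETS_PRIO_{0,1}_{S,M}: a 3-bit
 * priority field at bit 0 and another at bit 4 of each octet.
 */
#define PRIO_0_S 0
#define PRIO_0_M (0x7 << PRIO_0_S)
#define PRIO_1_S 4
#define PRIO_1_M (0x7 << PRIO_1_S)

int main(void)
{
	/* Four octets describe the TC assignment of priorities 0..7 */
	uint8_t buf[4] = { 0x01, 0x23, 0x45, 0x67 };
	uint8_t prio_table[8];
	int i;

	for (i = 0; i < 4; i++) {
		/* high nibble first, as in ice_parse_ieee_ets_common_tlv() */
		prio_table[i * 2] = (buf[i] & PRIO_1_M) >> PRIO_1_S;
		prio_table[i * 2 + 1] = (buf[i] & PRIO_0_M) >> PRIO_0_S;
	}

	for (i = 0; i < 8; i++)
		printf("priority %d -> TC %d\n", i, prio_table[i]);
	return 0;
}

With the sample buffer this prints priority 0 -> TC 0 through priority 7 -> TC 7; the CEE PG parser performs the same walk with 4-bit (0xF) masks.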
+/** + * ice_parse_cee_app_tlv + * @tlv: CEE DCBX APP TLV + * @dcbcfg: Local store to update APP PRIO data + * + * Parses CEE DCBX APP PRIO TLV + */ +static void +ice_parse_cee_app_tlv(struct ice_cee_feat_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) +{ + u16 len, typelen, offset = 0; + struct ice_cee_app_prio *app; + u8 i; + + typelen = ntohs(tlv->hdr.typelen); + len = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S); + + dcbcfg->numapps = len / sizeof(*app); + if (!dcbcfg->numapps) + return; + if (dcbcfg->numapps > ICE_DCBX_MAX_APPS) + dcbcfg->numapps = ICE_DCBX_MAX_APPS; + + for (i = 0; i < dcbcfg->numapps; i++) { + u8 up, selector; + + app = (struct ice_cee_app_prio *)(tlv->tlvinfo + offset); + for (up = 0; up < ICE_MAX_USER_PRIORITY; up++) + if (app->prio_map & BIT(up)) + break; + + dcbcfg->app[i].priority = up; + + /* Get Selector from lower 2 bits, and convert to IEEE */ + selector = (app->upper_oui_sel & ICE_CEE_APP_SELECTOR_M); + switch (selector) { + case ICE_CEE_APP_SEL_ETHTYPE: + dcbcfg->app[i].selector = ICE_APP_SEL_ETHTYPE; + break; + case ICE_CEE_APP_SEL_TCPIP: + dcbcfg->app[i].selector = ICE_APP_SEL_TCPIP; + break; + default: + /* Keep selector as it is for unknown types */ + dcbcfg->app[i].selector = selector; + } + + dcbcfg->app[i].prot_id = ntohs(app->protocol); + /* Move to next app */ + offset += sizeof(*app); + } +} + +/** + * ice_parse_cee_tlv + * @tlv: CEE DCBX TLV + * @dcbcfg: Local store to update DCBX config data + * + * Get the TLV subtype and send it to parsing function + * based on the subtype value + */ +static void +ice_parse_cee_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) +{ + struct ice_cee_feat_tlv *sub_tlv; + u8 subtype, feat_tlv_count = 0; + u16 len, tlvlen, typelen; + u32 ouisubtype; + + ouisubtype = ntohl(tlv->ouisubtype); + subtype = (u8)((ouisubtype & ICE_LLDP_TLV_SUBTYPE_M) >> + ICE_LLDP_TLV_SUBTYPE_S); + /* Return if not CEE DCBX */ + if (subtype != ICE_CEE_DCBX_TYPE) + return; + + typelen = ntohs(tlv->typelen); + tlvlen = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S); + len = sizeof(tlv->typelen) + sizeof(ouisubtype) + + sizeof(struct ice_cee_ctrl_tlv); + /* Return if no CEE DCBX Feature TLVs */ + if (tlvlen <= len) + return; + + sub_tlv = (struct ice_cee_feat_tlv *)((char *)tlv + len); + while (feat_tlv_count < ICE_CEE_MAX_FEAT_TYPE) { + u16 sublen; + + typelen = ntohs(sub_tlv->hdr.typelen); + sublen = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S); + subtype = (u8)((typelen & ICE_LLDP_TLV_TYPE_M) >> + ICE_LLDP_TLV_TYPE_S); + switch (subtype) { + case ICE_CEE_SUBTYPE_PG_CFG: + ice_parse_cee_pgcfg_tlv(sub_tlv, dcbcfg); + break; + case ICE_CEE_SUBTYPE_PFC_CFG: + ice_parse_cee_pfccfg_tlv(sub_tlv, dcbcfg); + break; + case ICE_CEE_SUBTYPE_APP_PRI: + ice_parse_cee_app_tlv(sub_tlv, dcbcfg); + break; + default: + return; /* Invalid Sub-type return */ + } + feat_tlv_count++; + /* Move to next sub TLV */ + sub_tlv = (struct ice_cee_feat_tlv *) + ((char *)sub_tlv + sizeof(sub_tlv->hdr.typelen) + + sublen); + } +} + +/** + * ice_parse_org_tlv + * @tlv: Organization specific TLV + * @dcbcfg: Local store to update ETS REC data + * + * Currently IEEE 802.1Qaz and CEE DCBX TLVs are supported; any other + * organization-specific TLV is ignored + */ +static void +ice_parse_org_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) +{ + u32 ouisubtype; + u32 oui; + + ouisubtype = ntohl(tlv->ouisubtype); + oui = ((ouisubtype & ICE_LLDP_TLV_OUI_M) >> ICE_LLDP_TLV_OUI_S); + switch (oui) { + case ICE_IEEE_8021QAZ_OUI: + ice_parse_ieee_tlv(tlv, dcbcfg); +
break; + case ICE_CEE_DCBX_OUI: + ice_parse_cee_tlv(tlv, dcbcfg); + break; + default: + break; + } +} + +/** + * ice_lldp_to_dcb_cfg + * @lldpmib: LLDPDU to be parsed + * @dcbcfg: store for LLDPDU data + * + * Parse DCB configuration from the LLDPDU + */ +enum ice_status ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg) +{ + struct ice_lldp_org_tlv *tlv; + enum ice_status ret = 0; + u16 offset = 0; + u16 typelen; + u16 type; + u16 len; + + if (!lldpmib || !dcbcfg) + return ICE_ERR_PARAM; + + /* set to the start of LLDPDU */ + lldpmib += ETH_HLEN; + tlv = (struct ice_lldp_org_tlv *)lldpmib; + while (1) { + typelen = ntohs(tlv->typelen); + type = ((typelen & ICE_LLDP_TLV_TYPE_M) >> ICE_LLDP_TLV_TYPE_S); + len = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S); + offset += sizeof(typelen) + len; + + /* END TLV or beyond LLDPDU size */ + if (type == ICE_TLV_TYPE_END || offset > ICE_LLDPDU_SIZE) + break; + + switch (type) { + case ICE_TLV_TYPE_ORG: + ice_parse_org_tlv(tlv, dcbcfg); + break; + default: + break; + } + + /* Move to next TLV */ + tlv = (struct ice_lldp_org_tlv *) + ((char *)tlv + sizeof(tlv->typelen) + len); + } + + return ret; +} + +/** + * ice_aq_get_dcb_cfg + * @hw: pointer to the HW struct + * @mib_type: mib type for the query + * @bridgetype: bridge type for the query (remote) + * @dcbcfg: store for LLDPDU data + * + * Query DCB configuration from the firmware + */ +static enum ice_status +ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype, + struct ice_dcbx_cfg *dcbcfg) +{ + enum ice_status ret; + u8 *lldpmib; + + /* Allocate the LLDPDU */ + lldpmib = devm_kzalloc(ice_hw_to_dev(hw), ICE_LLDPDU_SIZE, GFP_KERNEL); + if (!lldpmib) + return ICE_ERR_NO_MEMORY; + + ret = ice_aq_get_lldp_mib(hw, bridgetype, mib_type, (void *)lldpmib, + ICE_LLDPDU_SIZE, NULL, NULL, NULL); + + if (!ret) + /* Parse LLDP MIB to get DCB configuration */ + ret = ice_lldp_to_dcb_cfg(lldpmib, dcbcfg); + + devm_kfree(ice_hw_to_dev(hw), lldpmib); + + return ret; +} + +/** + * ice_aq_start_stop_dcbx - Start/Stop DCBx service in FW + * @hw: pointer to the HW struct + * @start_dcbx_agent: True if DCBx Agent needs to be started + * False if DCBx Agent needs to be stopped + * @dcbx_agent_status: FW indicates back the DCBx agent status + * True if DCBx Agent is active + * False if DCBx Agent is stopped + * @cd: pointer to command details structure or NULL + * + * Start/Stop the embedded dcbx Agent. In case that this wrapper function + * returns ICE_SUCCESS, caller will need to check if FW returns back the same + * value as stated in dcbx_agent_status, and react accordingly. 
(0x0A09) + */ +enum ice_status +ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent, + bool *dcbx_agent_status, struct ice_sq_cd *cd) +{ + struct ice_aqc_lldp_stop_start_specific_agent *cmd; + enum ice_status status; + struct ice_aq_desc desc; + u16 opcode; + + cmd = &desc.params.lldp_agent_ctrl; + + opcode = ice_aqc_opc_lldp_stop_start_specific_agent; + + ice_fill_dflt_direct_cmd_desc(&desc, opcode); + + if (start_dcbx_agent) + cmd->command = ICE_AQC_START_STOP_AGENT_START_DCBX; + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); + + *dcbx_agent_status = false; + + if (!status && + cmd->command == ICE_AQC_START_STOP_AGENT_START_DCBX) + *dcbx_agent_status = true; + + return status; +} + +/** + * ice_aq_get_cee_dcb_cfg + * @hw: pointer to the HW struct + * @buff: response buffer that stores CEE operational configuration + * @cd: pointer to command details structure or NULL + * + * Get CEE DCBX mode operational configuration from firmware (0x0A07) + */ +static enum ice_status +ice_aq_get_cee_dcb_cfg(struct ice_hw *hw, + struct ice_aqc_get_cee_dcb_cfg_resp *buff, + struct ice_sq_cd *cd) +{ + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cee_dcb_cfg); + + return ice_aq_send_cmd(hw, &desc, (void *)buff, sizeof(*buff), cd); +} + +/** + * ice_cee_to_dcb_cfg + * @cee_cfg: pointer to CEE configuration struct + * @dcbcfg: DCB configuration struct + * + * Convert CEE configuration from firmware to DCB configuration + */ +static void +ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg, + struct ice_dcbx_cfg *dcbcfg) +{ + u32 status, tlv_status = le32_to_cpu(cee_cfg->tlv_status); + u32 ice_aqc_cee_status_mask, ice_aqc_cee_status_shift; + u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio); + u8 i, err, sync, oper, app_index, ice_app_sel_type; + u16 ice_aqc_cee_app_mask, ice_aqc_cee_app_shift; + u16 ice_app_prot_id_type; + + /* CEE PG data to ETS config */ + dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc; + + /* Note that the FW creates the oper_prio_tc nibbles reversed + * from those in the CEE Priority Group sub-TLV. 
+ */ + for (i = 0; i < ICE_MAX_TRAFFIC_CLASS / 2; i++) { + dcbcfg->etscfg.prio_table[i * 2] = + ((cee_cfg->oper_prio_tc[i] & ICE_CEE_PGID_PRIO_0_M) >> + ICE_CEE_PGID_PRIO_0_S); + dcbcfg->etscfg.prio_table[i * 2 + 1] = + ((cee_cfg->oper_prio_tc[i] & ICE_CEE_PGID_PRIO_1_M) >> + ICE_CEE_PGID_PRIO_1_S); + } + + ice_for_each_traffic_class(i) { + dcbcfg->etscfg.tcbwtable[i] = cee_cfg->oper_tc_bw[i]; + + if (dcbcfg->etscfg.prio_table[i] == ICE_CEE_PGID_STRICT) { + /* Map it to next empty TC */ + dcbcfg->etscfg.prio_table[i] = cee_cfg->oper_num_tc - 1; + dcbcfg->etscfg.tsatable[i] = ICE_IEEE_TSA_STRICT; + } else { + dcbcfg->etscfg.tsatable[i] = ICE_IEEE_TSA_ETS; + } + } + + /* CEE PFC data to PFC config */ + dcbcfg->pfc.pfcena = cee_cfg->oper_pfc_en; + dcbcfg->pfc.pfccap = ICE_MAX_TRAFFIC_CLASS; + + app_index = 0; + for (i = 0; i < 3; i++) { + if (i == 0) { + /* FCoE APP */ + ice_aqc_cee_status_mask = ICE_AQC_CEE_FCOE_STATUS_M; + ice_aqc_cee_status_shift = ICE_AQC_CEE_FCOE_STATUS_S; + ice_aqc_cee_app_mask = ICE_AQC_CEE_APP_FCOE_M; + ice_aqc_cee_app_shift = ICE_AQC_CEE_APP_FCOE_S; + ice_app_sel_type = ICE_APP_SEL_ETHTYPE; + ice_app_prot_id_type = ICE_APP_PROT_ID_FCOE; + } else if (i == 1) { + /* iSCSI APP */ + ice_aqc_cee_status_mask = ICE_AQC_CEE_ISCSI_STATUS_M; + ice_aqc_cee_status_shift = ICE_AQC_CEE_ISCSI_STATUS_S; + ice_aqc_cee_app_mask = ICE_AQC_CEE_APP_ISCSI_M; + ice_aqc_cee_app_shift = ICE_AQC_CEE_APP_ISCSI_S; + ice_app_sel_type = ICE_APP_SEL_TCPIP; + ice_app_prot_id_type = ICE_APP_PROT_ID_ISCSI; + } else { + /* FIP APP */ + ice_aqc_cee_status_mask = ICE_AQC_CEE_FIP_STATUS_M; + ice_aqc_cee_status_shift = ICE_AQC_CEE_FIP_STATUS_S; + ice_aqc_cee_app_mask = ICE_AQC_CEE_APP_FIP_M; + ice_aqc_cee_app_shift = ICE_AQC_CEE_APP_FIP_S; + ice_app_sel_type = ICE_APP_SEL_ETHTYPE; + ice_app_prot_id_type = ICE_APP_PROT_ID_FIP; + } + + status = (tlv_status & ice_aqc_cee_status_mask) >> + ice_aqc_cee_status_shift; + err = (status & ICE_TLV_STATUS_ERR) ? 1 : 0; + sync = (status & ICE_TLV_STATUS_SYNC) ? 1 : 0; + oper = (status & ICE_TLV_STATUS_OPER) ?
1 : 0; + /* Add FCoE/iSCSI/FIP APP if Error is False and + * Oper/Sync is True + */ + if (!err && sync && oper) { + dcbcfg->app[app_index].priority = + (app_prio & ice_aqc_cee_app_mask) >> + ice_aqc_cee_app_shift; + dcbcfg->app[app_index].selector = ice_app_sel_type; + dcbcfg->app[app_index].prot_id = ice_app_prot_id_type; + app_index++; + } + } + + dcbcfg->numapps = app_index; +} + +/** + * ice_get_ieee_or_cee_dcb_cfg + * @pi: port information structure + * @dcbx_mode: mode of DCBX (IEEE or CEE) + * + * Get IEEE or CEE mode DCB configuration from the Firmware + */ +static enum ice_status +ice_get_ieee_or_cee_dcb_cfg(struct ice_port_info *pi, u8 dcbx_mode) +{ + struct ice_dcbx_cfg *dcbx_cfg = NULL; + enum ice_status ret; + + if (!pi) + return ICE_ERR_PARAM; + + if (dcbx_mode == ICE_DCBX_MODE_IEEE) + dcbx_cfg = &pi->local_dcbx_cfg; + else if (dcbx_mode == ICE_DCBX_MODE_CEE) + dcbx_cfg = &pi->desired_dcbx_cfg; + + /* Get Local DCB Config in case of ICE_DCBX_MODE_IEEE + * or get CEE DCB Desired Config in case of ICE_DCBX_MODE_CEE + */ + ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_LOCAL, + ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID, dcbx_cfg); + if (ret) + goto out; + + /* Get Remote DCB Config */ + dcbx_cfg = &pi->remote_dcbx_cfg; + ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE, + ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID, dcbx_cfg); + /* Don't treat ENOENT as an error for Remote MIBs */ + if (pi->hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT) + ret = 0; + +out: + return ret; +} + +/** + * ice_get_dcb_cfg + * @pi: port information structure + * + * Get DCB configuration from the Firmware + */ +enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi) +{ + struct ice_aqc_get_cee_dcb_cfg_resp cee_cfg; + struct ice_dcbx_cfg *dcbx_cfg; + enum ice_status ret; + + if (!pi) + return ICE_ERR_PARAM; + + ret = ice_aq_get_cee_dcb_cfg(pi->hw, &cee_cfg, NULL); + if (!ret) { + /* CEE mode */ + dcbx_cfg = &pi->local_dcbx_cfg; + dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_CEE; + dcbx_cfg->tlv_status = le32_to_cpu(cee_cfg.tlv_status); + ice_cee_to_dcb_cfg(&cee_cfg, dcbx_cfg); + ret = ice_get_ieee_or_cee_dcb_cfg(pi, ICE_DCBX_MODE_CEE); + } else if (pi->hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT) { + /* CEE mode not enabled; try querying IEEE data */ + dcbx_cfg = &pi->local_dcbx_cfg; + dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_IEEE; + ret = ice_get_ieee_or_cee_dcb_cfg(pi, ICE_DCBX_MODE_IEEE); + } + + return ret; +} + +/** + * ice_init_dcb + * @hw: pointer to the HW struct + * + * Update DCB configuration from the Firmware + */ +enum ice_status ice_init_dcb(struct ice_hw *hw) +{ + struct ice_port_info *pi = hw->port_info; + enum ice_status ret = 0; + + if (!hw->func_caps.common_cap.dcb) + return ICE_ERR_NOT_SUPPORTED; + + pi->is_sw_lldp = true; + + /* Get DCBX status */ + pi->dcbx_status = ice_get_dcbx_status(hw); + + if (pi->dcbx_status == ICE_DCBX_STATUS_DONE || + pi->dcbx_status == ICE_DCBX_STATUS_IN_PROGRESS) { + /* Get current DCBX configuration */ + ret = ice_get_dcb_cfg(pi); + pi->is_sw_lldp = (hw->adminq.sq_last_status == ICE_AQ_RC_EPERM); + if (ret) + return ret; + } else if (pi->dcbx_status == ICE_DCBX_STATUS_DIS) { + return ICE_ERR_NOT_READY; + } + + /* Configure the LLDP MIB change event */ + ret = ice_aq_cfg_lldp_mib_change(hw, true, NULL); + if (!ret) + pi->is_sw_lldp = false; + + return ret; +} + +/** + * ice_add_ieee_ets_common_tlv + * @buf: Data buffer to be populated with ice_dcb_ets_cfg data + * @ets_cfg: Container for ice_dcb_ets_cfg data + * + * Populate the TLV buffer with ice_dcb_ets_cfg data + */ +static
void +ice_add_ieee_ets_common_tlv(u8 *buf, struct ice_dcb_ets_cfg *ets_cfg) +{ + u8 priority0, priority1; + u8 offset = 0; + int i; + + /* Priority Assignment Table (4 octets) + * Octets:| 1 | 2 | 3 | 4 | + * ----------------------------------------- + * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7| + * ----------------------------------------- + * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0| + * ----------------------------------------- + */ + for (i = 0; i < ICE_MAX_TRAFFIC_CLASS / 2; i++) { + priority0 = ets_cfg->prio_table[i * 2] & 0xF; + priority1 = ets_cfg->prio_table[i * 2 + 1] & 0xF; + buf[offset] = (priority0 << ICE_IEEE_ETS_PRIO_1_S) | priority1; + offset++; + } + + /* TC Bandwidth Table (8 octets) + * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | + * --------------------------------- + * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| + * --------------------------------- + * + * TSA Assignment Table (8 octets) + * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | + * --------------------------------- + * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| + * --------------------------------- + */ + ice_for_each_traffic_class(i) { + buf[offset] = ets_cfg->tcbwtable[i]; + buf[ICE_MAX_TRAFFIC_CLASS + offset] = ets_cfg->tsatable[i]; + offset++; + } +} + +/** + * ice_add_ieee_ets_tlv - Prepare ETS TLV in IEEE format + * @tlv: Fill the ETS config data in IEEE format + * @dcbcfg: Local store which holds the DCB Config + * + * Prepare IEEE 802.1Qaz ETS CFG TLV + */ +static void +ice_add_ieee_ets_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) +{ + struct ice_dcb_ets_cfg *etscfg; + u8 *buf = tlv->tlvinfo; + u8 maxtcwilling = 0; + u32 ouisubtype; + u16 typelen; + + typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | + ICE_IEEE_ETS_TLV_LEN); + tlv->typelen = htons(typelen); + + ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) | + ICE_IEEE_SUBTYPE_ETS_CFG); + tlv->ouisubtype = htonl(ouisubtype); + + /* First Octet post subtype + * -------------------------- + * |will-|CBS | Re- | Max | + * |ing | |served| TCs | + * -------------------------- + * |1bit | 1bit|3 bits|3bits| + */ + etscfg = &dcbcfg->etscfg; + if (etscfg->willing) + maxtcwilling = BIT(ICE_IEEE_ETS_WILLING_S); + maxtcwilling |= etscfg->maxtcs & ICE_IEEE_ETS_MAXTC_M; + buf[0] = maxtcwilling; + + /* Begin adding at Priority Assignment Table (offset 1 in buf) */ + ice_add_ieee_ets_common_tlv(&buf[1], etscfg); +} + +/** + * ice_add_ieee_etsrec_tlv - Prepare ETS Recommended TLV in IEEE format + * @tlv: Fill ETS Recommended TLV in IEEE format + * @dcbcfg: Local store which holds the DCB Config + * + * Prepare IEEE 802.1Qaz ETS REC TLV + */ +static void +ice_add_ieee_etsrec_tlv(struct ice_lldp_org_tlv *tlv, + struct ice_dcbx_cfg *dcbcfg) +{ + struct ice_dcb_ets_cfg *etsrec; + u8 *buf = tlv->tlvinfo; + u32 ouisubtype; + u16 typelen; + + typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | + ICE_IEEE_ETS_TLV_LEN); + tlv->typelen = htons(typelen); + + ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) | + ICE_IEEE_SUBTYPE_ETS_REC); + tlv->ouisubtype = htonl(ouisubtype); + + etsrec = &dcbcfg->etsrec; + + /* First Octet is reserved */ + /* Begin adding at Priority Assignment Table (offset 1 in buf) */ + ice_add_ieee_ets_common_tlv(&buf[1], etsrec); +} + +/** + * ice_add_ieee_pfc_tlv - Prepare PFC TLV in IEEE format + * @tlv: Fill PFC TLV in IEEE format + * @dcbcfg: Local store which holds the PFC CFG data + * + * Prepare IEEE 802.1Qaz PFC CFG TLV + */ +static void +ice_add_ieee_pfc_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) +{ + u8 *buf 
= tlv->tlvinfo; + u32 ouisubtype; + u16 typelen; + + typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | + ICE_IEEE_PFC_TLV_LEN); + tlv->typelen = htons(typelen); + + ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) | + ICE_IEEE_SUBTYPE_PFC_CFG); + tlv->ouisubtype = htonl(ouisubtype); + + /* ---------------------------------------- + * |will-|MBC | Re- | PFC | PFC Enable | + * |ing | |served| cap | | + * ----------------------------------------- + * |1bit | 1bit|2 bits|4bits| 1 octet | + */ + if (dcbcfg->pfc.willing) + buf[0] = BIT(ICE_IEEE_PFC_WILLING_S); + + if (dcbcfg->pfc.mbc) + buf[0] |= BIT(ICE_IEEE_PFC_MBC_S); + + buf[0] |= dcbcfg->pfc.pfccap & 0xF; + buf[1] = dcbcfg->pfc.pfcena; +} + +/** + * ice_add_ieee_app_pri_tlv - Prepare APP TLV in IEEE format + * @tlv: Fill APP TLV in IEEE format + * @dcbcfg: Local store which holds the APP CFG data + * + * Prepare IEEE 802.1Qaz APP CFG TLV + */ +static void +ice_add_ieee_app_pri_tlv(struct ice_lldp_org_tlv *tlv, + struct ice_dcbx_cfg *dcbcfg) +{ + u16 typelen, len, offset = 0; + u8 priority, selector, i = 0; + u8 *buf = tlv->tlvinfo; + u32 ouisubtype; + + /* No APP TLVs then just return */ + if (dcbcfg->numapps == 0) + return; + ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) | + ICE_IEEE_SUBTYPE_APP_PRI); + tlv->ouisubtype = htonl(ouisubtype); + + /* Move offset to App Priority Table */ + offset++; + /* Application Priority Table (3 octets) + * Octets:| 1 | 2 | 3 | + * ----------------------------------------- + * |Priority|Rsrvd| Sel | Protocol ID | + * ----------------------------------------- + * Bits:|23 21|20 19|18 16|15 0| + * ----------------------------------------- + */ + while (i < dcbcfg->numapps) { + priority = dcbcfg->app[i].priority & 0x7; + selector = dcbcfg->app[i].selector & 0x7; + buf[offset] = (priority << ICE_IEEE_APP_PRIO_S) | selector; + buf[offset + 1] = (dcbcfg->app[i].prot_id >> 0x8) & 0xFF; + buf[offset + 2] = dcbcfg->app[i].prot_id & 0xFF; + /* Move to next app */ + offset += 3; + i++; + if (i >= ICE_DCBX_MAX_APPS) + break; + } + /* len includes size of ouisubtype + 1 reserved + 3*numapps */ + len = sizeof(tlv->ouisubtype) + 1 + (i * 3); + typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | (len & 0x1FF)); + tlv->typelen = htons(typelen); +} + +/** + * ice_add_dcb_tlv - Add all IEEE TLVs + * @tlv: Fill TLV data in IEEE format + * @dcbcfg: Local store which holds the DCB Config + * @tlvid: Type of IEEE TLV + * + * Add tlv information + */ +static void +ice_add_dcb_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg, + u16 tlvid) +{ + switch (tlvid) { + case ICE_IEEE_TLV_ID_ETS_CFG: + ice_add_ieee_ets_tlv(tlv, dcbcfg); + break; + case ICE_IEEE_TLV_ID_ETS_REC: + ice_add_ieee_etsrec_tlv(tlv, dcbcfg); + break; + case ICE_IEEE_TLV_ID_PFC_CFG: + ice_add_ieee_pfc_tlv(tlv, dcbcfg); + break; + case ICE_IEEE_TLV_ID_APP_PRI: + ice_add_ieee_app_pri_tlv(tlv, dcbcfg); + break; + default: + break; + } +} + +/** + * ice_dcb_cfg_to_lldp - Convert DCB configuration to MIB format + * @lldpmib: pointer to the LLDP MIB buffer to be filled + * @miblen: length of LLDP MIB + * @dcbcfg: Local store which holds the DCB Config + * + * Convert the DCB configuration to MIB format + */ +static void +ice_dcb_cfg_to_lldp(u8 *lldpmib, u16 *miblen, struct ice_dcbx_cfg *dcbcfg) +{ + u16 len, offset = 0, tlvid = ICE_TLV_ID_START; + struct ice_lldp_org_tlv *tlv; + u16 typelen; + + tlv = (struct ice_lldp_org_tlv *)lldpmib; + while (1) { + ice_add_dcb_tlv(tlv, dcbcfg, tlvid++); + typelen = ntohs(tlv->typelen); + len = (typelen &
ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S; + if (len) + offset += len + 2; + /* END TLV or beyond LLDPDU size */ + if (tlvid >= ICE_TLV_ID_END_OF_LLDPPDU || + offset > ICE_LLDPDU_SIZE) + break; + /* Move to next TLV */ + if (len) + tlv = (struct ice_lldp_org_tlv *) + ((char *)tlv + sizeof(tlv->typelen) + len); + } + *miblen = offset; +} + +/** + * ice_set_dcb_cfg - Set the local LLDP MIB to FW + * @pi: port information structure + * + * Set DCB configuration to the Firmware + */ +enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi) +{ + u8 mib_type, *lldpmib = NULL; + struct ice_dcbx_cfg *dcbcfg; + enum ice_status ret; + struct ice_hw *hw; + u16 miblen; + + if (!pi) + return ICE_ERR_PARAM; + + hw = pi->hw; + + /* update the HW local config */ + dcbcfg = &pi->local_dcbx_cfg; + /* Allocate the LLDPDU */ + lldpmib = devm_kzalloc(ice_hw_to_dev(hw), ICE_LLDPDU_SIZE, GFP_KERNEL); + if (!lldpmib) + return ICE_ERR_NO_MEMORY; + + mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB; + if (dcbcfg->app_mode == ICE_DCBX_APPS_NON_WILLING) + mib_type |= SET_LOCAL_MIB_TYPE_CEE_NON_WILLING; + + ice_dcb_cfg_to_lldp(lldpmib, &miblen, dcbcfg); + ret = ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, miblen, + NULL); + + devm_kfree(ice_hw_to_dev(hw), lldpmib); + + return ret; +} + +/** + * ice_aq_query_port_ets - query port ets configuration + * @pi: port information structure + * @buf: pointer to buffer + * @buf_size: buffer size in bytes + * @cd: pointer to command details structure or NULL + * + * query current port ets configuration + */ +static enum ice_status +ice_aq_query_port_ets(struct ice_port_info *pi, + struct ice_aqc_port_ets_elem *buf, u16 buf_size, + struct ice_sq_cd *cd) +{ + struct ice_aqc_query_port_ets *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + if (!pi) + return ICE_ERR_PARAM; + cmd = &desc.params.port_ets; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_port_ets); + cmd->port_teid = pi->root->info.node_teid; + + status = ice_aq_send_cmd(pi->hw, &desc, buf, buf_size, cd); + return status; +} + +/** + * ice_update_port_tc_tree_cfg - update TC tree configuration + * @pi: port information structure + * @buf: pointer to buffer + * + * update the SW DB with the new TC changes + */ +static enum ice_status +ice_update_port_tc_tree_cfg(struct ice_port_info *pi, + struct ice_aqc_port_ets_elem *buf) +{ + struct ice_sched_node *node, *tc_node; + struct ice_aqc_get_elem elem; + enum ice_status status = 0; + u32 teid1, teid2; + u8 i, j; + + if (!pi) + return ICE_ERR_PARAM; + /* suspend the missing TC nodes */ + for (i = 0; i < pi->root->num_children; i++) { + teid1 = le32_to_cpu(pi->root->children[i]->info.node_teid); + ice_for_each_traffic_class(j) { + teid2 = le32_to_cpu(buf->tc_node_teid[j]); + if (teid1 == teid2) + break; + } + if (j < ICE_MAX_TRAFFIC_CLASS) + continue; + /* TC is missing */ + pi->root->children[i]->in_use = false; + } + /* add the new TC nodes */ + ice_for_each_traffic_class(j) { + teid2 = le32_to_cpu(buf->tc_node_teid[j]); + if (teid2 == ICE_INVAL_TEID) + continue; + /* Is it already present in the tree ? 
*/ + for (i = 0; i < pi->root->num_children; i++) { + tc_node = pi->root->children[i]; + if (!tc_node) + continue; + teid1 = le32_to_cpu(tc_node->info.node_teid); + if (teid1 == teid2) { + tc_node->tc_num = j; + tc_node->in_use = true; + break; + } + } + if (i < pi->root->num_children) + continue; + /* new TC */ + status = ice_sched_query_elem(pi->hw, teid2, &elem); + if (!status) + status = ice_sched_add_node(pi, 1, &elem.generic[0]); + if (status) + break; + /* update the TC number */ + node = ice_sched_find_node_by_teid(pi->root, teid2); + if (node) + node->tc_num = j; + } + return status; +} + +/** + * ice_query_port_ets - query port ets configuration + * @pi: port information structure + * @buf: pointer to buffer + * @buf_size: buffer size in bytes + * @cd: pointer to command details structure or NULL + * + * query current port ets configuration and update the + * SW DB with the TC changes + */ +enum ice_status +ice_query_port_ets(struct ice_port_info *pi, + struct ice_aqc_port_ets_elem *buf, u16 buf_size, + struct ice_sq_cd *cd) +{ + enum ice_status status; + + mutex_lock(&pi->sched_lock); + status = ice_aq_query_port_ets(pi, buf, buf_size, cd); + if (!status) + status = ice_update_port_tc_tree_cfg(pi, buf); + mutex_unlock(&pi->sched_lock); + return status; +} diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.h b/drivers/net/ethernet/intel/ice/ice_dcb.h new file mode 100644 index 000000000000..e7d4416e3a66 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_dcb.h @@ -0,0 +1,179 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2019, Intel Corporation. */ + +#ifndef _ICE_DCB_H_ +#define _ICE_DCB_H_ + +#include "ice_type.h" + +#define ICE_DCBX_STATUS_NOT_STARTED 0 +#define ICE_DCBX_STATUS_IN_PROGRESS 1 +#define ICE_DCBX_STATUS_DONE 2 +#define ICE_DCBX_STATUS_DIS 7 + +#define ICE_TLV_TYPE_END 0 +#define ICE_TLV_TYPE_ORG 127 + +#define ICE_IEEE_8021QAZ_OUI 0x0080C2 +#define ICE_IEEE_SUBTYPE_ETS_CFG 9 +#define ICE_IEEE_SUBTYPE_ETS_REC 10 +#define ICE_IEEE_SUBTYPE_PFC_CFG 11 +#define ICE_IEEE_SUBTYPE_APP_PRI 12 + +#define ICE_CEE_DCBX_OUI 0x001B21 +#define ICE_CEE_DCBX_TYPE 2 +#define ICE_CEE_SUBTYPE_PG_CFG 2 +#define ICE_CEE_SUBTYPE_PFC_CFG 3 +#define ICE_CEE_SUBTYPE_APP_PRI 4 +#define ICE_CEE_MAX_FEAT_TYPE 3 +/* Defines for LLDP TLV header */ +#define ICE_LLDP_TLV_LEN_S 0 +#define ICE_LLDP_TLV_LEN_M (0x01FF << ICE_LLDP_TLV_LEN_S) +#define ICE_LLDP_TLV_TYPE_S 9 +#define ICE_LLDP_TLV_TYPE_M (0x7F << ICE_LLDP_TLV_TYPE_S) +#define ICE_LLDP_TLV_SUBTYPE_S 0 +#define ICE_LLDP_TLV_SUBTYPE_M (0xFF << ICE_LLDP_TLV_SUBTYPE_S) +#define ICE_LLDP_TLV_OUI_S 8 +#define ICE_LLDP_TLV_OUI_M (0xFFFFFFUL << ICE_LLDP_TLV_OUI_S) + +/* Defines for IEEE ETS TLV */ +#define ICE_IEEE_ETS_MAXTC_S 0 +#define ICE_IEEE_ETS_MAXTC_M (0x7 << ICE_IEEE_ETS_MAXTC_S) +#define ICE_IEEE_ETS_CBS_S 6 +#define ICE_IEEE_ETS_CBS_M BIT(ICE_IEEE_ETS_CBS_S) +#define ICE_IEEE_ETS_WILLING_S 7 +#define ICE_IEEE_ETS_WILLING_M BIT(ICE_IEEE_ETS_WILLING_S) +#define ICE_IEEE_ETS_PRIO_0_S 0 +#define ICE_IEEE_ETS_PRIO_0_M (0x7 << ICE_IEEE_ETS_PRIO_0_S) +#define ICE_IEEE_ETS_PRIO_1_S 4 +#define ICE_IEEE_ETS_PRIO_1_M (0x7 << ICE_IEEE_ETS_PRIO_1_S) +#define ICE_CEE_PGID_PRIO_0_S 0 +#define ICE_CEE_PGID_PRIO_0_M (0xF << ICE_CEE_PGID_PRIO_0_S) +#define ICE_CEE_PGID_PRIO_1_S 4 +#define ICE_CEE_PGID_PRIO_1_M (0xF << ICE_CEE_PGID_PRIO_1_S) +#define ICE_CEE_PGID_STRICT 15 + +/* Defines for IEEE TSA types */ +#define ICE_IEEE_TSA_STRICT 0 +#define ICE_IEEE_TSA_ETS 2 + +/* Defines for IEEE PFC TLV */ +#define ICE_IEEE_PFC_CAP_S 0 +#define 
ICE_IEEE_PFC_CAP_M (0xF << ICE_IEEE_PFC_CAP_S) +#define ICE_IEEE_PFC_MBC_S 6 +#define ICE_IEEE_PFC_MBC_M BIT(ICE_IEEE_PFC_MBC_S) +#define ICE_IEEE_PFC_WILLING_S 7 +#define ICE_IEEE_PFC_WILLING_M BIT(ICE_IEEE_PFC_WILLING_S) + +/* Defines for IEEE APP TLV */ +#define ICE_IEEE_APP_SEL_S 0 +#define ICE_IEEE_APP_SEL_M (0x7 << ICE_IEEE_APP_SEL_S) +#define ICE_IEEE_APP_PRIO_S 5 +#define ICE_IEEE_APP_PRIO_M (0x7 << ICE_IEEE_APP_PRIO_S) + +/* TLV definitions for preparing MIB */ +#define ICE_IEEE_TLV_ID_ETS_CFG 3 +#define ICE_IEEE_TLV_ID_ETS_REC 4 +#define ICE_IEEE_TLV_ID_PFC_CFG 5 +#define ICE_IEEE_TLV_ID_APP_PRI 6 +#define ICE_TLV_ID_END_OF_LLDPPDU 7 +#define ICE_TLV_ID_START ICE_IEEE_TLV_ID_ETS_CFG + +#define ICE_IEEE_ETS_TLV_LEN 25 +#define ICE_IEEE_PFC_TLV_LEN 6 +#define ICE_IEEE_APP_TLV_LEN 11 + +/* IEEE 802.1AB LLDP Organization specific TLV */ +struct ice_lldp_org_tlv { + __be16 typelen; + __be32 ouisubtype; + u8 tlvinfo[1]; +} __packed; + +struct ice_cee_tlv_hdr { + __be16 typelen; + u8 operver; + u8 maxver; +}; + +struct ice_cee_ctrl_tlv { + struct ice_cee_tlv_hdr hdr; + __be32 seqno; + __be32 ackno; +}; + +struct ice_cee_feat_tlv { + struct ice_cee_tlv_hdr hdr; + u8 en_will_err; /* Bits: |En|Will|Err|Reserved(5)| */ +#define ICE_CEE_FEAT_TLV_ENA_M 0x80 +#define ICE_CEE_FEAT_TLV_WILLING_M 0x40 +#define ICE_CEE_FEAT_TLV_ERR_M 0x20 + u8 subtype; + u8 tlvinfo[1]; +}; + +struct ice_cee_app_prio { + __be16 protocol; + u8 upper_oui_sel; /* Bits: |Upper OUI(6)|Selector(2)| */ +#define ICE_CEE_APP_SELECTOR_M 0x03 + __be16 lower_oui; + u8 prio_map; +} __packed; + +u8 ice_get_dcbx_status(struct ice_hw *hw); +enum ice_status ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg); +enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi); +enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi); +enum ice_status ice_init_dcb(struct ice_hw *hw); +enum ice_status +ice_query_port_ets(struct ice_port_info *pi, + struct ice_aqc_port_ets_elem *buf, u16 buf_size, + struct ice_sq_cd *cmd_details); +#ifdef CONFIG_DCB +enum ice_status +ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, + struct ice_sq_cd *cd); +enum ice_status ice_aq_start_lldp(struct ice_hw *hw, struct ice_sq_cd *cd); +enum ice_status +ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent, + bool *dcbx_agent_status, struct ice_sq_cd *cd); +enum ice_status +ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update, + struct ice_sq_cd *cd); +#else /* CONFIG_DCB */ +static inline enum ice_status +ice_aq_stop_lldp(struct ice_hw __always_unused *hw, + bool __always_unused shutdown_lldp_agent, + struct ice_sq_cd __always_unused *cd) +{ + return 0; +} + +static inline enum ice_status +ice_aq_start_lldp(struct ice_hw __always_unused *hw, + struct ice_sq_cd __always_unused *cd) +{ + return 0; +} + +static inline enum ice_status +ice_aq_start_stop_dcbx(struct ice_hw __always_unused *hw, + bool __always_unused start_dcbx_agent, + bool *dcbx_agent_status, + struct ice_sq_cd __always_unused *cd) +{ + *dcbx_agent_status = false; + + return 0; +} + +static inline enum ice_status +ice_aq_cfg_lldp_mib_change(struct ice_hw __always_unused *hw, + bool __always_unused ena_update, + struct ice_sq_cd __always_unused *cd) +{ + return 0; +} + +#endif /* CONFIG_DCB */ +#endif /* _ICE_DCB_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c new file mode 100644 index 000000000000..3e81af1884fc --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -0,0 +1,551 @@ 
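Before moving on to the new ice_dcb_lib.c, note how the ICE_LLDP_TLV_* defines above pack an LLDP TLV header: the 16-bit typelen word holds a 7-bit type in bits 15:9 and a 9-bit length in bits 8:0. A standalone sketch of the round trip used by ice_dcb_cfg_to_lldp() and ice_lldp_to_dcb_cfg(), with the htons()/ntohs() byte swapping of the real driver omitted for brevity (an editor's illustration, not part of the patch):

#include <stdio.h>

/* Same layout as the ICE_LLDP_TLV_* defines in ice_dcb.h */
#define TLV_LEN_S	0
#define TLV_LEN_M	(0x01FF << TLV_LEN_S)
#define TLV_TYPE_S	9
#define TLV_TYPE_M	(0x7F << TLV_TYPE_S)
#define TLV_TYPE_ORG	127	/* ICE_TLV_TYPE_ORG */

int main(void)
{
	/* Encode: organization-specific TLV with a 25-byte body,
	 * the size of an IEEE ETS CFG TLV (ICE_IEEE_ETS_TLV_LEN).
	 */
	unsigned int typelen = (TLV_TYPE_ORG << TLV_TYPE_S) | (25 & TLV_LEN_M);

	/* Decode it back, as the MIB parser does after ntohs() */
	unsigned int type = (typelen & TLV_TYPE_M) >> TLV_TYPE_S;
	unsigned int len = (typelen & TLV_LEN_M) >> TLV_LEN_S;

	printf("typelen=0x%04x type=%u len=%u\n", typelen, type, len);
	return 0;
}

This prints typelen=0xfe19 type=127 len=25, matching the constants the TLV builders in ice_dcb.c OR together before the htons().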
+// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019, Intel Corporation. */ + +#include "ice_dcb_lib.h" + +/** + * ice_dcb_get_ena_tc - return bitmap of enabled TCs + * @dcbcfg: DCB config to evaluate for enabled TCs + */ +u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg) +{ + u8 i, num_tc, ena_tc = 1; + + num_tc = ice_dcb_get_num_tc(dcbcfg); + + for (i = 0; i < num_tc; i++) + ena_tc |= BIT(i); + + return ena_tc; +} + +/** + * ice_dcb_get_num_tc - Get the number of TCs from DCBX config + * @dcbcfg: config to retrieve number of TCs from + */ +u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg) +{ + bool tc_unused = false; + u8 num_tc = 0; + u8 ret = 0; + int i; + + /* Scan the ETS Config Priority Table to find traffic classes + * enabled and create a bitmask of enabled TCs + */ + for (i = 0; i < CEE_DCBX_MAX_PRIO; i++) + num_tc |= BIT(dcbcfg->etscfg.prio_table[i]); + + /* Scan bitmask for contiguous TCs starting with TC0 */ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + if (num_tc & BIT(i)) { + if (!tc_unused) { + ret++; + } else { + pr_err("Non-contiguous TCs - Disabling DCB\n"); + return 1; + } + } else { + tc_unused = true; + } + } + + /* There is always at least 1 TC */ + if (!ret) + ret = 1; + + return ret; +} + +/** + * ice_vsi_cfg_dcb_rings - Update rings to reflect DCB TC + * @vsi: VSI owner of rings being updated + */ +void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi) +{ + struct ice_ring *tx_ring, *rx_ring; + u16 qoffset, qcount; + int i, n; + + if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) { + /* Reset the TC information */ + for (i = 0; i < vsi->num_txq; i++) { + tx_ring = vsi->tx_rings[i]; + tx_ring->dcb_tc = 0; + } + for (i = 0; i < vsi->num_rxq; i++) { + rx_ring = vsi->rx_rings[i]; + rx_ring->dcb_tc = 0; + } + return; + } + + ice_for_each_traffic_class(n) { + if (!(vsi->tc_cfg.ena_tc & BIT(n))) + break; + + qoffset = vsi->tc_cfg.tc_info[n].qoffset; + qcount = vsi->tc_cfg.tc_info[n].qcount_tx; + for (i = qoffset; i < (qoffset + qcount); i++) { + tx_ring = vsi->tx_rings[i]; + rx_ring = vsi->rx_rings[i]; + tx_ring->dcb_tc = n; + rx_ring->dcb_tc = n; + } + } +} + +/** + * ice_pf_dcb_recfg - Reconfigure all VEBs and VSIs + * @pf: pointer to the PF struct + * + * Assumed caller has already disabled all VSIs before + * calling this function. Reconfiguring DCB based on + * local_dcbx_cfg. 
+ */ +static void ice_pf_dcb_recfg(struct ice_pf *pf) +{ + struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->local_dcbx_cfg; + u8 tc_map = 0; + int v, ret; + + /* Update each VSI */ + ice_for_each_vsi(pf, v) { + if (!pf->vsi[v]) + continue; + + if (pf->vsi[v]->type == ICE_VSI_PF) + tc_map = ice_dcb_get_ena_tc(dcbcfg); + else + tc_map = ICE_DFLT_TRAFFIC_CLASS; + + ret = ice_vsi_cfg_tc(pf->vsi[v], tc_map); + if (ret) + dev_err(&pf->pdev->dev, + "Failed to config TC for VSI index: %d\n", + pf->vsi[v]->idx); + else + ice_vsi_map_rings_to_vectors(pf->vsi[v]); + } +} + +/** + * ice_pf_dcb_cfg - Apply new DCB configuration + * @pf: pointer to the PF struct + * @new_cfg: DCBX config to apply + */ +static int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg) +{ + struct ice_dcbx_cfg *old_cfg, *curr_cfg; + struct ice_aqc_port_ets_elem buf = { 0 }; + int ret = 0; + + curr_cfg = &pf->hw.port_info->local_dcbx_cfg; + + /* Enable DCB tagging only when more than one TC */ + if (ice_dcb_get_num_tc(new_cfg) > 1) { + dev_dbg(&pf->pdev->dev, "DCB tagging enabled (num TC > 1)\n"); + set_bit(ICE_FLAG_DCB_ENA, pf->flags); + } else { + dev_dbg(&pf->pdev->dev, "DCB tagging disabled (num TC = 1)\n"); + clear_bit(ICE_FLAG_DCB_ENA, pf->flags); + } + + if (!memcmp(new_cfg, curr_cfg, sizeof(*new_cfg))) { + dev_dbg(&pf->pdev->dev, "No change in DCB config required\n"); + return ret; + } + + /* Store old config in case FW config fails */ + old_cfg = devm_kzalloc(&pf->pdev->dev, sizeof(*old_cfg), GFP_KERNEL); + memcpy(old_cfg, curr_cfg, sizeof(*old_cfg)); + + /* avoid race conditions by holding the lock while disabling and + * re-enabling the VSI + */ + rtnl_lock(); + ice_pf_dis_all_vsi(pf, true); + + memcpy(curr_cfg, new_cfg, sizeof(*curr_cfg)); + memcpy(&curr_cfg->etsrec, &curr_cfg->etscfg, sizeof(curr_cfg->etsrec)); + + /* Only send new config to HW if we are in SW LLDP mode. Otherwise, + * the new config came from the HW in the first place. 
+/**
+ * ice_pf_dcb_cfg - Apply new DCB configuration
+ * @pf: pointer to the PF struct
+ * @new_cfg: DCBX config to apply
+ */
+static int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg)
+{
+	struct ice_dcbx_cfg *old_cfg, *curr_cfg;
+	struct ice_aqc_port_ets_elem buf = { 0 };
+	int ret = 0;
+
+	curr_cfg = &pf->hw.port_info->local_dcbx_cfg;
+
+	/* Enable DCB tagging only when more than one TC */
+	if (ice_dcb_get_num_tc(new_cfg) > 1) {
+		dev_dbg(&pf->pdev->dev, "DCB tagging enabled (num TC > 1)\n");
+		set_bit(ICE_FLAG_DCB_ENA, pf->flags);
+	} else {
+		dev_dbg(&pf->pdev->dev, "DCB tagging disabled (num TC = 1)\n");
+		clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
+	}
+
+	if (!memcmp(new_cfg, curr_cfg, sizeof(*new_cfg))) {
+		dev_dbg(&pf->pdev->dev, "No change in DCB config required\n");
+		return ret;
+	}
+
+	/* Store old config in case FW config fails */
+	old_cfg = devm_kzalloc(&pf->pdev->dev, sizeof(*old_cfg), GFP_KERNEL);
+	if (!old_cfg)
+		return -ENOMEM;
+
+	memcpy(old_cfg, curr_cfg, sizeof(*old_cfg));
+
+	/* avoid race conditions by holding the lock while disabling and
+	 * re-enabling the VSI
+	 */
+	rtnl_lock();
+	ice_pf_dis_all_vsi(pf, true);
+
+	memcpy(curr_cfg, new_cfg, sizeof(*curr_cfg));
+	memcpy(&curr_cfg->etsrec, &curr_cfg->etscfg, sizeof(curr_cfg->etsrec));
+
+	/* Only send new config to HW if we are in SW LLDP mode. Otherwise,
+	 * the new config came from the HW in the first place.
+	 */
+	if (pf->hw.port_info->is_sw_lldp) {
+		ret = ice_set_dcb_cfg(pf->hw.port_info);
+		if (ret) {
+			dev_err(&pf->pdev->dev, "Set DCB Config failed\n");
+			/* Restore previous settings to local config */
+			memcpy(curr_cfg, old_cfg, sizeof(*curr_cfg));
+			goto out;
+		}
+	}
+
+	ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
+	if (ret) {
+		dev_err(&pf->pdev->dev, "Query Port ETS failed\n");
+		goto out;
+	}
+
+	ice_pf_dcb_recfg(pf);
+
+out:
+	ice_pf_ena_all_vsi(pf, true);
+	rtnl_unlock();
+	devm_kfree(&pf->pdev->dev, old_cfg);
+	return ret;
+}
+
+/**
+ * ice_dcb_rebuild - rebuild DCB post reset
+ * @pf: physical function instance
+ */
+void ice_dcb_rebuild(struct ice_pf *pf)
+{
+	struct ice_aqc_port_ets_elem buf = { 0 };
+	struct ice_dcbx_cfg *prev_cfg;
+	enum ice_status ret;
+	u8 willing;
+
+	ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
+	if (ret) {
+		dev_err(&pf->pdev->dev, "Query Port ETS failed\n");
+		goto dcb_error;
+	}
+
+	/* If DCB was not enabled previously, we are done */
+	if (!test_bit(ICE_FLAG_DCB_ENA, pf->flags))
+		return;
+
+	/* Save current willing state and force FW to unwilling */
+	willing = pf->hw.port_info->local_dcbx_cfg.etscfg.willing;
+	pf->hw.port_info->local_dcbx_cfg.etscfg.willing = 0x0;
+	ret = ice_set_dcb_cfg(pf->hw.port_info);
+	if (ret) {
+		dev_err(&pf->pdev->dev, "Failed to set DCB to unwilling\n");
+		goto dcb_error;
+	}
+
+	/* Retrieve DCB config and ensure same as current in SW */
+	prev_cfg = devm_kmemdup(&pf->pdev->dev,
+				&pf->hw.port_info->local_dcbx_cfg,
+				sizeof(*prev_cfg), GFP_KERNEL);
+	if (!prev_cfg) {
+		dev_err(&pf->pdev->dev, "Failed to alloc space for DCB cfg\n");
+		goto dcb_error;
+	}
+
+	ret = ice_init_dcb(&pf->hw);
+	if (ret) {
+		devm_kfree(&pf->pdev->dev, prev_cfg);
+		goto dcb_error;
+	}
+
+	if (memcmp(prev_cfg, &pf->hw.port_info->local_dcbx_cfg,
+		   sizeof(*prev_cfg))) {
+		/* difference in cfg detected - disable DCB till next MIB */
+		dev_err(&pf->pdev->dev, "Set local MIB not accurate\n");
+		devm_kfree(&pf->pdev->dev, prev_cfg);
+		goto dcb_error;
+	}
+
+	/* fetched config congruent to previous configuration */
+	devm_kfree(&pf->pdev->dev, prev_cfg);
+
+	/* Configuration replayed - reset willing state to previous */
+	pf->hw.port_info->local_dcbx_cfg.etscfg.willing = willing;
+	ret = ice_set_dcb_cfg(pf->hw.port_info);
+	if (ret) {
+		dev_err(&pf->pdev->dev, "Failed to restore previous willing state\n");
+		goto dcb_error;
+	}
+	dev_info(&pf->pdev->dev, "DCB restored after reset\n");
+	ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
+	if (ret) {
+		dev_err(&pf->pdev->dev, "Query Port ETS failed\n");
+		goto dcb_error;
+	}
+
+	return;
+
+dcb_error:
+	dev_err(&pf->pdev->dev, "Disabling DCB until new settings occur\n");
+	prev_cfg = devm_kzalloc(&pf->pdev->dev, sizeof(*prev_cfg), GFP_KERNEL);
+	if (!prev_cfg)
+		return;
+
+	prev_cfg->etscfg.willing = true;
+	prev_cfg->etscfg.tcbwtable[0] = ICE_TC_MAX_BW;
+	prev_cfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;
+	memcpy(&prev_cfg->etsrec, &prev_cfg->etscfg, sizeof(prev_cfg->etsrec));
+	ice_pf_dcb_cfg(pf, prev_cfg);
+	devm_kfree(&pf->pdev->dev, prev_cfg);
+}
+
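The rebuild path above is a save/force-unwilling/verify/restore round trip against the firmware LLDP agent. A condensed sketch of that idea, using a hypothetical helper name and omitting the memcmp() verification and logging (illustrative only, not part of the patch):

	/* Hypothetical outline of the willing-state round trip in ice_dcb_rebuild() */
	static int example_replay_dcb(struct ice_port_info *pi)
	{
		u8 saved_willing = pi->local_dcbx_cfg.etscfg.willing;
		int err;

		pi->local_dcbx_cfg.etscfg.willing = 0;	/* force FW to unwilling */
		err = ice_set_dcb_cfg(pi);
		if (err)
			return err;

		/* ... re-read the config and memcmp() it against a saved copy ... */

		pi->local_dcbx_cfg.etscfg.willing = saved_willing;
		return ice_set_dcb_cfg(pi);		/* restore willing state */
	}
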
+/**
+ * ice_dcb_init_cfg - set the initial DCB config in SW
+ * @pf: PF to apply config to
+ */
+static int ice_dcb_init_cfg(struct ice_pf *pf)
+{
+	struct ice_dcbx_cfg *newcfg;
+	struct ice_port_info *pi;
+	int ret = 0;
+
+	pi = pf->hw.port_info;
+	newcfg = devm_kzalloc(&pf->pdev->dev, sizeof(*newcfg), GFP_KERNEL);
+	if (!newcfg)
+		return -ENOMEM;
+
+	memcpy(newcfg, &pi->local_dcbx_cfg, sizeof(*newcfg));
+	memset(&pi->local_dcbx_cfg, 0, sizeof(*newcfg));
+
+	dev_info(&pf->pdev->dev, "Configuring initial DCB values\n");
+	if (ice_pf_dcb_cfg(pf, newcfg))
+		ret = -EINVAL;
+
+	devm_kfree(&pf->pdev->dev, newcfg);
+
+	return ret;
+}
+
+/**
+ * ice_dcb_sw_dflt_cfg - Apply a default DCB config
+ * @pf: PF to apply config to
+ */
+static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf)
+{
+	struct ice_aqc_port_ets_elem buf = { 0 };
+	struct ice_dcbx_cfg *dcbcfg;
+	struct ice_port_info *pi;
+	struct ice_hw *hw;
+	int ret;
+
+	hw = &pf->hw;
+	pi = hw->port_info;
+	dcbcfg = devm_kzalloc(&pf->pdev->dev, sizeof(*dcbcfg), GFP_KERNEL);
+	if (!dcbcfg)
+		return -ENOMEM;
+
+	memset(&pi->local_dcbx_cfg, 0, sizeof(*dcbcfg));
+
+	dcbcfg->etscfg.willing = 1;
+	dcbcfg->etscfg.maxtcs = 8;
+	dcbcfg->etscfg.tcbwtable[0] = 100;
+	dcbcfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;
+
+	memcpy(&dcbcfg->etsrec, &dcbcfg->etscfg,
+	       sizeof(dcbcfg->etsrec));
+	dcbcfg->etsrec.willing = 0;
+
+	dcbcfg->pfc.willing = 1;
+	dcbcfg->pfc.pfccap = IEEE_8021QAZ_MAX_TCS;
+
+	dcbcfg->numapps = 1;
+	dcbcfg->app[0].selector = ICE_APP_SEL_ETHTYPE;
+	dcbcfg->app[0].priority = 3;
+	dcbcfg->app[0].prot_id = ICE_APP_PROT_ID_FCOE;
+
+	ret = ice_pf_dcb_cfg(pf, dcbcfg);
+	devm_kfree(&pf->pdev->dev, dcbcfg);
+	if (ret)
+		return ret;
+
+	return ice_query_port_ets(pi, &buf, sizeof(buf), NULL);
+}
+
+/**
+ * ice_init_pf_dcb - initialize DCB for a PF
+ * @pf: PF to initialize DCB for
+ */
+int ice_init_pf_dcb(struct ice_pf *pf)
+{
+	struct device *dev = &pf->pdev->dev;
+	struct ice_port_info *port_info;
+	struct ice_hw *hw = &pf->hw;
+	int sw_default = 0;
+	int err;
+
+	port_info = hw->port_info;
+
+	/* check if device is DCB capable */
+	if (!hw->func_caps.common_cap.dcb) {
+		dev_dbg(dev, "DCB not supported\n");
+		return -EOPNOTSUPP;
+	}
+
+	/* Best effort to put DCBx and LLDP into a good state */
+	port_info->dcbx_status = ice_get_dcbx_status(hw);
+	if (port_info->dcbx_status != ICE_DCBX_STATUS_DONE &&
+	    port_info->dcbx_status != ICE_DCBX_STATUS_IN_PROGRESS) {
+		bool dcbx_status;
+
+		/* Attempt to start LLDP engine. Ignore errors
+		 * as this will error if it is already started
+		 */
+		ice_aq_start_lldp(hw, NULL);
+
+		/* Attempt to start DCBX. 
Ignore errors as this + * will error if it is already started + */ + ice_aq_start_stop_dcbx(hw, true, &dcbx_status, NULL); + } + + err = ice_init_dcb(hw); + if (err) { + /* FW LLDP not in usable state, default to SW DCBx/LLDP */ + dev_info(&pf->pdev->dev, "FW LLDP not in usable state\n"); + hw->port_info->dcbx_status = ICE_DCBX_STATUS_NOT_STARTED; + hw->port_info->is_sw_lldp = true; + } + + if (port_info->dcbx_status == ICE_DCBX_STATUS_DIS) + dev_info(&pf->pdev->dev, "DCBX disabled\n"); + + /* LLDP disabled in FW */ + if (port_info->is_sw_lldp) { + sw_default = 1; + dev_info(&pf->pdev->dev, "DCBx/LLDP in SW mode.\n"); + } + + if (port_info->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED) { + sw_default = 1; + dev_info(&pf->pdev->dev, "DCBX not started\n"); + } + + if (sw_default) { + err = ice_dcb_sw_dflt_cfg(pf); + if (err) { + dev_err(&pf->pdev->dev, + "Failed to set local DCB config %d\n", err); + err = -EIO; + goto dcb_init_err; + } + + pf->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; + set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); + set_bit(ICE_FLAG_DCB_ENA, pf->flags); + return 0; + } + + /* DCBX in FW and LLDP enabled in FW */ + pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_IEEE; + + set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); + + err = ice_dcb_init_cfg(pf); + if (err) + goto dcb_init_err; + + dev_info(&pf->pdev->dev, "DCBX offload supported\n"); + return err; + +dcb_init_err: + dev_err(dev, "DCB init failed\n"); + return err; +} + +/** + * ice_update_dcb_stats - Update DCB stats counters + * @pf: PF whose stats needs to be updated + */ +void ice_update_dcb_stats(struct ice_pf *pf) +{ + struct ice_hw_port_stats *prev_ps, *cur_ps; + struct ice_hw *hw = &pf->hw; + u8 pf_id = hw->pf_id; + int i; + + prev_ps = &pf->stats_prev; + cur_ps = &pf->stats; + + for (i = 0; i < 8; i++) { + ice_stat_update32(hw, GLPRT_PXOFFRXC(pf_id, i), + pf->stat_prev_loaded, + &prev_ps->priority_xoff_rx[i], + &cur_ps->priority_xoff_rx[i]); + ice_stat_update32(hw, GLPRT_PXONRXC(pf_id, i), + pf->stat_prev_loaded, + &prev_ps->priority_xon_rx[i], + &cur_ps->priority_xon_rx[i]); + ice_stat_update32(hw, GLPRT_PXONTXC(pf_id, i), + pf->stat_prev_loaded, + &prev_ps->priority_xon_tx[i], + &cur_ps->priority_xon_tx[i]); + ice_stat_update32(hw, GLPRT_PXOFFTXC(pf_id, i), + pf->stat_prev_loaded, + &prev_ps->priority_xoff_tx[i], + &cur_ps->priority_xoff_tx[i]); + ice_stat_update32(hw, GLPRT_RXON2OFFCNT(pf_id, i), + pf->stat_prev_loaded, + &prev_ps->priority_xon_2_xoff[i], + &cur_ps->priority_xon_2_xoff[i]); + } +} + +/** + * ice_tx_prepare_vlan_flags_dcb - prepare VLAN tagging for DCB + * @tx_ring: ring to send buffer on + * @first: pointer to struct ice_tx_buf + */ +int +ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring, + struct ice_tx_buf *first) +{ + struct sk_buff *skb = first->skb; + + if (!test_bit(ICE_FLAG_DCB_ENA, tx_ring->vsi->back->flags)) + return 0; + + /* Insert 802.1p priority into VLAN header */ + if ((first->tx_flags & (ICE_TX_FLAGS_HW_VLAN | ICE_TX_FLAGS_SW_VLAN)) || + skb->priority != TC_PRIO_CONTROL) { + first->tx_flags &= ~ICE_TX_FLAGS_VLAN_PR_M; + /* Mask the lower 3 bits to set the 802.1p priority */ + first->tx_flags |= (skb->priority & 0x7) << + ICE_TX_FLAGS_VLAN_PR_S; + if (first->tx_flags & ICE_TX_FLAGS_SW_VLAN) { + struct vlan_ethhdr *vhdr; + int rc; + + rc = skb_cow_head(skb, 0); + if (rc < 0) + return rc; + vhdr = (struct vlan_ethhdr *)skb->data; + vhdr->h_vlan_TCI = htons(first->tx_flags >> + ICE_TX_FLAGS_VLAN_S); + } else { + first->tx_flags |= ICE_TX_FLAGS_HW_VLAN; + } + } + + 
return 0; +} + +/** + * ice_dcb_process_lldp_set_mib_change - Process MIB change + * @pf: ptr to ice_pf + * @event: pointer to the admin queue receive event + */ +void +ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, + struct ice_rq_event_info *event) +{ + if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) { + struct ice_dcbx_cfg *dcbcfg, *prev_cfg; + int err; + + prev_cfg = &pf->hw.port_info->local_dcbx_cfg; + dcbcfg = devm_kmemdup(&pf->pdev->dev, prev_cfg, + sizeof(*dcbcfg), GFP_KERNEL); + if (!dcbcfg) + return; + + err = ice_lldp_to_dcb_cfg(event->msg_buf, dcbcfg); + if (!err) + ice_pf_dcb_cfg(pf, dcbcfg); + + devm_kfree(&pf->pdev->dev, dcbcfg); + + /* Get updated DCBx data from firmware */ + err = ice_get_dcb_cfg(pf->hw.port_info); + if (err) + dev_err(&pf->pdev->dev, + "Failed to get DCB config\n"); + } else { + dev_dbg(&pf->pdev->dev, + "MIB Change Event in HOST mode\n"); + } +} diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h new file mode 100644 index 000000000000..ca7b76faa03c --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2019, Intel Corporation. */ + +#ifndef _ICE_DCB_LIB_H_ +#define _ICE_DCB_LIB_H_ + +#include "ice.h" +#include "ice_lib.h" + +#ifdef CONFIG_DCB +#define ICE_TC_MAX_BW 100 /* Default Max BW percentage */ + +void ice_dcb_rebuild(struct ice_pf *pf); +u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg); +u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg); +void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi); +int ice_init_pf_dcb(struct ice_pf *pf); +void ice_update_dcb_stats(struct ice_pf *pf); +int +ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring, + struct ice_tx_buf *first); +void +ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, + struct ice_rq_event_info *event); +static inline void +ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring) +{ + tlan_ctx->cgd_num = ring->dcb_tc; +} +#else +#define ice_dcb_rebuild(pf) do {} while (0) + +static inline u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg __always_unused *dcbcfg) +{ + return ICE_DFLT_TRAFFIC_CLASS; +} + +static inline u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg __always_unused *dcbcfg) +{ + return 1; +} + +static inline int ice_init_pf_dcb(struct ice_pf *pf) +{ + dev_dbg(&pf->pdev->dev, "DCB not supported\n"); + return -EOPNOTSUPP; +} + +static inline int +ice_tx_prepare_vlan_flags_dcb(struct ice_ring __always_unused *tx_ring, + struct ice_tx_buf __always_unused *first) +{ + return 0; +} + +#define ice_update_dcb_stats(pf) do {} while (0) +#define ice_vsi_cfg_dcb_rings(vsi) do {} while (0) +#define ice_dcb_process_lldp_set_mib_change(pf, event) do {} while (0) +#define ice_set_cgd_num(tlan_ctx, ring) do {} while (0) +#endif /* CONFIG_DCB */ +#endif /* _ICE_DCB_LIB_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index eb8d149e317c..64a4c4456ba0 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -4,6 +4,8 @@ /* ethtool support for ice */ #include "ice.h" +#include "ice_lib.h" +#include "ice_dcb_lib.h" struct ice_stats { char stat_string[ETH_GSTRING_LEN]; @@ -33,8 +35,14 @@ static int ice_q_stats_len(struct net_device *netdev) #define ICE_PF_STATS_LEN ARRAY_SIZE(ice_gstrings_pf_stats) #define ICE_VSI_STATS_LEN ARRAY_SIZE(ice_gstrings_vsi_stats) -#define ICE_ALL_STATS_LEN(n) (ICE_PF_STATS_LEN + ICE_VSI_STATS_LEN + \ - 
ice_q_stats_len(n)) +#define ICE_PFC_STATS_LEN ( \ + (FIELD_SIZEOF(struct ice_pf, stats.priority_xoff_rx) + \ + FIELD_SIZEOF(struct ice_pf, stats.priority_xon_rx) + \ + FIELD_SIZEOF(struct ice_pf, stats.priority_xoff_tx) + \ + FIELD_SIZEOF(struct ice_pf, stats.priority_xon_tx)) \ + / sizeof(u64)) +#define ICE_ALL_STATS_LEN(n) (ICE_PF_STATS_LEN + ICE_PFC_STATS_LEN + \ + ICE_VSI_STATS_LEN + ice_q_stats_len(n)) static const struct ice_stats ice_gstrings_vsi_stats[] = { ICE_VSI_STAT("tx_unicast", eth_stats.tx_unicast), @@ -126,6 +134,7 @@ struct ice_priv_flag { static const struct ice_priv_flag ice_gstrings_priv_flags[] = { ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA), + ICE_PRIV_FLAG("disable-fw-lldp", ICE_FLAG_DISABLE_FW_LLDP), }; #define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags) @@ -309,6 +318,22 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data) p += ETH_GSTRING_LEN; } + for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) { + snprintf(p, ETH_GSTRING_LEN, + "port.tx-priority-%u-xon", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, + "port.tx-priority-%u-xoff", i); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) { + snprintf(p, ETH_GSTRING_LEN, + "port.rx-priority-%u-xon", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, + "port.rx-priority-%u-xoff", i); + p += ETH_GSTRING_LEN; + } break; case ETH_SS_PRIV_FLAGS: for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) { @@ -382,13 +407,19 @@ static u32 ice_get_priv_flags(struct net_device *netdev) static int ice_set_priv_flags(struct net_device *netdev, u32 flags) { struct ice_netdev_priv *np = netdev_priv(netdev); + DECLARE_BITMAP(change_flags, ICE_PF_FLAGS_NBITS); + DECLARE_BITMAP(orig_flags, ICE_PF_FLAGS_NBITS); struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; + int ret = 0; u32 i; if (flags > BIT(ICE_PRIV_FLAG_ARRAY_SIZE)) return -EINVAL; + set_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags); + + bitmap_copy(orig_flags, pf->flags, ICE_PF_FLAGS_NBITS); for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) { const struct ice_priv_flag *priv_flag; @@ -400,7 +431,79 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) clear_bit(priv_flag->bitno, pf->flags); } - return 0; + bitmap_xor(change_flags, pf->flags, orig_flags, ICE_PF_FLAGS_NBITS); + + if (test_bit(ICE_FLAG_DISABLE_FW_LLDP, change_flags)) { + if (test_bit(ICE_FLAG_DISABLE_FW_LLDP, pf->flags)) { + enum ice_status status; + + status = ice_aq_cfg_lldp_mib_change(&pf->hw, false, + NULL); + /* If unregistering for LLDP events fails, this is + * not an error state, as there shouldn't be any + * events to respond to. + */ + if (status) + dev_info(&pf->pdev->dev, + "Failed to unreg for LLDP events\n"); + + /* The AQ call to stop the FW LLDP agent will generate + * an error if the agent is already stopped. 
+ */ + status = ice_aq_stop_lldp(&pf->hw, true, NULL); + if (status) + dev_warn(&pf->pdev->dev, + "Fail to stop LLDP agent\n"); + /* Use case for having the FW LLDP agent stopped + * will likely not need DCB, so failure to init is + * not a concern of ethtool + */ + status = ice_init_pf_dcb(pf); + if (status) + dev_warn(&pf->pdev->dev, "Fail to init DCB\n"); + } else { + enum ice_status status; + bool dcbx_agent_status; + + /* AQ command to start FW LLDP agent will return an + * error if the agent is already started + */ + status = ice_aq_start_lldp(&pf->hw, NULL); + if (status) + dev_warn(&pf->pdev->dev, + "Fail to start LLDP Agent\n"); + + /* AQ command to start FW DCBx agent will fail if + * the agent is already started + */ + status = ice_aq_start_stop_dcbx(&pf->hw, true, + &dcbx_agent_status, + NULL); + if (status) + dev_dbg(&pf->pdev->dev, + "Failed to start FW DCBX\n"); + + dev_info(&pf->pdev->dev, "FW DCBX agent is %s\n", + dcbx_agent_status ? "ACTIVE" : "DISABLED"); + + /* Failure to configure MIB change or init DCB is not + * relevant to ethtool. Print notification that + * registration/init failed but do not return error + * state to ethtool + */ + status = ice_aq_cfg_lldp_mib_change(&pf->hw, false, + NULL); + if (status) + dev_dbg(&pf->pdev->dev, + "Fail to reg for MIB change\n"); + + status = ice_init_pf_dcb(pf); + if (status) + dev_dbg(&pf->pdev->dev, "Fail to init DCB\n"); + } + } + clear_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags); + return ret; } static int ice_get_sset_count(struct net_device *netdev, int sset) @@ -486,6 +589,16 @@ ice_get_ethtool_stats(struct net_device *netdev, data[i++] = (ice_gstrings_pf_stats[j].sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } + + for (j = 0; j < ICE_MAX_USER_PRIORITY; j++) { + data[i++] = pf->stats.priority_xon_tx[j]; + data[i++] = pf->stats.priority_xoff_tx[j]; + } + + for (j = 0; j < ICE_MAX_USER_PRIORITY; j++) { + data[i++] = pf->stats.priority_xon_rx[j]; + data[i++] = pf->stats.priority_xoff_rx[j]; + } } /** @@ -811,7 +924,7 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks, link_info = &vsi->port_info->phy.link_info; - /* Initialize supported and advertised settings based on phy settings */ + /* Initialize supported and advertised settings based on PHY settings */ switch (link_info->phy_type_low) { case ICE_PHY_TYPE_LOW_100BASE_TX: ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); @@ -1140,7 +1253,7 @@ ice_get_settings_link_down(struct ethtool_link_ksettings *ks, struct net_device __always_unused *netdev) { /* link is down and the driver needs to fall back on - * supported phy types to figure out what info to display + * supported PHY types to figure out what info to display */ ice_phy_type_to_ethtool(netdev, ks); @@ -1156,8 +1269,9 @@ ice_get_settings_link_down(struct ethtool_link_ksettings *ks, * * Reports speed/duplex settings based on media_type */ -static int ice_get_link_ksettings(struct net_device *netdev, - struct ethtool_link_ksettings *ks) +static int +ice_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *ks) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_link_status *hw_link_info; @@ -1349,7 +1463,7 @@ ice_setup_autoneg(struct ice_port_info *p, struct ethtool_link_ksettings *ks, } else { /* If autoneg is currently enabled */ if (p->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) { - /* If autoneg is supported 10GBASE_T is the only phy + /* If autoneg is supported 10GBASE_T is the only PHY * that can disable it, so otherwise return error */ if 
(ethtool_link_ksettings_test_link_mode(ks, @@ -1399,14 +1513,13 @@ ice_set_link_ksettings(struct net_device *netdev, if (!p) return -EOPNOTSUPP; - /* Check if this is lan vsi */ - for (idx = 0 ; idx < pf->num_alloc_vsi ; idx++) { + /* Check if this is LAN VSI */ + ice_for_each_vsi(pf, idx) if (pf->vsi[idx]->type == ICE_VSI_PF) { if (np->vsi != pf->vsi[idx]) return -EOPNOTSUPP; break; } - } if (p->phy.media_type != ICE_MEDIA_BASET && p->phy.media_type != ICE_MEDIA_FIBER && @@ -1464,7 +1577,7 @@ ice_set_link_ksettings(struct net_device *netdev, if (!abilities) return -ENOMEM; - /* Get the current phy config */ + /* Get the current PHY config */ status = ice_aq_get_phy_caps(p, false, ICE_AQC_REPORT_SW_CFG, abilities, NULL); if (status) { @@ -1559,15 +1672,16 @@ done: } /** - * ice_get_rxnfc - command to get RX flow classification rules + * ice_get_rxnfc - command to get Rx flow classification rules * @netdev: network interface device structure * @cmd: ethtool rxnfc command * @rule_locs: buffer to rturn Rx flow classification rules * * Returns Success if the command is supported. */ -static int ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, - u32 __always_unused *rule_locs) +static int +ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, + u32 __always_unused *rule_locs) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; @@ -1821,18 +1935,21 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) struct ice_port_info *pi = np->vsi->port_info; struct ice_aqc_get_phy_caps_data *pcaps; struct ice_vsi *vsi = np->vsi; + struct ice_dcbx_cfg *dcbx_cfg; enum ice_status status; /* Initialize pause params */ pause->rx_pause = 0; pause->tx_pause = 0; + dcbx_cfg = &pi->local_dcbx_cfg; + pcaps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*pcaps), GFP_KERNEL); if (!pcaps) return; - /* Get current phy config */ + /* Get current PHY config */ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, NULL); if (status) @@ -1841,6 +1958,10 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) pause->autoneg = ((pcaps->caps & ICE_AQC_PHY_AN_MODE) ? AUTONEG_ENABLE : AUTONEG_DISABLE); + if (dcbx_cfg->pfc.pfcena) + /* PFC enabled so report LFC as off */ + goto out; + if (pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) pause->tx_pause = 1; if (pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) @@ -1861,6 +1982,7 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_link_status *hw_link_info; struct ice_pf *pf = np->vsi->back; + struct ice_dcbx_cfg *dcbx_cfg; struct ice_vsi *vsi = np->vsi; struct ice_hw *hw = &pf->hw; struct ice_port_info *pi; @@ -1871,6 +1993,7 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) pi = vsi->port_info; hw_link_info = &pi->phy.link_info; + dcbx_cfg = &pi->local_dcbx_cfg; link_up = hw_link_info->link_info & ICE_AQ_LINK_UP; /* Changing the port's flow control is not supported if this isn't the @@ -1893,6 +2016,10 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n"); } + if (dcbx_cfg->pfc.pfcena) { + netdev_info(netdev, "Priority flow control enabled. 
Cannot set link flow control.\n"); + return -EOPNOTSUPP; + } if (pause->rx_pause && pause->tx_pause) pi->fc.req_mode = ICE_FC_FULL; else if (pause->rx_pause && !pause->tx_pause) @@ -2021,11 +2148,12 @@ out: * @key: hash key * @hfunc: hash function * - * Returns -EINVAL if the table specifies an invalid queue id, otherwise + * Returns -EINVAL if the table specifies an invalid queue ID, otherwise * returns 0 after programming the table. */ -static int ice_set_rxfh(struct net_device *netdev, const u32 *indir, - const u8 *key, const u8 hfunc) +static int +ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, + const u8 hfunc) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; @@ -2087,7 +2215,7 @@ enum ice_container_type { /** * ice_get_rc_coalesce - get ITR values for specific ring container * @ec: ethtool structure to fill with driver's coalesce settings - * @c_type: container type, RX or TX + * @c_type: container type, Rx or Tx * @rc: ring container that the ITR values will come from * * Query the device for ice_ring_container specific ITR values. This is @@ -2180,15 +2308,16 @@ ice_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec) return __ice_get_coalesce(netdev, ec, -1); } -static int ice_get_per_q_coalesce(struct net_device *netdev, u32 q_num, - struct ethtool_coalesce *ec) +static int +ice_get_per_q_coalesce(struct net_device *netdev, u32 q_num, + struct ethtool_coalesce *ec) { return __ice_get_coalesce(netdev, ec, q_num); } /** * ice_set_rc_coalesce - set ITR values for specific ring container - * @c_type: container type, RX or TX + * @c_type: container type, Rx or Tx * @ec: ethtool structure from user to update ITR settings * @rc: ring container that the ITR values will come from * @vsi: VSI associated to the ring container @@ -2325,8 +2454,9 @@ ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec) return __ice_set_coalesce(netdev, ec, -1); } -static int ice_set_per_q_coalesce(struct net_device *netdev, u32 q_num, - struct ethtool_coalesce *ec) +static int +ice_set_per_q_coalesce(struct net_device *netdev, u32 q_num, + struct ethtool_coalesce *ec) { return __ice_set_coalesce(netdev, ec, q_num); } diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index 6bf5cc064270..e172ca002a0a 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -49,6 +49,9 @@ #define PF_MBX_ATQLEN_ATQLEN_M ICE_M(0x3FF, 0) #define PF_MBX_ATQLEN_ATQENABLE_M BIT(31) #define PF_MBX_ATQT 0x0022E300 +#define PRTDCB_GENS 0x00083020 +#define PRTDCB_GENS_DCBX_STATUS_S 0 +#define PRTDCB_GENS_DCBX_STATUS_M ICE_M(0x7, 0) #define GLFLXP_RXDID_FLAGS(_i, _j) (0x0045D000 + ((_i) * 4 + (_j) * 256)) #define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S 0 #define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M ICE_M(0x3F, 0) @@ -106,6 +109,16 @@ #define VPGEN_VFRTRIG_VFSWR_M BIT(0) #define PFHMC_ERRORDATA 0x00520500 #define PFHMC_ERRORINFO 0x00520400 +#define GLINT_CTL 0x0016CC54 +#define GLINT_CTL_DIS_AUTOMASK_M BIT(0) +#define GLINT_CTL_ITR_GRAN_200_S 16 +#define GLINT_CTL_ITR_GRAN_200_M ICE_M(0xF, 16) +#define GLINT_CTL_ITR_GRAN_100_S 20 +#define GLINT_CTL_ITR_GRAN_100_M ICE_M(0xF, 20) +#define GLINT_CTL_ITR_GRAN_50_S 24 +#define GLINT_CTL_ITR_GRAN_50_M ICE_M(0xF, 24) +#define GLINT_CTL_ITR_GRAN_25_S 28 +#define GLINT_CTL_ITR_GRAN_25_M ICE_M(0xF, 28) #define GLINT_DYN_CTL(_INT) (0x00160000 + ((_INT) * 4)) #define GLINT_DYN_CTL_INTENA_M BIT(0) 
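For reference, the new GLINT_CTL granularity fields above are 4-bit fields at bit offsets 16/20/24/28, expressed with the driver's mask-at-shift helper. A minimal illustration of how these masks compose (ICE_M is assumed to be the usual shift helper used throughout these register headers):

	/* ICE_M(mask, shift): position a field mask within a register word */
	#define ICE_M(m, s)	((m) << (s))

	/* e.g. GLINT_CTL_ITR_GRAN_200_M == ICE_M(0xF, 16) == 0x000F0000, so a
	 * 2 usec granularity for that field is written as (2 << 16) & 0x000F0000
	 */
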
#define GLINT_DYN_CTL_CLEARPBA_M BIT(1) @@ -168,6 +181,8 @@ #define VPINT_ALLOC_PCI_LAST_S 12 #define VPINT_ALLOC_PCI_LAST_M ICE_M(0x7FF, 12) #define VPINT_ALLOC_PCI_VALID_M BIT(31) +#define VPINT_MBX_CTL(_VSI) (0x0016A000 + ((_VSI) * 4)) +#define VPINT_MBX_CTL_CAUSE_ENA_M BIT(30) #define GLLAN_RCTL_0 0x002941F8 #define QRX_CONTEXT(_i, _QRX) (0x00280000 + ((_i) * 8192 + (_QRX) * 4)) #define QRX_CTRL(_QRX) (0x00120000 + ((_QRX) * 4)) @@ -306,11 +321,16 @@ #define GLPRT_PTC64L(_i) (0x00380B80 + ((_i) * 8)) #define GLPRT_PTC9522H(_i) (0x00380D04 + ((_i) * 8)) #define GLPRT_PTC9522L(_i) (0x00380D00 + ((_i) * 8)) +#define GLPRT_PXOFFRXC(_i, _j) (0x00380500 + ((_i) * 8 + (_j) * 64)) +#define GLPRT_PXOFFTXC(_i, _j) (0x00380F40 + ((_i) * 8 + (_j) * 64)) +#define GLPRT_PXONRXC(_i, _j) (0x00380300 + ((_i) * 8 + (_j) * 64)) +#define GLPRT_PXONTXC(_i, _j) (0x00380D40 + ((_i) * 8 + (_j) * 64)) #define GLPRT_RFC(_i) (0x00380AC0 + ((_i) * 8)) #define GLPRT_RJC(_i) (0x00380B00 + ((_i) * 8)) #define GLPRT_RLEC(_i) (0x00380140 + ((_i) * 8)) #define GLPRT_ROC(_i) (0x00380240 + ((_i) * 8)) #define GLPRT_RUC(_i) (0x00380200 + ((_i) * 8)) +#define GLPRT_RXON2OFFCNT(_i, _j) (0x00380700 + ((_i) * 8 + (_j) * 64)) #define GLPRT_TDOLD(_i) (0x00381280 + ((_i) * 8)) #define GLPRT_UPRCH(_i) (0x00381304 + ((_i) * 8)) #define GLPRT_UPRCL(_i) (0x00381300 + ((_i) * 8)) diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index ef4c79b5aa32..510a8c900e61 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h @@ -20,7 +20,7 @@ union ice_32byte_rx_desc { } lo_dword; union { __le32 rss; /* RSS Hash */ - __le32 fd_id; /* Flow Director filter id */ + __le32 fd_id; /* Flow Director filter ID */ } hi_dword; } qword0; struct { @@ -99,7 +99,7 @@ enum ice_rx_ptype_payload_layer { ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3, }; -/* RX Flex Descriptor +/* Rx Flex Descriptor * This descriptor is used instead of the legacy version descriptor when * ice_rlan_ctx.adv_desc is set */ @@ -113,7 +113,7 @@ union ice_32b_rx_flex_desc { } read; struct { /* Qword 0 */ - u8 rxdid; /* descriptor builder profile id */ + u8 rxdid; /* descriptor builder profile ID */ u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */ __le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */ __le16 pkt_len; /* [15:14] are reserved */ @@ -149,7 +149,7 @@ union ice_32b_rx_flex_desc { /* Rx Flex Descriptor NIC Profile * This descriptor corresponds to RxDID 2 which contains - * metadata fields for RSS, flow id and timestamp info + * metadata fields for RSS, flow ID and timestamp info */ struct ice_32b_rx_flex_desc_nic { /* Qword 0 */ @@ -208,23 +208,23 @@ enum ice_flex_rx_mdid { ICE_RX_MDID_HASH_HIGH, }; -/* Rx Flag64 packet flag bits */ -enum ice_rx_flg64_bits { - ICE_RXFLG_PKT_DSI = 0, - ICE_RXFLG_EVLAN_x8100 = 15, - ICE_RXFLG_EVLAN_x9100, - ICE_RXFLG_VLAN_x8100, - ICE_RXFLG_TNL_MAC = 22, - ICE_RXFLG_TNL_VLAN, - ICE_RXFLG_PKT_FRG, - ICE_RXFLG_FIN = 32, - ICE_RXFLG_SYN, - ICE_RXFLG_RST, - ICE_RXFLG_TNL0 = 38, - ICE_RXFLG_TNL1, - ICE_RXFLG_TNL2, - ICE_RXFLG_UDP_GRE, - ICE_RXFLG_RSVD = 63 +/* Rx/Tx Flag64 packet flag bits */ +enum ice_flg64_bits { + ICE_FLG_PKT_DSI = 0, + ICE_FLG_EVLAN_x8100 = 15, + ICE_FLG_EVLAN_x9100, + ICE_FLG_VLAN_x8100, + ICE_FLG_TNL_MAC = 22, + ICE_FLG_TNL_VLAN, + ICE_FLG_PKT_FRG, + ICE_FLG_FIN = 32, + ICE_FLG_SYN, + ICE_FLG_RST, + ICE_FLG_TNL0 = 38, + ICE_FLG_TNL1, + ICE_FLG_TNL2, + ICE_FLG_UDP_GRE, + ICE_FLG_RSVD = 63 }; /* for 
ice_32byte_rx_flex_desc.ptype_flexi_flags0 member */ @@ -322,7 +322,7 @@ enum ice_rlan_ctx_rx_hsplit_1 { ICE_RLAN_RX_HSPLIT_1_SPLIT_ALWAYS = 2, }; -/* TX Descriptor */ +/* Tx Descriptor */ struct ice_tx_desc { __le64 buf_addr; /* Address of descriptor's data buf */ __le64 cmd_type_offset_bsz; @@ -342,12 +342,12 @@ enum ice_tx_desc_cmd_bits { ICE_TX_DESC_CMD_EOP = 0x0001, ICE_TX_DESC_CMD_RS = 0x0002, ICE_TX_DESC_CMD_IL2TAG1 = 0x0008, - ICE_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */ - ICE_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */ - ICE_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */ - ICE_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */ - ICE_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */ - ICE_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */ + ICE_TX_DESC_CMD_IIPT_IPV6 = 0x0020, + ICE_TX_DESC_CMD_IIPT_IPV4 = 0x0040, + ICE_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, + ICE_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, + ICE_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, + ICE_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, }; #define ICE_TXD_QW1_OFFSET_S 16 diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index fa61203bee26..f31129e4e9cf 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -3,6 +3,7 @@ #include "ice.h" #include "ice_lib.h" +#include "ice_dcb_lib.h" /** * ice_setup_rx_ctx - Configure a receive ring context @@ -73,7 +74,7 @@ static int ice_setup_rx_ctx(struct ice_ring *ring) regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) & QRXFLXP_CNTXT_RXDID_IDX_M; - /* increasing context priority to pick up profile id; + /* increasing context priority to pick up profile ID; * default is 0x01; setting to 0x03 to ensure profile * is programming if prev context is of same priority */ @@ -124,6 +125,8 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q) /* Transmit Queue Length */ tlan_ctx->qlen = ring->count; + ice_set_cgd_num(tlan_ctx, ring); + /* PF number */ tlan_ctx->pf_num = hw->pf_id; @@ -138,7 +141,7 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q) tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF; break; case ICE_VSI_VF: - /* Firmware expects vmvf_num to be absolute VF id */ + /* Firmware expects vmvf_num to be absolute VF ID */ tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id; tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF; break; @@ -175,17 +178,14 @@ static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena) int i; for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) { - u32 rx_reg = rd32(&pf->hw, QRX_CTRL(pf_q)); - - if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M)) - break; + if (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) & + QRX_CTRL_QENA_STAT_M)) + return 0; usleep_range(20, 40); } - if (i >= ICE_Q_WAIT_MAX_RETRY) - return -ETIMEDOUT; - return 0; + return -ETIMEDOUT; } /** @@ -279,25 +279,50 @@ err_txrings: } /** - * ice_vsi_set_num_qs - Set num queues, descriptors and vectors for a VSI + * ice_vsi_set_num_desc - Set number of descriptors for queues on this VSI + * @vsi: the VSI being configured + */ +static void ice_vsi_set_num_desc(struct ice_vsi *vsi) +{ + switch (vsi->type) { + case ICE_VSI_PF: + vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC; + vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC; + break; + default: + dev_dbg(&vsi->back->pdev->dev, + "Not setting number of Tx/Rx descriptors for VSI type %d\n", + vsi->type); + break; + } +} + +/** + * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI * @vsi: the VSI being configured + * 
@vf_id: ID of the VF being configured
  *
  */
-static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
+static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
 {
 	struct ice_pf *pf = vsi->back;
+	struct ice_vf *vf = NULL;
+
+	if (vsi->type == ICE_VSI_VF)
+		vsi->vf_id = vf_id;
+
 	switch (vsi->type) {
 	case ICE_VSI_PF:
 		vsi->alloc_txq = pf->num_lan_tx;
 		vsi->alloc_rxq = pf->num_lan_rx;
-		vsi->num_desc = ALIGN(ICE_DFLT_NUM_DESC, ICE_REQ_DESC_MULTIPLE);
 		vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx);
 		break;
 	case ICE_VSI_VF:
-		vsi->alloc_txq = pf->num_vf_qps;
-		vsi->alloc_rxq = pf->num_vf_qps;
+		vf = &pf->vf[vsi->vf_id];
+		vsi->alloc_txq = vf->num_vf_qs;
+		vsi->alloc_rxq = vf->num_vf_qs;
 		/* pf->num_vf_msix includes (VF miscellaneous vector +
 		 * data queue interrupts). Since vsi->num_q_vectors is number
 		 * of queue vectors, subtract 1 from the original vector
@@ -310,6 +335,8 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
 			vsi->type);
 		break;
 	}
+
+	ice_vsi_set_num_desc(vsi);
 }
 
 /**
@@ -455,10 +482,12 @@ static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
  * ice_vsi_alloc - Allocates the next available struct VSI in the PF
  * @pf: board private structure
  * @type: type of VSI
+ * @vf_id: ID of the VF being configured
  *
  * returns a pointer to a VSI on success, NULL on failure.
  */
-static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type)
+static struct ice_vsi *
+ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id)
 {
 	struct ice_vsi *vsi = NULL;
@@ -484,7 +513,10 @@ static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type)
 	vsi->idx = pf->next_vsi;
 	vsi->work_lmt = ICE_DFLT_IRQ_WORK;
 
-	ice_vsi_set_num_qs(vsi);
+	if (type == ICE_VSI_VF)
+		ice_vsi_set_num_qs(vsi, vf_id);
+	else
+		ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);
 
 	switch (vsi->type) {
 	case ICE_VSI_PF:
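With the vf_id plumbing above, a VF VSI now takes its queue counts from its own ice_vf entry rather than a global per-PF value. A hypothetical creation call for a VF VSI, just to show the flow (vf here is an assumed, already-initialized struct ice_vf pointer; not taken from the patch):

	/* Illustrative only: ice_vsi_setup() forwards vf_id to ice_vsi_alloc(),
	 * which in turn hands it to ice_vsi_set_num_qs()
	 */
	struct ice_vsi *vsi;

	vsi = ice_vsi_setup(pf, pf->hw.port_info, ICE_VSI_VF, vf->vf_id);
	if (!vsi)
		dev_err(&pf->pdev->dev, "VF %u VSI setup failed\n", vf->vf_id);
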
@@ -579,11 +611,10 @@ err_scatter:
 /**
  * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
- * @qs_cfg: gathered variables needed for PF->VSI queues assignment
+ * @qs_cfg: gathered variables needed for pf->vsi queues assignment
  *
- * This is an internal function for assigning queues from the PF to VSI and
- * initially tries to find contiguous space. If it is not successful to find
- * contiguous space, then it tries with the scatter approach.
+ * This function first tries to find contiguous space. If it is not successful,
+ * it tries with the scatter approach.
  *
  * Return 0 on success and -ENOMEM if no space is left in the PF queue bitmap
  */
@@ -827,7 +858,7 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
 	/* find the (rounded up) power-of-2 of qcount */
 	pow = order_base_2(qcount_rx);
 
-	for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
+	ice_for_each_traffic_class(i) {
 		if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
 			/* TC is not enabled */
 			vsi->tc_cfg.tc_info[i].qoffset = 0;
@@ -852,7 +883,18 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
 		tx_count += tx_numq_tc;
 		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
 	}
-	vsi->num_rxq = offset;
+
+	/* if offset is non-zero, it was calculated from the enabled TCs for
+	 * this VSI; otherwise fall back to qcount_rx, which is always
+	 * non-zero because it is derived from the VSI's allocated Rx queues
+	 * (at least 1, so the Tx count below is also at least 1)
+	 */
+	if (offset)
+		vsi->num_rxq = offset;
+	else
+		vsi->num_rxq = qcount_rx;
+
 	vsi->num_txq = tx_count;
 
 	if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
@@ -923,6 +965,7 @@ static int ice_vsi_init(struct ice_vsi *vsi)
 	if (!ctxt)
 		return -ENOMEM;
 
+	ctxt->info = vsi->info;
 	switch (vsi->type) {
 	case ICE_VSI_PF:
 		ctxt->flags = ICE_AQ_VSI_TYPE_PF;
@@ -948,6 +991,14 @@ static int ice_vsi_init(struct ice_vsi *vsi)
 	ctxt->info.sw_id = vsi->port_info->sw_id;
 	ice_vsi_setup_q_map(vsi, ctxt);
 
+	/* Enable MAC antispoof when a new VSI is initialized or updated */
+	if (vsi->type == ICE_VSI_VF && pf->vf[vsi->vf_id].spoofchk) {
+		ctxt->info.valid_sections |=
+			cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
+		ctxt->info.sec_flags |=
+			ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
+	}
+
 	ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
 	if (ret) {
 		dev_err(&pf->pdev->dev,
@@ -1215,7 +1266,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
 		ring->ring_active = false;
 		ring->vsi = vsi;
 		ring->dev = &pf->pdev->dev;
-		ring->count = vsi->num_desc;
+		ring->count = vsi->num_tx_desc;
 		vsi->tx_rings[i] = ring;
 	}
 
@@ -1234,7 +1285,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
 		ring->vsi = vsi;
 		ring->netdev = vsi->netdev;
 		ring->dev = &pf->pdev->dev;
-		ring->count = vsi->num_desc;
+		ring->count = vsi->num_rx_desc;
 		vsi->rx_rings[i] = ring;
 	}
 
@@ -1253,7 +1304,11 @@ err_out:
  * through the MSI-X enabling code. On a constrained vector budget, we map Tx
  * and Rx rings to the vector as "efficiently" as possible.
  */
+#ifdef CONFIG_DCB
+void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
+#else
 static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
+#endif /* CONFIG_DCB */
 {
 	int q_vectors = vsi->num_q_vectors;
 	int tx_rings_rem, rx_rings_rem;
@@ -1397,12 +1452,12 @@ ice_vsi_cfg_rss_exit:
 }
 
 /**
- * ice_add_mac_to_list - Add a mac address filter entry to the list
+ * ice_add_mac_to_list - Add a MAC address filter entry to the list
  * @vsi: the VSI to be forwarded to
 * @add_list: pointer to the list which contains MAC filter entries
 * @macaddr: the MAC address to be added.
 *
- * Adds mac address filter entry to the temp list
+ * Adds MAC address filter entry to the temp list
 *
 * Returns 0 on success or ENOMEM on failure. 
*/ @@ -1504,7 +1559,7 @@ void ice_free_fltr_list(struct device *dev, struct list_head *h) /** * ice_vsi_add_vlan - Add VSI membership for given VLAN * @vsi: the VSI being configured - * @vid: VLAN id to be added + * @vid: VLAN ID to be added */ int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid) { @@ -1542,7 +1597,7 @@ int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid) /** * ice_vsi_kill_vlan - Remove VSI membership for a given VLAN * @vsi: the VSI being configured - * @vid: VLAN id to be removed + * @vid: VLAN ID to be removed * * Returns 0 on success and negative on failure */ @@ -1640,7 +1695,7 @@ ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset) num_q_grps = 1; /* set up and configure the Tx queues for each enabled TC */ - for (tc = 0; tc < ICE_MAX_TRAFFIC_CLASS; tc++) { + ice_for_each_traffic_class(tc) { if (!(vsi->tc_cfg.ena_tc & BIT(tc))) break; @@ -1717,6 +1772,37 @@ static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran) } /** + * ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set + * @hw: board specific structure + */ +static void ice_cfg_itr_gran(struct ice_hw *hw) +{ + u32 regval = rd32(hw, GLINT_CTL); + + /* no need to update global register if ITR gran is already set */ + if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) && + (((regval & GLINT_CTL_ITR_GRAN_200_M) >> + GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) && + (((regval & GLINT_CTL_ITR_GRAN_100_M) >> + GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) && + (((regval & GLINT_CTL_ITR_GRAN_50_M) >> + GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) && + (((regval & GLINT_CTL_ITR_GRAN_25_M) >> + GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US)) + return; + + regval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) & + GLINT_CTL_ITR_GRAN_200_M) | + ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) & + GLINT_CTL_ITR_GRAN_100_M) | + ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) & + GLINT_CTL_ITR_GRAN_50_M) | + ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) & + GLINT_CTL_ITR_GRAN_25_M); + wr32(hw, GLINT_CTL, regval); +} + +/** * ice_cfg_itr - configure the initial interrupt throttle values * @hw: pointer to the HW structure * @q_vector: interrupt vector that's being configured @@ -1728,6 +1814,8 @@ static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran) static void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector) { + ice_cfg_itr_gran(hw); + if (q_vector->num_ring_rx) { struct ice_ring_container *rc = &q_vector->rx; @@ -1738,7 +1826,6 @@ ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector) rc->target_itr = ITR_TO_REG(rc->itr_setting); rc->next_update = jiffies + 1; rc->current_itr = rc->target_itr; - rc->latency_range = ICE_LOW_LATENCY; wr32(hw, GLINT_ITR(rc->itr_idx, vector), ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S); } @@ -1753,7 +1840,6 @@ ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector) rc->target_itr = ITR_TO_REG(rc->itr_setting); rc->next_update = jiffies + 1; rc->current_itr = rc->target_itr; - rc->latency_range = ICE_LOW_LATENCY; wr32(hw, GLINT_ITR(rc->itr_idx, vector), ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S); } @@ -1937,7 +2023,7 @@ int ice_vsi_stop_rx_rings(struct ice_vsi *vsi) * ice_vsi_stop_tx_rings - Disable Tx rings * @vsi: the VSI being configured * @rst_src: reset source - * @rel_vmvf_num: Relative id of VF/VM + * @rel_vmvf_num: Relative ID of VF/VM * @rings: Tx ring array to be stopped * @offset: offset within vsi->txq_map */ @@ -2023,10 +2109,11 @@ err_alloc_q_ids: * ice_vsi_stop_lan_tx_rings - Disable LAN Tx 
rings * @vsi: the VSI being configured * @rst_src: reset source - * @rel_vmvf_num: Relative id of VF/VM + * @rel_vmvf_num: Relative ID of VF/VM */ -int ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, - enum ice_disq_rst_src rst_src, u16 rel_vmvf_num) +int +ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, + u16 rel_vmvf_num) { return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, 0); @@ -2036,10 +2123,11 @@ int ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI * @vsi: VSI to enable or disable VLAN pruning on * @ena: set to true to enable VLAN pruning and false to disable it + * @vlan_promisc: enable valid security flags if not in VLAN promiscuous mode * * returns 0 if VSI is updated, negative otherwise */ -int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena) +int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc) { struct ice_vsi_ctx *ctxt; struct device *dev; @@ -2067,8 +2155,10 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena) ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; } - ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID | - ICE_AQ_VSI_PROP_SW_VALID); + if (!vlan_promisc) + ctxt->info.valid_sections = + cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID | + ICE_AQ_VSI_PROP_SW_VALID); status = ice_update_vsi(&vsi->back->hw, vsi->idx, ctxt, NULL); if (status) { @@ -2089,12 +2179,20 @@ err_out: return -EIO; } +static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi) +{ + struct ice_dcbx_cfg *cfg = &vsi->port_info->local_dcbx_cfg; + + vsi->tc_cfg.ena_tc = ice_dcb_get_ena_tc(cfg); + vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg); +} + /** * ice_vsi_setup - Set up a VSI by a given type * @pf: board private structure * @pi: pointer to the port_info instance * @type: VSI type - * @vf_id: defines VF id to which this VSI connects. This field is meant to be + * @vf_id: defines VF ID to which this VSI connects. This field is meant to be * used only for ICE_VSI_VF VSI type. For other VSI types, should * fill-in ICE_INVAL_VFID as input. 
 *
@@ -2112,7 +2210,11 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
 	struct ice_vsi *vsi;
 	int ret, i;
 
-	vsi = ice_vsi_alloc(pf, type);
+	if (type == ICE_VSI_VF)
+		vsi = ice_vsi_alloc(pf, type, vf_id);
+	else
+		vsi = ice_vsi_alloc(pf, type, ICE_INVAL_VFID);
+
 	if (!vsi) {
 		dev_err(dev, "could not allocate VSI\n");
 		return NULL;
@@ -2132,7 +2234,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
 	/* set RSS capabilities */
 	ice_vsi_set_rss_params(vsi);
 
-	/* set tc configuration */
+	/* set TC configuration */
 	ice_vsi_set_tc_cfg(vsi);
 
 	/* create the VSI */
@@ -2596,6 +2698,7 @@ int ice_vsi_release(struct ice_vsi *vsi)
 int ice_vsi_rebuild(struct ice_vsi *vsi)
 {
 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+	struct ice_vf *vf = NULL;
 	struct ice_pf *pf;
 	int ret, i;
 
@@ -2603,16 +2706,38 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
 		return -EINVAL;
 
 	pf = vsi->back;
+	if (vsi->type == ICE_VSI_VF)
+		vf = &pf->vf[vsi->vf_id];
+
 	ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
 	ice_vsi_free_q_vectors(vsi);
-	ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
-	ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector, vsi->idx);
-	vsi->sw_base_vector = 0;
+
+	if (vsi->type != ICE_VSI_VF) {
+		/* reclaim SW interrupts back to the common pool */
+		ice_free_res(pf->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
+		pf->num_avail_sw_msix += vsi->num_q_vectors;
+		vsi->sw_base_vector = 0;
+		/* reclaim HW interrupts back to the common pool */
+		ice_free_res(pf->hw_irq_tracker, vsi->hw_base_vector,
+			     vsi->idx);
+		pf->num_avail_hw_msix += vsi->num_q_vectors;
+	} else {
+		/* Reclaim VF resources back to the common pool for reset
+		 * and rebuild, with vector reassignment
+		 */
+		ice_free_res(pf->hw_irq_tracker, vf->first_vector_idx,
+			     vsi->idx);
+		pf->num_avail_hw_msix += pf->num_vf_msix;
+	}
 	vsi->hw_base_vector = 0;
+
 	ice_vsi_clear_rings(vsi);
 	ice_vsi_free_arrays(vsi, false);
 	ice_dev_onetime_setup(&vsi->back->hw);
-	ice_vsi_set_num_qs(vsi);
+	if (vsi->type == ICE_VSI_VF)
+		ice_vsi_set_num_qs(vsi, vf->vf_id);
+	else
+		ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);
 	ice_vsi_set_tc_cfg(vsi);
 
 	/* Initialize VSI struct elements and create VSI in FW */
@@ -2705,3 +2830,125 @@ bool ice_is_reset_in_progress(unsigned long *state)
 	       test_bit(__ICE_CORER_REQ, state) ||
 	       test_bit(__ICE_GLOBR_REQ, state);
 }
+
+#ifdef CONFIG_DCB
+/**
+ * ice_vsi_update_q_map - update our copy of the VSI info with new queue map
+ * @vsi: VSI being configured
+ * @ctx: the context buffer returned from AQ VSI update command
+ */
+static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
+{
+	vsi->info.mapping_flags = ctx->info.mapping_flags;
+	memcpy(&vsi->info.q_mapping, &ctx->info.q_mapping,
+	       sizeof(vsi->info.q_mapping));
+	memcpy(&vsi->info.tc_mapping, ctx->info.tc_mapping,
+	       sizeof(vsi->info.tc_mapping));
+}
+
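ice_vsi_cfg_netdev_tc(), which follows, drives the stack's standard TC hooks. A minimal standalone use of those hooks for a hypothetical two-TC split of eight queues (values illustrative only, not taken from the patch):

	/* Advertise 2 TCs: queues 0-3 on TC0, queues 4-7 on TC1,
	 * with priorities 0-3 mapped to TC0 and 4-7 to TC1
	 */
	netdev_set_num_tc(netdev, 2);
	netdev_set_tc_queue(netdev, 0, 4, 0);
	netdev_set_tc_queue(netdev, 1, 4, 4);
	for (i = 0; i < 8; i++)
		netdev_set_prio_tc_map(netdev, i, i < 4 ? 0 : 1);
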
+/**
+ * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration
+ * @vsi: the VSI being configured
+ * @ena_tc: TC map to be enabled
+ */
+static void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
+{
+	struct net_device *netdev = vsi->netdev;
+	struct ice_pf *pf = vsi->back;
+	struct ice_dcbx_cfg *dcbcfg;
+	u8 netdev_tc;
+	int i;
+
+	if (!netdev)
+		return;
+
+	if (!ena_tc) {
+		netdev_reset_tc(netdev);
+		return;
+	}
+
+	if (netdev_set_num_tc(netdev, vsi->tc_cfg.numtc))
+		return;
+
+	dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
+
+	ice_for_each_traffic_class(i)
+		if (vsi->tc_cfg.ena_tc & BIT(i))
+			netdev_set_tc_queue(netdev,
+					    vsi->tc_cfg.tc_info[i].netdev_tc,
+					    vsi->tc_cfg.tc_info[i].qcount_tx,
+					    vsi->tc_cfg.tc_info[i].qoffset);
+
+	for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
+		u8 ets_tc = dcbcfg->etscfg.prio_table[i];
+
+		/* Get the mapped netdev TC# for the UP */
+		netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc;
+		netdev_set_prio_tc_map(netdev, i, netdev_tc);
+	}
+}
+
+/**
+ * ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map
+ * @vsi: VSI to be configured
+ * @ena_tc: TC bitmap
+ *
+ * VSI queues are expected to be quiesced before calling this function
+ */
+int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
+{
+	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+	struct ice_vsi_ctx *ctx;
+	struct ice_pf *pf = vsi->back;
+	enum ice_status status;
+	int i, ret = 0;
+	u8 num_tc = 0;
+
+	ice_for_each_traffic_class(i) {
+		/* build bitmap of enabled TCs */
+		if (ena_tc & BIT(i))
+			num_tc++;
+		/* populate max_txqs per TC */
+		max_txqs[i] = pf->num_lan_tx;
+	}
+
+	vsi->tc_cfg.ena_tc = ena_tc;
+	vsi->tc_cfg.numtc = num_tc;
+
+	ctx = devm_kzalloc(&pf->pdev->dev, sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	ctx->vf_num = 0;
+	ctx->info = vsi->info;
+
+	ice_vsi_setup_q_map(vsi, ctx);
+
+	/* must indicate which sections of the VSI context are being modified */
+	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
+	status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
+	if (status) {
+		dev_info(&pf->pdev->dev, "Failed VSI Update\n");
+		ret = -EIO;
+		goto out;
+	}
+
+	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
+				 max_txqs);
+
+	if (status) {
+		dev_err(&pf->pdev->dev,
+			"VSI %d failed TC config, error %d\n",
+			vsi->vsi_num, status);
+		ret = -EIO;
+		goto out;
+	}
+	ice_vsi_update_q_map(vsi, ctx);
+	vsi->info.valid_sections = 0;
+
+	ice_vsi_cfg_netdev_tc(vsi, ena_tc);
+out:
+	devm_kfree(&pf->pdev->dev, ctx);
+	return ret;
+}
+#endif /* CONFIG_DCB */
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 7988a53729a9..714ace077796 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -35,12 +35,16 @@ int
 ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
 			  u16 rel_vmvf_num);
 
-int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena);
+int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc);
 
 void ice_vsi_delete(struct ice_vsi *vsi);
 
 int ice_vsi_clear(struct ice_vsi *vsi);
 
+#ifdef CONFIG_DCB
+int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc);
+#endif /* CONFIG_DCB */
+
 struct ice_vsi *
 ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
 	      enum ice_vsi_type type, u16 vf_id);
@@ -62,6 +66,10 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi);
 
 void ice_vsi_put_qs(struct ice_vsi *vsi);
 
+#ifdef CONFIG_DCB
+void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi);
+#endif /* CONFIG_DCB */
+
 void ice_vsi_dis_irq(struct ice_vsi *vsi);
 
 void ice_vsi_free_irq(struct ice_vsi *vsi);
@@ -70,8 +78,6 @@ void ice_vsi_free_rx_rings(struct ice_vsi *vsi);
 
 void ice_vsi_free_tx_rings(struct ice_vsi *vsi);
 
-int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc);
-
 int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena);
 
 #endif /* !_ICE_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 47cc3f905b7f..8bdd311c1b4c 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -7,8 +7,9 @@
 
 #include "ice.h"
 #include "ice_lib.h"
+#include "ice_dcb_lib.h"
 
-#define DRV_VERSION 
"0.7.2-k" +#define DRV_VERSION "0.7.4-k" #define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver" const char ice_drv_ver[] = DRV_VERSION; static const char ice_driver_string[] = DRV_SUMMARY; @@ -30,7 +31,6 @@ MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)"); static struct workqueue_struct *ice_wq; static const struct net_device_ops ice_netdev_ops; -static void ice_pf_dis_all_vsi(struct ice_pf *pf); static void ice_rebuild(struct ice_pf *pf); static void ice_vsi_release_all(struct ice_pf *pf); @@ -113,14 +113,14 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf) } /** - * ice_add_mac_to_sync_list - creates list of mac addresses to be synced + * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced * @netdev: the net device on which the sync is happening - * @addr: mac address to sync + * @addr: MAC address to sync * * This is a callback function which is called by the in kernel device sync * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only * populates the tmp_sync_list, which is later used by ice_add_mac to add the - * mac filters from the hardware. + * MAC filters from the hardware. */ static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr) { @@ -134,14 +134,14 @@ static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr) } /** - * ice_add_mac_to_unsync_list - creates list of mac addresses to be unsynced + * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced * @netdev: the net device on which the unsync is happening - * @addr: mac address to unsync + * @addr: MAC address to unsync * * This is a callback function which is called by the in kernel device unsync * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only * populates the tmp_unsync_list, which is later used by ice_remove_mac to - * delete the mac filters from the hardware. + * delete the MAC filters from the hardware. 
*/ static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr) { @@ -168,6 +168,39 @@ static bool ice_vsi_fltr_changed(struct ice_vsi *vsi) } /** + * ice_cfg_promisc - Enable or disable promiscuous mode for a given PF + * @vsi: the VSI being configured + * @promisc_m: mask of promiscuous config bits + * @set_promisc: enable or disable promisc flag request + * + */ +static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc) +{ + struct ice_hw *hw = &vsi->back->hw; + enum ice_status status = 0; + + if (vsi->type != ICE_VSI_PF) + return 0; + + if (vsi->vlan_ena) { + status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m, + set_promisc); + } else { + if (set_promisc) + status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m, + 0); + else + status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m, + 0); + } + + if (status) + return -EIO; + + return 0; +} + +/** * ice_vsi_sync_fltr - Update the VSI filter list to the HW * @vsi: ptr to the VSI * @@ -182,6 +215,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) struct ice_hw *hw = &pf->hw; enum ice_status status = 0; u32 changed_flags = 0; + u8 promisc_m; int err = 0; if (!vsi->netdev) @@ -211,7 +245,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) netif_addr_unlock_bh(netdev); } - /* Remove mac addresses in the unsync list */ + /* Remove MAC addresses in the unsync list */ status = ice_remove_mac(hw, &vsi->tmp_unsync_list); ice_free_fltr_list(dev, &vsi->tmp_unsync_list); if (status) { @@ -223,12 +257,16 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) } } - /* Add mac addresses in the sync list */ + /* Add MAC addresses in the sync list */ status = ice_add_mac(hw, &vsi->tmp_sync_list); ice_free_fltr_list(dev, &vsi->tmp_sync_list); - if (status) { + /* If filter is added successfully or already exists, do not go into + * 'if' condition and report it as error. Instead continue processing + * rest of the function. + */ + if (status && status != ICE_ERR_ALREADY_EXISTS) { netdev_err(netdev, "Failed to add MAC filters\n"); - /* If there is no more space for new umac filters, vsi + /* If there is no more space for new umac filters, VSI * should go into promiscuous mode. There should be some * space reserved for promiscuous filters. 
*/ @@ -245,14 +283,41 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) } } /* check for changes in promiscuous modes */ - if (changed_flags & IFF_ALLMULTI) - netdev_warn(netdev, "Unsupported configuration\n"); + if (changed_flags & IFF_ALLMULTI) { + if (vsi->current_netdev_flags & IFF_ALLMULTI) { + if (vsi->vlan_ena) + promisc_m = ICE_MCAST_VLAN_PROMISC_BITS; + else + promisc_m = ICE_MCAST_PROMISC_BITS; + + err = ice_cfg_promisc(vsi, promisc_m, true); + if (err) { + netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n", + vsi->vsi_num); + vsi->current_netdev_flags &= ~IFF_ALLMULTI; + goto out_promisc; + } + } else if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) { + if (vsi->vlan_ena) + promisc_m = ICE_MCAST_VLAN_PROMISC_BITS; + else + promisc_m = ICE_MCAST_PROMISC_BITS; + + err = ice_cfg_promisc(vsi, promisc_m, false); + if (err) { + netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n", + vsi->vsi_num); + vsi->current_netdev_flags |= IFF_ALLMULTI; + goto out_promisc; + } + } + } if (((changed_flags & IFF_PROMISC) || promisc_forced_on) || test_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags)) { clear_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags); if (vsi->current_netdev_flags & IFF_PROMISC) { - /* Apply TX filter rule to get traffic from VMs */ + /* Apply Tx filter rule to get traffic from VMs */ status = ice_cfg_dflt_vsi(hw, vsi->idx, true, ICE_FLTR_TX); if (status) { @@ -262,7 +327,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) err = -EIO; goto out_promisc; } - /* Apply RX filter rule to get traffic from wire */ + /* Apply Rx filter rule to get traffic from wire */ status = ice_cfg_dflt_vsi(hw, vsi->idx, true, ICE_FLTR_RX); if (status) { @@ -273,7 +338,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) goto out_promisc; } } else { - /* Clear TX filter rule to stop traffic from VMs */ + /* Clear Tx filter rule to stop traffic from VMs */ status = ice_cfg_dflt_vsi(hw, vsi->idx, false, ICE_FLTR_TX); if (status) { @@ -283,7 +348,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) err = -EIO; goto out_promisc; } - /* Clear RX filter to remove traffic from wire */ + /* Clear Rx filter to remove traffic from wire */ status = ice_cfg_dflt_vsi(hw, vsi->idx, false, ICE_FLTR_RX); if (status) { @@ -322,7 +387,7 @@ static void ice_sync_fltr_subtask(struct ice_pf *pf) clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags); - for (v = 0; v < pf->num_alloc_vsi; v++) + ice_for_each_vsi(pf, v) if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) && ice_vsi_sync_fltr(pf->vsi[v])) { /* come back and try again later */ @@ -332,6 +397,51 @@ static void ice_sync_fltr_subtask(struct ice_pf *pf) } /** + * ice_dis_vsi - pause a VSI + * @vsi: the VSI being paused + * @locked: is the rtnl_lock already held + */ +static void ice_dis_vsi(struct ice_vsi *vsi, bool locked) +{ + if (test_bit(__ICE_DOWN, vsi->state)) + return; + + set_bit(__ICE_NEEDS_RESTART, vsi->state); + + if (vsi->type == ICE_VSI_PF && vsi->netdev) { + if (netif_running(vsi->netdev)) { + if (!locked) { + rtnl_lock(); + vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); + rtnl_unlock(); + } else { + vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); + } + } else { + ice_vsi_close(vsi); + } + } +} + +/** + * ice_pf_dis_all_vsi - Pause all VSIs on a PF + * @pf: the PF + * @locked: is the rtnl_lock already held + */ +#ifdef CONFIG_DCB +void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked) +#else +static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked) +#endif /* CONFIG_DCB */ +{ + int v; + + 
ice_for_each_vsi(pf, v) + if (pf->vsi[v]) + ice_dis_vsi(pf->vsi[v], locked); +} + +/** * ice_prepare_for_reset - prep for the core to reset * @pf: board private structure * @@ -342,12 +452,16 @@ ice_prepare_for_reset(struct ice_pf *pf) { struct ice_hw *hw = &pf->hw; + /* already prepared for reset */ + if (test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) + return; + /* Notify VFs of impending reset */ if (ice_check_sq_alive(hw, &hw->mailboxq)) ice_vc_notify_reset(pf); /* disable the VSIs and their queues that are not already DOWN */ - ice_pf_dis_all_vsi(pf); + ice_pf_dis_all_vsi(pf, false); if (hw->port_info) ice_sched_clear_port(hw->port_info); @@ -394,6 +508,7 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type) ice_rebuild(pf); clear_bit(__ICE_PREPARED_FOR_RESET, pf->state); clear_bit(__ICE_PFR_REQ, pf->state); + ice_reset_all_vfs(pf, true); } } @@ -416,10 +531,15 @@ static void ice_reset_subtask(struct ice_pf *pf) * for the reset now), poll for reset done, rebuild and return. */ if (test_bit(__ICE_RESET_OICR_RECV, pf->state)) { - clear_bit(__ICE_GLOBR_RECV, pf->state); - clear_bit(__ICE_CORER_RECV, pf->state); - if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) - ice_prepare_for_reset(pf); + /* Perform the largest reset requested */ + if (test_and_clear_bit(__ICE_CORER_RECV, pf->state)) + reset_type = ICE_RESET_CORER; + if (test_and_clear_bit(__ICE_GLOBR_RECV, pf->state)) + reset_type = ICE_RESET_GLOBR; + /* return if no valid reset type requested */ + if (reset_type == ICE_RESET_INVAL) + return; + ice_prepare_for_reset(pf); /* make sure we are ready to rebuild */ if (ice_check_reset(&pf->hw)) { @@ -429,13 +549,14 @@ static void ice_reset_subtask(struct ice_pf *pf) pf->hw.reset_ongoing = false; ice_rebuild(pf); /* clear bit to resume normal operations, but - * ICE_NEEDS_RESTART bit is set incase rebuild failed + * ICE_NEEDS_RESTART bit is set in case rebuild failed */ clear_bit(__ICE_RESET_OICR_RECV, pf->state); clear_bit(__ICE_PREPARED_FOR_RESET, pf->state); clear_bit(__ICE_PFR_REQ, pf->state); clear_bit(__ICE_CORER_REQ, pf->state); clear_bit(__ICE_GLOBR_REQ, pf->state); + ice_reset_all_vfs(pf, true); } return; @@ -519,6 +640,9 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup) case ICE_FC_RX_PAUSE: fc = "RX"; break; + case ICE_FC_NONE: + fc = "None"; + break; default: fc = "Unknown"; break; @@ -529,9 +653,9 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup) } /** - * ice_vsi_link_event - update the vsi's netdev - * @vsi: the vsi on which the link event occurred - * @link_up: whether or not the vsi needs to be set up or down + * ice_vsi_link_event - update the VSI's netdev + * @vsi: the VSI on which the link event occurred + * @link_up: whether or not the VSI needs to be set up or down */ static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up) { @@ -635,19 +759,70 @@ static void ice_watchdog_subtask(struct ice_pf *pf) pf->serv_tmr_prev = jiffies; - if (ice_link_event(pf, pf->hw.port_info)) - dev_dbg(&pf->pdev->dev, "ice_link_event failed\n"); - /* Update the stats for active netdevs so the network stack * can look at updated numbers whenever it cares to */ ice_update_pf_stats(pf); - for (i = 0; i < pf->num_alloc_vsi; i++) + ice_for_each_vsi(pf, i) if (pf->vsi[i] && pf->vsi[i]->netdev) ice_update_vsi_stats(pf->vsi[i]); } /** + * ice_init_link_events - enable/initialize link events + * @pi: pointer to the port_info instance + * + * Returns -EIO on failure, 0 on success + */ +static int ice_init_link_events(struct ice_port_info *pi) +{ + 
u16 mask; + + mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA | + ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL)); + + if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) { + dev_dbg(ice_hw_to_dev(pi->hw), + "Failed to set link event mask for port %d\n", + pi->lport); + return -EIO; + } + + if (ice_aq_get_link_info(pi, true, NULL, NULL)) { + dev_dbg(ice_hw_to_dev(pi->hw), + "Failed to enable link events for port %d\n", + pi->lport); + return -EIO; + } + + return 0; +} + +/** + * ice_handle_link_event - handle link event via ARQ + * @pf: PF that the link event is associated with + * + * Returns -EINVAL if port_info is NULL, otherwise the status from + * processing the link event + */ +static int ice_handle_link_event(struct ice_pf *pf) +{ + struct ice_port_info *port_info; + int status; + + port_info = pf->hw.port_info; + if (!port_info) + return -EINVAL; + + status = ice_link_event(pf, port_info); + if (status) + dev_dbg(&pf->pdev->dev, + "Could not process link event, error %d\n", status); + + return status; +} + +/** * __ice_clean_ctrlq - helper function to clean controlq rings * @pf: ptr to struct ice_pf * @q_type: specific Control queue type @@ -750,12 +925,20 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) opcode = le16_to_cpu(event.desc.opcode); switch (opcode) { + case ice_aqc_opc_get_link_status: + if (ice_handle_link_event(pf)) + dev_err(&pf->pdev->dev, + "Could not handle link event\n"); + break; case ice_mbx_opc_send_msg_to_pf: ice_vc_process_vf_msg(pf, &event); break; case ice_aqc_opc_fw_logging: ice_output_fw_log(hw, &event.desc, event.msg_buf); break; + case ice_aqc_opc_lldp_set_mib_change: + ice_dcb_process_lldp_set_mib_change(pf, &event); + break; default: dev_dbg(&pf->pdev->dev, "%s Receive Queue unknown event 0x%04x ignored\n", @@ -877,6 +1060,18 @@ static void ice_service_task_stop(struct ice_pf *pf) } /** + * ice_service_task_restart - restart the service task and schedule work + * @pf: board private structure + * + * This function is needed by the suspend and resume paths (e.g. the WoL + * scenario) + */ +static void ice_service_task_restart(struct ice_pf *pf) +{ + clear_bit(__ICE_SERVICE_DIS, pf->state); + ice_service_task_schedule(pf); +} + +/** * ice_service_timer - timer callback to schedule service task * @t: pointer to timer_list */ @@ -1089,7 +1284,7 @@ static void ice_service_task(struct work_struct *work) /** * ice_set_ctrlq_len - helper function to set controlq length - * @hw: pointer to the hw instance + * @hw: pointer to the HW instance */ static void ice_set_ctrlq_len(struct ice_hw *hw) { @@ -1111,8 +1306,9 @@ static void ice_set_ctrlq_len(struct ice_hw *hw) * This is a callback function used by the irq_set_affinity_notifier function * so that we may register to receive changes to the irq affinity masks.
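One detail worth calling out from ice_init_link_events above: the mask handed to ice_aq_set_event_mask is the bitwise NOT of the OR of the wanted events, consistent with a set bit meaning "suppress this event" so that only the cleared bits are reported. A toy illustration of the inverted-mask idiom; the bit values are stand-ins, not the hardware's:

#include <stdint.h>
#include <stdio.h>

#define EV_UPDOWN      0x0001	/* link up/down (stand-in value) */
#define EV_MEDIA_NA    0x0002	/* media not available (stand-in) */
#define EV_MODULE_QUAL 0x0004	/* module qualification failure (stand-in) */

int main(void)
{
	/* set bits suppress events; clear exactly the ones to receive */
	uint16_t mask = (uint16_t)~(EV_UPDOWN | EV_MEDIA_NA | EV_MODULE_QUAL);

	printf("event mask: 0x%04x\n", mask);	/* prints 0xfff8 */
	return 0;
}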
*/ -static void ice_irq_affinity_notify(struct irq_affinity_notify *notify, - const cpumask_t *mask) +static void +ice_irq_affinity_notify(struct irq_affinity_notify *notify, + const cpumask_t *mask) { struct ice_q_vector *q_vector = container_of(notify, struct ice_q_vector, affinity_notify); @@ -1184,10 +1380,9 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) /* skip this unused q_vector */ continue; } - err = devm_request_irq(&pf->pdev->dev, - pf->msix_entries[base + vector].vector, - vsi->irq_handler, 0, q_vector->name, - q_vector); + err = devm_request_irq(&pf->pdev->dev, irq_num, + vsi->irq_handler, 0, + q_vector->name, q_vector); if (err) { netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n", err); @@ -1649,18 +1844,20 @@ ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) } /** - * ice_vlan_rx_add_vid - Add a vlan id filter to HW offload + * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload * @netdev: network interface to be adjusted * @proto: unused protocol - * @vid: vlan id to be added + * @vid: VLAN ID to be added * - * net_device_ops implementation for adding vlan ids + * net_device_ops implementation for adding VLAN IDs */ -static int ice_vlan_rx_add_vid(struct net_device *netdev, - __always_unused __be16 proto, u16 vid) +static int +ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto, + u16 vid) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; + int ret; if (vid >= VLAN_N_VID) { netdev_err(netdev, "VLAN id requested %d is out of range %d\n", @@ -1673,33 +1870,39 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev, /* Enable VLAN pruning when VLAN 0 is added */ if (unlikely(!vid)) { - int ret = ice_cfg_vlan_pruning(vsi, true); - + ret = ice_cfg_vlan_pruning(vsi, true, false); if (ret) return ret; } - /* Add all VLAN ids including 0 to the switch filter. VLAN id 0 is + /* Add all VLAN IDs including 0 to the switch filter. 
VLAN ID 0 is * needed to continue allowing all untagged packets since VLAN prune * list is applied to all packets by the switch */ - return ice_vsi_add_vlan(vsi, vid); + ret = ice_vsi_add_vlan(vsi, vid); + if (!ret) { + vsi->vlan_ena = true; + set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags); + } + + return ret; } /** - * ice_vlan_rx_kill_vid - Remove a vlan id filter from HW offload + * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload * @netdev: network interface to be adjusted * @proto: unused protocol - * @vid: vlan id to be removed + * @vid: VLAN ID to be removed * - * net_device_ops implementation for removing vlan ids + * net_device_ops implementation for removing VLAN IDs */ -static int ice_vlan_rx_kill_vid(struct net_device *netdev, - __always_unused __be16 proto, u16 vid) +static int +ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto, + u16 vid) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; - int status; + int ret; if (vsi->info.pvid) return -EINVAL; @@ -1707,15 +1910,17 @@ static int ice_vlan_rx_kill_vid(struct net_device *netdev, /* Make sure ice_vsi_kill_vlan is successful before updating VLAN * information */ - status = ice_vsi_kill_vlan(vsi, vid); - if (status) - return status; + ret = ice_vsi_kill_vlan(vsi, vid); + if (ret) + return ret; /* Disable VLAN pruning when VLAN 0 is removed */ if (unlikely(!vid)) - status = ice_cfg_vlan_pruning(vsi, false); + ret = ice_cfg_vlan_pruning(vsi, false, false); - return status; + vsi->vlan_ena = false; + set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags); + return ret; } /** @@ -2033,23 +2238,6 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf) } /** - * ice_verify_itr_gran - verify driver's assumption of ITR granularity - * @pf: pointer to the PF structure - * - * There is no error returned here because the driver will be able to handle a - * different ITR granularity, but interrupt moderation will not be accurate if - * the driver's assumptions are not verified. This assumption is made so we can - * use constants in the hot path instead of accessing structure members. - */ -static void ice_verify_itr_gran(struct ice_pf *pf) -{ - if (pf->hw.itr_gran != (ICE_ITR_GRAN_S << 1)) - dev_warn(&pf->pdev->dev, - "%d ITR granularity assumption is invalid, actual ITR granularity is %d. 
Interrupt moderation will be inaccurate!\n", - (ICE_ITR_GRAN_S << 1), pf->hw.itr_gran); -} - -/** * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines * @pf: pointer to the PF structure * @@ -2072,9 +2260,10 @@ static void ice_verify_cacheline_size(struct ice_pf *pf) * * Returns 0 on success, negative on failure */ -static int ice_probe(struct pci_dev *pdev, - const struct pci_device_id __always_unused *ent) +static int +ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) { + struct device *dev = &pdev->dev; struct ice_pf *pf; struct ice_hw *hw; int err; @@ -2086,20 +2275,20 @@ static int ice_probe(struct pci_dev *pdev, err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev)); if (err) { - dev_err(&pdev->dev, "BAR0 I/O map error %d\n", err); + dev_err(dev, "BAR0 I/O map error %d\n", err); return err; } - pf = devm_kzalloc(&pdev->dev, sizeof(*pf), GFP_KERNEL); + pf = devm_kzalloc(dev, sizeof(*pf), GFP_KERNEL); if (!pf) return -ENOMEM; /* set up for high or low dma */ - err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); if (err) - err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); if (err) { - dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err); + dev_err(dev, "DMA configuration failed: 0x%x\n", err); return err; } @@ -2133,17 +2322,26 @@ static int ice_probe(struct pci_dev *pdev, err = ice_init_hw(hw); if (err) { - dev_err(&pdev->dev, "ice_init_hw failed: %d\n", err); + dev_err(dev, "ice_init_hw failed: %d\n", err); err = -EIO; goto err_exit_unroll; } - dev_info(&pdev->dev, "firmware %d.%d.%05d api %d.%d\n", + dev_info(dev, "firmware %d.%d.%05d api %d.%d\n", hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build, hw->api_maj_ver, hw->api_min_ver); ice_init_pf(pf); + err = ice_init_pf_dcb(pf); + if (err) { + clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); + clear_bit(ICE_FLAG_DCB_ENA, pf->flags); + + /* do not fail overall init if DCB init fails */ + err = 0; + } + ice_determine_q_usage(pf); pf->num_alloc_vsi = hw->func_caps.guar_num_vsi; @@ -2152,8 +2350,8 @@ static int ice_probe(struct pci_dev *pdev, goto err_init_pf_unroll; } - pf->vsi = devm_kcalloc(&pdev->dev, pf->num_alloc_vsi, - sizeof(*pf->vsi), GFP_KERNEL); + pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi), + GFP_KERNEL); if (!pf->vsi) { err = -ENOMEM; goto err_init_pf_unroll; @@ -2161,8 +2359,7 @@ static int ice_probe(struct pci_dev *pdev, err = ice_init_interrupt_scheme(pf); if (err) { - dev_err(&pdev->dev, - "ice_init_interrupt_scheme failed: %d\n", err); + dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err); err = -EIO; goto err_init_interrupt_unroll; } @@ -2178,15 +2375,13 @@ static int ice_probe(struct pci_dev *pdev, if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) { err = ice_req_irq_msix_misc(pf); if (err) { - dev_err(&pdev->dev, - "setup of misc vector failed: %d\n", err); + dev_err(dev, "setup of misc vector failed: %d\n", err); goto err_init_interrupt_unroll; } } /* create switch struct for the switch element created by FW on boot */ - pf->first_sw = devm_kzalloc(&pdev->dev, sizeof(*pf->first_sw), - GFP_KERNEL); + pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL); if (!pf->first_sw) { err = -ENOMEM; goto err_msix_misc_unroll; @@ -2204,8 +2399,7 @@ static int ice_probe(struct pci_dev *pdev, err = ice_setup_pf_sw(pf); if (err) { - dev_err(&pdev->dev, - "probe failed due to setup pf switch:%d\n", 
err); + dev_err(dev, "probe failed due to setup pf switch:%d\n", err); goto err_alloc_sw_unroll; } @@ -2214,8 +2408,13 @@ static int ice_probe(struct pci_dev *pdev, /* since everything is good, start the service timer */ mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); + err = ice_init_link_events(pf->hw.port_info); + if (err) { + dev_err(dev, "ice_init_link_events failed: %d\n", err); + goto err_alloc_sw_unroll; + } + ice_verify_cacheline_size(pf); - ice_verify_itr_gran(pf); return 0; @@ -2227,7 +2426,7 @@ err_msix_misc_unroll: ice_free_irq_msix_misc(pf); err_init_interrupt_unroll: ice_clear_interrupt_scheme(pf); - devm_kfree(&pdev->dev, pf->vsi); + devm_kfree(dev, pf->vsi); err_init_pf_unroll: ice_deinit_pf(pf); ice_deinit_hw(hw); @@ -2272,6 +2471,136 @@ static void ice_remove(struct pci_dev *pdev) pci_disable_pcie_error_reporting(pdev); } +/** + * ice_pci_err_detected - warn that a PCI error has been detected + * @pdev: PCI device information struct + * @err: the type of PCI error + * + * Called to warn that something happened on the PCI bus and the error handling + * is in progress. Allows the driver to gracefully prepare/handle PCI errors. + */ +static pci_ers_result_t +ice_pci_err_detected(struct pci_dev *pdev, enum pci_channel_state err) +{ + struct ice_pf *pf = pci_get_drvdata(pdev); + + if (!pf) { + dev_err(&pdev->dev, "%s: unrecoverable device error %d\n", + __func__, err); + return PCI_ERS_RESULT_DISCONNECT; + } + + if (!test_bit(__ICE_SUSPENDED, pf->state)) { + ice_service_task_stop(pf); + + if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) { + set_bit(__ICE_PFR_REQ, pf->state); + ice_prepare_for_reset(pf); + } + } + + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * ice_pci_err_slot_reset - a PCI slot reset has just happened + * @pdev: PCI device information struct + * + * Called to determine if the driver can recover from the PCI slot reset by + * using a register read to determine if the device is recoverable.
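For readers new to AER, the callbacks added from here on follow the standard PCI error-recovery sequence: error_detected quiesces the device and votes for a reset, slot_reset re-enables it and reports whether it came back, and resume restarts normal operation. A bare-bones skeleton of that contract for a hypothetical driver, using the callback signatures of this kernel generation (everything named foo_* is invented):

static pci_ers_result_t foo_err_detected(struct pci_dev *pdev,
					 enum pci_channel_state state)
{
	/* stop I/O and ask the core to reset the slot */
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t foo_slot_reset(struct pci_dev *pdev)
{
	/* re-enable the device and report the verdict */
	return pci_enable_device_mem(pdev) ? PCI_ERS_RESULT_DISCONNECT
					   : PCI_ERS_RESULT_RECOVERED;
}

static void foo_resume(struct pci_dev *pdev)
{
	/* restart queues, timers, and any service work */
}

static const struct pci_error_handlers foo_err_handler = {
	.error_detected	= foo_err_detected,
	.slot_reset	= foo_slot_reset,
	.resume		= foo_resume,
};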
+ */ +static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev) +{ + struct ice_pf *pf = pci_get_drvdata(pdev); + pci_ers_result_t result; + int err; + u32 reg; + + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(&pdev->dev, + "Cannot re-enable PCI device after reset, error %d\n", + err); + result = PCI_ERS_RESULT_DISCONNECT; + } else { + pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); + pci_wake_from_d3(pdev, false); + + /* Check for life */ + reg = rd32(&pf->hw, GLGEN_RTRIG); + if (!reg) + result = PCI_ERS_RESULT_RECOVERED; + else + result = PCI_ERS_RESULT_DISCONNECT; + } + + err = pci_cleanup_aer_uncorrect_error_status(pdev); + if (err) + dev_dbg(&pdev->dev, + "pci_cleanup_aer_uncorrect_error_status failed, error %d\n", + err); + /* non-fatal, continue */ + + return result; +} + +/** + * ice_pci_err_resume - restart operations after PCI error recovery + * @pdev: PCI device information struct + * + * Called to allow the driver to bring things back up after PCI error and/or + * reset recovery have finished + */ +static void ice_pci_err_resume(struct pci_dev *pdev) +{ + struct ice_pf *pf = pci_get_drvdata(pdev); + + if (!pf) { + dev_err(&pdev->dev, + "%s failed, device is unrecoverable\n", __func__); + return; + } + + if (test_bit(__ICE_SUSPENDED, pf->state)) { + dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n", + __func__); + return; + } + + ice_do_reset(pf, ICE_RESET_PFR); + ice_service_task_restart(pf); + mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); +} + +/** + * ice_pci_err_reset_prepare - prepare device driver for PCI reset + * @pdev: PCI device information struct + */ +static void ice_pci_err_reset_prepare(struct pci_dev *pdev) +{ + struct ice_pf *pf = pci_get_drvdata(pdev); + + if (!test_bit(__ICE_SUSPENDED, pf->state)) { + ice_service_task_stop(pf); + + if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) { + set_bit(__ICE_PFR_REQ, pf->state); + ice_prepare_for_reset(pf); + } + } +} + +/** + * ice_pci_err_reset_done - PCI reset done, device driver reset can begin + * @pdev: PCI device information struct + */ +static void ice_pci_err_reset_done(struct pci_dev *pdev) +{ + ice_pci_err_resume(pdev); +} + /* ice_pci_tbl - PCI Device ID Table * * Wildcard entries (PCI_ANY_ID) should come last @@ -2289,12 +2618,21 @@ static const struct pci_device_id ice_pci_tbl[] = { }; MODULE_DEVICE_TABLE(pci, ice_pci_tbl); +static const struct pci_error_handlers ice_pci_err_handler = { + .error_detected = ice_pci_err_detected, + .slot_reset = ice_pci_err_slot_reset, + .reset_prepare = ice_pci_err_reset_prepare, + .reset_done = ice_pci_err_reset_done, + .resume = ice_pci_err_resume +}; + static struct pci_driver ice_driver = { .name = KBUILD_MODNAME, .id_table = ice_pci_tbl, .probe = ice_probe, .remove = ice_remove, .sriov_configure = ice_sriov_configure, + .err_handler = &ice_pci_err_handler }; /** @@ -2341,7 +2679,7 @@ static void __exit ice_module_exit(void) module_exit(ice_module_exit); /** - * ice_set_mac_address - NDO callback to set mac address + * ice_set_mac_address - NDO callback to set MAC address * @netdev: network interface device structure * @pi: pointer to an address structure * @@ -2378,14 +2716,14 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi) return -EBUSY; } - /* When we change the mac address we also have to change the mac address - * based filter rules that were created previously for the old mac + /* When we change the MAC address we also have to change the MAC address + * 
based filter rules that were created previously for the old MAC * address. So first, we remove the old filter rule using ice_remove_mac * and then create a new filter rule using ice_add_mac. Note that for - * both these operations, we first need to form a "list" of mac - * addresses (even though in this case, we have only 1 mac address to be + * both these operations, we first need to form a "list" of MAC + * addresses (even though in this case, we have only 1 MAC address to be * added/removed) and this is done using ice_add_mac_to_list. Depending on - * the ensuing operation this "list" of mac addresses is either to be + * the ensuing operation this "list" of MAC addresses is either to be * added or removed from the filter. */ err = ice_add_mac_to_list(vsi, &r_mac_list, netdev->dev_addr); @@ -2423,12 +2761,12 @@ free_lists: return err; } - /* change the netdev's mac address */ + /* change the netdev's MAC address */ memcpy(netdev->dev_addr, mac, netdev->addr_len); netdev_dbg(vsi->netdev, "updated mac address to %pM\n", netdev->dev_addr); - /* write new mac address to the firmware */ + /* write new MAC address to the firmware */ flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL; status = ice_aq_manage_mac_write(hw, mac, flags, NULL); if (status) { @@ -2470,7 +2808,7 @@ static void ice_set_rx_mode(struct net_device *netdev) * @tb: pointer to array of nladdr (unused) * @dev: the net device pointer * @addr: the MAC address entry being added - * @vid: VLAN id + * @vid: VLAN ID * @flags: instructions from stack about fdb operation * @extack: netlink extended ack */ @@ -2510,11 +2848,12 @@ ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[], * @tb: pointer to array of nladdr (unused) * @dev: the net device pointer * @addr: the MAC address entry being added - * @vid: VLAN id + * @vid: VLAN ID */ -static int ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[], - struct net_device *dev, const unsigned char *addr, - __always_unused u16 vid) +static int +ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[], + struct net_device *dev, const unsigned char *addr, + __always_unused u16 vid) { int err; @@ -2538,8 +2877,8 @@ static int ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[], * @netdev: ptr to the netdev being adjusted * @features: the feature set that the stack is suggesting */ -static int ice_set_features(struct net_device *netdev, - netdev_features_t features) +static int +ice_set_features(struct net_device *netdev, netdev_features_t features) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; @@ -2568,8 +2907,8 @@ static int ice_set_features(struct net_device *netdev, } /** - * ice_vsi_vlan_setup - Setup vlan offload properties on a VSI - * @vsi: VSI to setup vlan properties for + * ice_vsi_vlan_setup - Setup VLAN offload properties on a VSI + * @vsi: VSI to setup VLAN properties for */ static int ice_vsi_vlan_setup(struct ice_vsi *vsi) { @@ -2601,6 +2940,7 @@ static int ice_vsi_cfg(struct ice_vsi *vsi) if (err) return err; } + ice_vsi_cfg_dcb_rings(vsi); err = ice_vsi_cfg_lan_txqs(vsi); if (!err) @@ -2666,7 +3006,7 @@ static int ice_up_complete(struct ice_vsi *vsi) ice_service_task_schedule(pf); - return err; + return 0; } /** @@ -2693,8 +3033,8 @@ int ice_up(struct ice_vsi *vsi) * This function fetches stats from the ring considering the atomic operations * that need to be performed to read u64 values on a 32-bit machine.
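The body of ice_fetch_u64_stats_per_ring is elided by the diff context, but the comment refers to the kernel's u64_stats seqcount, which lets a 32-bit reader obtain a consistent 64-bit pair without locking. The canonical read loop looks roughly like the sketch below; the syncp and stats field names are assumed rather than taken from this hunk:

	do {
		start = u64_stats_fetch_begin(&ring->syncp);
		*pkts = ring->stats.pkts;
		*bytes = ring->stats.bytes;
	} while (u64_stats_fetch_retry(&ring->syncp, start));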
*/ -static void ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, - u64 *bytes) +static void +ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes) { unsigned int start; *pkts = 0; @@ -2911,6 +3251,8 @@ static void ice_update_pf_stats(struct ice_pf *pf) ice_stat_update32(hw, GLPRT_LXOFFTXC(pf_id), pf->stat_prev_loaded, &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx); + ice_update_dcb_stats(pf); + ice_stat_update32(hw, GLPRT_CRCERRS(pf_id), pf->stat_prev_loaded, &prev_ps->crc_errors, &cur_ps->crc_errors); @@ -3276,7 +3618,7 @@ static void ice_vsi_release_all(struct ice_pf *pf) if (!pf->vsi) return; - for (i = 0; i < pf->num_alloc_vsi; i++) { + ice_for_each_vsi(pf, i) { if (!pf->vsi[i]) continue; @@ -3289,47 +3631,31 @@ static void ice_vsi_release_all(struct ice_pf *pf) } /** - * ice_dis_vsi - pause a VSI - * @vsi: the VSI being paused + * ice_ena_vsi - resume a VSI + * @vsi: the VSI being resumed * @locked: is the rtnl_lock already held */ -static void ice_dis_vsi(struct ice_vsi *vsi, bool locked) +static int ice_ena_vsi(struct ice_vsi *vsi, bool locked) { - if (test_bit(__ICE_DOWN, vsi->state)) - return; + int err = 0; - set_bit(__ICE_NEEDS_RESTART, vsi->state); + if (!test_bit(__ICE_NEEDS_RESTART, vsi->state)) + return err; + + clear_bit(__ICE_NEEDS_RESTART, vsi->state); + + if (vsi->netdev && vsi->type == ICE_VSI_PF) { + struct net_device *netd = vsi->netdev; - if (vsi->type == ICE_VSI_PF && vsi->netdev) { if (netif_running(vsi->netdev)) { - if (!locked) { + if (locked) { + err = netd->netdev_ops->ndo_open(netd); + } else { rtnl_lock(); - vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); + err = netd->netdev_ops->ndo_open(netd); rtnl_unlock(); - } else { - vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); } } else { - ice_vsi_close(vsi); - } - } -} - -/** - * ice_ena_vsi - resume a VSI - */ -static int ice_ena_vsi(struct ice_vsi *vsi) -{ - int err = 0; - - if (test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state) && - vsi->netdev) { - if (netif_running(vsi->netdev)) { - rtnl_lock(); - err = vsi->netdev->netdev_ops->ndo_open(vsi->netdev); - rtnl_unlock(); - } else { err = ice_vsi_open(vsi); } } @@ -3338,29 +3664,21 @@ static int ice_ena_vsi(struct ice_vsi *vsi) } /** - * ice_pf_dis_all_vsi - Pause all VSIs on a PF - * @pf: the PF - */ -static void ice_pf_dis_all_vsi(struct ice_pf *pf) -{ - int v; - - ice_for_each_vsi(pf, v) - if (pf->vsi[v]) - ice_dis_vsi(pf->vsi[v], false); -} - -/** * ice_pf_ena_all_vsi - Resume all VSIs on a PF * @pf: the PF + * @locked: is the rtnl_lock already held */ -static int ice_pf_ena_all_vsi(struct ice_pf *pf) +#ifdef CONFIG_DCB +int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked) +#else +static int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked) +#endif /* CONFIG_DCB */ { int v; ice_for_each_vsi(pf, v) if (pf->vsi[v]) - if (ice_ena_vsi(pf->vsi[v])) + if (ice_ena_vsi(pf->vsi[v], locked)) return -EIO; return 0; @@ -3375,16 +3693,12 @@ static int ice_vsi_rebuild_all(struct ice_pf *pf) int i; /* loop through pf->vsi array and reinit the VSI if found */ - for (i = 0; i < pf->num_alloc_vsi; i++) { + ice_for_each_vsi(pf, i) { int err; if (!pf->vsi[i]) continue; - /* VF VSI rebuild isn't supported yet */ - if (pf->vsi[i]->type == ICE_VSI_VF) - continue; - err = ice_vsi_rebuild(pf->vsi[i]); if (err) { dev_err(&pf->pdev->dev, @@ -3412,7 +3726,7 @@ static int ice_vsi_replay_all(struct ice_pf *pf) int i; /* loop through pf->vsi array and replay the VSI if found */ - for (i = 0; i < pf->num_alloc_vsi; i++) { +
ice_for_each_vsi(pf, i) { if (!pf->vsi[i]) continue; @@ -3479,6 +3793,8 @@ static void ice_rebuild(struct ice_pf *pf) if (err) goto err_sched_init_port; + ice_dcb_rebuild(pf); + /* reset search_hint of irq_trackers to 0 since interrupts are * reclaimed and could be allocated from beginning during VSI rebuild */ @@ -3512,7 +3828,7 @@ static void ice_rebuild(struct ice_pf *pf) } /* restart the VSIs that were rebuilt and running before the reset */ - err = ice_pf_ena_all_vsi(pf); + err = ice_pf_ena_all_vsi(pf, false); if (err) { dev_err(&pf->pdev->dev, "error enabling VSIs\n"); /* no need to disable VSIs in tear down path in ice_rebuild() @@ -3521,9 +3837,7 @@ static void ice_rebuild(struct ice_pf *pf) goto err_vsi_rebuild; } - ice_reset_all_vfs(pf, true); - - for (i = 0; i < pf->num_alloc_vsi; i++) { + ice_for_each_vsi(pf, i) { bool link_up; if (!pf->vsi[i] || pf->vsi[i]->type != ICE_VSI_PF) @@ -3710,7 +4024,7 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) /** * ice_bridge_getlink - Get the hardware bridge mode * @skb: skb buff - * @pid: process id + * @pid: process ID * @seq: RTNL message seq * @dev: the netdev being configured * @filter_mask: filter mask passed in diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c index 413fdbbcc4d0..62571d33d0d6 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.c +++ b/drivers/net/ethernet/intel/ice/ice_nvm.c @@ -5,7 +5,7 @@ /** * ice_aq_read_nvm - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * @module_typeid: module pointer location in words from the NVM beginning * @offset: byte offset from the module beginning * @length: length of the section to be read (in bytes from the offset) @@ -235,7 +235,7 @@ ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data) /** * ice_init_nvm - initializes NVM setting - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * * This function reads and populates NVM settings such as Shadow RAM size, * max_timeout, and blank_nvm_mode @@ -248,7 +248,7 @@ enum ice_status ice_init_nvm(struct ice_hw *hw) u32 fla, gens_stat; u8 sr_size; - /* The SR size is stored regardless of the nvm programming mode + /* The SR size is stored regardless of the NVM programming mode * as the blank mode may be used in the factory line. */ gens_stat = rd32(hw, GLNVM_GENS); diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index 56049739a250..124feaf0e730 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -43,9 +43,9 @@ ice_sched_add_root_node(struct ice_port_info *pi, /** * ice_sched_find_node_by_teid - Find the Tx scheduler node in SW DB * @start_node: pointer to the starting ice_sched_node struct in a sub-tree - * @teid: node teid to search + * @teid: node TEID to search * - * This function searches for a node matching the teid in the scheduling tree + * This function searches for a node matching the TEID in the scheduling tree * from the SW DB. The search is recursive and is restricted by the number of * layers it has searched through; stopping at the max supported layer. 
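The TEID search described in the ice_sched_find_node_by_teid comment above is a depth-first walk that checks the direct children before recursing, bounded by the maximum supported layer. A simplified userspace analogue of the recursion, without the layer bound the driver also enforces:

#include <stddef.h>
#include <stdint.h>

struct node {
	uint32_t teid;
	unsigned int num_children;
	struct node **children;
};

static struct node *find_by_teid(struct node *n, uint32_t teid)
{
	unsigned int i;

	if (!n)
		return NULL;
	if (n->teid == teid)
		return n;
	/* visit each child, recursing into its subtree */
	for (i = 0; i < n->num_children; i++) {
		struct node *hit = find_by_teid(n->children[i], teid);

		if (hit)
			return hit;
	}
	return NULL;
}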
* @@ -66,7 +66,7 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid) start_node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) return NULL; - /* Check if teid matches to any of the children nodes */ + /* Check if the TEID matches any of the child nodes */ for (i = 0; i < start_node->num_children; i++) if (ICE_TXSCHED_GET_NODE_TEID(start_node->children[i]) == teid) return start_node->children[i]; @@ -86,7 +86,7 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid) /** * ice_aqc_send_sched_elem_cmd - send scheduling elements cmd - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * @cmd_opc: cmd opcode * @elems_req: number of elements to request * @buf: pointer to buffer * @@ -118,7 +118,7 @@ ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc, /** * ice_aq_query_sched_elems - query scheduler elements - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * @elems_req: number of elements to query * @buf: pointer to buffer * @buf_size: buffer size in bytes @@ -127,7 +127,7 @@ ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc, * * Query scheduling elements (0x0404) */ -static enum ice_status +enum ice_status ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req, struct ice_aqc_get_elem *buf, u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd) @@ -138,31 +138,6 @@ ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req, } /** - * ice_sched_query_elem - query element information from hw - * @hw: pointer to the hw struct - * @node_teid: node teid to be queried - * @buf: buffer to element information - * - * This function queries HW element information - */ -static enum ice_status -ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, - struct ice_aqc_get_elem *buf) -{ - u16 buf_size, num_elem_ret = 0; - enum ice_status status; - - buf_size = sizeof(*buf); - memset(buf, 0, buf_size); - buf->generic[0].node_teid = cpu_to_le32(node_teid); - status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret, - NULL); - if (status || num_elem_ret != 1) - ice_debug(hw, ICE_DBG_SCHED, "query element failed\n"); - return status; -} - -/** * ice_sched_add_node - Insert the Tx scheduler node in SW DB * @pi: port information structure * @layer: Scheduler layer of the node @@ -226,7 +201,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer, /** * ice_aq_delete_sched_elems - delete scheduler elements - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * @grps_req: number of groups to delete * @buf: pointer to buffer * @buf_size: buffer size in bytes @@ -246,13 +221,13 @@ ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req, } /** - * ice_sched_remove_elems - remove nodes from hw - * @hw: pointer to the hw struct + * ice_sched_remove_elems - remove nodes from HW + * @hw: pointer to the HW struct * @parent: pointer to the parent node * @num_nodes: number of nodes * @node_teids: array of node teids to be deleted * - * This function remove nodes from hw + * This function removes nodes from HW */ static enum ice_status ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent, @@ -276,7 +251,8 @@ ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent, status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size, &num_groups_removed, NULL); if (status || num_groups_removed != 1) - ice_debug(hw, ICE_DBG_SCHED, "remove elements failed\n"); + ice_debug(hw, ICE_DBG_SCHED, "remove node failed FW error %d\n", + hw->adminq.sq_last_status);
devm_kfree(ice_hw_to_dev(hw), buf); return status; @@ -284,7 +260,7 @@ ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent, /** * ice_sched_get_first_node - get the first node of the given layer - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * @parent: pointer to the base node of the subtree * @layer: layer number * @@ -360,12 +336,8 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node) node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT && node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) { u32 teid = le32_to_cpu(node->info.node_teid); - enum ice_status status; - status = ice_sched_remove_elems(hw, node->parent, 1, &teid); - if (status) - ice_debug(hw, ICE_DBG_SCHED, - "remove element failed %d\n", status); + ice_sched_remove_elems(hw, node->parent, 1, &teid); } parent = node->parent; /* root has no parent */ @@ -409,7 +381,7 @@ err_exit: /** * ice_aq_get_dflt_topo - gets default scheduler topology - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * @lport: logical port number * @buf: pointer to buffer * @buf_size: buffer size in bytes @@ -439,7 +411,7 @@ ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport, /** * ice_aq_add_sched_elems - adds scheduling element - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * @grps_req: the number of groups that are requested to be added * @buf: pointer to buffer * @buf_size: buffer size in bytes @@ -460,7 +432,7 @@ ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req, /** * ice_aq_suspend_sched_elems - suspend scheduler elements - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * @elems_req: number of elements to suspend * @buf: pointer to buffer * @buf_size: buffer size in bytes @@ -481,7 +453,7 @@ ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, /** * ice_aq_resume_sched_elems - resume scheduler elements - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * @elems_req: number of elements to resume * @buf: pointer to buffer * @buf_size: buffer size in bytes @@ -502,7 +474,7 @@ ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, /** * ice_aq_query_sched_res - query scheduler resource - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * @buf_size: buffer size in bytes * @buf: pointer to buffer * @cd: pointer to command details structure or NULL @@ -521,13 +493,13 @@ ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size, } /** - * ice_sched_suspend_resume_elems - suspend or resume hw nodes - * @hw: pointer to the hw struct + * ice_sched_suspend_resume_elems - suspend or resume HW nodes + * @hw: pointer to the HW struct * @num_nodes: number of nodes * @node_teids: array of node teids to be suspended or resumed * @suspend: true means suspend / false means resume * - * This function suspends or resumes hw nodes + * This function suspends or resumes HW nodes */ static enum ice_status ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids, @@ -561,10 +533,10 @@ ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids, } /** - * ice_sched_clear_agg - clears the agg related information + * ice_sched_clear_agg - clears the aggregator related information * @hw: pointer to the hardware structure * - * This function removes agg list and free up agg related memory + * This function removes the aggregator list and frees up aggregator related * memory previously allocated.
*/ void ice_sched_clear_agg(struct ice_hw *hw) { @@ -622,7 +594,7 @@ void ice_sched_clear_port(struct ice_port_info *pi) /** * ice_sched_cleanup_all - cleanup scheduler elements from SW DB for all ports - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * * Cleanup scheduling elements from SW DB for all the ports */ @@ -646,16 +618,16 @@ void ice_sched_cleanup_all(struct ice_hw *hw) } /** - * ice_sched_add_elems - add nodes to hw and SW DB + * ice_sched_add_elems - add nodes to HW and SW DB * @pi: port information structure * @tc_node: pointer to the branch node * @parent: pointer to the parent node * @layer: layer number to add nodes * @num_nodes: number of nodes * @num_nodes_added: pointer to num nodes added - * @first_node_teid: if new nodes are added then return the teid of first node + * @first_node_teid: if new nodes are added then return the TEID of first node * - * This function add nodes to hw as well as to SW DB for a given layer + * This function adds nodes to HW as well as to SW DB for a given layer */ static enum ice_status ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, @@ -697,7 +669,8 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, status = ice_aq_add_sched_elems(hw, 1, buf, buf_size, &num_groups_added, NULL); if (status || num_groups_added != 1) { - ice_debug(hw, ICE_DBG_SCHED, "add elements failed\n"); + ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n", + hw->adminq.sq_last_status); devm_kfree(ice_hw_to_dev(hw), buf); return ICE_ERR_CFG; } @@ -748,7 +721,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, * @parent: pointer to parent node * @layer: layer number to add nodes * @num_nodes: number of nodes to be added - * @first_node_teid: pointer to the first node teid + * @first_node_teid: pointer to the first node TEID * @num_nodes_added: pointer to number of nodes added * * This function adds nodes to a given layer. @@ -800,7 +773,7 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi, *num_nodes_added += num_added; } - /* Don't modify the first node teid memory if the first node was + /* Don't modify the first node TEID memory if the first node was * added already in the above call. Instead send some temp * memory for all other recursive calls. */ @@ -832,7 +805,7 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi, /** * ice_sched_get_qgrp_layer - get the current queue group layer number - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * * This function returns the current queue group layer number */ @@ -844,7 +817,7 @@ static u8 ice_sched_get_qgrp_layer(struct ice_hw *hw) /** * ice_sched_get_vsi_layer - get the current VSI layer number - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * * This function returns the current VSI layer number */ @@ -855,7 +828,7 @@ static u8 ice_sched_get_vsi_layer(struct ice_hw *hw) * 7 4 * 5 or less sw_entry_point_layer */ - /* calculate the vsi layer based on number of layers. */ + /* calculate the VSI layer based on number of layers. */ if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) { u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET; @@ -973,7 +946,7 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi) goto err_init_port; } - /* If the last node is a leaf node then the index of the Q group + /* If the last node is a leaf node then the index of the queue group * layer is two less than the number of elements.
*/ if (num_elems > 2 && buf[0].generic[num_elems - 1].data.elem_type == @@ -1082,7 +1055,7 @@ sched_query_out: /** * ice_sched_find_node_in_subtree - Find node in part of base node subtree - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * @base: pointer to the base node * @node: pointer to the node to search * @@ -1114,13 +1087,13 @@ ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base, } /** - * ice_sched_get_free_qparent - Get a free lan or rdma q group node + * ice_sched_get_free_qparent - Get a free LAN or RDMA queue group node * @pi: port information structure * @vsi_handle: software VSI handle * @tc: branch number - * @owner: lan or rdma + * @owner: LAN or RDMA * - * This function retrieves a free lan or rdma q group node + * This function retrieves a free LAN or RDMA queue group node */ struct ice_sched_node * ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc, @@ -1138,11 +1111,11 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc, if (!vsi_ctx) return NULL; vsi_node = vsi_ctx->sched.vsi_node[tc]; - /* validate invalid VSI id */ + /* validate invalid VSI ID */ if (!vsi_node) goto lan_q_exit; - /* get the first q group node from VSI sub-tree */ + /* get the first queue group node from VSI sub-tree */ qgrp_node = ice_sched_get_first_node(pi->hw, vsi_node, qgrp_layer); while (qgrp_node) { /* make sure the qgroup node is part of the VSI subtree */ @@ -1158,12 +1131,12 @@ lan_q_exit: } /** - * ice_sched_get_vsi_node - Get a VSI node based on VSI id - * @hw: pointer to the hw struct + * ice_sched_get_vsi_node - Get a VSI node based on VSI ID + * @hw: pointer to the HW struct * @tc_node: pointer to the TC node * @vsi_handle: software VSI handle * - * This function retrieves a VSI node for a given VSI id from a given + * This function retrieves a VSI node for a given VSI ID from a given * TC branch */ static struct ice_sched_node * @@ -1188,7 +1161,7 @@ ice_sched_get_vsi_node(struct ice_hw *hw, struct ice_sched_node *tc_node, /** * ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * @num_qs: number of queues * @num_nodes: num nodes array * @@ -1204,7 +1177,7 @@ ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes) qgl = ice_sched_get_qgrp_layer(hw); vsil = ice_sched_get_vsi_layer(hw); - /* calculate num nodes from q group to VSI layer */ + /* calculate num nodes from queue group to VSI layer */ for (i = qgl; i > vsil; i--) { /* round to the next integer if there is a remainder */ num = DIV_ROUND_UP(num, hw->max_children[i]); @@ -1220,10 +1193,10 @@ ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes) * @vsi_handle: software VSI handle * @tc_node: pointer to the TC node * @num_nodes: pointer to the num nodes that needs to be added per layer - * @owner: node owner (lan or rdma) + * @owner: node owner (LAN or RDMA) * * This function adds the VSI child nodes to tree. It gets called for - * lan and rdma separately. + * LAN and RDMA separately. 
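ice_sched_calc_vsi_child_nodes above walks from the queue-group layer toward the VSI layer, dividing the running count by each layer's fan-out with DIV_ROUND_UP. A worked example with made-up fan-outs: 40 queues under a fan-out of 8 need DIV_ROUND_UP(40, 8) = 5 queue-group nodes, and those 5 under a fan-out of 4 need DIV_ROUND_UP(5, 4) = 2 parent nodes. The same arithmetic as a compilable toy:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* hypothetical per-layer fan-out, queue-group layer first */
	const unsigned int max_children[] = { 8, 4 };
	unsigned int num = 40;	/* queues requested */
	unsigned int i;

	for (i = 0; i < 2; i++) {
		num = DIV_ROUND_UP(num, max_children[i]);
		printf("layer %u: %u nodes\n", i, num);
	}
	return 0;
}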
static enum ice_status ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, @@ -1271,44 +1244,8 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, } /** - * ice_sched_rm_vsi_child_nodes - remove VSI child nodes from the tree - * @pi: port information structure - * @vsi_node: pointer to the VSI node - * @num_nodes: pointer to the num nodes that needs to be removed per layer - * @owner: node owner (lan or rdma) - * - * This function removes the VSI child nodes from the tree. It gets called for - * lan and rdma separately. - */ -static void -ice_sched_rm_vsi_child_nodes(struct ice_port_info *pi, - struct ice_sched_node *vsi_node, u16 *num_nodes, - u8 owner) -{ - struct ice_sched_node *node, *next; - u8 i, qgl, vsil; - u16 num; - - qgl = ice_sched_get_qgrp_layer(pi->hw); - vsil = ice_sched_get_vsi_layer(pi->hw); - - for (i = qgl; i > vsil; i--) { - num = num_nodes[i]; - node = ice_sched_get_first_node(pi->hw, vsi_node, i); - while (node && num) { - next = node->sibling; - if (node->owner == owner && !node->num_children) { - ice_free_sched_node(pi, node); - num--; - } - node = next; - } - } -} - -/** * ice_sched_calc_vsi_support_nodes - calculate number of VSI support nodes - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * @tc_node: pointer to TC node * @num_nodes: pointer to num nodes array * @@ -1427,7 +1364,7 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc) /* calculate number of supported nodes needed for this VSI */ ice_sched_calc_vsi_support_nodes(hw, tc_node, num_nodes); - /* add vsi supported nodes to tc subtree */ + /* add VSI supported nodes to TC subtree */ return ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node, num_nodes); } @@ -1446,7 +1383,6 @@ static enum ice_status ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 new_numqs, u8 owner) { - u16 prev_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; struct ice_sched_node *vsi_node; struct ice_sched_node *tc_node; @@ -1454,7 +1390,6 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, enum ice_status status = 0; struct ice_hw *hw = pi->hw; u16 prev_numqs; - u8 i; tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) @@ -1473,36 +1408,25 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, else return ICE_ERR_PARAM; - /* num queues are not changed */ - if (prev_numqs == new_numqs) + /* the number of queues is unchanged or less than the previous number */ + if (new_numqs <= prev_numqs) return status; - - /* calculate number of nodes based on prev/new number of qs */ - if (prev_numqs) - ice_sched_calc_vsi_child_nodes(hw, prev_numqs, prev_num_nodes); - if (new_numqs) ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes); - - if (prev_numqs > new_numqs) { - for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++) - new_num_nodes[i] = prev_num_nodes[i] - new_num_nodes[i]; - - ice_sched_rm_vsi_child_nodes(pi, vsi_node, new_num_nodes, - owner); - } else { - for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++) - new_num_nodes[i] -= prev_num_nodes[i]; - - status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node, - new_num_nodes, owner); - if (status) - return status; - } - + /* Keep the maximum queue configuration provisioned at all times. Update + * the tree only if number of queues > previous number of queues.
This may + * leave some extra nodes in the tree if number of queues < previous + * number but that wouldn't harm anything. Removing those extra nodes + * may complicate the code if those nodes are part of SRL or + * individually rate limited. + */ + status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node, + new_num_nodes, owner); + if (status) + return status; vsi_ctx->sched.max_lanq[tc] = new_numqs; - return status; + return 0; } /** @@ -1511,7 +1435,7 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, * @vsi_handle: software VSI handle * @tc: TC number * @maxqs: max number of queues - * @owner: lan or rdma + * @owner: LAN or RDMA * @enable: TC enabled or disabled * * This function adds/updates VSI nodes based on the number of queues. If TC is @@ -1527,6 +1451,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs, enum ice_status status = 0; struct ice_hw *hw = pi->hw; + ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle); tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) return ICE_ERR_PARAM; @@ -1535,7 +1460,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs, return ICE_ERR_PARAM; vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle); - /* suspend the VSI if tc is not enabled */ + /* suspend the VSI if TC is not enabled */ if (!enable) { if (vsi_node && vsi_node->in_use) { u32 teid = le32_to_cpu(vsi_node->info.node_teid); @@ -1586,7 +1511,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs, } /** - * ice_sched_rm_agg_vsi_entry - remove agg related VSI info entry + * ice_sched_rm_agg_vsi_entry - remove aggregator related VSI info entry * @pi: port information structure * @vsi_handle: software VSI handle * @@ -1646,8 +1571,9 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner) { enum ice_status status = ICE_ERR_PARAM; struct ice_vsi_ctx *vsi_ctx; - u8 i, j = 0; + u8 i; + ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle); if (!ice_is_vsi_valid(pi->hw, vsi_handle)) return status; mutex_lock(&pi->sched_lock); @@ -1655,8 +1581,9 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner) if (!vsi_ctx) goto exit_sched_rm_vsi_cfg; - for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) { + ice_for_each_traffic_class(i) { struct ice_sched_node *vsi_node, *tc_node; + u8 j = 0; tc_node = ice_sched_get_tc_node(pi, i); if (!tc_node) @@ -1689,7 +1616,7 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner) ice_free_sched_node(pi, vsi_node); vsi_ctx->sched.vsi_node[i] = NULL; - /* clean up agg related vsi info if any */ + /* clean up aggregator related VSI info if any */ ice_sched_rm_agg_vsi_info(pi, vsi_handle); } if (owner == ICE_SCHED_NODE_OWNER_LAN) diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h index bee8221ad146..3902a8ad3025 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.h +++ b/drivers/net/ethernet/intel/ice/ice_sched.h @@ -24,6 +24,10 @@ struct ice_sched_agg_info { }; /* FW AQ command calls */ +enum ice_status +ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req, + struct ice_aqc_get_elem *buf, u16 buf_size, + u16 *elems_ret, struct ice_sq_cd *cd); enum ice_status ice_sched_init_port(struct ice_port_info *pi); enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw); void ice_sched_clear_port(struct ice_port_info *pi); diff --git a/drivers/net/ethernet/intel/ice/ice_status.h 
b/drivers/net/ethernet/intel/ice/ice_status.h index 683f48824a29..17afe6acb18a 100644 --- a/drivers/net/ethernet/intel/ice/ice_status.h +++ b/drivers/net/ethernet/intel/ice/ice_status.h @@ -12,6 +12,7 @@ enum ice_status { ICE_ERR_PARAM = -1, ICE_ERR_NOT_IMPL = -2, ICE_ERR_NOT_READY = -3, + ICE_ERR_NOT_SUPPORTED = -4, ICE_ERR_BAD_PTR = -5, ICE_ERR_INVAL_SIZE = -6, ICE_ERR_DEVICE_NOT_SUPPORTED = -8, diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 09d1c314b68f..ad6bb0fce5d1 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -19,7 +19,7 @@ * byte 6 = 0x2: to identify it as locally administered SA MAC * byte 12 = 0x81 & byte 13 = 0x00: * In case of VLAN filter first two bytes defines ether type (0x8100) - * and remaining two bytes are placeholder for programming a given VLAN id + * and remaining two bytes are placeholder for programming a given VLAN ID * In case of Ether type filter it is treated as header without VLAN tag * and byte 12 and 13 is used to program a given Ether type instead */ @@ -51,7 +51,7 @@ static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0, /** * ice_aq_alloc_free_res - command to allocate/free resources - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * @num_entries: number of resource entries in buffer * @buf: Indirect buffer to hold data parameters and response * @buf_size: size of buffer for indirect commands @@ -87,7 +87,7 @@ ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries, /** * ice_init_def_sw_recp - initialize the recipe book keeping tables - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * * Allocate memory for the entire recipe table and initialize the structures/ * entries corresponding to basic recipes. 
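The dummy_eth_header comment at the top of ice_switch.c describes a fixed training header: 0x02 marks the locally administered addresses, and bytes 12-13 carry the 0x8100 VLAN ethertype so that the following two bytes can later be patched with a concrete VLAN ID (or reused for an Ether type). A simplified illustration of that byte layout; this is a reading aid, not the driver's actual array:

static const unsigned char vlan_dummy_hdr[16] = {
	0x02, 0, 0, 0, 0, 0,	/* bytes 0-5:  DA, locally administered */
	0x02, 0, 0, 0, 0, 0,	/* bytes 6-11: SA, locally administered */
	0x81, 0x00,		/* bytes 12-13: ethertype 0x8100 (VLAN) */
	0x00, 0x00,		/* bytes 14-15: VLAN ID patched in later */
};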
@@ -163,7 +163,7 @@ ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf, /** * ice_aq_add_vsi - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * @vsi_ctx: pointer to a VSI context struct * @cd: pointer to command details structure or NULL * @@ -206,7 +206,7 @@ ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, /** * ice_aq_free_vsi - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * @vsi_ctx: pointer to a VSI context struct * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources * @cd: pointer to command details structure or NULL @@ -242,7 +242,7 @@ ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, /** * ice_aq_update_vsi - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * @vsi_ctx: pointer to a VSI context struct * @cd: pointer to command details structure or NULL * @@ -279,7 +279,7 @@ ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, /** * ice_is_vsi_valid - check whether the VSI is valid or not - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * @vsi_handle: VSI handle * * check whether the VSI is valid or not @@ -290,11 +290,11 @@ bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle) } /** - * ice_get_hw_vsi_num - return the hw VSI number - * @hw: pointer to the hw struct + * ice_get_hw_vsi_num - return the HW VSI number + * @hw: pointer to the HW struct * @vsi_handle: VSI handle * - * return the hw VSI number + * return the HW VSI number * Caution: call this function only if VSI is valid (ice_is_vsi_valid) */ u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle) @@ -304,7 +304,7 @@ u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle) /** * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * @vsi_handle: VSI handle * * return the VSI context entry for a given VSI handle @@ -316,21 +316,21 @@ struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle) /** * ice_save_vsi_ctx - save the VSI context for a given VSI handle - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * @vsi_handle: VSI handle * @vsi: VSI context pointer * * save the VSI context entry for a given VSI handle */ -static void ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, - struct ice_vsi_ctx *vsi) +static void +ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi) { hw->vsi_ctx[vsi_handle] = vsi; } /** * ice_clear_vsi_ctx - clear the VSI context entry - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * @vsi_handle: VSI handle * * clear the VSI context entry @@ -348,7 +348,7 @@ static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle) /** * ice_clear_all_vsi_ctx - clear all the VSI context entries - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct */ void ice_clear_all_vsi_ctx(struct ice_hw *hw) { @@ -360,7 +360,7 @@ void ice_clear_all_vsi_ctx(struct ice_hw *hw) /** * ice_add_vsi - add VSI context to the hardware and VSI handle list - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * @vsi_handle: unique VSI handle provided by drivers * @vsi_ctx: pointer to a VSI context struct * @cd: pointer to command details structure or NULL @@ -383,7 +383,7 @@ ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, return status; tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); if (!tmp_vsi_ctx) { - /* Create a new vsi context */ + /* Create a new VSI 
context */ tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp_vsi_ctx), GFP_KERNEL); if (!tmp_vsi_ctx) { @@ -398,12 +398,12 @@ ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num; } - return status; + return 0; } /** * ice_free_vsi- free VSI context from hardware and VSI handle list - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * @vsi_handle: unique VSI handle * @vsi_ctx: pointer to a VSI context struct * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources @@ -428,7 +428,7 @@ ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, /** * ice_update_vsi - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * @vsi_handle: unique VSI handle * @vsi_ctx: pointer to a VSI context struct * @cd: pointer to command details structure or NULL @@ -447,8 +447,8 @@ ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, /** * ice_aq_alloc_free_vsi_list - * @hw: pointer to the hw struct - * @vsi_list_id: VSI list id returned or used for lookup + * @hw: pointer to the HW struct + * @vsi_list_id: VSI list ID returned or used for lookup * @lkup_type: switch rule filter lookup type * @opc: switch rules population command type - pass in the command opcode * @@ -504,7 +504,7 @@ ice_aq_alloc_free_vsi_list_exit: /** * ice_aq_sw_rules - add/update/remove switch rules - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * @rule_list: pointer to switch rule population list * @rule_list_sz: total size of the rule list in bytes * @num_rules: number of switch rules in the rule_list @@ -643,21 +643,43 @@ static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi) fi->fltr_act == ICE_FWD_TO_VSI_LIST || fi->fltr_act == ICE_FWD_TO_Q || fi->fltr_act == ICE_FWD_TO_QGRP)) { - fi->lb_en = true; - /* Do not set lan_en to TRUE if + /* Setting LB for prune actions will result in replicated + * packets to the internal switch that will be dropped. + */ + if (fi->lkup_type != ICE_SW_LKUP_VLAN) + fi->lb_en = true; + + /* Set lan_en to TRUE if * 1. The switch is a VEB AND * 2 - * 2.1 The lookup is MAC with unicast addr for MAC, OR - * 2.2 The lookup is MAC_VLAN with unicast addr for MAC + * 2.1 The lookup is a directional lookup like ethertype, + * promiscuous, ethertype-MAC, promiscuous-VLAN + * and default-port OR + * 2.2 The lookup is VLAN, OR + * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR + * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC. + * + * OR * - * In all other cases, the LAN enable has to be set to true. + * The switch is a VEPA. + * + * In all other cases, the LAN enable has to be set to false. 
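The lan_en rules spelled out in the rewritten comment condense to a small predicate: a VEPA always forwards to the wire, while a VEB does so only for directional lookups, VLAN lookups, or MAC lookups on a non-unicast address. A condensed sketch with simplified types; the enum below is a stand-in for the driver's lookup types, not its real definitions:

#include <stdbool.h>

enum lkup { ETHERTYPE, PROMISC, ETHERTYPE_MAC, PROMISC_VLAN, DFLT,
	    VLAN, MAC, MAC_VLAN };

static bool want_lan_en(bool is_veb, enum lkup l, bool mac_is_unicast)
{
	if (!is_veb)
		return true;	/* VEPA: everything goes to the wire */

	switch (l) {
	case ETHERTYPE:
	case PROMISC:
	case ETHERTYPE_MAC:
	case PROMISC_VLAN:
	case DFLT:
	case VLAN:
		return true;		/* directional or VLAN lookups */
	case MAC:
	case MAC_VLAN:
		return !mac_is_unicast;	/* mcast/bcast MAC only */
	}
	return false;
}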
*/ - if (!(hw->evb_veb && - ((fi->lkup_type == ICE_SW_LKUP_MAC && - is_unicast_ether_addr(fi->l_data.mac.mac_addr)) || - (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN && - is_unicast_ether_addr(fi->l_data.mac_vlan.mac_addr))))) + if (hw->evb_veb) { + if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE || + fi->lkup_type == ICE_SW_LKUP_PROMISC || + fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC || + fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN || + fi->lkup_type == ICE_SW_LKUP_DFLT || + fi->lkup_type == ICE_SW_LKUP_VLAN || + (fi->lkup_type == ICE_SW_LKUP_MAC && + !is_unicast_ether_addr(fi->l_data.mac.mac_addr)) || + (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN && + !is_unicast_ether_addr(fi->l_data.mac.mac_addr))) + fi->lan_en = true; + } else { fi->lan_en = true; + } } } @@ -799,7 +821,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info, * @hw: pointer to the hardware structure * @m_ent: the management entry for which sw marker needs to be added * @sw_marker: sw marker to tag the Rx descriptor with - * @l_id: large action resource id + * @l_id: large action resource ID * * Create a large action to hold software marker and update the switch rule * entry pointed by m_ent with newly created large action @@ -811,8 +833,8 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, struct ice_aqc_sw_rules_elem *lg_act, *rx_tx; /* For software marker we need 3 large actions * 1. FWD action: FWD TO VSI or VSI LIST - * 2. GENERIC VALUE action to hold the profile id - * 3. GENERIC VALUE action to hold the software marker id + * 2. GENERIC VALUE action to hold the profile ID + * 3. GENERIC VALUE action to hold the software marker ID */ const u16 num_lg_acts = 3; enum ice_status status; @@ -875,13 +897,13 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx, ice_aqc_opc_update_sw_rules); - /* Update the action to point to the large action id */ + /* Update the action to point to the large action ID */ rx_tx->pdata.lkup_tx_rx.act = cpu_to_le32(ICE_SINGLE_ACT_PTR | ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M)); - /* Use the filter rule id of the previously created rule with single + /* Use the filter rule ID of the previously created rule with single * act. 
Once the update happens, hardware will treat this as large * action */ @@ -904,10 +926,10 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, * @hw: pointer to the hardware structure * @vsi_handle_arr: array of VSI handles to set in the VSI mapping * @num_vsi: number of VSI handles in the array - * @vsi_list_id: VSI list id generated as part of allocate resource + * @vsi_list_id: VSI list ID generated as part of allocate resource * - * Helper function to create a new entry of VSI list id to VSI mapping - * using the given VSI list id + * Helper function to create a new entry of VSI list ID to VSI mapping + * using the given VSI list ID */ static struct ice_vsi_list_map_info * ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, @@ -935,13 +957,13 @@ ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, * @hw: pointer to the hardware structure * @vsi_handle_arr: array of VSI handles to form a VSI list * @num_vsi: number of VSI handles in the array - * @vsi_list_id: VSI list id generated as part of allocate resource + * @vsi_list_id: VSI list ID generated as part of allocate resource * @remove: Boolean value to indicate if this is a remove action * @opc: switch rules population command type - pass in the command opcode * @lkup_type: lookup type of the filter * * Call AQ command to add a new switch rule or update existing switch rule - * using the given VSI list id + * using the given VSI list ID */ static enum ice_status ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, @@ -998,7 +1020,7 @@ exit: /** * ice_create_vsi_list_rule - Creates and populates a VSI list rule - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * @vsi_handle_arr: array of VSI handles to form a VSI list * @num_vsi: number of VSI handles in the array * @vsi_list_id: stores the ID of the VSI list to be created @@ -1092,7 +1114,7 @@ ice_create_pkt_fwd_rule_exit: * @f_info: filter information for switch rule * * Call AQ command to update a previously created switch rule with a - * VSI list id + * VSI list ID */ static enum ice_status ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info) @@ -1119,7 +1141,7 @@ ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info) /** * ice_update_sw_rule_bridge_mode - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * * Updates unicast switch filter rules based on VEB/VEPA mode */ @@ -1174,7 +1196,7 @@ enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw) * Allocate a new VSI list and add two VSIs * to this list using switch rule command * Update the previously created switch rule with the - * newly created VSI list id + * newly created VSI list ID * if a VSI list was previously created * Add the new VSI to the previously created VSI list set * using the update switch rule command @@ -1255,7 +1277,7 @@ ice_add_update_vsi_list(struct ice_hw *hw, return 0; /* Update the previously created VSI list set with - * the new VSI id passed in + * the new VSI ID passed in */ vsi_list_id = cur_fltr->fwd_id.vsi_list_id; opcode = ice_aqc_opc_update_sw_rules; @@ -1263,7 +1285,7 @@ ice_add_update_vsi_list(struct ice_hw *hw, status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, false, opcode, new_fltr->lkup_type); - /* update VSI list mapping info with new VSI id */ + /* update VSI list mapping info with new VSI ID */ if (!status) set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map); } @@ -1305,7 +1327,7 @@ 
ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info) * @hw: pointer to the hardware structure * @recp_id: lookup type for which VSI lists need to be searched * @vsi_handle: VSI handle to be found in VSI list - * @vsi_list_id: VSI list id found containing vsi_handle + * @vsi_list_id: VSI list ID found containing vsi_handle * * Helper function to search a VSI list with single entry containing given VSI * handle element. This can be extended further to search VSI list with more @@ -1336,7 +1358,7 @@ ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle, /** * ice_add_rule_internal - add rule for a given lookup type * @hw: pointer to the hardware structure - * @recp_id: lookup type (recipe id) for which rule has to be added + * @recp_id: lookup type (recipe ID) for which rule has to be added * @f_entry: structure containing MAC forwarding information * * Adds or updates the rule lists for a given recipe @@ -1381,7 +1403,7 @@ ice_add_rule_internal(struct ice_hw *hw, u8 recp_id, /** * ice_remove_vsi_list_rule * @hw: pointer to the hardware structure - * @vsi_list_id: VSI list id generated as part of allocate resource + * @vsi_list_id: VSI list ID generated as part of allocate resource * @lkup_type: switch rule filter lookup type * * The VSI list should be emptied before this function is called to remove the @@ -1506,7 +1528,7 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle, /** * ice_remove_rule_internal - Remove a filter rule of a given type * @hw: pointer to the hardware structure - * @recp_id: recipe id for which the rule needs to removed + * @recp_id: recipe ID for which the rule needs to be removed * @f_entry: rule entry containing filter information */ static enum ice_status @@ -1556,7 +1578,7 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id, status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem); if (status) goto exit; - /* if vsi count goes to zero after updating the vsi list */ + /* if VSI count goes to zero after updating the VSI list */ if (list_elem->vsi_count == 0) remove_rule = true; } @@ -1634,7 +1656,7 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list) return ICE_ERR_PARAM; hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id; - /* update the src in case it is vsi num */ + /* update the src in case it is VSI num */ if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI) return ICE_ERR_PARAM; m_list_itr->fltr_info.src = hw_vsi_id; @@ -1710,7 +1732,7 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list) ((u8 *)r_iter + (elem_sent * s_rule_size)); } - /* Fill up rule id based on the value returned from FW */ + /* Fill up rule ID based on the value returned from FW */ r_iter = s_rule; list_for_each_entry(m_list_itr, m_list, list_entry) { struct ice_fltr_info *f_info = &m_list_itr->fltr_info; @@ -1770,7 +1792,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry) ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle); new_fltr = &f_entry->fltr_info; - /* VLAN id should only be 12 bits */ + /* VLAN ID should only be 12 bits */ if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID) return ICE_ERR_PARAM; @@ -1828,7 +1850,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry) } } } else if (v_list_itr->vsi_list_info->ref_cnt == 1) { - /* Update existing VSI list to add new VSI id only if it used + /* Update existing VSI list to add new VSI ID only if it is used * by one VLAN rule. */
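The promotion that ice_add_update_vsi_list performs (a rule forwarding to a single VSI is converted to forward to a freshly allocated VSI list once a second VSI subscribes, after which further VSIs are simply appended to that list) can be reduced to a small model. Everything below is an illustrative standalone sketch, not driver code:

#include <stdbool.h>
#include <stdio.h>

/* Minimal model of the single-VSI to VSI-list promotion described above.
 * All names and the fixed-size member array are illustrative. */
struct model_rule {
	bool uses_list;		/* ICE_FWD_TO_VSI vs ICE_FWD_TO_VSI_LIST */
	unsigned short vsi;	/* valid while !uses_list */
	unsigned short list_id;
	unsigned short members[8];
	int member_cnt;
};

static void model_subscribe(struct model_rule *r, unsigned short new_vsi,
			    unsigned short next_list_id)
{
	if (!r->uses_list) {
		/* first extra subscriber: allocate a list holding both VSIs
		 * and repoint the rule at it (the driver does this with
		 * ice_create_vsi_list_rule plus ice_update_pkt_fwd_rule) */
		r->list_id = next_list_id;
		r->members[0] = r->vsi;
		r->members[1] = new_vsi;
		r->member_cnt = 2;
		r->uses_list = true;
	} else if (r->member_cnt < 8) {
		/* list already exists: just append the new VSI */
		r->members[r->member_cnt++] = new_vsi;
	}
}

int main(void)
{
	struct model_rule r = { .uses_list = false, .vsi = 3 };

	model_subscribe(&r, 5, 100);	/* promotes rule to list 100 = {3, 5} */
	model_subscribe(&r, 7, 101);	/* appends: list 100 = {3, 5, 7} */
	printf("list %d with %d members\n", r.list_id, r.member_cnt);
	return 0;
}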
cur_fltr = &v_list_itr->fltr_info; @@ -1838,7 +1860,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry) /* If VLAN rule exists and VSI list being used by this rule is * referenced by more than 1 VLAN rule, then create a new VSI * list appending previous VSI with new VSI and update existing - * VLAN rule to point to new VSI list id + * VLAN rule to point to new VSI list ID */ struct ice_fltr_info tmp_fltr; u16 vsi_handle_arr[2]; @@ -2170,7 +2192,7 @@ ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle, struct ice_fltr_mgmt_list_entry *fm_entry; enum ice_status status = 0; - /* check to make sure VSI id is valid and within boundary */ + /* check to make sure VSI ID is valid and within boundary */ if (!ice_is_vsi_valid(hw, vsi_handle)) return ICE_ERR_PARAM; @@ -2190,6 +2212,291 @@ ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle, } /** + * ice_determine_promisc_mask + * @fi: filter info to parse + * + * Helper function to determine which ICE_PROMISC_ mask corresponds + * to the given filter info. + */ +static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi) +{ + u16 vid = fi->l_data.mac_vlan.vlan_id; + u8 *macaddr = fi->l_data.mac.mac_addr; + bool is_tx_fltr = false; + u8 promisc_mask = 0; + + if (fi->flag == ICE_FLTR_TX) + is_tx_fltr = true; + + if (is_broadcast_ether_addr(macaddr)) + promisc_mask |= is_tx_fltr ? + ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX; + else if (is_multicast_ether_addr(macaddr)) + promisc_mask |= is_tx_fltr ? + ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX; + else if (is_unicast_ether_addr(macaddr)) + promisc_mask |= is_tx_fltr ? + ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX; + if (vid) + promisc_mask |= is_tx_fltr ? + ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX; + + return promisc_mask; +} + +/** + * ice_remove_promisc - Remove promisc based filter rules + * @hw: pointer to the hardware structure + * @recp_id: recipe ID for which the rule needs to be removed + * @v_list: list of promisc entries + */ +static enum ice_status +ice_remove_promisc(struct ice_hw *hw, u8 recp_id, + struct list_head *v_list) +{ + struct ice_fltr_list_entry *v_list_itr, *tmp; + + list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) { + v_list_itr->status = + ice_remove_rule_internal(hw, recp_id, v_list_itr); + if (v_list_itr->status) + return v_list_itr->status; + } + return 0; +} + +/** + * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI + * @hw: pointer to the hardware structure + * @vsi_handle: VSI handle to clear mode + * @promisc_mask: mask of promiscuous config bits to clear + * @vid: VLAN ID to clear VLAN promiscuous + */ +enum ice_status +ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, + u16 vid) +{ + struct ice_switch_info *sw = hw->switch_info; + struct ice_fltr_list_entry *fm_entry, *tmp; + struct list_head remove_list_head; + struct ice_fltr_mgmt_list_entry *itr; + struct list_head *rule_head; + struct mutex *rule_lock; /* Lock to protect filter rule list */ + enum ice_status status = 0; + u8 recipe_id; + + if (!ice_is_vsi_valid(hw, vsi_handle)) + return ICE_ERR_PARAM; + + if (vid) + recipe_id = ICE_SW_LKUP_PROMISC_VLAN; + else + recipe_id = ICE_SW_LKUP_PROMISC; + + rule_head = &sw->recp_list[recipe_id].filt_rules; + rule_lock = &sw->recp_list[recipe_id].filt_rule_lock; + + INIT_LIST_HEAD(&remove_list_head); + + mutex_lock(rule_lock); + list_for_each_entry(itr, rule_head, list_entry) { + u8 fltr_promisc_mask = 0; + + if (!ice_vsi_uses_fltr(itr, vsi_handle)) + continue; + + 
fltr_promisc_mask |= + ice_determine_promisc_mask(&itr->fltr_info); + + /* Skip if filter is not completely specified by given mask */ + if (fltr_promisc_mask & ~promisc_mask) + continue; + + status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle, + &remove_list_head, + &itr->fltr_info); + if (status) { + mutex_unlock(rule_lock); + goto free_fltr_list; + } + } + mutex_unlock(rule_lock); + + status = ice_remove_promisc(hw, recipe_id, &remove_list_head); + +free_fltr_list: + list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) { + list_del(&fm_entry->list_entry); + devm_kfree(ice_hw_to_dev(hw), fm_entry); + } + + return status; +} + +/** + * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s) + * @hw: pointer to the hardware structure + * @vsi_handle: VSI handle to configure + * @promisc_mask: mask of promiscuous config bits + * @vid: VLAN ID to set VLAN promiscuous + */ +enum ice_status +ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid) +{ + enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR }; + struct ice_fltr_list_entry f_list_entry; + struct ice_fltr_info new_fltr; + enum ice_status status = 0; + bool is_tx_fltr; + u16 hw_vsi_id; + int pkt_type; + u8 recipe_id; + + if (!ice_is_vsi_valid(hw, vsi_handle)) + return ICE_ERR_PARAM; + hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); + + memset(&new_fltr, 0, sizeof(new_fltr)); + + if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) { + new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN; + new_fltr.l_data.mac_vlan.vlan_id = vid; + recipe_id = ICE_SW_LKUP_PROMISC_VLAN; + } else { + new_fltr.lkup_type = ICE_SW_LKUP_PROMISC; + recipe_id = ICE_SW_LKUP_PROMISC; + } + + /* Separate filters must be set for each direction/packet type + * combination, so we will loop over the mask value, store the + * individual type, and clear it out in the input mask as it + * is found. 
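In isolation, the strategy this comment describes (record one flag, clear it from the input mask, repeat until the mask is empty) is the classic peel-the-lowest-set-bit loop. A minimal standalone sketch of the pattern follows; the flag values are illustrative, and the driver itself open-codes the iteration as an if/else chain per flag pair so that the matching VLAN flag can be folded into the same pass:

#include <stdio.h>

/* Generic form of the loop described above: peel one flag off the mask per
 * iteration until none remain. */
int main(void)
{
	unsigned int promisc_mask = 0x1 | 0x4 | 0x10; /* e.g. UCAST|MCAST|BCAST Rx */

	while (promisc_mask) {
		unsigned int flag = promisc_mask & -promisc_mask; /* lowest set bit */

		promisc_mask &= ~flag;	/* clear it out of the input mask */
		printf("programming filter for flag 0x%x\n", flag);
	}
	return 0;
}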
+ */ + while (promisc_mask) { + u8 *mac_addr; + + pkt_type = 0; + is_tx_fltr = false; + + if (promisc_mask & ICE_PROMISC_UCAST_RX) { + promisc_mask &= ~ICE_PROMISC_UCAST_RX; + pkt_type = UCAST_FLTR; + } else if (promisc_mask & ICE_PROMISC_UCAST_TX) { + promisc_mask &= ~ICE_PROMISC_UCAST_TX; + pkt_type = UCAST_FLTR; + is_tx_fltr = true; + } else if (promisc_mask & ICE_PROMISC_MCAST_RX) { + promisc_mask &= ~ICE_PROMISC_MCAST_RX; + pkt_type = MCAST_FLTR; + } else if (promisc_mask & ICE_PROMISC_MCAST_TX) { + promisc_mask &= ~ICE_PROMISC_MCAST_TX; + pkt_type = MCAST_FLTR; + is_tx_fltr = true; + } else if (promisc_mask & ICE_PROMISC_BCAST_RX) { + promisc_mask &= ~ICE_PROMISC_BCAST_RX; + pkt_type = BCAST_FLTR; + } else if (promisc_mask & ICE_PROMISC_BCAST_TX) { + promisc_mask &= ~ICE_PROMISC_BCAST_TX; + pkt_type = BCAST_FLTR; + is_tx_fltr = true; + } + + /* Check for VLAN promiscuous flag */ + if (promisc_mask & ICE_PROMISC_VLAN_RX) { + promisc_mask &= ~ICE_PROMISC_VLAN_RX; + } else if (promisc_mask & ICE_PROMISC_VLAN_TX) { + promisc_mask &= ~ICE_PROMISC_VLAN_TX; + is_tx_fltr = true; + } + + /* Set filter DA based on packet type */ + mac_addr = new_fltr.l_data.mac.mac_addr; + if (pkt_type == BCAST_FLTR) { + eth_broadcast_addr(mac_addr); + } else if (pkt_type == MCAST_FLTR || + pkt_type == UCAST_FLTR) { + /* Use the dummy ether header DA */ + ether_addr_copy(mac_addr, dummy_eth_header); + if (pkt_type == MCAST_FLTR) + mac_addr[0] |= 0x1; /* Set multicast bit */ + } + + /* Need to reset this to zero for all iterations */ + new_fltr.flag = 0; + if (is_tx_fltr) { + new_fltr.flag |= ICE_FLTR_TX; + new_fltr.src = hw_vsi_id; + } else { + new_fltr.flag |= ICE_FLTR_RX; + new_fltr.src = hw->port_info->lport; + } + + new_fltr.fltr_act = ICE_FWD_TO_VSI; + new_fltr.vsi_handle = vsi_handle; + new_fltr.fwd_id.hw_vsi_id = hw_vsi_id; + f_list_entry.fltr_info = new_fltr; + + status = ice_add_rule_internal(hw, recipe_id, &f_list_entry); + if (status) + goto set_promisc_exit; + } + +set_promisc_exit: + return status; +} + +/** + * ice_set_vlan_vsi_promisc + * @hw: pointer to the hardware structure + * @vsi_handle: VSI handle to configure + * @promisc_mask: mask of promiscuous config bits + * @rm_vlan_promisc: Clear VLANs VSI promisc mode + * + * Configure VSI with all associated VLANs to given promiscuous mode(s) + */ +enum ice_status +ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, + bool rm_vlan_promisc) +{ + struct ice_switch_info *sw = hw->switch_info; + struct ice_fltr_list_entry *list_itr, *tmp; + struct list_head vsi_list_head; + struct list_head *vlan_head; + struct mutex *vlan_lock; /* Lock to protect filter rule list */ + enum ice_status status; + u16 vlan_id; + + INIT_LIST_HEAD(&vsi_list_head); + vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock; + vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules; + mutex_lock(vlan_lock); + status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head, + &vsi_list_head); + mutex_unlock(vlan_lock); + if (status) + goto free_fltr_list; + + list_for_each_entry(list_itr, &vsi_list_head, list_entry) { + vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id; + if (rm_vlan_promisc) + status = ice_clear_vsi_promisc(hw, vsi_handle, + promisc_mask, vlan_id); + else + status = ice_set_vsi_promisc(hw, vsi_handle, + promisc_mask, vlan_id); + if (status) + break; + } + +free_fltr_list: + list_for_each_entry_safe(list_itr, tmp, &vsi_list_head, list_entry) { + list_del(&list_itr->list_entry); + devm_kfree(ice_hw_to_dev(hw), list_itr); + } + return 
status; +} + +/** * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI * @hw: pointer to the hardware structure * @vsi_handle: VSI handle to remove filters from @@ -2224,12 +2531,14 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle, case ICE_SW_LKUP_VLAN: ice_remove_vlan(hw, &remove_list_head); break; + case ICE_SW_LKUP_PROMISC: + case ICE_SW_LKUP_PROMISC_VLAN: + ice_remove_promisc(hw, lkup, &remove_list_head); + break; case ICE_SW_LKUP_MAC_VLAN: case ICE_SW_LKUP_ETHERTYPE: case ICE_SW_LKUP_ETHERTYPE_MAC: - case ICE_SW_LKUP_PROMISC: case ICE_SW_LKUP_DFLT: - case ICE_SW_LKUP_PROMISC_VLAN: case ICE_SW_LKUP_LAST: default: ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup); @@ -2263,7 +2572,7 @@ void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle) * ice_replay_vsi_fltr - Replay filters for requested VSI * @hw: pointer to the hardware structure * @vsi_handle: driver VSI handle - * @recp_id: Recipe id for which rules need to be replayed + * @recp_id: Recipe ID for which rules need to be replayed * @list_head: list for which filters need to be replayed * * Replays the filter of recipe recp_id for a VSI represented via vsi_handle. @@ -2287,7 +2596,7 @@ ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id, f_entry.fltr_info = itr->fltr_info; if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN && itr->fltr_info.vsi_handle == vsi_handle) { - /* update the src in case it is vsi num */ + /* update the src in case it is VSI num */ if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI) f_entry.fltr_info.src = hw_vsi_id; status = ice_add_rule_internal(hw, recp_id, &f_entry); @@ -2302,7 +2611,7 @@ ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id, clear_bit(vsi_handle, itr->vsi_list_info->vsi_map); f_entry.fltr_info.vsi_handle = vsi_handle; f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI; - /* update the src in case it is vsi num */ + /* update the src in case it is VSI num */ if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI) f_entry.fltr_info.src = hw_vsi_id; if (recp_id == ICE_SW_LKUP_VLAN) @@ -2342,7 +2651,7 @@ enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle) /** * ice_rm_all_sw_replay_rule_info - deletes filter replay rules - * @hw: pointer to the hw struct + * @hw: pointer to the HW struct * * Deletes the filter replay rules. */ diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h index d5ef0bd58bf9..64a2fecfce20 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.h +++ b/drivers/net/ethernet/intel/ice/ice_switch.h @@ -44,7 +44,7 @@ enum ice_sw_lkup_type { ICE_SW_LKUP_LAST }; -/* type of filter src id */ +/* type of filter src ID */ enum ice_src_id { ICE_SRC_ID_UNKNOWN = 0, ICE_SRC_ID_VSI, @@ -95,8 +95,8 @@ struct ice_fltr_info { /* Depending on filter action */ union { - /* queue id in case of ICE_FWD_TO_Q and starting - * queue id in case of ICE_FWD_TO_QGRP. + /* queue ID in case of ICE_FWD_TO_Q and starting + * queue ID in case of ICE_FWD_TO_QGRP. */ u16 q_id:11; u16 hw_vsi_id:10; @@ -143,7 +143,7 @@ struct ice_sw_recipe { DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES); }; -/* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list id */ +/* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list ID */ struct ice_vsi_list_map_info { struct list_head list_entry; DECLARE_BITMAP(vsi_map, ICE_MAX_VSI); @@ -165,7 +165,7 @@ struct ice_fltr_list_entry { * used for VLAN membership. 
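Taken together with the prototypes added to ice_switch.h below, these functions give callers a mask-based promiscuous interface. A hypothetical caller might look like the following sketch; only the ICE_PROMISC_* flags and the two entry points come from this patch, while the wrapper function and its error handling are illustrative:

/* Hypothetical caller of the new interface (sketch, not patch code). */
static enum ice_status
example_toggle_rx_promisc(struct ice_hw *hw, u16 vsi_handle, bool enable)
{
	u8 mask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_MCAST_RX;

	/* VLAN ID 0 selects the non-VLAN promiscuous recipe */
	if (enable)
		return ice_set_vsi_promisc(hw, vsi_handle, mask, 0);

	/* mirror the same mask to tear the filters down again */
	return ice_clear_vsi_promisc(hw, vsi_handle, mask, 0);
}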
*/ struct ice_fltr_mgmt_list_entry { - /* back pointer to VSI list id to VSI list mapping */ + /* back pointer to VSI list ID to VSI list mapping */ struct ice_vsi_list_map_info *vsi_list_info; u16 vsi_count; #define ICE_INVAL_LG_ACT_INDEX 0xffff @@ -178,6 +178,17 @@ struct ice_fltr_mgmt_list_entry { u8 counter_index; }; +enum ice_promisc_flags { + ICE_PROMISC_UCAST_RX = 0x1, + ICE_PROMISC_UCAST_TX = 0x2, + ICE_PROMISC_MCAST_RX = 0x4, + ICE_PROMISC_MCAST_TX = 0x8, + ICE_PROMISC_BCAST_RX = 0x10, + ICE_PROMISC_BCAST_TX = 0x20, + ICE_PROMISC_VLAN_RX = 0x40, + ICE_PROMISC_VLAN_TX = 0x80, +}; + /* VSI related commands */ enum ice_status ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, @@ -199,10 +210,22 @@ enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw); enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_lst); enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst); void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle); -enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *m_list); +enum ice_status +ice_add_vlan(struct ice_hw *hw, struct list_head *m_list); enum ice_status ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list); + +/* Promisc/defport setup for VSIs */ enum ice_status ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction); +enum ice_status +ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, + u16 vid); +enum ice_status +ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, + u16 vid); +enum ice_status +ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, + bool rm_vlan_promisc); enum ice_status ice_init_def_sw_recp(struct ice_hw *hw); u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle); diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index c289d97f477d..259f118c7d8b 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -6,6 +6,7 @@ #include <linux/prefetch.h> #include <linux/mm.h> #include "ice.h" +#include "ice_dcb_lib.h" #define ICE_RX_HDR_SIZE 256 @@ -100,8 +101,8 @@ void ice_free_tx_ring(struct ice_ring *tx_ring) * * Returns true if there's any budget left (e.g. the clean is finished) */ -static bool ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring, - int napi_budget) +static bool +ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring, int napi_budget) { unsigned int total_bytes = 0, total_pkts = 0; unsigned int budget = vsi->work_lmt; @@ -236,9 +237,9 @@ int ice_setup_tx_ring(struct ice_ring *tx_ring) if (!tx_ring->tx_buf) return -ENOMEM; - /* round up to nearest 4K */ + /* round up to nearest page */ tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc), - 4096); + PAGE_SIZE); tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma, GFP_KERNEL); if (!tx_ring->desc) { @@ -282,8 +283,17 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring) if (!rx_buf->page) continue; - dma_unmap_page(dev, rx_buf->dma, PAGE_SIZE, DMA_FROM_DEVICE); - __free_pages(rx_buf->page, 0); + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. 
+ */ + dma_sync_single_range_for_cpu(dev, rx_buf->dma, + rx_buf->page_offset, + ICE_RXBUF_2048, DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + dma_unmap_page_attrs(dev, rx_buf->dma, PAGE_SIZE, + DMA_FROM_DEVICE, ICE_RX_DMA_ATTR); + __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias); rx_buf->page = NULL; rx_buf->page_offset = 0; @@ -339,9 +349,9 @@ int ice_setup_rx_ring(struct ice_ring *rx_ring) if (!rx_ring->rx_buf) return -ENOMEM; - /* round up to nearest 4K */ - rx_ring->size = rx_ring->count * sizeof(union ice_32byte_rx_desc); - rx_ring->size = ALIGN(rx_ring->size, 4096); + /* round up to nearest page */ + rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), + PAGE_SIZE); rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma, GFP_KERNEL); if (!rx_ring->desc) { @@ -389,8 +399,8 @@ static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val) * Returns true if the page was successfully allocated or * reused. */ -static bool ice_alloc_mapped_page(struct ice_ring *rx_ring, - struct ice_rx_buf *bi) +static bool +ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi) { struct page *page = bi->page; dma_addr_t dma; @@ -409,7 +419,8 @@ static bool ice_alloc_mapped_page(struct ice_ring *rx_ring, } /* map page for use */ - dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); + dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE, + DMA_FROM_DEVICE, ICE_RX_DMA_ATTR); /* if mapping failed free memory back to system since * there isn't much point in holding memory we can't use @@ -423,6 +434,8 @@ static bool ice_alloc_mapped_page(struct ice_ring *rx_ring, bi->dma = dma; bi->page = page; bi->page_offset = 0; + page_ref_add(page, USHRT_MAX - 1); + bi->pagecnt_bias = USHRT_MAX; return true; } @@ -444,7 +457,7 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count) if (!rx_ring->netdev || !cleaned_count) return false; - /* get the RX descriptor and buffer based on next_to_use */ + /* get the Rx descriptor and buffer based on next_to_use */ rx_desc = ICE_RX_DESC(rx_ring, ntu); bi = &rx_ring->rx_buf[ntu]; @@ -452,6 +465,12 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count) if (!ice_alloc_mapped_page(rx_ring, bi)) goto no_bufs; + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, + bi->page_offset, + ICE_RXBUF_2048, + DMA_FROM_DEVICE); + /* Refresh the desc even if buffer_addrs didn't change * because each write-back erases this info. */ @@ -497,61 +516,43 @@ static bool ice_page_is_reserved(struct page *page) } /** - * ice_add_rx_frag - Add contents of Rx buffer to sk_buff - * @rx_buf: buffer containing page to add - * @rx_desc: descriptor containing length of buffer written by hardware - * @skb: sk_buf to place the data into - * - * This function will add the data contained in rx_buf->page to the skb. - * This is done either through a direct copy if the data in the buffer is - * less than the skb header size, otherwise it will just attach the page as - * a frag to the skb. + * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse + * @rx_buf: Rx buffer to adjust + * @size: Size of adjustment * - * The function will then update the page offset if necessary and return - * true if the buffer can be reused by the adapter. + * Update the offset within page so that Rx buf will be ready to be reused. 
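In the PAGE_SIZE < 8192 case the adjustment is a plain XOR with the buffer size, which ping-pongs a 4096-byte page between its two 2048-byte halves. A standalone demonstration of that flip, using the ICE_RXBUF_2048 value from the driver:

#include <stdio.h>

/* The half-page flip used when PAGE_SIZE < 8192: XOR-ing the offset with
 * the buffer size alternates between the two halves of the page. */
int main(void)
{
	unsigned int page_offset = 0;
	const unsigned int size = 2048;	/* ICE_RXBUF_2048 */
	int i;

	for (i = 0; i < 4; i++) {
		printf("buffer %d uses offset %u\n", i, page_offset);
		page_offset ^= size;	/* 0 -> 2048 -> 0 -> ... */
	}
	return 0;
}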
+ * For systems with PAGE_SIZE < 8192 this function will flip the page offset + * so the second half of page assigned to Rx buffer will be used, otherwise + * the offset is moved by the @size bytes */ -static bool ice_add_rx_frag(struct ice_rx_buf *rx_buf, - union ice_32b_rx_flex_desc *rx_desc, - struct sk_buff *skb) +static void +ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size) { #if (PAGE_SIZE < 8192) - unsigned int truesize = ICE_RXBUF_2048; + /* flip page offset to other buffer */ + rx_buf->page_offset ^= size; #else - unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048; - unsigned int truesize; -#endif /* PAGE_SIZE < 8192) */ - - struct page *page; - unsigned int size; - - size = le16_to_cpu(rx_desc->wb.pkt_len) & - ICE_RX_FLX_DESC_PKT_LEN_M; - - page = rx_buf->page; + /* move offset up to the next cache line */ + rx_buf->page_offset += size; +#endif +} +/** + * ice_can_reuse_rx_page - Determine if page can be reused for another Rx + * @rx_buf: buffer containing the page + * + * If page is reusable, we have a green light for calling ice_reuse_rx_page, + * which will assign the current buffer to the buffer that next_to_alloc is + * pointing to; otherwise, the DMA mapping needs to be destroyed and + * page freed + */ +static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf) +{ #if (PAGE_SIZE >= 8192) - truesize = ALIGN(size, L1_CACHE_BYTES); -#endif /* PAGE_SIZE >= 8192) */ - - /* will the data fit in the skb we allocated? if so, just - * copy it as it is pretty small anyway - */ - if (size <= ICE_RX_HDR_SIZE && !skb_is_nonlinear(skb)) { - unsigned char *va = page_address(page) + rx_buf->page_offset; - - memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); - - /* page is not reserved, we can reuse buffer as-is */ - if (likely(!ice_page_is_reserved(page))) - return true; - - /* this page cannot be reused so discard it */ - __free_pages(page, 0); - return false; - } - - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, - rx_buf->page_offset, size, truesize); + unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048; +#endif + unsigned int pagecnt_bias = rx_buf->pagecnt_bias; + struct page *page = rx_buf->page; /* avoid re-using remote pages */ if (unlikely(ice_page_is_reserved(page))) @@ -559,36 +560,61 @@ static bool ice_add_rx_frag(struct ice_rx_buf *rx_buf, #if (PAGE_SIZE < 8192) /* if we are only owner of page we can reuse it */ - if (unlikely(page_count(page) != 1)) + if (unlikely((page_count(page) - pagecnt_bias) > 1)) return false; - - /* flip page offset to other buffer */ - rx_buf->page_offset ^= truesize; #else - /* move offset up to the next cache line */ - rx_buf->page_offset += truesize; - if (rx_buf->page_offset > last_offset) return false; #endif /* PAGE_SIZE < 8192) */ - /* Even if we own the page, we are not allowed to use atomic_set() - * This would break get_page_unless_zero() users. + /* If we have drained the page fragment pool we need to update + * the pagecnt_bias and page count so that we fully restock the + * number of references the driver holds. */ - get_page(rx_buf->page); + if (unlikely(pagecnt_bias == 1)) { + page_ref_add(page, USHRT_MAX - 1); + rx_buf->pagecnt_bias = USHRT_MAX; + } return true; } /** + * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag + * @rx_buf: buffer containing page to add + * @skb: sk_buff to place the data into + * @size: packet length from rx_desc + * + * This function will add the data contained in rx_buf->page to the skb. + * It will just attach the page as a frag to the skb. 
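The pagecnt_bias scheme that ice_can_reuse_rx_page tests above is reference-count batching: page_ref_add(page, USHRT_MAX - 1) is taken once in ice_alloc_mapped_page, and each packet then costs only a local bias decrement instead of an atomic operation. A standalone model of the bookkeeping; the constants and the (count - bias) test mirror the patch, while the scenario is illustrative:

#include <limits.h>
#include <stdio.h>

/* Reusable while nobody beyond the driver's batch and at most one
 * in-flight skb holds a reference, i.e. (count - bias) <= 1. */
static int reusable(unsigned int count, unsigned int bias)
{
	return (count - bias) <= 1;
}

int main(void)
{
	unsigned int count = 1;		/* fresh page from the allocator */
	unsigned int bias;

	count += USHRT_MAX - 1;		/* page_ref_add(page, USHRT_MAX - 1) */
	bias = USHRT_MAX;

	bias--;				/* first half handed to an skb */
	printf("one frag outstanding: reusable=%d\n", reusable(count, bias));	/* 1 */

	bias--;				/* second half handed out too */
	printf("both halves outstanding: reusable=%d\n", reusable(count, bias));/* 0 */

	count--;			/* stack freed the first frag: put_page() */
	printf("after one put_page: reusable=%d\n", reusable(count, bias));	/* 1 */
	return 0;
}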
+ * The function will then update the page offset. + */ +static void +ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb, + unsigned int size) +{ +#if (PAGE_SIZE >= 8192) + unsigned int truesize = SKB_DATA_ALIGN(size); +#else + unsigned int truesize = ICE_RXBUF_2048; +#endif + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page, + rx_buf->page_offset, size, truesize); + + /* page is being used so we must update the page offset */ + ice_rx_buf_adjust_pg_offset(rx_buf, truesize); +} + +/** * ice_reuse_rx_page - page flip buffer and store it back on the ring * @rx_ring: Rx descriptor ring to store buffers on * @old_buf: donor buffer to have page reused * * Synchronizes page for reuse by the adapter */ -static void ice_reuse_rx_page(struct ice_ring *rx_ring, - struct ice_rx_buf *old_buf) +static void +ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf) { u16 nta = rx_ring->next_to_alloc; struct ice_rx_buf *new_buf; @@ -599,121 +625,132 @@ static void ice_reuse_rx_page(struct ice_ring *rx_ring, nta++; rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; - /* transfer page from old buffer to new buffer */ - *new_buf = *old_buf; + /* Transfer page from old buffer to new buffer. + * Move each member individually to avoid possible store + * forwarding stalls and unnecessary copy of skb. + */ + new_buf->dma = old_buf->dma; + new_buf->page = old_buf->page; + new_buf->page_offset = old_buf->page_offset; + new_buf->pagecnt_bias = old_buf->pagecnt_bias; } /** - * ice_fetch_rx_buf - Allocate skb and populate it + * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use * @rx_ring: Rx descriptor ring to transact packets on - * @rx_desc: descriptor containing info written by hardware + * @skb: skb to be used + * @size: size of buffer to add to skb * - * This function allocates an skb on the fly, and populates it with the page - * data from the current receive descriptor, taking care to set up the skb - * correctly, as well as handling calling the page recycle function if - * necessary. + * This function will pull an Rx buffer from the ring and synchronize it + * for use by the CPU. */ -static struct sk_buff *ice_fetch_rx_buf(struct ice_ring *rx_ring, - union ice_32b_rx_flex_desc *rx_desc) +static struct ice_rx_buf * +ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb, + const unsigned int size) { struct ice_rx_buf *rx_buf; - struct sk_buff *skb; - struct page *page; rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean]; - page = rx_buf->page; - prefetchw(page); + prefetchw(rx_buf->page); + *skb = rx_buf->skb; - skb = rx_buf->skb; + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma, + rx_buf->page_offset, size, + DMA_FROM_DEVICE); - if (likely(!skb)) { - u8 *page_addr = page_address(page) + rx_buf->page_offset; + /* We have pulled a buffer for use, so decrement pagecnt_bias */ + rx_buf->pagecnt_bias--; - /* prefetch first cache line of first page */ - prefetch(page_addr); + return rx_buf; +} + +/** + * ice_construct_skb - Allocate skb and populate it + * @rx_ring: Rx descriptor ring to transact packets on + * @rx_buf: Rx buffer to pull data from + * @size: the length of the packet + * + * This function allocates an skb. It then populates it with the page + * data from the current receive descriptor, taking care to set up the + * skb correctly. 
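The construction path below copies at most ICE_RX_HDR_SIZE (256) bytes into the skb's linear area and leaves anything beyond that in the page, attached as a frag. A standalone sketch of that split; the real code additionally trims the copy length to the true header boundary via eth_get_headlen(), which this sketch ignores by assuming the headers fill the whole budget:

#include <stdio.h>

/* Linear/frag split performed by ice_construct_skb: up to ICE_RX_HDR_SIZE
 * bytes go into the linear area, the remainder stays in the page frag. */
int main(void)
{
	const unsigned int hdr_budget = 256;	/* ICE_RX_HDR_SIZE */
	unsigned int sizes[] = { 60, 256, 1514 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		unsigned int headlen = sizes[i] > hdr_budget ? hdr_budget : sizes[i];

		printf("pkt %4u: linear %3u, frag %4u\n",
		       sizes[i], headlen, sizes[i] - headlen);
	}
	return 0;
}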
+ */ +static struct sk_buff * +ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, + unsigned int size) +{ + void *va = page_address(rx_buf->page) + rx_buf->page_offset; + unsigned int headlen; + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + prefetch(va); #if L1_CACHE_BYTES < 128 - prefetch((void *)(page_addr + L1_CACHE_BYTES)); + prefetch((u8 *)va + L1_CACHE_BYTES); #endif /* L1_CACHE_BYTES */ - /* allocate a skb to store the frags */ - skb = __napi_alloc_skb(&rx_ring->q_vector->napi, - ICE_RX_HDR_SIZE, - GFP_ATOMIC | __GFP_NOWARN); - if (unlikely(!skb)) { - rx_ring->rx_stats.alloc_buf_failed++; - return NULL; - } - - /* we will be copying header into skb->data in - * pskb_may_pull so it is in our interest to prefetch - * it now to avoid a possible cache miss - */ - prefetchw(skb->data); + /* allocate a skb to store the frags */ + skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) + return NULL; - skb_record_rx_queue(skb, rx_ring->q_index); - } else { - /* we are reusing so sync this buffer for CPU use */ - dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma, - rx_buf->page_offset, - ICE_RXBUF_2048, - DMA_FROM_DEVICE); + skb_record_rx_queue(skb, rx_ring->q_index); + /* Determine available headroom for copy */ + headlen = size; + if (headlen > ICE_RX_HDR_SIZE) + headlen = eth_get_headlen(skb->dev, va, ICE_RX_HDR_SIZE); - rx_buf->skb = NULL; - } + /* align pull length to size of long to optimize memcpy performance */ + memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); - /* pull page into skb */ - if (ice_add_rx_frag(rx_buf, rx_desc, skb)) { - /* hand second half of page back to the ring */ - ice_reuse_rx_page(rx_ring, rx_buf); - rx_ring->rx_stats.page_reuse_count++; + /* if we exhaust the linear part then add what is left as a frag */ + size -= headlen; + if (size) { +#if (PAGE_SIZE >= 8192) + unsigned int truesize = SKB_DATA_ALIGN(size); +#else + unsigned int truesize = ICE_RXBUF_2048; +#endif + skb_add_rx_frag(skb, 0, rx_buf->page, + rx_buf->page_offset + headlen, size, truesize); + /* buffer is used by skb, update page_offset */ + ice_rx_buf_adjust_pg_offset(rx_buf, truesize); } else { - /* we are not reusing the buffer so unmap it */ - dma_unmap_page(rx_ring->dev, rx_buf->dma, PAGE_SIZE, - DMA_FROM_DEVICE); + /* buffer is unused, reset bias back to rx_buf; data was copied + * onto skb's linear part so there's no need for adjusting + * page offset and we can reuse this buffer as-is + */ + rx_buf->pagecnt_bias++; } - /* clear contents of buffer_info */ - rx_buf->page = NULL; - return skb; } /** - * ice_pull_tail - ice specific version of skb_pull_tail - * @skb: pointer to current skb being adjusted + * ice_put_rx_buf - Clean up used buffer and either recycle or free + * @rx_ring: Rx descriptor ring to transact packets on + * @rx_buf: Rx buffer to pull data from * - * This function is an ice specific version of __pskb_pull_tail. The - * main difference between this version and the original function is that - * this function can make several assumptions about the state of things - * that allow for significant optimizations versus the standard function. - * As a result we can do things like drop a frag and maintain an accurate - * truesize for the skb. + * This function will clean up the contents of the rx_buf. It will + * either recycle the buffer or unmap it and free the associated resources. 
*/ -static void ice_pull_tail(struct sk_buff *skb) +static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf) { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; - unsigned int pull_len; - unsigned char *va; - - /* it is valid to use page_address instead of kmap since we are - * working with pages allocated out of the lomem pool per - * alloc_page(GFP_ATOMIC) - */ - va = skb_frag_address(frag); - - /* we need the header to contain the greater of either ETH_HLEN or - * 60 bytes if the skb->len is less than 60 for skb_pad. - */ - pull_len = eth_get_headlen(va, ICE_RX_HDR_SIZE); - - /* align pull length to size of long to optimize memcpy performance */ - skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); + /* hand second half of page back to the ring */ + if (ice_can_reuse_rx_page(rx_buf)) { + ice_reuse_rx_page(rx_ring, rx_buf); + rx_ring->rx_stats.page_reuse_count++; + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, PAGE_SIZE, + DMA_FROM_DEVICE, ICE_RX_DMA_ATTR); + __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias); + } - /* update all of the pointers */ - skb_frag_size_sub(frag, pull_len); - frag->page_offset += pull_len; - skb->data_len -= pull_len; - skb->tail += pull_len; + /* clear contents of buffer_info */ + rx_buf->page = NULL; + rx_buf->skb = NULL; } /** @@ -730,10 +767,6 @@ static void ice_pull_tail(struct sk_buff *skb) */ static bool ice_cleanup_headers(struct sk_buff *skb) { - /* place header in linear portion of buffer */ - if (skb_is_nonlinear(skb)) - ice_pull_tail(skb); - /* if eth_skb_pad returns an error the skb was freed */ if (eth_skb_pad(skb)) return true; @@ -751,8 +784,8 @@ static bool ice_cleanup_headers(struct sk_buff *skb) * The status_error_len doesn't need to be shifted because it begins * at offset zero. */ -static bool ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, - const u16 stat_err_bits) +static bool +ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, const u16 stat_err_bits) { return !!(rx_desc->wb.status_error0 & cpu_to_le16(stat_err_bits)); @@ -769,9 +802,9 @@ static bool ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, * sk_buff in the next buffer to be chained and return true indicating * that this is in fact a non-EOP buffer. */ -static bool ice_is_non_eop(struct ice_ring *rx_ring, - union ice_32b_rx_flex_desc *rx_desc, - struct sk_buff *skb) +static bool +ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, + struct sk_buff *skb) { u32 ntc = rx_ring->next_to_clean + 1; @@ -838,8 +871,9 @@ ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, * * skb->protocol must be set before this function is called */ -static void ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb, - union ice_32b_rx_flex_desc *rx_desc, u8 ptype) +static void +ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb, + union ice_32b_rx_flex_desc *rx_desc, u8 ptype) { struct ice_rx_ptype_decoded decoded; u32 rx_error, rx_status; @@ -909,9 +943,10 @@ checksum_fail: * order to populate the hash, checksum, VLAN, protocol, and * other fields within the skb. 
*/ -static void ice_process_skb_fields(struct ice_ring *rx_ring, - union ice_32b_rx_flex_desc *rx_desc, - struct sk_buff *skb, u8 ptype) +static void +ice_process_skb_fields(struct ice_ring *rx_ring, + union ice_32b_rx_flex_desc *rx_desc, + struct sk_buff *skb, u8 ptype) { ice_rx_hash(rx_ring, rx_desc, skb, ptype); @@ -925,18 +960,17 @@ static void ice_process_skb_fields(struct ice_ring *rx_ring, * ice_receive_skb - Send a completed packet up the stack * @rx_ring: Rx ring in play * @skb: packet to send up - * @vlan_tag: vlan tag for packet + * @vlan_tag: VLAN tag for packet * * This function sends the completed packet (via skb) up the stack using - * gro receive functions (with/without vlan tag) + * GRO receive functions (with/without VLAN tag) */ -static void ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, - u16 vlan_tag) +static void +ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag) { if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && - (vlan_tag & VLAN_VID_MASK)) { + (vlan_tag & VLAN_VID_MASK)) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); - } napi_gro_receive(&rx_ring->q_vector->napi, skb); } @@ -958,10 +992,12 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) u16 cleaned_count = ICE_DESC_UNUSED(rx_ring); bool failure = false; - /* start the loop to process RX packets bounded by 'budget' */ + /* start the loop to process Rx packets bounded by 'budget' */ while (likely(total_rx_pkts < (unsigned int)budget)) { union ice_32b_rx_flex_desc *rx_desc; + struct ice_rx_buf *rx_buf; struct sk_buff *skb; + unsigned int size; u16 stat_err_bits; u16 vlan_tag = 0; u8 rx_ptype; @@ -973,7 +1009,7 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) cleaned_count = 0; } - /* get the RX desc from RX ring based on 'next_to_clean' */ + /* get the Rx desc from Rx ring based on 'next_to_clean' */ rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean); /* status_error_len will always be zero for unused descriptors @@ -991,11 +1027,24 @@ */ dma_rmb(); + size = le16_to_cpu(rx_desc->wb.pkt_len) & + ICE_RX_FLX_DESC_PKT_LEN_M; + + rx_buf = ice_get_rx_buf(rx_ring, &skb, size); /* allocate (if needed) and populate skb */ - skb = ice_fetch_rx_buf(rx_ring, rx_desc); - if (!skb) + if (skb) + ice_add_rx_frag(rx_buf, skb, size); + else + skb = ice_construct_skb(rx_ring, rx_buf, size); + + /* exit if we failed to retrieve a buffer */ + if (!skb) { + rx_ring->rx_stats.alloc_buf_failed++; + rx_buf->pagecnt_bias++; break; + } + ice_put_rx_buf(rx_ring, rx_buf); cleaned_count++; /* skip if it is NOP desc */ @@ -1049,17 +1098,247 @@ } /** + * ice_adjust_itr_by_size_and_speed - Adjust ITR based on current traffic + * @port_info: port_info structure containing the current link speed + * @avg_pkt_size: average size of Tx or Rx packets based on clean routine + * @itr: ITR value to update + * + * Calculate how large an increment should be applied to the ITR value passed + * in based on wmem_default, SKB overhead, Ethernet overhead, and the current + * link speed. 
+ * + * The following is a calculation derived from: + * wmem_default / (size + overhead) = desired_pkts_per_int + * rate / bits_per_byte / (size + Ethernet overhead) = pkt_rate + * (desired_pkts_per_int / pkt_rate) * usecs_per_sec = ITR value + * + * Assuming wmem_default is 212992 and overhead is 640 bytes per + * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the + * formula down to: + * + * ITR = (wmem_default * bits_per_byte * usecs_per_sec / rate) * + * ((pkt_size + 24) / (pkt_size + 640)) + */ +static unsigned int +ice_adjust_itr_by_size_and_speed(struct ice_port_info *port_info, + unsigned int avg_pkt_size, + unsigned int itr) +{ + switch (port_info->phy.link_info.link_speed) { + case ICE_AQ_LINK_SPEED_100GB: + itr += DIV_ROUND_UP(17 * (avg_pkt_size + 24), + avg_pkt_size + 640); + break; + case ICE_AQ_LINK_SPEED_50GB: + itr += DIV_ROUND_UP(34 * (avg_pkt_size + 24), + avg_pkt_size + 640); + break; + case ICE_AQ_LINK_SPEED_40GB: + itr += DIV_ROUND_UP(43 * (avg_pkt_size + 24), + avg_pkt_size + 640); + break; + case ICE_AQ_LINK_SPEED_25GB: + itr += DIV_ROUND_UP(68 * (avg_pkt_size + 24), + avg_pkt_size + 640); + break; + case ICE_AQ_LINK_SPEED_20GB: + itr += DIV_ROUND_UP(85 * (avg_pkt_size + 24), + avg_pkt_size + 640); + break; + case ICE_AQ_LINK_SPEED_10GB: + /* fall through */ + default: + itr += DIV_ROUND_UP(170 * (avg_pkt_size + 24), + avg_pkt_size + 640); + break; + } + + if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) { + itr &= ICE_ITR_ADAPTIVE_LATENCY; + itr += ICE_ITR_ADAPTIVE_MAX_USECS; + } + + return itr; +} + +/** + * ice_update_itr - update the adaptive ITR value based on statistics + * @q_vector: structure containing interrupt and ring information + * @rc: structure containing ring performance data + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. + */ +static void +ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc) +{ + unsigned long next_update = jiffies; + unsigned int packets, bytes, itr; + bool container_is_rx; + + if (!rc->ring || !ITR_IS_DYNAMIC(rc->itr_setting)) + return; + + /* If itr_countdown is set it means we programmed an ITR within + * the last 4 interrupt cycles. This has a side effect of us + * potentially firing an early interrupt. In order to work around + * this we need to throw out any data received for a few + * interrupts following the update. + */ + if (q_vector->itr_countdown) { + itr = rc->target_itr; + goto clear_counts; + } + + container_is_rx = (&q_vector->rx == rc); + /* For Rx we want to push the delay up and default to low latency. + * For Tx we want to pull the delay down and default to high latency. + */ + itr = container_is_rx ? + ICE_ITR_ADAPTIVE_MIN_USECS | ICE_ITR_ADAPTIVE_LATENCY : + ICE_ITR_ADAPTIVE_MAX_USECS | ICE_ITR_ADAPTIVE_LATENCY; + + /* If we didn't update within up to 1 - 2 jiffies we can assume + * that either packets are coming in so slow there hasn't been + * any work, or that there is so much work that NAPI is dealing + * with interrupt moderation and we don't need to do anything. 
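The per-speed constants in the switch statement above follow directly from that formula with wmem_default = 212992; for example, a 25 GbE link carrying 1500-byte average packets adds DIV_ROUND_UP(68 * 1524, 2140) = 49 to the ITR value. A standalone re-derivation of the constants:

#include <stdio.h>

/* Re-derivation of the per-speed constants above:
 * constant = wmem_default * bits_per_byte * usecs_per_sec / link_rate,
 * with wmem_default = 212992, rounded to the nearest integer. */
int main(void)
{
	const unsigned long long numer = 212992ULL * 8 * 1000000;
	const unsigned long long gbps[] = { 100, 50, 40, 25, 20, 10 };
	unsigned int i;

	for (i = 0; i < sizeof(gbps) / sizeof(gbps[0]); i++) {
		unsigned long long rate = gbps[i] * 1000000000ULL;

		/* prints 17, 34, 43, 68, 85, 170 */
		printf("%3llu GbE -> %llu\n", gbps[i],
		       (numer + rate / 2) / rate);
	}
	return 0;
}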
+ */ + if (time_after(next_update, rc->next_update)) + goto clear_counts; + + packets = rc->total_pkts; + bytes = rc->total_bytes; + + if (container_is_rx) { + /* If Rx there are 1 to 4 packets and bytes are less than + * 9000 assume insufficient data to use bulk rate limiting + * approach unless Tx is already in bulk rate limiting. We + * are likely latency driven. + */ + if (packets && packets < 4 && bytes < 9000 && + (q_vector->tx.target_itr & ICE_ITR_ADAPTIVE_LATENCY)) { + itr = ICE_ITR_ADAPTIVE_LATENCY; + goto adjust_by_size_and_speed; + } + } else if (packets < 4) { + /* If we have Tx and Rx ITR maxed and Tx ITR is running in + * bulk mode and we are receiving 4 or fewer packets just + * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so + * that the Rx can relax. + */ + if (rc->target_itr == ICE_ITR_ADAPTIVE_MAX_USECS && + (q_vector->rx.target_itr & ICE_ITR_MASK) == + ICE_ITR_ADAPTIVE_MAX_USECS) + goto clear_counts; + } else if (packets > 32) { + /* If we have processed over 32 packets in a single interrupt + * for Tx assume we need to switch over to "bulk" mode. + */ + rc->target_itr &= ~ICE_ITR_ADAPTIVE_LATENCY; + } + + /* We have no packets to actually measure against. This means + * either one of the other queues on this vector is active or + * we are a Tx queue doing TSO with too high of an interrupt rate. + * + * Between 4 and 56 we can assume that our current interrupt delay + * is only slightly too low. As such we should increase it by a small + * fixed amount. + */ + if (packets < 56) { + itr = rc->target_itr + ICE_ITR_ADAPTIVE_MIN_INC; + if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) { + itr &= ICE_ITR_ADAPTIVE_LATENCY; + itr += ICE_ITR_ADAPTIVE_MAX_USECS; + } + goto clear_counts; + } + + if (packets <= 256) { + itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr); + itr &= ICE_ITR_MASK; + + /* Between 56 and 112 is our "goldilocks" zone where we are + * working out "just right". Just report that our current + * ITR is good for us. + */ + if (packets <= 112) + goto clear_counts; + + /* If packet count is 128 or greater we are likely looking + * at a slight overrun of the delay we want. Try halving + * our delay to see if that will cut the number of packets + * in half per interrupt. + */ + itr >>= 1; + itr &= ICE_ITR_MASK; + if (itr < ICE_ITR_ADAPTIVE_MIN_USECS) + itr = ICE_ITR_ADAPTIVE_MIN_USECS; + + goto clear_counts; + } + + /* The paths below assume we are dealing with a bulk ITR since + * number of packets is greater than 256. We are just going to have + * to compute a value and try to bring the count under control, + * though for smaller packet sizes there isn't much we can do as + * NAPI polling will likely be kicking in sooner rather than later. 
+ */ + itr = ICE_ITR_ADAPTIVE_BULK; + +adjust_by_size_and_speed: + + /* based on checks above packets cannot be 0 so division is safe */ + itr = ice_adjust_itr_by_size_and_speed(q_vector->vsi->port_info, + bytes / packets, itr); + +clear_counts: + /* write back value */ + rc->target_itr = itr; + + /* next update should occur within next jiffy */ + rc->next_update = next_update + 1; + + rc->total_bytes = 0; + rc->total_pkts = 0; +} + +/** * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register * @itr_idx: interrupt throttling index - * @reg_itr: interrupt throttling value adjusted based on ITR granularity + * @itr: interrupt throttling value in usecs */ -static u32 ice_buildreg_itr(int itr_idx, u16 reg_itr) +static u32 ice_buildreg_itr(u16 itr_idx, u16 itr) { + /* The itr value is reported in microseconds, and the register value is + * recorded in 2 microsecond units. For this reason we only need to + * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this + * granularity as a shift instead of division. The mask makes sure the + * ITR value is never odd so we don't accidentally write into the field + * prior to the ITR field. + */ + itr &= ICE_ITR_MASK; + return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M | (itr_idx << GLINT_DYN_CTL_ITR_INDX_S) | - (reg_itr << GLINT_DYN_CTL_INTERVAL_S); + (itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S)); } +/* The act of updating the ITR will cause it to immediately trigger. In order + * to prevent this from throwing off adaptive update statistics we defer the + * update so that it can only happen so often. So after either Tx or Rx are + * updated we make the adaptive scheme wait until either the ITR completely + * expires via the next_update expiration or we have been through at least + * 3 interrupts. + */ +#define ITR_COUNTDOWN_START 3 + /** * ice_update_ena_itr - Update ITR and re-enable MSIX interrupt * @vsi: the VSI associated with the q_vector @@ -1068,10 +1347,14 @@ static u32 ice_buildreg_itr(int itr_idx, u16 reg_itr) static void ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector) { - struct ice_hw *hw = &vsi->back->hw; - struct ice_ring_container *rc; + struct ice_ring_container *tx = &q_vector->tx; + struct ice_ring_container *rx = &q_vector->rx; u32 itr_val; + /* This will do nothing if dynamic updates are not enabled */ + ice_update_itr(q_vector, tx); + ice_update_itr(q_vector, rx); + /* This block of logic allows us to get away with only updating * one ITR value with each interrupt. The idea is to perform a * pseudo-lazy update with the following criteria. @@ -1080,35 +1363,36 @@ ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector) * 2. If we must reduce an ITR that is given highest priority. * 3. We then give priority to increasing ITR based on amount. 
*/ - if (q_vector->rx.target_itr < q_vector->rx.current_itr) { - rc = &q_vector->rx; + if (rx->target_itr < rx->current_itr) { /* Rx ITR needs to be reduced, this is highest priority */ - itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr); - rc->current_itr = rc->target_itr; - } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) || - ((q_vector->rx.target_itr - q_vector->rx.current_itr) < - (q_vector->tx.target_itr - q_vector->tx.current_itr))) { - rc = &q_vector->tx; + itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr); + rx->current_itr = rx->target_itr; + q_vector->itr_countdown = ITR_COUNTDOWN_START; + } else if ((tx->target_itr < tx->current_itr) || + ((rx->target_itr - rx->current_itr) < + (tx->target_itr - tx->current_itr))) { /* Tx ITR needs to be reduced, this is second priority * Tx ITR needs to be increased more than Rx, fourth priority */ - itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr); - rc->current_itr = rc->target_itr; - } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) { - rc = &q_vector->rx; + itr_val = ice_buildreg_itr(tx->itr_idx, tx->target_itr); + tx->current_itr = tx->target_itr; + q_vector->itr_countdown = ITR_COUNTDOWN_START; + } else if (rx->current_itr != rx->target_itr) { /* Rx ITR needs to be increased, third priority */ - itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr); - rc->current_itr = rc->target_itr; + itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr); + rx->current_itr = rx->target_itr; + q_vector->itr_countdown = ITR_COUNTDOWN_START; } else { /* Still have to re-enable the interrupts */ itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0); + if (q_vector->itr_countdown) + q_vector->itr_countdown--; } - if (!test_bit(__ICE_DOWN, vsi->state)) { - int vector = vsi->hw_base_vector + q_vector->v_idx; - - wr32(hw, GLINT_DYN_CTL(vector), itr_val); - } + if (!test_bit(__ICE_DOWN, vsi->state)) + wr32(&vsi->back->hw, + GLINT_DYN_CTL(vsi->hw_base_vector + q_vector->v_idx), + itr_val); } /** @@ -1354,7 +1638,7 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first, ice_maybe_stop_tx(tx_ring, DESC_NEEDED); /* notify HW of packet */ - if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { writel(i, tx_ring->tail); /* we need this if more than one processor can write to our tail @@ -1480,7 +1764,7 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off) } /** - * ice_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW + * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW * @tx_ring: ring to send buffer on * @first: pointer to struct ice_tx_buf * @@ -1506,7 +1790,7 @@ ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first) * to the encapsulated ethertype. 
*/ skb->protocol = vlan_get_protocol(skb); - goto out; + return 0; } /* if we have a HW VLAN tag being added, default to the HW one */ @@ -1528,8 +1812,7 @@ ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first) first->tx_flags |= ICE_TX_FLAGS_SW_VLAN; } -out: - return 0; + return ice_tx_prepare_vlan_flags_dcb(tx_ring, first); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index fc358ea81816..c75d9fd12a68 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -45,8 +45,13 @@ #define ICE_TX_FLAGS_HW_VLAN BIT(1) #define ICE_TX_FLAGS_SW_VLAN BIT(2) #define ICE_TX_FLAGS_VLAN_M 0xffff0000 +#define ICE_TX_FLAGS_VLAN_PR_M 0xe0000000 +#define ICE_TX_FLAGS_VLAN_PR_S 29 #define ICE_TX_FLAGS_VLAN_S 16 +#define ICE_RX_DMA_ATTR \ + (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) + struct ice_tx_buf { struct ice_tx_desc *next_to_watch; struct sk_buff *skb; @@ -73,6 +78,7 @@ struct ice_rx_buf { dma_addr_t dma; struct page *page; unsigned int page_offset; + u16 pagecnt_bias; }; struct ice_q_stats { @@ -124,10 +130,17 @@ enum ice_rx_dtype { #define ICE_ITR_DYNAMIC 0x8000 /* used as flag for itr_setting */ #define ITR_IS_DYNAMIC(setting) (!!((setting) & ICE_ITR_DYNAMIC)) #define ITR_TO_REG(setting) ((setting) & ~ICE_ITR_DYNAMIC) -#define ICE_ITR_GRAN_S 1 /* Assume ITR granularity is 2us */ +#define ICE_ITR_GRAN_S 1 /* ITR granularity is always 2us */ +#define ICE_ITR_GRAN_US BIT(ICE_ITR_GRAN_S) #define ICE_ITR_MASK 0x1FFE /* ITR register value alignment mask */ #define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~ICE_ITR_MASK) +#define ICE_ITR_ADAPTIVE_MIN_INC 0x0002 +#define ICE_ITR_ADAPTIVE_MIN_USECS 0x0002 +#define ICE_ITR_ADAPTIVE_MAX_USECS 0x00FA +#define ICE_ITR_ADAPTIVE_LATENCY 0x8000 +#define ICE_ITR_ADAPTIVE_BULK 0x0000 + #define ICE_DFLT_INTRL 0 /* Legacy or Advanced Mode Queue */ @@ -149,6 +162,9 @@ struct ice_ring { }; u16 q_index; /* Queue number of ring */ u32 txq_teid; /* Added Tx queue TEID */ +#ifdef CONFIG_DCB + u8 dcb_tc; /* Traffic class of ring */ +#endif /* CONFIG_DCB */ u16 count; /* Number of descriptors */ u16 reg_idx; /* HW register index of the ring */ @@ -173,21 +189,13 @@ struct ice_ring { u16 next_to_alloc; } ____cacheline_internodealigned_in_smp; -enum ice_latency_range { - ICE_LOWEST_LATENCY = 0, - ICE_LOW_LATENCY = 1, - ICE_BULK_LATENCY = 2, - ICE_ULTRA_LATENCY = 3, -}; - struct ice_ring_container { /* head of linked-list of rings */ struct ice_ring *ring; unsigned long next_update; /* jiffies value of next queue update */ unsigned int total_bytes; /* total bytes processed this int */ unsigned int total_pkts; /* total packets processed this int */ - enum ice_latency_range latency_range; - int itr_idx; /* index in the interrupt vector */ + u16 itr_idx; /* index in the interrupt vector */ u16 target_itr; /* value in usecs divided by the hw->itr_gran */ u16 current_itr; /* value in usecs divided by the hw->itr_gran */ /* high bit set means dynamic ITR, rest is used to store user diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 17086d5b5c33..77bc0439e108 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -24,6 +24,7 @@ static inline bool ice_is_tc_ena(u8 bitmap, u8 tc) /* debug masks - set these bits in hw->debug_mask to control output */ #define ICE_DBG_INIT BIT_ULL(1) #define ICE_DBG_LINK BIT_ULL(4) +#define ICE_DBG_PHY BIT_ULL(5) 
#define ICE_DBG_QCTX BIT_ULL(6) #define ICE_DBG_NVM BIT_ULL(7) #define ICE_DBG_LAN BIT_ULL(8) @@ -106,7 +107,7 @@ struct ice_link_status { }; /* Different reset sources for which a disable queue AQ call has to be made in - * order to clean the TX scheduler as a part of the reset + * order to clean the Tx scheduler as a part of the reset */ enum ice_disq_rst_src { ICE_NO_RESET = 0, @@ -128,11 +129,11 @@ struct ice_phy_info { struct ice_hw_common_caps { u32 valid_functions; - /* TX/RX queues */ - u16 num_rxq; /* Number/Total RX queues */ - u16 rxq_first_id; /* First queue ID for RX queues */ - u16 num_txq; /* Number/Total TX queues */ - u16 txq_first_id; /* First queue ID for TX queues */ + /* Tx/Rx queues */ + u16 num_rxq; /* Number/Total Rx queues */ + u16 rxq_first_id; /* First queue ID for Rx queues */ + u16 num_txq; /* Number/Total Tx queues */ + u16 txq_first_id; /* First queue ID for Tx queues */ /* MSI-X vectors */ u16 num_msix_vectors; @@ -147,6 +148,8 @@ struct ice_hw_common_caps { /* RSS related capabilities */ u16 rss_table_size; /* 512 for PFs and 64 for VFs */ u8 rss_table_entry_width; /* RSS Entry width in bits */ + + u8 dcb; }; /* Function specific capabilities */ @@ -209,12 +212,17 @@ struct ice_nvm_info { #define ICE_MAX_TRAFFIC_CLASS 8 #define ICE_TXSCHED_MAX_BRANCHES ICE_MAX_TRAFFIC_CLASS +#define ice_for_each_traffic_class(_i) \ + for ((_i) = 0; (_i) < ICE_MAX_TRAFFIC_CLASS; (_i)++) + +#define ICE_INVAL_TEID 0xFFFFFFFF + struct ice_sched_node { struct ice_sched_node *parent; struct ice_sched_node *sibling; /* next sibling in the same layer */ struct ice_sched_node **children; struct ice_aqc_txsched_elem_data info; - u32 agg_id; /* aggregator group id */ + u32 agg_id; /* aggregator group ID */ u16 vsi_handle; u8 in_use; /* suspended or in use */ u8 tx_sched_layer; /* Logical Layer (1-9) */ @@ -241,13 +249,12 @@ enum ice_agg_type { #define ICE_SCHED_DFLT_RL_PROF_ID 0 #define ICE_SCHED_DFLT_BW_WT 1 -/* vsi type list entry to locate corresponding vsi/ag nodes */ +/* VSI type list entry to locate corresponding VSI/ag nodes */ struct ice_sched_vsi_info { struct ice_sched_node *vsi_node[ICE_MAX_TRAFFIC_CLASS]; struct ice_sched_node *ag_node[ICE_MAX_TRAFFIC_CLASS]; struct list_head list_entry; u16 max_lanq[ICE_MAX_TRAFFIC_CLASS]; - u16 vsi_id; }; /* driver defines the policy */ @@ -257,9 +264,62 @@ struct ice_sched_tx_policy { u8 rdma_ena; }; +/* CEE or IEEE 802.1Qaz ETS Configuration data */ +struct ice_dcb_ets_cfg { + u8 willing; + u8 cbs; + u8 maxtcs; + u8 prio_table[ICE_MAX_TRAFFIC_CLASS]; + u8 tcbwtable[ICE_MAX_TRAFFIC_CLASS]; + u8 tsatable[ICE_MAX_TRAFFIC_CLASS]; +}; + +/* CEE or IEEE 802.1Qaz PFC Configuration data */ +struct ice_dcb_pfc_cfg { + u8 willing; + u8 mbc; + u8 pfccap; + u8 pfcena; +}; + +/* CEE or IEEE 802.1Qaz Application Priority data */ +struct ice_dcb_app_priority_table { + u16 prot_id; + u8 priority; + u8 selector; +}; + +#define ICE_MAX_USER_PRIORITY 8 +#define ICE_DCBX_MAX_APPS 32 +#define ICE_LLDPDU_SIZE 1500 +#define ICE_TLV_STATUS_OPER 0x1 +#define ICE_TLV_STATUS_SYNC 0x2 +#define ICE_TLV_STATUS_ERR 0x4 +#define ICE_APP_PROT_ID_FCOE 0x8906 +#define ICE_APP_PROT_ID_ISCSI 0x0cbc +#define ICE_APP_PROT_ID_FIP 0x8914 +#define ICE_APP_SEL_ETHTYPE 0x1 +#define ICE_APP_SEL_TCPIP 0x2 +#define ICE_CEE_APP_SEL_ETHTYPE 0x0 +#define ICE_CEE_APP_SEL_TCPIP 0x1 + +struct ice_dcbx_cfg { + u32 numapps; + u32 tlv_status; /* CEE mode TLV status */ + struct ice_dcb_ets_cfg etscfg; + struct ice_dcb_ets_cfg etsrec; + struct ice_dcb_pfc_cfg pfc; + struct 
ice_dcb_app_priority_table app[ICE_DCBX_MAX_APPS]; + u8 dcbx_mode; +#define ICE_DCBX_MODE_CEE 0x1 +#define ICE_DCBX_MODE_IEEE 0x2 + u8 app_mode; +#define ICE_DCBX_APPS_NON_WILLING 0x1 +}; + struct ice_port_info { struct ice_sched_node *root; /* Root Node per Port */ - struct ice_hw *hw; /* back pointer to hw instance */ + struct ice_hw *hw; /* back pointer to HW instance */ u32 last_node_teid; /* scheduler last node info */ u16 sw_id; /* Initial switch ID belongs to port */ u16 pf_vf_num; @@ -274,6 +334,13 @@ struct ice_port_info { struct ice_mac_info mac; struct ice_phy_info phy; struct mutex sched_lock; /* protect access to TXSched tree */ + struct ice_dcbx_cfg local_dcbx_cfg; /* Oper/Local Cfg */ + /* DCBX info */ + struct ice_dcbx_cfg remote_dcbx_cfg; /* Peer Cfg */ + struct ice_dcbx_cfg desired_dcbx_cfg; /* CEE Desired Cfg */ + /* LLDP/DCBX Status */ + u8 dcbx_status; + u8 is_sw_lldp; u8 lport; #define ICE_LPORT_MASK 0xff u8 is_vf; @@ -320,7 +387,7 @@ struct ice_hw { u8 pf_id; /* device profile info */ - /* TX Scheduler values */ + /* Tx Scheduler values */ u16 num_tx_sched_layers; u16 num_tx_sched_phys_layers; u8 flattened_layers; @@ -331,7 +398,7 @@ struct ice_hw { struct ice_vsi_ctx *vsi_ctx[ICE_MAX_VSI]; u8 evb_veb; /* true for VEB, false for VEPA */ - u8 reset_ongoing; /* true if hw is in reset, false otherwise */ + u8 reset_ongoing; /* true if HW is in reset, false otherwise */ struct ice_bus_info bus; struct ice_nvm_info nvm; struct ice_hw_dev_caps dev_caps; /* device capabilities */ @@ -410,6 +477,11 @@ struct ice_hw_port_stats { u64 link_xoff_rx; /* lxoffrxc */ u64 link_xon_tx; /* lxontxc */ u64 link_xoff_tx; /* lxofftxc */ + u64 priority_xon_rx[8]; /* pxonrxc[8] */ + u64 priority_xoff_rx[8]; /* pxoffrxc[8] */ + u64 priority_xon_tx[8]; /* pxontxc[8] */ + u64 priority_xoff_tx[8]; /* pxofftxc[8] */ + u64 priority_xon_2_xoff[8]; /* pxon2offc[8] */ u64 rx_size_64; /* prc64 */ u64 rx_size_127; /* prc127 */ u64 rx_size_255; /* prc255 */ diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index 57155b4a59dc..e562ea15b79b 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -5,6 +5,37 @@ #include "ice_lib.h" /** + * ice_err_to_virt_err - translate errors for VF return code + * @ice_err: error return code + */ +static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err) +{ + switch (ice_err) { + case ICE_SUCCESS: + return VIRTCHNL_STATUS_SUCCESS; + case ICE_ERR_BAD_PTR: + case ICE_ERR_INVAL_SIZE: + case ICE_ERR_DEVICE_NOT_SUPPORTED: + case ICE_ERR_PARAM: + case ICE_ERR_CFG: + return VIRTCHNL_STATUS_ERR_PARAM; + case ICE_ERR_NO_MEMORY: + return VIRTCHNL_STATUS_ERR_NO_MEMORY; + case ICE_ERR_NOT_READY: + case ICE_ERR_RESET_FAILED: + case ICE_ERR_FW_API_VER: + case ICE_ERR_AQ_ERROR: + case ICE_ERR_AQ_TIMEOUT: + case ICE_ERR_AQ_FULL: + case ICE_ERR_AQ_NO_WORK: + case ICE_ERR_AQ_EMPTY: + return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; + default: + return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED; + } +} + +/** * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF * @pf: pointer to the PF structure * @v_opcode: operation code @@ -14,7 +45,7 @@ */ static void ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode, - enum ice_status v_retval, u8 *msg, u16 msglen) + enum virtchnl_status_code v_retval, u8 *msg, u16 msglen) { struct ice_hw *hw = &pf->hw; struct ice_vf *vf = pf->vf; @@ -104,7 +135,8 @@ static void
ice_vc_notify_vf_link_state(struct ice_vf *vf) ice_set_pfe_link(vf, &pfe, ls->link_speed, ls->link_info & ICE_AQ_LINK_UP); - ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe, + ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, + VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe), NULL); } @@ -343,11 +375,41 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr) } /** - * ice_vsi_set_pvid - Set port VLAN id for the VSI - * @vsi: the VSI being changed - * @vid: the VLAN id to set as a PVID + * ice_vsi_set_pvid_fill_ctxt - Set VSI ctxt for add PVID + * @ctxt: the VSI ctxt to fill + * @vid: the VLAN ID to set as a PVID + */ +static void ice_vsi_set_pvid_fill_ctxt(struct ice_vsi_ctx *ctxt, u16 vid) +{ + ctxt->info.vlan_flags = (ICE_AQ_VSI_VLAN_MODE_UNTAGGED | + ICE_AQ_VSI_PVLAN_INSERT_PVID | + ICE_AQ_VSI_VLAN_EMOD_STR); + ctxt->info.pvid = cpu_to_le16(vid); + ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; + ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID | + ICE_AQ_VSI_PROP_SW_VALID); +} + +/** + * ice_vsi_kill_pvid_fill_ctxt - Set VSI ctxt for remove PVID + * @ctxt: the VSI ctxt to fill + */ +static void ice_vsi_kill_pvid_fill_ctxt(struct ice_vsi_ctx *ctxt) +{ + ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING; + ctxt->info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL; + ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; + ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID | + ICE_AQ_VSI_PROP_SW_VALID); +} + +/** + * ice_vsi_manage_pvid - Enable or disable port VLAN for VSI + * @vsi: the VSI to update + * @vid: the VLAN ID to set as a PVID + * @enable: true to enable the PVID, false to disable it */ -static int ice_vsi_set_pvid(struct ice_vsi *vsi, u16 vid) +static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 vid, bool enable) { struct device *dev = &vsi->back->pdev->dev; struct ice_hw *hw = &vsi->back->hw; @@ -359,50 +421,31 @@ static int ice_vsi_set_pvid(struct ice_vsi *vsi, u16 vid) if (!ctxt) return -ENOMEM; - ctxt->info.vlan_flags = (ICE_AQ_VSI_VLAN_MODE_UNTAGGED | - ICE_AQ_VSI_PVLAN_INSERT_PVID | - ICE_AQ_VSI_VLAN_EMOD_STR); - ctxt->info.pvid = cpu_to_le16(vid); - ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); + ctxt->info = vsi->info; + if (enable) + ice_vsi_set_pvid_fill_ctxt(ctxt, vid); + else + ice_vsi_kill_pvid_fill_ctxt(ctxt); status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); if (status) { - dev_info(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n", + dev_info(dev, "update VSI for port VLAN failed, err %d aq_err %d\n", status, hw->adminq.sq_last_status); ret = -EIO; goto out; } - vsi->info.pvid = ctxt->info.pvid; - vsi->info.vlan_flags = ctxt->info.vlan_flags; + vsi->info = ctxt->info; out: devm_kfree(dev, ctxt); return ret; } /** - * ice_vsi_kill_pvid - Remove port VLAN id from the VSI - * @vsi: the VSI being changed - */ -static int ice_vsi_kill_pvid(struct ice_vsi *vsi) -{ - struct ice_pf *pf = vsi->back; - - if (ice_vsi_manage_vlan_stripping(vsi, false)) { - dev_err(&pf->pdev->dev, "Error removing Port VLAN on VSI %i\n", - vsi->vsi_num); - return -ENODEV; - } - - vsi->info.pvid = 0; - return 0; -} - -/** * ice_vf_vsi_setup - Set up a VF VSI * @pf: board private structure * @pi: pointer to the port_info instance - * @vf_id: defines VF id to which this VSI connects. + * @vf_id: defines VF ID to which this VSI connects. * * Returns pointer to the successfully allocated VSI struct on success, * otherwise returns NULL on failure.
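A minimal usage sketch of ice_vsi_manage_pvid() as introduced above (hypothetical caller, not part of the commit; assumes a valid struct ice_vsi *vsi and elides error handling):

	/* program VLAN 100 as the port VLAN, then remove it again; the
	 * single helper replaces the old ice_vsi_set_pvid() and
	 * ice_vsi_kill_pvid() pair
	 */
	int err = ice_vsi_manage_pvid(vsi, 100, true);

	if (!err)
		err = ice_vsi_manage_pvid(vsi, 0, false);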
@@ -446,8 +489,10 @@ static int ice_alloc_vsi_res(struct ice_vf *vf) vsi->hw_base_vector += 1; /* Check if a port VLAN existed before, and restore it accordingly */ - if (vf->port_vlan_id) - ice_vsi_set_pvid(vsi, vf->port_vlan_id); + if (vf->port_vlan_id) { + ice_vsi_manage_pvid(vsi, vf->port_vlan_id, true); + ice_vsi_add_vlan(vsi, vf->port_vlan_id & ICE_VLAN_M); + } eth_broadcast_addr(broadcast); @@ -468,7 +513,7 @@ static int ice_alloc_vsi_res(struct ice_vf *vf) /* Clear this bit after VF initialization since we shouldn't reclaim * and reassign interrupts for synchronous or asynchronous VFR events. - * We dont want to reconfigure interrupts since AVF driver doesn't + * We don't want to reconfigure interrupts since AVF driver doesn't * expect vector assignment to be changed unless there is a request for * more vectors. */ @@ -484,6 +529,8 @@ ice_alloc_vsi_res_exit: */ static int ice_alloc_vf_res(struct ice_vf *vf) { + struct ice_pf *pf = vf->pf; + int tx_rx_queue_left; int status; /* setup VF VSI and necessary resources */ @@ -491,6 +538,15 @@ static int ice_alloc_vf_res(struct ice_vf *vf) if (status) goto ice_alloc_vf_res_exit; + /* Update number of VF queues, in case the VF had requested queue + * changes + */ + tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx); + tx_rx_queue_left += ICE_DFLT_QS_PER_VF; + if (vf->num_req_qs && vf->num_req_qs <= tx_rx_queue_left && + vf->num_req_qs != vf->num_vf_qs) + vf->num_vf_qs = vf->num_req_qs; + if (vf->trusted) set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); else @@ -548,6 +604,10 @@ static void ice_ena_vf_mappings(struct ice_vf *vf) wr32(hw, GLINT_VECT2FUNC(v), reg); } + /* Map mailbox interrupt. We put an explicit 0 here to remind us that + * VF admin queue interrupts will go to VF MSI-X vector 0.
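+ * The 0 ORed into the write below is the register's MSI-X index + * field; hardware signals mailbox events on that vector.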
+ */ + wr32(hw, VPINT_MBX_CTL(abs_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M | 0); /* set regardless of mapping mode */ wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M); @@ -750,6 +810,47 @@ static void ice_cleanup_and_realloc_vf(struct ice_vf *vf) } /** + * ice_vf_set_vsi_promisc - set given VF VSI to given promiscuous mode(s) + * @vf: pointer to the VF info + * @vsi: the VSI being configured + * @promisc_m: mask of promiscuous config bits + * @rm_promisc: promisc flag request from the VF to remove or add filter + * + * This function configures VF VSI promiscuous mode, based on the VF requests, + * for Unicast, Multicast and VLAN + */ +static enum ice_status +ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m, + bool rm_promisc) +{ + struct ice_pf *pf = vf->pf; + enum ice_status status = 0; + struct ice_hw *hw; + + hw = &pf->hw; + if (vf->num_vlan) { + status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m, + rm_promisc); + } else if (vf->port_vlan_id) { + if (rm_promisc) + status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m, + vf->port_vlan_id); + else + status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m, + vf->port_vlan_id); + } else { + if (rm_promisc) + status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m, + 0); + else + status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m, + 0); + } + + return status; +} + +/** * ice_reset_all_vfs - reset all allocated VFs in one go * @pf: pointer to the PF structure * @is_vflr: true if VFLR was issued, false if not @@ -764,6 +865,7 @@ static void ice_cleanup_and_realloc_vf(struct ice_vf *vf) bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) { struct ice_hw *hw = &pf->hw; + struct ice_vf *vf; int v, i; /* If we don't have any VFs, then there is nothing to reset */ @@ -778,12 +880,17 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) for (v = 0; v < pf->num_alloc_vfs; v++) ice_trigger_vf_reset(&pf->vf[v], is_vflr); - /* Call Disable LAN Tx queue AQ call with VFR bit set and 0 - * queues to inform Firmware about VF reset. - */ - for (v = 0; v < pf->num_alloc_vfs; v++) - ice_dis_vsi_txq(pf->vsi[0]->port_info, 0, NULL, NULL, - ICE_VF_RESET, v, NULL); + for (v = 0; v < pf->num_alloc_vfs; v++) { + struct ice_vsi *vsi; + + vf = &pf->vf[v]; + vsi = pf->vsi[vf->lan_vsi_idx]; + if (test_bit(ICE_VF_STATE_ENA, vf->vf_states)) { + ice_vsi_stop_lan_tx_rings(vsi, ICE_VF_RESET, vf->vf_id); + ice_vsi_stop_rx_rings(vsi); + clear_bit(ICE_VF_STATE_ENA, vf->vf_states); + } + } /* HW requires some time to make sure it can flush the FIFO for a VF * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in @@ -796,9 +903,9 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) /* Check each VF in sequence */ while (v < pf->num_alloc_vfs) { - struct ice_vf *vf = &pf->vf[v]; u32 reg; + vf = &pf->vf[v]; reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id)); if (!(reg & VPGEN_VFRSTAT_VFRD_M)) break; @@ -818,8 +925,18 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) usleep_range(10000, 20000); /* free VF resources to begin resetting the VSI state */ - for (v = 0; v < pf->num_alloc_vfs; v++) - ice_free_vf_res(&pf->vf[v]); + for (v = 0; v < pf->num_alloc_vfs; v++) { + vf = &pf->vf[v]; + + ice_free_vf_res(vf); + + /* Free VF queues as well, and reallocate later. + * If a given VF has different number of queues + * configured, the request for update will come + * via mailbox communication. 
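+ * Until such a request arrives, the realloc loop further below + * restores the default count from pf->num_vf_qps.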
+ */ + vf->num_vf_qs = 0; + } if (ice_check_avail_res(pf)) { dev_err(&pf->pdev->dev, @@ -828,8 +945,15 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) } /* Finish the reset on each VF */ - for (v = 0; v < pf->num_alloc_vfs; v++) - ice_cleanup_and_realloc_vf(&pf->vf[v]); + for (v = 0; v < pf->num_alloc_vfs; v++) { + vf = &pf->vf[v]; + + vf->num_vf_qs = pf->num_vf_qps; + dev_dbg(&pf->pdev->dev, + "VF-id %d has %d queues configured\n", + vf->vf_id, vf->num_vf_qs); + ice_cleanup_and_realloc_vf(vf); + } ice_flush(hw); clear_bit(__ICE_VF_DIS, pf->state); @@ -847,9 +971,10 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) { struct ice_pf *pf = vf->pf; - struct ice_hw *hw = &pf->hw; struct ice_vsi *vsi; + struct ice_hw *hw; bool rsd = false; + u8 promisc_m; u32 reg; int i; @@ -875,6 +1000,7 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) vf->vf_id, NULL); } + hw = &pf->hw; /* poll VPGEN_VFRSTAT reg to make sure * that reset is complete */ @@ -900,6 +1026,21 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) usleep_range(10000, 20000); + /* disable promiscuous modes in case they were enabled + * ignore any error if disabling process failed + */ + if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) || + test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) { + if (vf->port_vlan_id || vf->num_vlan) + promisc_m = ICE_UCAST_VLAN_PROMISC_BITS; + else + promisc_m = ICE_UCAST_PROMISC_BITS; + + vsi = pf->vsi[vf->lan_vsi_idx]; + if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true)) + dev_err(&pf->pdev->dev, "disabling promiscuous mode failed\n"); + } + /* free VF resources to begin resetting the VSI state */ ice_free_vf_res(vf); @@ -938,7 +1079,7 @@ void ice_vc_notify_reset(struct ice_pf *pf) pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING; pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM; - ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, ICE_SUCCESS, + ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(struct virtchnl_pf_event)); } @@ -961,8 +1102,9 @@ static void ice_vc_notify_vf_reset(struct ice_vf *vf) pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING; pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM; - ice_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0, - (u8 *)&pfe, sizeof(pfe), NULL); + ice_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT, + VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe), + NULL); } /** @@ -1012,7 +1154,7 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs) pf->num_alloc_vfs = num_alloc_vfs; /* VF resources get allocated during reset */ - if (!ice_reset_all_vfs(pf, false)) + if (!ice_reset_all_vfs(pf, true)) goto err_unroll_sriov; goto err_unroll_intr; @@ -1182,8 +1324,9 @@ static void ice_vc_dis_vf(struct ice_vf *vf) * * send msg to VF */ -static int ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode, - enum ice_status v_retval, u8 *msg, u16 msglen) +static int +ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode, + enum virtchnl_status_code v_retval, u8 *msg, u16 msglen) { enum ice_status aq_ret; struct ice_pf *pf; @@ -1243,8 +1386,8 @@ static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg) if (VF_IS_V10(&vf->vf_ver)) info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS; - return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION, ICE_SUCCESS, - (u8 *)&info, + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION, + VIRTCHNL_STATUS_SUCCESS, (u8 *)&info, sizeof(struct virtchnl_version_info)); } @@ -1257,15 +1400,15 @@ static int 
ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg) */ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) { + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct virtchnl_vf_resource *vfres = NULL; - enum ice_status aq_ret = 0; struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; int len = 0; int ret; if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto err; } @@ -1273,7 +1416,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) vfres = devm_kzalloc(&pf->pdev->dev, len, GFP_KERNEL); if (!vfres) { - aq_ret = ICE_ERR_NO_MEMORY; + v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; len = 0; goto err; } @@ -1286,6 +1429,11 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2; vsi = pf->vsi[vf->lan_vsi_idx]; + if (!vsi) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + if (!vsi->info.pvid) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN; @@ -1336,7 +1484,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) err: /* send the response back to the VF */ - ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, aq_ret, + ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret, (u8 *)vfres, len); devm_kfree(&pf->pdev->dev, vfres); @@ -1360,15 +1508,15 @@ static void ice_vc_reset_vf_msg(struct ice_vf *vf) /** * ice_find_vsi_from_id * @pf: the pf structure to search for the VSI - * @id: id of the VSI it is searching for + * @id: ID of the VSI it is searching for * - * searches for the VSI with the given id + * searches for the VSI with the given ID */ static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id) { int i; - for (i = 0; i < pf->num_alloc_vsi; i++) + ice_for_each_vsi(pf, i) if (pf->vsi[i] && pf->vsi[i]->vsi_num == id) return pf->vsi[i]; @@ -1378,9 +1526,9 @@ static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id) /** * ice_vc_isvalid_vsi_id * @vf: pointer to the VF info - * @vsi_id: VF relative VSI id + * @vsi_id: VF relative VSI ID * - * check for the valid VSI id + * check for the valid VSI ID */ static bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id) { @@ -1395,10 +1543,10 @@ static bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id) /** * ice_vc_isvalid_q_id * @vf: pointer to the VF info - * @vsi_id: VSI id - * @qid: VSI relative queue id + * @vsi_id: VSI ID + * @qid: VSI relative queue ID * - * check for the valid queue id + * check for the valid queue ID */ static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid) { @@ -1416,42 +1564,42 @@ static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid) */ static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg) { + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg; + struct ice_pf *pf = vf->pf; struct ice_vsi *vsi = NULL; - enum ice_status aq_ret; - int ret; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - vsi = ice_find_vsi_from_id(vf->pf, vrk->vsi_id); + vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } if 
(!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - ret = ice_set_rss(vsi, vrk->key, NULL, 0); - aq_ret = ret ? ICE_ERR_PARAM : ICE_SUCCESS; + if (ice_set_rss(vsi, vrk->key, NULL, 0)) + v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; error_param: - return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, aq_ret, + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret, NULL, 0); } @@ -1465,40 +1613,40 @@ error_param: static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg) { struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg; + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + struct ice_pf *pf = vf->pf; struct ice_vsi *vsi = NULL; - enum ice_status aq_ret; - int ret; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - vsi = ice_find_vsi_from_id(vf->pf, vrl->vsi_id); + vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - ret = ice_set_rss(vsi, NULL, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE); - aq_ret = ret ? ICE_ERR_PARAM : ICE_SUCCESS; + if (ice_set_rss(vsi, NULL, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE)) + v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; error_param: - return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, aq_ret, + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret, NULL, 0); } @@ -1511,25 +1659,26 @@ error_param: */ static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg) { + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct virtchnl_queue_select *vqs = (struct virtchnl_queue_select *)msg; - enum ice_status aq_ret = 0; + struct ice_pf *pf = vf->pf; struct ice_eth_stats stats; struct ice_vsi *vsi; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id); + vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -1540,7 +1689,7 @@ static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg) error_param: /* send the response to the VF */ - return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret, + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret, (u8 *)&stats, sizeof(stats)); } @@ -1553,29 +1702,30 @@ error_param: */ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg) { + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct virtchnl_queue_select *vqs = (struct virtchnl_queue_select *)msg; - enum ice_status aq_ret = 0; + struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; 
goto error_param; } if (!vqs->rx_queues && !vqs->tx_queues) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id); + vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -1584,15 +1734,15 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg) * programmed using ice_vsi_cfg_txqs */ if (ice_vsi_start_rx_rings(vsi)) - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; /* Set flag to indicate that queues are enabled */ - if (!aq_ret) + if (v_ret == VIRTCHNL_STATUS_SUCCESS) set_bit(ICE_VF_STATE_ENA, vf->vf_states); error_param: /* send the response to the VF */ - return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, aq_ret, + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret, NULL, 0); } @@ -1606,30 +1756,31 @@ error_param: */ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg) { + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct virtchnl_queue_select *vqs = (struct virtchnl_queue_select *)msg; - enum ice_status aq_ret = 0; + struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) && !test_bit(ICE_VF_STATE_ENA, vf->vf_states)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } if (!vqs->rx_queues && !vqs->tx_queues) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id); + vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -1637,23 +1788,23 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg) dev_err(&vsi->back->pdev->dev, "Failed to stop tx rings on VSI %d\n", vsi->vsi_num); - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; } if (ice_vsi_stop_rx_rings(vsi)) { dev_err(&vsi->back->pdev->dev, "Failed to stop rx rings on VSI %d\n", vsi->vsi_num); - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; } /* Clear enabled queues flag */ - if (!aq_ret) + if (v_ret == VIRTCHNL_STATUS_SUCCESS) clear_bit(ICE_VF_STATE_ENA, vf->vf_states); error_param: /* send the response to the VF */ - return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, aq_ret, + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret, NULL, 0); } @@ -1666,18 +1817,18 @@ error_param: */ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) { + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct virtchnl_irq_map_info *irqmap_info = (struct virtchnl_irq_map_info *)msg; u16 vsi_id, vsi_q_id, vector_id; struct virtchnl_vector_map *map; struct ice_vsi *vsi = NULL; struct ice_pf *pf = vf->pf; - enum ice_status aq_ret = 0; unsigned long qmap; int i; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -1689,13 +1840,13 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) /* validate msg params */ if (!(vector_id < pf->hw.func_caps.common_cap .num_msix_vectors) || !ice_vc_isvalid_vsi_id(vf, vsi_id)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - vsi = ice_find_vsi_from_id(vf->pf, vsi_id); + vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi) { - aq_ret = ICE_ERR_PARAM; + v_ret = 
VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -1705,7 +1856,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) struct ice_q_vector *q_vector; if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } q_vector = vsi->q_vectors[i]; @@ -1719,7 +1870,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) struct ice_q_vector *q_vector; if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } q_vector = vsi->q_vectors[i]; @@ -1733,7 +1884,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) ice_vsi_cfg_msix(vsi); error_param: /* send the response to the VF */ - return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, aq_ret, + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret, NULL, 0); } @@ -1746,26 +1897,34 @@ error_param: */ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) { + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct virtchnl_vsi_queue_config_info *qci = (struct virtchnl_vsi_queue_config_info *)msg; struct virtchnl_queue_pair_info *qpi; - enum ice_status aq_ret = 0; + struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; int i; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - vsi = ice_find_vsi_from_id(vf->pf, qci->vsi_id); + vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi) { - aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF) { + dev_err(&pf->pdev->dev, + "VF-%d requesting more than supported number of queues: %d\n", + vf->vf_id, qci->num_queue_pairs); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -1775,7 +1934,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) qpi->rxq.vsi_id != qci->vsi_id || qpi->rxq.queue_id != qpi->txq.queue_id || !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } /* copy Tx queue info from VF into VSI */ @@ -1785,13 +1944,13 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr; vsi->rx_rings[i]->count = qpi->rxq.ring_len; if (qpi->rxq.databuffer_size > ((16 * 1024) - 128)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } vsi->rx_buf_len = qpi->rxq.databuffer_size; if (qpi->rxq.max_pkt_size >= (16 * 1024) || qpi->rxq.max_pkt_size < 64) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } vsi->max_frame = qpi->rxq.max_pkt_size; @@ -1802,15 +1961,16 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) */ vsi->num_txq = qci->num_queue_pairs; vsi->num_rxq = qci->num_queue_pairs; + /* All queues of VF VSI are in TC 0 */ + vsi->tc_cfg.tc_info[0].qcount_tx = qci->num_queue_pairs; + vsi->tc_cfg.tc_info[0].qcount_rx = qci->num_queue_pairs; - if (!ice_vsi_cfg_lan_txqs(vsi) && !ice_vsi_cfg_rxqs(vsi)) - aq_ret = 0; - else - aq_ret = ICE_ERR_PARAM; + if (ice_vsi_cfg_lan_txqs(vsi) || ice_vsi_cfg_rxqs(vsi)) + v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; error_param: /* send the response to the VF */ - return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, aq_ret, + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, 
v_ret, NULL, 0); } @@ -1845,18 +2005,18 @@ static bool ice_can_vf_change_mac(struct ice_vf *vf) * ice_vc_handle_mac_addr_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer - * @set: true if mac filters are being set, false otherwise + * @set: true if MAC filters are being set, false otherwise * * add guest MAC address filter */ static int ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set) { + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct virtchnl_ether_addr_list *al = (struct virtchnl_ether_addr_list *)msg; struct ice_pf *pf = vf->pf; enum virtchnl_ops vc_op; - enum ice_status ret; LIST_HEAD(mac_list); struct ice_vsi *vsi; int mac_count = 0; @@ -1869,19 +2029,27 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set) if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) || !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) { - ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto handle_mac_exit; } if (set && !ice_is_vf_trusted(vf) && (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) { dev_err(&pf->pdev->dev, - "Can't add more MAC addresses, because VF is not trusted, switch the VF to trusted mode in order to add more functionalities\n"); - ret = ICE_ERR_PARAM; + "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n", + vf->vf_id); + /* There is no need to let VF know about not being trusted + * to add more MAC addr, so we can just return success message. + */ + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto handle_mac_exit; } vsi = pf->vsi[vf->lan_vsi_idx]; + if (!vsi) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto handle_mac_exit; + } for (i = 0; i < al->num_elements; i++) { u8 *maddr = al->list[i].addr; @@ -1893,40 +2061,39 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set) * already added. Just continue. 
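* (the duplicate is only logged at info level below; it does not * fail the whole request)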
*/ dev_info(&pf->pdev->dev, - "mac %pM already set for VF %d\n", + "MAC %pM already set for VF %d\n", maddr, vf->vf_id); continue; } else { - /* VF can't remove dflt_lan_addr/bcast mac */ + /* VF can't remove dflt_lan_addr/bcast MAC */ dev_err(&pf->pdev->dev, - "can't remove mac %pM for VF %d\n", + "VF can't remove default MAC address or MAC %pM programmed by PF for VF %d\n", maddr, vf->vf_id); - ret = ICE_ERR_PARAM; - goto handle_mac_exit; + continue; } } /* check for the invalid cases and bail if necessary */ if (is_zero_ether_addr(maddr)) { dev_err(&pf->pdev->dev, - "invalid mac %pM provided for VF %d\n", + "invalid MAC %pM provided for VF %d\n", maddr, vf->vf_id); - ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto handle_mac_exit; } if (is_unicast_ether_addr(maddr) && !ice_can_vf_change_mac(vf)) { dev_err(&pf->pdev->dev, - "can't change unicast mac for untrusted VF %d\n", + "can't change unicast MAC for untrusted VF %d\n", vf->vf_id); - ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto handle_mac_exit; } - /* get here if maddr is multicast or if VF can change mac */ + /* get here if maddr is multicast or if VF can change MAC */ if (ice_add_mac_to_list(vsi, &mac_list, al->list[i].addr)) { - ret = ICE_ERR_NO_MEMORY; + v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; goto handle_mac_exit; } mac_count++; @@ -1934,14 +2101,14 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set) /* program the updated filter list */ if (set) - ret = ice_add_mac(&pf->hw, &mac_list); + v_ret = ice_err_to_virt_err(ice_add_mac(&pf->hw, &mac_list)); else - ret = ice_remove_mac(&pf->hw, &mac_list); + v_ret = ice_err_to_virt_err(ice_remove_mac(&pf->hw, &mac_list)); - if (ret) { + if (v_ret) { dev_err(&pf->pdev->dev, - "can't update mac filters for VF %d, error %d\n", - vf->vf_id, ret); + "can't update MAC filters for VF %d, error %d\n", + vf->vf_id, v_ret); } else { if (set) vf->num_mac += mac_count; @@ -1952,7 +2119,7 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set) handle_mac_exit: ice_free_fltr_list(&pf->pdev->dev, &mac_list); /* send the response to the VF */ - return ice_vc_send_msg_to_vf(vf, vc_op, ret, NULL, 0); + return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0); } /** @@ -1987,39 +2154,42 @@ static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg) * VFs get a default number of queues but can use this message to request a * different number. If the request is successful, PF will reset the VF and * return 0. If unsuccessful, PF will send message informing VF of number of - * available queue pairs via virtchnl message response to vf. + * available queue pairs via virtchnl message response to VF. */ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg) { + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct virtchnl_vf_res_request *vfres = (struct virtchnl_vf_res_request *)msg; int req_queues = vfres->num_queue_pairs; - enum ice_status aq_ret = 0; struct ice_pf *pf = vf->pf; + int max_allowed_vf_queues; int tx_rx_queue_left; int cur_queues; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - cur_queues = pf->num_vf_qps; + cur_queues = vf->num_vf_qs; tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx); + max_allowed_vf_queues = tx_rx_queue_left + cur_queues; if (req_queues <= 0) { dev_err(&pf->pdev->dev, "VF %d tried to request %d queues. 
Ignoring.\n", vf->vf_id, req_queues); - } else if (req_queues > ICE_MAX_QS_PER_VF) { + } else if (req_queues > ICE_MAX_BASE_QS_PER_VF) { dev_err(&pf->pdev->dev, "VF %d tried to request more than %d queues.\n", - vf->vf_id, ICE_MAX_QS_PER_VF); - vfres->num_queue_pairs = ICE_MAX_QS_PER_VF; + vf->vf_id, ICE_MAX_BASE_QS_PER_VF); + vfres->num_queue_pairs = ICE_MAX_BASE_QS_PER_VF; } else if (req_queues - cur_queues > tx_rx_queue_left) { dev_warn(&pf->pdev->dev, "VF %d requested %d more queues, but only %d left.\n", vf->vf_id, req_queues - cur_queues, tx_rx_queue_left); - vfres->num_queue_pairs = tx_rx_queue_left + cur_queues; + vfres->num_queue_pairs = min_t(int, max_allowed_vf_queues, + ICE_MAX_BASE_QS_PER_VF); } else { /* request is successful, then reset VF */ vf->num_req_qs = req_queues; @@ -2033,18 +2203,18 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg) error_param: /* send the response to the VF */ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, - aq_ret, (u8 *)vfres, sizeof(*vfres)); + v_ret, (u8 *)vfres, sizeof(*vfres)); } /** * ice_set_vf_port_vlan * @netdev: network interface device structure * @vf_id: VF identifier - * @vlan_id: VLAN id being set + * @vlan_id: VLAN ID being set * @qos: priority setting * @vlan_proto: VLAN protocol * - * program VF Port VLAN id and/or qos + * program VF Port VLAN ID and/or QoS */ int ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, @@ -2087,17 +2257,18 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, return ret; } - /* If pvid, then remove all filters on the old VLAN */ + /* If PVID, then remove all filters on the old VLAN */ if (vsi->info.pvid) ice_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) & VLAN_VID_MASK)); if (vlan_id || qos) { - ret = ice_vsi_set_pvid(vsi, vlanprio); + ret = ice_vsi_manage_pvid(vsi, vlanprio, true); if (ret) goto error_set_pvid; } else { - ice_vsi_kill_pvid(vsi); + ice_vsi_manage_pvid(vsi, 0, false); + vsi->info.pvid = 0; } if (vlan_id) { @@ -2125,52 +2296,61 @@ error_set_pvid: * @msg: pointer to the msg buffer * @add_v: Add VLAN if true, otherwise delete VLAN * - * Process virtchnl op to add or remove programmed guest VLAN id + * Process virtchnl op to add or remove programmed guest VLAN ID */ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v) { + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct virtchnl_vlan_filter_list *vfl = (struct virtchnl_vlan_filter_list *)msg; - enum ice_status aq_ret = 0; struct ice_pf *pf = vf->pf; + bool vlan_promisc = false; struct ice_vsi *vsi; + struct ice_hw *hw; + int status = 0; + u8 promisc_m; int i; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } if (add_v && !ice_is_vf_trusted(vf) && vf->num_vlan >= ICE_MAX_VLAN_PER_VF) { dev_info(&pf->pdev->dev, - "VF is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n"); - aq_ret = ICE_ERR_PARAM; + "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n", + vf->vf_id); + /* There is no need to let VF know about being not trusted, + * so we can just return success message here + */ + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } for (i = 0; i < vfl->num_elements; i++) { if (vfl->vlan_id[i] > ICE_MAX_VLANID) { - aq_ret = 
ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; dev_err(&pf->pdev->dev, "invalid VF VLAN id %d\n", vfl->vlan_id[i]); goto error_param; } } - vsi = ice_find_vsi_from_id(vf->pf, vfl->vsi_id); + hw = &pf->hw; + vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } if (vsi->info.pvid) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -2178,23 +2358,47 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v) dev_err(&pf->pdev->dev, "%sable VLAN stripping failed for VSI %i\n", add_v ? "en" : "dis", vsi->vsi_num); - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } + if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) || + test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) + vlan_promisc = true; + if (add_v) { for (i = 0; i < vfl->num_elements; i++) { u16 vid = vfl->vlan_id[i]; - if (!ice_vsi_add_vlan(vsi, vid)) { - vf->num_vlan++; + if (ice_vsi_add_vlan(vsi, vid)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } - /* Enable VLAN pruning when VLAN 0 is added */ - if (unlikely(!vid)) - if (ice_cfg_vlan_pruning(vsi, true)) - aq_ret = ICE_ERR_PARAM; + vf->num_vlan++; + /* Enable VLAN pruning when VLAN is added */ + if (!vlan_promisc) { + status = ice_cfg_vlan_pruning(vsi, true, false); + if (status) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + dev_err(&pf->pdev->dev, + "Enable VLAN pruning on VLAN ID: %d failed error-%d\n", + vid, status); + goto error_param; + } } else { - aq_ret = ICE_ERR_PARAM; + /* Enable Ucast/Mcast VLAN promiscuous mode */ + promisc_m = ICE_PROMISC_VLAN_TX | + ICE_PROMISC_VLAN_RX; + + status = ice_set_vsi_promisc(hw, vsi->idx, + promisc_m, vid); + if (status) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + dev_err(&pf->pdev->dev, + "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n", + vid, status); + } } } } else { @@ -2204,12 +2408,22 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v) /* Make sure ice_vsi_kill_vlan is successful before * updating VLAN information */ - if (!ice_vsi_kill_vlan(vsi, vid)) { - vf->num_vlan--; + if (ice_vsi_kill_vlan(vsi, vid)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + vf->num_vlan--; + /* Disable VLAN pruning when removing VLAN */ + ice_cfg_vlan_pruning(vsi, false, false); - /* Disable VLAN pruning when removing VLAN 0 */ - if (unlikely(!vid)) - ice_cfg_vlan_pruning(vsi, false); + /* Disable Unicast/Multicast VLAN promiscuous mode */ + if (vlan_promisc) { + promisc_m = ICE_PROMISC_VLAN_TX | + ICE_PROMISC_VLAN_RX; + + ice_clear_vsi_promisc(hw, vsi->idx, + promisc_m, vid); } } } @@ -2217,10 +2431,10 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v) error_param: /* send the response to the VF */ if (add_v) - return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret, + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret, NULL, 0); else - return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret, + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret, NULL, 0); } @@ -2229,7 +2443,7 @@ error_param: * @vf: pointer to the VF info * @msg: pointer to the msg buffer * - * Add and program guest VLAN id + * Add and program guest VLAN ID */ static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg) { @@ -2241,7 +2455,7 @@ static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg) * @vf: pointer to the VF info * @msg: pointer to the msg buffer * - * remove programmed 
guest VLAN id + * remove programmed guest VLAN ID */ static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg) { @@ -2256,22 +2470,22 @@ static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg) */ static int ice_vc_ena_vlan_stripping(struct ice_vf *vf) { - enum ice_status aq_ret = 0; + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } vsi = pf->vsi[vf->lan_vsi_idx]; if (ice_vsi_manage_vlan_stripping(vsi, true)) - aq_ret = ICE_ERR_AQ_ERROR; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; error_param: return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, - aq_ret, NULL, 0); + v_ret, NULL, 0); } /** @@ -2282,22 +2496,27 @@ error_param: */ static int ice_vc_dis_vlan_stripping(struct ice_vf *vf) { - enum ice_status aq_ret = 0; + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { - aq_ret = ICE_ERR_PARAM; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } vsi = pf->vsi[vf->lan_vsi_idx]; + if (!vsi) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + if (ice_vsi_manage_vlan_stripping(vsi, false)) - aq_ret = ICE_ERR_AQ_ERROR; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; error_param: return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, - aq_ret, NULL, 0); + v_ret, NULL, 0); } /** @@ -2333,7 +2552,7 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) /* Perform basic checks on the msg */ err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen); if (err) { - if (err == VIRTCHNL_ERR_PARAM) + if (err == VIRTCHNL_STATUS_ERR_PARAM) err = -EPERM; else err = -EINVAL; @@ -2355,7 +2574,8 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) error_handler: if (err) { - ice_vc_send_msg_to_vf(vf, v_opcode, ICE_ERR_PARAM, NULL, 0); + ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM, + NULL, 0); dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n", vf_id, v_opcode, msglen, err); return; @@ -2418,7 +2638,8 @@ error_handler: default: dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n", v_opcode, vf_id); - err = ice_vc_send_msg_to_vf(vf, v_opcode, ICE_ERR_NOT_IMPL, + err = ice_vc_send_msg_to_vf(vf, v_opcode, + VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL, 0); break; } @@ -2427,7 +2648,7 @@ error_handler: * as it is busy with pending work. */ dev_info(&pf->pdev->dev, - "PF failed to honor VF %d, opcode %d\n, error %d\n", + "PF failed to honor VF %d, opcode %d, error %d\n", vf_id, v_opcode, err); } } @@ -2440,8 +2661,8 @@ error_handler: * * return VF configuration */ -int ice_get_vf_cfg(struct net_device *netdev, int vf_id, - struct ifla_vf_info *ivi) +int +ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; @@ -2550,9 +2771,9 @@ out: * ice_set_vf_mac * @netdev: network interface device structure * @vf_id: VF identifier - * @mac: mac address + * @mac: MAC address * - * program VF mac address + * program VF MAC address */ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) { @@ -2579,7 +2800,7 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) return -EINVAL; } - /* copy mac into dflt_lan_addr and trigger a VF reset. 
The reset + /* copy MAC into dflt_lan_addr and trigger a VF reset. The reset * flow will use the updated dflt_lan_addr and add a MAC filter * using ice_add_mac. Also set pf_set_mac to indicate that the PF has * set the MAC address for this VF. @@ -2587,7 +2808,7 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) ether_addr_copy(vf->dflt_lan_addr.addr, mac); vf->pf_set_mac = true; netdev_info(netdev, - "mac on VF %d set to %pM\n. VF driver will be reinitialized\n", + "MAC on VF %d set to %pM. VF driver will be reinitialized\n", vf_id, mac); ice_vc_dis_vf(vf); @@ -2690,7 +2911,8 @@ int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state) ice_set_pfe_link(vf, &pfe, ls->link_speed, vf->link_up); /* Notify the VF of its new link state */ - ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe, + ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, + VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe), NULL); return 0; diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h index 01470a8ee03a..3725aea16840 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h @@ -48,10 +48,10 @@ enum ice_virtchnl_cap { struct ice_vf { struct ice_pf *pf; - s16 vf_id; /* VF id in the PF space */ + s16 vf_id; /* VF ID in the PF space */ u32 driver_caps; /* reported by VF driver */ int first_vector_idx; /* first vector index of this VF */ - struct ice_sw *vf_sw_id; /* switch id the VF VSIs connect to */ + struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */ struct virtchnl_version_info vf_ver; struct virtchnl_ether_addr dflt_lan_addr; u16 port_vlan_id; @@ -59,10 +59,10 @@ struct ice_vf { u8 trusted; u16 lan_vsi_idx; /* index into PF struct */ u16 lan_vsi_num; /* ID as used by firmware */ - u64 num_mdd_events; /* number of mdd events detected */ + u64 num_mdd_events; /* number of MDD events detected */ u64 num_inval_msgs; /* number of continuous invalid msgs */ u64 num_valid_msgs; /* number of valid msgs detected */ - unsigned long vf_caps; /* vf's adv. capabilities */ + unsigned long vf_caps; /* VF's adv. 
capabilities */ DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */ unsigned int tx_rate; /* Tx bandwidth limit in Mbps */ u8 link_forced; @@ -70,6 +70,7 @@ struct ice_vf { u8 spoofchk; u16 num_mac; u16 num_vlan; + u16 num_vf_qs; /* num of queue configured per VF */ u8 num_req_qs; /* num of queue pairs requested by VF */ }; @@ -77,8 +78,8 @@ struct ice_vf { void ice_process_vflr_event(struct ice_pf *pf); int ice_sriov_configure(struct pci_dev *pdev, int num_vfs); int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac); -int ice_get_vf_cfg(struct net_device *netdev, int vf_id, - struct ifla_vf_info *ivi); +int +ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi); void ice_free_vfs(struct ice_pf *pf); void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event); @@ -86,11 +87,9 @@ void ice_vc_notify_link_state(struct ice_pf *pf); void ice_vc_notify_reset(struct ice_pf *pf); bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr); -int ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, - u16 vlan_id, u8 qos, __be16 vlan_proto); - -int ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, - int max_tx_rate); +int +ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, + __be16 vlan_proto); int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted); @@ -162,12 +161,5 @@ ice_set_vf_link_state(struct net_device __always_unused *netdev, return -EOPNOTSUPP; } -static inline int -ice_set_vf_bw(struct net_device __always_unused *netdev, - int __always_unused vf_id, int __always_unused min_tx_rate, - int __always_unused max_tx_rate) -{ - return -EOPNOTSUPP; -} #endif /* CONFIG_PCI_IOV */ #endif /* _ICE_VIRTCHNL_PF_H_ */ diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index c57671068245..c645d9e648e0 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -3158,8 +3158,8 @@ static int igb_set_eee(struct net_device *netdev, } else if (!edata->eee_enabled) { dev_err(&adapter->pdev->dev, "Setting EEE options are not supported with EEE disabled\n"); - return -EINVAL; - } + return -EINVAL; + } adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised); if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) { diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 3269d8e94744..9b8a4bb25327 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -2480,7 +2480,7 @@ static int igb_set_features(struct net_device *netdev, else igb_reset(adapter); - return 0; + return 1; } static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], @@ -3452,6 +3452,9 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) break; } } + + dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP); + pm_runtime_put_noidle(&pdev->dev); return 0; @@ -6026,7 +6029,7 @@ static int igb_tx_map(struct igb_ring *tx_ring, /* Make sure there is space in the ring for the next send. 
*/ igb_maybe_stop_tx(tx_ring, DESC_NEEDED); - if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { writel(i, tx_ring->tail); /* we need this if more than one processor can write to our tail @@ -8048,7 +8051,7 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring, /* Determine available headroom for copy */ headlen = size; if (headlen > IGB_RX_HDR_LEN) - headlen = eth_get_headlen(va, IGB_RX_HDR_LEN); + headlen = eth_get_headlen(skb->dev, va, IGB_RX_HDR_LEN); /* align pull length to size of long to optimize memcpy performance */ memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index 80faccc34cda..0f5534ce27b0 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -29,9 +29,15 @@ unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter); void igc_set_flag_queue_pairs(struct igc_adapter *adapter, const u32 max_rss_queues); int igc_reinit_queues(struct igc_adapter *adapter); +void igc_write_rss_indir_tbl(struct igc_adapter *adapter); bool igc_has_link(struct igc_adapter *adapter); void igc_reset(struct igc_adapter *adapter); int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx); +int igc_add_mac_steering_filter(struct igc_adapter *adapter, + const u8 *addr, u8 queue, u8 flags); +int igc_del_mac_steering_filter(struct igc_adapter *adapter, + const u8 *addr, u8 queue, u8 flags); +void igc_update_stats(struct igc_adapter *adapter); extern char igc_driver_name[]; extern char igc_driver_version[]; @@ -51,6 +57,13 @@ extern char igc_driver_version[]; #define IGC_FLAG_VLAN_PROMISC BIT(15) #define IGC_FLAG_RX_LEGACY BIT(16) +#define IGC_FLAG_RSS_FIELD_IPV4_UDP BIT(6) +#define IGC_FLAG_RSS_FIELD_IPV6_UDP BIT(7) + +#define IGC_MRQC_ENABLE_RSS_MQ 0x00000002 +#define IGC_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 +#define IGC_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 + #define IGC_START_ITR 648 /* ~6000 ints/sec */ #define IGC_4K_ITR 980 #define IGC_20K_ITR 196 @@ -284,15 +297,50 @@ struct igc_q_vector { struct igc_ring ring[0] ____cacheline_internodealigned_in_smp; }; +#define MAX_ETYPE_FILTER (4 - 1) + +enum igc_filter_match_flags { + IGC_FILTER_FLAG_ETHER_TYPE = 0x1, + IGC_FILTER_FLAG_VLAN_TCI = 0x2, + IGC_FILTER_FLAG_SRC_MAC_ADDR = 0x4, + IGC_FILTER_FLAG_DST_MAC_ADDR = 0x8, +}; + +/* RX network flow classification data structure */ +struct igc_nfc_input { + /* Byte layout in order, all values with MSB first: + * match_flags - 1 byte + * etype - 2 bytes + * vlan_tci - 2 bytes + */ + u8 match_flags; + __be16 etype; + __be16 vlan_tci; + u8 src_addr[ETH_ALEN]; + u8 dst_addr[ETH_ALEN]; +}; + +struct igc_nfc_filter { + struct hlist_node nfc_node; + struct igc_nfc_input filter; + unsigned long cookie; + u16 etype_reg_index; + u16 sw_idx; + u16 action; +}; + struct igc_mac_addr { u8 addr[ETH_ALEN]; u8 queue; u8 state; /* bitmask */ }; -#define IGC_MAC_STATE_DEFAULT 0x1 -#define IGC_MAC_STATE_MODIFIED 0x2 -#define IGC_MAC_STATE_IN_USE 0x4 +#define IGC_MAC_STATE_DEFAULT 0x1 +#define IGC_MAC_STATE_IN_USE 0x2 +#define IGC_MAC_STATE_SRC_ADDR 0x4 +#define IGC_MAC_STATE_QUEUE_STEERING 0x8 + +#define IGC_MAX_RXNFC_FILTERS 16 /* Board specific private data structure */ struct igc_adapter { @@ -356,12 +404,22 @@ struct igc_adapter { u16 tx_ring_count; u16 rx_ring_count; + u32 tx_hwtstamp_timeouts; + u32 tx_hwtstamp_skipped; + u32 rx_hwtstamp_cleared; u32 *shadow_vfta; 
u32 rss_queues; + u32 rss_indir_tbl_init; + + /* RX network flow classification support */ + struct hlist_head nfc_filter_list; + struct hlist_head cls_flower_list; + unsigned int nfc_filter_count; /* lock for RX network flow classification filter */ spinlock_t nfc_lock; + bool etype_bitmap[MAX_ETYPE_FILTER]; struct igc_mac_addr *mac_table; @@ -447,6 +505,10 @@ static inline s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data) /* forward declaration */ void igc_reinit_locked(struct igc_adapter *); +int igc_add_filter(struct igc_adapter *adapter, + struct igc_nfc_filter *input); +int igc_erase_filter(struct igc_adapter *adapter, + struct igc_nfc_filter *input); #define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring)) diff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h index 76d4991d7284..58d1109d7f3f 100644 --- a/drivers/net/ethernet/intel/igc/igc_base.h +++ b/drivers/net/ethernet/intel/igc/igc_base.h @@ -1,8 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2018 Intel Corporation */ -#ifndef _IGC_BASE_H -#define _IGC_BASE_H +#ifndef _IGC_BASE_H_ +#define _IGC_BASE_H_ /* forward declaration */ void igc_rx_fifo_flush_base(struct igc_hw *hw); diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h index 7d1bdcd1225a..a9a30268de59 100644 --- a/drivers/net/ethernet/intel/igc/igc_defines.h +++ b/drivers/net/ethernet/intel/igc/igc_defines.h @@ -310,6 +310,12 @@ IGC_RXDEXT_STATERR_CXE | \ IGC_RXDEXT_STATERR_RXE) +#define IGC_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define IGC_MRQC_RSS_FIELD_IPV4 0x00020000 +#define IGC_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 +#define IGC_MRQC_RSS_FIELD_IPV6 0x00100000 +#define IGC_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 + /* Header split receive */ #define IGC_RFCTL_IPV6_EX_DIS 0x00010000 #define IGC_RFCTL_LEF 0x00040000 @@ -325,6 +331,10 @@ #define I225_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */ #define I225_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */ +/* Receive Checksum Control */ +#define IGC_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */ +#define IGC_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + /* GPY211 - I225 defines */ #define GPY_MMD_MASK 0xFFFF0000 #define GPY_MMD_SHIFT 16 @@ -390,4 +400,11 @@ #define IGC_N0_QUEUE -1 +#define IGC_MAX_MAC_HDR_LEN 127 +#define IGC_MAX_NETWORK_HDR_LEN 511 + +#define IGC_VLAPQF_QUEUE_SEL(_n, q_idx) ((q_idx) << ((_n) * 4)) +#define IGC_VLAPQF_P_VALID(_n) (0x1 << (3 + (_n) * 4)) +#define IGC_VLAPQF_QUEUE_MASK 0x03 + #endif /* _IGC_DEFINES_H_ */ diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c index eff37a6c0afa..ac98f1d96892 100644 --- a/drivers/net/ethernet/intel/igc/igc_ethtool.c +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -2,10 +2,120 @@ /* Copyright (c) 2018 Intel Corporation */ /* ethtool support for igc */ +#include <linux/if_vlan.h> #include <linux/pm_runtime.h> #include "igc.h" +/* forward declaration */ +struct igc_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define IGC_STAT(_name, _stat) { \ + .stat_string = _name, \ + .sizeof_stat = FIELD_SIZEOF(struct igc_adapter, _stat), \ + .stat_offset = offsetof(struct igc_adapter, _stat) \ +} + +static const struct igc_stats igc_gstrings_stats[] = { + IGC_STAT("rx_packets", stats.gprc), + IGC_STAT("tx_packets", stats.gptc), + IGC_STAT("rx_bytes", stats.gorc), + IGC_STAT("tx_bytes", stats.gotc), + 
IGC_STAT("rx_broadcast", stats.bprc), + IGC_STAT("tx_broadcast", stats.bptc), + IGC_STAT("rx_multicast", stats.mprc), + IGC_STAT("tx_multicast", stats.mptc), + IGC_STAT("multicast", stats.mprc), + IGC_STAT("collisions", stats.colc), + IGC_STAT("rx_crc_errors", stats.crcerrs), + IGC_STAT("rx_no_buffer_count", stats.rnbc), + IGC_STAT("rx_missed_errors", stats.mpc), + IGC_STAT("tx_aborted_errors", stats.ecol), + IGC_STAT("tx_carrier_errors", stats.tncrs), + IGC_STAT("tx_window_errors", stats.latecol), + IGC_STAT("tx_abort_late_coll", stats.latecol), + IGC_STAT("tx_deferred_ok", stats.dc), + IGC_STAT("tx_single_coll_ok", stats.scc), + IGC_STAT("tx_multi_coll_ok", stats.mcc), + IGC_STAT("tx_timeout_count", tx_timeout_count), + IGC_STAT("rx_long_length_errors", stats.roc), + IGC_STAT("rx_short_length_errors", stats.ruc), + IGC_STAT("rx_align_errors", stats.algnerrc), + IGC_STAT("tx_tcp_seg_good", stats.tsctc), + IGC_STAT("tx_tcp_seg_failed", stats.tsctfc), + IGC_STAT("rx_flow_control_xon", stats.xonrxc), + IGC_STAT("rx_flow_control_xoff", stats.xoffrxc), + IGC_STAT("tx_flow_control_xon", stats.xontxc), + IGC_STAT("tx_flow_control_xoff", stats.xofftxc), + IGC_STAT("rx_long_byte_count", stats.gorc), + IGC_STAT("tx_dma_out_of_sync", stats.doosync), + IGC_STAT("tx_smbus", stats.mgptc), + IGC_STAT("rx_smbus", stats.mgprc), + IGC_STAT("dropped_smbus", stats.mgpdc), + IGC_STAT("os2bmc_rx_by_bmc", stats.o2bgptc), + IGC_STAT("os2bmc_tx_by_bmc", stats.b2ospc), + IGC_STAT("os2bmc_tx_by_host", stats.o2bspc), + IGC_STAT("os2bmc_rx_by_host", stats.b2ogprc), + IGC_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), + IGC_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped), + IGC_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), +}; + +#define IGC_NETDEV_STAT(_net_stat) { \ + .stat_string = __stringify(_net_stat), \ + .sizeof_stat = FIELD_SIZEOF(struct rtnl_link_stats64, _net_stat), \ + .stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \ +} + +static const struct igc_stats igc_gstrings_net_stats[] = { + IGC_NETDEV_STAT(rx_errors), + IGC_NETDEV_STAT(tx_errors), + IGC_NETDEV_STAT(tx_dropped), + IGC_NETDEV_STAT(rx_length_errors), + IGC_NETDEV_STAT(rx_over_errors), + IGC_NETDEV_STAT(rx_frame_errors), + IGC_NETDEV_STAT(rx_fifo_errors), + IGC_NETDEV_STAT(tx_fifo_errors), + IGC_NETDEV_STAT(tx_heartbeat_errors) +}; + +enum igc_diagnostics_results { + TEST_REG = 0, + TEST_EEP, + TEST_IRQ, + TEST_LOOP, + TEST_LINK +}; + +static const char igc_gstrings_test[][ETH_GSTRING_LEN] = { + [TEST_REG] = "Register test (offline)", + [TEST_EEP] = "Eeprom test (offline)", + [TEST_IRQ] = "Interrupt test (offline)", + [TEST_LOOP] = "Loopback test (offline)", + [TEST_LINK] = "Link test (on/offline)" +}; + +#define IGC_TEST_LEN (sizeof(igc_gstrings_test) / ETH_GSTRING_LEN) + +#define IGC_GLOBAL_STATS_LEN \ + (sizeof(igc_gstrings_stats) / sizeof(struct igc_stats)) +#define IGC_NETDEV_STATS_LEN \ + (sizeof(igc_gstrings_net_stats) / sizeof(struct igc_stats)) +#define IGC_RX_QUEUE_STATS_LEN \ + (sizeof(struct igc_rx_queue_stats) / sizeof(u64)) +#define IGC_TX_QUEUE_STATS_LEN 3 /* packets, bytes, restart_queue */ +#define IGC_QUEUE_STATS_LEN \ + ((((struct igc_adapter *)netdev_priv(netdev))->num_rx_queues * \ + IGC_RX_QUEUE_STATS_LEN) + \ + (((struct igc_adapter *)netdev_priv(netdev))->num_tx_queues * \ + IGC_TX_QUEUE_STATS_LEN)) +#define IGC_STATS_LEN \ + (IGC_GLOBAL_STATS_LEN + IGC_NETDEV_STATS_LEN + IGC_QUEUE_STATS_LEN) + static const char igc_priv_flags_strings[][ETH_GSTRING_LEN] = { #define IGC_PRIV_FLAGS_LEGACY_RX BIT(0) 
"legacy-rx", @@ -545,6 +655,127 @@ static int igc_set_pauseparam(struct net_device *netdev, return retval; } +static void igc_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *igc_gstrings_test, + IGC_TEST_LEN * ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: + for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++) { + memcpy(p, igc_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < IGC_NETDEV_STATS_LEN; i++) { + memcpy(p, igc_gstrings_net_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < adapter->num_tx_queues; i++) { + sprintf(p, "tx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_queue_%u_restart", i); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < adapter->num_rx_queues; i++) { + sprintf(p, "rx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_drops", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_csum_err", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_alloc_failed", i); + p += ETH_GSTRING_LEN; + } + /* BUG_ON(p - data != IGC_STATS_LEN * ETH_GSTRING_LEN); */ + break; + case ETH_SS_PRIV_FLAGS: + memcpy(data, igc_priv_flags_strings, + IGC_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN); + break; + } +} + +static int igc_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return IGC_STATS_LEN; + case ETH_SS_TEST: + return IGC_TEST_LEN; + case ETH_SS_PRIV_FLAGS: + return IGC_PRIV_FLAGS_STR_LEN; + default: + return -ENOTSUPP; + } +} + +static void igc_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct rtnl_link_stats64 *net_stats = &adapter->stats64; + unsigned int start; + struct igc_ring *ring; + int i, j; + char *p; + + spin_lock(&adapter->stats64_lock); + igc_update_stats(adapter); + + for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++) { + p = (char *)adapter + igc_gstrings_stats[i].stat_offset; + data[i] = (igc_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + for (j = 0; j < IGC_NETDEV_STATS_LEN; j++, i++) { + p = (char *)net_stats + igc_gstrings_net_stats[j].stat_offset; + data[i] = (igc_gstrings_net_stats[j].sizeof_stat == + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; + } + for (j = 0; j < adapter->num_tx_queues; j++) { + u64 restart2; + + ring = adapter->tx_ring[j]; + do { + start = u64_stats_fetch_begin_irq(&ring->tx_syncp); + data[i] = ring->tx_stats.packets; + data[i + 1] = ring->tx_stats.bytes; + data[i + 2] = ring->tx_stats.restart_queue; + } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); + do { + start = u64_stats_fetch_begin_irq(&ring->tx_syncp2); + restart2 = ring->tx_stats.restart_queue2; + } while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start)); + data[i + 2] += restart2; + + i += IGC_TX_QUEUE_STATS_LEN; + } + for (j = 0; j < adapter->num_rx_queues; j++) { + ring = adapter->rx_ring[j]; + do { + start = u64_stats_fetch_begin_irq(&ring->rx_syncp); + data[i] = ring->rx_stats.packets; + data[i + 1] = ring->rx_stats.bytes; + data[i + 2] = ring->rx_stats.drops; + data[i + 3] = ring->rx_stats.csum_err; + data[i + 4] = ring->rx_stats.alloc_failed; + } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); + i += IGC_RX_QUEUE_STATS_LEN; + } + spin_unlock(&adapter->stats64_lock); +} + static int igc_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec) { @@ -643,6 +874,605 @@ static int igc_set_coalesce(struct net_device *netdev, return 0; } +#define ETHER_TYPE_FULL_MASK ((__force __be16)~0) +static int igc_get_ethtool_nfc_entry(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = &cmd->fs; + struct igc_nfc_filter *rule = NULL; + + /* report total rule count */ + cmd->data = IGC_MAX_RXNFC_FILTERS; + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { + if (fsp->location <= rule->sw_idx) + break; + } + + if (!rule || fsp->location != rule->sw_idx) + return -EINVAL; + + if (rule->filter.match_flags) { + fsp->flow_type = ETHER_FLOW; + fsp->ring_cookie = rule->action; + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { + fsp->h_u.ether_spec.h_proto = rule->filter.etype; + fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK; + } + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { + fsp->flow_type |= FLOW_EXT; + fsp->h_ext.vlan_tci = rule->filter.vlan_tci; + fsp->m_ext.vlan_tci = htons(VLAN_PRIO_MASK); + } + if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) { + ether_addr_copy(fsp->h_u.ether_spec.h_dest, + rule->filter.dst_addr); + /* As we only support matching by the full + * mask, return the mask to userspace + */ + eth_broadcast_addr(fsp->m_u.ether_spec.h_dest); + } + if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) { + ether_addr_copy(fsp->h_u.ether_spec.h_source, + rule->filter.src_addr); + /* As we only support matching by the full + * mask, return the mask to userspace + */ + eth_broadcast_addr(fsp->m_u.ether_spec.h_source); + } + + return 0; + } + return -EINVAL; +} + +static int igc_get_ethtool_nfc_all(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct igc_nfc_filter *rule; + int cnt = 0; + + /* report total rule count */ + cmd->data = IGC_MAX_RXNFC_FILTERS; + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { + if (cnt == cmd->rule_cnt) + return -EMSGSIZE; + rule_locs[cnt] = rule->sw_idx; + cnt++; + } + + cmd->rule_cnt = cnt; + + return 0; +} + +static int igc_get_rss_hash_opts(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + cmd->data = 0; + + /* Report default options for RSS on igc */ + switch (cmd->flow_type) { + case TCP_V4_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* Fall through */ + case UDP_V4_FLOW: + 
if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* Fall through */ + case SCTP_V4_FLOW: + /* Fall through */ + case AH_ESP_V4_FLOW: + /* Fall through */ + case AH_V4_FLOW: + /* Fall through */ + case ESP_V4_FLOW: + /* Fall through */ + case IPV4_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + case TCP_V6_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* Fall through */ + case UDP_V6_FLOW: + if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* Fall through */ + case SCTP_V6_FLOW: + /* Fall through */ + case AH_ESP_V6_FLOW: + /* Fall through */ + case AH_V6_FLOW: + /* Fall through */ + case ESP_V6_FLOW: + /* Fall through */ + case IPV6_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int igc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct igc_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = adapter->num_rx_queues; + ret = 0; + break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = adapter->nfc_filter_count; + ret = 0; + break; + case ETHTOOL_GRXCLSRULE: + ret = igc_get_ethtool_nfc_entry(adapter, cmd); + break; + case ETHTOOL_GRXCLSRLALL: + ret = igc_get_ethtool_nfc_all(adapter, cmd, rule_locs); + break; + case ETHTOOL_GRXFH: + ret = igc_get_rss_hash_opts(adapter, cmd); + break; + default: + break; + } + + return ret; +} + +#define UDP_RSS_FLAGS (IGC_FLAG_RSS_FIELD_IPV4_UDP | \ + IGC_FLAG_RSS_FIELD_IPV6_UDP) +static int igc_set_rss_hash_opt(struct igc_adapter *adapter, + struct ethtool_rxnfc *nfc) +{ + u32 flags = adapter->flags; + + /* RSS does not support anything other than hashing + * to queues on src and dst IPs and ports + */ + if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + !(nfc->data & RXH_L4_B_0_1) || + !(nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + case UDP_V4_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags &= ~IGC_FLAG_RSS_FIELD_IPV4_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags |= IGC_FLAG_RSS_FIELD_IPV4_UDP; + break; + default: + return -EINVAL; + } + break; + case UDP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags &= ~IGC_FLAG_RSS_FIELD_IPV6_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags |= IGC_FLAG_RSS_FIELD_IPV6_UDP; + break; + default: + return -EINVAL; + } + break; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case SCTP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + (nfc->data & RXH_L4_B_0_1) || + (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + default: + return -EINVAL; + } + + /* if we changed something we need to update flags */ + if (flags != adapter->flags) { + struct igc_hw *hw = &adapter->hw; + u32 mrqc = rd32(IGC_MRQC); + + if ((flags & UDP_RSS_FLAGS) && + !(adapter->flags & UDP_RSS_FLAGS)) + dev_err(&adapter->pdev->dev, + "enabling UDP RSS: fragmented packets may arrive out of order to the stack 
above\n"); + + adapter->flags = flags; + + /* Perform hash on these packet types */ + mrqc |= IGC_MRQC_RSS_FIELD_IPV4 | + IGC_MRQC_RSS_FIELD_IPV4_TCP | + IGC_MRQC_RSS_FIELD_IPV6 | + IGC_MRQC_RSS_FIELD_IPV6_TCP; + + mrqc &= ~(IGC_MRQC_RSS_FIELD_IPV4_UDP | + IGC_MRQC_RSS_FIELD_IPV6_UDP); + + if (flags & IGC_FLAG_RSS_FIELD_IPV4_UDP) + mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP; + + if (flags & IGC_FLAG_RSS_FIELD_IPV6_UDP) + mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP; + + wr32(IGC_MRQC, mrqc); + } + + return 0; +} + +static int igc_rxnfc_write_etype_filter(struct igc_adapter *adapter, + struct igc_nfc_filter *input) +{ + struct igc_hw *hw = &adapter->hw; + u8 i; + u32 etqf; + u16 etype; + + /* find an empty etype filter register */ + for (i = 0; i < MAX_ETYPE_FILTER; ++i) { + if (!adapter->etype_bitmap[i]) + break; + } + if (i == MAX_ETYPE_FILTER) { + dev_err(&adapter->pdev->dev, "ethtool -N: etype filters are all used.\n"); + return -EINVAL; + } + + adapter->etype_bitmap[i] = true; + + etqf = rd32(IGC_ETQF(i)); + etype = ntohs(input->filter.etype & ETHER_TYPE_FULL_MASK); + + etqf |= IGC_ETQF_FILTER_ENABLE; + etqf &= ~IGC_ETQF_ETYPE_MASK; + etqf |= (etype & IGC_ETQF_ETYPE_MASK); + + etqf &= ~IGC_ETQF_QUEUE_MASK; + etqf |= ((input->action << IGC_ETQF_QUEUE_SHIFT) + & IGC_ETQF_QUEUE_MASK); + etqf |= IGC_ETQF_QUEUE_ENABLE; + + wr32(IGC_ETQF(i), etqf); + + input->etype_reg_index = i; + + return 0; +} + +static int igc_rxnfc_write_vlan_prio_filter(struct igc_adapter *adapter, + struct igc_nfc_filter *input) +{ + struct igc_hw *hw = &adapter->hw; + u8 vlan_priority; + u16 queue_index; + u32 vlapqf; + + vlapqf = rd32(IGC_VLAPQF); + vlan_priority = (ntohs(input->filter.vlan_tci) & VLAN_PRIO_MASK) + >> VLAN_PRIO_SHIFT; + queue_index = (vlapqf >> (vlan_priority * 4)) & IGC_VLAPQF_QUEUE_MASK; + + /* check whether this vlan prio is already set */ + if (vlapqf & IGC_VLAPQF_P_VALID(vlan_priority) && + queue_index != input->action) { + dev_err(&adapter->pdev->dev, "ethtool rxnfc set vlan prio filter failed.\n"); + return -EEXIST; + } + + vlapqf |= IGC_VLAPQF_P_VALID(vlan_priority); + vlapqf |= IGC_VLAPQF_QUEUE_SEL(vlan_priority, input->action); + + wr32(IGC_VLAPQF, vlapqf); + + return 0; +} + +int igc_add_filter(struct igc_adapter *adapter, struct igc_nfc_filter *input) +{ + struct igc_hw *hw = &adapter->hw; + int err = -EINVAL; + + if (hw->mac.type == igc_i225 && + !(input->filter.match_flags & ~IGC_FILTER_FLAG_SRC_MAC_ADDR)) { + dev_err(&adapter->pdev->dev, + "i225 doesn't support flow classification rules specifying only source addresses.\n"); + return -EOPNOTSUPP; + } + + if (input->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { + err = igc_rxnfc_write_etype_filter(adapter, input); + if (err) + return err; + } + + if (input->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) { + err = igc_add_mac_steering_filter(adapter, + input->filter.dst_addr, + input->action, 0); + err = min_t(int, err, 0); + if (err) + return err; + } + + if (input->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) { + err = igc_add_mac_steering_filter(adapter, + input->filter.src_addr, + input->action, + IGC_MAC_STATE_SRC_ADDR); + err = min_t(int, err, 0); + if (err) + return err; + } + + if (input->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) + err = igc_rxnfc_write_vlan_prio_filter(adapter, input); + + return err; +} + +static void igc_clear_etype_filter_regs(struct igc_adapter *adapter, + u16 reg_index) +{ + struct igc_hw *hw = &adapter->hw; + u32 etqf = rd32(IGC_ETQF(reg_index)); + + etqf &= ~IGC_ETQF_QUEUE_ENABLE; + etqf &= 
~IGC_ETQF_QUEUE_MASK; + etqf &= ~IGC_ETQF_FILTER_ENABLE; + + wr32(IGC_ETQF(reg_index), etqf); + + adapter->etype_bitmap[reg_index] = false; +} + +static void igc_clear_vlan_prio_filter(struct igc_adapter *adapter, + u16 vlan_tci) +{ + struct igc_hw *hw = &adapter->hw; + u8 vlan_priority; + u32 vlapqf; + + vlan_priority = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; + + vlapqf = rd32(IGC_VLAPQF); + vlapqf &= ~IGC_VLAPQF_P_VALID(vlan_priority); + vlapqf &= ~IGC_VLAPQF_QUEUE_SEL(vlan_priority, + IGC_VLAPQF_QUEUE_MASK); + + wr32(IGC_VLAPQF, vlapqf); +} + +int igc_erase_filter(struct igc_adapter *adapter, struct igc_nfc_filter *input) +{ + if (input->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) + igc_clear_etype_filter_regs(adapter, + input->etype_reg_index); + + if (input->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) + igc_clear_vlan_prio_filter(adapter, + ntohs(input->filter.vlan_tci)); + + if (input->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) + igc_del_mac_steering_filter(adapter, input->filter.src_addr, + input->action, + IGC_MAC_STATE_SRC_ADDR); + + if (input->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) + igc_del_mac_steering_filter(adapter, input->filter.dst_addr, + input->action, 0); + + return 0; +} + +static int igc_update_ethtool_nfc_entry(struct igc_adapter *adapter, + struct igc_nfc_filter *input, + u16 sw_idx) +{ + struct igc_nfc_filter *rule, *parent; + int err = -EINVAL; + + parent = NULL; + rule = NULL; + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { + /* hash found, or no matching entry */ + if (rule->sw_idx >= sw_idx) + break; + parent = rule; + } + + /* if there is an old rule occupying our place remove it */ + if (rule && rule->sw_idx == sw_idx) { + if (!input) + err = igc_erase_filter(adapter, rule); + + hlist_del(&rule->nfc_node); + kfree(rule); + adapter->nfc_filter_count--; + } + + /* If no input this was a delete, err should be 0 if a rule was + * successfully found and removed from the list else -EINVAL + */ + if (!input) + return err; + + /* initialize node */ + INIT_HLIST_NODE(&input->nfc_node); + + /* add filter to the list */ + if (parent) + hlist_add_behind(&input->nfc_node, &parent->nfc_node); + else + hlist_add_head(&input->nfc_node, &adapter->nfc_filter_list); + + /* update counts */ + adapter->nfc_filter_count++; + + return 0; +} + +static int igc_add_ethtool_nfc_entry(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct net_device *netdev = adapter->netdev; + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct igc_nfc_filter *input, *rule; + int err = 0; + + if (!(netdev->hw_features & NETIF_F_NTUPLE)) + return -EOPNOTSUPP; + + /* Don't allow programming if the action is a queue greater than + * the number of online Rx queues. 
+ */ + if (fsp->ring_cookie == RX_CLS_FLOW_DISC || + fsp->ring_cookie >= adapter->num_rx_queues) { + dev_err(&adapter->pdev->dev, "ethtool -N: The specified action is invalid\n"); + return -EINVAL; + } + + /* Don't allow indexes to exist outside of available space */ + if (fsp->location >= IGC_MAX_RXNFC_FILTERS) { + dev_err(&adapter->pdev->dev, "Location out of range\n"); + return -EINVAL; + } + + if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW) + return -EINVAL; + + input = kzalloc(sizeof(*input), GFP_KERNEL); + if (!input) + return -ENOMEM; + + if (fsp->m_u.ether_spec.h_proto == ETHER_TYPE_FULL_MASK) { + input->filter.etype = fsp->h_u.ether_spec.h_proto; + input->filter.match_flags = IGC_FILTER_FLAG_ETHER_TYPE; + } + + /* Only support matching addresses by the full mask */ + if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_source)) { + input->filter.match_flags |= IGC_FILTER_FLAG_SRC_MAC_ADDR; + ether_addr_copy(input->filter.src_addr, + fsp->h_u.ether_spec.h_source); + } + + /* Only support matching addresses by the full mask */ + if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_dest)) { + input->filter.match_flags |= IGC_FILTER_FLAG_DST_MAC_ADDR; + ether_addr_copy(input->filter.dst_addr, + fsp->h_u.ether_spec.h_dest); + } + + if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) { + if (fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK)) { + err = -EINVAL; + goto err_out; + } + input->filter.vlan_tci = fsp->h_ext.vlan_tci; + input->filter.match_flags |= IGC_FILTER_FLAG_VLAN_TCI; + } + + input->action = fsp->ring_cookie; + input->sw_idx = fsp->location; + + spin_lock(&adapter->nfc_lock); + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { + if (!memcmp(&input->filter, &rule->filter, + sizeof(input->filter))) { + err = -EEXIST; + dev_err(&adapter->pdev->dev, + "ethtool: this filter is already set\n"); + goto err_out_w_lock; + } + } + + err = igc_add_filter(adapter, input); + if (err) + goto err_out_w_lock; + + igc_update_ethtool_nfc_entry(adapter, input, input->sw_idx); + + spin_unlock(&adapter->nfc_lock); + return 0; + +err_out_w_lock: + spin_unlock(&adapter->nfc_lock); +err_out: + kfree(input); + return err; +} + +static int igc_del_ethtool_nfc_entry(struct igc_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + int err; + + spin_lock(&adapter->nfc_lock); + err = igc_update_ethtool_nfc_entry(adapter, NULL, fsp->location); + spin_unlock(&adapter->nfc_lock); + + return err; +} + +static int igc_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct igc_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + ret = igc_set_rss_hash_opt(adapter, cmd); + break; + case ETHTOOL_SRXCLSRLINS: + ret = igc_add_ethtool_nfc_entry(adapter, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + ret = igc_del_ethtool_nfc_entry(adapter, cmd); + default: + break; + } + + return ret; +} + void igc_write_rss_indir_tbl(struct igc_adapter *adapter) { struct igc_hw *hw = &adapter->hw; @@ -885,17 +1715,13 @@ static int igc_get_link_ksettings(struct net_device *netdev, if (hw->mac.type == igc_i225 && (status & IGC_STATUS_SPEED_2500)) { speed = SPEED_2500; - hw_dbg("2500 Mbs, "); } else { speed = SPEED_1000; - hw_dbg("1000 Mbs, "); } } else if (status & IGC_STATUS_SPEED_100) { speed = SPEED_100; - hw_dbg("100 Mbs, "); } else { speed = SPEED_10; - hw_dbg("10 Mbs, "); } if ((status & IGC_STATUS_FD) || hw->phy.media_type != igc_media_type_copper) @@ -1011,8 
+1837,13 @@ static const struct ethtool_ops igc_ethtool_ops = { .set_ringparam = igc_set_ringparam, .get_pauseparam = igc_get_pauseparam, .set_pauseparam = igc_set_pauseparam, + .get_strings = igc_get_strings, + .get_sset_count = igc_get_sset_count, + .get_ethtool_stats = igc_get_ethtool_stats, .get_coalesce = igc_get_coalesce, .set_coalesce = igc_set_coalesce, + .get_rxnfc = igc_get_rxnfc, + .set_rxnfc = igc_set_rxnfc, .get_rxfh_indir_size = igc_get_rxfh_indir_size, .get_rxfh = igc_get_rxfh, .set_rxfh = igc_set_rxfh, diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 87a11879bf2d..e58a6e0dc4d9 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -620,6 +620,55 @@ static void igc_configure_tx(struct igc_adapter *adapter) */ static void igc_setup_mrqc(struct igc_adapter *adapter) { + struct igc_hw *hw = &adapter->hw; + u32 j, num_rx_queues; + u32 mrqc, rxcsum; + u32 rss_key[10]; + + netdev_rss_key_fill(rss_key, sizeof(rss_key)); + for (j = 0; j < 10; j++) + wr32(IGC_RSSRK(j), rss_key[j]); + + num_rx_queues = adapter->rss_queues; + + if (adapter->rss_indir_tbl_init != num_rx_queues) { + for (j = 0; j < IGC_RETA_SIZE; j++) + adapter->rss_indir_tbl[j] = + (j * num_rx_queues) / IGC_RETA_SIZE; + adapter->rss_indir_tbl_init = num_rx_queues; + } + igc_write_rss_indir_tbl(adapter); + + /* Disable raw packet checksumming so that RSS hash is placed in + * descriptor on writeback. No need to enable TCP/UDP/IP checksum + * offloads as they are enabled by default + */ + rxcsum = rd32(IGC_RXCSUM); + rxcsum |= IGC_RXCSUM_PCSD; + + /* Enable Receive Checksum Offload for SCTP */ + rxcsum |= IGC_RXCSUM_CRCOFL; + + /* Don't need to set TUOFL or IPOFL, they default to 1 */ + wr32(IGC_RXCSUM, rxcsum); + + /* Generate RSS hash based on packet types, TCP/UDP + * port numbers and/or IPv4/v6 src and dst addresses + */ + mrqc = IGC_MRQC_RSS_FIELD_IPV4 | + IGC_MRQC_RSS_FIELD_IPV4_TCP | + IGC_MRQC_RSS_FIELD_IPV6 | + IGC_MRQC_RSS_FIELD_IPV6_TCP | + IGC_MRQC_RSS_FIELD_IPV6_TCP_EX; + + if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP) + mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP; + if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP) + mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP; + + mrqc |= IGC_MRQC_ENABLE_RSS_MQ; + + wr32(IGC_MRQC, mrqc); } /** @@ -890,7 +939,7 @@ static int igc_tx_map(struct igc_ring *tx_ring, /* Make sure there is space in the ring for the next send. 
*/ igc_maybe_stop_tx(tx_ring, DESC_NEEDED); - if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { writel(i, tx_ring->tail); /* we need this if more than one processor can write to our tail @@ -1150,7 +1199,7 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring, /* Determine available headroom for copy */ headlen = size; if (headlen > IGC_RX_HDR_LEN) - headlen = eth_get_headlen(va, IGC_RX_HDR_LEN); + headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN); /* align pull length to size of long to optimize memcpy performance */ memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); @@ -1738,12 +1787,200 @@ void igc_up(struct igc_adapter *adapter) * igc_update_stats - Update the board statistics counters * @adapter: board private structure */ -static void igc_update_stats(struct igc_adapter *adapter) +void igc_update_stats(struct igc_adapter *adapter) { + struct rtnl_link_stats64 *net_stats = &adapter->stats64; + struct pci_dev *pdev = adapter->pdev; + struct igc_hw *hw = &adapter->hw; + u64 _bytes, _packets; + u64 bytes, packets; + unsigned int start; + u32 mpc; + int i; + + /* Prevent stats update while adapter is being reset, or if the pci + * connection is down. + */ + if (adapter->link_speed == 0) + return; + if (pci_channel_offline(pdev)) + return; + + packets = 0; + bytes = 0; + + rcu_read_lock(); + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igc_ring *ring = adapter->rx_ring[i]; + u32 rqdpc = rd32(IGC_RQDPC(i)); + + if (hw->mac.type >= igc_i225) + wr32(IGC_RQDPC(i), 0); + + if (rqdpc) { + ring->rx_stats.drops += rqdpc; + net_stats->rx_fifo_errors += rqdpc; + } + + do { + start = u64_stats_fetch_begin_irq(&ring->rx_syncp); + _bytes = ring->rx_stats.bytes; + _packets = ring->rx_stats.packets; + } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); + bytes += _bytes; + packets += _packets; + } + + net_stats->rx_bytes = bytes; + net_stats->rx_packets = packets; + + packets = 0; + bytes = 0; + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + do { + start = u64_stats_fetch_begin_irq(&ring->tx_syncp); + _bytes = ring->tx_stats.bytes; + _packets = ring->tx_stats.packets; + } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); + bytes += _bytes; + packets += _packets; + } + net_stats->tx_bytes = bytes; + net_stats->tx_packets = packets; + rcu_read_unlock(); + + /* read stats registers */ + adapter->stats.crcerrs += rd32(IGC_CRCERRS); + adapter->stats.gprc += rd32(IGC_GPRC); + adapter->stats.gorc += rd32(IGC_GORCL); + rd32(IGC_GORCH); /* clear GORCL */ + adapter->stats.bprc += rd32(IGC_BPRC); + adapter->stats.mprc += rd32(IGC_MPRC); + adapter->stats.roc += rd32(IGC_ROC); + + adapter->stats.prc64 += rd32(IGC_PRC64); + adapter->stats.prc127 += rd32(IGC_PRC127); + adapter->stats.prc255 += rd32(IGC_PRC255); + adapter->stats.prc511 += rd32(IGC_PRC511); + adapter->stats.prc1023 += rd32(IGC_PRC1023); + adapter->stats.prc1522 += rd32(IGC_PRC1522); + adapter->stats.symerrs += rd32(IGC_SYMERRS); + adapter->stats.sec += rd32(IGC_SEC); + + mpc = rd32(IGC_MPC); + adapter->stats.mpc += mpc; + net_stats->rx_fifo_errors += mpc; + adapter->stats.scc += rd32(IGC_SCC); + adapter->stats.ecol += rd32(IGC_ECOL); + adapter->stats.mcc += rd32(IGC_MCC); + adapter->stats.latecol += rd32(IGC_LATECOL); + adapter->stats.dc += rd32(IGC_DC); + adapter->stats.rlec += rd32(IGC_RLEC); + adapter->stats.xonrxc += rd32(IGC_XONRXC); + 
adapter->stats.xontxc += rd32(IGC_XONTXC); + adapter->stats.xoffrxc += rd32(IGC_XOFFRXC); + adapter->stats.xofftxc += rd32(IGC_XOFFTXC); + adapter->stats.fcruc += rd32(IGC_FCRUC); + adapter->stats.gptc += rd32(IGC_GPTC); + adapter->stats.gotc += rd32(IGC_GOTCL); + rd32(IGC_GOTCH); /* clear GOTCL */ + adapter->stats.rnbc += rd32(IGC_RNBC); + adapter->stats.ruc += rd32(IGC_RUC); + adapter->stats.rfc += rd32(IGC_RFC); + adapter->stats.rjc += rd32(IGC_RJC); + adapter->stats.tor += rd32(IGC_TORH); + adapter->stats.tot += rd32(IGC_TOTH); + adapter->stats.tpr += rd32(IGC_TPR); + + adapter->stats.ptc64 += rd32(IGC_PTC64); + adapter->stats.ptc127 += rd32(IGC_PTC127); + adapter->stats.ptc255 += rd32(IGC_PTC255); + adapter->stats.ptc511 += rd32(IGC_PTC511); + adapter->stats.ptc1023 += rd32(IGC_PTC1023); + adapter->stats.ptc1522 += rd32(IGC_PTC1522); + + adapter->stats.mptc += rd32(IGC_MPTC); + adapter->stats.bptc += rd32(IGC_BPTC); + + adapter->stats.tpt += rd32(IGC_TPT); + adapter->stats.colc += rd32(IGC_COLC); + + adapter->stats.algnerrc += rd32(IGC_ALGNERRC); + + adapter->stats.tsctc += rd32(IGC_TSCTC); + adapter->stats.tsctfc += rd32(IGC_TSCTFC); + + adapter->stats.iac += rd32(IGC_IAC); + adapter->stats.icrxoc += rd32(IGC_ICRXOC); + adapter->stats.icrxptc += rd32(IGC_ICRXPTC); + adapter->stats.icrxatc += rd32(IGC_ICRXATC); + adapter->stats.ictxptc += rd32(IGC_ICTXPTC); + adapter->stats.ictxatc += rd32(IGC_ICTXATC); + adapter->stats.ictxqec += rd32(IGC_ICTXQEC); + adapter->stats.ictxqmtc += rd32(IGC_ICTXQMTC); + adapter->stats.icrxdmtc += rd32(IGC_ICRXDMTC); + + /* Fill out the OS statistics structure */ + net_stats->multicast = adapter->stats.mprc; + net_stats->collisions = adapter->stats.colc; + + /* Rx Errors */ + + /* RLEC on some newer hardware can be incorrect so build + * our own version based on RUC and ROC + */ + net_stats->rx_errors = adapter->stats.rxerrc + + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + + adapter->stats.cexterr; + net_stats->rx_length_errors = adapter->stats.ruc + + adapter->stats.roc; + net_stats->rx_crc_errors = adapter->stats.crcerrs; + net_stats->rx_frame_errors = adapter->stats.algnerrc; + net_stats->rx_missed_errors = adapter->stats.mpc; + + /* Tx Errors */ + net_stats->tx_errors = adapter->stats.ecol + + adapter->stats.latecol; + net_stats->tx_aborted_errors = adapter->stats.ecol; + net_stats->tx_window_errors = adapter->stats.latecol; + net_stats->tx_carrier_errors = adapter->stats.tncrs; + + /* Tx Dropped needs to be maintained elsewhere */ + + /* Management Stats */ + adapter->stats.mgptc += rd32(IGC_MGTPTC); + adapter->stats.mgprc += rd32(IGC_MGTPRC); + adapter->stats.mgpdc += rd32(IGC_MGTPDC); } static void igc_nfc_filter_exit(struct igc_adapter *adapter) { + struct igc_nfc_filter *rule; + + spin_lock(&adapter->nfc_lock); + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) + igc_erase_filter(adapter, rule); + + hlist_for_each_entry(rule, &adapter->cls_flower_list, nfc_node) + igc_erase_filter(adapter, rule); + + spin_unlock(&adapter->nfc_lock); +} + +static void igc_nfc_filter_restore(struct igc_adapter *adapter) +{ + struct igc_nfc_filter *rule; + + spin_lock(&adapter->nfc_lock); + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) + igc_add_filter(adapter, rule); + + spin_unlock(&adapter->nfc_lock); } /** @@ -1890,6 +2127,86 @@ static struct net_device_stats *igc_get_stats(struct net_device *netdev) return &netdev->stats; } +static netdev_features_t igc_fix_features(struct 
net_device *netdev, + netdev_features_t features) +{ + /* Since there is no support for separate Rx/Tx vlan accel + * enable/disable make sure Tx flag is always in same state as Rx. + */ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; + else + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + return features; +} + +static int igc_set_features(struct net_device *netdev, + netdev_features_t features) +{ + netdev_features_t changed = netdev->features ^ features; + struct igc_adapter *adapter = netdev_priv(netdev); + + /* Add VLAN support */ + if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE))) + return 0; + + if (!(features & NETIF_F_NTUPLE)) { + struct hlist_node *node2; + struct igc_nfc_filter *rule; + + spin_lock(&adapter->nfc_lock); + hlist_for_each_entry_safe(rule, node2, + &adapter->nfc_filter_list, nfc_node) { + igc_erase_filter(adapter, rule); + hlist_del(&rule->nfc_node); + kfree(rule); + } + spin_unlock(&adapter->nfc_lock); + adapter->nfc_filter_count = 0; + } + + netdev->features = features; + + if (netif_running(netdev)) + igc_reinit_locked(adapter); + else + igc_reset(adapter); + + return 1; +} + +static netdev_features_t +igc_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features) +{ + unsigned int network_hdr_len, mac_hdr_len; + + /* Make certain the headers can be described by a context descriptor */ + mac_hdr_len = skb_network_header(skb) - skb->data; + if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_TSO | + NETIF_F_TSO6); + + network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); + if (unlikely(network_hdr_len > IGC_MAX_NETWORK_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_TSO | + NETIF_F_TSO6); + + /* We can only support IPv4 TSO in tunnels if we can mangle the + * inner IP ID field, so strip TSO if MANGLEID is not supported. + */ + if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) + features &= ~NETIF_F_TSO; + + return features; +} + /** * igc_configure - configure the hardware for RX and TX * @adapter: private board structure @@ -1906,6 +2223,7 @@ static void igc_configure(struct igc_adapter *adapter) igc_setup_mrqc(adapter); igc_setup_rctl(adapter); + igc_nfc_filter_restore(adapter); igc_configure_tx(adapter); igc_configure_rx(adapter); @@ -1967,6 +2285,127 @@ static void igc_set_default_mac_filter(struct igc_adapter *adapter) igc_rar_set_index(adapter, 0); } +/* If the filter to be added and an already existing filter express + * the same address and address type, it should be possible to only + * override the other configurations, for example the queue to steer + * traffic. + */ +static bool igc_mac_entry_can_be_used(const struct igc_mac_addr *entry, + const u8 *addr, const u8 flags) +{ + if (!(entry->state & IGC_MAC_STATE_IN_USE)) + return true; + + if ((entry->state & IGC_MAC_STATE_SRC_ADDR) != + (flags & IGC_MAC_STATE_SRC_ADDR)) + return false; + + if (!ether_addr_equal(addr, entry->addr)) + return false; + + return true; +} + +/* Add a MAC filter for 'addr' directing matching traffic to 'queue', + * 'flags' is used to indicate what kind of match is made, match is by + * default for the destination address, if matching by source address + * is desired the flag IGC_MAC_STATE_SRC_ADDR can be used. 
+ */ +static int igc_add_mac_filter_flags(struct igc_adapter *adapter, + const u8 *addr, const u8 queue, + const u8 flags) +{ + struct igc_hw *hw = &adapter->hw; + int rar_entries = hw->mac.rar_entry_count; + int i; + + if (is_zero_ether_addr(addr)) + return -EINVAL; + + /* Search for the first empty entry in the MAC table. + * Do not touch entries at the end of the table reserved for the VF MAC + * addresses. + */ + for (i = 0; i < rar_entries; i++) { + if (!igc_mac_entry_can_be_used(&adapter->mac_table[i], + addr, flags)) + continue; + + ether_addr_copy(adapter->mac_table[i].addr, addr); + adapter->mac_table[i].queue = queue; + adapter->mac_table[i].state |= IGC_MAC_STATE_IN_USE | flags; + + igc_rar_set_index(adapter, i); + return i; + } + + return -ENOSPC; +} + +int igc_add_mac_steering_filter(struct igc_adapter *adapter, + const u8 *addr, u8 queue, u8 flags) +{ + return igc_add_mac_filter_flags(adapter, addr, queue, + IGC_MAC_STATE_QUEUE_STEERING | flags); +} + +/* Remove a MAC filter for 'addr' directing matching traffic to + * 'queue', 'flags' is used to indicate what kind of match need to be + * removed, match is by default for the destination address, if + * matching by source address is to be removed the flag + * IGC_MAC_STATE_SRC_ADDR can be used. + */ +static int igc_del_mac_filter_flags(struct igc_adapter *adapter, + const u8 *addr, const u8 queue, + const u8 flags) +{ + struct igc_hw *hw = &adapter->hw; + int rar_entries = hw->mac.rar_entry_count; + int i; + + if (is_zero_ether_addr(addr)) + return -EINVAL; + + /* Search for matching entry in the MAC table based on given address + * and queue. Do not touch entries at the end of the table reserved + * for the VF MAC addresses. + */ + for (i = 0; i < rar_entries; i++) { + if (!(adapter->mac_table[i].state & IGC_MAC_STATE_IN_USE)) + continue; + if ((adapter->mac_table[i].state & flags) != flags) + continue; + if (adapter->mac_table[i].queue != queue) + continue; + if (!ether_addr_equal(adapter->mac_table[i].addr, addr)) + continue; + + /* When a filter for the default address is "deleted", + * we return it to its initial configuration + */ + if (adapter->mac_table[i].state & IGC_MAC_STATE_DEFAULT) { + adapter->mac_table[i].state = + IGC_MAC_STATE_DEFAULT | IGC_MAC_STATE_IN_USE; + } else { + adapter->mac_table[i].state = 0; + adapter->mac_table[i].queue = 0; + memset(adapter->mac_table[i].addr, 0, ETH_ALEN); + } + + igc_rar_set_index(adapter, i); + return 0; + } + + return -ENOENT; +} + +int igc_del_mac_steering_filter(struct igc_adapter *adapter, + const u8 *addr, u8 queue, u8 flags) +{ + return igc_del_mac_filter_flags(adapter, addr, queue, + IGC_MAC_STATE_QUEUE_STEERING | flags); +} + /** * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set * @netdev: network interface device structure @@ -3434,6 +3873,9 @@ static const struct net_device_ops igc_netdev_ops = { .ndo_set_mac_address = igc_set_mac, .ndo_change_mtu = igc_change_mtu, .ndo_get_stats = igc_get_stats, + .ndo_fix_features = igc_fix_features, + .ndo_set_features = igc_set_features, + .ndo_features_check = igc_features_check, }; /* PCIe configuration access */ @@ -3663,6 +4105,9 @@ static int igc_probe(struct pci_dev *pdev, if (err) goto err_sw_init; + /* copy netdev features into list of user selectable features */ + netdev->hw_features |= NETIF_F_NTUPLE; + /* MTU range: 68 - 9216 */ netdev->min_mtu = ETH_MIN_MTU; netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE; diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h 
b/drivers/net/ethernet/intel/igc/igc_regs.h index 5afe7a8d3faf..50d7c04dccf5 100644 --- a/drivers/net/ethernet/intel/igc/igc_regs.h +++ b/drivers/net/ethernet/intel/igc/igc_regs.h @@ -80,8 +80,23 @@ /* MSI-X Table Register Descriptions */ #define IGC_PBACL 0x05B68 /* MSIx PBA Clear - R/W 1 to clear */ +/* RSS registers */ +#define IGC_MRQC 0x05818 /* Multiple Receive Control - RW */ + +/* Filtering Registers */ +#define IGC_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ + +/* ETQF register bit definitions */ +#define IGC_ETQF_FILTER_ENABLE BIT(26) +#define IGC_ETQF_QUEUE_ENABLE BIT(31) +#define IGC_ETQF_QUEUE_SHIFT 16 +#define IGC_ETQF_QUEUE_MASK 0x00070000 +#define IGC_ETQF_ETYPE_MASK 0x0000FFFF + /* Redirection Table - RW Array */ #define IGC_RETA(_i) (0x05C00 + ((_i) * 4)) +/* RSS Random Key - RW Array */ +#define IGC_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* Receive Register Descriptions */ #define IGC_RCTL 0x00100 /* Rx Control - RW */ @@ -101,6 +116,7 @@ #define IGC_UTA 0x0A000 /* Unicast Table Array - RW */ #define IGC_RAL(_n) (0x05400 + ((_n) * 0x08)) #define IGC_RAH(_n) (0x05404 + ((_n) * 0x08)) +#define IGC_VLAPQF 0x055B0 /* VLAN Priority Queue Filter VLAPQF */ /* Transmit Register Descriptions */ #define IGC_TCTL 0x00400 /* Tx Control - RW */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index e100054a3765..7b903206b534 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1800,7 +1800,7 @@ static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring, * we need the header to contain the greater of either ETH_HLEN or * 60 bytes if the skb->len is less than 60 for skb_pad. */ - pull_len = eth_get_headlen(va, IXGBE_RX_HDR_SIZE); + pull_len = eth_get_headlen(skb->dev, va, IXGBE_RX_HDR_SIZE); /* align pull length to size of long to optimize memcpy performance */ skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); @@ -8297,7 +8297,7 @@ static int ixgbe_tx_map(struct ixgbe_ring *tx_ring, ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); - if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { writel(i, tx_ring->tail); /* we need this if more than one processor can write to our tail @@ -8483,8 +8483,7 @@ static void ixgbe_atr(struct ixgbe_ring *ring, #ifdef IXGBE_FCOE static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, - struct net_device *sb_dev, - select_queue_fallback_t fallback) + struct net_device *sb_dev) { struct ixgbe_adapter *adapter; struct ixgbe_ring_feature *f; @@ -8514,7 +8513,7 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, break; /* fall through */ default: - return fallback(dev, skb, sb_dev); + return netdev_pick_tx(dev, skb, sb_dev); } f = &adapter->ring_feature[RING_F_FCOE]; @@ -9796,7 +9795,7 @@ static int ixgbe_set_features(struct net_device *netdev, NETIF_F_HW_VLAN_CTAG_FILTER)) ixgbe_set_rx_mode(netdev); - return 0; + return 1; } /** diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 49e23afa05a2..d189ed247665 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -895,7 +895,8 @@ struct sk_buff *ixgbevf_construct_skb(struct ixgbevf_ring *rx_ring, /* Determine available headroom for copy */ headlen = size; if (headlen > IXGBEVF_RX_HDR_SIZE) - headlen = 
eth_get_headlen(xdp->data, IXGBEVF_RX_HDR_SIZE); + headlen = eth_get_headlen(skb->dev, xdp->data, + IXGBEVF_RX_HDR_SIZE); /* align pull length to size of long to optimize memcpy performance */ memcpy(__skb_put(skb, headlen), xdp->data, diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index c0a3718b2e2a..a715277ecf81 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -2467,7 +2467,7 @@ out: if (txq->count >= txq->tx_stop_threshold) netif_tx_stop_queue(nq); - if (!skb->xmit_more || netif_xmit_stopped(nq) || + if (!netdev_xmit_more() || netif_xmit_stopped(nq) || txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK) mvneta_txq_pend_desc_add(pp, txq, frags); else @@ -3385,6 +3385,7 @@ static void mvneta_validate(struct net_device *ndev, unsigned long *supported, phylink_set(mask, 1000baseX_Full); } if (pp->comphy || state->interface == PHY_INTERFACE_MODE_2500BASEX) { + phylink_set(mask, 2500baseT_Full); phylink_set(mask, 2500baseX_Full); } @@ -4475,15 +4476,14 @@ static int mvneta_probe(struct platform_device *pdev) int err; int cpu; - dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number); + dev = devm_alloc_etherdev_mqs(&pdev->dev, sizeof(struct mvneta_port), + txq_number, rxq_number); if (!dev) return -ENOMEM; dev->irq = irq_of_parse_and_map(dn, 0); - if (dev->irq == 0) { - err = -EINVAL; - goto err_free_netdev; - } + if (dev->irq == 0) + return -EINVAL; phy_mode = of_get_phy_mode(dn); if (phy_mode < 0) { @@ -4704,8 +4704,6 @@ err_free_phylink: phylink_destroy(pp->phylink); err_free_irq: irq_dispose_mapping(dev->irq); -err_free_netdev: - free_netdev(dev); return err; } @@ -4722,7 +4720,6 @@ static int mvneta_remove(struct platform_device *pdev) free_percpu(pp->stats); irq_dispose_mapping(dev->irq); phylink_destroy(pp->phylink); - free_netdev(dev); if (pp->bm_priv) { mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index ff0f4c503f53..6171270a016c 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -14,6 +14,7 @@ #include <linux/netdevice.h> #include <linux/phy.h> #include <linux/phylink.h> +#include <net/flow_offload.h> /* Fifo Registers */ #define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port)) @@ -101,6 +102,7 @@ #define MVPP2_CLS_FLOW_TBL1_REG 0x1828 #define MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK 0x7 #define MVPP2_CLS_FLOW_TBL1_N_FIELDS(x) (x) +#define MVPP2_CLS_FLOW_TBL1_LU_TYPE(lu) (((lu) & 0x3f) << 3) #define MVPP2_CLS_FLOW_TBL1_PRIO_MASK 0x3f #define MVPP2_CLS_FLOW_TBL1_PRIO(x) ((x) << 9) #define MVPP2_CLS_FLOW_TBL1_SEQ_MASK 0x7 @@ -123,13 +125,18 @@ #define MVPP22_CLS_C2_TCAM_DATA2 0x1b18 #define MVPP22_CLS_C2_TCAM_DATA3 0x1b1c #define MVPP22_CLS_C2_TCAM_DATA4 0x1b20 +#define MVPP22_CLS_C2_LU_TYPE(lu) ((lu) & 0x3f) #define MVPP22_CLS_C2_PORT_ID(port) ((port) << 8) +#define MVPP22_CLS_C2_PORT_MASK (0xff << 8) +#define MVPP22_CLS_C2_TCAM_INV 0x1b24 +#define MVPP22_CLS_C2_TCAM_INV_BIT BIT(31) #define MVPP22_CLS_C2_HIT_CTR 0x1b50 #define MVPP22_CLS_C2_ACT 0x1b60 #define MVPP22_CLS_C2_ACT_RSS_EN(act) (((act) & 0x3) << 19) #define MVPP22_CLS_C2_ACT_FWD(act) (((act) & 0x7) << 13) #define MVPP22_CLS_C2_ACT_QHIGH(act) (((act) & 0x3) << 11) #define MVPP22_CLS_C2_ACT_QLOW(act) (((act) & 0x3) << 9) +#define MVPP22_CLS_C2_ACT_COLOR(act) ((act) & 0x7) #define MVPP22_CLS_C2_ATTR0 0x1b64 #define MVPP22_CLS_C2_ATTR0_QHIGH(qh) 
(((qh) & 0x1f) << 24) #define MVPP22_CLS_C2_ATTR0_QHIGH_MASK 0x1f @@ -610,6 +617,12 @@ #define MVPP2_BIT_TO_WORD(bit) ((bit) / 32) #define MVPP2_BIT_IN_WORD(bit) ((bit) % 32) +#define MVPP2_N_PRS_FLOWS 52 +#define MVPP2_N_RFS_ENTRIES_PER_FLOW 4 + +/* There are 7 supported high-level flows */ +#define MVPP2_N_RFS_RULES (MVPP2_N_RFS_ENTRIES_PER_FLOW * 7) + /* RSS constants */ #define MVPP22_RSS_TABLE_ENTRIES 32 @@ -710,6 +723,7 @@ enum mvpp2_prs_l3_cast { #define MVPP2_DESC_DMA_MASK DMA_BIT_MASK(40) /* Definitions */ +struct mvpp2_dbgfs_entries; /* Shared Packet Processor resources */ struct mvpp2 { @@ -771,6 +785,9 @@ struct mvpp2 { /* Debugfs root entry */ struct dentry *dbgfs_dir; + + /* Debugfs entries private data */ + struct mvpp2_dbgfs_entries *dbgfs_entries; }; struct mvpp2_pcpu_stats { @@ -802,6 +819,37 @@ struct mvpp2_queue_vector { struct cpumask *mask; }; +/* Internal representation of a Flow Steering rule */ +struct mvpp2_rfs_rule { + /* Rule location inside the flow */ + int loc; + + /* Flow type, such as TCP_V4_FLOW, IP6_FLOW, etc. */ + int flow_type; + + /* Index of the C2 TCAM entry handling this rule */ + int c2_index; + + /* Header fields that need to be extracted to match this flow */ + u16 hek_fields; + + /* CLS engine: only c2 is supported for now. */ + u8 engine; + + /* TCAM key and mask for C2-based steering. These fields should be + * encapsulated in a union should we add more engines. + */ + u64 c2_tcam; + u64 c2_tcam_mask; + + struct flow_rule *flow; +}; + +struct mvpp2_ethtool_fs { + struct mvpp2_rfs_rule rule; + struct ethtool_rxnfc rxnfc; +}; + struct mvpp2_port { u8 id; @@ -873,6 +921,10 @@ struct mvpp2_port { /* RSS indirection table */ u32 indir[MVPP22_RSS_TABLE_ENTRIES]; + + /* List of steering rules active on that port */ + struct mvpp2_ethtool_fs *rfs_rules[MVPP2_N_RFS_RULES]; + int n_rfs_rules; }; /* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c index efdb7a656835..4989fb13244f 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c @@ -22,302 +22,302 @@ } \ } -static struct mvpp2_cls_flow cls_flows[MVPP2_N_FLOWS] = { +static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = { /* TCP over IPv4 flows, Not fragmented, no vlan tag */ - MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_UNTAG, MVPP22_CLS_HEK_IP4_5T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP, MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), - MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_UNTAG, MVPP22_CLS_HEK_IP4_5T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP, MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), - MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_UNTAG, MVPP22_CLS_HEK_IP4_5T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP, MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), /* TCP over IPv4 flows, Not fragmented, with vlan tag */ - MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_TAG, MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN, MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP, MVPP2_PRS_IP_MASK), - MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4,
MVPP2_FL_IP4_TCP_NF_TAG, MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN, MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP, MVPP2_PRS_IP_MASK), - MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_TAG, MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN, MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP, MVPP2_PRS_IP_MASK), /* TCP over IPv4 flows, fragmented, no vlan tag */ - MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG, MVPP22_CLS_HEK_IP4_2T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP, MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), - MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG, MVPP22_CLS_HEK_IP4_2T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP, MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), - MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG, MVPP22_CLS_HEK_IP4_2T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP, MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), /* TCP over IPv4 flows, fragmented, with vlan tag */ - MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG, MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN, MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP, MVPP2_PRS_IP_MASK), - MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG, MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN, MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP, MVPP2_PRS_IP_MASK), - MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG, MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN, MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP, MVPP2_PRS_IP_MASK), /* UDP over IPv4 flows, Not fragmented, no vlan tag */ - MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_UNTAG, MVPP22_CLS_HEK_IP4_5T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP, MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), - MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_UNTAG, MVPP22_CLS_HEK_IP4_5T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP, MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), - MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_UNTAG, MVPP22_CLS_HEK_IP4_5T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP, MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), /* UDP over IPv4 flows, Not fragmented, with vlan tag */ - MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_TAG, MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN, MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP, MVPP2_PRS_IP_MASK), - MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_TAG, MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN, MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP, MVPP2_PRS_IP_MASK), - MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_TAG, MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN, MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP, MVPP2_PRS_IP_MASK), /* UDP 
over IPv4 flows, fragmented, no vlan tag */ - MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG, MVPP22_CLS_HEK_IP4_2T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP, MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), - MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG, MVPP22_CLS_HEK_IP4_2T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP, MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), - MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG, MVPP22_CLS_HEK_IP4_2T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP, MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), /* UDP over IPv4 flows, fragmented, with vlan tag */ - MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG, MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN, MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP, MVPP2_PRS_IP_MASK), - MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG, MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN, MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP, MVPP2_PRS_IP_MASK), - MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG, MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN, MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP, MVPP2_PRS_IP_MASK), /* TCP over IPv6 flows, not fragmented, no vlan tag */ - MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_UNTAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_UNTAG, MVPP22_CLS_HEK_IP6_5T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_TCP, MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), - MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_UNTAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_UNTAG, MVPP22_CLS_HEK_IP6_5T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_TCP, MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), /* TCP over IPv6 flows, not fragmented, with vlan tag */ - MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_TAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_TAG, MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN, MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_TCP, MVPP2_PRS_IP_MASK), - MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_TAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_TAG, MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN, MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_TCP, MVPP2_PRS_IP_MASK), /* TCP over IPv6 flows, fragmented, no vlan tag */ - MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_UNTAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_UNTAG, MVPP22_CLS_HEK_IP6_2T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP, MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), - MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_UNTAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_UNTAG, MVPP22_CLS_HEK_IP6_2T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP, MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), /* TCP over IPv6 flows, fragmented, with vlan tag */ - MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_TAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_TAG, MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN, MVPP2_PRS_RI_L3_IP6 | 
MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP, MVPP2_PRS_IP_MASK), - MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_TAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_TAG, MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN, MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP, MVPP2_PRS_IP_MASK), /* UDP over IPv6 flows, not fragmented, no vlan tag */ - MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_UNTAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_UNTAG, MVPP22_CLS_HEK_IP6_5T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_UDP, MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), - MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_UNTAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_UNTAG, MVPP22_CLS_HEK_IP6_5T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_UDP, MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), /* UDP over IPv6 flows, not fragmented, with vlan tag */ - MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_TAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_TAG, MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN, MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_UDP, MVPP2_PRS_IP_MASK), - MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_TAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_TAG, MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN, MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_UDP, MVPP2_PRS_IP_MASK), /* UDP over IPv6 flows, fragmented, no vlan tag */ - MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_UNTAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_UNTAG, MVPP22_CLS_HEK_IP6_2T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP, MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), - MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_UNTAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_UNTAG, MVPP22_CLS_HEK_IP6_2T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP, MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK), /* UDP over IPv6 flows, fragmented, with vlan tag */ - MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_TAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_TAG, MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN, MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP, MVPP2_PRS_IP_MASK), - MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_TAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_TAG, MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN, MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP, MVPP2_PRS_IP_MASK), /* IPv4 flows, no vlan tag */ - MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_UNTAG, MVPP22_CLS_HEK_IP4_2T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4, MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK), - MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_UNTAG, MVPP22_CLS_HEK_IP4_2T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT, MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK), - MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_UNTAG, MVPP22_CLS_HEK_IP4_2T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER, MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK), /* IPv4 flows, with vlan tag */ - MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_TAG, MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN, MVPP2_PRS_RI_L3_IP4, 
MVPP2_PRS_RI_L3_PROTO_MASK), - MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_TAG, MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN, MVPP2_PRS_RI_L3_IP4_OPT, MVPP2_PRS_RI_L3_PROTO_MASK), - MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_TAG, MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN, MVPP2_PRS_RI_L3_IP4_OTHER, MVPP2_PRS_RI_L3_PROTO_MASK), /* IPv6 flows, no vlan tag */ - MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_UNTAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_UNTAG, MVPP22_CLS_HEK_IP6_2T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6, MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK), - MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_UNTAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_UNTAG, MVPP22_CLS_HEK_IP6_2T, MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6, MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK), /* IPv6 flows, with vlan tag */ - MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_TAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_TAG, MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN, MVPP2_PRS_RI_L3_IP6, MVPP2_PRS_RI_L3_PROTO_MASK), - MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_TAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_TAG, MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN, MVPP2_PRS_RI_L3_IP6, MVPP2_PRS_RI_L3_PROTO_MASK), /* Non IP flow, no vlan tag */ - MVPP2_DEF_FLOW(ETHER_FLOW, MVPP2_FL_NON_IP_UNTAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET, MVPP2_FL_NON_IP_UNTAG, 0, MVPP2_PRS_RI_VLAN_NONE, MVPP2_PRS_RI_VLAN_MASK), /* Non IP flow, with vlan tag */ - MVPP2_DEF_FLOW(ETHER_FLOW, MVPP2_FL_NON_IP_TAG, + MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET, MVPP2_FL_NON_IP_TAG, MVPP22_CLS_HEK_OPT_VLAN, 0, 0), }; @@ -344,9 +344,9 @@ static void mvpp2_cls_flow_write(struct mvpp2 *priv, struct mvpp2_cls_flow_entry *fe) { mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index); - mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]); - mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]); - mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]); + mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]); + mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]); + mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]); } u32 mvpp2_cls_lookup_hits(struct mvpp2 *priv, int index) @@ -429,12 +429,6 @@ static void mvpp2_cls_flow_port_id_sel(struct mvpp2_cls_flow_entry *fe, fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL; } -static void mvpp2_cls_flow_seq_set(struct mvpp2_cls_flow_entry *fe, u32 seq) -{ - fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_SEQ(MVPP2_CLS_FLOW_TBL1_SEQ_MASK); - fe->data[1] |= MVPP2_CLS_FLOW_TBL1_SEQ(seq); -} - static void mvpp2_cls_flow_last_set(struct mvpp2_cls_flow_entry *fe, bool is_last) { @@ -454,9 +448,22 @@ static void mvpp2_cls_flow_port_add(struct mvpp2_cls_flow_entry *fe, fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID(port); } +static void mvpp2_cls_flow_port_remove(struct mvpp2_cls_flow_entry *fe, + u32 port) +{ + fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_PORT_ID(port); +} + +static void mvpp2_cls_flow_lu_type_set(struct mvpp2_cls_flow_entry *fe, + u8 lu_type) +{ + fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK); + fe->data[1] |= MVPP2_CLS_FLOW_TBL1_LU_TYPE(lu_type); +} + /* Initialize the parser entry for the given flow */ static void mvpp2_cls_flow_prs_init(struct mvpp2 *priv, - struct mvpp2_cls_flow *flow) + const struct mvpp2_cls_flow *flow) { mvpp2_prs_add_flow(priv, flow->flow_id, flow->prs_ri.ri, flow->prs_ri.ri_mask); @@ -464,7 +471,7 @@ static void 
mvpp2_cls_flow_prs_init(struct mvpp2 *priv, /* Initialize the Lookup Id table entry for the given flow */ static void mvpp2_cls_flow_lkp_init(struct mvpp2 *priv, - struct mvpp2_cls_flow *flow) + const struct mvpp2_cls_flow *flow) { struct mvpp2_cls_lookup_entry le; @@ -477,7 +484,7 @@ static void mvpp2_cls_flow_lkp_init(struct mvpp2 *priv, /* We point on the first lookup in the sequence for the flow, that is * the C2 lookup. */ - le.data |= MVPP2_CLS_LKP_FLOW_PTR(MVPP2_FLOW_C2_ENTRY(flow->flow_id)); + le.data |= MVPP2_CLS_LKP_FLOW_PTR(MVPP2_CLS_FLT_FIRST(flow->flow_id)); /* CLS is always enabled, RSS is enabled/disabled in C2 lookup */ le.data |= MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK; @@ -485,21 +492,111 @@ static void mvpp2_cls_flow_lkp_init(struct mvpp2 *priv, mvpp2_cls_lookup_write(priv, &le); } +static void mvpp2_cls_c2_write(struct mvpp2 *priv, + struct mvpp2_cls_c2_entry *c2) +{ + u32 val; + mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2->index); + + val = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_INV); + if (c2->valid) + val &= ~MVPP22_CLS_C2_TCAM_INV_BIT; + else + val |= MVPP22_CLS_C2_TCAM_INV_BIT; + mvpp2_write(priv, MVPP22_CLS_C2_TCAM_INV, val); + + mvpp2_write(priv, MVPP22_CLS_C2_ACT, c2->act); + + mvpp2_write(priv, MVPP22_CLS_C2_ATTR0, c2->attr[0]); + mvpp2_write(priv, MVPP22_CLS_C2_ATTR1, c2->attr[1]); + mvpp2_write(priv, MVPP22_CLS_C2_ATTR2, c2->attr[2]); + mvpp2_write(priv, MVPP22_CLS_C2_ATTR3, c2->attr[3]); + + mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA0, c2->tcam[0]); + mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA1, c2->tcam[1]); + mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA2, c2->tcam[2]); + mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA3, c2->tcam[3]); + /* Writing TCAM_DATA4 flushes writes to TCAM_DATA0-4 and INV to HW */ + mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA4, c2->tcam[4]); +} + +void mvpp2_cls_c2_read(struct mvpp2 *priv, int index, + struct mvpp2_cls_c2_entry *c2) +{ + u32 val; + mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, index); + + c2->index = index; + + c2->tcam[0] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA0); + c2->tcam[1] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA1); + c2->tcam[2] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA2); + c2->tcam[3] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA3); + c2->tcam[4] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA4); + + c2->act = mvpp2_read(priv, MVPP22_CLS_C2_ACT); + + c2->attr[0] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR0); + c2->attr[1] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR1); + c2->attr[2] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR2); + c2->attr[3] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR3); + + val = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_INV); + c2->valid = !(val & MVPP22_CLS_C2_TCAM_INV_BIT); +} + +static int mvpp2_cls_ethtool_flow_to_type(int flow_type) +{ + switch (flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) { + case TCP_V4_FLOW: + return MVPP22_FLOW_TCP4; + case TCP_V6_FLOW: + return MVPP22_FLOW_TCP6; + case UDP_V4_FLOW: + return MVPP22_FLOW_UDP4; + case UDP_V6_FLOW: + return MVPP22_FLOW_UDP6; + case IPV4_FLOW: + return MVPP22_FLOW_IP4; + case IPV6_FLOW: + return MVPP22_FLOW_IP6; + default: + return -EOPNOTSUPP; + } +} + +static int mvpp2_cls_c2_port_flow_index(struct mvpp2_port *port, int loc) +{ + return MVPP22_CLS_C2_RFS_LOC(port->id, loc); +} + /* Initialize the flow table entries for the given flow */ -static void mvpp2_cls_flow_init(struct mvpp2 *priv, struct mvpp2_cls_flow *flow) +static void mvpp2_cls_flow_init(struct mvpp2 *priv, + const struct mvpp2_cls_flow *flow) { struct mvpp2_cls_flow_entry fe; - int i; + int i, pri = 0; + + /* Assign 
default values to all entries in the flow */ + for (i = MVPP2_CLS_FLT_FIRST(flow->flow_id); + i <= MVPP2_CLS_FLT_LAST(flow->flow_id); i++) { + memset(&fe, 0, sizeof(fe)); + fe.index = i; + mvpp2_cls_flow_pri_set(&fe, pri++); + + if (i == MVPP2_CLS_FLT_LAST(flow->flow_id)) + mvpp2_cls_flow_last_set(&fe, 1); + + mvpp2_cls_flow_write(priv, &fe); + } - /* C2 lookup */ - memset(&fe, 0, sizeof(fe)); - fe.index = MVPP2_FLOW_C2_ENTRY(flow->flow_id); + /* RSS config C2 lookup */ + mvpp2_cls_flow_read(priv, MVPP2_CLS_FLT_C2_RSS_ENTRY(flow->flow_id), + &fe); mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C2); mvpp2_cls_flow_port_id_sel(&fe, true); - mvpp2_cls_flow_last_set(&fe, 0); - mvpp2_cls_flow_pri_set(&fe, 0); - mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_FIRST1); + mvpp2_cls_flow_lu_type_set(&fe, MVPP22_FLOW_ETHERNET); /* Add all ports */ for (i = 0; i < MVPP2_MAX_PORTS; i++) @@ -509,22 +606,19 @@ static void mvpp2_cls_flow_init(struct mvpp2 *priv, struct mvpp2_cls_flow *flow) /* C3Hx lookups */ for (i = 0; i < MVPP2_MAX_PORTS; i++) { - memset(&fe, 0, sizeof(fe)); - fe.index = MVPP2_PORT_FLOW_HASH_ENTRY(i, flow->flow_id); + mvpp2_cls_flow_read(priv, + MVPP2_CLS_FLT_HASH_ENTRY(i, flow->flow_id), + &fe); + /* Set a default engine. Will be overwritten when setting the + * real HEK parameters + */ + mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C3HA); mvpp2_cls_flow_port_id_sel(&fe, true); - mvpp2_cls_flow_pri_set(&fe, i + 1); - mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_MIDDLE); mvpp2_cls_flow_port_add(&fe, BIT(i)); mvpp2_cls_flow_write(priv, &fe); } - - /* Update the last entry */ - mvpp2_cls_flow_last_set(&fe, 1); - mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_LAST); - - mvpp2_cls_flow_write(priv, &fe); } /* Adds a field to the Header Extracted Key generation parameters*/ @@ -555,6 +649,9 @@ static int mvpp2_flow_set_hek_fields(struct mvpp2_cls_flow_entry *fe, for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) { switch (BIT(i)) { + case MVPP22_CLS_HEK_OPT_MAC_DA: + field_id = MVPP22_CLS_FIELD_MAC_DA; + break; case MVPP22_CLS_HEK_OPT_VLAN: field_id = MVPP22_CLS_FIELD_VLAN; break; @@ -586,9 +683,29 @@ static int mvpp2_flow_set_hek_fields(struct mvpp2_cls_flow_entry *fe, return 0; } -struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow) +/* Returns the size, in bits, of the corresponding HEK field */ +static int mvpp2_cls_hek_field_size(u32 field) { - if (flow >= MVPP2_N_FLOWS) + switch (field) { + case MVPP22_CLS_HEK_OPT_MAC_DA: + return 48; + case MVPP22_CLS_HEK_OPT_IP4SA: + case MVPP22_CLS_HEK_OPT_IP4DA: + return 32; + case MVPP22_CLS_HEK_OPT_IP6SA: + case MVPP22_CLS_HEK_OPT_IP6DA: + return 128; + case MVPP22_CLS_HEK_OPT_L4SIP: + case MVPP22_CLS_HEK_OPT_L4DIP: + return 16; + default: + return -1; + } +} + +const struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow) +{ + if (flow >= MVPP2_N_PRS_FLOWS) return NULL; return &cls_flows[flow]; @@ -608,21 +725,17 @@ struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow) static int mvpp2_port_rss_hash_opts_set(struct mvpp2_port *port, int flow_type, u16 requested_opts) { + const struct mvpp2_cls_flow *flow; struct mvpp2_cls_flow_entry fe; - struct mvpp2_cls_flow *flow; int i, engine, flow_index; u16 hash_opts; - for (i = 0; i < MVPP2_N_FLOWS; i++) { + for_each_cls_flow_id_with_type(i, flow_type) { flow = mvpp2_cls_flow_get(i); if (!flow) return -EINVAL; - if (flow->flow_type != flow_type) - continue; - - flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(port->id, - flow->flow_id); + flow_index = MVPP2_CLS_FLT_HASH_ENTRY(port->id, flow->flow_id); 
mvpp2_cls_flow_read(port->priv, flow_index, &fe); @@ -697,21 +810,17 @@ u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe) */ static u16 mvpp2_port_rss_hash_opts_get(struct mvpp2_port *port, int flow_type) { + const struct mvpp2_cls_flow *flow; struct mvpp2_cls_flow_entry fe; - struct mvpp2_cls_flow *flow; int i, flow_index; u16 hash_opts = 0; - for (i = 0; i < MVPP2_N_FLOWS; i++) { + for_each_cls_flow_id_with_type(i, flow_type) { flow = mvpp2_cls_flow_get(i); if (!flow) return 0; - if (flow->flow_type != flow_type) - continue; - - flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(port->id, - flow->flow_id); + flow_index = MVPP2_CLS_FLT_HASH_ENTRY(port->id, flow->flow_id); mvpp2_cls_flow_read(port->priv, flow_index, &fe); @@ -723,10 +832,10 @@ static u16 mvpp2_port_rss_hash_opts_get(struct mvpp2_port *port, int flow_type) static void mvpp2_cls_port_init_flows(struct mvpp2 *priv) { - struct mvpp2_cls_flow *flow; + const struct mvpp2_cls_flow *flow; int i; - for (i = 0; i < MVPP2_N_FLOWS; i++) { + for (i = 0; i < MVPP2_N_PRS_FLOWS; i++) { flow = mvpp2_cls_flow_get(i); if (!flow) break; @@ -737,47 +846,6 @@ static void mvpp2_cls_port_init_flows(struct mvpp2 *priv) } } -static void mvpp2_cls_c2_write(struct mvpp2 *priv, - struct mvpp2_cls_c2_entry *c2) -{ - mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2->index); - - /* Write TCAM */ - mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA0, c2->tcam[0]); - mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA1, c2->tcam[1]); - mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA2, c2->tcam[2]); - mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA3, c2->tcam[3]); - mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA4, c2->tcam[4]); - - mvpp2_write(priv, MVPP22_CLS_C2_ACT, c2->act); - - mvpp2_write(priv, MVPP22_CLS_C2_ATTR0, c2->attr[0]); - mvpp2_write(priv, MVPP22_CLS_C2_ATTR1, c2->attr[1]); - mvpp2_write(priv, MVPP22_CLS_C2_ATTR2, c2->attr[2]); - mvpp2_write(priv, MVPP22_CLS_C2_ATTR3, c2->attr[3]); -} - -void mvpp2_cls_c2_read(struct mvpp2 *priv, int index, - struct mvpp2_cls_c2_entry *c2) -{ - mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, index); - - c2->index = index; - - c2->tcam[0] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA0); - c2->tcam[1] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA1); - c2->tcam[2] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA2); - c2->tcam[3] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA3); - c2->tcam[4] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA4); - - c2->act = mvpp2_read(priv, MVPP22_CLS_C2_ACT); - - c2->attr[0] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR0); - c2->attr[1] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR1); - c2->attr[2] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR2); - c2->attr[3] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR3); -} - static void mvpp2_port_c2_cls_init(struct mvpp2_port *port) { struct mvpp2_cls_c2_entry c2; @@ -791,6 +859,10 @@ static void mvpp2_port_c2_cls_init(struct mvpp2_port *port) c2.tcam[4] = MVPP22_CLS_C2_PORT_ID(pmap); c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_PORT_ID(pmap)); + /* Match on Lookup Type */ + c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK)); + c2.tcam[4] |= MVPP22_CLS_C2_LU_TYPE(MVPP22_FLOW_ETHERNET); + /* Update RSS status after matching this entry */ c2.act = MVPP22_CLS_C2_ACT_RSS_EN(MVPP22_C2_UPD_LOCK); @@ -809,6 +881,8 @@ static void mvpp2_port_c2_cls_init(struct mvpp2_port *port) c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) | MVPP22_CLS_C2_ATTR0_QLOW(ql); + c2.valid = true; + mvpp2_cls_c2_write(port->priv, &c2); } @@ -817,6 +891,7 @@ void mvpp2_cls_init(struct mvpp2 *priv) { struct mvpp2_cls_lookup_entry le; 
struct mvpp2_cls_flow_entry fe; + struct mvpp2_cls_c2_entry c2; int index; /* Enable classifier */ @@ -840,6 +915,14 @@ void mvpp2_cls_init(struct mvpp2 *priv) mvpp2_cls_lookup_write(priv, &le); } + /* Clear C2 TCAM engine table */ + memset(&c2, 0, sizeof(c2)); + c2.valid = false; + for (index = 0; index < MVPP22_CLS_C2_N_ENTRIES; index++) { + c2.index = index; + mvpp2_cls_c2_write(priv, &c2); + } + mvpp2_cls_port_init_flows(priv); } @@ -902,16 +985,28 @@ static void mvpp2_rss_port_c2_disable(struct mvpp2_port *port) mvpp2_cls_c2_write(port->priv, &c2); } -void mvpp22_rss_enable(struct mvpp2_port *port) +void mvpp22_port_rss_enable(struct mvpp2_port *port) { mvpp2_rss_port_c2_enable(port); } -void mvpp22_rss_disable(struct mvpp2_port *port) +void mvpp22_port_rss_disable(struct mvpp2_port *port) { mvpp2_rss_port_c2_disable(port); } +static void mvpp22_port_c2_lookup_disable(struct mvpp2_port *port, int entry) +{ + struct mvpp2_cls_c2_entry c2; + + mvpp2_cls_c2_read(port->priv, entry, &c2); + + /* Clear the port map so that the entry doesn't match anymore */ + c2.tcam[4] &= ~(MVPP22_CLS_C2_PORT_ID(BIT(port->id))); + + mvpp2_cls_c2_write(port->priv, &c2); +} + /* Set CPU queue number for oversize packets */ void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port) { @@ -928,6 +1023,290 @@ void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port) mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val); } +static int mvpp2_port_c2_tcam_rule_add(struct mvpp2_port *port, + struct mvpp2_rfs_rule *rule) +{ + struct flow_action_entry *act; + struct mvpp2_cls_c2_entry c2; + u8 qh, ql, pmap; + + memset(&c2, 0, sizeof(c2)); + + c2.index = mvpp2_cls_c2_port_flow_index(port, rule->loc); + if (c2.index < 0) + return -EINVAL; + + act = &rule->flow->action.entries[0]; + + rule->c2_index = c2.index; + + c2.tcam[0] = (rule->c2_tcam & 0xffff) | + ((rule->c2_tcam_mask & 0xffff) << 16); + c2.tcam[1] = ((rule->c2_tcam >> 16) & 0xffff) | + (((rule->c2_tcam_mask >> 16) & 0xffff) << 16); + c2.tcam[2] = ((rule->c2_tcam >> 32) & 0xffff) | + (((rule->c2_tcam_mask >> 32) & 0xffff) << 16); + c2.tcam[3] = ((rule->c2_tcam >> 48) & 0xffff) | + (((rule->c2_tcam_mask >> 48) & 0xffff) << 16); + + pmap = BIT(port->id); + c2.tcam[4] = MVPP22_CLS_C2_PORT_ID(pmap); + c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_PORT_ID(pmap)); + + /* Match on Lookup Type */ + c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK)); + c2.tcam[4] |= MVPP22_CLS_C2_LU_TYPE(rule->loc); + + if (act->id == FLOW_ACTION_DROP) { + c2.act = MVPP22_CLS_C2_ACT_COLOR(MVPP22_C2_COL_RED_LOCK); + } else { + /* We want to keep the default color derived from the Header + * Parser drop entries, for VLAN and MAC filtering. This will + * assign a default color of Green or Red, and we want matches + * with a non-drop action to keep that color. 
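+ *
+ * A worked illustration of the c2_tcam packing earlier in this function,
+ * using hypothetical values: for a rule matching only the UDP destination
+ * port 1234 with an exact mask, mvpp2_cls_c2_build_match() below starts
+ * at offs = 64 and subtracts the 16-bit L4DIP field size, so it builds
+ *
+ *   rule->c2_tcam      = 0x04d2ULL << 48;
+ *   rule->c2_tcam_mask = 0xffffULL << 48;
+ *
+ * and the packing above then produces
+ *
+ *   c2.tcam[3] = 0x04d2 | (0xffff << 16);   -> 0xffff04d2
+ *
+ * i.e. the match data sits in the low half-word of each TCAM word and
+ * its mask in the high half-word.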
+ */ + c2.act = MVPP22_CLS_C2_ACT_COLOR(MVPP22_C2_COL_NO_UPD_LOCK); + + /* Mark packet as "forwarded to software", needed for RSS */ + c2.act |= MVPP22_CLS_C2_ACT_FWD(MVPP22_C2_FWD_SW_LOCK); + + c2.act |= MVPP22_CLS_C2_ACT_QHIGH(MVPP22_C2_UPD_LOCK) | + MVPP22_CLS_C2_ACT_QLOW(MVPP22_C2_UPD_LOCK); + + qh = ((act->queue.index + port->first_rxq) >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK; + ql = (act->queue.index + port->first_rxq) & MVPP22_CLS_C2_ATTR0_QLOW_MASK; + + c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) | + MVPP22_CLS_C2_ATTR0_QLOW(ql); + } + + c2.valid = true; + + mvpp2_cls_c2_write(port->priv, &c2); + + return 0; +} + +static int mvpp2_port_c2_rfs_rule_insert(struct mvpp2_port *port, + struct mvpp2_rfs_rule *rule) +{ + return mvpp2_port_c2_tcam_rule_add(port, rule); +} + +static int mvpp2_port_cls_rfs_rule_remove(struct mvpp2_port *port, + struct mvpp2_rfs_rule *rule) +{ + const struct mvpp2_cls_flow *flow; + struct mvpp2_cls_flow_entry fe; + int index, i; + + for_each_cls_flow_id_containing_type(i, rule->flow_type) { + flow = mvpp2_cls_flow_get(i); + if (!flow) + return 0; + + index = MVPP2_CLS_FLT_C2_RFS(port->id, flow->flow_id, rule->loc); + + mvpp2_cls_flow_read(port->priv, index, &fe); + mvpp2_cls_flow_port_remove(&fe, BIT(port->id)); + mvpp2_cls_flow_write(port->priv, &fe); + } + + if (rule->c2_index >= 0) + mvpp22_port_c2_lookup_disable(port, rule->c2_index); + + return 0; +} + +static int mvpp2_port_flt_rfs_rule_insert(struct mvpp2_port *port, + struct mvpp2_rfs_rule *rule) +{ + const struct mvpp2_cls_flow *flow; + struct mvpp2 *priv = port->priv; + struct mvpp2_cls_flow_entry fe; + int index, ret, i; + + if (rule->engine != MVPP22_CLS_ENGINE_C2) + return -EOPNOTSUPP; + + ret = mvpp2_port_c2_rfs_rule_insert(port, rule); + if (ret) + return ret; + + for_each_cls_flow_id_containing_type(i, rule->flow_type) { + flow = mvpp2_cls_flow_get(i); + if (!flow) + return 0; + + index = MVPP2_CLS_FLT_C2_RFS(port->id, flow->flow_id, rule->loc); + + mvpp2_cls_flow_read(priv, index, &fe); + mvpp2_cls_flow_eng_set(&fe, rule->engine); + mvpp2_cls_flow_port_id_sel(&fe, true); + mvpp2_flow_set_hek_fields(&fe, rule->hek_fields); + mvpp2_cls_flow_lu_type_set(&fe, rule->loc); + mvpp2_cls_flow_port_add(&fe, 0xf); + + mvpp2_cls_flow_write(priv, &fe); + } + + return 0; +} + +static int mvpp2_cls_c2_build_match(struct mvpp2_rfs_rule *rule) +{ + struct flow_rule *flow = rule->flow; + struct flow_action_entry *act; + int offs = 64; + + act = &flow->action.entries[0]; + + if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_match_ports match; + + flow_rule_match_ports(flow, &match); + if (match.mask->src) { + rule->hek_fields |= MVPP22_CLS_HEK_OPT_L4SIP; + offs -= mvpp2_cls_hek_field_size(MVPP22_CLS_HEK_OPT_L4SIP); + + rule->c2_tcam |= ((u64)ntohs(match.key->src)) << offs; + rule->c2_tcam_mask |= ((u64)ntohs(match.mask->src)) << offs; + } + + if (match.mask->dst) { + rule->hek_fields |= MVPP22_CLS_HEK_OPT_L4DIP; + offs -= mvpp2_cls_hek_field_size(MVPP22_CLS_HEK_OPT_L4DIP); + + rule->c2_tcam |= ((u64)ntohs(match.key->dst)) << offs; + rule->c2_tcam_mask |= ((u64)ntohs(match.mask->dst)) << offs; + } + } + + if (hweight16(rule->hek_fields) > MVPP2_FLOW_N_FIELDS) + return -EOPNOTSUPP; + + return 0; +} + +static int mvpp2_cls_rfs_parse_rule(struct mvpp2_rfs_rule *rule) +{ + struct flow_rule *flow = rule->flow; + struct flow_action_entry *act; + + act = &flow->action.entries[0]; + if (act->id != FLOW_ACTION_QUEUE && act->id != FLOW_ACTION_DROP) + return -EOPNOTSUPP; + + /* For now, only use the C2 
engine which has a HEK size limited to 64 + * bits for TCAM matching. + */ + rule->engine = MVPP22_CLS_ENGINE_C2; + + if (mvpp2_cls_c2_build_match(rule)) + return -EINVAL; + + return 0; +} + +int mvpp2_ethtool_cls_rule_get(struct mvpp2_port *port, + struct ethtool_rxnfc *rxnfc) +{ + struct mvpp2_ethtool_fs *efs; + + if (rxnfc->fs.location >= MVPP2_N_RFS_RULES) + return -EINVAL; + + efs = port->rfs_rules[rxnfc->fs.location]; + if (!efs) + return -ENOENT; + + memcpy(rxnfc, &efs->rxnfc, sizeof(efs->rxnfc)); + + return 0; +} + +int mvpp2_ethtool_cls_rule_ins(struct mvpp2_port *port, + struct ethtool_rxnfc *info) +{ + struct ethtool_rx_flow_spec_input input = {}; + struct ethtool_rx_flow_rule *ethtool_rule; + struct mvpp2_ethtool_fs *efs, *old_efs; + int ret = 0; + + if (info->fs.location >= 4 || + info->fs.location < 0) + return -EINVAL; + + efs = kzalloc(sizeof(*efs), GFP_KERNEL); + if (!efs) + return -ENOMEM; + + input.fs = &info->fs; + + ethtool_rule = ethtool_rx_flow_rule_create(&input); + if (IS_ERR(ethtool_rule)) { + ret = PTR_ERR(ethtool_rule); + goto clean_rule; + } + + efs->rule.flow = ethtool_rule->rule; + efs->rule.flow_type = mvpp2_cls_ethtool_flow_to_type(info->fs.flow_type); + + ret = mvpp2_cls_rfs_parse_rule(&efs->rule); + if (ret) + goto clean_eth_rule; + + efs->rule.loc = info->fs.location; + + /* Replace an already existing rule */ + if (port->rfs_rules[efs->rule.loc]) { + old_efs = port->rfs_rules[efs->rule.loc]; + ret = mvpp2_port_cls_rfs_rule_remove(port, &old_efs->rule); + if (ret) + goto clean_eth_rule; + kfree(old_efs); + port->n_rfs_rules--; + } + + ret = mvpp2_port_flt_rfs_rule_insert(port, &efs->rule); + if (ret) + goto clean_eth_rule; + + memcpy(&efs->rxnfc, info, sizeof(*info)); + port->rfs_rules[efs->rule.loc] = efs; + port->n_rfs_rules++; + + return ret; + +clean_eth_rule: + ethtool_rx_flow_rule_destroy(ethtool_rule); +clean_rule: + kfree(efs); + return ret; +} + +int mvpp2_ethtool_cls_rule_del(struct mvpp2_port *port, + struct ethtool_rxnfc *info) +{ + struct mvpp2_ethtool_fs *efs; + int ret; + + efs = port->rfs_rules[info->fs.location]; + if (!efs) + return -EINVAL; + + /* Remove the rule from the engines. 
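+ *
+ * Rules reaching this function were installed through the standard
+ * ethtool rxnfc interface; a usage sketch (interface name assumed):
+ *
+ *   ethtool -N eth0 flow-type udp4 dst-port 1234 action 2 loc 0
+ *   ethtool -N eth0 delete 0
+ *
+ * The first command steers UDPv4 packets with destination port 1234 to
+ * rx queue 2 using rule location 0; the second deletes that rule, which
+ * lands here.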
*/ + ret = mvpp2_port_cls_rfs_rule_remove(port, &efs->rule); + if (ret) + return ret; + + port->n_rfs_rules--; + port->rfs_rules[info->fs.location] = NULL; + kfree(efs); + + return 0; +} + static inline u32 mvpp22_rxfh_indir(struct mvpp2_port *port, u32 rxq) { int nrxqs, cpu, cpus = num_possible_cpus(); @@ -965,19 +1344,22 @@ void mvpp22_rss_fill_table(struct mvpp2_port *port, u32 table) int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info) { u16 hash_opts = 0; + u32 flow_type; - switch (info->flow_type) { - case TCP_V4_FLOW: - case UDP_V4_FLOW: - case TCP_V6_FLOW: - case UDP_V6_FLOW: + flow_type = mvpp2_cls_ethtool_flow_to_type(info->flow_type); + + switch (flow_type) { + case MVPP22_FLOW_TCP4: + case MVPP22_FLOW_UDP4: + case MVPP22_FLOW_TCP6: + case MVPP22_FLOW_UDP6: if (info->data & RXH_L4_B_0_1) hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP; if (info->data & RXH_L4_B_2_3) hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP; /* Fallthrough */ - case IPV4_FLOW: - case IPV6_FLOW: + case MVPP22_FLOW_IP4: + case MVPP22_FLOW_IP6: if (info->data & RXH_L2DA) hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA; if (info->data & RXH_VLAN) @@ -994,15 +1376,18 @@ int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info) default: return -EOPNOTSUPP; } - return mvpp2_port_rss_hash_opts_set(port, info->flow_type, hash_opts); + return mvpp2_port_rss_hash_opts_set(port, flow_type, hash_opts); } int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info) { unsigned long hash_opts; + u32 flow_type; int i; - hash_opts = mvpp2_port_rss_hash_opts_get(port, info->flow_type); + flow_type = mvpp2_cls_ethtool_flow_to_type(info->flow_type); + + hash_opts = mvpp2_port_rss_hash_opts_get(port, flow_type); info->data = 0; for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) { @@ -1037,7 +1422,7 @@ int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info) return 0; } -void mvpp22_rss_port_init(struct mvpp2_port *port) +void mvpp22_port_rss_init(struct mvpp2_port *port) { struct mvpp2 *priv = port->priv; int i; @@ -1065,10 +1450,10 @@ void mvpp22_rss_port_init(struct mvpp2_port *port) mvpp22_rss_fill_table(port, port->id); /* Configure default flows */ - mvpp2_port_rss_hash_opts_set(port, IPV4_FLOW, MVPP22_CLS_HEK_IP4_2T); - mvpp2_port_rss_hash_opts_set(port, IPV6_FLOW, MVPP22_CLS_HEK_IP6_2T); - mvpp2_port_rss_hash_opts_set(port, TCP_V4_FLOW, MVPP22_CLS_HEK_IP4_5T); - mvpp2_port_rss_hash_opts_set(port, TCP_V6_FLOW, MVPP22_CLS_HEK_IP6_5T); - mvpp2_port_rss_hash_opts_set(port, UDP_V4_FLOW, MVPP22_CLS_HEK_IP4_5T); - mvpp2_port_rss_hash_opts_set(port, UDP_V6_FLOW, MVPP22_CLS_HEK_IP6_5T); + mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_IP4, MVPP22_CLS_HEK_IP4_2T); + mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_IP6, MVPP22_CLS_HEK_IP6_2T); + mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_TCP4, MVPP22_CLS_HEK_IP4_5T); + mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_TCP6, MVPP22_CLS_HEK_IP6_5T); + mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_UDP4, MVPP22_CLS_HEK_IP4_5T); + mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_UDP6, MVPP22_CLS_HEK_IP6_5T); } diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h index 089f05f29891..56b617375a65 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h @@ -71,14 +71,6 @@ enum mvpp2_cls_field_id { MVPP22_CLS_FIELD_L4DIP = 0x1e, }; -enum mvpp2_cls_flow_seq { - MVPP2_CLS_FLOW_SEQ_NORMAL = 0, - 
MVPP2_CLS_FLOW_SEQ_FIRST1, - MVPP2_CLS_FLOW_SEQ_FIRST2, - MVPP2_CLS_FLOW_SEQ_LAST, - MVPP2_CLS_FLOW_SEQ_MIDDLE -}; - /* Classifier C2 engine constants */ #define MVPP22_CLS_C2_TCAM_EN(data) ((data) << 16) @@ -100,39 +92,62 @@ enum mvpp22_cls_c2_fwd_action { MVPP22_C2_FWD_HW_LOW_LAT_LOCK, }; +enum mvpp22_cls_c2_color_action { + MVPP22_C2_COL_NO_UPD = 0, + MVPP22_C2_COL_NO_UPD_LOCK, + MVPP22_C2_COL_GREEN, + MVPP22_C2_COL_GREEN_LOCK, + MVPP22_C2_COL_YELLOW, + MVPP22_C2_COL_YELLOW_LOCK, + MVPP22_C2_COL_RED, /* Drop */ + MVPP22_C2_COL_RED_LOCK, /* Drop */ +}; + #define MVPP2_CLS_C2_TCAM_WORDS 5 #define MVPP2_CLS_C2_ATTR_WORDS 5 struct mvpp2_cls_c2_entry { u32 index; + /* TCAM lookup key */ u32 tcam[MVPP2_CLS_C2_TCAM_WORDS]; + /* Actions to perform upon TCAM match */ u32 act; + /* Attributes relative to the actions to perform */ u32 attr[MVPP2_CLS_C2_ATTR_WORDS]; + /* Entry validity */ + u8 valid; }; +#define MVPP22_FLOW_ETHER_BIT BIT(0) +#define MVPP22_FLOW_IP4_BIT BIT(1) +#define MVPP22_FLOW_IP6_BIT BIT(2) +#define MVPP22_FLOW_TCP_BIT BIT(3) +#define MVPP22_FLOW_UDP_BIT BIT(4) + +#define MVPP22_FLOW_TCP4 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP4_BIT | MVPP22_FLOW_TCP_BIT) +#define MVPP22_FLOW_TCP6 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP6_BIT | MVPP22_FLOW_TCP_BIT) +#define MVPP22_FLOW_UDP4 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP4_BIT | MVPP22_FLOW_UDP_BIT) +#define MVPP22_FLOW_UDP6 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP6_BIT | MVPP22_FLOW_UDP_BIT) +#define MVPP22_FLOW_IP4 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP4_BIT) +#define MVPP22_FLOW_IP6 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP6_BIT) +#define MVPP22_FLOW_ETHERNET (MVPP22_FLOW_ETHER_BIT) + /* Classifier C2 engine entries */ -#define MVPP22_CLS_C2_RSS_ENTRY(port) (port) -#define MVPP22_CLS_C2_N_ENTRIES MVPP2_MAX_PORTS +#define MVPP22_CLS_C2_N_ENTRIES 256 -/* RSS flow entries in the flow table. We have 2 entries per port for RSS. - * - * The first performs a lookup using the C2 TCAM engine, to tag the - * packet for software forwarding (needed for RSS), enable or disable RSS, and - * assign the default rx queue. - * - * The second configures the hash generation, by specifying which fields of the - * packet header are used to generate the hash, and specifies the relevant hash - * engine to use. 
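As a quick sanity check of the per-port C2 TCAM layout defined just below, the values follow from MVPP2_N_RFS_ENTRIES_PER_FLOW == 4 (added in mvpp2.h above):

    MVPP22_CLS_C2_PORT_RANGE    = 4 + 1 = 5
    MVPP22_CLS_C2_PORT_FIRST(0) = 0    -> port 0 owns C2 entries 0..4
    MVPP22_CLS_C2_RFS_LOC(0, n) = n    -> entries 0..3 hold ethtool steering rules
    MVPP22_CLS_C2_RSS_ENTRY(0)  = 4    -> the last entry holds the RSS/default-rxq config
    MVPP22_CLS_C2_PORT_FIRST(1) = 5    -> port 1 owns entries 5..9, and so on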
+/* Number of per-port dedicated entries in the C2 TCAM */ +#define MVPP22_CLS_C2_PORT_N_FLOWS MVPP2_N_RFS_ENTRIES_PER_FLOW + +/* Each port has one range per flow type + one entry controlling the global RSS + * setting and the default rx queue */ -#define MVPP22_RSS_FLOW_C2_OFFS 0 -#define MVPP22_RSS_FLOW_HASH_OFFS 1 -#define MVPP22_RSS_FLOW_SIZE (MVPP22_RSS_FLOW_HASH_OFFS + 1) +#define MVPP22_CLS_C2_PORT_RANGE (MVPP22_CLS_C2_PORT_N_FLOWS + 1) +#define MVPP22_CLS_C2_PORT_FIRST(p) ((p) * MVPP22_CLS_C2_PORT_RANGE) +#define MVPP22_CLS_C2_RSS_ENTRY(p) (MVPP22_CLS_C2_PORT_FIRST((p) + 1) - 1) -#define MVPP22_RSS_FLOW_C2(port) ((port) * MVPP22_RSS_FLOW_SIZE + \ - MVPP22_RSS_FLOW_C2_OFFS) -#define MVPP22_RSS_FLOW_HASH(port) ((port) * MVPP22_RSS_FLOW_SIZE + \ - MVPP22_RSS_FLOW_HASH_OFFS) -#define MVPP22_RSS_FLOW_FIRST(port) MVPP22_RSS_FLOW_C2(port) +#define MVPP22_CLS_C2_PORT_FLOW_FIRST(p) (MVPP22_CLS_C2_PORT_FIRST(p)) + +#define MVPP22_CLS_C2_RFS_LOC(p, loc) (MVPP22_CLS_C2_PORT_FLOW_FIRST(p) + (loc)) /* Packet flow ID */ enum mvpp2_prs_flow { @@ -162,6 +177,11 @@ enum mvpp2_prs_flow { MVPP2_FL_LAST, }; +/* LU Type defined for all engines, and specified in the flow table */ +#define MVPP2_CLS_LU_TYPE_MASK 0x3f + +#define MVPP2_N_FLOWS (MVPP2_FL_LAST - MVPP2_FL_START) + struct mvpp2_cls_flow { /* The L2-L4 traffic flow type */ int flow_type; @@ -176,12 +196,48 @@ struct mvpp2_cls_flow { struct mvpp2_prs_result_info prs_ri; }; -#define MVPP2_N_FLOWS 52 +#define MVPP2_CLS_FLT_ENTRIES_PER_FLOW (MVPP2_MAX_PORTS + 1 + 16) +#define MVPP2_CLS_FLT_FIRST(id) (((id) - MVPP2_FL_START) * \ + MVPP2_CLS_FLT_ENTRIES_PER_FLOW) + +#define MVPP2_CLS_FLT_C2_RFS(port, id, rfs_n) (MVPP2_CLS_FLT_FIRST(id) + \ + ((port) * MVPP2_MAX_PORTS) + \ + (rfs_n)) + +#define MVPP2_CLS_FLT_C2_RSS_ENTRY(id) (MVPP2_CLS_FLT_C2_RFS(MVPP2_MAX_PORTS, id, 0)) +#define MVPP2_CLS_FLT_HASH_ENTRY(port, id) (MVPP2_CLS_FLT_C2_RSS_ENTRY(id) + 1 + (port)) +#define MVPP2_CLS_FLT_LAST(id) (MVPP2_CLS_FLT_FIRST(id) + \ + MVPP2_CLS_FLT_ENTRIES_PER_FLOW - 1) + +/* Iterate on each classifier flow id. Sets 'i' to be the index of the first + * entry in the cls_flows table for each different flow_id. + * This relies on entries having the same flow_id in the cls_flows table being + * contiguous. + */ +#define for_each_cls_flow_id(i) \ + for ((i) = 0; (i) < MVPP2_N_PRS_FLOWS; (i)++) \ + if ((i) > 0 && \ + cls_flows[(i)].flow_id == cls_flows[(i) - 1].flow_id) \ + continue; \ + else + +/* Iterate on each classifier flow that has a given flow_type. Sets 'i' to be + * the index of the first entry in the cls_flows table for each different flow_id + * that has the given flow_type. This allows operating on all flows that + * match a given ethtool flow type.
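+ *
+ * For orientation, with MVPP2_MAX_PORTS == 4 the flow-table layout defined
+ * above works out to 21 entries per flow_id:
+ *
+ *   MVPP2_CLS_FLT_FIRST(id) + 0..15   C2 RFS entries (4 ports x 4 rule slots)
+ *   MVPP2_CLS_FLT_FIRST(id) + 16      the C2 RSS entry
+ *   MVPP2_CLS_FLT_FIRST(id) + 17..20  the per-port C3Hx hash entries
+ *   MVPP2_CLS_FLT_LAST(id) == MVPP2_CLS_FLT_FIRST(id) + 20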
+ */ +#define for_each_cls_flow_id_with_type(i, type) \ + for_each_cls_flow_id((i)) \ + if (cls_flows[(i)].flow_type != (type)) \ + continue; \ + else + +#define for_each_cls_flow_id_containing_type(i, type) \ + for_each_cls_flow_id((i)) \ + if ((cls_flows[(i)].flow_type & (type)) != (type)) \ + continue; \ + else -#define MVPP2_ENTRIES_PER_FLOW (MVPP2_MAX_PORTS + 1) -#define MVPP2_FLOW_C2_ENTRY(id) ((id) * MVPP2_ENTRIES_PER_FLOW) -#define MVPP2_PORT_FLOW_HASH_ENTRY(port, id) ((id) * MVPP2_ENTRIES_PER_FLOW + \ - (port) + 1) struct mvpp2_cls_flow_entry { u32 index; u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS]; @@ -194,11 +250,10 @@ struct mvpp2_cls_lookup_entry { }; void mvpp22_rss_fill_table(struct mvpp2_port *port, u32 table); +void mvpp22_port_rss_init(struct mvpp2_port *port); -void mvpp22_rss_port_init(struct mvpp2_port *port); - -void mvpp22_rss_enable(struct mvpp2_port *port); -void mvpp22_rss_disable(struct mvpp2_port *port); +void mvpp22_port_rss_enable(struct mvpp2_port *port); +void mvpp22_port_rss_disable(struct mvpp2_port *port); int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info); int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info); @@ -213,7 +268,7 @@ int mvpp2_cls_flow_eng_get(struct mvpp2_cls_flow_entry *fe); u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe); -struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow); +const struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow); u32 mvpp2_cls_flow_hits(struct mvpp2 *priv, int index); @@ -230,4 +285,13 @@ u32 mvpp2_cls_c2_hit_count(struct mvpp2 *priv, int c2_index); void mvpp2_cls_c2_read(struct mvpp2 *priv, int index, struct mvpp2_cls_c2_entry *c2); +int mvpp2_ethtool_cls_rule_get(struct mvpp2_port *port, + struct ethtool_rxnfc *rxnfc); + +int mvpp2_ethtool_cls_rule_ins(struct mvpp2_port *port, + struct ethtool_rxnfc *info); + +int mvpp2_ethtool_cls_rule_del(struct mvpp2_port *port, + struct ethtool_rxnfc *info); + #endif diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c index f9744a61e5dd..0ee39ea47b6b 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c @@ -18,22 +18,48 @@ struct mvpp2_dbgfs_prs_entry { struct mvpp2 *priv; }; +struct mvpp2_dbgfs_c2_entry { + int id; + struct mvpp2 *priv; +}; + struct mvpp2_dbgfs_flow_entry { int flow; struct mvpp2 *priv; }; +struct mvpp2_dbgfs_flow_tbl_entry { + int id; + struct mvpp2 *priv; +}; + struct mvpp2_dbgfs_port_flow_entry { struct mvpp2_port *port; struct mvpp2_dbgfs_flow_entry *dbg_fe; }; +struct mvpp2_dbgfs_entries { + /* Entries for Header Parser debug info */ + struct mvpp2_dbgfs_prs_entry prs_entries[MVPP2_PRS_TCAM_SRAM_SIZE]; + + /* Entries for Classifier C2 engine debug info */ + struct mvpp2_dbgfs_c2_entry c2_entries[MVPP22_CLS_C2_N_ENTRIES]; + + /* Entries for Classifier Flow Table debug info */ + struct mvpp2_dbgfs_flow_tbl_entry flt_entries[MVPP2_CLS_FLOWS_TBL_SIZE]; + + /* Entries for Classifier flows debug info */ + struct mvpp2_dbgfs_flow_entry flow_entries[MVPP2_N_PRS_FLOWS]; + + /* Entries for per-port flows debug info */ + struct mvpp2_dbgfs_port_flow_entry port_flow_entries[MVPP2_MAX_PORTS]; +}; + static int mvpp2_dbgfs_flow_flt_hits_show(struct seq_file *s, void *unused) { - struct mvpp2_dbgfs_flow_entry *entry = s->private; - int id = MVPP2_FLOW_C2_ENTRY(entry->flow); + struct mvpp2_dbgfs_flow_tbl_entry *entry = s->private; - u32 hits = mvpp2_cls_flow_hits(entry->priv, 
id); + u32 hits = mvpp2_cls_flow_hits(entry->priv, entry->id); seq_printf(s, "%u\n", hits); @@ -58,7 +84,7 @@ DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_dec_hits); static int mvpp2_dbgfs_flow_type_show(struct seq_file *s, void *unused) { struct mvpp2_dbgfs_flow_entry *entry = s->private; - struct mvpp2_cls_flow *f; + const struct mvpp2_cls_flow *f; const char *flow_name; f = mvpp2_cls_flow_get(entry->flow); @@ -93,30 +119,12 @@ static int mvpp2_dbgfs_flow_type_show(struct seq_file *s, void *unused) return 0; } -static int mvpp2_dbgfs_flow_type_open(struct inode *inode, struct file *file) -{ - return single_open(file, mvpp2_dbgfs_flow_type_show, inode->i_private); -} - -static int mvpp2_dbgfs_flow_type_release(struct inode *inode, struct file *file) -{ - struct seq_file *seq = file->private_data; - struct mvpp2_dbgfs_flow_entry *flow_entry = seq->private; - - kfree(flow_entry); - return single_release(inode, file); -} - -static const struct file_operations mvpp2_dbgfs_flow_type_fops = { - .open = mvpp2_dbgfs_flow_type_open, - .read = seq_read, - .release = mvpp2_dbgfs_flow_type_release, -}; +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_type); static int mvpp2_dbgfs_flow_id_show(struct seq_file *s, void *unused) { - struct mvpp2_dbgfs_flow_entry *entry = s->private; - struct mvpp2_cls_flow *f; + const struct mvpp2_dbgfs_flow_entry *entry = s->private; + const struct mvpp2_cls_flow *f; f = mvpp2_cls_flow_get(entry->flow); if (!f) @@ -134,7 +142,7 @@ static int mvpp2_dbgfs_port_flow_hash_opt_show(struct seq_file *s, void *unused) struct mvpp2_dbgfs_port_flow_entry *entry = s->private; struct mvpp2_port *port = entry->port; struct mvpp2_cls_flow_entry fe; - struct mvpp2_cls_flow *f; + const struct mvpp2_cls_flow *f; int flow_index; u16 hash_opts; @@ -142,7 +150,7 @@ static int mvpp2_dbgfs_port_flow_hash_opt_show(struct seq_file *s, void *unused) if (!f) return -EINVAL; - flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(entry->port->id, f->flow_id); + flow_index = MVPP2_CLS_FLT_HASH_ENTRY(entry->port->id, f->flow_id); mvpp2_cls_flow_read(port->priv, flow_index, &fe); @@ -153,42 +161,21 @@ static int mvpp2_dbgfs_port_flow_hash_opt_show(struct seq_file *s, void *unused) return 0; } -static int mvpp2_dbgfs_port_flow_hash_opt_open(struct inode *inode, - struct file *file) -{ - return single_open(file, mvpp2_dbgfs_port_flow_hash_opt_show, - inode->i_private); -} - -static int mvpp2_dbgfs_port_flow_hash_opt_release(struct inode *inode, - struct file *file) -{ - struct seq_file *seq = file->private_data; - struct mvpp2_dbgfs_port_flow_entry *flow_entry = seq->private; - - kfree(flow_entry); - return single_release(inode, file); -} - -static const struct file_operations mvpp2_dbgfs_port_flow_hash_opt_fops = { - .open = mvpp2_dbgfs_port_flow_hash_opt_open, - .read = seq_read, - .release = mvpp2_dbgfs_port_flow_hash_opt_release, -}; +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_flow_hash_opt); static int mvpp2_dbgfs_port_flow_engine_show(struct seq_file *s, void *unused) { struct mvpp2_dbgfs_port_flow_entry *entry = s->private; struct mvpp2_port *port = entry->port; struct mvpp2_cls_flow_entry fe; - struct mvpp2_cls_flow *f; + const struct mvpp2_cls_flow *f; int flow_index, engine; f = mvpp2_cls_flow_get(entry->dbg_fe->flow); if (!f) return -EINVAL; - flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(entry->port->id, f->flow_id); + flow_index = MVPP2_CLS_FLT_HASH_ENTRY(entry->port->id, f->flow_id); mvpp2_cls_flow_read(port->priv, flow_index, &fe); @@ -203,11 +190,10 @@ DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_flow_engine); static int 
mvpp2_dbgfs_flow_c2_hits_show(struct seq_file *s, void *unused) { - struct mvpp2_port *port = s->private; + struct mvpp2_dbgfs_c2_entry *entry = s->private; u32 hits; - hits = mvpp2_cls_c2_hit_count(port->priv, - MVPP22_CLS_C2_RSS_ENTRY(port->id)); + hits = mvpp2_cls_c2_hit_count(entry->priv, entry->id); seq_printf(s, "%u\n", hits); @@ -218,11 +204,11 @@ DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_c2_hits); static int mvpp2_dbgfs_flow_c2_rxq_show(struct seq_file *s, void *unused) { - struct mvpp2_port *port = s->private; + struct mvpp2_dbgfs_c2_entry *entry = s->private; struct mvpp2_cls_c2_entry c2; u8 qh, ql; - mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2); + mvpp2_cls_c2_read(entry->priv, entry->id, &c2); qh = (c2.attr[0] >> MVPP22_CLS_C2_ATTR0_QHIGH_OFFS) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK; @@ -239,11 +225,11 @@ DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_c2_rxq); static int mvpp2_dbgfs_flow_c2_enable_show(struct seq_file *s, void *unused) { - struct mvpp2_port *port = s->private; + struct mvpp2_dbgfs_c2_entry *entry = s->private; struct mvpp2_cls_c2_entry c2; int enabled; - mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2); + mvpp2_cls_c2_read(entry->priv, entry->id, &c2); enabled = !!(c2.attr[2] & MVPP22_CLS_C2_ATTR2_RSS_EN); @@ -456,25 +442,7 @@ static int mvpp2_dbgfs_prs_valid_show(struct seq_file *s, void *unused) return 0; } -static int mvpp2_dbgfs_prs_valid_open(struct inode *inode, struct file *file) -{ - return single_open(file, mvpp2_dbgfs_prs_valid_show, inode->i_private); -} - -static int mvpp2_dbgfs_prs_valid_release(struct inode *inode, struct file *file) -{ - struct seq_file *seq = file->private_data; - struct mvpp2_dbgfs_prs_entry *entry = seq->private; - - kfree(entry); - return single_release(inode, file); -} - -static const struct file_operations mvpp2_dbgfs_prs_valid_fops = { - .open = mvpp2_dbgfs_prs_valid_open, - .read = seq_read, - .release = mvpp2_dbgfs_prs_valid_release, -}; +DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_valid); static int mvpp2_dbgfs_flow_port_init(struct dentry *parent, struct mvpp2_port *port, @@ -487,10 +455,7 @@ static int mvpp2_dbgfs_flow_port_init(struct dentry *parent, if (IS_ERR(port_dir)) return PTR_ERR(port_dir); - /* This will be freed by 'hash_opts' release op */ - port_entry = kmalloc(sizeof(*port_entry), GFP_KERNEL); - if (!port_entry) - return -ENOMEM; + port_entry = &port->priv->dbgfs_entries->port_flow_entries[port->id]; port_entry->port = port; port_entry->dbg_fe = entry; @@ -518,17 +483,11 @@ static int mvpp2_dbgfs_flow_entry_init(struct dentry *parent, if (!flow_entry_dir) return -ENOMEM; - /* This will be freed by 'type' release op */ - entry = kmalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) - return -ENOMEM; + entry = &priv->dbgfs_entries->flow_entries[flow]; entry->flow = flow; entry->priv = priv; - debugfs_create_file("flow_hits", 0444, flow_entry_dir, entry, - &mvpp2_dbgfs_flow_flt_hits_fops); - debugfs_create_file("dec_hits", 0444, flow_entry_dir, entry, &mvpp2_dbgfs_flow_dec_hits_fops); @@ -545,6 +504,7 @@ static int mvpp2_dbgfs_flow_entry_init(struct dentry *parent, if (ret) return ret; } + return 0; } @@ -557,7 +517,7 @@ static int mvpp2_dbgfs_flow_init(struct dentry *parent, struct mvpp2 *priv) if (!flow_dir) return -ENOMEM; - for (i = 0; i < MVPP2_N_FLOWS; i++) { + for (i = 0; i < MVPP2_N_PRS_FLOWS; i++) { ret = mvpp2_dbgfs_flow_entry_init(flow_dir, priv, i); if (ret) return ret; @@ -582,10 +542,7 @@ static int mvpp2_dbgfs_prs_entry_init(struct dentry *parent, if (!prs_entry_dir) return 
-ENOMEM; - /* The 'valid' entry's ops will free that */ - entry = kmalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) - return -ENOMEM; + entry = &priv->dbgfs_entries->prs_entries[tid]; entry->tid = tid; entry->priv = priv; @@ -630,6 +587,98 @@ static int mvpp2_dbgfs_prs_init(struct dentry *parent, struct mvpp2 *priv) return 0; } +static int mvpp2_dbgfs_c2_entry_init(struct dentry *parent, + struct mvpp2 *priv, int id) +{ + struct mvpp2_dbgfs_c2_entry *entry; + struct dentry *c2_entry_dir; + char c2_entry_name[10]; + + if (id >= MVPP22_CLS_C2_N_ENTRIES) + return -EINVAL; + + sprintf(c2_entry_name, "%03d", id); + + c2_entry_dir = debugfs_create_dir(c2_entry_name, parent); + if (!c2_entry_dir) + return -ENOMEM; + + entry = &priv->dbgfs_entries->c2_entries[id]; + + entry->id = id; + entry->priv = priv; + + debugfs_create_file("hits", 0444, c2_entry_dir, entry, + &mvpp2_dbgfs_flow_c2_hits_fops); + + debugfs_create_file("default_rxq", 0444, c2_entry_dir, entry, + &mvpp2_dbgfs_flow_c2_rxq_fops); + + debugfs_create_file("rss_enable", 0444, c2_entry_dir, entry, + &mvpp2_dbgfs_flow_c2_enable_fops); + + return 0; +} + +static int mvpp2_dbgfs_flow_tbl_entry_init(struct dentry *parent, + struct mvpp2 *priv, int id) +{ + struct mvpp2_dbgfs_flow_tbl_entry *entry; + struct dentry *flow_tbl_entry_dir; + char flow_tbl_entry_name[10]; + + if (id >= MVPP2_CLS_FLOWS_TBL_SIZE) + return -EINVAL; + + sprintf(flow_tbl_entry_name, "%03d", id); + + flow_tbl_entry_dir = debugfs_create_dir(flow_tbl_entry_name, parent); + if (!flow_tbl_entry_dir) + return -ENOMEM; + + entry = &priv->dbgfs_entries->flt_entries[id]; + + entry->id = id; + entry->priv = priv; + + debugfs_create_file("hits", 0444, flow_tbl_entry_dir, entry, + &mvpp2_dbgfs_flow_flt_hits_fops); + + return 0; +} + +static int mvpp2_dbgfs_cls_init(struct dentry *parent, struct mvpp2 *priv) +{ + struct dentry *cls_dir, *c2_dir, *flow_tbl_dir; + int i, ret; + + cls_dir = debugfs_create_dir("classifier", parent); + if (!cls_dir) + return -ENOMEM; + + c2_dir = debugfs_create_dir("c2", cls_dir); + if (!c2_dir) + return -ENOMEM; + + for (i = 0; i < MVPP22_CLS_C2_N_ENTRIES; i++) { + ret = mvpp2_dbgfs_c2_entry_init(c2_dir, priv, i); + if (ret) + return ret; + } + + flow_tbl_dir = debugfs_create_dir("flow_table", cls_dir); + if (!flow_tbl_dir) + return -ENOMEM; + + for (i = 0; i < MVPP2_CLS_FLOWS_TBL_SIZE; i++) { + ret = mvpp2_dbgfs_flow_tbl_entry_init(flow_tbl_dir, priv, i); + if (ret) + return ret; + } + + return 0; +} + static int mvpp2_dbgfs_port_init(struct dentry *parent, struct mvpp2_port *port) { @@ -648,21 +697,14 @@ static int mvpp2_dbgfs_port_init(struct dentry *parent, debugfs_create_file("vid_filter", 0444, port_dir, port, &mvpp2_dbgfs_port_vid_fops); - debugfs_create_file("c2_hits", 0444, port_dir, port, - &mvpp2_dbgfs_flow_c2_hits_fops); - - debugfs_create_file("default_rxq", 0444, port_dir, port, - &mvpp2_dbgfs_flow_c2_rxq_fops); - - debugfs_create_file("rss_enable", 0444, port_dir, port, - &mvpp2_dbgfs_flow_c2_enable_fops); - return 0; } void mvpp2_dbgfs_cleanup(struct mvpp2 *priv) { debugfs_remove_recursive(priv->dbgfs_dir); + + kfree(priv->dbgfs_entries); } void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name) @@ -682,11 +724,18 @@ void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name) return; priv->dbgfs_dir = mvpp2_dir; + priv->dbgfs_entries = kzalloc(sizeof(*priv->dbgfs_entries), GFP_KERNEL); + if (!priv->dbgfs_entries) + goto err; ret = mvpp2_dbgfs_prs_init(mvpp2_dir, priv); if (ret) goto err; + ret = mvpp2_dbgfs_cls_init(mvpp2_dir, 
priv); + if (ret) + goto err; + for (i = 0; i < priv->port_count; i++) { ret = mvpp2_dbgfs_port_init(mvpp2_dir, priv->port_list[i]); if (ret) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 25fbed2b8d94..56d43d9b43ef 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -3741,9 +3741,9 @@ static int mvpp2_set_features(struct net_device *dev, if (changed & NETIF_F_RXHASH) { if (features & NETIF_F_RXHASH) - mvpp22_rss_enable(port); + mvpp22_port_rss_enable(port); else - mvpp22_rss_disable(port); + mvpp22_port_rss_disable(port); } return 0; @@ -3937,7 +3937,7 @@ static int mvpp2_ethtool_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rules) { struct mvpp2_port *port = netdev_priv(dev); - int ret = 0; + int ret = 0, i, loc = 0; if (!mvpp22_rss_is_supported()) return -EOPNOTSUPP; @@ -3949,6 +3949,18 @@ static int mvpp2_ethtool_get_rxnfc(struct net_device *dev, case ETHTOOL_GRXRINGS: info->data = port->nrxqs; break; + case ETHTOOL_GRXCLSRLCNT: + info->rule_cnt = port->n_rfs_rules; + break; + case ETHTOOL_GRXCLSRULE: + ret = mvpp2_ethtool_cls_rule_get(port, info); + break; + case ETHTOOL_GRXCLSRLALL: + for (i = 0; i < MVPP2_N_RFS_RULES; i++) { + if (port->rfs_rules[i]) + rules[loc++] = i; + } + break; default: return -ENOTSUPP; } @@ -3969,6 +3981,12 @@ static int mvpp2_ethtool_set_rxnfc(struct net_device *dev, case ETHTOOL_SRXFH: ret = mvpp2_ethtool_rxfh_set(port, info); break; + case ETHTOOL_SRXCLSRLINS: + ret = mvpp2_ethtool_cls_rule_ins(port, info); + break; + case ETHTOOL_SRXCLSRLDEL: + ret = mvpp2_ethtool_cls_rule_del(port, info); + break; default: return -EOPNOTSUPP; } @@ -4301,7 +4319,7 @@ static int mvpp2_port_init(struct mvpp2_port *port) mvpp2_cls_port_config(port); if (mvpp22_rss_is_supported()) - mvpp22_rss_port_init(port); + mvpp22_port_rss_init(port); /* Provide an initial Rx packet size */ port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu); @@ -4848,6 +4866,7 @@ static int mvpp2_port_probe(struct platform_device *pdev, struct mvpp2_port *port; struct mvpp2_port_pcpu *port_pcpu; struct device_node *port_node = to_of_node(port_fwnode); + netdev_features_t features; struct net_device *dev; struct resource *res; struct phylink *phylink; @@ -4856,7 +4875,6 @@ static int mvpp2_port_probe(struct platform_device *pdev, unsigned long flags = 0; bool has_tx_irqs; u32 id; - int features; int phy_mode; int err, i; diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 549d36497b8c..53abe925ecb1 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -767,7 +767,8 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, */ wmb(); - if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more) + if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || + !netdev_xmit_more()) mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR); return 0; diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig index ff8057ed97ee..8491db57b0b0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig @@ -26,6 +26,7 @@ config MLX4_EN_DCB config MLX4_CORE tristate depends on PCI + select NET_DEVLINK default n config MLX4_DEBUG diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 2cbd2bd7c67c..36a92b19e613 
100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -685,16 +685,15 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, } u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, - struct net_device *sb_dev, - select_queue_fallback_t fallback) + struct net_device *sb_dev) { struct mlx4_en_priv *priv = netdev_priv(dev); u16 rings_p_up = priv->num_tx_rings_p_up; if (netdev_get_num_tc(dev)) - return fallback(dev, skb, NULL); + return netdev_pick_tx(dev, skb, NULL); - return fallback(dev, skb, NULL) % rings_p_up; + return netdev_pick_tx(dev, skb, NULL) % rings_p_up; } static void mlx4_bf_copy(void __iomem *dst, const void *src, @@ -1043,7 +1042,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) send_doorbell = __netdev_tx_sent_queue(ring->tx_queue, tx_info->nr_bytes, - skb->xmit_more); + netdev_xmit_more()); real_size = (real_size / 16) & 0x3f; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 8137454e2534..630f15977f09 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -698,8 +698,7 @@ void mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); void mlx4_en_tx_irq(struct mlx4_cq *mcq); u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, - struct net_device *sb_dev, - select_queue_fallback_t fallback); + struct net_device *sb_dev); netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring, struct mlx4_en_rx_alloc *frame, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 6debffb8336b..9aca8086ee01 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -5,6 +5,7 @@ config MLX5_CORE tristate "Mellanox 5th generation network adapters (ConnectX series) core driver" depends on PCI + select NET_DEVLINK imply PTP_1588_CLOCK imply VXLAN default n diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 1a16f6d73cbc..3dbbe3b643b3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -22,7 +22,8 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ # mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \ en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \ - en_selftest.o en/port.o en/monitor_stats.o en/reporter_tx.o + en_selftest.o en/port.o en/monitor_stats.o en/reporter_tx.o \ + en/params.o # # Netdev extra diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index be48c6440251..0a2ffe794a54 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -1347,7 +1347,7 @@ static void set_wqname(struct mlx5_core_dev *dev) struct mlx5_cmd *cmd = &dev->cmd; snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s", - dev_name(&dev->pdev->dev)); + dev->priv.name); } static void clean_debug_files(struct mlx5_core_dev *dev) @@ -1902,9 +1902,9 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev) memset(cmd, 0, sizeof(*cmd)); cmd_if_rev = cmdif_rev(dev); if (cmd_if_rev != CMD_IF_REV) { - dev_err(&dev->pdev->dev, - "Driver cmdif rev(%d) differs from firmware's(%d)\n", - CMD_IF_REV, 
cmd_if_rev); + mlx5_core_err(dev, + "Driver cmdif rev(%d) differs from firmware's(%d)\n", + CMD_IF_REV, cmd_if_rev); return -EINVAL; } @@ -1921,14 +1921,14 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev) cmd->log_sz = cmd_l >> 4 & 0xf; cmd->log_stride = cmd_l & 0xf; if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) { - dev_err(&dev->pdev->dev, "firmware reports too many outstanding commands %d\n", - 1 << cmd->log_sz); + mlx5_core_err(dev, "firmware reports too many outstanding commands %d\n", + 1 << cmd->log_sz); err = -EINVAL; goto err_free_page; } if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) { - dev_err(&dev->pdev->dev, "command queue size overflow\n"); + mlx5_core_err(dev, "command queue size overflow\n"); err = -EINVAL; goto err_free_page; } @@ -1939,8 +1939,8 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev) cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16; if (cmd->cmdif_rev > CMD_IF_REV) { - dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n", - CMD_IF_REV, cmd->cmdif_rev); + mlx5_core_err(dev, "driver does not support command interface version. driver %d, firmware %d\n", + CMD_IF_REV, cmd->cmdif_rev); err = -EOPNOTSUPP; goto err_free_page; } @@ -1956,7 +1956,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev) cmd_h = (u32)((u64)(cmd->dma) >> 32); cmd_l = (u32)(cmd->dma); if (cmd_l & 0xfff) { - dev_err(&dev->pdev->dev, "invalid command queue address\n"); + mlx5_core_err(dev, "invalid command queue address\n"); err = -ENOMEM; goto err_free_page; } @@ -1976,7 +1976,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev) set_wqname(dev); cmd->wq = create_singlethread_workqueue(cmd->wq_name); if (!cmd->wq) { - dev_err(&dev->pdev->dev, "failed to create command workqueue\n"); + mlx5_core_err(dev, "failed to create command workqueue\n"); err = -ENOMEM; goto err_cache; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h index 83f90e9aff45..7b5901d42994 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h @@ -47,7 +47,7 @@ TRACE_EVENT(mlx5_fw, TP_ARGS(tracer, trace_timestamp, lost, event_id, msg), TP_STRUCT__entry( - __string(dev_name, dev_name(&tracer->dev->pdev->dev)) + __string(dev_name, tracer->dev->priv.name) __field(u64, trace_timestamp) __field(bool, lost) __field(u8, event_id) @@ -55,7 +55,7 @@ TRACE_EVENT(mlx5_fw, ), TP_fast_assign( - __assign_str(dev_name, dev_name(&tracer->dev->pdev->dev)); + __assign_str(dev_name, tracer->dev->priv.name); __entry->trace_timestamp = trace_timestamp; __entry->lost = lost; __entry->event_id = event_id; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index d3eaf2ceaa39..7e0c3d4de108 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -241,7 +241,6 @@ struct mlx5e_params { struct net_dim_cq_moder rx_cq_moderation; struct net_dim_cq_moder tx_cq_moderation; bool lro_en; - u32 lro_wqe_sz; u8 tx_min_inline_mode; bool vlan_strip_disable; bool scatter_fcs_en; @@ -410,14 +409,17 @@ struct mlx5e_xdp_info_fifo { struct mlx5e_xdp_wqe_info { u8 num_wqebbs; - u8 num_ds; + u8 num_pkts; }; struct mlx5e_xdp_mpwqe { /* Current MPWQE session */ struct mlx5e_tx_wqe *wqe; u8 ds_count; + u8 pkt_count; u8 max_ds_count; + u8 complete; + u8 inline_on; }; struct mlx5e_xdpsq; @@ -429,7 +431,6 @@ 
struct mlx5e_xdpsq { /* dirtied @completion */ u32 xdpi_fifo_cc; u16 cc; - bool redirect_flush; /* dirtied @xmit */ u32 xdpi_fifo_pc ____cacheline_aligned_in_smp; @@ -462,10 +463,10 @@ struct mlx5e_xdpsq { struct mlx5e_icosq { /* data path */ + u16 cc; + u16 pc; - /* dirtied @xmit */ - u16 pc ____cacheline_aligned_in_smp; - + struct mlx5_wqe_ctrl_seg *doorbell_cseg; struct mlx5e_cq cq; /* write@xmit, read@completion */ @@ -532,7 +533,8 @@ typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq); typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16); enum mlx5e_rq_flag { - MLX5E_RQ_FLAG_XDP_XMIT = BIT(0), + MLX5E_RQ_FLAG_XDP_XMIT, + MLX5E_RQ_FLAG_XDP_REDIRECT, }; struct mlx5e_rq_frag_info { @@ -563,8 +565,10 @@ struct mlx5e_rq { struct mlx5e_mpw_info *info; mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq; u16 num_strides; + u16 actual_wq_head; u8 log_stride_sz; - bool umr_in_progress; + u8 umr_in_progress; + u8 umr_last_bulk; } mpwqe; }; struct { @@ -769,12 +773,12 @@ struct mlx5e_profile { void mlx5e_build_ptys2ethtool_map(void); u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, - struct net_device *sb_dev, - select_queue_fallback_t fallback); + struct net_device *sb_dev); netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev); netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, - struct mlx5e_tx_wqe *wqe, u16 pi); + struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more); +void mlx5e_trigger_irq(struct mlx5e_icosq *sq); void mlx5e_completion_event(struct mlx5_core_cq *mcq); void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event); int mlx5e_napi_poll(struct napi_struct *napi, int budget); @@ -886,6 +890,53 @@ static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev) MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version)); } +static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev) +{ + return MLX5_CAP_ETH(mdev, swp) && + MLX5_CAP_ETH(mdev, swp_csum) && MLX5_CAP_ETH(mdev, swp_lso); +} + +struct mlx5e_swp_spec { + __be16 l3_proto; + u8 l4_proto; + u8 is_tun; + __be16 tun_l3_proto; + u8 tun_l4_proto; +}; + +static inline void +mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg, + struct mlx5e_swp_spec *swp_spec) +{ + /* SWP offsets are in 2-byte words */ + eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2; + if (swp_spec->l3_proto == htons(ETH_P_IPV6)) + eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6; + if (swp_spec->l4_proto) { + eseg->swp_outer_l4_offset = skb_transport_offset(skb) / 2; + if (swp_spec->l4_proto == IPPROTO_UDP) + eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP; + } + + if (swp_spec->is_tun) { + eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2; + if (swp_spec->tun_l3_proto == htons(ETH_P_IPV6)) + eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6; + } else { /* typically for ipsec when xfrm mode != XFRM_MODE_TUNNEL */ + eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2; + if (swp_spec->l3_proto == htons(ETH_P_IPV6)) + eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6; + } + switch (swp_spec->tun_l4_proto) { + case IPPROTO_UDP: + eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP; + /* fall through */ + case IPPROTO_TCP: + eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2; + break; + } +} + static inline void mlx5e_sq_fetch_wqe(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe **wqe, u16 *pi) @@ -930,7 +981,7 @@ void mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, */ wmb(); - mlx5_write64((__be32 *)ctrl,
uar_map, NULL); + mlx5_write64((__be32 *)ctrl, uar_map); } static inline void mlx5e_cq_arm(struct mlx5e_cq *cq) @@ -1042,6 +1093,7 @@ mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *prof int mlx5e_attach_netdev(struct mlx5e_priv *priv); void mlx5e_detach_netdev(struct mlx5e_priv *priv); void mlx5e_destroy_netdev(struct mlx5e_priv *priv); +void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv); void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, struct mlx5e_rss_params *rss_params, struct mlx5e_params *params, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c new file mode 100644 index 000000000000..d3744bffbae3 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -0,0 +1,104 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2019 Mellanox Technologies. */ + +#include "en/params.h" + +u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params) +{ + u16 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); + u16 linear_rq_headroom = params->xdp_prog ? + XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM; + u32 frag_sz; + + linear_rq_headroom += NET_IP_ALIGN; + + frag_sz = MLX5_SKB_FRAG_SZ(linear_rq_headroom + hw_mtu); + + if (params->xdp_prog && frag_sz < PAGE_SIZE) + frag_sz = PAGE_SIZE; + + return frag_sz; +} + +u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params) +{ + u32 linear_frag_sz = mlx5e_rx_get_linear_frag_sz(params); + + return MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz); +} + +bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params) +{ + u32 frag_sz = mlx5e_rx_get_linear_frag_sz(params); + + return !params->lro_en && frag_sz <= PAGE_SIZE; +} + +#define MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ ((BIT(__mlx5_bit_sz(wq, log_wqe_stride_size)) - 1) + \ + MLX5_MPWQE_LOG_STRIDE_SZ_BASE) +bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev, + struct mlx5e_params *params) +{ + u32 frag_sz = mlx5e_rx_get_linear_frag_sz(params); + s8 signed_log_num_strides_param; + u8 log_num_strides; + + if (!mlx5e_rx_is_linear_skb(params)) + return false; + + if (order_base_2(frag_sz) > MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ) + return false; + + if (MLX5_CAP_GEN(mdev, ext_stride_num_range)) + return true; + + log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(frag_sz); + signed_log_num_strides_param = + (s8)log_num_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE; + + return signed_log_num_strides_param >= 0; +} + +u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params) +{ + u8 log_pkts_per_wqe = mlx5e_mpwqe_log_pkts_per_wqe(params); + + /* Numbers are unsigned, don't subtract to avoid underflow. */ + if (params->log_rq_mtu_frames < + log_pkts_per_wqe + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW) + return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW; + + return params->log_rq_mtu_frames - log_pkts_per_wqe; +} + +u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev, + struct mlx5e_params *params) +{ + if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params)) + return order_base_2(mlx5e_rx_get_linear_frag_sz(params)); + + return MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev); +} + +u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev, + struct mlx5e_params *params) +{ + return MLX5_MPWRQ_LOG_WQE_SZ - + mlx5e_mpwqe_get_log_stride_size(mdev, params); +} + +u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev, + struct mlx5e_params *params) +{ + u16 linear_rq_headroom = params->xdp_prog ? 
+ XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM; + bool is_linear_skb; + + linear_rq_headroom += NET_IP_ALIGN; + + is_linear_skb = (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC) ? + mlx5e_rx_is_linear_skb(params) : + mlx5e_rx_mpwqe_is_linear_skb(mdev, params); + + return is_linear_skb ? linear_rq_headroom : 0; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h new file mode 100644 index 000000000000..b106a0236f36 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2019 Mellanox Technologies. */ + +#ifndef __MLX5_EN_PARAMS_H__ +#define __MLX5_EN_PARAMS_H__ + +#include "en.h" + +u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params); +u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params); +bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params); +bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev, + struct mlx5e_params *params); +u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params); +u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev, + struct mlx5e_params *params); +u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev, + struct mlx5e_params *params); +u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev, + struct mlx5e_params *params); + +#endif /* __MLX5_EN_PARAMS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c index 4ab0d030b544..633b117eb13e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c @@ -167,23 +167,23 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, } /** - * update_buffer_lossy() - * max_mtu: netdev's max_mtu - * pfc_en: <input> current pfc configuration - * buffer: <input> current prio to buffer mapping - * xoff: <input> xoff value - * port_buffer: <output> port receive buffer configuration - * change: <output> + * update_buffer_lossy - Update buffer configuration based on pfc + * @max_mtu: netdev's max_mtu + * @pfc_en: <input> current pfc configuration + * @buffer: <input> current prio to buffer mapping + * @xoff: <input> xoff value + * @port_buffer: <output> port receive buffer configuration + * @change: <output> * - * Update buffer configuration based on pfc configuraiton and priority - * to buffer mapping. - * Buffer's lossy bit is changed to: - * lossless if there is at least one PFC enabled priority mapped to this buffer - * lossy if all priorities mapped to this buffer are PFC disabled + * Update buffer configuration based on pfc configuration and + * priority to buffer mapping. + * Buffer's lossy bit is changed to: + * lossless if there is at least one PFC enabled priority + * mapped to this buffer; lossy if all priorities mapped to + * this buffer are PFC disabled * - * Return: - * Return 0 if no error. - * Set change to true if buffer configuration is modified. + * @return: 0 if no error, + * sets change to true if buffer configuration was modified.
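 *
 * A minimal illustrative sketch of the rule above (editorial addition, not
 * part of this patch; the buffer_is_lossy() name and the pfc_en/prio2buffer
 * parameters are assumed for illustration, mirroring update_buffer_lossy()'s
 * own pfc_en and buffer arguments):
 *
 *	static bool buffer_is_lossy(u8 pfc_en, const u8 *prio2buffer, int buf)
 *	{
 *		int prio;
 *
 *		for (prio = 0; prio < 8; prio++)
 *			if ((pfc_en & BIT(prio)) && prio2buffer[prio] == buf)
 *				return false;
 *		return true;
 *	}
 *
 * i.e. a buffer stays lossless while at least one PFC-enabled priority maps
 * to it, and turns lossy only once every priority mapped to it has PFC
 * disabled.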
*/ static int update_buffer_lossy(unsigned int max_mtu, u8 pfc_en, u8 *buffer, u32 xoff, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c index eec07b34b4ad..fe5d4d7f15ed 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c @@ -74,7 +74,7 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv, if (ret) return ret; - if (mlx5_lag_is_multipath(mdev) && !rt->rt_gateway) + if (mlx5_lag_is_multipath(mdev) && rt->rt_gw_family != AF_INET) return -ENETUNREACH; #else return -EOPNOTSUPP; @@ -100,7 +100,7 @@ static const char *mlx5e_netdev_kind(struct net_device *dev) if (dev->rtnl_link_ops) return dev->rtnl_link_ops->kind; else - return ""; + return "unknown"; } static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv, @@ -640,8 +640,10 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev, headers_c, headers_v); } else { netdev_warn(priv->netdev, - "decapsulation offload is not supported for %s net device (%d)\n", - mlx5e_netdev_kind(filter_dev), tunnel_type); + "decapsulation offload is not supported for %s (kind: \"%s\")\n", + netdev_name(filter_dev), + mlx5e_netdev_kind(filter_dev)); + return -EOPNOTSUPP; } return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index cad34d6f5f45..399957104f9d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -105,7 +105,7 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di, if (unlikely(err)) goto xdp_abort; __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); - rq->xdpsq.redirect_flush = true; + __set_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags); mlx5e_page_dma_unmap(rq, di); rq->stats->xdp_redirect++; return true; @@ -125,6 +125,7 @@ xdp_abort: static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq) { struct mlx5e_xdp_mpwqe *session = &sq->mpwqe; + struct mlx5e_xdpsq_stats *stats = sq->stats; struct mlx5_wq_cyc *wq = &sq->wq; u8 wqebbs; u16 pi; @@ -132,7 +133,9 @@ static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq) mlx5e_xdpsq_fetch_wqe(sq, &session->wqe); prefetchw(session->wqe->data); - session->ds_count = MLX5E_XDP_TX_EMPTY_DS_COUNT; + session->ds_count = MLX5E_XDP_TX_EMPTY_DS_COUNT; + session->pkt_count = 0; + session->complete = 0; pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); @@ -151,6 +154,10 @@ static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq) MLX5E_XDP_MPW_MAX_WQEBBS); session->max_ds_count = MLX5_SEND_WQEBB_NUM_DS * wqebbs; + + mlx5e_xdp_update_inline_state(sq); + + stats->mpwqe++; } static void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq) @@ -167,7 +174,7 @@ static void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq) cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_count); wi->num_wqebbs = DIV_ROUND_UP(ds_count, MLX5_SEND_WQEBB_NUM_DS); - wi->num_ds = ds_count - MLX5E_XDP_TX_EMPTY_DS_COUNT; + wi->num_pkts = session->pkt_count; sq->pc += wi->num_wqebbs; @@ -182,11 +189,9 @@ static bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_mpwqe *session = &sq->mpwqe; struct mlx5e_xdpsq_stats *stats = sq->stats; - dma_addr_t dma_addr = xdpi->dma_addr; struct xdp_frame *xdpf = xdpi->xdpf; - unsigned int dma_len = xdpf->len; - if (unlikely(sq->hw_mtu < dma_len)) { + if (unlikely(sq->hw_mtu < xdpf->len)) { stats->err++; return false; } @@ -203,9 +208,10 @@ static bool mlx5e_xmit_xdp_frame_mpwqe(struct 
mlx5e_xdpsq *sq, mlx5e_xdp_mpwqe_session_start(sq); } - mlx5e_xdp_mpwqe_add_dseg(sq, dma_addr, dma_len); + mlx5e_xdp_mpwqe_add_dseg(sq, xdpi, stats); - if (unlikely(session->ds_count == session->max_ds_count)) + if (unlikely(session->complete || + session->ds_count == session->max_ds_count)) mlx5e_xdp_mpwqe_complete(sq); mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi); @@ -319,7 +325,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq) sqcc += wi->num_wqebbs; - for (j = 0; j < wi->num_ds; j++) { + for (j = 0; j < wi->num_pkts; j++) { struct mlx5e_xdp_info xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo); @@ -360,7 +366,7 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq) sq->cc += wi->num_wqebbs; - for (i = 0; i < wi->num_ds; i++) { + for (i = 0; i < wi->num_pkts; i++) { struct mlx5e_xdp_info xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo); @@ -439,9 +445,9 @@ void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq) mlx5e_xmit_xdp_doorbell(xdpsq); - if (xdpsq->redirect_flush) { + if (test_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags)) { xdp_do_flush_map(); - xdpsq->redirect_flush = false; + __clear_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags); } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h index 553956cadc8a..8b537a4b0840 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h @@ -74,16 +74,68 @@ static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq) } } +/* Enable inline WQEs to shift some load from a congested HCA (HW) to + * a less congested cpu (SW). + */ +static inline void mlx5e_xdp_update_inline_state(struct mlx5e_xdpsq *sq) +{ + u16 outstanding = sq->xdpi_fifo_pc - sq->xdpi_fifo_cc; + struct mlx5e_xdp_mpwqe *session = &sq->mpwqe; + +#define MLX5E_XDP_INLINE_WATERMARK_LOW 10 +#define MLX5E_XDP_INLINE_WATERMARK_HIGH 128 + + if (session->inline_on) { + if (outstanding <= MLX5E_XDP_INLINE_WATERMARK_LOW) + session->inline_on = 0; + return; + } + + /* inline is false */ + if (outstanding >= MLX5E_XDP_INLINE_WATERMARK_HIGH) + session->inline_on = 1; +} + static inline void -mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq, dma_addr_t dma_addr, u16 dma_len) +mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi, + struct mlx5e_xdpsq_stats *stats) { struct mlx5e_xdp_mpwqe *session = &sq->mpwqe; + dma_addr_t dma_addr = xdpi->dma_addr; + struct xdp_frame *xdpf = xdpi->xdpf; struct mlx5_wqe_data_seg *dseg = - (struct mlx5_wqe_data_seg *)session->wqe + session->ds_count++; + (struct mlx5_wqe_data_seg *)session->wqe + session->ds_count; + u16 dma_len = xdpf->len; + session->pkt_count++; + +#define MLX5E_XDP_INLINE_WQE_SZ_THRSD (256 - sizeof(struct mlx5_wqe_inline_seg)) + + if (session->inline_on && dma_len <= MLX5E_XDP_INLINE_WQE_SZ_THRSD) { + struct mlx5_wqe_inline_seg *inline_dseg = + (struct mlx5_wqe_inline_seg *)dseg; + u16 ds_len = sizeof(*inline_dseg) + dma_len; + u16 ds_cnt = DIV_ROUND_UP(ds_len, MLX5_SEND_WQE_DS); + + if (unlikely(session->ds_count + ds_cnt > session->max_ds_count)) { + /* Not enough space for inline wqe, send with memory pointer */ + session->complete = true; + goto no_inline; + } + + inline_dseg->byte_count = cpu_to_be32(dma_len | MLX5_INLINE_SEG); + memcpy(inline_dseg->data, xdpf->data, dma_len); + + session->ds_count += ds_cnt; + stats->inlnw++; + return; + } + +no_inline: dseg->addr = cpu_to_be64(dma_addr); dseg->byte_count = cpu_to_be32(dma_len); dseg->lkey = sq->mkey_be; + 
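/* editorial note, not part of this patch: the memory-pointer data segment written above fills exactly one 16-byte DS slot, hence the single ds_count increment that follows; the inline branch earlier instead advanced ds_count by ds_cnt and counted the frame in stats->inlnw */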
session->ds_count++; } static inline void mlx5e_xdpsq_fetch_wqe(struct mlx5e_xdpsq *sq, @@ -110,5 +162,4 @@ mlx5e_xdpi_fifo_pop(struct mlx5e_xdp_info_fifo *fifo) { return fifo->xi[(*fifo->cc)++ & fifo->mask]; } - #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h index 1dd225380a66..6da7c88742dc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h @@ -40,6 +40,57 @@ #include "en_accel/tls_rxtx.h" #include "en.h" +#if IS_ENABLED(CONFIG_GENEVE) +static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev) +{ + return mlx5_tx_swp_supported(mdev); +} + +static inline void +mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg) +{ + struct mlx5e_swp_spec swp_spec = {}; + unsigned int offset = 0; + __be16 l3_proto; + u8 l4_proto; + + l3_proto = vlan_get_protocol(skb); + switch (l3_proto) { + case htons(ETH_P_IP): + l4_proto = ip_hdr(skb)->protocol; + break; + case htons(ETH_P_IPV6): + l4_proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL); + break; + default: + return; + } + + if (l4_proto != IPPROTO_UDP || + udp_hdr(skb)->dest != cpu_to_be16(GENEVE_UDP_PORT)) + return; + swp_spec.l3_proto = l3_proto; + swp_spec.l4_proto = l4_proto; + swp_spec.is_tun = true; + if (inner_ip_hdr(skb)->version == 6) { + swp_spec.tun_l3_proto = htons(ETH_P_IPV6); + swp_spec.tun_l4_proto = inner_ipv6_hdr(skb)->nexthdr; + } else { + swp_spec.tun_l3_proto = htons(ETH_P_IP); + swp_spec.tun_l4_proto = inner_ip_hdr(skb)->protocol; + } + + mlx5e_set_eseg_swp(skb, eseg, &swp_spec); +} + +#else +static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev) +{ + return false; +} + +#endif /* CONFIG_GENEVE */ + static inline void mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c index 53608afd39b6..0dd17514caae 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c @@ -136,7 +136,7 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg, u8 mode, struct xfrm_offload *xo) { - u8 proto; + struct mlx5e_swp_spec swp_spec = {}; /* Tunnel Mode: * SWP: OutL3 InL3 InL4 @@ -146,35 +146,23 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb, * SWP: OutL3 InL4 * InL3 * Pkt: MAC IP ESP L4 - * - * Offsets are in 2-byte words, counting from start of frame */ - eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2; - if (skb->protocol == htons(ETH_P_IPV6)) - eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6; - - if (mode == XFRM_MODE_TUNNEL) { - eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2; + swp_spec.l3_proto = skb->protocol; + swp_spec.is_tun = mode == XFRM_MODE_TUNNEL; + if (swp_spec.is_tun) { if (xo->proto == IPPROTO_IPV6) { - eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6; - proto = inner_ipv6_hdr(skb)->nexthdr; + swp_spec.tun_l3_proto = htons(ETH_P_IPV6); + swp_spec.tun_l4_proto = inner_ipv6_hdr(skb)->nexthdr; } else { - proto = inner_ip_hdr(skb)->protocol; + swp_spec.tun_l3_proto = htons(ETH_P_IP); + swp_spec.tun_l4_proto = inner_ip_hdr(skb)->protocol; } } else { - eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2; - if (skb->protocol == htons(ETH_P_IPV6)) - eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6; - proto = xo->proto; - } - switch 
(proto) { - case IPPROTO_UDP: - eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP; - /* Fall through */ - case IPPROTO_TCP: - eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2; - break; + swp_spec.tun_l3_proto = skb->protocol; + swp_spec.tun_l4_proto = xo->proto; } + + mlx5e_set_eseg_swp(skb, eseg, &swp_spec); } void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c index be137d4a9169..439bf5953885 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c @@ -181,7 +181,6 @@ static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb, */ nskb->ip_summed = CHECKSUM_PARTIAL; - nskb->xmit_more = 1; nskb->queue_mapping = skb->queue_mapping; } @@ -248,7 +247,7 @@ mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context, sq->stats->tls_resync_bytes += nskb->len; mlx5e_tls_complete_sync_skb(skb, nskb, tcp_seq, headln, cpu_to_be64(info.rcd_sn)); - mlx5e_sq_xmit(sq, nskb, *wqe, *pi); + mlx5e_sq_xmit(sq, nskb, *wqe, *pi, true); mlx5e_sq_fetch_wqe(sq, wqe, pi); return skb; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 46157e2a1e5a..2a5820f8c6eb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -34,6 +34,7 @@ #include <net/pkt_cls.h> #include <linux/mlx5/fs.h> #include <net/vxlan.h> +#include <net/geneve.h> #include <linux/bpf.h> #include <linux/if_bridge.h> #include <net/page_pool.h> @@ -43,6 +44,7 @@ #include "en_rep.h" #include "en_accel/ipsec.h" #include "en_accel/ipsec_rxtx.h" +#include "en_accel/en_accel.h" #include "en_accel/tls.h" #include "accel/ipsec.h" #include "accel/tls.h" @@ -53,6 +55,7 @@ #include "lib/eq.h" #include "en/monitor_stats.h" #include "en/reporter.h" +#include "en/params.h" struct mlx5e_rq_param { u32 rqc[MLX5_ST_SZ_DW(rqc)]; @@ -101,108 +104,9 @@ bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev) return true; } -static u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params) -{ - u16 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); - u16 linear_rq_headroom = params->xdp_prog ? 
- XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM; - u32 frag_sz; - - linear_rq_headroom += NET_IP_ALIGN; - - frag_sz = MLX5_SKB_FRAG_SZ(linear_rq_headroom + hw_mtu); - - if (params->xdp_prog && frag_sz < PAGE_SIZE) - frag_sz = PAGE_SIZE; - - return frag_sz; -} - -static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params) -{ - u32 linear_frag_sz = mlx5e_rx_get_linear_frag_sz(params); - - return MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz); -} - -static bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev, - struct mlx5e_params *params) -{ - u32 frag_sz = mlx5e_rx_get_linear_frag_sz(params); - - return !params->lro_en && frag_sz <= PAGE_SIZE; -} - -#define MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ ((BIT(__mlx5_bit_sz(wq, log_wqe_stride_size)) - 1) + \ - MLX5_MPWQE_LOG_STRIDE_SZ_BASE) -static bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev, - struct mlx5e_params *params) -{ - u32 frag_sz = mlx5e_rx_get_linear_frag_sz(params); - s8 signed_log_num_strides_param; - u8 log_num_strides; - - if (!mlx5e_rx_is_linear_skb(mdev, params)) - return false; - - if (order_base_2(frag_sz) > MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ) - return false; - - if (MLX5_CAP_GEN(mdev, ext_stride_num_range)) - return true; - - log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(frag_sz); - signed_log_num_strides_param = - (s8)log_num_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE; - - return signed_log_num_strides_param >= 0; -} - -static u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params) -{ - if (params->log_rq_mtu_frames < - mlx5e_mpwqe_log_pkts_per_wqe(params) + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW) - return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW; - - return params->log_rq_mtu_frames - mlx5e_mpwqe_log_pkts_per_wqe(params); -} - -static u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev, - struct mlx5e_params *params) -{ - if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params)) - return order_base_2(mlx5e_rx_get_linear_frag_sz(params)); - - return MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev); -} - -static u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev, - struct mlx5e_params *params) -{ - return MLX5_MPWRQ_LOG_WQE_SZ - - mlx5e_mpwqe_get_log_stride_size(mdev, params); -} - -static u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev, - struct mlx5e_params *params) -{ - u16 linear_rq_headroom = params->xdp_prog ? - XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM; - bool is_linear_skb; - - linear_rq_headroom += NET_IP_ALIGN; - - is_linear_skb = (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC) ? - mlx5e_rx_is_linear_skb(mdev, params) : - mlx5e_rx_mpwqe_is_linear_skb(mdev, params); - - return is_linear_skb ? linear_rq_headroom : 0; -} - void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params) { - params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; params->log_rq_mtu_frames = is_kdump_kernel() ? MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE : MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE; @@ -469,7 +373,6 @@ static void mlx5e_init_frags_partition(struct mlx5e_rq *rq) } static int mlx5e_init_di_list(struct mlx5e_rq *rq, - struct mlx5e_params *params, int wq_sz, int cpu) { int len = wq_sz << rq->wqe.info.log_num_frags; @@ -597,7 +500,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, goto err_free; } - err = mlx5e_init_di_list(rq, params, wq_sz, c->cpu); + err = mlx5e_init_di_list(rq, wq_sz, c->cpu); if (err) goto err_free; rq->post_wqes = mlx5e_post_rx_wqes; @@ -615,7 +518,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, goto err_free; } - rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(mdev, params) ? 
+ rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(params) ? mlx5e_skb_from_cqe_linear : mlx5e_skb_from_cqe_nonlinear; rq->mkey_be = c->mkey_be; @@ -902,10 +805,14 @@ static void mlx5e_free_rx_descs(struct mlx5e_rq *rq) if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { struct mlx5_wq_ll *wq = &rq->mpwqe.wq; + u16 head = wq->head; + int i; - /* UMR WQE (if in progress) is always at wq->head */ - if (rq->mpwqe.umr_in_progress) - rq->dealloc_wqe(rq, wq->head); + /* Outstanding UMR WQEs (in progress) start at wq->head */ + for (i = 0; i < rq->mpwqe.umr_in_progress; i++) { + rq->dealloc_wqe(rq, head); + head = mlx5_wq_ll_get_wqe_next_ix(wq, head); + } while (!mlx5_wq_ll_is_empty(wq)) { struct mlx5e_rx_wqe_ll *wqe; @@ -970,16 +877,8 @@ err_free_rq: static void mlx5e_activate_rq(struct mlx5e_rq *rq) { - struct mlx5e_icosq *sq = &rq->channel->icosq; - struct mlx5_wq_cyc *wq = &sq->wq; - struct mlx5e_tx_wqe *nopwqe; - - u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); - set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state); - sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP; - nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc); - mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl); + mlx5e_trigger_irq(&rq->channel->icosq); } static void mlx5e_deactivate_rq(struct mlx5e_rq *rq) @@ -1091,7 +990,7 @@ static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq) static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa) { - u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq); + int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); sq->db.ico_wqe = kvzalloc_node(array_size(wq_sz, sizeof(*sq->db.ico_wqe)), @@ -1527,7 +1426,7 @@ static int mlx5e_open_xdpsq(struct mlx5e_channel *c, dseg->lkey = sq->mkey_be; wi->num_wqebbs = 1; - wi->num_ds = 1; + wi->num_pkts = 1; } } @@ -2053,7 +1952,7 @@ static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev, byte_count += MLX5E_METADATA_ETHER_LEN; #endif - if (mlx5e_rx_is_linear_skb(mdev, params)) { + if (mlx5e_rx_is_linear_skb(params)) { int frag_stride; frag_stride = mlx5e_rx_get_linear_frag_sz(params); @@ -2107,6 +2006,13 @@ static inline u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs) return order_base_2(sz); } +static u8 mlx5e_get_rq_log_wq_sz(void *rqc) +{ + void *wq = MLX5_ADDR_OF(rqc, rqc, wq); + + return MLX5_GET(wq, wq, log_wq_sz); +} + static void mlx5e_build_rq_param(struct mlx5e_priv *priv, struct mlx5e_params *params, struct mlx5e_rq_param *param) @@ -2177,10 +2083,13 @@ static void mlx5e_build_sq_param(struct mlx5e_priv *priv, { void *sqc = param->sqc; void *wq = MLX5_ADDR_OF(sqc, sqc, wq); + bool allow_swp; + allow_swp = mlx5_geneve_tx_allowed(priv->mdev) || + !!MLX5_IPSEC_DEV(priv->mdev); mlx5e_build_sq_param_common(priv, param); MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size); - MLX5_SET(sqc, sqc, allow_swp, !!MLX5_IPSEC_DEV(priv->mdev)); + MLX5_SET(sqc, sqc, allow_swp, allow_swp); } static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv, @@ -2270,13 +2179,28 @@ static void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv, param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE); } +static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5e_params *params, + struct mlx5e_rq_param *rqp) +{ + switch (params->rq_wq_type) { + case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: + return order_base_2(MLX5E_UMR_WQEBBS) + + mlx5e_get_rq_log_wq_sz(rqp->rqc); + default: /* MLX5_WQ_TYPE_CYCLIC */ + return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; + } +} + static void mlx5e_build_channel_param(struct mlx5e_priv *priv, struct mlx5e_params *params, struct mlx5e_channel_param *cparam) { - u8 
icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; + u8 icosq_log_wq_sz; mlx5e_build_rq_param(priv, params, &cparam->rq); + + icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(params, &cparam->rq); + mlx5e_build_sq_param(priv, params, &cparam->sq); mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq); mlx5e_build_icosq_param(priv, icosq_log_wq_sz, &cparam->icosq); @@ -2332,14 +2256,18 @@ static void mlx5e_activate_channels(struct mlx5e_channels *chs) mlx5e_activate_channel(chs->c[i]); } +#define MLX5E_RQ_WQES_TIMEOUT 20000 /* msecs */ + static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs) { int err = 0; int i; - for (i = 0; i < chs->num; i++) - err |= mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq, - err ? 0 : 20000); + for (i = 0; i < chs->num; i++) { + int timeout = err ? 0 : MLX5E_RQ_WQES_TIMEOUT; + + err |= mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq, timeout); + } return err ? -ETIMEDOUT : 0; } @@ -2636,7 +2564,7 @@ static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc) MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO | MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO); MLX5_SET(tirc, tirc, lro_max_ip_payload_size, - (params->lro_wqe_sz - ROUGH_MAX_L2_L3_HDR_SZ) >> 8); + (MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - ROUGH_MAX_L2_L3_HDR_SZ) >> 8); MLX5_SET(tirc, tirc, lro_timeout_period_usecs, params->lro_timeout); } @@ -2811,6 +2739,21 @@ int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv) return 0; } +void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv) +{ + struct mlx5e_params *params = &priv->channels.params; + struct net_device *netdev = priv->netdev; + struct mlx5_core_dev *mdev = priv->mdev; + u16 max_mtu; + + /* MTU range: 68 - hw-specific max */ + netdev->min_mtu = ETH_MIN_MTU; + + mlx5_query_port_max_mtu(mdev, &max_mtu, 1); + netdev->max_mtu = min_t(unsigned int, MLX5E_HW2SW_MTU(params, max_mtu), + ETH_MAX_MTU); +} + static void mlx5e_netdev_set_tcs(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -3775,7 +3718,7 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu, new_channels.params.sw_mtu = new_mtu; if (params->xdp_prog && - !mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) { + !mlx5e_rx_is_linear_skb(&new_channels.params)) { netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP enabled\n", new_mtu, mlx5e_xdp_max_mtu(params)); err = -EINVAL; @@ -4115,6 +4058,12 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv, /* Verify if UDP port is being offloaded by HW */ if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, port)) return features; + +#if IS_ENABLED(CONFIG_GENEVE) + /* Support Geneve offload for default UDP port */ + if (port == GENEVE_UDP_PORT && mlx5_geneve_tx_allowed(priv->mdev)) + return features; +#endif } out: @@ -4210,7 +4159,7 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog) new_channels.params = priv->channels.params; new_channels.params.xdp_prog = prog; - if (!mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) { + if (!mlx5e_rx_is_linear_skb(&new_channels.params)) { netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n", new_channels.params.sw_mtu, mlx5e_xdp_max_mtu(&new_channels.params)); @@ -4264,7 +4213,7 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog) mlx5e_set_rq_type(priv->mdev, &priv->channels.params); if (was_opened && reset) - mlx5e_open_locked(netdev); + err = mlx5e_open_locked(netdev); if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset) goto unlock; @@ -4554,7 +4503,7 @@ void 
mlx5e_build_rq_params(struct mlx5_core_dev *mdev, if (!slow_pci_heuristic(mdev) && mlx5e_striding_rq_possible(mdev, params) && (mlx5e_rx_mpwqe_is_linear_skb(mdev, params) || - !mlx5e_rx_is_linear_skb(mdev, params))) + !mlx5e_rx_is_linear_skb(params))) MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true); mlx5e_set_rq_type(mdev, params); mlx5e_init_rq_type_params(mdev, params); @@ -4686,7 +4635,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX; - if (mlx5_vxlan_allowed(mdev->vxlan) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) { + if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev) || + MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) { netdev->hw_enc_features |= NETIF_F_IP_CSUM; netdev->hw_enc_features |= NETIF_F_IPV6_CSUM; netdev->hw_enc_features |= NETIF_F_TSO; @@ -4694,7 +4644,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL; } - if (mlx5_vxlan_allowed(mdev->vxlan)) { + if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev)) { netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM; netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL | @@ -4913,7 +4863,6 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv) { struct net_device *netdev = priv->netdev; struct mlx5_core_dev *mdev = priv->mdev; - u16 max_mtu; mlx5e_init_l2_addr(priv); @@ -4921,10 +4870,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv) if (!netif_running(netdev)) mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN); - /* MTU range: 68 - hw-specific max */ - netdev->min_mtu = ETH_MIN_MTU; - mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1); - netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu); + mlx5e_set_netdev_mtu_boundaries(priv); mlx5e_set_dev_port_mtu(priv); mlx5_lag_add(mdev, netdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index a66b6ed80b30..6bfdefa8b9f4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -795,7 +795,8 @@ static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb, struct mlx5e_priv *priv = netdev_priv(rpriv->netdev); struct net_device *netdev = netdev_notifier_info_to_dev(ptr); - if (!mlx5e_tc_tun_device_to_offload(priv, netdev)) + if (!mlx5e_tc_tun_device_to_offload(priv, netdev) && + !is_vlan_dev(netdev)) return NOTIFY_OK; switch (event) { @@ -1623,13 +1624,7 @@ static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv) static void mlx5e_vf_rep_enable(struct mlx5e_priv *priv) { - struct net_device *netdev = priv->netdev; - struct mlx5_core_dev *mdev = priv->mdev; - u16 max_mtu; - - netdev->min_mtu = ETH_MIN_MTU; - mlx5_query_port_max_mtu(mdev, &max_mtu, 1); - netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu); + mlx5e_set_netdev_mtu_boundaries(priv); } static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event, void *data) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index c3b3002ff62f..13133e7f088e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -409,14 +409,15 @@ mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, bool recycle mlx5e_page_release(rq, &dma_info[i], recycle); } -static void mlx5e_post_rx_mpwqe(struct mlx5e_rq 
*rq) +static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq, u8 n) { struct mlx5_wq_ll *wq = &rq->mpwqe.wq; - struct mlx5e_rx_wqe_ll *wqe = mlx5_wq_ll_get_wqe(wq, wq->head); - rq->mpwqe.umr_in_progress = false; + do { + u16 next_wqe_index = mlx5_wq_ll_get_wqe_next_ix(wq, wq->head); - mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index)); + mlx5_wq_ll_push(wq, next_wqe_index); + } while (--n); /* ensure wqes are visible to device before updating doorbell record */ dma_wmb(); @@ -426,7 +427,7 @@ static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq) static inline u16 mlx5e_icosq_wrap_cnt(struct mlx5e_icosq *sq) { - return sq->pc >> MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; + return mlx5_wq_cyc_get_ctr_wrap_cnt(&sq->wq, sq->pc); } static inline void mlx5e_fill_icosq_frag_edge(struct mlx5e_icosq *sq, @@ -478,8 +479,6 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) bitmap_zero(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE); wi->consumed_strides = 0; - rq->mpwqe.umr_in_progress = true; - umr_wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | MLX5_OPCODE_UMR); @@ -487,7 +486,8 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR; sq->pc += MLX5E_UMR_WQEBBS; - mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &umr_wqe->ctrl); + + sq->doorbell_cseg = &umr_wqe->ctrl; return 0; @@ -542,37 +542,13 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq) return !!err; } -static inline void mlx5e_poll_ico_single_cqe(struct mlx5e_cq *cq, - struct mlx5e_icosq *sq, - struct mlx5e_rq *rq, - struct mlx5_cqe64 *cqe) -{ - struct mlx5_wq_cyc *wq = &sq->wq; - u16 ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter)); - struct mlx5e_sq_wqe_info *icowi = &sq->db.ico_wqe[ci]; - - mlx5_cqwq_pop(&cq->wq); - - if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) { - netdev_WARN_ONCE(cq->channel->netdev, - "Bad OP in ICOSQ CQE: 0x%x\n", get_cqe_opcode(cqe)); - return; - } - - if (likely(icowi->opcode == MLX5_OPCODE_UMR)) { - mlx5e_post_rx_mpwqe(rq); - return; - } - - if (unlikely(icowi->opcode != MLX5_OPCODE_NOP)) - netdev_WARN_ONCE(cq->channel->netdev, - "Bad OPCODE in ICOSQ WQE info: 0x%x\n", icowi->opcode); -} - static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq) { struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq); struct mlx5_cqe64 *cqe; + u8 completed_umr = 0; + u16 sqcc; + int i; if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))) return; @@ -581,28 +557,96 @@ static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq) if (likely(!cqe)) return; - /* by design, there's only a single cqe */ - mlx5e_poll_ico_single_cqe(cq, sq, rq, cqe); + /* sq->cc must be updated only after mlx5_cqwq_update_db_record(), + * otherwise a cq overrun may occur + */ + sqcc = sq->cc; + + i = 0; + do { + u16 wqe_counter; + bool last_wqe; + + mlx5_cqwq_pop(&cq->wq); + + wqe_counter = be16_to_cpu(cqe->wqe_counter); + + if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) { + netdev_WARN_ONCE(cq->channel->netdev, + "Bad OP in ICOSQ CQE: 0x%x\n", get_cqe_opcode(cqe)); + break; + } + do { + struct mlx5e_sq_wqe_info *wi; + u16 ci; + + last_wqe = (sqcc == wqe_counter); + + ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc); + wi = &sq->db.ico_wqe[ci]; + + if (likely(wi->opcode == MLX5_OPCODE_UMR)) { + sqcc += MLX5E_UMR_WQEBBS; + completed_umr++; + } else if (likely(wi->opcode == MLX5_OPCODE_NOP)) { + sqcc++; + } else { + netdev_WARN_ONCE(cq->channel->netdev, + "Bad OPCODE in ICOSQ WQE info: 0x%x\n", + wi->opcode); + } + + 
} while (!last_wqe); + + } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); + + sq->cc = sqcc; mlx5_cqwq_update_db_record(&cq->wq); + + if (likely(completed_umr)) { + mlx5e_post_rx_mpwqe(rq, completed_umr); + rq->mpwqe.umr_in_progress -= completed_umr; + } } bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq) { + struct mlx5e_icosq *sq = &rq->channel->icosq; struct mlx5_wq_ll *wq = &rq->mpwqe.wq; + u8 missing, i; + u16 head; if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state))) return false; - mlx5e_poll_ico_cq(&rq->channel->icosq.cq, rq); + mlx5e_poll_ico_cq(&sq->cq, rq); + + missing = mlx5_wq_ll_missing(wq) - rq->mpwqe.umr_in_progress; - if (mlx5_wq_ll_is_full(wq)) + if (unlikely(rq->mpwqe.umr_in_progress > rq->mpwqe.umr_last_bulk)) + rq->stats->congst_umr++; + +#define UMR_WQE_BULK (2) + if (likely(missing < UMR_WQE_BULK)) return false; - if (!rq->mpwqe.umr_in_progress) - mlx5e_alloc_rx_mpwqe(rq, wq->head); - else - rq->stats->congst_umr += mlx5_wq_ll_missing(wq) > 2; + head = rq->mpwqe.actual_wq_head; + i = missing; + do { + if (unlikely(mlx5e_alloc_rx_mpwqe(rq, head))) + break; + head = mlx5_wq_ll_get_wqe_next_ix(wq, head); + } while (--i); + + rq->mpwqe.umr_last_bulk = missing - i; + if (sq->doorbell_cseg) { + mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, sq->doorbell_cseg); + sq->doorbell_cseg = NULL; + } + + rq->mpwqe.umr_in_progress += rq->mpwqe.umr_last_bulk; + rq->mpwqe.actual_wq_head = head; return false; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index b75aa8b8bf04..483d321d2151 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -65,6 +65,8 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_mpwqe) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_inlnw) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) }, @@ -79,6 +81,8 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_mpwqe) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_inlnw) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) }, @@ -89,7 +93,6 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_page_reuse) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) }, @@ -160,6 +163,8 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->rx_xdp_drop += rq_stats->xdp_drop; s->rx_xdp_redirect += 
rq_stats->xdp_redirect; s->rx_xdp_tx_xmit += xdpsq_stats->xmit; + s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe; + s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw; s->rx_xdp_tx_full += xdpsq_stats->full; s->rx_xdp_tx_err += xdpsq_stats->err; s->rx_xdp_tx_cqe += xdpsq_stats->cqes; @@ -170,7 +175,6 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->rx_buff_alloc_err += rq_stats->buff_alloc_err; s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks; s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts; - s->rx_page_reuse += rq_stats->page_reuse; s->rx_cache_reuse += rq_stats->cache_reuse; s->rx_cache_full += rq_stats->cache_full; s->rx_cache_empty += rq_stats->cache_empty; @@ -185,6 +189,8 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->ch_eq_rearm += ch_stats->eq_rearm; /* xdp redirect */ s->tx_xdp_xmit += xdpsq_red_stats->xmit; + s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe; + s->tx_xdp_inlnw += xdpsq_red_stats->inlnw; s->tx_xdp_full += xdpsq_red_stats->full; s->tx_xdp_err += xdpsq_red_stats->err; s->tx_xdp_cqes += xdpsq_red_stats->cqes; @@ -1212,7 +1218,6 @@ static const struct counter_desc rq_stats_desc[] = { { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) }, - { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, page_reuse) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) }, @@ -1245,6 +1250,8 @@ static const struct counter_desc sq_stats_desc[] = { static const struct counter_desc rq_xdpsq_stats_desc[] = { { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) }, + { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) }, + { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) }, { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) }, { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) }, { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) }, @@ -1252,6 +1259,8 @@ static const struct counter_desc rq_xdpsq_stats_desc[] = { static const struct counter_desc xdpsq_stats_desc[] = { { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) }, + { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) }, + { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) }, { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) }, { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) }, { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) }, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 16c3b785f282..cdddcc46971b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -77,6 +77,8 @@ struct mlx5e_sw_stats { u64 rx_xdp_drop; u64 rx_xdp_redirect; u64 rx_xdp_tx_xmit; + u64 rx_xdp_tx_mpwqe; + u64 rx_xdp_tx_inlnw; u64 rx_xdp_tx_full; u64 rx_xdp_tx_err; u64 rx_xdp_tx_cqe; @@ -91,6 +93,8 @@ struct mlx5e_sw_stats { u64 tx_queue_wake; u64 tx_cqe_err; u64 tx_xdp_xmit; + u64 tx_xdp_mpwqe; + u64 tx_xdp_inlnw; u64 tx_xdp_full; u64 tx_xdp_err; u64 tx_xdp_cqes; @@ -101,7 +105,6 @@ struct mlx5e_sw_stats { u64 rx_buff_alloc_err; u64 rx_cqe_compress_blks; u64 rx_cqe_compress_pkts; - u64 rx_page_reuse; u64 rx_cache_reuse; u64 rx_cache_full; u64 rx_cache_empty; @@ -201,7 +204,6 @@ struct mlx5e_rq_stats 
{ u64 buff_alloc_err; u64 cqe_compress_blks; u64 cqe_compress_pkts; - u64 page_reuse; u64 cache_reuse; u64 cache_full; u64 cache_empty; @@ -241,6 +243,8 @@ struct mlx5e_sq_stats { struct mlx5e_xdpsq_stats { u64 xmit; + u64 mpwqe; + u64 inlnw; u64 full; u64 err; /* dirtied @completion */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index d75dc44eb2ff..a2070817a627 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -44,6 +44,7 @@ #include <net/tc_act/tc_pedit.h> #include <net/tc_act/tc_csum.h> #include <net/arp.h> +#include <net/ipv6_stubs.h> #include "en.h" #include "en_rep.h" #include "en_tc.h" @@ -1437,6 +1438,26 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv, return 0; } +static void *get_match_headers_criteria(u32 flags, + struct mlx5_flow_spec *spec) +{ + return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ? + MLX5_ADDR_OF(fte_match_param, spec->match_criteria, + inner_headers) : + MLX5_ADDR_OF(fte_match_param, spec->match_criteria, + outer_headers); +} + +static void *get_match_headers_value(u32 flags, + struct mlx5_flow_spec *spec) +{ + return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ? + MLX5_ADDR_OF(fte_match_param, spec->match_value, + inner_headers) : + MLX5_ADDR_OF(fte_match_param, spec->match_value, + outer_headers); +} + static int __parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec, struct tc_cls_flower_offload *f, @@ -1502,10 +1523,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, /* In decap flow, header pointers should point to the inner * headers, outer header were already set by parse_tunnel_attr */ - headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, - inner_headers); - headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, - inner_headers); + headers_c = get_match_headers_criteria(MLX5_FLOW_CONTEXT_ACTION_DECAP, + spec); + headers_v = get_match_headers_value(MLX5_FLOW_CONTEXT_ACTION_DECAP, + spec); } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { @@ -1520,11 +1541,23 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, if (match.mask->n_proto) *match_level = MLX5_MATCH_L2; } - - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) || + is_vlan_dev(filter_dev)) { + struct flow_dissector_key_vlan filter_dev_mask; + struct flow_dissector_key_vlan filter_dev_key; struct flow_match_vlan match; - flow_rule_match_vlan(rule, &match); + if (is_vlan_dev(filter_dev)) { + match.key = &filter_dev_key; + match.key->vlan_id = vlan_dev_vlan_id(filter_dev); + match.key->vlan_tpid = vlan_dev_vlan_proto(filter_dev); + match.key->vlan_priority = 0; + match.mask = &filter_dev_mask; + memset(match.mask, 0xff, sizeof(*match.mask)); + match.mask->vlan_priority = 0; + } else { + flow_rule_match_vlan(rule, &match); + } if (match.mask->vlan_id || match.mask->vlan_priority || match.mask->vlan_tpid) { @@ -1827,6 +1860,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct pedit_headers { struct ethhdr eth; + struct vlan_hdr vlan; struct iphdr ip4; struct ipv6hdr ip6; struct tcphdr tcp; @@ -1873,38 +1907,73 @@ struct mlx5_fields { u8 field; u8 size; u32 offset; + u32 match_offset; }; -#define OFFLOAD(fw_field, size, field, off) \ - {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, size, offsetof(struct pedit_headers, field) + (off)} +#define OFFLOAD(fw_field, size, field, off, match_field) \ + {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, 
size, \ + offsetof(struct pedit_headers, field) + (off), \ + MLX5_BYTE_OFF(fte_match_set_lyr_2_4, match_field)} + +static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp, + void *matchmaskp, int size) +{ + bool same = false; + + switch (size) { + case sizeof(u8): + same = ((*(u8 *)valp) & (*(u8 *)maskp)) == + ((*(u8 *)matchvalp) & (*(u8 *)matchmaskp)); + break; + case sizeof(u16): + same = ((*(u16 *)valp) & (*(u16 *)maskp)) == + ((*(u16 *)matchvalp) & (*(u16 *)matchmaskp)); + break; + case sizeof(u32): + same = ((*(u32 *)valp) & (*(u32 *)maskp)) == + ((*(u32 *)matchvalp) & (*(u32 *)matchmaskp)); + break; + } + + return same; +} static struct mlx5_fields fields[] = { - OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0), - OFFLOAD(DMAC_15_0, 2, eth.h_dest[4], 0), - OFFLOAD(SMAC_47_16, 4, eth.h_source[0], 0), - OFFLOAD(SMAC_15_0, 2, eth.h_source[4], 0), - OFFLOAD(ETHERTYPE, 2, eth.h_proto, 0), - - OFFLOAD(IP_TTL, 1, ip4.ttl, 0), - OFFLOAD(SIPV4, 4, ip4.saddr, 0), - OFFLOAD(DIPV4, 4, ip4.daddr, 0), - - OFFLOAD(SIPV6_127_96, 4, ip6.saddr.s6_addr32[0], 0), - OFFLOAD(SIPV6_95_64, 4, ip6.saddr.s6_addr32[1], 0), - OFFLOAD(SIPV6_63_32, 4, ip6.saddr.s6_addr32[2], 0), - OFFLOAD(SIPV6_31_0, 4, ip6.saddr.s6_addr32[3], 0), - OFFLOAD(DIPV6_127_96, 4, ip6.daddr.s6_addr32[0], 0), - OFFLOAD(DIPV6_95_64, 4, ip6.daddr.s6_addr32[1], 0), - OFFLOAD(DIPV6_63_32, 4, ip6.daddr.s6_addr32[2], 0), - OFFLOAD(DIPV6_31_0, 4, ip6.daddr.s6_addr32[3], 0), - OFFLOAD(IPV6_HOPLIMIT, 1, ip6.hop_limit, 0), - - OFFLOAD(TCP_SPORT, 2, tcp.source, 0), - OFFLOAD(TCP_DPORT, 2, tcp.dest, 0), - OFFLOAD(TCP_FLAGS, 1, tcp.ack_seq, 5), - - OFFLOAD(UDP_SPORT, 2, udp.source, 0), - OFFLOAD(UDP_DPORT, 2, udp.dest, 0), + OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0, dmac_47_16), + OFFLOAD(DMAC_15_0, 2, eth.h_dest[4], 0, dmac_15_0), + OFFLOAD(SMAC_47_16, 4, eth.h_source[0], 0, smac_47_16), + OFFLOAD(SMAC_15_0, 2, eth.h_source[4], 0, smac_15_0), + OFFLOAD(ETHERTYPE, 2, eth.h_proto, 0, ethertype), + OFFLOAD(FIRST_VID, 2, vlan.h_vlan_TCI, 0, first_vid), + + OFFLOAD(IP_TTL, 1, ip4.ttl, 0, ttl_hoplimit), + OFFLOAD(SIPV4, 4, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4), + OFFLOAD(DIPV4, 4, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4), + + OFFLOAD(SIPV6_127_96, 4, ip6.saddr.s6_addr32[0], 0, + src_ipv4_src_ipv6.ipv6_layout.ipv6[0]), + OFFLOAD(SIPV6_95_64, 4, ip6.saddr.s6_addr32[1], 0, + src_ipv4_src_ipv6.ipv6_layout.ipv6[4]), + OFFLOAD(SIPV6_63_32, 4, ip6.saddr.s6_addr32[2], 0, + src_ipv4_src_ipv6.ipv6_layout.ipv6[8]), + OFFLOAD(SIPV6_31_0, 4, ip6.saddr.s6_addr32[3], 0, + src_ipv4_src_ipv6.ipv6_layout.ipv6[12]), + OFFLOAD(DIPV6_127_96, 4, ip6.daddr.s6_addr32[0], 0, + dst_ipv4_dst_ipv6.ipv6_layout.ipv6[0]), + OFFLOAD(DIPV6_95_64, 4, ip6.daddr.s6_addr32[1], 0, + dst_ipv4_dst_ipv6.ipv6_layout.ipv6[4]), + OFFLOAD(DIPV6_63_32, 4, ip6.daddr.s6_addr32[2], 0, + dst_ipv4_dst_ipv6.ipv6_layout.ipv6[8]), + OFFLOAD(DIPV6_31_0, 4, ip6.daddr.s6_addr32[3], 0, + dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]), + OFFLOAD(IPV6_HOPLIMIT, 1, ip6.hop_limit, 0, ttl_hoplimit), + + OFFLOAD(TCP_SPORT, 2, tcp.source, 0, tcp_sport), + OFFLOAD(TCP_DPORT, 2, tcp.dest, 0, tcp_dport), + OFFLOAD(TCP_FLAGS, 1, tcp.ack_seq, 5, tcp_flags), + + OFFLOAD(UDP_SPORT, 2, udp.source, 0, udp_sport), + OFFLOAD(UDP_DPORT, 2, udp.dest, 0, udp_dport), }; /* On input attr->max_mod_hdr_actions tells how many HW actions can be parsed at @@ -1913,9 +1982,14 @@ static struct mlx5_fields fields[] = { */ static int offload_pedit_fields(struct pedit_headers_action *hdrs, struct mlx5e_tc_flow_parse_attr 
*parse_attr, + u32 *action_flags, struct netlink_ext_ack *extack) { struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals; + void *headers_c = get_match_headers_criteria(*action_flags, + &parse_attr->spec); + void *headers_v = get_match_headers_value(*action_flags, + &parse_attr->spec); int i, action_size, nactions, max_actions, first, last, next_z; void *s_masks_p, *a_masks_p, *vals_p; struct mlx5_fields *f; @@ -1939,6 +2013,8 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs, nactions = parse_attr->num_mod_hdr_actions; for (i = 0; i < ARRAY_SIZE(fields); i++) { + bool skip; + f = &fields[i]; /* avoid seeing bits set from previous iterations */ s_mask = 0; @@ -1967,19 +2043,34 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs, return -EOPNOTSUPP; } + skip = false; if (s_mask) { + void *match_mask = headers_c + f->match_offset; + void *match_val = headers_v + f->match_offset; + cmd = MLX5_ACTION_TYPE_SET; mask = s_mask; vals_p = (void *)set_vals + f->offset; + /* don't rewrite if we have a match on the same value */ + if (cmp_val_mask(vals_p, s_masks_p, match_val, + match_mask, f->size)) + skip = true; /* clear to denote we consumed this field */ memset(s_masks_p, 0, f->size); } else { + u32 zero = 0; + cmd = MLX5_ACTION_TYPE_ADD; mask = a_mask; vals_p = (void *)add_vals + f->offset; + /* add 0 is no change */ + if (!memcmp(vals_p, &zero, f->size)) + skip = true; /* clear to denote we consumed this field */ memset(a_masks_p, 0, f->size); } + if (skip) + continue; field_bsize = f->size * BITS_PER_BYTE; @@ -2026,6 +2117,15 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs, return 0; } +static int mlx5e_flow_namespace_max_modify_action(struct mlx5_core_dev *mdev, + int namespace) +{ + if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */ + return MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, max_modify_header_actions); + else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */ + return MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_modify_header_actions); +} + static int alloc_mod_hdr_actions(struct mlx5e_priv *priv, struct pedit_headers_action *hdrs, int namespace, @@ -2037,11 +2137,7 @@ static int alloc_mod_hdr_actions(struct mlx5e_priv *priv, hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits; action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto); - if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */ - max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, max_modify_header_actions); - else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */ - max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, max_modify_header_actions); - + max_actions = mlx5e_flow_namespace_max_modify_action(priv->mdev, namespace); /* can get up to crazingly 16 HW actions in 32 bits pedit SW key */ max_actions = min(max_actions, nkeys * 16); @@ -2074,6 +2170,12 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv, goto out_err; } + if (!mlx5e_flow_namespace_max_modify_action(priv->mdev, namespace)) { + NL_SET_ERR_MSG_MOD(extack, + "The pedit offload action is not supported"); + goto out_err; + } + mask = act->mangle.mask; val = act->mangle.val; offset = act->mangle.offset; @@ -2092,6 +2194,7 @@ out_err: static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace, struct mlx5e_tc_flow_parse_attr *parse_attr, struct pedit_headers_action *hdrs, + u32 *action_flags, struct netlink_ext_ack *extack) { struct pedit_headers *cmd_masks; @@ -2104,7 +2207,7 @@ static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int 
namespace, goto out_err; } - err = offload_pedit_fields(hdrs, parse_attr, extack); + err = offload_pedit_fields(hdrs, parse_attr, action_flags, extack); if (err < 0) goto out_dealloc_parsed_actions; @@ -2216,11 +2319,7 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec, u8 ip_proto; int i; - if (actions & MLX5_FLOW_CONTEXT_ACTION_DECAP) - headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, inner_headers); - else - headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers); - + headers_v = get_match_headers_value(actions, spec); ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype); /* for non-IP we only re-write MACs, so we're okay */ @@ -2266,7 +2365,8 @@ static bool actions_match_supported(struct mlx5e_priv *priv, actions = flow->nic_attr->action; if (flow->flags & MLX5E_TC_FLOW_EGRESS && - !(actions & MLX5_FLOW_CONTEXT_ACTION_DECAP)) + !((actions & MLX5_FLOW_CONTEXT_ACTION_DECAP) || + (actions & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP))) return false; if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) @@ -2291,6 +2391,50 @@ static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv) return (fsystem_guid == psystem_guid); } +static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace, + const struct flow_action_entry *act, + struct mlx5e_tc_flow_parse_attr *parse_attr, + struct pedit_headers_action *hdrs, + u32 *action, struct netlink_ext_ack *extack) +{ + u16 mask16 = VLAN_VID_MASK; + u16 val16 = act->vlan.vid & VLAN_VID_MASK; + const struct flow_action_entry pedit_act = { + .id = FLOW_ACTION_MANGLE, + .mangle.htype = FLOW_ACT_MANGLE_HDR_TYPE_ETH, + .mangle.offset = offsetof(struct vlan_ethhdr, h_vlan_TCI), + .mangle.mask = ~(u32)be16_to_cpu(*(__be16 *)&mask16), + .mangle.val = (u32)be16_to_cpu(*(__be16 *)&val16), + }; + u8 match_prio_mask, match_prio_val; + void *headers_c, *headers_v; + int err; + + headers_c = get_match_headers_criteria(*action, &parse_attr->spec); + headers_v = get_match_headers_value(*action, &parse_attr->spec); + + if (!(MLX5_GET(fte_match_set_lyr_2_4, headers_c, cvlan_tag) && + MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag))) { + NL_SET_ERR_MSG_MOD(extack, + "VLAN rewrite action must have VLAN protocol match"); + return -EOPNOTSUPP; + } + + match_prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio); + match_prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio); + if (act->vlan.prio != (match_prio_val & match_prio_mask)) { + NL_SET_ERR_MSG_MOD(extack, + "Changing VLAN prio is not supported"); + return -EOPNOTSUPP; + } + + err = parse_tc_pedit_action(priv, &pedit_act, namespace, parse_attr, + hdrs, NULL); + *action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; + + return err; +} + static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct flow_action *flow_action, struct mlx5e_tc_flow_parse_attr *parse_attr, @@ -2326,6 +2470,15 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; break; + case FLOW_ACTION_VLAN_MANGLE: + err = add_vlan_rewrite_action(priv, + MLX5_FLOW_NAMESPACE_KERNEL, + act, parse_attr, hdrs, + &action, extack); + if (err) + return err; + + break; case FLOW_ACTION_CSUM: if (csum_offload_supported(priv, action, act->csum_flags, @@ -2365,16 +2518,22 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, } break; default: - return -EINVAL; + NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported"); + return -EOPNOTSUPP; } } if 
(hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits || hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) { err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL, - parse_attr, hdrs, extack); + parse_attr, hdrs, &action, extack); if (err) return err; + /* in case all pedit actions are skipped, remove the MOD_HDR + * flag. + */ + if (parse_attr->num_mod_hdr_actions == 0) + action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; } attr->action = action; @@ -2544,8 +2703,7 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv, } break; default: - /* action is FLOW_ACT_VLAN_MANGLE */ - return -EOPNOTSUPP; + return -EINVAL; } attr->total_vlan = vlan_idx + 1; @@ -2553,15 +2711,60 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv, return 0; } +static int add_vlan_push_action(struct mlx5e_priv *priv, + struct mlx5_esw_flow_attr *attr, + struct net_device **out_dev, + u32 *action) +{ + struct net_device *vlan_dev = *out_dev; + struct flow_action_entry vlan_act = { + .id = FLOW_ACTION_VLAN_PUSH, + .vlan.vid = vlan_dev_vlan_id(vlan_dev), + .vlan.proto = vlan_dev_vlan_proto(vlan_dev), + .vlan.prio = 0, + }; + int err; + + err = parse_tc_vlan_action(priv, &vlan_act, attr, action); + if (err) + return err; + + *out_dev = dev_get_by_index_rcu(dev_net(vlan_dev), + dev_get_iflink(vlan_dev)); + if (is_vlan_dev(*out_dev)) + err = add_vlan_push_action(priv, attr, out_dev, action); + + return err; +} + +static int add_vlan_pop_action(struct mlx5e_priv *priv, + struct mlx5_esw_flow_attr *attr, + u32 *action) +{ + int nest_level = vlan_get_encap_level(attr->parse_attr->filter_dev); + struct flow_action_entry vlan_act = { + .id = FLOW_ACTION_VLAN_POP, + }; + int err = 0; + + while (nest_level--) { + err = parse_tc_vlan_action(priv, &vlan_act, attr, action); + if (err) + return err; + } + + return err; +} + static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct flow_action *flow_action, - struct mlx5e_tc_flow_parse_attr *parse_attr, struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack) { struct pedit_headers_action hdrs[2] = {}; struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5_esw_flow_attr *attr = flow->esw_attr; + struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr; struct mlx5e_rep_priv *rpriv = priv->ppriv; const struct ip_tunnel_info *info = NULL; const struct flow_action_entry *act; @@ -2633,6 +2836,20 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, uplink_upper == out_dev) out_dev = uplink_dev; + if (is_vlan_dev(out_dev)) { + err = add_vlan_push_action(priv, attr, + &out_dev, + &action); + if (err) + return err; + } + if (is_vlan_dev(parse_attr->filter_dev)) { + err = add_vlan_pop_action(priv, attr, + &action); + if (err) + return err; + } + if (!mlx5e_eswitch_rep(out_dev)) return -EOPNOTSUPP; @@ -2646,7 +2863,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, out_dev->ifindex; parse_attr->tun_info[attr->out_count] = *info; encap = false; - attr->parse_attr = parse_attr; attr->dests[attr->out_count].flags |= MLX5_ESW_DEST_ENCAP; attr->out_count++; @@ -2679,7 +2895,27 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, break; case FLOW_ACTION_VLAN_PUSH: case FLOW_ACTION_VLAN_POP: - err = parse_tc_vlan_action(priv, act, attr, &action); + if (act->id == FLOW_ACTION_VLAN_PUSH && + (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)) { + /* Replace vlan pop+push with vlan modify */ + action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; + err = add_vlan_rewrite_action(priv, + MLX5_FLOW_NAMESPACE_FDB, + act, parse_attr, hdrs, + &action, extack); + } else { + err = 
parse_tc_vlan_action(priv, act, attr, &action); + } + if (err) + return err; + + attr->split_count = attr->out_count; + break; + case FLOW_ACTION_VLAN_MANGLE: + err = add_vlan_rewrite_action(priv, + MLX5_FLOW_NAMESPACE_FDB, + act, parse_attr, hdrs, + &action, extack); if (err) return err; @@ -2705,16 +2941,27 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, break; } default: - return -EINVAL; + NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported"); + return -EOPNOTSUPP; } } if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits || hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) { err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB, - parse_attr, hdrs, extack); + parse_attr, hdrs, &action, extack); if (err) return err; + /* in case all pedit actions are skipped, remove the MOD_HDR + * flag. we might have set split_count either by pedit or + * pop/push. if there is no pop/push either, reset it too. + */ + if (parse_attr->num_mod_hdr_actions == 0) { + action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; + if (!((action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) || + (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH))) + attr->split_count = 0; + } } attr->action = action; @@ -2883,7 +3130,7 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv, if (err) goto err_free; - err = parse_tc_fdb_actions(priv, &rule->action, parse_attr, flow, extack); + err = parse_tc_fdb_actions(priv, &rule->action, flow, extack); if (err) goto err_free; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 25a8f8260c14..7b61126fcec9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -32,6 +32,7 @@ #include <linux/tcp.h> #include <linux/if_vlan.h> +#include <net/geneve.h> #include <net/dsfield.h> #include "en.h" #include "ipoib/ipoib.h" @@ -110,11 +111,10 @@ static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb #endif u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, - struct net_device *sb_dev, - select_queue_fallback_t fallback) + struct net_device *sb_dev) { + int channel_ix = netdev_pick_tx(dev, skb, NULL); struct mlx5e_priv *priv = netdev_priv(dev); - int channel_ix = fallback(dev, skb, NULL); u16 num_channels; int up = 0; @@ -163,7 +163,7 @@ static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode, case MLX5_INLINE_MODE_NONE: return 0; case MLX5_INLINE_MODE_TCP_UDP: - hlen = eth_get_headlen(skb->data, skb_headlen(skb)); + hlen = eth_get_headlen(skb->dev, skb->data, skb_headlen(skb)); if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb)) hlen += VLAN_HLEN; break; @@ -297,7 +297,8 @@ static inline void mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq, static inline void mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb, u8 opcode, u16 ds_cnt, u8 num_wqebbs, u32 num_bytes, u8 num_dma, - struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg) + struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg, + bool xmit_more) { struct mlx5_wq_cyc *wq = &sq->wq; @@ -320,14 +321,14 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb, sq->stats->stopped++; } - if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) + if (!xmit_more || netif_xmit_stopped(sq->txq)) mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg); } #define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start)) netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, - struct mlx5e_tx_wqe *wqe, u16 pi) + struct mlx5e_tx_wqe 
*wqe, u16 pi, bool xmit_more) { struct mlx5_wq_cyc *wq = &sq->wq; struct mlx5_wqe_ctrl_seg *cseg; @@ -360,7 +361,7 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, } stats->bytes += num_bytes; - stats->xmit_more += skb->xmit_more; + stats->xmit_more += netdev_xmit_more(); headlen = skb->len - ihs - skb->data_len; ds_cnt += !!headlen; @@ -392,6 +393,10 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, eseg = &wqe->eth; dseg = wqe->data; +#if IS_ENABLED(CONFIG_GENEVE) + if (skb->encapsulation) + mlx5e_tx_tunnel_accel(skb, eseg); +#endif mlx5e_txwqe_build_eseg_csum(sq, skb, eseg); eseg->mss = mss; @@ -419,7 +424,7 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, goto err_drop; mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes, - num_dma, wi, cseg); + num_dma, wi, cseg, xmit_more); return NETDEV_TX_OK; @@ -445,7 +450,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev) if (unlikely(!skb)) return NETDEV_TX_OK; - return mlx5e_sq_xmit(sq, skb, wqe, pi); + return mlx5e_sq_xmit(sq, skb, wqe, pi, netdev_xmit_more()); } static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq, @@ -655,7 +660,7 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, } stats->bytes += num_bytes; - stats->xmit_more += skb->xmit_more; + stats->xmit_more += netdev_xmit_more(); headlen = skb->len - ihs - skb->data_len; ds_cnt += !!headlen; @@ -700,7 +705,7 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, goto err_drop; mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes, - num_dma, wi, cseg); + num_dma, wi, cseg, false); return NETDEV_TX_OK; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index b4af5e19f6ac..f9862bf75491 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -71,6 +71,17 @@ static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq) net_dim(&rq->dim, dim_sample); } +void mlx5e_trigger_irq(struct mlx5e_icosq *sq) +{ + struct mlx5_wq_cyc *wq = &sq->wq; + struct mlx5e_tx_wqe *nopwqe; + u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); + + sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP; + nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc); + mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl); +} + int mlx5e_napi_poll(struct napi_struct *napi, int budget) { struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index bb6e5b5d9681..e9837aeb7088 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -707,7 +707,7 @@ void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm) __raw_writel((__force u32)cpu_to_be32(val), addr); /* We still want ordering, just not swabbing, so add a barrier */ - mb(); + wmb(); } EXPORT_SYMBOL(mlx5_eq_update_ci); @@ -900,14 +900,12 @@ mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector) } EXPORT_SYMBOL(mlx5_comp_irq_get_affinity_mask); +#ifdef CONFIG_RFS_ACCEL struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev) { -#ifdef CONFIG_RFS_ACCEL return dev->priv.eq_table->rmap; -#else - return NULL; -#endif } +#endif struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h 
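
Two of the en_tc.c changes above carry logic worth spelling out. offload_pedit_fields() now skips emitting a hardware rewrite action when the flow already matches exactly the value being set (cmp_val_mask() compares value and match under their masks, dispatched on field width) and likewise skips an ADD of zero; when every pedit field ends up skipped, the callers clear the MOD_HDR flag entirely. Below is a minimal standalone sketch of that skip test — plain stand-in buffers and invented names, not the driver's real fte_match_param layout:

/* build: cc -o pedit_skip pedit_skip.c */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Masked compare, dispatched on field width like cmp_val_mask(). */
static bool same_under_mask(const void *val, const void *mask,
                            const void *mval, const void *mmask, int size)
{
        switch (size) {
        case sizeof(uint8_t):
                return ((*(const uint8_t *)val) & (*(const uint8_t *)mask)) ==
                       ((*(const uint8_t *)mval) & (*(const uint8_t *)mmask));
        case sizeof(uint16_t):
                return ((*(const uint16_t *)val) & (*(const uint16_t *)mask)) ==
                       ((*(const uint16_t *)mval) & (*(const uint16_t *)mmask));
        case sizeof(uint32_t):
                return ((*(const uint32_t *)val) & (*(const uint32_t *)mask)) ==
                       ((*(const uint32_t *)mval) & (*(const uint32_t *)mmask));
        }
        return false;
}

int main(void)
{
        uint16_t set_val = 0x0064, set_mask = 0x0fff;     /* SET VID to 100  */
        uint16_t match_val = 0x0064, match_mask = 0x0fff; /* flow matches VID 100 */
        uint32_t add_val = 0, zero = 0;

        /* A SET of a value the match already guarantees: nothing to rewrite. */
        if (same_under_mask(&set_val, &set_mask, &match_val, &match_mask,
                            sizeof(set_val)))
                printf("skip SET: match already guarantees the field value\n");

        /* An ADD of zero never changes the packet: nothing to emit. */
        if (!memcmp(&add_val, &zero, sizeof(add_val)))
                printf("skip ADD: adding zero is a no-op\n");
        return 0;
}
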
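Similarly, the en_tx.c hunks replace the removed skb->xmit_more bit with the core netdev_xmit_more() helper and pass the hint into mlx5e_sq_xmit() as an explicit bool (the IPoIB path passes false). The ring rule itself is unchanged: write the doorbell only when the stack promises no further packets, or when the queue has just been stopped. A compilable toy model of that batching decision follows — the names are invented stand-ins, not the driver's real WQ structures:

/* build: cc -o doorbell doorbell.c */
#include <stdbool.h>
#include <stdio.h>

static bool queue_stopped; /* stand-in for netif_xmit_stopped() */

static void notify_hw(int pc)
{
        printf("doorbell rung at producer counter %d\n", pc);
}

/* Shape of mlx5e_txwqe_complete(): defer the (expensive) doorbell
 * write while more packets are coming, unless the queue stopped and
 * the pending work must be kicked now. */
static void txwqe_complete(int pc, bool xmit_more)
{
        if (!xmit_more || queue_stopped)
                notify_hw(pc);
}

int main(void)
{
        txwqe_complete(1, true);  /* batched: no doorbell          */
        txwqe_complete(2, true);  /* batched: no doorbell          */
        txwqe_complete(3, false); /* end of burst: single doorbell */
        return 0;
}
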
index 3f3cd32ae60a..e0ba59b5296f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -431,6 +431,9 @@ static inline int mlx5_eswitch_index_to_vport_num(struct mlx5_eswitch *esw, return index; } +/* TODO: This mlx5e_tc function shouldn't be called by eswitch */ +void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw); + #else /* CONFIG_MLX5_ESWITCH */ /* eswitch API stubs */ static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 9b2d78ee22b8..21bdd0cda6f7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -43,11 +43,6 @@ #include "ecpf.h" #include "lib/eq.h" -enum { - FDB_FAST_PATH = 0, - FDB_SLOW_PATH -}; - /* There are two match-all miss flows, one for unicast dst mac and * one for multicast. */ @@ -1287,13 +1282,13 @@ void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw) int esw_offloads_init_reps(struct mlx5_eswitch *esw) { - int total_vfs = MLX5_TOTAL_VPORTS(esw->dev); + int total_vports = MLX5_TOTAL_VPORTS(esw->dev); struct mlx5_core_dev *dev = esw->dev; struct mlx5_eswitch_rep *rep; u8 hw_id[ETH_ALEN], rep_type; int vport; - esw->offloads.vport_reps = kcalloc(total_vfs, + esw->offloads.vport_reps = kcalloc(total_vports, sizeof(struct mlx5_eswitch_rep), GFP_KERNEL); if (!esw->offloads.vport_reps) @@ -1523,8 +1518,6 @@ static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw, return 0; } -void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw); - static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw) { mlx5e_tc_clean_fdb_peer_flows(esw); @@ -1700,8 +1693,6 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports, { int err; - mutex_init(&esw->fdb_table.offloads.fdb_prio_lock); - err = esw_offloads_steering_init(esw, total_nvports); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c index 5d5864e8df3c..a81e8d2168d8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/events.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c @@ -21,6 +21,7 @@ struct mlx5_event_nb { static int any_notifier(struct notifier_block *, unsigned long, void *); static int temp_warn(struct notifier_block *, unsigned long, void *); static int port_module(struct notifier_block *, unsigned long, void *); +static int pcie_core(struct notifier_block *, unsigned long, void *); /* handler which forwards the event to events->nh, driver notifiers */ static int forward_event(struct notifier_block *, unsigned long, void *); @@ -30,6 +31,7 @@ static struct mlx5_nb events_nbs_ref[] = { {.nb.notifier_call = any_notifier, .event_type = MLX5_EVENT_TYPE_NOTIFY_ANY }, {.nb.notifier_call = temp_warn, .event_type = MLX5_EVENT_TYPE_TEMP_WARN_EVENT }, {.nb.notifier_call = port_module, .event_type = MLX5_EVENT_TYPE_PORT_MODULE_EVENT }, + {.nb.notifier_call = pcie_core, .event_type = MLX5_EVENT_TYPE_GENERAL_EVENT }, /* Events to be forwarded (as is) to mlx5 core interfaces (mlx5e/mlx5_ib) */ {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_PORT_CHANGE }, @@ -51,11 +53,14 @@ static struct mlx5_nb events_nbs_ref[] = { struct mlx5_events { struct mlx5_core_dev *dev; + struct workqueue_struct *wq; struct mlx5_event_nb notifiers[ARRAY_SIZE(events_nbs_ref)]; /* driver notifier chain */ struct 
atomic_notifier_head nh; /* port module events stats */ struct mlx5_pme_stats pme_stats; + /*pcie_core*/ + struct work_struct pcie_core_work; }; static const char *eqe_type_str(u8 type) @@ -249,6 +254,69 @@ static int port_module(struct notifier_block *nb, unsigned long type, void *data return NOTIFY_OK; } +enum { + MLX5_PCI_POWER_COULD_NOT_BE_READ = 0x0, + MLX5_PCI_POWER_SUFFICIENT_REPORTED = 0x1, + MLX5_PCI_POWER_INSUFFICIENT_REPORTED = 0x2, +}; + +static void mlx5_pcie_event(struct work_struct *work) +{ + u32 out[MLX5_ST_SZ_DW(mpein_reg)] = {0}; + u32 in[MLX5_ST_SZ_DW(mpein_reg)] = {0}; + struct mlx5_events *events; + struct mlx5_core_dev *dev; + u8 power_status; + u16 pci_power; + + events = container_of(work, struct mlx5_events, pcie_core_work); + dev = events->dev; + + if (!MLX5_CAP_MCAM_FEATURE(dev, pci_status_and_power)) + return; + + mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), + MLX5_REG_MPEIN, 0, 0); + power_status = MLX5_GET(mpein_reg, out, pwr_status); + pci_power = MLX5_GET(mpein_reg, out, pci_power); + + switch (power_status) { + case MLX5_PCI_POWER_COULD_NOT_BE_READ: + mlx5_core_info_rl(dev, + "PCIe slot power capability was not advertised.\n"); + break; + case MLX5_PCI_POWER_INSUFFICIENT_REPORTED: + mlx5_core_warn_rl(dev, + "Detected insufficient power on the PCIe slot (%uW).\n", + pci_power); + break; + case MLX5_PCI_POWER_SUFFICIENT_REPORTED: + mlx5_core_info_rl(dev, + "PCIe slot advertised sufficient power (%uW).\n", + pci_power); + break; + } +} + +static int pcie_core(struct notifier_block *nb, unsigned long type, void *data) +{ + struct mlx5_event_nb *event_nb = mlx5_nb_cof(nb, + struct mlx5_event_nb, + nb); + struct mlx5_events *events = event_nb->ctx; + struct mlx5_eqe *eqe = data; + + switch (eqe->sub_type) { + case MLX5_GENERAL_SUBTYPE_PCI_POWER_CHANGE_EVENT: + queue_work(events->wq, &events->pcie_core_work); + break; + default: + return NOTIFY_DONE; + } + + return NOTIFY_OK; +} + void mlx5_get_pme_stats(struct mlx5_core_dev *dev, struct mlx5_pme_stats *stats) { *stats = dev->priv.events->pme_stats; @@ -277,11 +345,17 @@ int mlx5_events_init(struct mlx5_core_dev *dev) ATOMIC_INIT_NOTIFIER_HEAD(&events->nh); events->dev = dev; dev->priv.events = events; + events->wq = create_singlethread_workqueue("mlx5_events"); + if (!events->wq) + return -ENOMEM; + INIT_WORK(&events->pcie_core_work, mlx5_pcie_event); + return 0; } void mlx5_events_cleanup(struct mlx5_core_dev *dev) { + destroy_workqueue(dev->priv.events->wq); kvfree(dev->priv.events); } @@ -304,6 +378,7 @@ void mlx5_events_stop(struct mlx5_core_dev *dev) for (i = ARRAY_SIZE(events_nbs_ref) - 1; i >= 0 ; i--) mlx5_eq_notifier_unregister(dev, &events->notifiers[i].nb); + flush_workqueue(events->wq); } int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c index 873541ef4c1b..ca2296a2f9ee 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c @@ -135,7 +135,7 @@ static void mlx5_fpga_conn_notify_hw(struct mlx5_fpga_conn *conn, void *wqe) *conn->qp.wq.sq.db = cpu_to_be32(conn->qp.sq.pc); /* Make sure that doorbell record is visible before ringing */ wmb(); - mlx5_write64(wqe, conn->fdev->conn_res.uar->map + MLX5_BF_OFFSET, NULL); + mlx5_write64(wqe, conn->fdev->conn_res.uar->map + MLX5_BF_OFFSET); } static void mlx5_fpga_conn_post_send(struct mlx5_fpga_conn *conn, diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h index 7e2e871dbf83..52c9dee91ea4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h @@ -37,6 +37,7 @@ #include <linux/mlx5/eq.h> +#include "mlx5_core.h" #include "lib/eq.h" #include "fpga/cmd.h" @@ -62,26 +63,26 @@ struct mlx5_fpga_device { }; #define mlx5_fpga_dbg(__adev, format, ...) \ - dev_dbg(&(__adev)->mdev->pdev->dev, "FPGA: %s:%d:(pid %d): " format, \ - __func__, __LINE__, current->pid, ##__VA_ARGS__) + mlx5_core_dbg((__adev)->mdev, "FPGA: %s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, ##__VA_ARGS__) #define mlx5_fpga_err(__adev, format, ...) \ - dev_err(&(__adev)->mdev->pdev->dev, "FPGA: %s:%d:(pid %d): " format, \ - __func__, __LINE__, current->pid, ##__VA_ARGS__) + mlx5_core_err((__adev)->mdev, "FPGA: %s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, ##__VA_ARGS__) #define mlx5_fpga_warn(__adev, format, ...) \ - dev_warn(&(__adev)->mdev->pdev->dev, "FPGA: %s:%d:(pid %d): " format, \ - __func__, __LINE__, current->pid, ##__VA_ARGS__) + mlx5_core_warn((__adev)->mdev, "FPGA: %s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, ##__VA_ARGS__) #define mlx5_fpga_warn_ratelimited(__adev, format, ...) \ - dev_warn_ratelimited(&(__adev)->mdev->pdev->dev, "FPGA: %s:%d: " \ - format, __func__, __LINE__, ##__VA_ARGS__) + mlx5_core_err_rl((__adev)->mdev, "FPGA: %s:%d: " \ + format, __func__, __LINE__, ##__VA_ARGS__) #define mlx5_fpga_notice(__adev, format, ...) \ - dev_notice(&(__adev)->mdev->pdev->dev, "FPGA: " format, ##__VA_ARGS__) + mlx5_core_info((__adev)->mdev, "FPGA: " format, ##__VA_ARGS__) #define mlx5_fpga_info(__adev, format, ...) 
\ - dev_info(&(__adev)->mdev->pdev->dev, "FPGA: " format, ##__VA_ARGS__) + mlx5_core_info((__adev)->mdev, "FPGA: " format, ##__VA_ARGS__) int mlx5_fpga_init(struct mlx5_core_dev *mdev); void mlx5_fpga_cleanup(struct mlx5_core_dev *mdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 0be3eb86dd84..9fcef7e3b86d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -819,7 +819,7 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio struct mlx5_flow_root_namespace *root = find_root(&prio->node); struct mlx5_ft_underlay_qp *uqp; int min_level = INT_MAX; - int err; + int err = 0; u32 qpn; if (root->root_ft) @@ -2516,8 +2516,16 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering) if (!steering->fdb_sub_ns) return -ENOMEM; + maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BYPASS_PATH, + 1); + if (IS_ERR(maj_prio)) { + err = PTR_ERR(maj_prio); + goto out_err; + } + levels = 2 * FDB_MAX_PRIO * (FDB_MAX_CHAIN + 1); - maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns, 0, + maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns, + FDB_FAST_PATH, levels); if (IS_ERR(maj_prio)) { err = PTR_ERR(maj_prio); @@ -2542,7 +2550,7 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering) steering->fdb_sub_ns[chain] = ns; } - maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, 1, 1); + maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_SLOW_PATH, 1); if (IS_ERR(maj_prio)) { err = PTR_ERR(maj_prio); goto out_err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index cb9fa3430c53..3b98fcdd7d0e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -152,11 +152,11 @@ static void health_recover(struct work_struct *work) nic_state = mlx5_get_nic_state(dev); if (nic_state == MLX5_NIC_IFC_INVALID) { - dev_err(&dev->pdev->dev, "health recovery flow aborted since the nic state is invalid\n"); + mlx5_core_err(dev, "health recovery flow aborted since the nic state is invalid\n"); return; } - dev_err(&dev->pdev->dev, "starting health recovery flow\n"); + mlx5_core_err(dev, "starting health recovery flow\n"); mlx5_recover_device(dev); } @@ -180,8 +180,8 @@ static void health_care(struct work_struct *work) if (!test_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags)) schedule_delayed_work(&health->recover_work, recover_delay); else - dev_err(&dev->pdev->dev, - "new health works are not permitted at this stage\n"); + mlx5_core_err(dev, + "new health works are not permitted at this stage\n"); spin_unlock_irqrestore(&health->wq_lock, flags); } @@ -228,18 +228,22 @@ static void print_health_info(struct mlx5_core_dev *dev) return; for (i = 0; i < ARRAY_SIZE(h->assert_var); i++) - dev_err(&dev->pdev->dev, "assert_var[%d] 0x%08x\n", i, ioread32be(h->assert_var + i)); + mlx5_core_err(dev, "assert_var[%d] 0x%08x\n", i, + ioread32be(h->assert_var + i)); - dev_err(&dev->pdev->dev, "assert_exit_ptr 0x%08x\n", ioread32be(&h->assert_exit_ptr)); - dev_err(&dev->pdev->dev, "assert_callra 0x%08x\n", ioread32be(&h->assert_callra)); + mlx5_core_err(dev, "assert_exit_ptr 0x%08x\n", + ioread32be(&h->assert_exit_ptr)); + mlx5_core_err(dev, "assert_callra 0x%08x\n", + ioread32be(&h->assert_callra)); sprintf(fw_str, "%d.%d.%d", fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev)); - 
dev_err(&dev->pdev->dev, "fw_ver %s\n", fw_str); - dev_err(&dev->pdev->dev, "hw_id 0x%08x\n", ioread32be(&h->hw_id)); - dev_err(&dev->pdev->dev, "irisc_index %d\n", ioread8(&h->irisc_index)); - dev_err(&dev->pdev->dev, "synd 0x%x: %s\n", ioread8(&h->synd), hsynd_str(ioread8(&h->synd))); - dev_err(&dev->pdev->dev, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd)); + mlx5_core_err(dev, "fw_ver %s\n", fw_str); + mlx5_core_err(dev, "hw_id 0x%08x\n", ioread32be(&h->hw_id)); + mlx5_core_err(dev, "irisc_index %d\n", ioread8(&h->irisc_index)); + mlx5_core_err(dev, "synd 0x%x: %s\n", ioread8(&h->synd), + hsynd_str(ioread8(&h->synd))); + mlx5_core_err(dev, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd)); fw = ioread32be(&h->fw_ver); - dev_err(&dev->pdev->dev, "raw fw_ver 0x%08x\n", fw); + mlx5_core_err(dev, "raw fw_ver 0x%08x\n", fw); } static unsigned long get_next_poll_jiffies(void) @@ -262,8 +266,7 @@ void mlx5_trigger_health_work(struct mlx5_core_dev *dev) if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags)) queue_work(health->wq, &health->work); else - dev_err(&dev->pdev->dev, - "new health works are not permitted at this stage\n"); + mlx5_core_err(dev, "new health works are not permitted at this stage\n"); spin_unlock_irqrestore(&health->wq_lock, flags); } @@ -284,7 +287,7 @@ static void poll_health(struct timer_list *t) health->prev = count; if (health->miss_counter == MAX_MISSES) { - dev_err(&dev->pdev->dev, "device's health compromised - reached miss count\n"); + mlx5_core_err(dev, "device's health compromised - reached miss count\n"); print_health_info(dev); } @@ -352,6 +355,13 @@ void mlx5_drain_health_recovery(struct mlx5_core_dev *dev) cancel_delayed_work_sync(&dev->priv.health.recover_work); } +void mlx5_health_flush(struct mlx5_core_dev *dev) +{ + struct mlx5_core_health *health = &dev->priv.health; + + flush_workqueue(health->wq); +} + void mlx5_health_cleanup(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; @@ -370,7 +380,7 @@ int mlx5_health_init(struct mlx5_core_dev *dev) return -ENOMEM; strcpy(name, "mlx5_health"); - strcat(name, dev_name(&dev->pdev->dev)); + strcat(name, dev->priv.name); health->wq = create_singlethread_workqueue(name); kfree(name); if (!health->wq) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 4eac42555c7d..9b03ae1e1e10 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -77,15 +77,14 @@ int mlx5i_init(struct mlx5_core_dev *mdev, void *ppriv) { struct mlx5e_priv *priv = mlx5i_epriv(netdev); - u16 max_mtu; int err; err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv); if (err) return err; - mlx5_query_port_max_mtu(mdev, &max_mtu, 1); - netdev->mtu = max_mtu; + mlx5e_set_netdev_mtu_boundaries(priv); + netdev->mtu = netdev->max_mtu; mlx5e_build_nic_params(mdev, &priv->rss_params, &priv->channels.params, mlx5e_get_netdev_max_channels(netdev), diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c index 5633f8572800..8212bfd05733 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c @@ -122,7 +122,7 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, /* Handle add/replace event */ if (fi->fib_nhs == 1) { if (__mlx5_lag_is_active(ldev)) { - struct net_device *nh_dev = fi->fib_nh[0].nh_dev; + struct net_device *nh_dev = 
fi->fib_nh[0].fib_nh_dev; int i = mlx5_lag_dev_get_netdev_idx(ldev, nh_dev); mlx5_lag_set_port_affinity(ldev, ++i); @@ -134,10 +134,10 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, return; /* Verify next hops are ports of the same hca */ - if (!(fi->fib_nh[0].nh_dev == ldev->pf[0].netdev && - fi->fib_nh[1].nh_dev == ldev->pf[1].netdev) && - !(fi->fib_nh[0].nh_dev == ldev->pf[1].netdev && - fi->fib_nh[1].nh_dev == ldev->pf[0].netdev)) { + if (!(fi->fib_nh[0].fib_nh_dev == ldev->pf[0].netdev && + fi->fib_nh[1].fib_nh_dev == ldev->pf[1].netdev) && + !(fi->fib_nh[0].fib_nh_dev == ldev->pf[1].netdev && + fi->fib_nh[1].fib_nh_dev == ldev->pf[0].netdev)) { mlx5_core_warn(ldev->pf[0].dev, "Multipath offload require two ports of the same HCA\n"); return; } @@ -167,7 +167,7 @@ static void mlx5_lag_fib_nexthop_event(struct mlx5_lag *ldev, /* nh added/removed */ if (event == FIB_EVENT_NH_DEL) { - int i = mlx5_lag_dev_get_netdev_idx(ldev, fib_nh->nh_dev); + int i = mlx5_lag_dev_get_netdev_idx(ldev, fib_nh->fib_nh_dev); if (i >= 0) { i = (i + 1) % 2 + 1; /* peer port */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c index 40f4a19b1ce1..be69c1d7941a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c @@ -80,10 +80,8 @@ void mlx5_init_port_tun_entropy(struct mlx5_tun_entropy *tun_entropy, mlx5_query_port_tun_entropy(mdev, &entropy_flags); tun_entropy->num_enabling_entries = 0; tun_entropy->num_disabling_entries = 0; - tun_entropy->enabled = entropy_flags.calc_enabled; - tun_entropy->enabled = - (entropy_flags.calc_supported) ? - entropy_flags.calc_enabled : true; + tun_entropy->enabled = entropy_flags.calc_supported ? 
+ entropy_flags.calc_enabled : true; } static int mlx5_set_entropy(struct mlx5_tun_entropy *tun_entropy, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c index 9a8fd762167b..b9d4f4e19ff9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c @@ -33,6 +33,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/mlx5/driver.h> +#include <net/vxlan.h> #include "mlx5_core.h" #include "vxlan.h" @@ -204,8 +205,8 @@ struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev) spin_lock_init(&vxlan->lock); hash_init(vxlan->htable); - /* Hardware adds 4789 by default */ - mlx5_vxlan_add_port(vxlan, 4789); + /* Hardware adds 4789 (IANA_VXLAN_UDP_PORT) by default */ + mlx5_vxlan_add_port(vxlan, IANA_VXLAN_UDP_PORT); return vxlan; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 76716419370d..5245b0b1770f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -567,24 +567,23 @@ query_ex: static int set_hca_cap(struct mlx5_core_dev *dev) { - struct pci_dev *pdev = dev->pdev; int err; err = handle_hca_cap(dev); if (err) { - dev_err(&pdev->dev, "handle_hca_cap failed\n"); + mlx5_core_err(dev, "handle_hca_cap failed\n"); goto out; } err = handle_hca_cap_atomic(dev); if (err) { - dev_err(&pdev->dev, "handle_hca_cap_atomic failed\n"); + mlx5_core_err(dev, "handle_hca_cap_atomic failed\n"); goto out; } err = handle_hca_cap_odp(dev); if (err) { - dev_err(&pdev->dev, "handle_hca_cap_odp failed\n"); + mlx5_core_err(dev, "handle_hca_cap_odp failed\n"); goto out; } @@ -716,36 +715,29 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev) return -EOPNOTSUPP; } -static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv) +static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev, + const struct pci_device_id *id) { - struct pci_dev *pdev = dev->pdev; + struct mlx5_priv *priv = &dev->priv; int err = 0; - pci_set_drvdata(dev->pdev, dev); - strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN); - priv->name[MLX5_MAX_NAME_LEN - 1] = 0; - - mutex_init(&priv->pgdir_mutex); - INIT_LIST_HEAD(&priv->pgdir_list); - spin_lock_init(&priv->mkey_lock); + dev->pdev = pdev; + priv->pci_dev_data = id->driver_data; - mutex_init(&priv->alloc_mutex); + pci_set_drvdata(dev->pdev, dev); + dev->bar_addr = pci_resource_start(pdev, 0); priv->numa_node = dev_to_node(&dev->pdev->dev); - if (mlx5_debugfs_root) - priv->dbg_root = - debugfs_create_dir(pci_name(pdev), mlx5_debugfs_root); - err = mlx5_pci_enable_device(dev); if (err) { - dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); - goto err_dbg; + mlx5_core_err(dev, "Cannot enable PCI device, aborting\n"); + return err; } err = request_bar(pdev); if (err) { - dev_err(&pdev->dev, "error requesting BARs, aborting\n"); + mlx5_core_err(dev, "error requesting BARs, aborting\n"); goto err_disable; } @@ -753,7 +745,7 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv) err = set_dma_caps(pdev); if (err) { - dev_err(&pdev->dev, "Failed setting DMA capabilities mask, aborting\n"); + mlx5_core_err(dev, "Failed setting DMA capabilities mask, aborting\n"); goto err_clr_master; } @@ -762,11 +754,11 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv) pci_enable_atomic_ops_to_root(pdev, 
PCI_EXP_DEVCAP2_ATOMIC_COMP128)) mlx5_core_dbg(dev, "Enabling pci atomics failed\n"); - dev->iseg_base = pci_resource_start(dev->pdev, 0); + dev->iseg_base = dev->bar_addr; dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg)); if (!dev->iseg) { err = -ENOMEM; - dev_err(&pdev->dev, "Failed mapping initialization segment, aborting\n"); + mlx5_core_err(dev, "Failed mapping initialization segment, aborting\n"); goto err_clr_master; } @@ -777,52 +769,47 @@ err_clr_master: release_bar(dev->pdev); err_disable: mlx5_pci_disable_device(dev); - -err_dbg: - debugfs_remove(priv->dbg_root); return err; } -static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv) +static void mlx5_pci_close(struct mlx5_core_dev *dev) { iounmap(dev->iseg); pci_clear_master(dev->pdev); release_bar(dev->pdev); mlx5_pci_disable_device(dev); - debugfs_remove_recursive(priv->dbg_root); } -static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv) +static int mlx5_init_once(struct mlx5_core_dev *dev) { - struct pci_dev *pdev = dev->pdev; int err; - priv->devcom = mlx5_devcom_register_device(dev); - if (IS_ERR(priv->devcom)) - dev_err(&pdev->dev, "failed to register with devcom (0x%p)\n", - priv->devcom); + dev->priv.devcom = mlx5_devcom_register_device(dev); + if (IS_ERR(dev->priv.devcom)) + mlx5_core_err(dev, "failed to register with devcom (0x%p)\n", + dev->priv.devcom); err = mlx5_query_board_id(dev); if (err) { - dev_err(&pdev->dev, "query board id failed\n"); + mlx5_core_err(dev, "query board id failed\n"); goto err_devcom; } err = mlx5_eq_table_init(dev); if (err) { - dev_err(&pdev->dev, "failed to initialize eq\n"); + mlx5_core_err(dev, "failed to initialize eq\n"); goto err_devcom; } err = mlx5_events_init(dev); if (err) { - dev_err(&pdev->dev, "failed to initialize events\n"); + mlx5_core_err(dev, "failed to initialize events\n"); goto err_eq_cleanup; } err = mlx5_cq_debugfs_init(dev); if (err) { - dev_err(&pdev->dev, "failed to initialize cq debugfs\n"); + mlx5_core_err(dev, "failed to initialize cq debugfs\n"); goto err_events_cleanup; } @@ -838,31 +825,31 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv) err = mlx5_init_rl_table(dev); if (err) { - dev_err(&pdev->dev, "Failed to init rate limiting\n"); + mlx5_core_err(dev, "Failed to init rate limiting\n"); goto err_tables_cleanup; } err = mlx5_mpfs_init(dev); if (err) { - dev_err(&pdev->dev, "Failed to init l2 table %d\n", err); + mlx5_core_err(dev, "Failed to init l2 table %d\n", err); goto err_rl_cleanup; } err = mlx5_eswitch_init(dev); if (err) { - dev_err(&pdev->dev, "Failed to init eswitch %d\n", err); + mlx5_core_err(dev, "Failed to init eswitch %d\n", err); goto err_mpfs_cleanup; } err = mlx5_sriov_init(dev); if (err) { - dev_err(&pdev->dev, "Failed to init sriov %d\n", err); + mlx5_core_err(dev, "Failed to init sriov %d\n", err); goto err_eswitch_cleanup; } err = mlx5_fpga_init(dev); if (err) { - dev_err(&pdev->dev, "Failed to init fpga device %d\n", err); + mlx5_core_err(dev, "Failed to init fpga device %d\n", err); goto err_sriov_cleanup; } @@ -912,93 +899,78 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev) mlx5_devcom_unregister_device(dev->priv.devcom); } -static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, - bool boot) +static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot) { - struct pci_dev *pdev = dev->pdev; int err; - dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev); - mutex_lock(&dev->intf_state_mutex); - if 
(test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) { - dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n", - __func__); - goto out; - } - - dev_info(&pdev->dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev), - fw_rev_min(dev), fw_rev_sub(dev)); + mlx5_core_info(dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev), + fw_rev_min(dev), fw_rev_sub(dev)); /* Only PFs hold the relevant PCIe information for this query */ if (mlx5_core_is_pf(dev)) pcie_print_link_status(dev->pdev); - /* on load removing any previous indication of internal error, device is - * up - */ - dev->state = MLX5_DEVICE_STATE_UP; - /* wait for firmware to accept initialization segments configurations */ err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI); if (err) { - dev_err(&dev->pdev->dev, "Firmware over %d MS in pre-initializing state, aborting\n", - FW_PRE_INIT_TIMEOUT_MILI); - goto out_err; + mlx5_core_err(dev, "Firmware over %d MS in pre-initializing state, aborting\n", + FW_PRE_INIT_TIMEOUT_MILI); + return err; } err = mlx5_cmd_init(dev); if (err) { - dev_err(&pdev->dev, "Failed initializing command interface, aborting\n"); - goto out_err; + mlx5_core_err(dev, "Failed initializing command interface, aborting\n"); + return err; } err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI); if (err) { - dev_err(&dev->pdev->dev, "Firmware over %d MS in initializing state, aborting\n", - FW_INIT_TIMEOUT_MILI); + mlx5_core_err(dev, "Firmware over %d MS in initializing state, aborting\n", + FW_INIT_TIMEOUT_MILI); goto err_cmd_cleanup; } err = mlx5_core_enable_hca(dev, 0); if (err) { - dev_err(&pdev->dev, "enable hca failed\n"); + mlx5_core_err(dev, "enable hca failed\n"); goto err_cmd_cleanup; } err = mlx5_core_set_issi(dev); if (err) { - dev_err(&pdev->dev, "failed to set issi\n"); + mlx5_core_err(dev, "failed to set issi\n"); goto err_disable_hca; } err = mlx5_satisfy_startup_pages(dev, 1); if (err) { - dev_err(&pdev->dev, "failed to allocate boot pages\n"); + mlx5_core_err(dev, "failed to allocate boot pages\n"); goto err_disable_hca; } err = set_hca_ctrl(dev); if (err) { - dev_err(&pdev->dev, "set_hca_ctrl failed\n"); + mlx5_core_err(dev, "set_hca_ctrl failed\n"); goto reclaim_boot_pages; } err = set_hca_cap(dev); if (err) { - dev_err(&pdev->dev, "set_hca_cap failed\n"); + mlx5_core_err(dev, "set_hca_cap failed\n"); goto reclaim_boot_pages; } err = mlx5_satisfy_startup_pages(dev, 0); if (err) { - dev_err(&pdev->dev, "failed to allocate init pages\n"); + mlx5_core_err(dev, "failed to allocate init pages\n"); goto reclaim_boot_pages; } err = mlx5_cmd_init_hca(dev, sw_owner_id); if (err) { - dev_err(&pdev->dev, "init hca failed\n"); + mlx5_core_err(dev, "init hca failed\n"); goto reclaim_boot_pages; } @@ -1008,23 +980,50 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, err = mlx5_query_hca_caps(dev); if (err) { - dev_err(&pdev->dev, "query hca failed\n"); - goto err_stop_poll; + mlx5_core_err(dev, "query hca failed\n"); + goto stop_health; } - if (boot) { - err = mlx5_init_once(dev, priv); - if (err) { - dev_err(&pdev->dev, "sw objs init failed\n"); - goto err_stop_poll; - } + return 0; + +stop_health: + mlx5_stop_health_poll(dev, boot); +reclaim_boot_pages: + mlx5_reclaim_startup_pages(dev); +err_disable_hca: + mlx5_core_disable_hca(dev, 0); +err_cmd_cleanup: + mlx5_cmd_cleanup(dev); + + return err; +} + +static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot) +{ + int err; + + mlx5_stop_health_poll(dev, boot); + err = mlx5_cmd_teardown_hca(dev); + if (err) { + mlx5_core_err(dev, 
"tear_down_hca failed, skip cleanup\n"); + return err; } + mlx5_reclaim_startup_pages(dev); + mlx5_core_disable_hca(dev, 0); + mlx5_cmd_cleanup(dev); + + return 0; +} + +static int mlx5_load(struct mlx5_core_dev *dev) +{ + int err; dev->priv.uar = mlx5_get_uars_page(dev); if (IS_ERR(dev->priv.uar)) { - dev_err(&pdev->dev, "Failed allocating uar, aborting\n"); + mlx5_core_err(dev, "Failed allocating uar, aborting\n"); err = PTR_ERR(dev->priv.uar); - goto err_get_uars; + return err; } mlx5_events_start(dev); @@ -1032,132 +1031,155 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, err = mlx5_eq_table_create(dev); if (err) { - dev_err(&pdev->dev, "Failed to create EQs\n"); + mlx5_core_err(dev, "Failed to create EQs\n"); goto err_eq_table; } err = mlx5_fw_tracer_init(dev->tracer); if (err) { - dev_err(&pdev->dev, "Failed to init FW tracer\n"); + mlx5_core_err(dev, "Failed to init FW tracer\n"); goto err_fw_tracer; } err = mlx5_fpga_device_start(dev); if (err) { - dev_err(&pdev->dev, "fpga device start failed %d\n", err); + mlx5_core_err(dev, "fpga device start failed %d\n", err); goto err_fpga_start; } err = mlx5_accel_ipsec_init(dev); if (err) { - dev_err(&pdev->dev, "IPSec device start failed %d\n", err); + mlx5_core_err(dev, "IPSec device start failed %d\n", err); goto err_ipsec_start; } err = mlx5_accel_tls_init(dev); if (err) { - dev_err(&pdev->dev, "TLS device start failed %d\n", err); + mlx5_core_err(dev, "TLS device start failed %d\n", err); goto err_tls_start; } err = mlx5_init_fs(dev); if (err) { - dev_err(&pdev->dev, "Failed to init flow steering\n"); + mlx5_core_err(dev, "Failed to init flow steering\n"); goto err_fs; } err = mlx5_core_set_hca_defaults(dev); if (err) { - dev_err(&pdev->dev, "Failed to set hca defaults\n"); + mlx5_core_err(dev, "Failed to set hca defaults\n"); goto err_fs; } err = mlx5_sriov_attach(dev); if (err) { - dev_err(&pdev->dev, "sriov init failed %d\n", err); + mlx5_core_err(dev, "sriov init failed %d\n", err); goto err_sriov; } err = mlx5_ec_init(dev); if (err) { - dev_err(&pdev->dev, "Failed to init embedded CPU\n"); + mlx5_core_err(dev, "Failed to init embedded CPU\n"); goto err_ec; } - if (mlx5_device_registered(dev)) { - mlx5_attach_device(dev); - } else { - err = mlx5_register_device(dev); - if (err) { - dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err); - goto err_reg_dev; - } - } - - set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); -out: - mutex_unlock(&dev->intf_state_mutex); - return 0; -err_reg_dev: - mlx5_ec_cleanup(dev); - err_ec: mlx5_sriov_detach(dev); - err_sriov: mlx5_cleanup_fs(dev); - err_fs: mlx5_accel_tls_cleanup(dev); - err_tls_start: mlx5_accel_ipsec_cleanup(dev); - err_ipsec_start: mlx5_fpga_device_stop(dev); - err_fpga_start: mlx5_fw_tracer_cleanup(dev->tracer); - err_fw_tracer: mlx5_eq_table_destroy(dev); - err_eq_table: mlx5_pagealloc_stop(dev); mlx5_events_stop(dev); - mlx5_put_uars_page(dev, priv->uar); + mlx5_put_uars_page(dev, dev->priv.uar); + return err; +} -err_get_uars: - if (boot) - mlx5_cleanup_once(dev); +static void mlx5_unload(struct mlx5_core_dev *dev) +{ + mlx5_ec_cleanup(dev); + mlx5_sriov_detach(dev); + mlx5_cleanup_fs(dev); + mlx5_accel_ipsec_cleanup(dev); + mlx5_accel_tls_cleanup(dev); + mlx5_fpga_device_stop(dev); + mlx5_fw_tracer_cleanup(dev->tracer); + mlx5_eq_table_destroy(dev); + mlx5_pagealloc_stop(dev); + mlx5_events_stop(dev); + mlx5_put_uars_page(dev, dev->priv.uar); +} -err_stop_poll: - mlx5_stop_health_poll(dev, boot); - if (mlx5_cmd_teardown_hca(dev)) { - 
dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n"); - goto out_err; +static int mlx5_load_one(struct mlx5_core_dev *dev, bool boot) +{ + int err = 0; + + dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev); + mutex_lock(&dev->intf_state_mutex); + if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) { + mlx5_core_warn(dev, "interface is up, NOP\n"); + goto out; } + /* remove any previous indication of internal error */ + dev->state = MLX5_DEVICE_STATE_UP; -reclaim_boot_pages: - mlx5_reclaim_startup_pages(dev); + err = mlx5_function_setup(dev, boot); + if (err) + goto out; -err_disable_hca: - mlx5_core_disable_hca(dev, 0); + if (boot) { + err = mlx5_init_once(dev); + if (err) { + mlx5_core_err(dev, "sw objs init failed\n"); + goto function_teardown; + } + } -err_cmd_cleanup: - mlx5_cmd_cleanup(dev); + err = mlx5_load(dev); + if (err) + goto err_load; -out_err: + if (mlx5_device_registered(dev)) { + mlx5_attach_device(dev); + } else { + err = mlx5_register_device(dev); + if (err) { + mlx5_core_err(dev, "register device failed %d\n", err); + goto err_reg_dev; + } + } + + set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); +out: + mutex_unlock(&dev->intf_state_mutex); + + return err; + +err_reg_dev: + mlx5_unload(dev); +err_load: + if (boot) + mlx5_cleanup_once(dev); +function_teardown: + mlx5_function_teardown(dev, boot); dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; mutex_unlock(&dev->intf_state_mutex); return err; } -static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, - bool cleanup) +static int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup) { int err = 0; @@ -1166,8 +1188,8 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, mutex_lock(&dev->intf_state_mutex); if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) { - dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n", - __func__); + mlx5_core_warn(dev, "%s: interface is down, NOP\n", + __func__); if (cleanup) mlx5_cleanup_once(dev); goto out; @@ -1178,30 +1200,12 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, if (mlx5_device_registered(dev)) mlx5_detach_device(dev); - mlx5_ec_cleanup(dev); - mlx5_sriov_detach(dev); - mlx5_cleanup_fs(dev); - mlx5_accel_ipsec_cleanup(dev); - mlx5_accel_tls_cleanup(dev); - mlx5_fpga_device_stop(dev); - mlx5_fw_tracer_cleanup(dev->tracer); - mlx5_eq_table_destroy(dev); - mlx5_pagealloc_stop(dev); - mlx5_events_stop(dev); - mlx5_put_uars_page(dev, priv->uar); + mlx5_unload(dev); + if (cleanup) mlx5_cleanup_once(dev); - mlx5_stop_health_poll(dev, cleanup); - - err = mlx5_cmd_teardown_hca(dev); - if (err) { - dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n"); - goto out; - } - mlx5_reclaim_startup_pages(dev); - mlx5_core_disable_hca(dev, 0); - mlx5_cmd_cleanup(dev); + mlx5_function_teardown(dev, cleanup); out: mutex_unlock(&dev->intf_state_mutex); return err; @@ -1218,29 +1222,15 @@ static const struct devlink_ops mlx5_devlink_ops = { #endif }; -#define MLX5_IB_MOD "mlx5_ib" -static int init_one(struct pci_dev *pdev, - const struct pci_device_id *id) +static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx, const char *name) { - struct mlx5_core_dev *dev; - struct devlink *devlink; - struct mlx5_priv *priv; + struct mlx5_priv *priv = &dev->priv; int err; - devlink = devlink_alloc(&mlx5_devlink_ops, sizeof(*dev)); - if (!devlink) { - dev_err(&pdev->dev, "kzalloc failed\n"); - return -ENOMEM; - } - - dev = devlink_priv(devlink); - priv = &dev->priv; - 
priv->pci_dev_data = id->driver_data; - - pci_set_drvdata(pdev, dev); + strncpy(priv->name, name, MLX5_MAX_NAME_LEN); + priv->name[MLX5_MAX_NAME_LEN - 1] = 0; - dev->pdev = pdev; - dev->profile = &profile[prof_sel]; + dev->profile = &profile[profile_idx]; INIT_LIST_HEAD(&priv->ctx_list); spin_lock_init(&priv->ctx_lock); @@ -1252,25 +1242,72 @@ static int init_one(struct pci_dev *pdev, INIT_LIST_HEAD(&priv->bfregs.reg_head.list); INIT_LIST_HEAD(&priv->bfregs.wc_head.list); - err = mlx5_pci_init(dev, priv); - if (err) { - dev_err(&pdev->dev, "mlx5_pci_init failed with error code %d\n", err); - goto clean_dev; + mutex_init(&priv->alloc_mutex); + mutex_init(&priv->pgdir_mutex); + INIT_LIST_HEAD(&priv->pgdir_list); + spin_lock_init(&priv->mkey_lock); + + priv->dbg_root = debugfs_create_dir(name, mlx5_debugfs_root); + if (!priv->dbg_root) { + pr_err("mlx5_core: %s error, Cannot create debugfs dir, aborting\n", name); + return -ENOMEM; } err = mlx5_health_init(dev); - if (err) { - dev_err(&pdev->dev, "mlx5_health_init failed with error code %d\n", err); - goto close_pci; - } + if (err) + goto err_health_init; err = mlx5_pagealloc_init(dev); if (err) goto err_pagealloc_init; - err = mlx5_load_one(dev, priv, true); + return 0; + +err_pagealloc_init: + mlx5_health_cleanup(dev); +err_health_init: + debugfs_remove(dev->priv.dbg_root); + + return err; +} + +static void mlx5_mdev_uninit(struct mlx5_core_dev *dev) +{ + mlx5_pagealloc_cleanup(dev); + mlx5_health_cleanup(dev); + debugfs_remove_recursive(dev->priv.dbg_root); +} + +#define MLX5_IB_MOD "mlx5_ib" +static int init_one(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct mlx5_core_dev *dev; + struct devlink *devlink; + int err; + + devlink = devlink_alloc(&mlx5_devlink_ops, sizeof(*dev)); + if (!devlink) { + dev_err(&pdev->dev, "kzalloc failed\n"); + return -ENOMEM; + } + + dev = devlink_priv(devlink); + + err = mlx5_mdev_init(dev, prof_sel, dev_name(&pdev->dev)); + if (err) + goto mdev_init_err; + + err = mlx5_pci_init(dev, pdev, id); + if (err) { + mlx5_core_err(dev, "mlx5_pci_init failed with error code %d\n", + err); + goto pci_init_err; + } + + err = mlx5_load_one(dev, true); if (err) { - dev_err(&pdev->dev, "mlx5_load_one failed with error code %d\n", err); + mlx5_core_err(dev, "mlx5_load_one failed with error code %d\n", + err); goto err_load_one; } @@ -1284,14 +1321,13 @@ static int init_one(struct pci_dev *pdev, return 0; clean_load: - mlx5_unload_one(dev, priv, true); + mlx5_unload_one(dev, true); + err_load_one: - mlx5_pagealloc_cleanup(dev); -err_pagealloc_init: - mlx5_health_cleanup(dev); -close_pci: - mlx5_pci_close(dev, priv); -clean_dev: + mlx5_pci_close(dev); +pci_init_err: + mlx5_mdev_uninit(dev); +mdev_init_err: devlink_free(devlink); return err; @@ -1301,20 +1337,18 @@ static void remove_one(struct pci_dev *pdev) { struct mlx5_core_dev *dev = pci_get_drvdata(pdev); struct devlink *devlink = priv_to_devlink(dev); - struct mlx5_priv *priv = &dev->priv; devlink_unregister(devlink); mlx5_unregister_device(dev); - if (mlx5_unload_one(dev, priv, true)) { - dev_err(&dev->pdev->dev, "mlx5_unload_one failed\n"); - mlx5_health_cleanup(dev); + if (mlx5_unload_one(dev, true)) { + mlx5_core_err(dev, "mlx5_unload_one failed\n"); + mlx5_health_flush(dev); return; } - mlx5_pagealloc_cleanup(dev); - mlx5_health_cleanup(dev); - mlx5_pci_close(dev, priv); + mlx5_pci_close(dev); + mlx5_mdev_uninit(dev); devlink_free(devlink); } @@ -1322,12 +1356,11 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev, 
pci_channel_state_t state) { struct mlx5_core_dev *dev = pci_get_drvdata(pdev); - struct mlx5_priv *priv = &dev->priv; - dev_info(&pdev->dev, "%s was called\n", __func__); + mlx5_core_info(dev, "%s was called\n", __func__); mlx5_enter_error_state(dev, false); - mlx5_unload_one(dev, priv, false); + mlx5_unload_one(dev, false); /* In case of kernel call drain the health wq */ if (state) { mlx5_drain_health_wq(dev); @@ -1354,7 +1387,9 @@ static int wait_vital(struct pci_dev *pdev) count = ioread32be(health->health_counter); if (count && count != 0xffffffff) { if (last_count && last_count != count) { - dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i); + mlx5_core_info(dev, + "wait vital counter value 0x%x after %d iterations\n", + count, i); return 0; } last_count = count; @@ -1370,12 +1405,12 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev) struct mlx5_core_dev *dev = pci_get_drvdata(pdev); int err; - dev_info(&pdev->dev, "%s was called\n", __func__); + mlx5_core_info(dev, "%s was called\n", __func__); err = mlx5_pci_enable_device(dev); if (err) { - dev_err(&pdev->dev, "%s: mlx5_pci_enable_device failed with error code: %d\n" - , __func__, err); + mlx5_core_err(dev, "%s: mlx5_pci_enable_device failed with error code: %d\n", + __func__, err); return PCI_ERS_RESULT_DISCONNECT; } @@ -1384,7 +1419,7 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev) pci_save_state(pdev); if (wait_vital(pdev)) { - dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__); + mlx5_core_err(dev, "%s: wait_vital timed out\n", __func__); return PCI_ERS_RESULT_DISCONNECT; } @@ -1394,17 +1429,16 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev) static void mlx5_pci_resume(struct pci_dev *pdev) { struct mlx5_core_dev *dev = pci_get_drvdata(pdev); - struct mlx5_priv *priv = &dev->priv; int err; - dev_info(&pdev->dev, "%s was called\n", __func__); + mlx5_core_info(dev, "%s was called\n", __func__); - err = mlx5_load_one(dev, priv, false); + err = mlx5_load_one(dev, false); if (err) - dev_err(&pdev->dev, "%s: mlx5_load_one failed with error code: %d\n" - , __func__, err); + mlx5_core_err(dev, "%s: mlx5_load_one failed with error code: %d\n", + __func__, err); else - dev_info(&pdev->dev, "%s: device recovered\n", __func__); + mlx5_core_info(dev, "%s: device recovered\n", __func__); } static const struct pci_error_handlers mlx5_err_handler = { @@ -1466,13 +1500,12 @@ succeed: static void shutdown(struct pci_dev *pdev) { struct mlx5_core_dev *dev = pci_get_drvdata(pdev); - struct mlx5_priv *priv = &dev->priv; int err; - dev_info(&pdev->dev, "Shutdown was called\n"); + mlx5_core_info(dev, "Shutdown was called\n"); err = mlx5_try_fast_unload(dev); if (err) - mlx5_unload_one(dev, priv, false); + mlx5_unload_one(dev, false); mlx5_pci_disable_device(dev); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 7b331674622c..8213c994e205 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -48,12 +48,12 @@ extern uint mlx5_core_debug_mask; #define mlx5_core_dbg(__dev, format, ...) \ - dev_dbg(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \ + pr_debug("%s:%s:%d:(pid %d): " format, (__dev)->priv.name, \ __func__, __LINE__, current->pid, \ ##__VA_ARGS__) #define mlx5_core_dbg_once(__dev, format, ...) 
\ - dev_dbg_once(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \ + pr_debug_once("%s:%s:%d:(pid %d): " format, (__dev)->priv.name, \ __func__, __LINE__, current->pid, \ ##__VA_ARGS__) @@ -64,28 +64,37 @@ do { \ } while (0) #define mlx5_core_err(__dev, format, ...) \ - dev_err(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \ + pr_err("%s:%s:%d:(pid %d): " format, (__dev)->priv.name, \ __func__, __LINE__, current->pid, \ ##__VA_ARGS__) -#define mlx5_core_err_rl(__dev, format, ...) \ - dev_err_ratelimited(&(__dev)->pdev->dev, \ - "%s:%d:(pid %d): " format, \ - __func__, __LINE__, current->pid, \ +#define mlx5_core_err_rl(__dev, format, ...) \ + pr_err_ratelimited("%s:%s:%d:(pid %d): " format, (__dev)->priv.name, \ + __func__, __LINE__, current->pid, \ ##__VA_ARGS__) #define mlx5_core_warn(__dev, format, ...) \ - dev_warn(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \ + pr_warn("%s:%s:%d:(pid %d): " format, (__dev)->priv.name, \ __func__, __LINE__, current->pid, \ ##__VA_ARGS__) #define mlx5_core_warn_once(__dev, format, ...) \ - dev_warn_once(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \ + pr_warn_once("%s:%s:%d:(pid %d): " format, (__dev)->priv.name, \ __func__, __LINE__, current->pid, \ ##__VA_ARGS__) +#define mlx5_core_warn_rl(__dev, format, ...) \ + pr_warn_ratelimited("%s:%s:%d:(pid %d): " format, (__dev)->priv.name, \ + __func__, __LINE__, current->pid, \ + ##__VA_ARGS__) + #define mlx5_core_info(__dev, format, ...) \ - dev_info(&(__dev)->pdev->dev, format, ##__VA_ARGS__) + pr_info("%s " format, (__dev)->priv.name, ##__VA_ARGS__) + +#define mlx5_core_info_rl(__dev, format, ...) \ + pr_info_ratelimited("%s:%s:%d:(pid %d): " format, (__dev)->priv.name, \ + __func__, __LINE__, current->pid, \ + ##__VA_ARGS__) enum { MLX5_CMD_DATA, /* print command payload only */ @@ -111,7 +120,6 @@ void mlx5_sriov_cleanup(struct mlx5_core_dev *dev); int mlx5_sriov_attach(struct mlx5_core_dev *dev); void mlx5_sriov_detach(struct mlx5_core_dev *dev); int mlx5_core_sriov_configure(struct pci_dev *dev, int num_vfs); -bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev); int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id); int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id); int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy, @@ -176,6 +184,11 @@ int mlx5_firmware_flash(struct mlx5_core_dev *dev, const struct firmware *fw); void mlx5e_init(void); void mlx5e_cleanup(void); +static inline bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev) +{ + return pci_num_vf(dev->pdev) ? 
true : false; +} + static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev) { /* LACP owner conditions: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c index 7b23fa8d2d60..a249b3c3843d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c @@ -36,13 +36,6 @@ #include "mlx5_core.h" #include "eswitch.h" -bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev) -{ - struct mlx5_core_sriov *sriov = &dev->priv.sriov; - - return !!sriov->num_vfs; -} - static int sriov_restore_guids(struct mlx5_core_dev *dev, int vf) { struct mlx5_core_sriov *sriov = &dev->priv.sriov; @@ -151,33 +144,10 @@ out: mlx5_core_warn(dev, "timeout reclaiming VFs pages\n"); } -static int mlx5_pci_enable_sriov(struct pci_dev *pdev, int num_vfs) -{ - struct mlx5_core_dev *dev = pci_get_drvdata(pdev); - int err = 0; - - if (pci_num_vf(pdev)) { - mlx5_core_warn(dev, "Unable to enable pci sriov, already enabled\n"); - return -EBUSY; - } - - err = pci_enable_sriov(pdev, num_vfs); - if (err) - mlx5_core_warn(dev, "pci_enable_sriov failed : %d\n", err); - - return err; -} - -static void mlx5_pci_disable_sriov(struct pci_dev *pdev) -{ - pci_disable_sriov(pdev); -} - static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs) { struct mlx5_core_dev *dev = pci_get_drvdata(pdev); - struct mlx5_core_sriov *sriov = &dev->priv.sriov; - int err = 0; + int err; err = mlx5_device_enable_sriov(dev, num_vfs); if (err) { @@ -185,42 +155,37 @@ static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs) return err; } - err = mlx5_pci_enable_sriov(pdev, num_vfs); + err = pci_enable_sriov(pdev, num_vfs); if (err) { - mlx5_core_warn(dev, "mlx5_pci_enable_sriov failed : %d\n", err); + mlx5_core_warn(dev, "pci_enable_sriov failed : %d\n", err); mlx5_device_disable_sriov(dev); - return err; } - - sriov->num_vfs = num_vfs; - - return 0; + return err; } static void mlx5_sriov_disable(struct pci_dev *pdev) { struct mlx5_core_dev *dev = pci_get_drvdata(pdev); - struct mlx5_core_sriov *sriov = &dev->priv.sriov; - mlx5_pci_disable_sriov(pdev); + pci_disable_sriov(pdev); mlx5_device_disable_sriov(dev); - sriov->num_vfs = 0; } int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs) { struct mlx5_core_dev *dev = pci_get_drvdata(pdev); + struct mlx5_core_sriov *sriov = &dev->priv.sriov; int err = 0; mlx5_core_dbg(dev, "requested num_vfs %d\n", num_vfs); - if (!mlx5_core_is_pf(dev)) - return -EPERM; if (num_vfs) err = mlx5_sriov_enable(pdev, num_vfs); else mlx5_sriov_disable(pdev); + if (!err) + sriov->num_vfs = num_vfs; return err ? 
err : num_vfs; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c index 94464723ff77..0d006224d7b0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c @@ -79,7 +79,7 @@ static u64 uar2pfn(struct mlx5_core_dev *mdev, u32 index) else system_page_index = index; - return (pci_resource_start(mdev->pdev, 0) >> PAGE_SHIFT) + system_page_index; + return (mdev->bar_addr >> PAGE_SHIFT) + system_page_index; } static void up_rel_func(struct kref *kref) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h index ea934a48c90a..1f87cce421e0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h @@ -134,6 +134,11 @@ static inline void mlx5_wq_cyc_update_db_record(struct mlx5_wq_cyc *wq) *wq->db = cpu_to_be32(wq->wqe_ctr); } +static inline u16 mlx5_wq_cyc_get_ctr_wrap_cnt(struct mlx5_wq_cyc *wq, u16 ctr) +{ + return ctr >> wq->fbc.log_sz; +} + static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr) { return ctr & wq->fbc.sz_m1; @@ -243,6 +248,13 @@ static inline void *mlx5_wq_ll_get_wqe(struct mlx5_wq_ll *wq, u16 ix) return mlx5_frag_buf_get_wqe(&wq->fbc, ix); } +static inline u16 mlx5_wq_ll_get_wqe_next_ix(struct mlx5_wq_ll *wq, u16 ix) +{ + struct mlx5_wqe_srq_next_seg *wqe = mlx5_wq_ll_get_wqe(wq, ix); + + return be16_to_cpu(wqe->next_wqe_index); +} + static inline void mlx5_wq_ll_push(struct mlx5_wq_ll *wq, u16 head_next) { wq->head = head_next; diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig index 9c195dfed031..b6b3ff0fe17f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig +++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig @@ -4,6 +4,7 @@ config MLXSW_CORE tristate "Mellanox Technologies Switch ASICs support" + select NET_DEVLINK ---help--- This driver supports Mellanox Technologies Switch ASICs family. 
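The select NET_DEVLINK added above is what lets the Makefile hunk below build spectrum_dpipe.o unconditionally and the later spectrum_dpipe.h hunk drop its static inline stubs: once the dependency is always present, the optional-dependency idiom becomes dead code. A minimal sketch of that idiom, using hypothetical names (CONFIG_FOO, foo_priv, foo_init and foo_fini are illustrative only, not part of this patch):

struct foo_priv;

/* Optional-dependency stub idiom -- hypothetical sketch. While CONFIG_FOO
 * may be off, the header provides no-op static inline stubs so callers
 * build without per-call-site #ifdefs; foo.c is only compiled
 * (obj-$(CONFIG_FOO) += foo.o) when the option is on. Once the dependency
 * is unconditionally selected, the #else branch and the conditional
 * Makefile rule can both be removed, as done here for spectrum_dpipe.
 */
#if IS_ENABLED(CONFIG_FOO)
int foo_init(struct foo_priv *priv);
void foo_fini(struct foo_priv *priv);
#else
static inline int foo_init(struct foo_priv *priv)
{
	return 0;
}

static inline void foo_fini(struct foo_priv *priv)
{
}
#endif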
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile index a01d15546e37..c4dc72e1ce63 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -28,8 +28,8 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \ spectrum1_mr_tcam.o spectrum2_mr_tcam.o \ spectrum_mr_tcam.o spectrum_mr.o \ spectrum_qdisc.o spectrum_span.o \ - spectrum_nve.o spectrum_nve_vxlan.o + spectrum_nve.o spectrum_nve_vxlan.o \ + spectrum_dpipe.o mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o -mlxsw_spectrum-$(CONFIG_NET_DEVLINK) += spectrum_dpipe.o obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o mlxsw_minimal-objs := minimal.o diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index f26a4ca29363..bcbe07ec22be 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -781,7 +781,8 @@ mlxsw_devlink_sb_pool_get(struct devlink *devlink, static int mlxsw_devlink_sb_pool_set(struct devlink *devlink, unsigned int sb_index, u16 pool_index, u32 size, - enum devlink_sb_threshold_type threshold_type) + enum devlink_sb_threshold_type threshold_type, + struct netlink_ext_ack *extack) { struct mlxsw_core *mlxsw_core = devlink_priv(devlink); struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; @@ -789,7 +790,8 @@ mlxsw_devlink_sb_pool_set(struct devlink *devlink, if (!mlxsw_driver->sb_pool_set) return -EOPNOTSUPP; return mlxsw_driver->sb_pool_set(mlxsw_core, sb_index, - pool_index, size, threshold_type); + pool_index, size, threshold_type, + extack); } static void *__dl_port(struct devlink_port *devlink_port) @@ -829,7 +831,8 @@ static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port, static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port, unsigned int sb_index, u16 pool_index, - u32 threshold) + u32 threshold, + struct netlink_ext_ack *extack) { struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink); struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; @@ -839,7 +842,7 @@ static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port, !mlxsw_core_port_check(mlxsw_core_port)) return -EOPNOTSUPP; return mlxsw_driver->sb_port_pool_set(mlxsw_core_port, sb_index, - pool_index, threshold); + pool_index, threshold, extack); } static int @@ -864,7 +867,8 @@ static int mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port, unsigned int sb_index, u16 tc_index, enum devlink_sb_pool_type pool_type, - u16 pool_index, u32 threshold) + u16 pool_index, u32 threshold, + struct netlink_ext_ack *extack) { struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink); struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; @@ -875,7 +879,7 @@ mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port, return -EOPNOTSUPP; return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index, tc_index, pool_type, - pool_index, threshold); + pool_index, threshold, extack); } static int mlxsw_devlink_sb_occ_snapshot(struct devlink *devlink, @@ -934,6 +938,46 @@ mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port, pool_type, p_cur, p_max); } +static int +mlxsw_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ + struct mlxsw_core *mlxsw_core = devlink_priv(devlink); + char fw_info_psid[MLXSW_REG_MGIR_FW_INFO_PSID_SIZE]; + u32 hw_rev, fw_major, fw_minor, 
fw_sub_minor; + char mgir_pl[MLXSW_REG_MGIR_LEN]; + char buf[32]; + int err; + + err = devlink_info_driver_name_put(req, + mlxsw_core->bus_info->device_kind); + if (err) + return err; + + mlxsw_reg_mgir_pack(mgir_pl); + err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgir), mgir_pl); + if (err) + return err; + mlxsw_reg_mgir_unpack(mgir_pl, &hw_rev, fw_info_psid, &fw_major, + &fw_minor, &fw_sub_minor); + + sprintf(buf, "%X", hw_rev); + err = devlink_info_version_fixed_put(req, "hw.revision", buf); + if (err) + return err; + + err = devlink_info_version_fixed_put(req, "fw.psid", fw_info_psid); + if (err) + return err; + + sprintf(buf, "%d.%d.%d", fw_major, fw_minor, fw_sub_minor); + err = devlink_info_version_running_put(req, "fw.version", buf); + if (err) + return err; + + return 0; +} + static int mlxsw_devlink_core_bus_device_reload(struct devlink *devlink, struct netlink_ext_ack *extack) { @@ -968,6 +1012,7 @@ static const struct devlink_ops mlxsw_devlink_ops = { .sb_occ_max_clear = mlxsw_devlink_sb_occ_max_clear, .sb_occ_port_pool_get = mlxsw_devlink_sb_occ_port_pool_get, .sb_occ_tc_port_bind_get = mlxsw_devlink_sb_occ_tc_port_bind_get, + .info_get = mlxsw_devlink_info_get, }; static int @@ -1718,7 +1763,11 @@ u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core, } EXPORT_SYMBOL(mlxsw_core_res_get); -int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port) +int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port, + u32 port_number, bool split, + u32 split_port_subnumber, + const unsigned char *switch_id, + unsigned char switch_id_len) { struct devlink *devlink = priv_to_devlink(mlxsw_core); struct mlxsw_core_port *mlxsw_core_port = @@ -1727,6 +1776,9 @@ int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port) int err; mlxsw_core_port->local_port = local_port; + devlink_port_attrs_set(devlink_port, DEVLINK_PORT_FLAVOUR_PHYSICAL, + port_number, split, split_port_subnumber, + switch_id, switch_id_len); err = devlink_port_register(devlink, devlink_port, local_port); if (err) memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port)); @@ -1746,17 +1798,13 @@ void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port) EXPORT_SYMBOL(mlxsw_core_port_fini); void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port, - void *port_driver_priv, struct net_device *dev, - u32 port_number, bool split, - u32 split_port_subnumber) + void *port_driver_priv, struct net_device *dev) { struct mlxsw_core_port *mlxsw_core_port = &mlxsw_core->ports[local_port]; struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port; mlxsw_core_port->port_driver_priv = port_driver_priv; - devlink_port_attrs_set(devlink_port, DEVLINK_PORT_FLAVOUR_PHYSICAL, - port_number, split, split_port_subnumber); devlink_port_type_eth_set(devlink_port, dev); } EXPORT_SYMBOL(mlxsw_core_port_eth_set); @@ -1796,16 +1844,18 @@ enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core, } EXPORT_SYMBOL(mlxsw_core_port_type_get); -int mlxsw_core_port_get_phys_port_name(struct mlxsw_core *mlxsw_core, - u8 local_port, char *name, size_t len) + +struct devlink_port * +mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core, + u8 local_port) { struct mlxsw_core_port *mlxsw_core_port = &mlxsw_core->ports[local_port]; struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port; - return devlink_port_get_phys_port_name(devlink_port, name, len); + return devlink_port; } -EXPORT_SYMBOL(mlxsw_core_port_get_phys_port_name); 
+EXPORT_SYMBOL(mlxsw_core_port_devlink_port_get); static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core, const char *buf, size_t size) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 8ec53f027575..917be621c904 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -164,20 +164,23 @@ void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core, u16 lag_id, u8 local_port); void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port); -int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port); +int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port, + u32 port_number, bool split, + u32 split_port_subnumber, + const unsigned char *switch_id, + unsigned char switch_id_len); void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port); void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port, - void *port_driver_priv, struct net_device *dev, - u32 port_number, bool split, - u32 split_port_subnumber); + void *port_driver_priv, struct net_device *dev); void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port, void *port_driver_priv); void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port, void *port_driver_priv); enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core, u8 local_port); -int mlxsw_core_port_get_phys_port_name(struct mlxsw_core *mlxsw_core, - u8 local_port, char *name, size_t len); +struct devlink_port * +mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core, + u8 local_port); int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay); bool mlxsw_core_schedule_work(struct work_struct *work); @@ -251,13 +254,14 @@ struct mlxsw_driver { struct devlink_sb_pool_info *pool_info); int (*sb_pool_set)(struct mlxsw_core *mlxsw_core, unsigned int sb_index, u16 pool_index, u32 size, - enum devlink_sb_threshold_type threshold_type); + enum devlink_sb_threshold_type threshold_type, + struct netlink_ext_ack *extack); int (*sb_port_pool_get)(struct mlxsw_core_port *mlxsw_core_port, unsigned int sb_index, u16 pool_index, u32 *p_threshold); int (*sb_port_pool_set)(struct mlxsw_core_port *mlxsw_core_port, unsigned int sb_index, u16 pool_index, - u32 threshold); + u32 threshold, struct netlink_ext_ack *extack); int (*sb_tc_pool_bind_get)(struct mlxsw_core_port *mlxsw_core_port, unsigned int sb_index, u16 tc_index, enum devlink_sb_pool_type pool_type, @@ -265,7 +269,8 @@ struct mlxsw_driver { int (*sb_tc_pool_bind_set)(struct mlxsw_core_port *mlxsw_core_port, unsigned int sb_index, u16 tc_index, enum devlink_sb_pool_type pool_type, - u16 pool_index, u32 threshold); + u16 pool_index, u32 threshold, + struct netlink_ext_ack *extack); int (*sb_occ_snapshot)(struct mlxsw_core *mlxsw_core, unsigned int sb_index); int (*sb_occ_max_clear)(struct mlxsw_core *mlxsw_core, diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c index 00c390024350..cf2114273b72 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c @@ -51,33 +51,20 @@ static int mlxsw_m_port_dummy_open_stop(struct net_device *dev) return 0; } -static int -mlxsw_m_port_get_phys_port_name(struct net_device *dev, char *name, size_t len) -{ - struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev); - struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core; - u8 local_port = 
mlxsw_m_port->local_port; - - return mlxsw_core_port_get_phys_port_name(core, local_port, name, len); -} - -static int mlxsw_m_port_get_port_parent_id(struct net_device *dev, - struct netdev_phys_item_id *ppid) +static struct devlink_port * +mlxsw_m_port_get_devlink_port(struct net_device *dev) { struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev); struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m; - ppid->id_len = sizeof(mlxsw_m->base_mac); - memcpy(&ppid->id, &mlxsw_m->base_mac, ppid->id_len); - - return 0; + return mlxsw_core_port_devlink_port_get(mlxsw_m->core, + mlxsw_m_port->local_port); } static const struct net_device_ops mlxsw_m_port_netdev_ops = { .ndo_open = mlxsw_m_port_dummy_open_stop, .ndo_stop = mlxsw_m_port_dummy_open_stop, - .ndo_get_phys_port_name = mlxsw_m_port_get_phys_port_name, - .ndo_get_port_parent_id = mlxsw_m_port_get_port_parent_id, + .ndo_get_devlink_port = mlxsw_m_port_get_devlink_port, }; static int mlxsw_m_get_module_info(struct net_device *netdev, @@ -150,7 +137,10 @@ mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u8 local_port, u8 module) struct net_device *dev; int err; - err = mlxsw_core_port_init(mlxsw_m->core, local_port); + err = mlxsw_core_port_init(mlxsw_m->core, local_port, + module + 1, false, 0, + mlxsw_m->base_mac, + sizeof(mlxsw_m->base_mac)); if (err) { dev_err(mlxsw_m->bus_info->dev, "Port %d: Failed to init core port\n", local_port); @@ -190,7 +180,7 @@ mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u8 local_port, u8 module) } mlxsw_core_port_eth_set(mlxsw_m->core, mlxsw_m_port->local_port, - mlxsw_m_port, dev, module + 1, false, 0); + mlxsw_m_port, dev); return 0; diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index eb4c5e8964cd..e1ee7f4994db 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -8534,6 +8534,60 @@ static inline void mlxsw_reg_mpar_pack(char *payload, u8 local_port, mlxsw_reg_mpar_pa_id_set(payload, pa_id); } +/* MGIR - Management General Information Register + * ---------------------------------------------- + * MGIR register allows software to query the hardware and firmware general + * information. + */ +#define MLXSW_REG_MGIR_ID 0x9020 +#define MLXSW_REG_MGIR_LEN 0x9C + +MLXSW_REG_DEFINE(mgir, MLXSW_REG_MGIR_ID, MLXSW_REG_MGIR_LEN); + +/* reg_mgir_hw_info_device_hw_revision + * Access: RO + */ +MLXSW_ITEM32(reg, mgir, hw_info_device_hw_revision, 0x0, 16, 16); + +#define MLXSW_REG_MGIR_FW_INFO_PSID_SIZE 16 + +/* reg_mgir_fw_info_psid + * PSID (ASCII string). 
+ * Access: RO + */ +MLXSW_ITEM_BUF(reg, mgir, fw_info_psid, 0x30, MLXSW_REG_MGIR_FW_INFO_PSID_SIZE); + +/* reg_mgir_fw_info_extended_major + * Access: RO + */ +MLXSW_ITEM32(reg, mgir, fw_info_extended_major, 0x44, 0, 32); + +/* reg_mgir_fw_info_extended_minor + * Access: RO + */ +MLXSW_ITEM32(reg, mgir, fw_info_extended_minor, 0x48, 0, 32); + +/* reg_mgir_fw_info_extended_sub_minor + * Access: RO + */ +MLXSW_ITEM32(reg, mgir, fw_info_extended_sub_minor, 0x4C, 0, 32); + +static inline void mlxsw_reg_mgir_pack(char *payload) +{ + MLXSW_REG_ZERO(mgir, payload); +} + +static inline void +mlxsw_reg_mgir_unpack(char *payload, u32 *hw_rev, char *fw_info_psid, + u32 *fw_major, u32 *fw_minor, u32 *fw_sub_minor) +{ + *hw_rev = mlxsw_reg_mgir_hw_info_device_hw_revision_get(payload); + mlxsw_reg_mgir_fw_info_psid_memcpy_from(payload, fw_info_psid); + *fw_major = mlxsw_reg_mgir_fw_info_extended_major_get(payload); + *fw_minor = mlxsw_reg_mgir_fw_info_extended_minor_get(payload); + *fw_sub_minor = mlxsw_reg_mgir_fw_info_extended_sub_minor_get(payload); +} + /* MRSR - Management Reset and Shutdown Register * --------------------------------------------- * MRSR register is used to reset or shutdown the switch or @@ -9958,6 +10012,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(mcia), MLXSW_REG(mpat), MLXSW_REG(mpar), + MLXSW_REG(mgir), MLXSW_REG(mrsr), MLXSW_REG(mlcr), MLXSW_REG(mpsc), diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 6b8aa3761899..12b176d1d6ef 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -21,7 +21,7 @@ #include <linux/dcbnl.h> #include <linux/inetdevice.h> #include <linux/netlink.h> -#include <linux/random.h> +#include <linux/jhash.h> #include <net/switchdev.h> #include <net/pkt_cls.h> #include <net/tc_act/tc_mirred.h> @@ -1254,16 +1254,6 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev, return 0; } -static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name, - size_t len) -{ - struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); - - return mlxsw_core_port_get_phys_port_name(mlxsw_sp_port->mlxsw_sp->core, - mlxsw_sp_port->local_port, - name, len); -} - static struct mlxsw_sp_port_mall_tc_entry * mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port, unsigned long cookie) { @@ -1714,16 +1704,14 @@ static int mlxsw_sp_set_features(struct net_device *dev, mlxsw_sp_feature_hw_tc); } -static int mlxsw_sp_port_get_port_parent_id(struct net_device *dev, - struct netdev_phys_item_id *ppid) +static struct devlink_port * +mlxsw_sp_port_get_devlink_port(struct net_device *dev) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - ppid->id_len = sizeof(mlxsw_sp->base_mac); - memcpy(&ppid->id, &mlxsw_sp->base_mac, ppid->id_len); - - return 0; + return mlxsw_core_port_devlink_port_get(mlxsw_sp->core, + mlxsw_sp_port->local_port); } static const struct net_device_ops mlxsw_sp_port_netdev_ops = { @@ -1739,9 +1727,8 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = { .ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats, .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid, .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid, - .ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name, .ndo_set_features = mlxsw_sp_set_features, - .ndo_get_port_parent_id = mlxsw_sp_port_get_port_parent_id, + .ndo_get_devlink_port = 
mlxsw_sp_port_get_devlink_port, }; static void mlxsw_sp_port_get_drvinfo(struct net_device *dev, @@ -3391,7 +3378,10 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, struct net_device *dev; int err; - err = mlxsw_core_port_init(mlxsw_sp->core, local_port); + err = mlxsw_core_port_init(mlxsw_sp->core, local_port, + module + 1, split, lane / width, + mlxsw_sp->base_mac, + sizeof(mlxsw_sp->base_mac)); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n", local_port); @@ -3573,8 +3563,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, } mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port, - mlxsw_sp_port, dev, module + 1, - mlxsw_sp_port->split, lane / width); + mlxsw_sp_port, dev); mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0); return 0; @@ -4238,7 +4227,7 @@ static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) u32 seed; int err; - get_random_bytes(&seed, sizeof(seed)); + seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0); mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC | MLXSW_REG_SLCR_LAG_HASH_DMAC | MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE | diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index da6278b0caa4..8601b3041acd 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -371,13 +371,14 @@ int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core, struct devlink_sb_pool_info *pool_info); int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core, unsigned int sb_index, u16 pool_index, u32 size, - enum devlink_sb_threshold_type threshold_type); + enum devlink_sb_threshold_type threshold_type, + struct netlink_ext_ack *extack); int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port, unsigned int sb_index, u16 pool_index, u32 *p_threshold); int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port, unsigned int sb_index, u16 pool_index, - u32 threshold); + u32 threshold, struct netlink_ext_ack *extack); int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port, unsigned int sb_index, u16 tc_index, enum devlink_sb_pool_type pool_type, @@ -385,7 +386,8 @@ int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port, int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port, unsigned int sb_index, u16 tc_index, enum devlink_sb_pool_type pool_type, - u16 pool_index, u32 threshold); + u16 pool_index, u32 threshold, + struct netlink_ext_ack *extack); int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core, unsigned int sb_index); int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c index 8811f6513e36..e993159e8e4c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c @@ -216,7 +216,6 @@ struct mlxsw_sp_acl_tcam_vregion { struct mlxsw_sp_acl_tcam_rehash_ctx ctx; } rehash; struct mlxsw_sp *mlxsw_sp; - bool failed_rollback; /* Indicates failed rollback during migration */ unsigned int ref_count; }; @@ -1256,11 +1255,8 @@ mlxsw_sp_acl_tcam_vchunk_migrate_start(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_chunk *new_chunk; new_chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk, region); - if (IS_ERR(new_chunk)) { - if (ctx->this_is_rollback) - 
vchunk->vregion->failed_rollback = true; + if (IS_ERR(new_chunk)) return PTR_ERR(new_chunk); - } vchunk->chunk2 = vchunk->chunk; vchunk->chunk = new_chunk; ctx->current_vchunk = vchunk; @@ -1318,8 +1314,13 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp, err = mlxsw_sp_acl_tcam_ventry_migrate(mlxsw_sp, ventry, vchunk->chunk, credits); if (err) { - if (ctx->this_is_rollback) + if (ctx->this_is_rollback) { + /* Save the ventry which we ended with and try + * to continue later on. + */ + ctx->start_ventry = ventry; return err; + } /* Swap the chunk and chunk2 pointers so the follow-up * rollback call will see the original chunk pointer * in vchunk->chunk. @@ -1397,8 +1398,12 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp, ctx->this_is_rollback = true; err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion, ctx, credits); - if (err2) - vregion->failed_rollback = true; + if (err2) { + trace_mlxsw_sp_acl_tcam_vregion_rehash_rollback_failed(mlxsw_sp, + vregion); + dev_err(mlxsw_sp->bus_info->dev, "Failed to rollback during vregion migration fail\n"); + /* Let the rollback be continued later on. */ + } } mutex_unlock(&vregion->lock); trace_mlxsw_sp_acl_tcam_vregion_migrate_end(mlxsw_sp, vregion); @@ -1423,8 +1428,6 @@ mlxsw_sp_acl_tcam_vregion_rehash_start(struct mlxsw_sp *mlxsw_sp, int err; trace_mlxsw_sp_acl_tcam_vregion_rehash(mlxsw_sp, vregion); - if (vregion->failed_rollback) - return -EBUSY; hints_priv = ops->region_rehash_hints_get(vregion->region->priv); if (IS_ERR(hints_priv)) @@ -1471,11 +1474,9 @@ mlxsw_sp_acl_tcam_vregion_rehash_end(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_region *unused_region = vregion->region2; const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; - if (!vregion->failed_rollback) { - vregion->region2 = NULL; - mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, unused_region); - mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, unused_region); - } + vregion->region2 = NULL; + mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, unused_region); + mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, unused_region); ops->region_rehash_hints_put(ctx->hints_priv); ctx->hints_priv = NULL; } @@ -1506,11 +1507,6 @@ mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp, ctx, credits); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n"); - if (vregion->failed_rollback) { - trace_mlxsw_sp_acl_tcam_vregion_rehash_dis(mlxsw_sp, - vregion); - dev_err(mlxsw_sp->bus_info->dev, "Failed to rollback during vregion migration fail\n"); - } } if (*credits >= 0) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index d633bef5f105..8512dd49e420 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -6,6 +6,7 @@ #include <linux/dcbnl.h> #include <linux/if_ether.h> #include <linux/list.h> +#include <linux/netlink.h> #include "spectrum.h" #include "core.h" @@ -15,6 +16,8 @@ struct mlxsw_sp_sb_pr { enum mlxsw_reg_sbpr_mode mode; u32 size; + u8 freeze_mode:1, + freeze_size:1; }; struct mlxsw_cp_sb_occ { @@ -27,6 +30,8 @@ struct mlxsw_sp_sb_cm { u32 max_buff; u16 pool_index; struct mlxsw_cp_sb_occ occ; + u8 freeze_pool:1, + freeze_thresh:1; }; #define MLXSW_SP_SB_INFI -1U @@ -48,7 +53,12 @@ struct mlxsw_sp_sb_pool_des { u8 pool; }; -/* Order ingress pools before egress pools. 
*/ +#define MLXSW_SP_SB_POOL_ING 0 +#define MLXSW_SP_SB_POOL_EGR 4 +#define MLXSW_SP_SB_POOL_EGR_MC 8 +#define MLXSW_SP_SB_POOL_ING_CPU 9 +#define MLXSW_SP_SB_POOL_EGR_CPU 10 + static const struct mlxsw_sp_sb_pool_des mlxsw_sp1_sb_pool_dess[] = { {MLXSW_REG_SBXX_DIR_INGRESS, 0}, {MLXSW_REG_SBXX_DIR_INGRESS, 1}, @@ -59,6 +69,8 @@ static const struct mlxsw_sp_sb_pool_des mlxsw_sp1_sb_pool_dess[] = { {MLXSW_REG_SBXX_DIR_EGRESS, 2}, {MLXSW_REG_SBXX_DIR_EGRESS, 3}, {MLXSW_REG_SBXX_DIR_EGRESS, 15}, + {MLXSW_REG_SBXX_DIR_INGRESS, 4}, + {MLXSW_REG_SBXX_DIR_EGRESS, 4}, }; static const struct mlxsw_sp_sb_pool_des mlxsw_sp2_sb_pool_dess[] = { @@ -71,6 +83,8 @@ static const struct mlxsw_sp_sb_pool_des mlxsw_sp2_sb_pool_dess[] = { {MLXSW_REG_SBXX_DIR_EGRESS, 2}, {MLXSW_REG_SBXX_DIR_EGRESS, 3}, {MLXSW_REG_SBXX_DIR_EGRESS, 15}, + {MLXSW_REG_SBXX_DIR_INGRESS, 4}, + {MLXSW_REG_SBXX_DIR_EGRESS, 4}, }; #define MLXSW_SP_SB_ING_TC_COUNT 8 @@ -94,6 +108,7 @@ struct mlxsw_sp_sb_vals { unsigned int pool_count; const struct mlxsw_sp_sb_pool_des *pool_dess; const struct mlxsw_sp_sb_pm *pms; + const struct mlxsw_sp_sb_pm *pms_cpu; const struct mlxsw_sp_sb_pr *prs; const struct mlxsw_sp_sb_mm *mms; const struct mlxsw_sp_sb_cm *cms_ingress; @@ -275,7 +290,7 @@ static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port) { const u32 pbs[] = { [0] = MLXSW_SP_PB_HEADROOM * mlxsw_sp_port->mapping.width, - [9] = 2 * MLXSW_PORT_MAX_MTU, + [9] = MLXSW_PORT_MAX_MTU, }; struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; char pbmc_pl[MLXSW_REG_PBMC_LEN]; @@ -390,46 +405,60 @@ static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp) .size = _size, \ } +#define MLXSW_SP_SB_PR_EXT(_mode, _size, _freeze_mode, _freeze_size) \ + { \ + .mode = _mode, \ + .size = _size, \ + .freeze_mode = _freeze_mode, \ + .freeze_size = _freeze_size, \ + } + #define MLXSW_SP1_SB_PR_INGRESS_SIZE 12440000 -#define MLXSW_SP1_SB_PR_INGRESS_MNG_SIZE (200 * 1000) #define MLXSW_SP1_SB_PR_EGRESS_SIZE 13232000 +#define MLXSW_SP1_SB_PR_CPU_SIZE (256 * 1000) +/* Order according to mlxsw_sp1_sb_pool_dess */ static const struct mlxsw_sp_sb_pr mlxsw_sp1_sb_prs[] = { - /* Ingress pools. */ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP1_SB_PR_INGRESS_SIZE), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), - MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, - MLXSW_SP1_SB_PR_INGRESS_MNG_SIZE), - /* Egress pools. */ - MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, - MLXSW_SP1_SB_PR_EGRESS_SIZE), + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), + MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC, + MLXSW_SP1_SB_PR_EGRESS_SIZE, true, false), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), - MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI), + MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI, + true, true), + MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC, + MLXSW_SP1_SB_PR_CPU_SIZE, true, false), + MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC, + MLXSW_SP1_SB_PR_CPU_SIZE, true, false), }; #define MLXSW_SP2_SB_PR_INGRESS_SIZE 40960000 -#define MLXSW_SP2_SB_PR_INGRESS_MNG_SIZE (200 * 1000) #define MLXSW_SP2_SB_PR_EGRESS_SIZE 40960000 +#define MLXSW_SP2_SB_PR_CPU_SIZE (256 * 1000) +/* Order according to mlxsw_sp2_sb_pool_dess */ static const struct mlxsw_sp_sb_pr mlxsw_sp2_sb_prs[] = { - /* Ingress pools. 
*/ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP2_SB_PR_INGRESS_SIZE), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0), - MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, - MLXSW_SP2_SB_PR_INGRESS_MNG_SIZE), - /* Egress pools. */ - MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, - MLXSW_SP2_SB_PR_EGRESS_SIZE), + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0), + MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC, + MLXSW_SP2_SB_PR_EGRESS_SIZE, true, false), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0), - MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI), + MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI, + true, true), + MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC, + MLXSW_SP2_SB_PR_CPU_SIZE, true, false), + MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC, + MLXSW_SP2_SB_PR_CPU_SIZE, true, false), }; static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp, @@ -464,83 +493,106 @@ static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp, .pool_index = _pool, \ } +#define MLXSW_SP_SB_CM_ING(_min_buff, _max_buff) \ + { \ + .min_buff = _min_buff, \ + .max_buff = _max_buff, \ + .pool_index = MLXSW_SP_SB_POOL_ING, \ + } + +#define MLXSW_SP_SB_CM_EGR(_min_buff, _max_buff) \ + { \ + .min_buff = _min_buff, \ + .max_buff = _max_buff, \ + .pool_index = MLXSW_SP_SB_POOL_EGR, \ + } + +#define MLXSW_SP_SB_CM_EGR_MC(_min_buff, _max_buff) \ + { \ + .min_buff = _min_buff, \ + .max_buff = _max_buff, \ + .pool_index = MLXSW_SP_SB_POOL_EGR_MC, \ + .freeze_pool = true, \ + .freeze_thresh = true, \ + } + static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_ingress[] = { - MLXSW_SP_SB_CM(10000, 8, 0), - MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), - MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), - MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), - MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), - MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), - MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), - MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), - MLXSW_SP_SB_CM(0, 0, 0), /* dummy, this PG does not exist */ - MLXSW_SP_SB_CM(20000, 1, 3), + MLXSW_SP_SB_CM_ING(10000, 8), + MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), + MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), + MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), + MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), + MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), + MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), + MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), + MLXSW_SP_SB_CM_ING(0, 0), /* dummy, this PG does not exist */ + MLXSW_SP_SB_CM(10000, 8, MLXSW_SP_SB_POOL_ING_CPU), }; static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_ingress[] = { - MLXSW_SP_SB_CM(0, 7, 0), - MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), - MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), - MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), - MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), - MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), - MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), - MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), - MLXSW_SP_SB_CM(0, 0, 0), /* dummy, this PG does not exist */ - MLXSW_SP_SB_CM(20000, 1, 3), + MLXSW_SP_SB_CM_ING(0, 7), + MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), + MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), + 
MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), + MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), + MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), + MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), + MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), + MLXSW_SP_SB_CM_ING(0, 0), /* dummy, this PG does not exist */ + MLXSW_SP_SB_CM(10000, 8, MLXSW_SP_SB_POOL_ING_CPU), }; static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_egress[] = { - MLXSW_SP_SB_CM(1500, 9, 4), - MLXSW_SP_SB_CM(1500, 9, 4), - MLXSW_SP_SB_CM(1500, 9, 4), - MLXSW_SP_SB_CM(1500, 9, 4), - MLXSW_SP_SB_CM(1500, 9, 4), - MLXSW_SP_SB_CM(1500, 9, 4), - MLXSW_SP_SB_CM(1500, 9, 4), - MLXSW_SP_SB_CM(1500, 9, 4), - MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), - MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), - MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), - MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), - MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), - MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), - MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), - MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), - MLXSW_SP_SB_CM(1, 0xff, 4), + MLXSW_SP_SB_CM_EGR(1500, 9), + MLXSW_SP_SB_CM_EGR(1500, 9), + MLXSW_SP_SB_CM_EGR(1500, 9), + MLXSW_SP_SB_CM_EGR(1500, 9), + MLXSW_SP_SB_CM_EGR(1500, 9), + MLXSW_SP_SB_CM_EGR(1500, 9), + MLXSW_SP_SB_CM_EGR(1500, 9), + MLXSW_SP_SB_CM_EGR(1500, 9), + MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI), + MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI), + MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI), + MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI), + MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI), + MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI), + MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI), + MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI), + MLXSW_SP_SB_CM_EGR(1, 0xff), }; static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_egress[] = { - MLXSW_SP_SB_CM(0, 7, 4), - MLXSW_SP_SB_CM(0, 7, 4), - MLXSW_SP_SB_CM(0, 7, 4), - MLXSW_SP_SB_CM(0, 7, 4), - MLXSW_SP_SB_CM(0, 7, 4), - MLXSW_SP_SB_CM(0, 7, 4), - MLXSW_SP_SB_CM(0, 7, 4), - MLXSW_SP_SB_CM(0, 7, 4), - MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), - MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), - MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), - MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), - MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), - MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), - MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), - MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), - MLXSW_SP_SB_CM(1, 0xff, 4), + MLXSW_SP_SB_CM_EGR(0, 7), + MLXSW_SP_SB_CM_EGR(0, 7), + MLXSW_SP_SB_CM_EGR(0, 7), + MLXSW_SP_SB_CM_EGR(0, 7), + MLXSW_SP_SB_CM_EGR(0, 7), + MLXSW_SP_SB_CM_EGR(0, 7), + MLXSW_SP_SB_CM_EGR(0, 7), + MLXSW_SP_SB_CM_EGR(0, 7), + MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI), + MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI), + MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI), + MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI), + MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI), + MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI), + MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI), + MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI), + MLXSW_SP_SB_CM_EGR(1, 0xff), }; -#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, 4) +#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, MLXSW_SP_SB_POOL_EGR_CPU) static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = { MLXSW_SP_CPU_PORT_SB_CM, - MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4), - MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4), - MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4), - MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4), - MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4), + MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU), + MLXSW_SP_SB_CM(1000, 8, 
MLXSW_SP_SB_POOL_EGR_CPU), + MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU), + MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU), + MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU), MLXSW_SP_CPU_PORT_SB_CM, - MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4), + MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU), MLXSW_SP_CPU_PORT_SB_CM, MLXSW_SP_CPU_PORT_SB_CM, MLXSW_SP_CPU_PORT_SB_CM, @@ -648,80 +700,116 @@ static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp) .max_buff = _max_buff, \ } +/* Order according to mlxsw_sp1_sb_pool_dess */ static const struct mlxsw_sp_sb_pm mlxsw_sp1_sb_pms[] = { - /* Ingress pools. */ MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX), MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), - MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX), - /* Egress pools. */ + MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), MLXSW_SP_SB_PM(0, 7), MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), MLXSW_SP_SB_PM(10000, 90000), + MLXSW_SP_SB_PM(0, 8), /* 50% occupancy */ + MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), }; +/* Order according to mlxsw_sp2_sb_pool_dess */ static const struct mlxsw_sp_sb_pm mlxsw_sp2_sb_pms[] = { - /* Ingress pools. */ MLXSW_SP_SB_PM(0, 7), MLXSW_SP_SB_PM(0, 0), MLXSW_SP_SB_PM(0, 0), - MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX), - /* Egress pools. */ + MLXSW_SP_SB_PM(0, 0), MLXSW_SP_SB_PM(0, 7), MLXSW_SP_SB_PM(0, 0), MLXSW_SP_SB_PM(0, 0), MLXSW_SP_SB_PM(0, 0), MLXSW_SP_SB_PM(10000, 90000), + MLXSW_SP_SB_PM(0, 8), /* 50% occupancy */ + MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), }; -static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port) +/* Order according to mlxsw_sp*_sb_pool_dess */ +static const struct mlxsw_sp_sb_pm mlxsw_sp_cpu_port_sb_pms[] = { + MLXSW_SP_SB_PM(0, 0), + MLXSW_SP_SB_PM(0, 0), + MLXSW_SP_SB_PM(0, 0), + MLXSW_SP_SB_PM(0, 0), + MLXSW_SP_SB_PM(0, 0), + MLXSW_SP_SB_PM(0, 0), + MLXSW_SP_SB_PM(0, 0), + MLXSW_SP_SB_PM(0, 0), + MLXSW_SP_SB_PM(0, 90000), + MLXSW_SP_SB_PM(0, 0), + MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX), +}; + +static int mlxsw_sp_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port, + const struct mlxsw_sp_sb_pm *pms, + bool skip_ingress) { - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - int i; - int err; + int i, err; for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) { - const struct mlxsw_sp_sb_pm *pm = &mlxsw_sp->sb_vals->pms[i]; + const struct mlxsw_sp_sb_pm *pm = &pms[i]; + const struct mlxsw_sp_sb_pool_des *des; u32 max_buff; u32 min_buff; + des = &mlxsw_sp->sb_vals->pool_dess[i]; + if (skip_ingress && des->dir == MLXSW_REG_SBXX_DIR_INGRESS) + continue; + min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, pm->min_buff); max_buff = pm->max_buff; if (mlxsw_sp_sb_pool_is_static(mlxsw_sp, i)) max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, max_buff); - err = mlxsw_sp_sb_pm_write(mlxsw_sp, mlxsw_sp_port->local_port, - i, min_buff, max_buff); + err = mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, i, min_buff, + max_buff); if (err) return err; } return 0; } -#define MLXSW_SP_SB_MM(_min_buff, _max_buff, _pool) \ +static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + + return mlxsw_sp_sb_pms_init(mlxsw_sp, mlxsw_sp_port->local_port, + mlxsw_sp->sb_vals->pms, false); +} + +static int mlxsw_sp_cpu_port_sb_pms_init(struct mlxsw_sp 
*mlxsw_sp) +{ + return mlxsw_sp_sb_pms_init(mlxsw_sp, 0, mlxsw_sp->sb_vals->pms_cpu, + true); +} + +#define MLXSW_SP_SB_MM(_min_buff, _max_buff) \ { \ .min_buff = _min_buff, \ .max_buff = _max_buff, \ - .pool_index = _pool, \ + .pool_index = MLXSW_SP_SB_POOL_EGR, \ } static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = { - MLXSW_SP_SB_MM(0, 6, 4), - MLXSW_SP_SB_MM(0, 6, 4), - MLXSW_SP_SB_MM(0, 6, 4), - MLXSW_SP_SB_MM(0, 6, 4), - MLXSW_SP_SB_MM(0, 6, 4), - MLXSW_SP_SB_MM(0, 6, 4), - MLXSW_SP_SB_MM(0, 6, 4), - MLXSW_SP_SB_MM(0, 6, 4), - MLXSW_SP_SB_MM(0, 6, 4), - MLXSW_SP_SB_MM(0, 6, 4), - MLXSW_SP_SB_MM(0, 6, 4), - MLXSW_SP_SB_MM(0, 6, 4), - MLXSW_SP_SB_MM(0, 6, 4), - MLXSW_SP_SB_MM(0, 6, 4), - MLXSW_SP_SB_MM(0, 6, 4), + MLXSW_SP_SB_MM(0, 6), + MLXSW_SP_SB_MM(0, 6), + MLXSW_SP_SB_MM(0, 6), + MLXSW_SP_SB_MM(0, 6), + MLXSW_SP_SB_MM(0, 6), + MLXSW_SP_SB_MM(0, 6), + MLXSW_SP_SB_MM(0, 6), + MLXSW_SP_SB_MM(0, 6), + MLXSW_SP_SB_MM(0, 6), + MLXSW_SP_SB_MM(0, 6), + MLXSW_SP_SB_MM(0, 6), + MLXSW_SP_SB_MM(0, 6), + MLXSW_SP_SB_MM(0, 6), + MLXSW_SP_SB_MM(0, 6), + MLXSW_SP_SB_MM(0, 6), }; static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp) @@ -755,21 +843,22 @@ static void mlxsw_sp_pool_count(struct mlxsw_sp *mlxsw_sp, { int i; - for (i = 0; i < mlxsw_sp->sb_vals->pool_count; ++i) + for (i = 0; i < mlxsw_sp->sb_vals->pool_count; ++i) { if (mlxsw_sp->sb_vals->pool_dess[i].dir == - MLXSW_REG_SBXX_DIR_EGRESS) - goto out; - WARN(1, "No egress pools\n"); + MLXSW_REG_SBXX_DIR_INGRESS) + (*p_ingress_len)++; + else + (*p_egress_len)++; + } -out: - *p_ingress_len = i; - *p_egress_len = mlxsw_sp->sb_vals->pool_count - i; + WARN(*p_egress_len == 0, "No egress pools\n"); } const struct mlxsw_sp_sb_vals mlxsw_sp1_sb_vals = { .pool_count = ARRAY_SIZE(mlxsw_sp1_sb_pool_dess), .pool_dess = mlxsw_sp1_sb_pool_dess, .pms = mlxsw_sp1_sb_pms, + .pms_cpu = mlxsw_sp_cpu_port_sb_pms, .prs = mlxsw_sp1_sb_prs, .mms = mlxsw_sp_sb_mms, .cms_ingress = mlxsw_sp1_sb_cms_ingress, @@ -785,6 +874,7 @@ const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals = { .pool_count = ARRAY_SIZE(mlxsw_sp2_sb_pool_dess), .pool_dess = mlxsw_sp2_sb_pool_dess, .pms = mlxsw_sp2_sb_pms, + .pms_cpu = mlxsw_sp_cpu_port_sb_pms, .prs = mlxsw_sp2_sb_prs, .mms = mlxsw_sp_sb_mms, .cms_ingress = mlxsw_sp2_sb_cms_ingress, @@ -799,8 +889,8 @@ const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals = { int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp) { u32 max_headroom_size; - u16 ing_pool_count; - u16 eg_pool_count; + u16 ing_pool_count = 0; + u16 eg_pool_count = 0; int err; if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, CELL_SIZE)) @@ -834,6 +924,9 @@ int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp) err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp); if (err) goto err_sb_cpu_port_sb_cms_init; + err = mlxsw_sp_cpu_port_sb_pms_init(mlxsw_sp); + if (err) + goto err_sb_cpu_port_pms_init; err = mlxsw_sp_sb_mms_init(mlxsw_sp); if (err) goto err_sb_mms_init; @@ -851,6 +944,7 @@ int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp) err_devlink_sb_register: err_sb_mms_init: +err_sb_cpu_port_pms_init: err_sb_cpu_port_sb_cms_init: err_sb_prs_init: mlxsw_sp_sb_ports_fini(mlxsw_sp); @@ -900,16 +994,32 @@ int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core, int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core, unsigned int sb_index, u16 pool_index, u32 size, - enum devlink_sb_threshold_type threshold_type) + enum devlink_sb_threshold_type threshold_type, + struct netlink_ext_ack *extack) { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); u32 pool_size = 
mlxsw_sp_bytes_cells(mlxsw_sp, size); + const struct mlxsw_sp_sb_pr *pr; enum mlxsw_reg_sbpr_mode mode; - if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE)) + mode = (enum mlxsw_reg_sbpr_mode) threshold_type; + pr = &mlxsw_sp->sb_vals->prs[pool_index]; + + if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE)) { + NL_SET_ERR_MSG_MOD(extack, "Exceeded shared buffer size"); return -EINVAL; + } + + if (pr->freeze_mode && pr->mode != mode) { + NL_SET_ERR_MSG_MOD(extack, "Changing this pool's threshold type is forbidden"); + return -EINVAL; + } + + if (pr->freeze_size && pr->size != size) { + NL_SET_ERR_MSG_MOD(extack, "Changing this pool's size is forbidden"); + return -EINVAL; + } - mode = (enum mlxsw_reg_sbpr_mode) threshold_type; return mlxsw_sp_sb_pr_write(mlxsw_sp, pool_index, mode, pool_size, false); } @@ -927,7 +1037,8 @@ static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u16 pool_index, } static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u16 pool_index, - u32 threshold, u32 *p_max_buff) + u32 threshold, u32 *p_max_buff, + struct netlink_ext_ack *extack) { struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index); @@ -936,8 +1047,10 @@ static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u16 pool_index, val = threshold + MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET; if (val < MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN || - val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX) + val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX) { + NL_SET_ERR_MSG_MOD(extack, "Invalid dynamic threshold value"); return -EINVAL; + } *p_max_buff = val; } else { *p_max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, threshold); @@ -963,7 +1076,7 @@ int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port, int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port, unsigned int sb_index, u16 pool_index, - u32 threshold) + u32 threshold, struct netlink_ext_ack *extack) { struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_core_port_driver_priv(mlxsw_core_port); @@ -973,7 +1086,7 @@ int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port, int err; err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index, - threshold, &max_buff); + threshold, &max_buff, extack); if (err) return err; @@ -1004,22 +1117,41 @@ int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port, int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port, unsigned int sb_index, u16 tc_index, enum devlink_sb_pool_type pool_type, - u16 pool_index, u32 threshold) + u16 pool_index, u32 threshold, + struct netlink_ext_ack *extack) { struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_core_port_driver_priv(mlxsw_core_port); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; u8 local_port = mlxsw_sp_port->local_port; + const struct mlxsw_sp_sb_cm *cm; u8 pg_buff = tc_index; enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type; u32 max_buff; int err; - if (dir != mlxsw_sp->sb_vals->pool_dess[pool_index].dir) + if (dir != mlxsw_sp->sb_vals->pool_dess[pool_index].dir) { + NL_SET_ERR_MSG_MOD(extack, "Binding egress TC to ingress pool and vice versa is forbidden"); return -EINVAL; + } + + if (dir == MLXSW_REG_SBXX_DIR_INGRESS) + cm = &mlxsw_sp->sb_vals->cms_ingress[tc_index]; + else + cm = &mlxsw_sp->sb_vals->cms_egress[tc_index]; + + if (cm->freeze_pool && cm->pool_index != pool_index) { + NL_SET_ERR_MSG_MOD(extack, "Binding this TC to a different pool is forbidden"); + return -EINVAL; + } + + if (cm->freeze_thresh && cm->max_buff != threshold) { + NL_SET_ERR_MSG_MOD(extack, 
"Changing this TC's threshold is forbidden"); + return -EINVAL; + } err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index, - threshold, &max_buff); + threshold, &max_buff, extack); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h index e689576231ab..246dbb3c0e1b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h @@ -4,24 +4,9 @@ #ifndef _MLXSW_PIPELINE_H_ #define _MLXSW_PIPELINE_H_ -#if IS_ENABLED(CONFIG_NET_DEVLINK) - int mlxsw_sp_dpipe_init(struct mlxsw_sp *mlxsw_sp); void mlxsw_sp_dpipe_fini(struct mlxsw_sp *mlxsw_sp); -#else - -static inline int mlxsw_sp_dpipe_init(struct mlxsw_sp *mlxsw_sp) -{ - return 0; -} - -static inline void mlxsw_sp_dpipe_fini(struct mlxsw_sp *mlxsw_sp) -{ -} - -#endif - #define MLXSW_SP_DPIPE_TABLE_NAME_ERIF "mlxsw_erif" #define MLXSW_SP_DPIPE_TABLE_NAME_HOST4 "mlxsw_host4" #define MLXSW_SP_DPIPE_TABLE_NAME_HOST6 "mlxsw_host6" diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 902e766a8ed3..1cda8a248b12 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -13,9 +13,9 @@ #include <linux/socket.h> #include <linux/route.h> #include <linux/gcd.h> -#include <linux/random.h> #include <linux/if_macvlan.h> #include <linux/refcount.h> +#include <linux/jhash.h> #include <net/netevent.h> #include <net/neighbour.h> #include <net/arp.h> @@ -2371,7 +2371,7 @@ static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding) MLXSW_REG_RAUHT_OP_WRITE_DELETE; } -static void +static int mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_neigh_entry *neigh_entry, enum mlxsw_reg_rauht_op op) @@ -2385,10 +2385,10 @@ mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp, if (neigh_entry->counter_valid) mlxsw_reg_rauht_pack_counter(rauht_pl, neigh_entry->counter_index); - mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl); } -static void +static int mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_neigh_entry *neigh_entry, enum mlxsw_reg_rauht_op op) @@ -2402,7 +2402,7 @@ mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp, if (neigh_entry->counter_valid) mlxsw_reg_rauht_pack_counter(rauht_pl, neigh_entry->counter_index); - mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl); } bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry) @@ -2424,20 +2424,33 @@ mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_neigh_entry *neigh_entry, bool adding) { + enum mlxsw_reg_rauht_op op = mlxsw_sp_rauht_op(adding); + int err; + if (!adding && !neigh_entry->connected) return; neigh_entry->connected = adding; if (neigh_entry->key.n->tbl->family == AF_INET) { - mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry, - mlxsw_sp_rauht_op(adding)); + err = mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry, + op); + if (err) + return; } else if (neigh_entry->key.n->tbl->family == AF_INET6) { if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry)) return; - mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry, - mlxsw_sp_rauht_op(adding)); + err = mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry, + op); + if (err) + return; } else { 
WARN_ON_ONCE(1); + return; } + + if (adding) + neigh_entry->key.n->flags |= NTF_OFFLOADED; + else + neigh_entry->key.n->flags &= ~NTF_OFFLOADED; } void @@ -2873,12 +2886,13 @@ mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp, return false; list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) { + struct fib6_nh *fib6_nh = &mlxsw_sp_rt6->rt->fib6_nh; struct in6_addr *gw; int ifindex, weight; - ifindex = mlxsw_sp_rt6->rt->fib6_nh.nh_dev->ifindex; - weight = mlxsw_sp_rt6->rt->fib6_nh.nh_weight; - gw = &mlxsw_sp_rt6->rt->fib6_nh.nh_gw; + ifindex = fib6_nh->fib_nh_dev->ifindex; + weight = fib6_nh->fib_nh_weight; + gw = &fib6_nh->fib_nh_gw6; if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex, weight)) return false; @@ -2944,7 +2958,7 @@ mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed) struct net_device *dev; list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) { - dev = mlxsw_sp_rt6->rt->fib6_nh.nh_dev; + dev = mlxsw_sp_rt6->rt->fib6_nh.fib_nh_dev; val ^= dev->ifindex; } @@ -3610,7 +3624,7 @@ static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp, const struct fib_nh *fib_nh, enum mlxsw_sp_ipip_type *p_ipipt) { - struct net_device *dev = fib_nh->nh_dev; + struct net_device *dev = fib_nh->fib_nh_dev; return dev && fib_nh->nh_parent->fib_type == RTN_UNICAST && @@ -3637,7 +3651,7 @@ static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp, struct fib_nh *fib_nh) { const struct mlxsw_sp_ipip_ops *ipip_ops; - struct net_device *dev = fib_nh->nh_dev; + struct net_device *dev = fib_nh->fib_nh_dev; struct mlxsw_sp_ipip_entry *ipip_entry; struct mlxsw_sp_rif *rif; int err; @@ -3681,18 +3695,18 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop *nh, struct fib_nh *fib_nh) { - struct net_device *dev = fib_nh->nh_dev; + struct net_device *dev = fib_nh->fib_nh_dev; struct in_device *in_dev; int err; nh->nh_grp = nh_grp; nh->key.fib_nh = fib_nh; #ifdef CONFIG_IP_ROUTE_MULTIPATH - nh->nh_weight = fib_nh->nh_weight; + nh->nh_weight = fib_nh->fib_nh_weight; #else nh->nh_weight = 1; #endif - memcpy(&nh->gw_addr, &fib_nh->nh_gw, sizeof(fib_nh->nh_gw)); + memcpy(&nh->gw_addr, &fib_nh->fib_nh_gw4, sizeof(fib_nh->fib_nh_gw4)); err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh); if (err) return err; @@ -3705,7 +3719,7 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp, in_dev = __in_dev_get_rtnl(dev); if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) && - fib_nh->nh_flags & RTNH_F_LINKDOWN) + fib_nh->fib_nh_flags & RTNH_F_LINKDOWN) return 0; err = mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh); @@ -3804,7 +3818,7 @@ static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp, const struct fib_info *fi) { - return fi->fib_nh->nh_scope == RT_SCOPE_LINK || + return fi->fib_nh->fib_nh_scope == RT_SCOPE_LINK || mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fi->fib_nh, NULL); } @@ -3946,9 +3960,9 @@ mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp, struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i]; struct fib6_info *rt = mlxsw_sp_rt6->rt; - if (nh->rif && nh->rif->dev == rt->fib6_nh.nh_dev && + if (nh->rif && nh->rif->dev == rt->fib6_nh.fib_nh_dev && ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr, - &rt->fib6_nh.nh_gw)) + &rt->fib6_nh.fib_nh_gw6)) return nh; continue; } @@ -3966,7 +3980,7 @@ mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry) fib_entry->type == 
MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE || fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP || fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP) { - nh_grp->nexthops->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD; + nh_grp->nexthops->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD; return; } @@ -3974,9 +3988,9 @@ mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry) struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i]; if (nh->offloaded) - nh->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD; + nh->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD; else - nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD; + nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD; } } @@ -3992,7 +4006,7 @@ mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry) for (i = 0; i < nh_grp->count; i++) { struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i]; - nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD; + nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD; } } @@ -4008,19 +4022,20 @@ mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry) if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL || fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE) { list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6, - list)->rt->fib6_nh.nh_flags |= RTNH_F_OFFLOAD; + list)->rt->fib6_nh.fib_nh_flags |= RTNH_F_OFFLOAD; return; } list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) { struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group; + struct fib6_nh *fib6_nh = &mlxsw_sp_rt6->rt->fib6_nh; struct mlxsw_sp_nexthop *nh; nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6); if (nh && nh->offloaded) - mlxsw_sp_rt6->rt->fib6_nh.nh_flags |= RTNH_F_OFFLOAD; + fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD; else - mlxsw_sp_rt6->rt->fib6_nh.nh_flags &= ~RTNH_F_OFFLOAD; + fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD; } } @@ -4035,7 +4050,7 @@ mlxsw_sp_fib6_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry) list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) { struct fib6_info *rt = mlxsw_sp_rt6->rt; - rt->fib6_nh.nh_flags &= ~RTNH_F_OFFLOAD; + rt->fib6_nh.fib_nh_flags &= ~RTNH_F_OFFLOAD; } } @@ -4913,7 +4928,7 @@ static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6) static bool mlxsw_sp_fib6_rt_can_mp(const struct fib6_info *rt) { /* RTF_CACHE routes are ignored */ - return (rt->fib6_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY; + return !(rt->fib6_flags & RTF_ADDRCONF) && rt->fib6_nh.fib_nh_gw_family; } static struct fib6_info * @@ -4972,8 +4987,8 @@ static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp, const struct fib6_info *rt, enum mlxsw_sp_ipip_type *ret) { - return rt->fib6_nh.nh_dev && - mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh.nh_dev, ret); + return rt->fib6_nh.fib_nh_dev && + mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh.fib_nh_dev, ret); } static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp, @@ -4983,7 +4998,7 @@ static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp, { const struct mlxsw_sp_ipip_ops *ipip_ops; struct mlxsw_sp_ipip_entry *ipip_entry; - struct net_device *dev = rt->fib6_nh.nh_dev; + struct net_device *dev = rt->fib6_nh.fib_nh_dev; struct mlxsw_sp_rif *rif; int err; @@ -5026,11 +5041,11 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop *nh, const struct fib6_info *rt) { - struct net_device *dev = rt->fib6_nh.nh_dev; + struct net_device *dev = rt->fib6_nh.fib_nh_dev; nh->nh_grp = nh_grp; - nh->nh_weight = rt->fib6_nh.nh_weight; - memcpy(&nh->gw_addr, &rt->fib6_nh.nh_gw, 
sizeof(nh->gw_addr)); + nh->nh_weight = rt->fib6_nh.fib_nh_weight; + memcpy(&nh->gw_addr, &rt->fib6_nh.fib_nh_gw6, sizeof(nh->gw_addr)); mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh); list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list); @@ -5053,7 +5068,7 @@ static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp, static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp, const struct fib6_info *rt) { - return rt->fib6_flags & RTF_GATEWAY || + return rt->fib6_nh.fib_nh_gw_family || mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL); } @@ -6035,6 +6050,10 @@ static int mlxsw_sp_router_fib_rule_event(unsigned long event, fr_info = container_of(info, struct fib_rule_notifier_info, info); rule = fr_info->rule; + /* Rule only affects locally generated traffic */ + if (rule->iifindex == info->net->loopback_dev->ifindex) + return 0; + switch (info->family) { case AF_INET: if (!fib4_rule_default(rule) && !rule->l3mdev) @@ -6086,10 +6105,20 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, return notifier_from_errno(err); break; case FIB_EVENT_ENTRY_ADD: + case FIB_EVENT_ENTRY_REPLACE: /* fall through */ + case FIB_EVENT_ENTRY_APPEND: /* fall through */ if (router->aborted) { NL_SET_ERR_MSG_MOD(info->extack, "FIB offload was aborted. Not configuring route"); return notifier_from_errno(-EINVAL); } + if (info->family == AF_INET) { + struct fib_entry_notifier_info *fen_info = ptr; + + if (fen_info->fi->fib_nh_is_v6) { + NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported"); + return notifier_from_errno(-EINVAL); + } + } break; } @@ -7808,7 +7837,7 @@ static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp) char recr2_pl[MLXSW_REG_RECR2_LEN]; u32 seed; - get_random_bytes(&seed, sizeof(seed)); + seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0); mlxsw_reg_recr2_pack(recr2_pl, seed); mlxsw_sp_mp4_hash_init(recr2_pl); mlxsw_sp_mp6_hash_init(recr2_pl); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index 536c23c578c3..560a60e522f9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -316,7 +316,11 @@ mlxsw_sp_span_gretap4_route(const struct net_device *to_dev, dev = rt->dst.dev; *saddrp = fl4.saddr; - *daddrp = rt->rt_gateway; + if (rt->rt_gw_family == AF_INET) + *daddrp = rt->rt_gw4; + /* can not offload if route has an IPv6 gateway */ + else if (rt->rt_gw_family == AF_INET6) + dev = NULL; out: ip_rt_put(rt); diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchib.c b/drivers/net/ethernet/mellanox/mlxsw/switchib.c index bcf2e79a21c8..0d9356b3f65d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchib.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchib.c @@ -30,6 +30,7 @@ struct mlxsw_sib { struct mlxsw_sib_port **ports; struct mlxsw_core *core; const struct mlxsw_bus_info *bus_info; + u8 hw_id[ETH_ALEN]; }; struct mlxsw_sib_port { @@ -102,6 +103,18 @@ mlxsw_sib_tx_v1_hdr_construct(struct sk_buff *skb, mlxsw_tx_v1_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL); } +static int mlxsw_sib_hw_id_get(struct mlxsw_sib *mlxsw_sib) +{ + char spad_pl[MLXSW_REG_SPAD_LEN] = {0}; + int err; + + err = mlxsw_reg_query(mlxsw_sib->core, MLXSW_REG(spad), spad_pl); + if (err) + return err; + mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sib->hw_id); + return 0; +} + static int mlxsw_sib_port_admin_status_set(struct mlxsw_sib_port *mlxsw_sib_port, bool is_up) @@ -267,7 +280,9 @@ 
static int mlxsw_sib_port_create(struct mlxsw_sib *mlxsw_sib, u8 local_port, { int err; - err = mlxsw_core_port_init(mlxsw_sib->core, local_port); + err = mlxsw_core_port_init(mlxsw_sib->core, local_port, + module + 1, false, 0, + mlxsw_sib->hw_id, sizeof(mlxsw_sib->hw_id)); if (err) { dev_err(mlxsw_sib->bus_info->dev, "Port %d: Failed to init core port\n", local_port); @@ -439,6 +454,12 @@ static int mlxsw_sib_init(struct mlxsw_core *mlxsw_core, mlxsw_sib->core = mlxsw_core; mlxsw_sib->bus_info = mlxsw_bus_info; + err = mlxsw_sib_hw_id_get(mlxsw_sib); + if (err) { + dev_err(mlxsw_sib->bus_info->dev, "Failed to get switch HW ID\n"); + return err; + } + err = mlxsw_sib_ports_create(mlxsw_sib); if (err) { dev_err(mlxsw_sib->bus_info->dev, "Failed to create ports\n"); diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index 533fe6235b7c..fc4f19167262 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -379,26 +379,14 @@ mlxsw_sx_port_get_stats64(struct net_device *dev, stats->tx_dropped = tx_dropped; } -static int mlxsw_sx_port_get_phys_port_name(struct net_device *dev, char *name, - size_t len) -{ - struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev); - - return mlxsw_core_port_get_phys_port_name(mlxsw_sx_port->mlxsw_sx->core, - mlxsw_sx_port->local_port, - name, len); -} - -static int mlxsw_sx_port_get_port_parent_id(struct net_device *dev, - struct netdev_phys_item_id *ppid) +static struct devlink_port * +mlxsw_sx_port_get_devlink_port(struct net_device *dev) { struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev); struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx; - ppid->id_len = sizeof(mlxsw_sx->hw_id); - memcpy(&ppid->id, &mlxsw_sx->hw_id, ppid->id_len); - - return 0; + return mlxsw_core_port_devlink_port_get(mlxsw_sx->core, + mlxsw_sx_port->local_port); } static const struct net_device_ops mlxsw_sx_port_netdev_ops = { @@ -407,8 +395,7 @@ static const struct net_device_ops mlxsw_sx_port_netdev_ops = { .ndo_start_xmit = mlxsw_sx_port_xmit, .ndo_change_mtu = mlxsw_sx_port_change_mtu, .ndo_get_stats64 = mlxsw_sx_port_get_stats64, - .ndo_get_phys_port_name = mlxsw_sx_port_get_phys_port_name, - .ndo_get_port_parent_id = mlxsw_sx_port_get_port_parent_id, + .ndo_get_devlink_port = mlxsw_sx_port_get_devlink_port, }; static void mlxsw_sx_port_get_drvinfo(struct net_device *dev, @@ -1102,7 +1089,7 @@ static int __mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port, } mlxsw_core_port_eth_set(mlxsw_sx->core, mlxsw_sx_port->local_port, - mlxsw_sx_port, dev, module + 1, false, 0); + mlxsw_sx_port, dev); mlxsw_sx->ports[local_port] = mlxsw_sx_port; return 0; @@ -1127,7 +1114,9 @@ static int mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port, { int err; - err = mlxsw_core_port_init(mlxsw_sx->core, local_port); + err = mlxsw_core_port_init(mlxsw_sx->core, local_port, + module + 1, false, 0, + mlxsw_sx->hw_id, sizeof(mlxsw_sx->hw_id)); if (err) { dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to init core port\n", local_port); diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c index 8f72587b5a2c..0567e4f387a5 100644 --- a/drivers/net/ethernet/microchip/enc28j60.c +++ b/drivers/net/ethernet/microchip/enc28j60.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * Microchip ENC28J60 ethernet driver (MAC + PHY) * @@ -5,11 +6,6 @@ * Author: Claudio Lanconelli <lanconelli.claudio@eptar.com> * based 
on enc28j60.c written by David Anders for 2.4 kernel version * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * * $Id: enc28j60.c,v 1.22 2007/12/20 10:47:01 claudio Exp $ */ @@ -18,9 +14,9 @@ #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> +#include <linux/property.h> #include <linux/string.h> #include <linux/errno.h> -#include <linux/init.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> @@ -28,7 +24,6 @@ #include <linux/skbuff.h> #include <linux/delay.h> #include <linux/spi/spi.h> -#include <linux/of_net.h> #include "enc28j60_hw.h" @@ -41,10 +36,11 @@ (NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_LINK) /* Buffer size required for the largest SPI transfer (i.e., reading a - * frame). */ + * frame). + */ #define SPI_TRANSFER_BUF_LEN (4 + MAX_FRAMELEN) -#define TX_TIMEOUT (4 * HZ) +#define TX_TIMEOUT (4 * HZ) /* Max TX retries in case of collision as suggested by errata datasheet */ #define MAX_TX_RETRYCOUNT 16 @@ -83,11 +79,12 @@ static struct { /* * SPI read buffer - * wait for the SPI transfer and copy received data to destination + * Wait for the SPI transfer and copy received data to destination. */ static int spi_read_buf(struct enc28j60_net *priv, int len, u8 *data) { + struct device *dev = &priv->spi->dev; u8 *rx_buf = priv->spi_transfer_buf + 4; u8 *tx_buf = priv->spi_transfer_buf; struct spi_transfer tx = { @@ -113,8 +110,8 @@ spi_read_buf(struct enc28j60_net *priv, int len, u8 *data) ret = msg.status; } if (ret && netif_msg_drv(priv)) - printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n", - __func__, ret); + dev_printk(KERN_DEBUG, dev, "%s() failed: ret = %d\n", + __func__, ret); return ret; } @@ -122,9 +119,9 @@ spi_read_buf(struct enc28j60_net *priv, int len, u8 *data) /* * SPI write buffer */ -static int spi_write_buf(struct enc28j60_net *priv, int len, - const u8 *data) +static int spi_write_buf(struct enc28j60_net *priv, int len, const u8 *data) { + struct device *dev = &priv->spi->dev; int ret; if (len > SPI_TRANSFER_BUF_LEN - 1 || len <= 0) @@ -134,8 +131,8 @@ static int spi_write_buf(struct enc28j60_net *priv, int len, memcpy(&priv->spi_transfer_buf[1], data, len); ret = spi_write(priv->spi, priv->spi_transfer_buf, len + 1); if (ret && netif_msg_drv(priv)) - printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n", - __func__, ret); + dev_printk(KERN_DEBUG, dev, "%s() failed: ret = %d\n", + __func__, ret); } return ret; } @@ -143,9 +140,9 @@ static int spi_write_buf(struct enc28j60_net *priv, int len, /* * basic SPI read operation */ -static u8 spi_read_op(struct enc28j60_net *priv, u8 op, - u8 addr) +static u8 spi_read_op(struct enc28j60_net *priv, u8 op, u8 addr) { + struct device *dev = &priv->spi->dev; u8 tx_buf[2]; u8 rx_buf[4]; u8 val = 0; @@ -159,8 +156,8 @@ static u8 spi_read_op(struct enc28j60_net *priv, u8 op, tx_buf[0] = op | (addr & ADDR_MASK); ret = spi_write_then_read(priv->spi, tx_buf, 1, rx_buf, slen); if (ret) - printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n", - __func__, ret); + dev_printk(KERN_DEBUG, dev, "%s() failed: ret = %d\n", + __func__, ret); else val = rx_buf[slen - 1]; @@ -170,28 +167,25 @@ static u8 spi_read_op(struct enc28j60_net *priv, u8 op, /* * basic SPI write operation */ -static int spi_write_op(struct enc28j60_net *priv, u8 op, - u8 
addr, u8 val) +static int spi_write_op(struct enc28j60_net *priv, u8 op, u8 addr, u8 val) { + struct device *dev = &priv->spi->dev; int ret; priv->spi_transfer_buf[0] = op | (addr & ADDR_MASK); priv->spi_transfer_buf[1] = val; ret = spi_write(priv->spi, priv->spi_transfer_buf, 2); if (ret && netif_msg_drv(priv)) - printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n", - __func__, ret); + dev_printk(KERN_DEBUG, dev, "%s() failed: ret = %d\n", + __func__, ret); return ret; } static void enc28j60_soft_reset(struct enc28j60_net *priv) { - if (netif_msg_hw(priv)) - printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__); - spi_write_op(priv, ENC28J60_SOFT_RESET, 0, ENC28J60_SOFT_RESET); /* Errata workaround #1, CLKRDY check is unreliable, - * delay at least 1 mS instead */ + * delay at least 1 ms instead */ udelay(2000); } @@ -203,7 +197,7 @@ static void enc28j60_set_bank(struct enc28j60_net *priv, u8 addr) u8 b = (addr & BANK_MASK) >> 5; /* These registers (EIE, EIR, ESTAT, ECON2, ECON1) - * are present in all banks, no need to switch bank + * are present in all banks, no need to switch bank. */ if (addr >= EIE && addr <= ECON1) return; @@ -242,15 +236,13 @@ static void enc28j60_set_bank(struct enc28j60_net *priv, u8 addr) /* * Register bit field Set */ -static void nolock_reg_bfset(struct enc28j60_net *priv, - u8 addr, u8 mask) +static void nolock_reg_bfset(struct enc28j60_net *priv, u8 addr, u8 mask) { enc28j60_set_bank(priv, addr); spi_write_op(priv, ENC28J60_BIT_FIELD_SET, addr, mask); } -static void locked_reg_bfset(struct enc28j60_net *priv, - u8 addr, u8 mask) +static void locked_reg_bfset(struct enc28j60_net *priv, u8 addr, u8 mask) { mutex_lock(&priv->lock); nolock_reg_bfset(priv, addr, mask); @@ -260,15 +252,13 @@ static void locked_reg_bfset(struct enc28j60_net *priv, /* * Register bit field Clear */ -static void nolock_reg_bfclr(struct enc28j60_net *priv, - u8 addr, u8 mask) +static void nolock_reg_bfclr(struct enc28j60_net *priv, u8 addr, u8 mask) { enc28j60_set_bank(priv, addr); spi_write_op(priv, ENC28J60_BIT_FIELD_CLR, addr, mask); } -static void locked_reg_bfclr(struct enc28j60_net *priv, - u8 addr, u8 mask) +static void locked_reg_bfclr(struct enc28j60_net *priv, u8 addr, u8 mask) { mutex_lock(&priv->lock); nolock_reg_bfclr(priv, addr, mask); @@ -278,15 +268,13 @@ static void locked_reg_bfclr(struct enc28j60_net *priv, /* * Register byte read */ -static int nolock_regb_read(struct enc28j60_net *priv, - u8 address) +static int nolock_regb_read(struct enc28j60_net *priv, u8 address) { enc28j60_set_bank(priv, address); return spi_read_op(priv, ENC28J60_READ_CTRL_REG, address); } -static int locked_regb_read(struct enc28j60_net *priv, - u8 address) +static int locked_regb_read(struct enc28j60_net *priv, u8 address) { int ret; @@ -300,8 +288,7 @@ static int locked_regb_read(struct enc28j60_net *priv, /* * Register word read */ -static int nolock_regw_read(struct enc28j60_net *priv, - u8 address) +static int nolock_regw_read(struct enc28j60_net *priv, u8 address) { int rl, rh; @@ -312,8 +299,7 @@ static int nolock_regw_read(struct enc28j60_net *priv, return (rh << 8) | rl; } -static int locked_regw_read(struct enc28j60_net *priv, - u8 address) +static int locked_regw_read(struct enc28j60_net *priv, u8 address) { int ret; @@ -327,15 +313,13 @@ static int locked_regw_read(struct enc28j60_net *priv, /* * Register byte write */ -static void nolock_regb_write(struct enc28j60_net *priv, - u8 address, u8 data) +static void nolock_regb_write(struct enc28j60_net *priv, u8 address, u8 data) 
{ enc28j60_set_bank(priv, address); spi_write_op(priv, ENC28J60_WRITE_CTRL_REG, address, data); } -static void locked_regb_write(struct enc28j60_net *priv, - u8 address, u8 data) +static void locked_regb_write(struct enc28j60_net *priv, u8 address, u8 data) { mutex_lock(&priv->lock); nolock_regb_write(priv, address, data); @@ -345,8 +329,7 @@ static void locked_regb_write(struct enc28j60_net *priv, /* * Register word write */ -static void nolock_regw_write(struct enc28j60_net *priv, - u8 address, u16 data) +static void nolock_regw_write(struct enc28j60_net *priv, u8 address, u16 data) { enc28j60_set_bank(priv, address); spi_write_op(priv, ENC28J60_WRITE_CTRL_REG, address, (u8) data); @@ -354,8 +337,7 @@ static void nolock_regw_write(struct enc28j60_net *priv, (u8) (data >> 8)); } -static void locked_regw_write(struct enc28j60_net *priv, - u8 address, u16 data) +static void locked_regw_write(struct enc28j60_net *priv, u8 address, u16 data) { mutex_lock(&priv->lock); nolock_regw_write(priv, address, data); @@ -364,20 +346,23 @@ static void locked_regw_write(struct enc28j60_net *priv, /* * Buffer memory read - * Select the starting address and execute a SPI buffer read + * Select the starting address and execute a SPI buffer read. */ -static void enc28j60_mem_read(struct enc28j60_net *priv, - u16 addr, int len, u8 *data) +static void enc28j60_mem_read(struct enc28j60_net *priv, u16 addr, int len, + u8 *data) { mutex_lock(&priv->lock); nolock_regw_write(priv, ERDPTL, addr); #ifdef CONFIG_ENC28J60_WRITEVERIFY if (netif_msg_drv(priv)) { + struct device *dev = &priv->spi->dev; u16 reg; + reg = nolock_regw_read(priv, ERDPTL); if (reg != addr) - printk(KERN_DEBUG DRV_NAME ": %s() error writing ERDPT " - "(0x%04x - 0x%04x)\n", __func__, reg, addr); + dev_printk(KERN_DEBUG, dev, + "%s() error writing ERDPT (0x%04x - 0x%04x)\n", + __func__, reg, addr); } #endif spi_read_buf(priv, len, data); @@ -390,6 +375,8 @@ static void enc28j60_mem_read(struct enc28j60_net *priv, static void enc28j60_packet_write(struct enc28j60_net *priv, int len, const u8 *data) { + struct device *dev = &priv->spi->dev; + mutex_lock(&priv->lock); /* Set the write pointer to start of transmit buffer area */ nolock_regw_write(priv, EWRPTL, TXSTART_INIT); @@ -398,9 +385,9 @@ enc28j60_packet_write(struct enc28j60_net *priv, int len, const u8 *data) u16 reg; reg = nolock_regw_read(priv, EWRPTL); if (reg != TXSTART_INIT) - printk(KERN_DEBUG DRV_NAME - ": %s() ERWPT:0x%04x != 0x%04x\n", - __func__, reg, TXSTART_INIT); + dev_printk(KERN_DEBUG, dev, + "%s() ERWPT:0x%04x != 0x%04x\n", + __func__, reg, TXSTART_INIT); } #endif /* Set the TXND pointer to correspond to the packet size given */ @@ -408,30 +395,28 @@ enc28j60_packet_write(struct enc28j60_net *priv, int len, const u8 *data) /* write per-packet control byte */ spi_write_op(priv, ENC28J60_WRITE_BUF_MEM, 0, 0x00); if (netif_msg_hw(priv)) - printk(KERN_DEBUG DRV_NAME - ": %s() after control byte ERWPT:0x%04x\n", - __func__, nolock_regw_read(priv, EWRPTL)); + dev_printk(KERN_DEBUG, dev, + "%s() after control byte ERWPT:0x%04x\n", + __func__, nolock_regw_read(priv, EWRPTL)); /* copy the packet into the transmit buffer */ spi_write_buf(priv, len, data); if (netif_msg_hw(priv)) - printk(KERN_DEBUG DRV_NAME - ": %s() after write packet ERWPT:0x%04x, len=%d\n", - __func__, nolock_regw_read(priv, EWRPTL), len); + dev_printk(KERN_DEBUG, dev, + "%s() after write packet ERWPT:0x%04x, len=%d\n", + __func__, nolock_regw_read(priv, EWRPTL), len); mutex_unlock(&priv->lock); } -static unsigned 
long msec20_to_jiffies; - static int poll_ready(struct enc28j60_net *priv, u8 reg, u8 mask, u8 val) { - unsigned long timeout = jiffies + msec20_to_jiffies; + struct device *dev = &priv->spi->dev; + unsigned long timeout = jiffies + msecs_to_jiffies(20); /* 20 msec timeout read */ while ((nolock_regb_read(priv, reg) & mask) != val) { if (time_after(jiffies, timeout)) { if (netif_msg_drv(priv)) - dev_dbg(&priv->spi->dev, - "reg %02x ready timeout!\n", reg); + dev_dbg(dev, "reg %02x ready timeout!\n", reg); return -ETIMEDOUT; } cpu_relax(); @@ -449,7 +434,7 @@ static int wait_phy_ready(struct enc28j60_net *priv) /* * PHY register read - * PHY registers are not accessed directly, but through the MII + * PHY registers are not accessed directly, but through the MII. */ static u16 enc28j60_phy_read(struct enc28j60_net *priv, u8 address) { @@ -465,7 +450,7 @@ static u16 enc28j60_phy_read(struct enc28j60_net *priv, u8 address) /* quit reading */ nolock_regb_write(priv, MICMD, 0x00); /* return the data */ - ret = nolock_regw_read(priv, MIRDL); + ret = nolock_regw_read(priv, MIRDL); mutex_unlock(&priv->lock); return ret; @@ -494,13 +479,13 @@ static int enc28j60_set_hw_macaddr(struct net_device *ndev) { int ret; struct enc28j60_net *priv = netdev_priv(ndev); + struct device *dev = &priv->spi->dev; mutex_lock(&priv->lock); if (!priv->hw_enable) { if (netif_msg_drv(priv)) - printk(KERN_INFO DRV_NAME - ": %s: Setting MAC address to %pM\n", - ndev->name, ndev->dev_addr); + dev_info(dev, "%s: Setting MAC address to %pM\n", + ndev->name, ndev->dev_addr); /* NOTE: MAC address in ENC28J60 is byte-backward */ nolock_regb_write(priv, MAADR5, ndev->dev_addr[0]); nolock_regb_write(priv, MAADR4, ndev->dev_addr[1]); @@ -511,9 +496,9 @@ static int enc28j60_set_hw_macaddr(struct net_device *ndev) ret = 0; } else { if (netif_msg_drv(priv)) - printk(KERN_DEBUG DRV_NAME - ": %s() Hardware must be disabled to set " - "Mac address\n", __func__); + dev_printk(KERN_DEBUG, dev, + "%s() Hardware must be disabled to set Mac address\n", + __func__); ret = -EBUSY; } mutex_unlock(&priv->lock); @@ -532,7 +517,7 @@ static int enc28j60_set_mac_address(struct net_device *dev, void *addr) if (!is_valid_ether_addr(address->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, address->sa_data, dev->addr_len); + ether_addr_copy(dev->dev_addr, address->sa_data); return enc28j60_set_hw_macaddr(dev); } @@ -541,33 +526,36 @@ static int enc28j60_set_mac_address(struct net_device *dev, void *addr) */ static void enc28j60_dump_regs(struct enc28j60_net *priv, const char *msg) { + struct device *dev = &priv->spi->dev; + mutex_lock(&priv->lock); - printk(KERN_DEBUG DRV_NAME " %s\n" - "HwRevID: 0x%02x\n" - "Cntrl: ECON1 ECON2 ESTAT EIR EIE\n" - " 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n" - "MAC : MACON1 MACON3 MACON4\n" - " 0x%02x 0x%02x 0x%02x\n" - "Rx : ERXST ERXND ERXWRPT ERXRDPT ERXFCON EPKTCNT MAMXFL\n" - " 0x%04x 0x%04x 0x%04x 0x%04x " - "0x%02x 0x%02x 0x%04x\n" - "Tx : ETXST ETXND MACLCON1 MACLCON2 MAPHSUP\n" - " 0x%04x 0x%04x 0x%02x 0x%02x 0x%02x\n", - msg, nolock_regb_read(priv, EREVID), - nolock_regb_read(priv, ECON1), nolock_regb_read(priv, ECON2), - nolock_regb_read(priv, ESTAT), nolock_regb_read(priv, EIR), - nolock_regb_read(priv, EIE), nolock_regb_read(priv, MACON1), - nolock_regb_read(priv, MACON3), nolock_regb_read(priv, MACON4), - nolock_regw_read(priv, ERXSTL), nolock_regw_read(priv, ERXNDL), - nolock_regw_read(priv, ERXWRPTL), - nolock_regw_read(priv, ERXRDPTL), - nolock_regb_read(priv, ERXFCON), - nolock_regb_read(priv, 
EPKTCNT), - nolock_regw_read(priv, MAMXFLL), nolock_regw_read(priv, ETXSTL), - nolock_regw_read(priv, ETXNDL), - nolock_regb_read(priv, MACLCON1), - nolock_regb_read(priv, MACLCON2), - nolock_regb_read(priv, MAPHSUP)); + dev_printk(KERN_DEBUG, dev, + " %s\n" + "HwRevID: 0x%02x\n" + "Cntrl: ECON1 ECON2 ESTAT EIR EIE\n" + " 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n" + "MAC : MACON1 MACON3 MACON4\n" + " 0x%02x 0x%02x 0x%02x\n" + "Rx : ERXST ERXND ERXWRPT ERXRDPT ERXFCON EPKTCNT MAMXFL\n" + " 0x%04x 0x%04x 0x%04x 0x%04x " + "0x%02x 0x%02x 0x%04x\n" + "Tx : ETXST ETXND MACLCON1 MACLCON2 MAPHSUP\n" + " 0x%04x 0x%04x 0x%02x 0x%02x 0x%02x\n", + msg, nolock_regb_read(priv, EREVID), + nolock_regb_read(priv, ECON1), nolock_regb_read(priv, ECON2), + nolock_regb_read(priv, ESTAT), nolock_regb_read(priv, EIR), + nolock_regb_read(priv, EIE), nolock_regb_read(priv, MACON1), + nolock_regb_read(priv, MACON3), nolock_regb_read(priv, MACON4), + nolock_regw_read(priv, ERXSTL), nolock_regw_read(priv, ERXNDL), + nolock_regw_read(priv, ERXWRPTL), + nolock_regw_read(priv, ERXRDPTL), + nolock_regb_read(priv, ERXFCON), + nolock_regb_read(priv, EPKTCNT), + nolock_regw_read(priv, MAMXFLL), nolock_regw_read(priv, ETXSTL), + nolock_regw_read(priv, ETXNDL), + nolock_regb_read(priv, MACLCON1), + nolock_regb_read(priv, MACLCON2), + nolock_regb_read(priv, MAPHSUP)); mutex_unlock(&priv->lock); } @@ -599,12 +587,13 @@ static u16 rx_packet_start(u16 ptr) static void nolock_rxfifo_init(struct enc28j60_net *priv, u16 start, u16 end) { + struct device *dev = &priv->spi->dev; u16 erxrdpt; if (start > 0x1FFF || end > 0x1FFF || start > end) { if (netif_msg_drv(priv)) - printk(KERN_ERR DRV_NAME ": %s(%d, %d) RXFIFO " - "bad parameters!\n", __func__, start, end); + dev_err(dev, "%s(%d, %d) RXFIFO bad parameters!\n", + __func__, start, end); return; } /* set receive buffer start + end */ @@ -617,10 +606,12 @@ static void nolock_rxfifo_init(struct enc28j60_net *priv, u16 start, u16 end) static void nolock_txfifo_init(struct enc28j60_net *priv, u16 start, u16 end) { + struct device *dev = &priv->spi->dev; + if (start > 0x1FFF || end > 0x1FFF || start > end) { if (netif_msg_drv(priv)) - printk(KERN_ERR DRV_NAME ": %s(%d, %d) TXFIFO " - "bad parameters!\n", __func__, start, end); + dev_err(dev, "%s(%d, %d) TXFIFO bad parameters!\n", + __func__, start, end); return; } /* set transmit buffer start + end */ @@ -630,14 +621,15 @@ static void nolock_txfifo_init(struct enc28j60_net *priv, u16 start, u16 end) /* * Low power mode shrinks power consumption about 100x, so we'd like - * the chip to be in that mode whenever it's inactive. (However, we - * can't stay in lowpower mode during suspend with WOL active.) + * the chip to be in that mode whenever it's inactive. (However, we + * can't stay in low power mode during suspend with WOL active.) */ static void enc28j60_lowpower(struct enc28j60_net *priv, bool is_low) { + struct device *dev = &priv->spi->dev; + if (netif_msg_drv(priv)) - dev_dbg(&priv->spi->dev, "%s power...\n", - is_low ? "low" : "high"); + dev_dbg(dev, "%s power...\n", is_low ? "low" : "high"); mutex_lock(&priv->lock); if (is_low) { @@ -656,11 +648,12 @@ static void enc28j60_lowpower(struct enc28j60_net *priv, bool is_low) static int enc28j60_hw_init(struct enc28j60_net *priv) { + struct device *dev = &priv->spi->dev; u8 reg; if (netif_msg_drv(priv)) - printk(KERN_DEBUG DRV_NAME ": %s() - %s\n", __func__, - priv->full_duplex ? "FullDuplex" : "HalfDuplex"); + dev_printk(KERN_DEBUG, dev, "%s() - %s\n", __func__, + priv->full_duplex ? 
"FullDuplex" : "HalfDuplex"); mutex_lock(&priv->lock); /* first reset the chip */ @@ -682,15 +675,15 @@ static int enc28j60_hw_init(struct enc28j60_net *priv) /* * Check the RevID. * If it's 0x00 or 0xFF probably the enc28j60 is not mounted or - * damaged + * damaged. */ reg = locked_regb_read(priv, EREVID); if (netif_msg_drv(priv)) - printk(KERN_INFO DRV_NAME ": chip RevID: 0x%02x\n", reg); + dev_info(dev, "chip RevID: 0x%02x\n", reg); if (reg == 0x00 || reg == 0xff) { if (netif_msg_drv(priv)) - printk(KERN_DEBUG DRV_NAME ": %s() Invalid RevId %d\n", - __func__, reg); + dev_printk(KERN_DEBUG, dev, "%s() Invalid RevId %d\n", + __func__, reg); return 0; } @@ -723,7 +716,7 @@ static int enc28j60_hw_init(struct enc28j60_net *priv) /* * MACLCON1 (default) * MACLCON2 (default) - * Set the maximum packet size which the controller will accept + * Set the maximum packet size which the controller will accept. */ locked_regw_write(priv, MAMXFLL, MAX_FRAMELEN); @@ -750,10 +743,12 @@ static int enc28j60_hw_init(struct enc28j60_net *priv) static void enc28j60_hw_enable(struct enc28j60_net *priv) { + struct device *dev = &priv->spi->dev; + /* enable interrupts */ if (netif_msg_hw(priv)) - printk(KERN_DEBUG DRV_NAME ": %s() enabling interrupts.\n", - __func__); + dev_printk(KERN_DEBUG, dev, "%s() enabling interrupts.\n", + __func__); enc28j60_phy_write(priv, PHIE, PHIE_PGEIE | PHIE_PLNKIE); @@ -772,7 +767,7 @@ static void enc28j60_hw_enable(struct enc28j60_net *priv) static void enc28j60_hw_disable(struct enc28j60_net *priv) { mutex_lock(&priv->lock); - /* disable interrutps and packet reception */ + /* disable interrupts and packet reception */ nolock_regb_write(priv, EIE, 0x00); nolock_reg_bfclr(priv, ECON1, ECON1_RXEN); priv->hw_enable = false; @@ -793,14 +788,12 @@ enc28j60_setlink(struct net_device *ndev, u8 autoneg, u16 speed, u8 duplex) priv->full_duplex = (duplex == DUPLEX_FULL); else { if (netif_msg_link(priv)) - dev_warn(&ndev->dev, - "unsupported link setting\n"); + netdev_warn(ndev, "unsupported link setting\n"); ret = -EOPNOTSUPP; } } else { if (netif_msg_link(priv)) - dev_warn(&ndev->dev, "Warning: hw must be disabled " - "to set link mode\n"); + netdev_warn(ndev, "Warning: hw must be disabled to set link mode\n"); ret = -EBUSY; } return ret; @@ -811,21 +804,23 @@ enc28j60_setlink(struct net_device *ndev, u8 autoneg, u16 speed, u8 duplex) */ static void enc28j60_read_tsv(struct enc28j60_net *priv, u8 tsv[TSV_SIZE]) { + struct device *dev = &priv->spi->dev; int endptr; endptr = locked_regw_read(priv, ETXNDL); if (netif_msg_hw(priv)) - printk(KERN_DEBUG DRV_NAME ": reading TSV at addr:0x%04x\n", - endptr + 1); + dev_printk(KERN_DEBUG, dev, "reading TSV at addr:0x%04x\n", + endptr + 1); enc28j60_mem_read(priv, endptr + 1, TSV_SIZE, tsv); } static void enc28j60_dump_tsv(struct enc28j60_net *priv, const char *msg, - u8 tsv[TSV_SIZE]) + u8 tsv[TSV_SIZE]) { + struct device *dev = &priv->spi->dev; u16 tmp1, tmp2; - printk(KERN_DEBUG DRV_NAME ": %s - TSV:\n", msg); + dev_printk(KERN_DEBUG, dev, "%s - TSV:\n", msg); tmp1 = tsv[1]; tmp1 <<= 8; tmp1 |= tsv[0]; @@ -834,30 +829,32 @@ static void enc28j60_dump_tsv(struct enc28j60_net *priv, const char *msg, tmp2 <<= 8; tmp2 |= tsv[4]; - printk(KERN_DEBUG DRV_NAME ": ByteCount: %d, CollisionCount: %d," - " TotByteOnWire: %d\n", tmp1, tsv[2] & 0x0f, tmp2); - printk(KERN_DEBUG DRV_NAME ": TxDone: %d, CRCErr:%d, LenChkErr: %d," - " LenOutOfRange: %d\n", TSV_GETBIT(tsv, TSV_TXDONE), - TSV_GETBIT(tsv, TSV_TXCRCERROR), - TSV_GETBIT(tsv, TSV_TXLENCHKERROR), - 
TSV_GETBIT(tsv, TSV_TXLENOUTOFRANGE)); - printk(KERN_DEBUG DRV_NAME ": Multicast: %d, Broadcast: %d, " - "PacketDefer: %d, ExDefer: %d\n", - TSV_GETBIT(tsv, TSV_TXMULTICAST), - TSV_GETBIT(tsv, TSV_TXBROADCAST), - TSV_GETBIT(tsv, TSV_TXPACKETDEFER), - TSV_GETBIT(tsv, TSV_TXEXDEFER)); - printk(KERN_DEBUG DRV_NAME ": ExCollision: %d, LateCollision: %d, " - "Giant: %d, Underrun: %d\n", - TSV_GETBIT(tsv, TSV_TXEXCOLLISION), - TSV_GETBIT(tsv, TSV_TXLATECOLLISION), - TSV_GETBIT(tsv, TSV_TXGIANT), TSV_GETBIT(tsv, TSV_TXUNDERRUN)); - printk(KERN_DEBUG DRV_NAME ": ControlFrame: %d, PauseFrame: %d, " - "BackPressApp: %d, VLanTagFrame: %d\n", - TSV_GETBIT(tsv, TSV_TXCONTROLFRAME), - TSV_GETBIT(tsv, TSV_TXPAUSEFRAME), - TSV_GETBIT(tsv, TSV_BACKPRESSUREAPP), - TSV_GETBIT(tsv, TSV_TXVLANTAGFRAME)); + dev_printk(KERN_DEBUG, dev, + "ByteCount: %d, CollisionCount: %d, TotByteOnWire: %d\n", + tmp1, tsv[2] & 0x0f, tmp2); + dev_printk(KERN_DEBUG, dev, + "TxDone: %d, CRCErr:%d, LenChkErr: %d, LenOutOfRange: %d\n", + TSV_GETBIT(tsv, TSV_TXDONE), + TSV_GETBIT(tsv, TSV_TXCRCERROR), + TSV_GETBIT(tsv, TSV_TXLENCHKERROR), + TSV_GETBIT(tsv, TSV_TXLENOUTOFRANGE)); + dev_printk(KERN_DEBUG, dev, + "Multicast: %d, Broadcast: %d, PacketDefer: %d, ExDefer: %d\n", + TSV_GETBIT(tsv, TSV_TXMULTICAST), + TSV_GETBIT(tsv, TSV_TXBROADCAST), + TSV_GETBIT(tsv, TSV_TXPACKETDEFER), + TSV_GETBIT(tsv, TSV_TXEXDEFER)); + dev_printk(KERN_DEBUG, dev, + "ExCollision: %d, LateCollision: %d, Giant: %d, Underrun: %d\n", + TSV_GETBIT(tsv, TSV_TXEXCOLLISION), + TSV_GETBIT(tsv, TSV_TXLATECOLLISION), + TSV_GETBIT(tsv, TSV_TXGIANT), TSV_GETBIT(tsv, TSV_TXUNDERRUN)); + dev_printk(KERN_DEBUG, dev, + "ControlFrame: %d, PauseFrame: %d, BackPressApp: %d, VLanTagFrame: %d\n", + TSV_GETBIT(tsv, TSV_TXCONTROLFRAME), + TSV_GETBIT(tsv, TSV_TXPAUSEFRAME), + TSV_GETBIT(tsv, TSV_BACKPRESSUREAPP), + TSV_GETBIT(tsv, TSV_TXVLANTAGFRAME)); } /* @@ -866,27 +863,29 @@ static void enc28j60_dump_tsv(struct enc28j60_net *priv, const char *msg, static void enc28j60_dump_rsv(struct enc28j60_net *priv, const char *msg, u16 pk_ptr, int len, u16 sts) { - printk(KERN_DEBUG DRV_NAME ": %s - NextPk: 0x%04x - RSV:\n", - msg, pk_ptr); - printk(KERN_DEBUG DRV_NAME ": ByteCount: %d, DribbleNibble: %d\n", len, - RSV_GETBIT(sts, RSV_DRIBBLENIBBLE)); - printk(KERN_DEBUG DRV_NAME ": RxOK: %d, CRCErr:%d, LenChkErr: %d," - " LenOutOfRange: %d\n", RSV_GETBIT(sts, RSV_RXOK), - RSV_GETBIT(sts, RSV_CRCERROR), - RSV_GETBIT(sts, RSV_LENCHECKERR), - RSV_GETBIT(sts, RSV_LENOUTOFRANGE)); - printk(KERN_DEBUG DRV_NAME ": Multicast: %d, Broadcast: %d, " - "LongDropEvent: %d, CarrierEvent: %d\n", - RSV_GETBIT(sts, RSV_RXMULTICAST), - RSV_GETBIT(sts, RSV_RXBROADCAST), - RSV_GETBIT(sts, RSV_RXLONGEVDROPEV), - RSV_GETBIT(sts, RSV_CARRIEREV)); - printk(KERN_DEBUG DRV_NAME ": ControlFrame: %d, PauseFrame: %d," - " UnknownOp: %d, VLanTagFrame: %d\n", - RSV_GETBIT(sts, RSV_RXCONTROLFRAME), - RSV_GETBIT(sts, RSV_RXPAUSEFRAME), - RSV_GETBIT(sts, RSV_RXUNKNOWNOPCODE), - RSV_GETBIT(sts, RSV_RXTYPEVLAN)); + struct device *dev = &priv->spi->dev; + + dev_printk(KERN_DEBUG, dev, "%s - NextPk: 0x%04x - RSV:\n", msg, pk_ptr); + dev_printk(KERN_DEBUG, dev, "ByteCount: %d, DribbleNibble: %d\n", + len, RSV_GETBIT(sts, RSV_DRIBBLENIBBLE)); + dev_printk(KERN_DEBUG, dev, + "RxOK: %d, CRCErr:%d, LenChkErr: %d, LenOutOfRange: %d\n", + RSV_GETBIT(sts, RSV_RXOK), + RSV_GETBIT(sts, RSV_CRCERROR), + RSV_GETBIT(sts, RSV_LENCHECKERR), + RSV_GETBIT(sts, RSV_LENOUTOFRANGE)); + dev_printk(KERN_DEBUG, dev, + "Multicast: %d, Broadcast: 
%d, LongDropEvent: %d, CarrierEvent: %d\n", + RSV_GETBIT(sts, RSV_RXMULTICAST), + RSV_GETBIT(sts, RSV_RXBROADCAST), + RSV_GETBIT(sts, RSV_RXLONGEVDROPEV), + RSV_GETBIT(sts, RSV_CARRIEREV)); + dev_printk(KERN_DEBUG, dev, + "ControlFrame: %d, PauseFrame: %d, UnknownOp: %d, VLanTagFrame: %d\n", + RSV_GETBIT(sts, RSV_RXCONTROLFRAME), + RSV_GETBIT(sts, RSV_RXPAUSEFRAME), + RSV_GETBIT(sts, RSV_RXUNKNOWNOPCODE), + RSV_GETBIT(sts, RSV_RXTYPEVLAN)); } static void dump_packet(const char *msg, int len, const char *data) @@ -904,20 +903,20 @@ static void dump_packet(const char *msg, int len, const char *data) static void enc28j60_hw_rx(struct net_device *ndev) { struct enc28j60_net *priv = netdev_priv(ndev); + struct device *dev = &priv->spi->dev; struct sk_buff *skb = NULL; u16 erxrdpt, next_packet, rxstat; u8 rsv[RSV_SIZE]; int len; if (netif_msg_rx_status(priv)) - printk(KERN_DEBUG DRV_NAME ": RX pk_addr:0x%04x\n", - priv->next_pk_ptr); + netdev_printk(KERN_DEBUG, ndev, "RX pk_addr:0x%04x\n", + priv->next_pk_ptr); if (unlikely(priv->next_pk_ptr > RXEND_INIT)) { if (netif_msg_rx_err(priv)) - dev_err(&ndev->dev, - "%s() Invalid packet address!! 0x%04x\n", - __func__, priv->next_pk_ptr); + netdev_err(ndev, "%s() Invalid packet address!! 0x%04x\n", + __func__, priv->next_pk_ptr); /* packet address corrupted: reset RX logic */ mutex_lock(&priv->lock); nolock_reg_bfclr(priv, ECON1, ECON1_RXEN); @@ -950,7 +949,7 @@ static void enc28j60_hw_rx(struct net_device *ndev) if (!RSV_GETBIT(rxstat, RSV_RXOK) || len > MAX_FRAMELEN) { if (netif_msg_rx_err(priv)) - dev_err(&ndev->dev, "Rx Error (%04x)\n", rxstat); + netdev_err(ndev, "Rx Error (%04x)\n", rxstat); ndev->stats.rx_errors++; if (RSV_GETBIT(rxstat, RSV_CRCERROR)) ndev->stats.rx_crc_errors++; @@ -962,8 +961,7 @@ static void enc28j60_hw_rx(struct net_device *ndev) skb = netdev_alloc_skb(ndev, len + NET_IP_ALIGN); if (!skb) { if (netif_msg_rx_err(priv)) - dev_err(&ndev->dev, - "out of memory for Rx'd frame\n"); + netdev_err(ndev, "out of memory for Rx'd frame\n"); ndev->stats.rx_dropped++; } else { skb_reserve(skb, NET_IP_ALIGN); @@ -983,12 +981,12 @@ static void enc28j60_hw_rx(struct net_device *ndev) /* * Move the RX read pointer to the start of the next * received packet. - * This frees the memory we just read out + * This frees the memory we just read out. 
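 * (The erxrdpt_workaround() call just below keeps ERXRDPT odd; the
 * ENC28J60 errata forbid writing an even address to the receive read
 * pointer.)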
*/ erxrdpt = erxrdpt_workaround(next_packet, RXSTART_INIT, RXEND_INIT); if (netif_msg_hw(priv)) - printk(KERN_DEBUG DRV_NAME ": %s() ERXRDPT:0x%04x\n", - __func__, erxrdpt); + dev_printk(KERN_DEBUG, dev, "%s() ERXRDPT:0x%04x\n", + __func__, erxrdpt); mutex_lock(&priv->lock); nolock_regw_write(priv, ERXRDPTL, erxrdpt); @@ -997,9 +995,9 @@ static void enc28j60_hw_rx(struct net_device *ndev) u16 reg; reg = nolock_regw_read(priv, ERXRDPTL); if (reg != erxrdpt) - printk(KERN_DEBUG DRV_NAME ": %s() ERXRDPT verify " - "error (0x%04x - 0x%04x)\n", __func__, - reg, erxrdpt); + dev_printk(KERN_DEBUG, dev, + "%s() ERXRDPT verify error (0x%04x - 0x%04x)\n", + __func__, reg, erxrdpt); } #endif priv->next_pk_ptr = next_packet; @@ -1013,6 +1011,7 @@ static void enc28j60_hw_rx(struct net_device *ndev) */ static int enc28j60_get_free_rxfifo(struct enc28j60_net *priv) { + struct net_device *ndev = priv->netdev; int epkcnt, erxst, erxnd, erxwr, erxrd; int free_space; @@ -1035,8 +1034,8 @@ static int enc28j60_get_free_rxfifo(struct enc28j60_net *priv) } mutex_unlock(&priv->lock); if (netif_msg_rx_status(priv)) - printk(KERN_DEBUG DRV_NAME ": %s() free_space = %d\n", - __func__, free_space); + netdev_printk(KERN_DEBUG, ndev, "%s() free_space = %d\n", + __func__, free_space); return free_space; } @@ -1046,24 +1045,25 @@ static void enc28j60_check_link_status(struct net_device *ndev) { struct enc28j60_net *priv = netdev_priv(ndev); + struct device *dev = &priv->spi->dev; u16 reg; int duplex; reg = enc28j60_phy_read(priv, PHSTAT2); if (netif_msg_hw(priv)) - printk(KERN_DEBUG DRV_NAME ": %s() PHSTAT1: %04x, " - "PHSTAT2: %04x\n", __func__, - enc28j60_phy_read(priv, PHSTAT1), reg); + dev_printk(KERN_DEBUG, dev, + "%s() PHSTAT1: %04x, PHSTAT2: %04x\n", __func__, + enc28j60_phy_read(priv, PHSTAT1), reg); duplex = reg & PHSTAT2_DPXSTAT; if (reg & PHSTAT2_LSTAT) { netif_carrier_on(ndev); if (netif_msg_ifup(priv)) - dev_info(&ndev->dev, "link up - %s\n", - duplex ? "Full duplex" : "Half duplex"); + netdev_info(ndev, "link up - %s\n", + duplex ? "Full duplex" : "Half duplex"); } else { if (netif_msg_ifdown(priv)) - dev_info(&ndev->dev, "link down\n"); + netdev_info(ndev, "link down\n"); netif_carrier_off(ndev); } } @@ -1089,8 +1089,8 @@ static void enc28j60_tx_clear(struct net_device *ndev, bool err) /* * RX handler - * ignore PKTIF because is unreliable! (look at the errata datasheet) - * check EPKTCNT is the suggested workaround. + * Ignore PKTIF because it is unreliable! (Look at the errata datasheet) + * Checking EPKTCNT is the suggested workaround. * We don't need to clear interrupt flag, automatically done when * enc28j60_hw_rx() decrements the packet counter. * Returns how many packets were processed.
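The workaround the rewritten comment describes can be distilled into a free-standing sketch; example_rx_poll() is a hypothetical name standing in for the driver's real enc28j60_rx_interrupt(), which follows in the next hunk:

/* Poll EPKTCNT rather than trusting the unreliable PKTIF flag: read how
 * many frames are pending and drain them one at a time.  Each call to
 * enc28j60_hw_rx() sets ECON2.PKTDEC, which decrements EPKTCNT in
 * hardware and thereby also clears the interrupt condition.
 */
static int example_rx_poll(struct enc28j60_net *priv, struct net_device *ndev)
{
	int pending = locked_regb_read(priv, EPKTCNT);
	int handled = pending;

	while (pending-- > 0)
		enc28j60_hw_rx(ndev);

	return handled;
}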
@@ -1102,13 +1102,14 @@ static int enc28j60_rx_interrupt(struct net_device *ndev) pk_counter = locked_regb_read(priv, EPKTCNT); if (pk_counter && netif_msg_intr(priv)) - printk(KERN_DEBUG DRV_NAME ": intRX, pk_cnt: %d\n", pk_counter); + netdev_printk(KERN_DEBUG, ndev, "intRX, pk_cnt: %d\n", + pk_counter); if (pk_counter > priv->max_pk_counter) { /* update statistics */ priv->max_pk_counter = pk_counter; if (netif_msg_rx_status(priv) && priv->max_pk_counter > 1) - printk(KERN_DEBUG DRV_NAME ": RX max_pk_cnt: %d\n", - priv->max_pk_counter); + netdev_printk(KERN_DEBUG, ndev, "RX max_pk_cnt: %d\n", + priv->max_pk_counter); } ret = pk_counter; while (pk_counter-- > 0) @@ -1124,8 +1125,6 @@ static void enc28j60_irq_work_handler(struct work_struct *work) struct net_device *ndev = priv->netdev; int intflags, loop; - if (netif_msg_intr(priv)) - printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__); /* disable further interrupts */ locked_reg_bfclr(priv, EIE, EIE_INTIE); @@ -1136,16 +1135,16 @@ static void enc28j60_irq_work_handler(struct work_struct *work) if ((intflags & EIR_DMAIF) != 0) { loop++; if (netif_msg_intr(priv)) - printk(KERN_DEBUG DRV_NAME - ": intDMA(%d)\n", loop); + netdev_printk(KERN_DEBUG, ndev, "intDMA(%d)\n", + loop); locked_reg_bfclr(priv, EIR, EIR_DMAIF); } /* LINK changed handler */ if ((intflags & EIR_LINKIF) != 0) { loop++; if (netif_msg_intr(priv)) - printk(KERN_DEBUG DRV_NAME - ": intLINK(%d)\n", loop); + netdev_printk(KERN_DEBUG, ndev, "intLINK(%d)\n", + loop); enc28j60_check_link_status(ndev); /* read PHIR to clear the flag */ enc28j60_phy_read(priv, PHIR); @@ -1156,13 +1155,12 @@ static void enc28j60_irq_work_handler(struct work_struct *work) bool err = false; loop++; if (netif_msg_intr(priv)) - printk(KERN_DEBUG DRV_NAME - ": intTX(%d)\n", loop); + netdev_printk(KERN_DEBUG, ndev, "intTX(%d)\n", + loop); priv->tx_retry_count = 0; if (locked_regb_read(priv, ESTAT) & ESTAT_TXABRT) { if (netif_msg_tx_err(priv)) - dev_err(&ndev->dev, - "Tx Error (aborted)\n"); + netdev_err(ndev, "Tx Error (aborted)\n"); err = true; } if (netif_msg_tx_done(priv)) { @@ -1179,8 +1177,8 @@ static void enc28j60_irq_work_handler(struct work_struct *work) loop++; if (netif_msg_intr(priv)) - printk(KERN_DEBUG DRV_NAME - ": intTXErr(%d)\n", loop); + netdev_printk(KERN_DEBUG, ndev, "intTXErr(%d)\n", + loop); locked_reg_bfclr(priv, ECON1, ECON1_TXRTS); enc28j60_read_tsv(priv, tsv); if (netif_msg_tx_err(priv)) @@ -1194,9 +1192,9 @@ static void enc28j60_irq_work_handler(struct work_struct *work) /* Transmit Late collision check for retransmit */ if (TSV_GETBIT(tsv, TSV_TXLATECOLLISION)) { if (netif_msg_tx_err(priv)) - printk(KERN_DEBUG DRV_NAME - ": LateCollision TXErr (%d)\n", - priv->tx_retry_count); + netdev_printk(KERN_DEBUG, ndev, + "LateCollision TXErr (%d)\n", + priv->tx_retry_count); if (priv->tx_retry_count++ < MAX_TX_RETRYCOUNT) locked_reg_bfset(priv, ECON1, ECON1_TXRTS); @@ -1210,13 +1208,12 @@ static void enc28j60_irq_work_handler(struct work_struct *work) if ((intflags & EIR_RXERIF) != 0) { loop++; if (netif_msg_intr(priv)) - printk(KERN_DEBUG DRV_NAME - ": intRXErr(%d)\n", loop); + netdev_printk(KERN_DEBUG, ndev, "intRXErr(%d)\n", + loop); /* Check free FIFO space to flag RX overrun */ if (enc28j60_get_free_rxfifo(priv) <= 0) { if (netif_msg_rx_err(priv)) - printk(KERN_DEBUG DRV_NAME - ": RX Overrun\n"); + netdev_printk(KERN_DEBUG, ndev, "RX Overrun\n"); ndev->stats.rx_dropped++; } locked_reg_bfclr(priv, EIR, EIR_RXERIF); @@ -1228,8 +1225,6 @@ static void enc28j60_irq_work_handler(struct 
work_struct *work) /* re-enable interrupts */ locked_reg_bfset(priv, EIE, EIE_INTIE); - if (netif_msg_intr(priv)) - printk(KERN_DEBUG DRV_NAME ": %s() exit\n", __func__); } /* @@ -1239,11 +1234,13 @@ static void enc28j60_irq_work_handler(struct work_struct *work) */ static void enc28j60_hw_tx(struct enc28j60_net *priv) { + struct net_device *ndev = priv->netdev; + BUG_ON(!priv->tx_skb); if (netif_msg_tx_queued(priv)) - printk(KERN_DEBUG DRV_NAME - ": Tx Packet Len:%d\n", priv->tx_skb->len); + netdev_printk(KERN_DEBUG, ndev, "Tx Packet Len:%d\n", + priv->tx_skb->len); if (netif_msg_pktdata(priv)) dump_packet(__func__, @@ -1253,6 +1250,7 @@ static void enc28j60_hw_tx(struct enc28j60_net *priv) #ifdef CONFIG_ENC28J60_WRITEVERIFY /* readback and verify written data */ if (netif_msg_drv(priv)) { + struct device *dev = &priv->spi->dev; int test_len, k; u8 test_buf[64]; /* limit the test to the first 64 bytes */ int okflag; @@ -1266,16 +1264,14 @@ static void enc28j60_hw_tx(struct enc28j60_net *priv) okflag = 1; for (k = 0; k < test_len; k++) { if (priv->tx_skb->data[k] != test_buf[k]) { - printk(KERN_DEBUG DRV_NAME - ": Error, %d location differ: " - "0x%02x-0x%02x\n", k, - priv->tx_skb->data[k], test_buf[k]); + dev_printk(KERN_DEBUG, dev, + "Error, %d location differ: 0x%02x-0x%02x\n", + k, priv->tx_skb->data[k], test_buf[k]); okflag = 0; } } if (!okflag) - printk(KERN_DEBUG DRV_NAME ": Tx write buffer, " - "verify ERROR!\n"); + dev_printk(KERN_DEBUG, dev, "Tx write buffer, verify ERROR!\n"); } #endif /* set TX request flag */ @@ -1287,14 +1283,11 @@ static netdev_tx_t enc28j60_send_packet(struct sk_buff *skb, { struct enc28j60_net *priv = netdev_priv(dev); - if (netif_msg_tx_queued(priv)) - printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__); - /* If some error occurs while trying to transmit this * packet, you should return '1' from this function. * In such a case you _may not_ do anything to the * SKB, it is still owned by the network queueing - * layer when an error is returned. This means you + * layer when an error is returned. This means you * may not modify any SKB fields, you may not free * the SKB, etc. 
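 * (In today's netdev_tx_t API this corresponds to returning
 * NETDEV_TX_BUSY rather than a literal 1.)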
*/ @@ -1337,7 +1330,7 @@ static void enc28j60_tx_timeout(struct net_device *ndev) struct enc28j60_net *priv = netdev_priv(ndev); if (netif_msg_timer(priv)) - dev_err(&ndev->dev, DRV_NAME " tx timeout\n"); + netdev_err(ndev, "tx timeout\n"); ndev->stats.tx_errors++; /* can't restart safely under softirq */ @@ -1356,13 +1349,9 @@ static int enc28j60_net_open(struct net_device *dev) { struct enc28j60_net *priv = netdev_priv(dev); - if (netif_msg_drv(priv)) - printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__); - if (!is_valid_ether_addr(dev->dev_addr)) { if (netif_msg_ifup(priv)) - dev_err(&dev->dev, "invalid MAC address %pM\n", - dev->dev_addr); + netdev_err(dev, "invalid MAC address %pM\n", dev->dev_addr); return -EADDRNOTAVAIL; } /* Reset the hardware here (and take it out of low power mode) */ @@ -1370,7 +1359,7 @@ static int enc28j60_net_open(struct net_device *dev) enc28j60_hw_disable(priv); if (!enc28j60_hw_init(priv)) { if (netif_msg_ifup(priv)) - dev_err(&dev->dev, "hw_reset() failed\n"); + netdev_err(dev, "hw_reset() failed\n"); return -EINVAL; } /* Update the MAC address (in case user has changed it) */ @@ -1392,9 +1381,6 @@ static int enc28j60_net_close(struct net_device *dev) { struct enc28j60_net *priv = netdev_priv(dev); - if (netif_msg_drv(priv)) - printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__); - enc28j60_hw_disable(priv); enc28j60_lowpower(priv, true); netif_stop_queue(dev); @@ -1415,16 +1401,16 @@ static void enc28j60_set_multicast_list(struct net_device *dev) if (dev->flags & IFF_PROMISC) { if (netif_msg_link(priv)) - dev_info(&dev->dev, "promiscuous mode\n"); + netdev_info(dev, "promiscuous mode\n"); priv->rxfilter = RXFILTER_PROMISC; } else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) { if (netif_msg_link(priv)) - dev_info(&dev->dev, "%smulticast mode\n", - (dev->flags & IFF_ALLMULTI) ? "all-" : ""); + netdev_info(dev, "%smulticast mode\n", + (dev->flags & IFF_ALLMULTI) ? 
"all-" : ""); priv->rxfilter = RXFILTER_MULTI; } else { if (netif_msg_link(priv)) - dev_info(&dev->dev, "normal mode\n"); + netdev_info(dev, "normal mode\n"); priv->rxfilter = RXFILTER_NORMAL; } @@ -1436,20 +1422,21 @@ static void enc28j60_setrx_work_handler(struct work_struct *work) { struct enc28j60_net *priv = container_of(work, struct enc28j60_net, setrx_work); + struct device *dev = &priv->spi->dev; if (priv->rxfilter == RXFILTER_PROMISC) { if (netif_msg_drv(priv)) - printk(KERN_DEBUG DRV_NAME ": promiscuous mode\n"); + dev_printk(KERN_DEBUG, dev, "promiscuous mode\n"); locked_regb_write(priv, ERXFCON, 0x00); } else if (priv->rxfilter == RXFILTER_MULTI) { if (netif_msg_drv(priv)) - printk(KERN_DEBUG DRV_NAME ": multicast mode\n"); + dev_printk(KERN_DEBUG, dev, "multicast mode\n"); locked_regb_write(priv, ERXFCON, ERXFCON_UCEN | ERXFCON_CRCEN | ERXFCON_BCEN | ERXFCON_MCEN); } else { if (netif_msg_drv(priv)) - printk(KERN_DEBUG DRV_NAME ": normal mode\n"); + dev_printk(KERN_DEBUG, dev, "normal mode\n"); locked_regb_write(priv, ERXFCON, ERXFCON_UCEN | ERXFCON_CRCEN | ERXFCON_BCEN); @@ -1468,7 +1455,7 @@ static void enc28j60_restart_work_handler(struct work_struct *work) enc28j60_net_close(ndev); ret = enc28j60_net_open(ndev); if (unlikely(ret)) { - dev_info(&ndev->dev, " could not restart %d\n", ret); + netdev_info(ndev, "could not restart %d\n", ret); dev_close(ndev); } } @@ -1552,14 +1539,13 @@ static const struct net_device_ops enc28j60_netdev_ops = { static int enc28j60_probe(struct spi_device *spi) { + unsigned char macaddr[ETH_ALEN]; struct net_device *dev; struct enc28j60_net *priv; - const void *macaddr; int ret = 0; if (netif_msg_drv(&debug)) - dev_info(&spi->dev, DRV_NAME " Ethernet driver %s loaded\n", - DRV_VERSION); + dev_info(&spi->dev, "Ethernet driver %s loaded\n", DRV_VERSION); dev = alloc_etherdev(sizeof(struct enc28j60_net)); if (!dev) { @@ -1570,8 +1556,7 @@ static int enc28j60_probe(struct spi_device *spi) priv->netdev = dev; /* priv to netdev reference */ priv->spi = spi; /* priv to spi reference */ - priv->msg_enable = netif_msg_init(debug.msg_enable, - ENC28J60_MSG_DEFAULT); + priv->msg_enable = netif_msg_init(debug.msg_enable, ENC28J60_MSG_DEFAULT); mutex_init(&priv->lock); INIT_WORK(&priv->tx_work, enc28j60_tx_work_handler); INIT_WORK(&priv->setrx_work, enc28j60_setrx_work_handler); @@ -1582,13 +1567,12 @@ static int enc28j60_probe(struct spi_device *spi) if (!enc28j60_chipset_init(dev)) { if (netif_msg_probe(priv)) - dev_info(&spi->dev, DRV_NAME " chip not found\n"); + dev_info(&spi->dev, "chip not found\n"); ret = -EIO; goto error_irq; } - macaddr = of_get_mac_address(spi->dev.of_node); - if (macaddr) + if (device_get_mac_address(&spi->dev, macaddr, sizeof(macaddr))) ether_addr_copy(dev->dev_addr, macaddr); else eth_hw_addr_random(dev); @@ -1600,8 +1584,8 @@ static int enc28j60_probe(struct spi_device *spi) ret = request_irq(spi->irq, enc28j60_irq, 0, DRV_NAME, priv); if (ret < 0) { if (netif_msg_probe(priv)) - dev_err(&spi->dev, DRV_NAME ": request irq %d failed " - "(ret = %d)\n", spi->irq, ret); + dev_err(&spi->dev, "request irq %d failed (ret = %d)\n", + spi->irq, ret); goto error_irq; } @@ -1616,11 +1600,10 @@ static int enc28j60_probe(struct spi_device *spi) ret = register_netdev(dev); if (ret) { if (netif_msg_probe(priv)) - dev_err(&spi->dev, "register netdev " DRV_NAME - " failed (ret = %d)\n", ret); + dev_err(&spi->dev, "register netdev failed (ret = %d)\n", + ret); goto error_register; } - dev_info(&dev->dev, DRV_NAME " driver registered\n"); return 
0; @@ -1636,9 +1619,6 @@ static int enc28j60_remove(struct spi_device *spi) { struct enc28j60_net *priv = spi_get_drvdata(spi); - if (netif_msg_drv(priv)) - printk(KERN_DEBUG DRV_NAME ": remove\n"); - unregister_netdev(priv->netdev); free_irq(spi->irq, priv); free_netdev(priv->netdev); @@ -1660,22 +1640,7 @@ static struct spi_driver enc28j60_driver = { .probe = enc28j60_probe, .remove = enc28j60_remove, }; - -static int __init enc28j60_init(void) -{ - msec20_to_jiffies = msecs_to_jiffies(20); - - return spi_register_driver(&enc28j60_driver); -} - -module_init(enc28j60_init); - -static void __exit enc28j60_exit(void) -{ - spi_unregister_driver(&enc28j60_driver); -} - -module_exit(enc28j60_exit); +module_spi_driver(enc28j60_driver); MODULE_DESCRIPTION(DRV_NAME " ethernet driver"); MODULE_AUTHOR("Claudio Lanconelli <lanconelli.claudio@eptar.com>"); diff --git a/drivers/net/ethernet/netronome/Kconfig b/drivers/net/ethernet/netronome/Kconfig index 549898d5d450..f0d0e09f60e2 100644 --- a/drivers/net/ethernet/netronome/Kconfig +++ b/drivers/net/ethernet/netronome/Kconfig @@ -19,6 +19,7 @@ config NFP tristate "Netronome(R) NFP4000/NFP6000 NIC driver" depends on PCI && PCI_MSI depends on VXLAN || VXLAN=n + select NET_DEVLINK ---help--- This driver supports the Netronome(R) NFP4000/NFP6000 based cards working as an advanced Ethernet NIC. It works with both diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile index 47c708f08ade..0673f3aa2c8d 100644 --- a/drivers/net/ethernet/netronome/nfp/Makefile +++ b/drivers/net/ethernet/netronome/nfp/Makefile @@ -15,6 +15,7 @@ nfp-objs := \ nfpcore/nfp_resource.o \ nfpcore/nfp_rtsym.o \ nfpcore/nfp_target.o \ + ccm.o \ nfp_asm.o \ nfp_app.o \ nfp_app_nic.o \ diff --git a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c index 9584f03f3efa..69e84ff7f2e5 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c +++ b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c @@ -261,10 +261,15 @@ int nfp_abm_ctrl_qm_disable(struct nfp_abm *abm) int nfp_abm_ctrl_prio_map_update(struct nfp_abm_link *alink, u32 *packed) { + const u32 cmd = NFP_NET_CFG_MBOX_CMD_PCI_DSCP_PRIOMAP_SET; struct nfp_net *nn = alink->vnic; unsigned int i; int err; + err = nfp_net_mbox_lock(nn, alink->abm->prio_map_len); + if (err) + return err; + /* Write data_len and wipe reserved */ nn_writeq(nn, nn->tlv_caps.mbox_off + NFP_NET_ABM_MBOX_DATALEN, alink->abm->prio_map_len); @@ -273,8 +278,7 @@ int nfp_abm_ctrl_prio_map_update(struct nfp_abm_link *alink, u32 *packed) nn_writel(nn, nn->tlv_caps.mbox_off + NFP_NET_ABM_MBOX_DATA + i, packed[i / sizeof(u32)]); - err = nfp_net_reconfig_mbox(nn, - NFP_NET_CFG_MBOX_CMD_PCI_DSCP_PRIOMAP_SET); + err = nfp_net_mbox_reconfig_and_unlock(nn, cmd); if (err) nfp_err(alink->abm->app->cpp, "setting DSCP -> VQ map failed with error %d\n", err); diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c index 4d4ff5844c47..9183b3e85d21 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/main.c +++ b/drivers/net/ethernet/netronome/nfp/abm/main.c @@ -53,7 +53,8 @@ nfp_abm_setup_tc(struct nfp_app *app, struct net_device *netdev, } } -static struct net_device *nfp_abm_repr_get(struct nfp_app *app, u32 port_id) +static struct net_device * +nfp_abm_repr_get(struct nfp_app *app, u32 port_id, bool *redir_egress) { enum nfp_repr_type rtype; struct nfp_reprs *reprs; @@ -549,5 +550,5 @@ const struct nfp_app_type app_abm = { .eswitch_mode_get = 
nfp_abm_eswitch_mode_get, .eswitch_mode_set = nfp_abm_eswitch_mode_set, - .repr_get = nfp_abm_repr_get, + .dev_get = nfp_abm_repr_get, }; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c index 9b6cfa697879..bc9850e4ec5e 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c @@ -6,48 +6,13 @@ #include <linux/bug.h> #include <linux/jiffies.h> #include <linux/skbuff.h> -#include <linux/wait.h> +#include "../ccm.h" #include "../nfp_app.h" #include "../nfp_net.h" #include "fw.h" #include "main.h" -#define NFP_BPF_TAG_ALLOC_SPAN (U16_MAX / 4) - -static bool nfp_bpf_all_tags_busy(struct nfp_app_bpf *bpf) -{ - u16 used_tags; - - used_tags = bpf->tag_alloc_next - bpf->tag_alloc_last; - - return used_tags > NFP_BPF_TAG_ALLOC_SPAN; -} - -static int nfp_bpf_alloc_tag(struct nfp_app_bpf *bpf) -{ - /* All FW communication for BPF is request-reply. To make sure we - * don't reuse the message ID too early after timeout - limit the - * number of requests in flight. - */ - if (nfp_bpf_all_tags_busy(bpf)) { - cmsg_warn(bpf, "all FW request contexts busy!\n"); - return -EAGAIN; - } - - WARN_ON(__test_and_set_bit(bpf->tag_alloc_next, bpf->tag_allocator)); - return bpf->tag_alloc_next++; -} - -static void nfp_bpf_free_tag(struct nfp_app_bpf *bpf, u16 tag) -{ - WARN_ON(!__test_and_clear_bit(tag, bpf->tag_allocator)); - - while (!test_bit(bpf->tag_alloc_last, bpf->tag_allocator) && - bpf->tag_alloc_last != bpf->tag_alloc_next) - bpf->tag_alloc_last++; -} - static struct sk_buff * nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size) { @@ -87,149 +52,6 @@ nfp_bpf_cmsg_map_reply_size(struct nfp_app_bpf *bpf, unsigned int n) return size; } -static u8 nfp_bpf_cmsg_get_type(struct sk_buff *skb) -{ - struct cmsg_hdr *hdr; - - hdr = (struct cmsg_hdr *)skb->data; - - return hdr->type; -} - -static unsigned int nfp_bpf_cmsg_get_tag(struct sk_buff *skb) -{ - struct cmsg_hdr *hdr; - - hdr = (struct cmsg_hdr *)skb->data; - - return be16_to_cpu(hdr->tag); -} - -static struct sk_buff *__nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag) -{ - unsigned int msg_tag; - struct sk_buff *skb; - - skb_queue_walk(&bpf->cmsg_replies, skb) { - msg_tag = nfp_bpf_cmsg_get_tag(skb); - if (msg_tag == tag) { - nfp_bpf_free_tag(bpf, tag); - __skb_unlink(skb, &bpf->cmsg_replies); - return skb; - } - } - - return NULL; -} - -static struct sk_buff *nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag) -{ - struct sk_buff *skb; - - nfp_ctrl_lock(bpf->app->ctrl); - skb = __nfp_bpf_reply(bpf, tag); - nfp_ctrl_unlock(bpf->app->ctrl); - - return skb; -} - -static struct sk_buff *nfp_bpf_reply_drop_tag(struct nfp_app_bpf *bpf, u16 tag) -{ - struct sk_buff *skb; - - nfp_ctrl_lock(bpf->app->ctrl); - skb = __nfp_bpf_reply(bpf, tag); - if (!skb) - nfp_bpf_free_tag(bpf, tag); - nfp_ctrl_unlock(bpf->app->ctrl); - - return skb; -} - -static struct sk_buff * -nfp_bpf_cmsg_wait_reply(struct nfp_app_bpf *bpf, enum nfp_bpf_cmsg_type type, - int tag) -{ - struct sk_buff *skb; - int i, err; - - for (i = 0; i < 50; i++) { - udelay(4); - skb = nfp_bpf_reply(bpf, tag); - if (skb) - return skb; - } - - err = wait_event_interruptible_timeout(bpf->cmsg_wq, - skb = nfp_bpf_reply(bpf, tag), - msecs_to_jiffies(5000)); - /* We didn't get a response - try last time and atomically drop - * the tag even if no response is matched. 
- */ - if (!skb) - skb = nfp_bpf_reply_drop_tag(bpf, tag); - if (err < 0) { - cmsg_warn(bpf, "%s waiting for response to 0x%02x: %d\n", - err == ERESTARTSYS ? "interrupted" : "error", - type, err); - return ERR_PTR(err); - } - if (!skb) { - cmsg_warn(bpf, "timeout waiting for response to 0x%02x\n", - type); - return ERR_PTR(-ETIMEDOUT); - } - - return skb; -} - -static struct sk_buff * -nfp_bpf_cmsg_communicate(struct nfp_app_bpf *bpf, struct sk_buff *skb, - enum nfp_bpf_cmsg_type type, unsigned int reply_size) -{ - struct cmsg_hdr *hdr; - int tag; - - nfp_ctrl_lock(bpf->app->ctrl); - tag = nfp_bpf_alloc_tag(bpf); - if (tag < 0) { - nfp_ctrl_unlock(bpf->app->ctrl); - dev_kfree_skb_any(skb); - return ERR_PTR(tag); - } - - hdr = (void *)skb->data; - hdr->ver = CMSG_MAP_ABI_VERSION; - hdr->type = type; - hdr->tag = cpu_to_be16(tag); - - __nfp_app_ctrl_tx(bpf->app, skb); - - nfp_ctrl_unlock(bpf->app->ctrl); - - skb = nfp_bpf_cmsg_wait_reply(bpf, type, tag); - if (IS_ERR(skb)) - return skb; - - hdr = (struct cmsg_hdr *)skb->data; - if (hdr->type != __CMSG_REPLY(type)) { - cmsg_warn(bpf, "cmsg drop - wrong type 0x%02x != 0x%02lx!\n", - hdr->type, __CMSG_REPLY(type)); - goto err_free; - } - /* 0 reply_size means caller will do the validation */ - if (reply_size && skb->len != reply_size) { - cmsg_warn(bpf, "cmsg drop - type 0x%02x wrong size %d != %d!\n", - type, skb->len, reply_size); - goto err_free; - } - - return skb; -err_free: - dev_kfree_skb_any(skb); - return ERR_PTR(-EIO); -} - static int nfp_bpf_ctrl_rc_to_errno(struct nfp_app_bpf *bpf, struct cmsg_reply_map_simple *reply) @@ -275,8 +97,8 @@ nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map) req->map_type = cpu_to_be32(map->map_type); req->map_flags = 0; - skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_ALLOC, - sizeof(*reply)); + skb = nfp_ccm_communicate(&bpf->ccm, skb, NFP_CCM_TYPE_BPF_MAP_ALLOC, + sizeof(*reply)); if (IS_ERR(skb)) return PTR_ERR(skb); @@ -310,8 +132,8 @@ void nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map) req = (void *)skb->data; req->tid = cpu_to_be32(nfp_map->tid); - skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_FREE, - sizeof(*reply)); + skb = nfp_ccm_communicate(&bpf->ccm, skb, NFP_CCM_TYPE_BPF_MAP_FREE, + sizeof(*reply)); if (IS_ERR(skb)) { cmsg_warn(bpf, "leaking map - I/O error\n"); return; @@ -354,8 +176,7 @@ nfp_bpf_ctrl_reply_val(struct nfp_app_bpf *bpf, struct cmsg_reply_map_op *reply, } static int -nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap, - enum nfp_bpf_cmsg_type op, +nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap, enum nfp_ccm_type op, u8 *key, u8 *value, u64 flags, u8 *out_key, u8 *out_value) { struct nfp_bpf_map *nfp_map = offmap->dev_priv; @@ -386,8 +207,8 @@ nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap, memcpy(nfp_bpf_ctrl_req_val(bpf, req, 0), value, map->value_size); - skb = nfp_bpf_cmsg_communicate(bpf, skb, op, - nfp_bpf_cmsg_map_reply_size(bpf, 1)); + skb = nfp_ccm_communicate(&bpf->ccm, skb, op, + nfp_bpf_cmsg_map_reply_size(bpf, 1)); if (IS_ERR(skb)) return PTR_ERR(skb); @@ -415,34 +236,34 @@ err_free: int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap, void *key, void *value, u64 flags) { - return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_UPDATE, + return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_UPDATE, key, value, flags, NULL, NULL); } int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key) { - return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_DELETE, + 
return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_DELETE, key, NULL, 0, NULL, NULL); } int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap, void *key, void *value) { - return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_LOOKUP, + return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_LOOKUP, key, NULL, 0, NULL, value); } int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap, void *next_key) { - return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETFIRST, + return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_GETFIRST, NULL, NULL, 0, next_key, NULL); } int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap, void *key, void *next_key) { - return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETNEXT, + return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_GETNEXT, key, NULL, 0, next_key, NULL); } @@ -456,54 +277,35 @@ unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf) void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb) { struct nfp_app_bpf *bpf = app->priv; - unsigned int tag; if (unlikely(skb->len < sizeof(struct cmsg_reply_map_simple))) { cmsg_warn(bpf, "cmsg drop - too short %d!\n", skb->len); - goto err_free; + dev_kfree_skb_any(skb); + return; } - if (nfp_bpf_cmsg_get_type(skb) == CMSG_TYPE_BPF_EVENT) { + if (nfp_ccm_get_type(skb) == NFP_CCM_TYPE_BPF_BPF_EVENT) { if (!nfp_bpf_event_output(bpf, skb->data, skb->len)) dev_consume_skb_any(skb); else dev_kfree_skb_any(skb); - return; } - nfp_ctrl_lock(bpf->app->ctrl); - - tag = nfp_bpf_cmsg_get_tag(skb); - if (unlikely(!test_bit(tag, bpf->tag_allocator))) { - cmsg_warn(bpf, "cmsg drop - no one is waiting for tag %u!\n", - tag); - goto err_unlock; - } - - __skb_queue_tail(&bpf->cmsg_replies, skb); - wake_up_interruptible_all(&bpf->cmsg_wq); - - nfp_ctrl_unlock(bpf->app->ctrl); - - return; -err_unlock: - nfp_ctrl_unlock(bpf->app->ctrl); -err_free: - dev_kfree_skb_any(skb); + nfp_ccm_rx(&bpf->ccm, skb); } void nfp_bpf_ctrl_msg_rx_raw(struct nfp_app *app, const void *data, unsigned int len) { + const struct nfp_ccm_hdr *hdr = data; struct nfp_app_bpf *bpf = app->priv; - const struct cmsg_hdr *hdr = data; if (unlikely(len < sizeof(struct cmsg_reply_map_simple))) { cmsg_warn(bpf, "cmsg drop - too short %d!\n", len); return; } - if (hdr->type == CMSG_TYPE_BPF_EVENT) + if (hdr->type == NFP_CCM_TYPE_BPF_BPF_EVENT) nfp_bpf_event_output(bpf, data, len); else cmsg_warn(bpf, "cmsg drop - msg type %d with raw buffer!\n", diff --git a/drivers/net/ethernet/netronome/nfp/bpf/fw.h b/drivers/net/ethernet/netronome/nfp/bpf/fw.h index 721921bcf120..06c4286bd79e 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/fw.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/fw.h @@ -6,6 +6,7 @@ #include <linux/bitops.h> #include <linux/types.h> +#include "../ccm.h" /* Kernel's enum bpf_reg_type is not uABI so people may change it breaking * our FW ABI. In that case we will do translation in the driver. 
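/* [Editorial sketch - not part of the patch] Minimal caller-side view
 * of the common control message (CCM) layer the hunks above migrate
 * the BPF code onto: nfp_ccm_communicate() stamps the nfp_ccm_hdr
 * (version, type, tag) at the head of the skb, transmits it, and
 * sleeps until nfp_ccm_rx() queues a reply carrying the same tag. A
 * reply_size of 0 skips the length check so the caller can validate
 * variable-length replies itself. The helper name and the bare
 * alloc_skb() (real callers allocate via the app's control message
 * helpers, which account for transport headroom) are illustrative
 * assumptions, not part of this series.
 */
static int example_ccm_request(struct nfp_ccm *ccm, enum nfp_ccm_type type,
			       const void *data, unsigned int data_len)
{
	struct sk_buff *skb;

	skb = alloc_skb(sizeof(struct nfp_ccm_hdr) + data_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* Leave room for the header nfp_ccm_communicate() fills in,
	 * then append the request payload behind it.
	 */
	skb_put(skb, sizeof(struct nfp_ccm_hdr));
	skb_put_data(skb, data, data_len);

	skb = nfp_ccm_communicate(ccm, skb, type, 0);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* The tag-matched reply is handed back to the caller; free it
	 * once its payload has been consumed.
	 */
	dev_consume_skb_any(skb);
	return 0;
}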
@@ -52,22 +53,6 @@ struct nfp_bpf_cap_tlv_maps { /* * Types defined for map related control messages */ -#define CMSG_MAP_ABI_VERSION 1 - -enum nfp_bpf_cmsg_type { - CMSG_TYPE_MAP_ALLOC = 1, - CMSG_TYPE_MAP_FREE = 2, - CMSG_TYPE_MAP_LOOKUP = 3, - CMSG_TYPE_MAP_UPDATE = 4, - CMSG_TYPE_MAP_DELETE = 5, - CMSG_TYPE_MAP_GETNEXT = 6, - CMSG_TYPE_MAP_GETFIRST = 7, - CMSG_TYPE_BPF_EVENT = 8, - __CMSG_TYPE_MAP_MAX, -}; - -#define CMSG_TYPE_MAP_REPLY_BIT 7 -#define __CMSG_REPLY(req) (BIT(CMSG_TYPE_MAP_REPLY_BIT) | (req)) /* BPF ABIv2 fixed-length control message fields */ #define CMSG_MAP_KEY_LW 16 @@ -84,19 +69,13 @@ enum nfp_bpf_cmsg_status { CMSG_RC_ERR_MAP_E2BIG = 7, }; -struct cmsg_hdr { - u8 type; - u8 ver; - __be16 tag; -}; - struct cmsg_reply_map_simple { - struct cmsg_hdr hdr; + struct nfp_ccm_hdr hdr; __be32 rc; }; struct cmsg_req_map_alloc_tbl { - struct cmsg_hdr hdr; + struct nfp_ccm_hdr hdr; __be32 key_size; /* in bytes */ __be32 value_size; /* in bytes */ __be32 max_entries; @@ -110,7 +89,7 @@ struct cmsg_reply_map_alloc_tbl { }; struct cmsg_req_map_free_tbl { - struct cmsg_hdr hdr; + struct nfp_ccm_hdr hdr; __be32 tid; }; @@ -120,7 +99,7 @@ struct cmsg_reply_map_free_tbl { }; struct cmsg_req_map_op { - struct cmsg_hdr hdr; + struct nfp_ccm_hdr hdr; __be32 tid; __be32 count; __be32 flags; @@ -135,7 +114,7 @@ struct cmsg_reply_map_op { }; struct cmsg_bpf_event { - struct cmsg_hdr hdr; + struct nfp_ccm_hdr hdr; __be32 cpu_id; __be64 map_ptr; __be32 data_size; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index 275de9f4c61c..9c136da25221 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -442,14 +442,16 @@ static int nfp_bpf_init(struct nfp_app *app) bpf->app = app; app->priv = bpf; - skb_queue_head_init(&bpf->cmsg_replies); - init_waitqueue_head(&bpf->cmsg_wq); INIT_LIST_HEAD(&bpf->map_list); - err = rhashtable_init(&bpf->maps_neutral, &nfp_bpf_maps_neutral_params); + err = nfp_ccm_init(&bpf->ccm, app); if (err) goto err_free_bpf; + err = rhashtable_init(&bpf->maps_neutral, &nfp_bpf_maps_neutral_params); + if (err) + goto err_clean_ccm; + nfp_bpf_init_capabilities(bpf); err = nfp_bpf_parse_capabilities(app); @@ -474,6 +476,8 @@ static int nfp_bpf_init(struct nfp_app *app) err_free_neutral_maps: rhashtable_destroy(&bpf->maps_neutral); +err_clean_ccm: + nfp_ccm_clean(&bpf->ccm); err_free_bpf: kfree(bpf); return err; @@ -484,7 +488,7 @@ static void nfp_bpf_clean(struct nfp_app *app) struct nfp_app_bpf *bpf = app->priv; bpf_offload_dev_destroy(bpf->bpf_dev); - WARN_ON(!skb_queue_empty(&bpf->cmsg_replies)); + nfp_ccm_clean(&bpf->ccm); WARN_ON(!list_empty(&bpf->map_list)); WARN_ON(bpf->maps_in_use || bpf->map_elems_in_use); rhashtable_free_and_destroy(&bpf->maps_neutral, diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index b25a48218bcf..e54d1ac84df2 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -14,6 +14,7 @@ #include <linux/types.h> #include <linux/wait.h> +#include "../ccm.h" #include "../nfp_asm.h" #include "fw.h" @@ -84,16 +85,10 @@ enum pkt_vec { /** * struct nfp_app_bpf - bpf app priv structure * @app: backpointer to the app + * @ccm: common control message handler data * * @bpf_dev: BPF offload device handle * - * @tag_allocator: bitmap of control message tags in use - * @tag_alloc_next: next tag bit to allocate - * @tag_alloc_last: next 
tag bit to be freed - * - * @cmsg_replies: received cmsg replies waiting to be consumed - * @cmsg_wq: work queue for waiting for cmsg replies - * * @cmsg_key_sz: size of key in cmsg element array * @cmsg_val_sz: size of value in cmsg element array * @@ -132,16 +127,10 @@ enum pkt_vec { */ struct nfp_app_bpf { struct nfp_app *app; + struct nfp_ccm ccm; struct bpf_offload_dev *bpf_dev; - DECLARE_BITMAP(tag_allocator, U16_MAX + 1); - u16 tag_alloc_next; - u16 tag_alloc_last; - - struct sk_buff_head cmsg_replies; - struct wait_queue_head cmsg_wq; - unsigned int cmsg_key_sz; unsigned int cmsg_val_sz; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index 15dce97650a5..39c9fec222b4 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -22,6 +22,7 @@ #include <net/tc_act/tc_mirred.h> #include "main.h" +#include "../ccm.h" #include "../nfp_app.h" #include "../nfp_net_ctrl.h" #include "../nfp_net.h" @@ -452,7 +453,7 @@ int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data, if (len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size) return -EINVAL; - if (cbe->hdr.ver != CMSG_MAP_ABI_VERSION) + if (cbe->hdr.ver != NFP_CCM_ABI_VERSION) return -EINVAL; rcu_read_lock(); diff --git a/drivers/net/ethernet/netronome/nfp/ccm.c b/drivers/net/ethernet/netronome/nfp/ccm.c new file mode 100644 index 000000000000..94476e41e261 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/ccm.c @@ -0,0 +1,220 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2016-2019 Netronome Systems, Inc. */ + +#include <linux/bitops.h> + +#include "ccm.h" +#include "nfp_app.h" +#include "nfp_net.h" + +#define NFP_CCM_TYPE_REPLY_BIT 7 +#define __NFP_CCM_REPLY(req) (BIT(NFP_CCM_TYPE_REPLY_BIT) | (req)) + +#define ccm_warn(app, msg...) nn_dp_warn(&(app)->ctrl->dp, msg) + +#define NFP_CCM_TAG_ALLOC_SPAN (U16_MAX / 4) + +static bool nfp_ccm_all_tags_busy(struct nfp_ccm *ccm) +{ + u16 used_tags; + + used_tags = ccm->tag_alloc_next - ccm->tag_alloc_last; + + return used_tags > NFP_CCM_TAG_ALLOC_SPAN; +} + +static int nfp_ccm_alloc_tag(struct nfp_ccm *ccm) +{ + /* CCM is for FW communication which is request-reply. To make sure + * we don't reuse the message ID too early after timeout - limit the + * number of requests in flight. 
+ */ + if (unlikely(nfp_ccm_all_tags_busy(ccm))) { + ccm_warn(ccm->app, "all FW request contexts busy!\n"); + return -EAGAIN; + } + + WARN_ON(__test_and_set_bit(ccm->tag_alloc_next, ccm->tag_allocator)); + return ccm->tag_alloc_next++; +} + +static void nfp_ccm_free_tag(struct nfp_ccm *ccm, u16 tag) +{ + WARN_ON(!__test_and_clear_bit(tag, ccm->tag_allocator)); + + while (!test_bit(ccm->tag_alloc_last, ccm->tag_allocator) && + ccm->tag_alloc_last != ccm->tag_alloc_next) + ccm->tag_alloc_last++; +} + +static struct sk_buff *__nfp_ccm_reply(struct nfp_ccm *ccm, u16 tag) +{ + unsigned int msg_tag; + struct sk_buff *skb; + + skb_queue_walk(&ccm->replies, skb) { + msg_tag = nfp_ccm_get_tag(skb); + if (msg_tag == tag) { + nfp_ccm_free_tag(ccm, tag); + __skb_unlink(skb, &ccm->replies); + return skb; + } + } + + return NULL; +} + +static struct sk_buff * +nfp_ccm_reply(struct nfp_ccm *ccm, struct nfp_app *app, u16 tag) +{ + struct sk_buff *skb; + + nfp_ctrl_lock(app->ctrl); + skb = __nfp_ccm_reply(ccm, tag); + nfp_ctrl_unlock(app->ctrl); + + return skb; +} + +static struct sk_buff * +nfp_ccm_reply_drop_tag(struct nfp_ccm *ccm, struct nfp_app *app, u16 tag) +{ + struct sk_buff *skb; + + nfp_ctrl_lock(app->ctrl); + skb = __nfp_ccm_reply(ccm, tag); + if (!skb) + nfp_ccm_free_tag(ccm, tag); + nfp_ctrl_unlock(app->ctrl); + + return skb; +} + +static struct sk_buff * +nfp_ccm_wait_reply(struct nfp_ccm *ccm, struct nfp_app *app, + enum nfp_ccm_type type, int tag) +{ + struct sk_buff *skb; + int i, err; + + for (i = 0; i < 50; i++) { + udelay(4); + skb = nfp_ccm_reply(ccm, app, tag); + if (skb) + return skb; + } + + err = wait_event_interruptible_timeout(ccm->wq, + skb = nfp_ccm_reply(ccm, app, + tag), + msecs_to_jiffies(5000)); + /* We didn't get a response - try last time and atomically drop + * the tag even if no response is matched. + */ + if (!skb) + skb = nfp_ccm_reply_drop_tag(ccm, app, tag); + if (err < 0) { + ccm_warn(app, "%s waiting for response to 0x%02x: %d\n", + err == ERESTARTSYS ? 
"interrupted" : "error", + type, err); + return ERR_PTR(err); + } + if (!skb) { + ccm_warn(app, "timeout waiting for response to 0x%02x\n", type); + return ERR_PTR(-ETIMEDOUT); + } + + return skb; +} + +struct sk_buff * +nfp_ccm_communicate(struct nfp_ccm *ccm, struct sk_buff *skb, + enum nfp_ccm_type type, unsigned int reply_size) +{ + struct nfp_app *app = ccm->app; + struct nfp_ccm_hdr *hdr; + int reply_type, tag; + + nfp_ctrl_lock(app->ctrl); + tag = nfp_ccm_alloc_tag(ccm); + if (tag < 0) { + nfp_ctrl_unlock(app->ctrl); + dev_kfree_skb_any(skb); + return ERR_PTR(tag); + } + + hdr = (void *)skb->data; + hdr->ver = NFP_CCM_ABI_VERSION; + hdr->type = type; + hdr->tag = cpu_to_be16(tag); + + __nfp_app_ctrl_tx(app, skb); + + nfp_ctrl_unlock(app->ctrl); + + skb = nfp_ccm_wait_reply(ccm, app, type, tag); + if (IS_ERR(skb)) + return skb; + + reply_type = nfp_ccm_get_type(skb); + if (reply_type != __NFP_CCM_REPLY(type)) { + ccm_warn(app, "cmsg drop - wrong type 0x%02x != 0x%02lx!\n", + reply_type, __NFP_CCM_REPLY(type)); + goto err_free; + } + /* 0 reply_size means caller will do the validation */ + if (reply_size && skb->len != reply_size) { + ccm_warn(app, "cmsg drop - type 0x%02x wrong size %d != %d!\n", + type, skb->len, reply_size); + goto err_free; + } + + return skb; +err_free: + dev_kfree_skb_any(skb); + return ERR_PTR(-EIO); +} + +void nfp_ccm_rx(struct nfp_ccm *ccm, struct sk_buff *skb) +{ + struct nfp_app *app = ccm->app; + unsigned int tag; + + if (unlikely(skb->len < sizeof(struct nfp_ccm_hdr))) { + ccm_warn(app, "cmsg drop - too short %d!\n", skb->len); + goto err_free; + } + + nfp_ctrl_lock(app->ctrl); + + tag = nfp_ccm_get_tag(skb); + if (unlikely(!test_bit(tag, ccm->tag_allocator))) { + ccm_warn(app, "cmsg drop - no one is waiting for tag %u!\n", + tag); + goto err_unlock; + } + + __skb_queue_tail(&ccm->replies, skb); + wake_up_interruptible_all(&ccm->wq); + + nfp_ctrl_unlock(app->ctrl); + return; + +err_unlock: + nfp_ctrl_unlock(app->ctrl); +err_free: + dev_kfree_skb_any(skb); +} + +int nfp_ccm_init(struct nfp_ccm *ccm, struct nfp_app *app) +{ + ccm->app = app; + skb_queue_head_init(&ccm->replies); + init_waitqueue_head(&ccm->wq); + return 0; +} + +void nfp_ccm_clean(struct nfp_ccm *ccm) +{ + WARN_ON(!skb_queue_empty(&ccm->replies)); +} diff --git a/drivers/net/ethernet/netronome/nfp/ccm.h b/drivers/net/ethernet/netronome/nfp/ccm.h new file mode 100644 index 000000000000..e2fe4b867958 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/ccm.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2016-2019 Netronome Systems, Inc. 
*/ + +#ifndef NFP_CCM_H +#define NFP_CCM_H 1 + +#include <linux/bitmap.h> +#include <linux/skbuff.h> +#include <linux/wait.h> + +struct nfp_app; + +/* Firmware ABI */ + +enum nfp_ccm_type { + NFP_CCM_TYPE_BPF_MAP_ALLOC = 1, + NFP_CCM_TYPE_BPF_MAP_FREE = 2, + NFP_CCM_TYPE_BPF_MAP_LOOKUP = 3, + NFP_CCM_TYPE_BPF_MAP_UPDATE = 4, + NFP_CCM_TYPE_BPF_MAP_DELETE = 5, + NFP_CCM_TYPE_BPF_MAP_GETNEXT = 6, + NFP_CCM_TYPE_BPF_MAP_GETFIRST = 7, + NFP_CCM_TYPE_BPF_BPF_EVENT = 8, + __NFP_CCM_TYPE_MAX, +}; + +#define NFP_CCM_ABI_VERSION 1 + +struct nfp_ccm_hdr { + u8 type; + u8 ver; + __be16 tag; +}; + +static inline u8 nfp_ccm_get_type(struct sk_buff *skb) +{ + struct nfp_ccm_hdr *hdr; + + hdr = (struct nfp_ccm_hdr *)skb->data; + + return hdr->type; +} + +static inline unsigned int nfp_ccm_get_tag(struct sk_buff *skb) +{ + struct nfp_ccm_hdr *hdr; + + hdr = (struct nfp_ccm_hdr *)skb->data; + + return be16_to_cpu(hdr->tag); +} + +/* Implementation */ + +/** + * struct nfp_ccm - common control message handling + * @tag_allocator: bitmap of control message tags in use + * @tag_alloc_next: next tag bit to allocate + * @tag_alloc_last: next tag bit to be freed + * + * @replies: received cmsg replies waiting to be consumed + * @wq: work queue for waiting for cmsg replies + */ +struct nfp_ccm { + struct nfp_app *app; + + DECLARE_BITMAP(tag_allocator, U16_MAX + 1); + u16 tag_alloc_next; + u16 tag_alloc_last; + + struct sk_buff_head replies; + struct wait_queue_head wq; +}; + +int nfp_ccm_init(struct nfp_ccm *ccm, struct nfp_app *app); +void nfp_ccm_clean(struct nfp_ccm *ccm); +void nfp_ccm_rx(struct nfp_ccm *ccm, struct sk_buff *skb); +struct sk_buff * +nfp_ccm_communicate(struct nfp_ccm *ccm, struct sk_buff *skb, + enum nfp_ccm_type type, unsigned int reply_size); +#endif diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index e336f6ee94f5..c56e31d9f8a4 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -160,9 +160,9 @@ nfp_fl_get_tun_from_act_l4_port(struct nfp_app *app, struct nfp_flower_priv *priv = app->priv; switch (tun->key.tp_dst) { - case htons(NFP_FL_VXLAN_PORT): + case htons(IANA_VXLAN_UDP_PORT): return NFP_FL_TUNNEL_VXLAN; - case htons(NFP_FL_GENEVE_PORT): + case htons(GENEVE_UDP_PORT): if (priv->flower_ext_feats & NFP_FL_FEATS_GENEVE) return NFP_FL_TUNNEL_GENEVE; /* FALLTHROUGH */ @@ -582,60 +582,23 @@ static u32 nfp_fl_csum_l4_to_flag(u8 ip_proto) } } -static int -nfp_fl_pedit(const struct flow_action_entry *act, - struct tc_cls_flower_offload *flow, - char *nfp_action, int *a_len, u32 *csum_updated) -{ - struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow); +struct nfp_flower_pedit_acts { struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src; struct nfp_fl_set_ipv6_tc_hl_fl set_ip6_tc_hl_fl; struct nfp_fl_set_ip4_ttl_tos set_ip_ttl_tos; struct nfp_fl_set_ip4_addrs set_ip_addr; - enum flow_action_mangle_base htype; struct nfp_fl_set_tport set_tport; struct nfp_fl_set_eth set_eth; +}; + +static int +nfp_fl_commit_mangle(struct tc_cls_flower_offload *flow, char *nfp_action, + int *a_len, struct nfp_flower_pedit_acts *set_act, + u32 *csum_updated) +{ + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow); size_t act_size = 0; u8 ip_proto = 0; - u32 offset; - int err; - - memset(&set_ip6_tc_hl_fl, 0, sizeof(set_ip6_tc_hl_fl)); - memset(&set_ip_ttl_tos, 0, sizeof(set_ip_ttl_tos)); - memset(&set_ip6_dst, 0, sizeof(set_ip6_dst)); - 
memset(&set_ip6_src, 0, sizeof(set_ip6_src)); - memset(&set_ip_addr, 0, sizeof(set_ip_addr)); - memset(&set_tport, 0, sizeof(set_tport)); - memset(&set_eth, 0, sizeof(set_eth)); - - htype = act->mangle.htype; - offset = act->mangle.offset; - - switch (htype) { - case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH: - err = nfp_fl_set_eth(act, offset, &set_eth); - break; - case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4: - err = nfp_fl_set_ip4(act, offset, &set_ip_addr, - &set_ip_ttl_tos); - break; - case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6: - err = nfp_fl_set_ip6(act, offset, &set_ip6_dst, - &set_ip6_src, &set_ip6_tc_hl_fl); - break; - case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP: - err = nfp_fl_set_tport(act, offset, &set_tport, - NFP_FL_ACTION_OPCODE_SET_TCP); - break; - case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP: - err = nfp_fl_set_tport(act, offset, &set_tport, - NFP_FL_ACTION_OPCODE_SET_UDP); - break; - default: - return -EOPNOTSUPP; - } - if (err) - return err; if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { struct flow_match_basic match; @@ -644,77 +607,82 @@ nfp_fl_pedit(const struct flow_action_entry *act, ip_proto = match.key->ip_proto; } - if (set_eth.head.len_lw) { - act_size = sizeof(set_eth); - memcpy(nfp_action, &set_eth, act_size); + if (set_act->set_eth.head.len_lw) { + act_size = sizeof(set_act->set_eth); + memcpy(nfp_action, &set_act->set_eth, act_size); *a_len += act_size; } - if (set_ip_ttl_tos.head.len_lw) { + + if (set_act->set_ip_ttl_tos.head.len_lw) { nfp_action += act_size; - act_size = sizeof(set_ip_ttl_tos); - memcpy(nfp_action, &set_ip_ttl_tos, act_size); + act_size = sizeof(set_act->set_ip_ttl_tos); + memcpy(nfp_action, &set_act->set_ip_ttl_tos, act_size); *a_len += act_size; /* Hardware will automatically fix IPv4 and TCP/UDP checksum. */ *csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR | nfp_fl_csum_l4_to_flag(ip_proto); } - if (set_ip_addr.head.len_lw) { + + if (set_act->set_ip_addr.head.len_lw) { nfp_action += act_size; - act_size = sizeof(set_ip_addr); - memcpy(nfp_action, &set_ip_addr, act_size); + act_size = sizeof(set_act->set_ip_addr); + memcpy(nfp_action, &set_act->set_ip_addr, act_size); *a_len += act_size; /* Hardware will automatically fix IPv4 and TCP/UDP checksum. */ *csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR | nfp_fl_csum_l4_to_flag(ip_proto); } - if (set_ip6_tc_hl_fl.head.len_lw) { + + if (set_act->set_ip6_tc_hl_fl.head.len_lw) { nfp_action += act_size; - act_size = sizeof(set_ip6_tc_hl_fl); - memcpy(nfp_action, &set_ip6_tc_hl_fl, act_size); + act_size = sizeof(set_act->set_ip6_tc_hl_fl); + memcpy(nfp_action, &set_act->set_ip6_tc_hl_fl, act_size); *a_len += act_size; /* Hardware will automatically fix TCP/UDP checksum. */ *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto); } - if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) { + + if (set_act->set_ip6_dst.head.len_lw && + set_act->set_ip6_src.head.len_lw) { /* TC compiles set src and dst IPv6 address as a single action, * the hardware requires this to be 2 separate actions. */ nfp_action += act_size; - act_size = sizeof(set_ip6_src); - memcpy(nfp_action, &set_ip6_src, act_size); + act_size = sizeof(set_act->set_ip6_src); + memcpy(nfp_action, &set_act->set_ip6_src, act_size); *a_len += act_size; - act_size = sizeof(set_ip6_dst); - memcpy(&nfp_action[sizeof(set_ip6_src)], &set_ip6_dst, - act_size); + act_size = sizeof(set_act->set_ip6_dst); + memcpy(&nfp_action[sizeof(set_act->set_ip6_src)], + &set_act->set_ip6_dst, act_size); *a_len += act_size; /* Hardware will automatically fix TCP/UDP checksum. 
*/ *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto); - } else if (set_ip6_dst.head.len_lw) { + } else if (set_act->set_ip6_dst.head.len_lw) { nfp_action += act_size; - act_size = sizeof(set_ip6_dst); - memcpy(nfp_action, &set_ip6_dst, act_size); + act_size = sizeof(set_act->set_ip6_dst); + memcpy(nfp_action, &set_act->set_ip6_dst, act_size); *a_len += act_size; /* Hardware will automatically fix TCP/UDP checksum. */ *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto); - } else if (set_ip6_src.head.len_lw) { + } else if (set_act->set_ip6_src.head.len_lw) { nfp_action += act_size; - act_size = sizeof(set_ip6_src); - memcpy(nfp_action, &set_ip6_src, act_size); + act_size = sizeof(set_act->set_ip6_src); + memcpy(nfp_action, &set_act->set_ip6_src, act_size); *a_len += act_size; /* Hardware will automatically fix TCP/UDP checksum. */ *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto); } - if (set_tport.head.len_lw) { + if (set_act->set_tport.head.len_lw) { nfp_action += act_size; - act_size = sizeof(set_tport); - memcpy(nfp_action, &set_tport, act_size); + act_size = sizeof(set_act->set_tport); + memcpy(nfp_action, &set_act->set_tport, act_size); *a_len += act_size; /* Hardware will automatically fix TCP/UDP checksum. */ @@ -725,7 +693,40 @@ nfp_fl_pedit(const struct flow_action_entry *act, } static int -nfp_flower_output_action(struct nfp_app *app, const struct flow_action_entry *act, +nfp_fl_pedit(const struct flow_action_entry *act, + struct tc_cls_flower_offload *flow, char *nfp_action, int *a_len, + u32 *csum_updated, struct nfp_flower_pedit_acts *set_act) +{ + enum flow_action_mangle_base htype; + u32 offset; + + htype = act->mangle.htype; + offset = act->mangle.offset; + + switch (htype) { + case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH: + return nfp_fl_set_eth(act, offset, &set_act->set_eth); + case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4: + return nfp_fl_set_ip4(act, offset, &set_act->set_ip_addr, + &set_act->set_ip_ttl_tos); + case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6: + return nfp_fl_set_ip6(act, offset, &set_act->set_ip6_dst, + &set_act->set_ip6_src, + &set_act->set_ip6_tc_hl_fl); + case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP: + return nfp_fl_set_tport(act, offset, &set_act->set_tport, + NFP_FL_ACTION_OPCODE_SET_TCP); + case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP: + return nfp_fl_set_tport(act, offset, &set_act->set_tport, + NFP_FL_ACTION_OPCODE_SET_UDP); + default: + return -EOPNOTSUPP; + } +} + +static int +nfp_flower_output_action(struct nfp_app *app, + const struct flow_action_entry *act, struct nfp_fl_payload *nfp_fl, int *a_len, struct net_device *netdev, bool last, enum nfp_flower_tun_type *tun_type, int *tun_out_cnt, @@ -775,7 +776,8 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act, struct nfp_fl_payload *nfp_fl, int *a_len, struct net_device *netdev, enum nfp_flower_tun_type *tun_type, int *tun_out_cnt, - int *out_cnt, u32 *csum_updated) + int *out_cnt, u32 *csum_updated, + struct nfp_flower_pedit_acts *set_act) { struct nfp_fl_set_ipv4_udp_tun *set_tun; struct nfp_fl_pre_tunnel *pre_tun; @@ -860,7 +862,7 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act, return 0; case FLOW_ACTION_MANGLE: if (nfp_fl_pedit(act, flow, &nfp_fl->action_data[*a_len], - a_len, csum_updated)) + a_len, csum_updated, set_act)) return -EOPNOTSUPP; break; case FLOW_ACTION_CSUM: @@ -880,12 +882,49 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act, return 0; } +static bool nfp_fl_check_mangle_start(struct flow_action *flow_act, + int current_act_idx) 
+{ + struct flow_action_entry current_act; + struct flow_action_entry prev_act; + + current_act = flow_act->entries[current_act_idx]; + if (current_act.id != FLOW_ACTION_MANGLE) + return false; + + if (current_act_idx == 0) + return true; + + prev_act = flow_act->entries[current_act_idx - 1]; + + return prev_act.id != FLOW_ACTION_MANGLE; +} + +static bool nfp_fl_check_mangle_end(struct flow_action *flow_act, + int current_act_idx) +{ + struct flow_action_entry current_act; + struct flow_action_entry next_act; + + current_act = flow_act->entries[current_act_idx]; + if (current_act.id != FLOW_ACTION_MANGLE) + return false; + + if (current_act_idx == flow_act->num_entries) + return true; + + next_act = flow_act->entries[current_act_idx + 1]; + + return next_act.id != FLOW_ACTION_MANGLE; +} + int nfp_flower_compile_action(struct nfp_app *app, struct tc_cls_flower_offload *flow, struct net_device *netdev, struct nfp_fl_payload *nfp_flow) { int act_len, act_cnt, err, tun_out_cnt, out_cnt, i; + struct nfp_flower_pedit_acts set_act; enum nfp_flower_tun_type tun_type; struct flow_action_entry *act; u32 csum_updated = 0; @@ -899,12 +938,18 @@ int nfp_flower_compile_action(struct nfp_app *app, out_cnt = 0; flow_action_for_each(i, act, &flow->rule->action) { + if (nfp_fl_check_mangle_start(&flow->rule->action, i)) + memset(&set_act, 0, sizeof(set_act)); err = nfp_flower_loop_action(app, act, flow, nfp_flow, &act_len, netdev, &tun_type, &tun_out_cnt, - &out_cnt, &csum_updated); + &out_cnt, &csum_updated, &set_act); if (err) return err; act_cnt++; + if (nfp_fl_check_mangle_end(&flow->rule->action, i)) + nfp_fl_commit_mangle(flow, + &nfp_flow->action_data[act_len], + &act_len, &set_act, &csum_updated); } /* We optimise when the action list is small, this can unfortunately diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c index cf9e1118ee8f..7faec6887b8d 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c @@ -159,7 +159,7 @@ nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb) rtnl_lock(); rcu_read_lock(); - netdev = nfp_app_repr_get(app, be32_to_cpu(msg->portnum)); + netdev = nfp_app_dev_get(app, be32_to_cpu(msg->portnum), NULL); rcu_read_unlock(); if (!netdev) { nfp_flower_cmsg_warn(app, "ctrl msg for unknown port 0x%08x\n", @@ -192,7 +192,7 @@ nfp_flower_cmsg_portreify_rx(struct nfp_app *app, struct sk_buff *skb) msg = nfp_flower_cmsg_get_data(skb); rcu_read_lock(); - exists = !!nfp_app_repr_get(app, be32_to_cpu(msg->portnum)); + exists = !!nfp_app_dev_get(app, be32_to_cpu(msg->portnum), NULL); rcu_read_unlock(); if (!exists) { nfp_flower_cmsg_warn(app, "ctrl msg for unknown port 0x%08x\n", @@ -205,6 +205,50 @@ nfp_flower_cmsg_portreify_rx(struct nfp_app *app, struct sk_buff *skb) } static void +nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb) +{ + unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb); + struct nfp_flower_cmsg_merge_hint *msg; + struct nfp_fl_payload *sub_flows[2]; + int err, i, flow_cnt; + + msg = nfp_flower_cmsg_get_data(skb); + /* msg->count starts at 0 and always assumes at least 1 entry. 
*/ + flow_cnt = msg->count + 1; + + if (msg_len < struct_size(msg, flow, flow_cnt)) { + nfp_flower_cmsg_warn(app, "Merge hint ctrl msg too short - %d bytes but expect %zd\n", + msg_len, struct_size(msg, flow, flow_cnt)); + return; + } + + if (flow_cnt != 2) { + nfp_flower_cmsg_warn(app, "Merge hint contains %d flows - two are expected\n", + flow_cnt); + return; + } + + rtnl_lock(); + for (i = 0; i < flow_cnt; i++) { + u32 ctx = be32_to_cpu(msg->flow[i].host_ctx); + + sub_flows[i] = nfp_flower_get_fl_payload_from_ctx(app, ctx); + if (!sub_flows[i]) { + nfp_flower_cmsg_warn(app, "Invalid flow in merge hint\n"); + goto err_rtnl_unlock; + } + } + + err = nfp_flower_merge_offloaded_flows(app, sub_flows[0], sub_flows[1]); + /* Only warn on memory fail. Hint veto will not break functionality. */ + if (err == -ENOMEM) + nfp_flower_cmsg_warn(app, "Flow merge memory fail.\n"); + +err_rtnl_unlock: + rtnl_unlock(); +} + +static void nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb) { struct nfp_flower_priv *app_priv = app->priv; @@ -222,6 +266,12 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb) case NFP_FLOWER_CMSG_TYPE_PORT_MOD: nfp_flower_cmsg_portmod_rx(app, skb); break; + case NFP_FLOWER_CMSG_TYPE_MERGE_HINT: + if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MERGE) { + nfp_flower_cmsg_merge_hint_rx(app, skb); + break; + } + goto err_default; case NFP_FLOWER_CMSG_TYPE_NO_NEIGH: nfp_tunnel_request_route(app, skb); break; @@ -235,6 +285,7 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb) } /* fall through */ default: +err_default: nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n", type); goto out; diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index 0ed51e79db00..a10c29ade5c2 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -402,11 +402,13 @@ struct nfp_flower_cmsg_hdr { /* Types defined for port related control messages */ enum nfp_flower_cmsg_type_port { NFP_FLOWER_CMSG_TYPE_FLOW_ADD = 0, + NFP_FLOWER_CMSG_TYPE_FLOW_MOD = 1, NFP_FLOWER_CMSG_TYPE_FLOW_DEL = 2, NFP_FLOWER_CMSG_TYPE_LAG_CONFIG = 4, NFP_FLOWER_CMSG_TYPE_PORT_REIFY = 6, NFP_FLOWER_CMSG_TYPE_MAC_REPR = 7, NFP_FLOWER_CMSG_TYPE_PORT_MOD = 8, + NFP_FLOWER_CMSG_TYPE_MERGE_HINT = 9, NFP_FLOWER_CMSG_TYPE_NO_NEIGH = 10, NFP_FLOWER_CMSG_TYPE_TUN_MAC = 11, NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS = 12, @@ -451,6 +453,16 @@ struct nfp_flower_cmsg_portreify { #define NFP_FLOWER_CMSG_PORTREIFY_INFO_EXIST BIT(0) +/* NFP_FLOWER_CMSG_TYPE_FLOW_MERGE_HINT */ +struct nfp_flower_cmsg_merge_hint { + u8 reserved[3]; + u8 count; + struct { + __be32 host_ctx; + __be64 host_cookie; + } __packed flow[0]; +}; + enum nfp_flower_cmsg_port_type { NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC = 0x0, NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT = 0x1, @@ -473,6 +485,13 @@ enum nfp_flower_cmsg_port_vnic_type { #define NFP_FLOWER_CMSG_PORT_PCIE_Q GENMASK(5, 0) #define NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM GENMASK(7, 0) +static inline u32 nfp_flower_internal_port_get_port_id(u8 internal_port) +{ + return FIELD_PREP(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM, internal_port) | + FIELD_PREP(NFP_FLOWER_CMSG_PORT_TYPE, + NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT); +} + static inline u32 nfp_flower_cmsg_phys_port(u8 phys_port) { return FIELD_PREP(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM, phys_port) | diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c 
b/drivers/net/ethernet/netronome/nfp/flower/main.c index 408089133599..d476917c8f7d 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c @@ -22,6 +22,9 @@ #define NFP_FLOWER_ALLOWED_VER 0x0001000000010000UL +#define NFP_MIN_INT_PORT_ID 1 +#define NFP_MAX_INT_PORT_ID 256 + static const char *nfp_flower_extra_cap(struct nfp_app *app, struct nfp_net *nn) { return "FLOWER"; @@ -32,6 +35,113 @@ static enum devlink_eswitch_mode eswitch_mode_get(struct nfp_app *app) return DEVLINK_ESWITCH_MODE_SWITCHDEV; } +static int +nfp_flower_lookup_internal_port_id(struct nfp_flower_priv *priv, + struct net_device *netdev) +{ + struct net_device *entry; + int i, id = 0; + + rcu_read_lock(); + idr_for_each_entry(&priv->internal_ports.port_ids, entry, i) + if (entry == netdev) { + id = i; + break; + } + rcu_read_unlock(); + + return id; +} + +static int +nfp_flower_get_internal_port_id(struct nfp_app *app, struct net_device *netdev) +{ + struct nfp_flower_priv *priv = app->priv; + int id; + + id = nfp_flower_lookup_internal_port_id(priv, netdev); + if (id > 0) + return id; + + idr_preload(GFP_ATOMIC); + spin_lock_bh(&priv->internal_ports.lock); + id = idr_alloc(&priv->internal_ports.port_ids, netdev, + NFP_MIN_INT_PORT_ID, NFP_MAX_INT_PORT_ID, GFP_ATOMIC); + spin_unlock_bh(&priv->internal_ports.lock); + idr_preload_end(); + + return id; +} + +u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app, + struct net_device *netdev) +{ + int ext_port; + + if (nfp_netdev_is_nfp_repr(netdev)) { + return nfp_repr_get_port_id(netdev); + } else if (nfp_flower_internal_port_can_offload(app, netdev)) { + ext_port = nfp_flower_get_internal_port_id(app, netdev); + if (ext_port < 0) + return 0; + + return nfp_flower_internal_port_get_port_id(ext_port); + } + + return 0; +} + +static struct net_device * +nfp_flower_get_netdev_from_internal_port_id(struct nfp_app *app, int port_id) +{ + struct nfp_flower_priv *priv = app->priv; + struct net_device *netdev; + + rcu_read_lock(); + netdev = idr_find(&priv->internal_ports.port_ids, port_id); + rcu_read_unlock(); + + return netdev; +} + +static void +nfp_flower_free_internal_port_id(struct nfp_app *app, struct net_device *netdev) +{ + struct nfp_flower_priv *priv = app->priv; + int id; + + id = nfp_flower_lookup_internal_port_id(priv, netdev); + if (!id) + return; + + spin_lock_bh(&priv->internal_ports.lock); + idr_remove(&priv->internal_ports.port_ids, id); + spin_unlock_bh(&priv->internal_ports.lock); +} + +static int +nfp_flower_internal_port_event_handler(struct nfp_app *app, + struct net_device *netdev, + unsigned long event) +{ + if (event == NETDEV_UNREGISTER && + nfp_flower_internal_port_can_offload(app, netdev)) + nfp_flower_free_internal_port_id(app, netdev); + + return NOTIFY_OK; +} + +static void nfp_flower_internal_port_init(struct nfp_flower_priv *priv) +{ + spin_lock_init(&priv->internal_ports.lock); + idr_init(&priv->internal_ports.port_ids); +} + +static void nfp_flower_internal_port_cleanup(struct nfp_flower_priv *priv) +{ + idr_destroy(&priv->internal_ports.port_ids); +} + static struct nfp_flower_non_repr_priv * nfp_flower_non_repr_priv_lookup(struct nfp_app *app, struct net_device *netdev) { @@ -119,12 +229,21 @@ nfp_flower_repr_get_type_and_port(struct nfp_app *app, u32 port_id, u8 *port) } static struct net_device * -nfp_flower_repr_get(struct nfp_app *app, u32 port_id) +nfp_flower_dev_get(struct nfp_app *app, u32 port_id, bool *redir_egress) { enum nfp_repr_type repr_type; struct nfp_reprs 
*reprs; u8 port = 0; + /* Check if the port is internal. */ + if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port_id) == + NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT) { + if (redir_egress) + *redir_egress = true; + port = FIELD_GET(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM, port_id); + return nfp_flower_get_netdev_from_internal_port_id(app, port); + } + repr_type = nfp_flower_repr_get_type_and_port(app, port_id, &port); if (repr_type > NFP_REPR_TYPE_MAX) return NULL; @@ -641,11 +760,30 @@ static int nfp_flower_init(struct nfp_app *app) goto err_cleanup_metadata; } + if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MOD) { + /* Tell the firmware that the driver supports flow merging. */ + err = nfp_rtsym_write_le(app->pf->rtbl, + "_abi_flower_merge_hint_enable", 1); + if (!err) { + app_priv->flower_ext_feats |= NFP_FL_FEATS_FLOW_MERGE; + nfp_flower_internal_port_init(app_priv); + } else if (err == -ENOENT) { + nfp_warn(app->cpp, "Flow merge not supported by FW.\n"); + } else { + goto err_lag_clean; + } + } else { + nfp_warn(app->cpp, "Flow mod/merge not supported by FW.\n"); + } + INIT_LIST_HEAD(&app_priv->indr_block_cb_priv); INIT_LIST_HEAD(&app_priv->non_repr_priv); return 0; +err_lag_clean: + if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) + nfp_flower_lag_cleanup(&app_priv->nfp_lag); err_cleanup_metadata: nfp_flower_metadata_cleanup(app); err_free_app_priv: @@ -664,6 +802,9 @@ static void nfp_flower_clean(struct nfp_app *app) if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) nfp_flower_lag_cleanup(&app_priv->nfp_lag); + if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MERGE) + nfp_flower_internal_port_cleanup(app_priv); + nfp_flower_metadata_cleanup(app); vfree(app->priv); app->priv = NULL; @@ -762,6 +903,10 @@ nfp_flower_netdev_event(struct nfp_app *app, struct net_device *netdev, if (ret & NOTIFY_STOP_MASK) return ret; + ret = nfp_flower_internal_port_event_handler(app, netdev, event); + if (ret & NOTIFY_STOP_MASK) + return ret; + return nfp_tunnel_mac_event_handler(app, netdev, event, ptr); } @@ -800,7 +945,7 @@ const struct nfp_app_type app_flower = { .sriov_disable = nfp_flower_sriov_disable, .eswitch_mode_get = eswitch_mode_get, - .repr_get = nfp_flower_repr_get, + .dev_get = nfp_flower_dev_get, .setup_tc = nfp_flower_setup_tc, }; diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index c0945a5fd1a4..675f43f06526 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -34,14 +34,13 @@ struct nfp_app; #define NFP_FL_MASK_REUSE_TIME_NS 40000 #define NFP_FL_MASK_ID_LOCATION 1 -#define NFP_FL_VXLAN_PORT 4789 -#define NFP_FL_GENEVE_PORT 6081 - /* Extra features bitmap. 
*/ #define NFP_FL_FEATS_GENEVE BIT(0) #define NFP_FL_NBI_MTU_SETTING BIT(1) #define NFP_FL_FEATS_GENEVE_OPT BIT(2) #define NFP_FL_FEATS_VLAN_PCP BIT(3) +#define NFP_FL_FEATS_FLOW_MOD BIT(5) +#define NFP_FL_FEATS_FLOW_MERGE BIT(30) #define NFP_FL_FEATS_LAG BIT(31) struct nfp_fl_mask_id { @@ -118,6 +117,16 @@ struct nfp_fl_lag { }; /** + * struct nfp_fl_internal_ports - Flower APP priv data for additional ports + * @port_ids: Assignment of ids to any additional ports + * @lock: Lock for extra ports list + */ +struct nfp_fl_internal_ports { + struct idr port_ids; + spinlock_t lock; +}; + +/** * struct nfp_flower_priv - Flower APP per-vNIC priv data * @app: Back pointer to app * @nn: Pointer to vNIC @@ -131,6 +140,7 @@ struct nfp_fl_lag { * @flow_table: Hash table used to store flower rules * @stats: Stored stats updates for flower rules * @stats_lock: Lock for flower rule stats updates + * @stats_ctx_table: Hash table to map stats contexts to its flow rule * @cmsg_work: Workqueue for control messages processing * @cmsg_skbs_high: List of higher priority skbs for control message * processing @@ -146,6 +156,7 @@ struct nfp_fl_lag { * @non_repr_priv: List of offloaded non-repr ports and their priv data * @active_mem_unit: Current active memory unit for flower rules * @total_mem_units: Total number of available memory units for flower rules + * @internal_ports: Internal port ids used in offloaded rules */ struct nfp_flower_priv { struct nfp_app *app; @@ -160,6 +171,7 @@ struct nfp_flower_priv { struct rhashtable flow_table; struct nfp_fl_stats *stats; spinlock_t stats_lock; /* lock stats */ + struct rhashtable stats_ctx_table; struct work_struct cmsg_work; struct sk_buff_head cmsg_skbs_high; struct sk_buff_head cmsg_skbs_low; @@ -172,6 +184,7 @@ struct nfp_flower_priv { struct list_head non_repr_priv; unsigned int active_mem_unit; unsigned int total_mem_units; + struct nfp_fl_internal_ports internal_ports; }; /** @@ -239,6 +252,25 @@ struct nfp_fl_payload { char *unmasked_data; char *mask_data; char *action_data; + struct list_head linked_flows; + bool in_hw; +}; + +struct nfp_fl_payload_link { + /* A link contains a pointer to a merge flow and an associated sub_flow. + * Each merge flow will feature in 2 links to its underlying sub_flows. + * A sub_flow will have at least 1 link to a merge flow or more if it + * has been used to create multiple merge flows. + * + * For a merge flow, 'linked_flows' in its nfp_fl_payload struct lists + * all links to sub_flows (sub_flow.flow) via merge.list. + * For a sub_flow, 'linked_flows' gives all links to merge flows it has + * formed (merge_flow.flow) via sub_flow.list. + */ + struct { + struct list_head list; + struct nfp_fl_payload *flow; + } merge_flow, sub_flow; }; extern const struct rhashtable_params nfp_flower_table_params; @@ -250,12 +282,40 @@ struct nfp_fl_stats_frame { __be64 stats_cookie; }; +static inline bool +nfp_flower_internal_port_can_offload(struct nfp_app *app, + struct net_device *netdev) +{ + struct nfp_flower_priv *app_priv = app->priv; + + if (!(app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MERGE)) + return false; + if (!netdev->rtnl_link_ops) + return false; + if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch")) + return true; + + return false; +} + +/* The address of the merged flow acts as its cookie. + * Cookies supplied to us by TC flower are also addresses to allocated + * memory and thus this scheme should not generate any collisions. 
+ */ +static inline bool nfp_flower_is_merge_flow(struct nfp_fl_payload *flow_pay) +{ + return flow_pay->tc_flower_cookie == (unsigned long)flow_pay; +} + int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count, unsigned int host_ctx_split); void nfp_flower_metadata_cleanup(struct nfp_app *app); int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev, enum tc_setup_type type, void *type_data); +int nfp_flower_merge_offloaded_flows(struct nfp_app *app, + struct nfp_fl_payload *sub_flow1, + struct nfp_fl_payload *sub_flow2); int nfp_flower_compile_flow_match(struct nfp_app *app, struct tc_cls_flower_offload *flow, struct nfp_fl_key_ls *key_ls, @@ -270,6 +330,8 @@ int nfp_compile_flow_metadata(struct nfp_app *app, struct tc_cls_flower_offload *flow, struct nfp_fl_payload *nfp_flow, struct net_device *netdev); +void __nfp_modify_flow_metadata(struct nfp_flower_priv *priv, + struct nfp_fl_payload *nfp_flow); int nfp_modify_flow_metadata(struct nfp_app *app, struct nfp_fl_payload *nfp_flow); @@ -277,6 +339,8 @@ struct nfp_fl_payload * nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie, struct net_device *netdev); struct nfp_fl_payload * +nfp_flower_get_fl_payload_from_ctx(struct nfp_app *app, u32 ctx_id); +struct nfp_fl_payload * nfp_flower_remove_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie); void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb); @@ -314,4 +378,6 @@ void __nfp_flower_non_repr_priv_put(struct nfp_flower_non_repr_priv *non_repr_priv); void nfp_flower_non_repr_priv_put(struct nfp_app *app, struct net_device *netdev); +u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app, + struct net_device *netdev); #endif diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c index 9b8b843d0340..bfa4bf34911d 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/match.c +++ b/drivers/net/ethernet/netronome/nfp/flower/match.c @@ -326,13 +326,12 @@ int nfp_flower_compile_flow_match(struct nfp_app *app, struct nfp_fl_payload *nfp_flow, enum nfp_flower_tun_type tun_type) { - u32 cmsg_port = 0; + u32 port_id; int err; u8 *ext; u8 *msk; - if (nfp_netdev_is_nfp_repr(netdev)) - cmsg_port = nfp_repr_get_port_id(netdev); + port_id = nfp_flower_get_port_id_from_netdev(app, netdev); memset(nfp_flow->unmasked_data, 0, key_ls->key_size); memset(nfp_flow->mask_data, 0, key_ls->key_size); @@ -358,13 +357,13 @@ int nfp_flower_compile_flow_match(struct nfp_app *app, /* Populate Exact Port data. */ err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext, - cmsg_port, false, tun_type); + port_id, false, tun_type); if (err) return err; /* Populate Mask Port Data. 
*/ err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk, - cmsg_port, true, tun_type); + port_id, true, tun_type); if (err) return err; diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c index 492837b852b6..3d326efdc814 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c +++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c @@ -24,6 +24,18 @@ struct nfp_fl_flow_table_cmp_arg { unsigned long cookie; }; +struct nfp_fl_stats_ctx_to_flow { + struct rhash_head ht_node; + u32 stats_cxt; + struct nfp_fl_payload *flow; +}; + +static const struct rhashtable_params stats_ctx_table_params = { + .key_offset = offsetof(struct nfp_fl_stats_ctx_to_flow, stats_cxt), + .head_offset = offsetof(struct nfp_fl_stats_ctx_to_flow, ht_node), + .key_len = sizeof(u32), +}; + static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id) { struct nfp_flower_priv *priv = app->priv; @@ -264,9 +276,6 @@ nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len, if (!mask_entry) return false; - if (meta_flags) - *meta_flags &= ~NFP_FL_META_FLAG_MANAGE_MASK; - *mask_id = mask_entry->mask_id; mask_entry->ref_cnt--; if (!mask_entry->ref_cnt) { @@ -285,25 +294,42 @@ int nfp_compile_flow_metadata(struct nfp_app *app, struct nfp_fl_payload *nfp_flow, struct net_device *netdev) { + struct nfp_fl_stats_ctx_to_flow *ctx_entry; struct nfp_flower_priv *priv = app->priv; struct nfp_fl_payload *check_entry; u8 new_mask_id; u32 stats_cxt; + int err; - if (nfp_get_stats_entry(app, &stats_cxt)) - return -ENOENT; + err = nfp_get_stats_entry(app, &stats_cxt); + if (err) + return err; nfp_flow->meta.host_ctx_id = cpu_to_be32(stats_cxt); nfp_flow->meta.host_cookie = cpu_to_be64(flow->cookie); nfp_flow->ingress_dev = netdev; + ctx_entry = kzalloc(sizeof(*ctx_entry), GFP_KERNEL); + if (!ctx_entry) { + err = -ENOMEM; + goto err_release_stats; + } + + ctx_entry->stats_cxt = stats_cxt; + ctx_entry->flow = nfp_flow; + + if (rhashtable_insert_fast(&priv->stats_ctx_table, &ctx_entry->ht_node, + stats_ctx_table_params)) { + err = -ENOMEM; + goto err_free_ctx_entry; + } + new_mask_id = 0; if (!nfp_check_mask_add(app, nfp_flow->mask_data, nfp_flow->meta.mask_len, &nfp_flow->meta.flags, &new_mask_id)) { - if (nfp_release_stats_entry(app, stats_cxt)) - return -EINVAL; - return -ENOENT; + err = -ENOENT; + goto err_remove_rhash; } nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version); @@ -317,43 +343,82 @@ int nfp_compile_flow_metadata(struct nfp_app *app, check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev); if (check_entry) { - if (nfp_release_stats_entry(app, stats_cxt)) - return -EINVAL; - - if (!nfp_check_mask_remove(app, nfp_flow->mask_data, - nfp_flow->meta.mask_len, - NULL, &new_mask_id)) - return -EINVAL; - - return -EEXIST; + err = -EEXIST; + goto err_remove_mask; } return 0; + +err_remove_mask: + nfp_check_mask_remove(app, nfp_flow->mask_data, nfp_flow->meta.mask_len, + NULL, &new_mask_id); +err_remove_rhash: + WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table, + &ctx_entry->ht_node, + stats_ctx_table_params)); +err_free_ctx_entry: + kfree(ctx_entry); +err_release_stats: + nfp_release_stats_entry(app, stats_cxt); + + return err; +} + +void __nfp_modify_flow_metadata(struct nfp_flower_priv *priv, + struct nfp_fl_payload *nfp_flow) +{ + nfp_flow->meta.flags &= ~NFP_FL_META_FLAG_MANAGE_MASK; + nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version); + priv->flower_version++; } 
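/* [Editorial sketch - not part of the patch] Lifecycle of the reverse
 * map introduced above: nfp_compile_flow_metadata() now pairs every
 * stats context allocation with a stats-context -> flow entry, merge
 * hints resolve firmware host_ctx ids through it (see
 * nfp_flower_get_fl_payload_from_ctx() below), and the entry is torn
 * down again when the context is released. The helper below condenses
 * that teardown step for illustration only; its name is hypothetical.
 */
static int example_unregister_flow_ctx(struct nfp_flower_priv *priv,
				       u32 stats_cxt)
{
	struct nfp_fl_stats_ctx_to_flow *ctx_entry;

	ctx_entry = rhashtable_lookup_fast(&priv->stats_ctx_table,
					   &stats_cxt,
					   stats_ctx_table_params);
	if (!ctx_entry)
		return -ENOENT;

	/* Remove-then-free; the numeric context id itself is recycled
	 * separately through nfp_release_stats_entry().
	 */
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table,
					    &ctx_entry->ht_node,
					    stats_ctx_table_params));
	kfree(ctx_entry);
	return 0;
}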
int nfp_modify_flow_metadata(struct nfp_app *app, struct nfp_fl_payload *nfp_flow) { + struct nfp_fl_stats_ctx_to_flow *ctx_entry; struct nfp_flower_priv *priv = app->priv; u8 new_mask_id = 0; u32 temp_ctx_id; + __nfp_modify_flow_metadata(priv, nfp_flow); + nfp_check_mask_remove(app, nfp_flow->mask_data, nfp_flow->meta.mask_len, &nfp_flow->meta.flags, &new_mask_id); - nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version); - priv->flower_version++; - /* Update flow payload with mask ids. */ nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id; - /* Release the stats ctx id. */ + /* Release the stats ctx id and ctx to flow table entry. */ temp_ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id); + ctx_entry = rhashtable_lookup_fast(&priv->stats_ctx_table, &temp_ctx_id, + stats_ctx_table_params); + if (!ctx_entry) + return -ENOENT; + + WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table, + &ctx_entry->ht_node, + stats_ctx_table_params)); + kfree(ctx_entry); + return nfp_release_stats_entry(app, temp_ctx_id); } +struct nfp_fl_payload * +nfp_flower_get_fl_payload_from_ctx(struct nfp_app *app, u32 ctx_id) +{ + struct nfp_fl_stats_ctx_to_flow *ctx_entry; + struct nfp_flower_priv *priv = app->priv; + + ctx_entry = rhashtable_lookup_fast(&priv->stats_ctx_table, &ctx_id, + stats_ctx_table_params); + if (!ctx_entry) + return NULL; + + return ctx_entry->flow; +} + static int nfp_fl_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *obj) { @@ -403,6 +468,10 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count, if (err) return err; + err = rhashtable_init(&priv->stats_ctx_table, &stats_ctx_table_params); + if (err) + goto err_free_flow_table; + get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed)); /* Init ring buffer and unallocated mask_ids. 
*/ @@ -410,7 +479,7 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count, kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS, NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL); if (!priv->mask_ids.mask_id_free_list.buf) - goto err_free_flow_table; + goto err_free_stats_ctx_table; priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1; @@ -447,6 +516,8 @@ err_free_last_used: kfree(priv->mask_ids.last_used); err_free_mask_id: kfree(priv->mask_ids.mask_id_free_list.buf); +err_free_stats_ctx_table: + rhashtable_destroy(&priv->stats_ctx_table); err_free_flow_table: rhashtable_destroy(&priv->flow_table); return -ENOMEM; @@ -461,6 +532,8 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app) rhashtable_free_and_destroy(&priv->flow_table, nfp_check_rhashtable_empty, NULL); + rhashtable_free_and_destroy(&priv->stats_ctx_table, + nfp_check_rhashtable_empty, NULL); kvfree(priv->stats); kfree(priv->mask_ids.mask_id_free_list.buf); kfree(priv->mask_ids.last_used); diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 450d7296fd57..aefe211da82c 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -55,6 +55,28 @@ BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \ BIT(FLOW_DISSECTOR_KEY_ENC_PORTS)) +#define NFP_FLOWER_MERGE_FIELDS \ + (NFP_FLOWER_LAYER_PORT | \ + NFP_FLOWER_LAYER_MAC | \ + NFP_FLOWER_LAYER_TP | \ + NFP_FLOWER_LAYER_IPV4 | \ + NFP_FLOWER_LAYER_IPV6) + +struct nfp_flower_merge_check { + union { + struct { + __be16 tci; + struct nfp_flower_mac_mpls l2; + struct nfp_flower_tp_ports l4; + union { + struct nfp_flower_ipv4 ipv4; + struct nfp_flower_ipv6 ipv6; + }; + }; + unsigned long vals[8]; + }; +}; + static int nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow, u8 mtype) @@ -195,7 +217,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, flow_rule_match_enc_opts(rule, &enc_op); switch (enc_ports.key->dst) { - case htons(NFP_FL_VXLAN_PORT): + case htons(IANA_VXLAN_UDP_PORT): *tun_type = NFP_FL_TUNNEL_VXLAN; key_layer |= NFP_FLOWER_LAYER_VXLAN; key_size += sizeof(struct nfp_flower_ipv4_udp_tun); @@ -203,7 +225,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, if (enc_op.key) return -EOPNOTSUPP; break; - case htons(NFP_FL_GENEVE_PORT): + case htons(GENEVE_UDP_PORT): if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)) return -EOPNOTSUPP; *tun_type = NFP_FL_TUNNEL_GENEVE; @@ -326,7 +348,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, break; case cpu_to_be16(ETH_P_IPV6): - key_layer |= NFP_FLOWER_LAYER_IPV6; + key_layer |= NFP_FLOWER_LAYER_IPV6; key_size += sizeof(struct nfp_flower_ipv6); break; @@ -376,6 +398,8 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer) flow_pay->nfp_tun_ipv4_addr = 0; flow_pay->meta.flags = 0; + INIT_LIST_HEAD(&flow_pay->linked_flows); + flow_pay->in_hw = false; return flow_pay; @@ -388,6 +412,447 @@ err_free_flow: return NULL; } +static int +nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow, + struct nfp_flower_merge_check *merge, + u8 *last_act_id, int *act_out) +{ + struct nfp_fl_set_ipv6_tc_hl_fl *ipv6_tc_hl_fl; + struct nfp_fl_set_ip4_ttl_tos *ipv4_ttl_tos; + struct nfp_fl_set_ip4_addrs *ipv4_add; + struct nfp_fl_set_ipv6_addr *ipv6_add; + struct nfp_fl_push_vlan *push_vlan; + struct nfp_fl_set_tport *tport; + struct nfp_fl_set_eth *eth; + struct nfp_fl_act_head *a; + unsigned int act_off = 0; + u8 act_id = 0; + u8 *ports; + int i; + + while (act_off < 
flow->meta.act_len) { + a = (struct nfp_fl_act_head *)&flow->action_data[act_off]; + act_id = a->jump_id; + + switch (act_id) { + case NFP_FL_ACTION_OPCODE_OUTPUT: + if (act_out) + (*act_out)++; + break; + case NFP_FL_ACTION_OPCODE_PUSH_VLAN: + push_vlan = (struct nfp_fl_push_vlan *)a; + if (push_vlan->vlan_tci) + merge->tci = cpu_to_be16(0xffff); + break; + case NFP_FL_ACTION_OPCODE_POP_VLAN: + merge->tci = cpu_to_be16(0); + break; + case NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL: + /* New tunnel header means l2 to l4 can be matched. */ + eth_broadcast_addr(&merge->l2.mac_dst[0]); + eth_broadcast_addr(&merge->l2.mac_src[0]); + memset(&merge->l4, 0xff, + sizeof(struct nfp_flower_tp_ports)); + memset(&merge->ipv4, 0xff, + sizeof(struct nfp_flower_ipv4)); + break; + case NFP_FL_ACTION_OPCODE_SET_ETHERNET: + eth = (struct nfp_fl_set_eth *)a; + for (i = 0; i < ETH_ALEN; i++) + merge->l2.mac_dst[i] |= eth->eth_addr_mask[i]; + for (i = 0; i < ETH_ALEN; i++) + merge->l2.mac_src[i] |= + eth->eth_addr_mask[ETH_ALEN + i]; + break; + case NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS: + ipv4_add = (struct nfp_fl_set_ip4_addrs *)a; + merge->ipv4.ipv4_src |= ipv4_add->ipv4_src_mask; + merge->ipv4.ipv4_dst |= ipv4_add->ipv4_dst_mask; + break; + case NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS: + ipv4_ttl_tos = (struct nfp_fl_set_ip4_ttl_tos *)a; + merge->ipv4.ip_ext.ttl |= ipv4_ttl_tos->ipv4_ttl_mask; + merge->ipv4.ip_ext.tos |= ipv4_ttl_tos->ipv4_tos_mask; + break; + case NFP_FL_ACTION_OPCODE_SET_IPV6_SRC: + ipv6_add = (struct nfp_fl_set_ipv6_addr *)a; + for (i = 0; i < 4; i++) + merge->ipv6.ipv6_src.in6_u.u6_addr32[i] |= + ipv6_add->ipv6[i].mask; + break; + case NFP_FL_ACTION_OPCODE_SET_IPV6_DST: + ipv6_add = (struct nfp_fl_set_ipv6_addr *)a; + for (i = 0; i < 4; i++) + merge->ipv6.ipv6_dst.in6_u.u6_addr32[i] |= + ipv6_add->ipv6[i].mask; + break; + case NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL: + ipv6_tc_hl_fl = (struct nfp_fl_set_ipv6_tc_hl_fl *)a; + merge->ipv6.ip_ext.ttl |= + ipv6_tc_hl_fl->ipv6_hop_limit_mask; + merge->ipv6.ip_ext.tos |= ipv6_tc_hl_fl->ipv6_tc_mask; + merge->ipv6.ipv6_flow_label_exthdr |= + ipv6_tc_hl_fl->ipv6_label_mask; + break; + case NFP_FL_ACTION_OPCODE_SET_UDP: + case NFP_FL_ACTION_OPCODE_SET_TCP: + tport = (struct nfp_fl_set_tport *)a; + ports = (u8 *)&merge->l4.port_src; + for (i = 0; i < 4; i++) + ports[i] |= tport->tp_port_mask[i]; + break; + case NFP_FL_ACTION_OPCODE_PRE_TUNNEL: + case NFP_FL_ACTION_OPCODE_PRE_LAG: + case NFP_FL_ACTION_OPCODE_PUSH_GENEVE: + break; + default: + return -EOPNOTSUPP; + } + + act_off += a->len_lw << NFP_FL_LW_SIZ; + } + + if (last_act_id) + *last_act_id = act_id; + + return 0; +} + +static int +nfp_flower_populate_merge_match(struct nfp_fl_payload *flow, + struct nfp_flower_merge_check *merge, + bool extra_fields) +{ + struct nfp_flower_meta_tci *meta_tci; + u8 *mask = flow->mask_data; + u8 key_layer, match_size; + + memset(merge, 0, sizeof(struct nfp_flower_merge_check)); + + meta_tci = (struct nfp_flower_meta_tci *)mask; + key_layer = meta_tci->nfp_flow_key_layer; + + if (key_layer & ~NFP_FLOWER_MERGE_FIELDS && !extra_fields) + return -EOPNOTSUPP; + + merge->tci = meta_tci->tci; + mask += sizeof(struct nfp_flower_meta_tci); + + if (key_layer & NFP_FLOWER_LAYER_EXT_META) + mask += sizeof(struct nfp_flower_ext_meta); + + mask += sizeof(struct nfp_flower_in_port); + + if (key_layer & NFP_FLOWER_LAYER_MAC) { + match_size = sizeof(struct nfp_flower_mac_mpls); + memcpy(&merge->l2, mask, match_size); + mask += match_size; + } + + if (key_layer & NFP_FLOWER_LAYER_TP) { 
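+ /* Record the L4 port match mask for the merge check. */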
+ match_size = sizeof(struct nfp_flower_tp_ports); + memcpy(&merge->l4, mask, match_size); + mask += match_size; + } + + if (key_layer & NFP_FLOWER_LAYER_IPV4) { + match_size = sizeof(struct nfp_flower_ipv4); + memcpy(&merge->ipv4, mask, match_size); + } + + if (key_layer & NFP_FLOWER_LAYER_IPV6) { + match_size = sizeof(struct nfp_flower_ipv6); + memcpy(&merge->ipv6, mask, match_size); + } + + return 0; +} + +static int +nfp_flower_can_merge(struct nfp_fl_payload *sub_flow1, + struct nfp_fl_payload *sub_flow2) +{ + /* Two flows can be merged if sub_flow2 only matches on bits that are + * either matched by sub_flow1 or set by a sub_flow1 action. This + * ensures that every packet that hits sub_flow1 and recirculates is + * guaranteed to hit sub_flow2. + */ + struct nfp_flower_merge_check sub_flow1_merge, sub_flow2_merge; + int err, act_out = 0; + u8 last_act_id = 0; + + err = nfp_flower_populate_merge_match(sub_flow1, &sub_flow1_merge, + true); + if (err) + return err; + + err = nfp_flower_populate_merge_match(sub_flow2, &sub_flow2_merge, + false); + if (err) + return err; + + err = nfp_flower_update_merge_with_actions(sub_flow1, &sub_flow1_merge, + &last_act_id, &act_out); + if (err) + return err; + + /* Must only be 1 output action and it must be the last in sequence. */ + if (act_out != 1 || last_act_id != NFP_FL_ACTION_OPCODE_OUTPUT) + return -EOPNOTSUPP; + + /* Reject merge if sub_flow2 matches on something that is not matched + * on or set in an action by sub_flow1. + */ + err = bitmap_andnot(sub_flow2_merge.vals, sub_flow2_merge.vals, + sub_flow1_merge.vals, + sizeof(struct nfp_flower_merge_check) * 8); + if (err) + return -EINVAL; + + return 0; +} + +static unsigned int +nfp_flower_copy_pre_actions(char *act_dst, char *act_src, int len, + bool *tunnel_act) +{ + unsigned int act_off = 0, act_len; + struct nfp_fl_act_head *a; + u8 act_id = 0; + + while (act_off < len) { + a = (struct nfp_fl_act_head *)&act_src[act_off]; + act_len = a->len_lw << NFP_FL_LW_SIZ; + act_id = a->jump_id; + + switch (act_id) { + case NFP_FL_ACTION_OPCODE_PRE_TUNNEL: + if (tunnel_act) + *tunnel_act = true; + /* fall through */ + case NFP_FL_ACTION_OPCODE_PRE_LAG: + memcpy(act_dst + act_off, act_src + act_off, act_len); + break; + default: + return act_off; + } + + act_off += act_len; + } + + return act_off; +} + +static int nfp_fl_verify_post_tun_acts(char *acts, int len) +{ + struct nfp_fl_act_head *a; + unsigned int act_off = 0; + + while (act_off < len) { + a = (struct nfp_fl_act_head *)&acts[act_off]; + if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT) + return -EOPNOTSUPP; + + act_off += a->len_lw << NFP_FL_LW_SIZ; + } + + return 0; +} + +static int +nfp_flower_merge_action(struct nfp_fl_payload *sub_flow1, + struct nfp_fl_payload *sub_flow2, + struct nfp_fl_payload *merge_flow) +{ + unsigned int sub1_act_len, sub2_act_len, pre_off1, pre_off2; + bool tunnel_act = false; + char *merge_act; + int err; + + /* The last action of sub_flow1 must be output - it is not copied into + * the merged action list. + */ + sub1_act_len = sub_flow1->meta.act_len - sizeof(struct nfp_fl_output); + sub2_act_len = sub_flow2->meta.act_len; + + if (!sub2_act_len) + return -EINVAL; + + if (sub1_act_len + sub2_act_len > NFP_FL_MAX_A_SIZ) + return -EINVAL; + + /* A shortcut can only be applied if there is a single action.
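+ * If sub_flow1 still contributes actions of its own, fall back to + * NFP_FL_SC_ACT_NULL; otherwise inherit sub_flow2's shortcut.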
*/ + if (sub1_act_len) + merge_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL); + else + merge_flow->meta.shortcut = sub_flow2->meta.shortcut; + + merge_flow->meta.act_len = sub1_act_len + sub2_act_len; + merge_act = merge_flow->action_data; + + /* Copy any pre-actions to the start of merge flow action list. */ + pre_off1 = nfp_flower_copy_pre_actions(merge_act, + sub_flow1->action_data, + sub1_act_len, &tunnel_act); + merge_act += pre_off1; + sub1_act_len -= pre_off1; + pre_off2 = nfp_flower_copy_pre_actions(merge_act, + sub_flow2->action_data, + sub2_act_len, NULL); + merge_act += pre_off2; + sub2_act_len -= pre_off2; + + /* FW does a tunnel push when egressing, therefore, if sub_flow 1 pushes + * a tunnel, sub_flow 2 can only have output actions for a valid merge. + */ + if (tunnel_act) { + char *post_tun_acts = &sub_flow2->action_data[pre_off2]; + + err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len); + if (err) + return err; + } + + /* Copy remaining actions from sub_flows 1 and 2. */ + memcpy(merge_act, sub_flow1->action_data + pre_off1, sub1_act_len); + merge_act += sub1_act_len; + memcpy(merge_act, sub_flow2->action_data + pre_off2, sub2_act_len); + + return 0; +} + +/* Flow link code should only be accessed under RTNL. */ +static void nfp_flower_unlink_flow(struct nfp_fl_payload_link *link) +{ + list_del(&link->merge_flow.list); + list_del(&link->sub_flow.list); + kfree(link); +} + +static void nfp_flower_unlink_flows(struct nfp_fl_payload *merge_flow, + struct nfp_fl_payload *sub_flow) +{ + struct nfp_fl_payload_link *link; + + list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list) + if (link->sub_flow.flow == sub_flow) { + nfp_flower_unlink_flow(link); + return; + } +} + +static int nfp_flower_link_flows(struct nfp_fl_payload *merge_flow, + struct nfp_fl_payload *sub_flow) +{ + struct nfp_fl_payload_link *link; + + link = kmalloc(sizeof(*link), GFP_KERNEL); + if (!link) + return -ENOMEM; + + link->merge_flow.flow = merge_flow; + list_add_tail(&link->merge_flow.list, &merge_flow->linked_flows); + link->sub_flow.flow = sub_flow; + list_add_tail(&link->sub_flow.list, &sub_flow->linked_flows); + + return 0; +} + +/** + * nfp_flower_merge_offloaded_flows() - Merge two existing flows into a single flow. + * @app: Pointer to the APP handle + * @sub_flow1: Initial flow matched to produce merge hint + * @sub_flow2: Post recirculation flow matched in merge hint + * + * Combines two flows (if valid) into a single flow, removing the initial + * flows from hw and offloading the new, merged flow. + * + * Return: negative value on error, 0 on success.
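+ * + * Must be called with RTNL held.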
+ */ +int nfp_flower_merge_offloaded_flows(struct nfp_app *app, + struct nfp_fl_payload *sub_flow1, + struct nfp_fl_payload *sub_flow2) +{ + struct tc_cls_flower_offload merge_tc_off; + struct nfp_flower_priv *priv = app->priv; + struct nfp_fl_payload *merge_flow; + struct nfp_fl_key_ls merge_key_ls; + int err; + + ASSERT_RTNL(); + + if (sub_flow1 == sub_flow2 || + nfp_flower_is_merge_flow(sub_flow1) || + nfp_flower_is_merge_flow(sub_flow2)) + return -EINVAL; + + err = nfp_flower_can_merge(sub_flow1, sub_flow2); + if (err) + return err; + + merge_key_ls.key_size = sub_flow1->meta.key_len; + + merge_flow = nfp_flower_allocate_new(&merge_key_ls); + if (!merge_flow) + return -ENOMEM; + + merge_flow->tc_flower_cookie = (unsigned long)merge_flow; + merge_flow->ingress_dev = sub_flow1->ingress_dev; + + memcpy(merge_flow->unmasked_data, sub_flow1->unmasked_data, + sub_flow1->meta.key_len); + memcpy(merge_flow->mask_data, sub_flow1->mask_data, + sub_flow1->meta.mask_len); + + err = nfp_flower_merge_action(sub_flow1, sub_flow2, merge_flow); + if (err) + goto err_destroy_merge_flow; + + err = nfp_flower_link_flows(merge_flow, sub_flow1); + if (err) + goto err_destroy_merge_flow; + + err = nfp_flower_link_flows(merge_flow, sub_flow2); + if (err) + goto err_unlink_sub_flow1; + + merge_tc_off.cookie = merge_flow->tc_flower_cookie; + err = nfp_compile_flow_metadata(app, &merge_tc_off, merge_flow, + merge_flow->ingress_dev); + if (err) + goto err_unlink_sub_flow2; + + err = rhashtable_insert_fast(&priv->flow_table, &merge_flow->fl_node, + nfp_flower_table_params); + if (err) + goto err_release_metadata; + + err = nfp_flower_xmit_flow(app, merge_flow, + NFP_FLOWER_CMSG_TYPE_FLOW_MOD); + if (err) + goto err_remove_rhash; + + merge_flow->in_hw = true; + sub_flow1->in_hw = false; + + return 0; + +err_remove_rhash: + WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table, + &merge_flow->fl_node, + nfp_flower_table_params)); +err_release_metadata: + nfp_modify_flow_metadata(app, merge_flow); +err_unlink_sub_flow2: + nfp_flower_unlink_flows(merge_flow, sub_flow2); +err_unlink_sub_flow1: + nfp_flower_unlink_flows(merge_flow, sub_flow1); +err_destroy_merge_flow: + kfree(merge_flow->action_data); + kfree(merge_flow->mask_data); + kfree(merge_flow->unmasked_data); + kfree(merge_flow); + return err; +} + /** * nfp_flower_add_offload() - Adds a new flow to hardware. * @app: Pointer to the APP handle @@ -454,6 +919,8 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, if (port) port->tc_offload_cnt++; + flow_pay->in_hw = true; + /* Deallocate flow payload when flower rule has been destroyed. */ kfree(key_layer); @@ -475,6 +942,75 @@ err_free_key_ls: return err; } +static void +nfp_flower_remove_merge_flow(struct nfp_app *app, + struct nfp_fl_payload *del_sub_flow, + struct nfp_fl_payload *merge_flow) +{ + struct nfp_flower_priv *priv = app->priv; + struct nfp_fl_payload_link *link, *temp; + struct nfp_fl_payload *origin; + bool mod = false; + int err; + + link = list_first_entry(&merge_flow->linked_flows, + struct nfp_fl_payload_link, merge_flow.list); + origin = link->sub_flow.flow; + + /* Re-add rule the merge had overwritten if it has not been deleted. 
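+ * In that case the original flow is restored in firmware via a flow + * modification message.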
*/ + if (origin != del_sub_flow) + mod = true; + + err = nfp_modify_flow_metadata(app, merge_flow); + if (err) { + nfp_flower_cmsg_warn(app, "Metadata fail for merge flow delete.\n"); + goto err_free_links; + } + + if (!mod) { + err = nfp_flower_xmit_flow(app, merge_flow, + NFP_FLOWER_CMSG_TYPE_FLOW_DEL); + if (err) { + nfp_flower_cmsg_warn(app, "Failed to delete merged flow.\n"); + goto err_free_links; + } + } else { + __nfp_modify_flow_metadata(priv, origin); + err = nfp_flower_xmit_flow(app, origin, + NFP_FLOWER_CMSG_TYPE_FLOW_MOD); + if (err) + nfp_flower_cmsg_warn(app, "Failed to revert merge flow.\n"); + origin->in_hw = true; + } + +err_free_links: + /* Clean any links connected with the merged flow. */ + list_for_each_entry_safe(link, temp, &merge_flow->linked_flows, + merge_flow.list) + nfp_flower_unlink_flow(link); + + kfree(merge_flow->action_data); + kfree(merge_flow->mask_data); + kfree(merge_flow->unmasked_data); + WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table, + &merge_flow->fl_node, + nfp_flower_table_params)); + kfree_rcu(merge_flow, rcu); +} + +static void +nfp_flower_del_linked_merge_flows(struct nfp_app *app, + struct nfp_fl_payload *sub_flow) +{ + struct nfp_fl_payload_link *link, *temp; + + /* Remove any merge flow formed from the deleted sub_flow. */ + list_for_each_entry_safe(link, temp, &sub_flow->linked_flows, + sub_flow.list) + nfp_flower_remove_merge_flow(app, sub_flow, + link->merge_flow.flow); +} + /** * nfp_flower_del_offload() - Removes a flow from hardware. * @app: Pointer to the APP handle @@ -482,7 +1018,7 @@ err_free_key_ls: * @flow: TC flower classifier offload structure * * Removes a flow from the repeated hash structure and clears the - * action payload. + * action payload. Any flows merged from this are also deleted. * * Return: negative value on error, 0 if removed successfully. */ @@ -504,17 +1040,22 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev, err = nfp_modify_flow_metadata(app, nfp_flow); if (err) - goto err_free_flow; + goto err_free_merge_flow; if (nfp_flow->nfp_tun_ipv4_addr) nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr); + if (!nfp_flow->in_hw) { + err = 0; + goto err_free_merge_flow; + } + err = nfp_flower_xmit_flow(app, nfp_flow, NFP_FLOWER_CMSG_TYPE_FLOW_DEL); - if (err) - goto err_free_flow; + /* Fall through on error. */ -err_free_flow: +err_free_merge_flow: + nfp_flower_del_linked_merge_flows(app, nfp_flow); if (port) port->tc_offload_cnt--; kfree(nfp_flow->action_data); @@ -527,6 +1068,52 @@ err_free_flow: return err; } +static void +__nfp_flower_update_merge_stats(struct nfp_app *app, + struct nfp_fl_payload *merge_flow) +{ + struct nfp_flower_priv *priv = app->priv; + struct nfp_fl_payload_link *link; + struct nfp_fl_payload *sub_flow; + u64 pkts, bytes, used; + u32 ctx_id; + + ctx_id = be32_to_cpu(merge_flow->meta.host_ctx_id); + pkts = priv->stats[ctx_id].pkts; + /* Do not cycle subflows if no stats to distribute. */ + if (!pkts) + return; + bytes = priv->stats[ctx_id].bytes; + used = priv->stats[ctx_id].used; + + /* Reset stats for the merge flow. */ + priv->stats[ctx_id].pkts = 0; + priv->stats[ctx_id].bytes = 0; + + /* The merge flow has received stats updates from firmware. + * Distribute these stats to all subflows that form the merge. + * The stats will be collected from TC via the subflows.
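+ * Note that 'used' is folded in as a maximum rather than a sum.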
+ */ + list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list) { + sub_flow = link->sub_flow.flow; + ctx_id = be32_to_cpu(sub_flow->meta.host_ctx_id); + priv->stats[ctx_id].pkts += pkts; + priv->stats[ctx_id].bytes += bytes; + priv->stats[ctx_id].used = max_t(u64, priv->stats[ctx_id].used, used); + } +} + +static void +nfp_flower_update_merge_stats(struct nfp_app *app, + struct nfp_fl_payload *sub_flow) +{ + struct nfp_fl_payload_link *link; + + /* Distribute stats from any merge flows that this subflow is part of. */ + list_for_each_entry(link, &sub_flow->linked_flows, sub_flow.list) + __nfp_flower_update_merge_stats(app, link->merge_flow.flow); +} + /** * nfp_flower_get_stats() - Populates flow stats obtained from hardware. * @app: Pointer to the APP handle @@ -553,6 +1140,10 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev, ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id); spin_lock_bh(&priv->stats_lock); + /* If request is for a sub_flow, update stats from merged flows. */ + if (!list_empty(&nfp_flow->linked_flows)) + nfp_flower_update_merge_stats(app, nfp_flow); + flow_stats_update(&flow->stats, priv->stats[ctx_id].bytes, priv->stats[ctx_id].pkts, priv->stats[ctx_id].used); @@ -682,7 +1273,9 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app, struct nfp_flower_priv *priv = app->priv; int err; - if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS && + !(f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS && + nfp_flower_internal_port_can_offload(app, netdev))) return -EOPNOTSUPP; switch (f->command) { diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index 4d78be4ec4e9..faa06edf95ac 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -171,7 +171,7 @@ void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb) for (i = 0; i < count; i++) { ipv4_addr = payload->tun_info[i].ipv4; port = be32_to_cpu(payload->tun_info[i].egress_port); - netdev = nfp_app_repr_get(app, port); + netdev = nfp_app_dev_get(app, port, NULL); if (!netdev) continue; @@ -270,9 +270,10 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app, struct flowi4 *flow, struct neighbour *neigh, gfp_t flag) { struct nfp_tun_neigh payload; + u32 port_id; - /* Only offload representor IPv4s for now. */ - if (!nfp_netdev_is_nfp_repr(netdev)) + port_id = nfp_flower_get_port_id_from_netdev(app, netdev); + if (!port_id) return; memset(&payload, 0, sizeof(struct nfp_tun_neigh)); @@ -290,7 +291,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app, payload.src_ipv4 = flow->saddr; ether_addr_copy(payload.src_addr, netdev->dev_addr); neigh_ha_snapshot(payload.dst_addr, neigh, netdev); - payload.port_id = cpu_to_be32(nfp_repr_get_port_id(netdev)); + payload.port_id = cpu_to_be32(port_id); /* Add destination of new route to NFP cache.
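The cached entry lets later neighbour updates for this destination be pushed to the FW as well.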
*/ nfp_tun_add_route_to_cache(app, payload.dst_ipv4); @@ -366,7 +367,7 @@ void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb) payload = nfp_flower_cmsg_get_data(skb); - netdev = nfp_app_repr_get(app, be32_to_cpu(payload->ingress_port)); + netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL); if (!netdev) goto route_fail_warning; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h index f8d422713705..76d13af46a7a 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h @@ -79,7 +79,7 @@ extern const struct nfp_app_type app_abm; * @eswitch_mode_set: set SR-IOV eswitch mode (under pf->lock) * @sriov_enable: app-specific sriov initialisation * @sriov_disable: app-specific sriov clean-up - * @repr_get: get representor netdev + * @dev_get: get representor or internal port representing netdev */ struct nfp_app_type { enum nfp_app_id id; @@ -143,7 +143,8 @@ struct nfp_app_type { enum devlink_eswitch_mode (*eswitch_mode_get)(struct nfp_app *app); int (*eswitch_mode_set)(struct nfp_app *app, u16 mode); - struct net_device *(*repr_get)(struct nfp_app *app, u32 id); + struct net_device *(*dev_get)(struct nfp_app *app, u32 id, + bool *redir_egress); }; /** @@ -397,12 +398,14 @@ static inline void nfp_app_sriov_disable(struct nfp_app *app) app->type->sriov_disable(app); } -static inline struct net_device *nfp_app_repr_get(struct nfp_app *app, u32 id) +static inline +struct net_device *nfp_app_dev_get(struct nfp_app *app, u32 id, + bool *redir_egress) { - if (unlikely(!app || !app->type->repr_get)) + if (unlikely(!app || !app->type->dev_get)) return NULL; - return app->type->repr_get(app, id); + return app->type->dev_get(app, id, redir_egress); } struct nfp_app *nfp_app_from_netdev(struct net_device *netdev); @@ -433,6 +436,6 @@ int nfp_app_nic_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, int nfp_app_nic_vnic_init_phy_port(struct nfp_pf *pf, struct nfp_app *app, struct nfp_net *nn, unsigned int id); -struct devlink *nfp_devlink_get_devlink(struct net_device *netdev); +struct devlink_port *nfp_devlink_get_devlink_port(struct net_device *netdev); #endif diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c index e9eca99cf493..c50fce42f473 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c @@ -144,7 +144,8 @@ nfp_devlink_sb_pool_get(struct devlink *devlink, unsigned int sb_index, static int nfp_devlink_sb_pool_set(struct devlink *devlink, unsigned int sb_index, u16 pool_index, - u32 size, enum devlink_sb_threshold_type threshold_type) + u32 size, enum devlink_sb_threshold_type threshold_type, + struct netlink_ext_ack *extack) { struct nfp_pf *pf = devlink_priv(devlink); @@ -354,6 +355,8 @@ int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port) { struct nfp_eth_table_port eth_port; struct devlink *devlink; + const u8 *serial; + int serial_len; int ret; rtnl_lock(); @@ -362,10 +365,10 @@ int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port) if (ret) return ret; - devlink_port_type_eth_set(&port->dl_port, port->netdev); + serial_len = nfp_cpp_serial(port->app->cpp, &serial); devlink_port_attrs_set(&port->dl_port, DEVLINK_PORT_FLAVOUR_PHYSICAL, eth_port.label_port, eth_port.is_split, - eth_port.label_subport); + eth_port.label_subport, serial, serial_len); devlink = priv_to_devlink(app->pf); @@ -377,13 
+380,23 @@ void nfp_devlink_port_unregister(struct nfp_port *port) devlink_port_unregister(&port->dl_port); } -struct devlink *nfp_devlink_get_devlink(struct net_device *netdev) +void nfp_devlink_port_type_eth_set(struct nfp_port *port) +{ + devlink_port_type_eth_set(&port->dl_port, port->netdev); +} + +void nfp_devlink_port_type_clear(struct nfp_port *port) { - struct nfp_app *app; + devlink_port_type_clear(&port->dl_port); +} + +struct devlink_port *nfp_devlink_get_devlink_port(struct net_device *netdev) +{ + struct nfp_port *port; - app = nfp_app_from_netdev(netdev); - if (!app) + port = nfp_port_from_netdev(netdev); + if (!port) return NULL; - return priv_to_devlink(app->pf); + return &port->dl_port; } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index f4c8776e42b6..948d1a4b4643 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -294,6 +294,9 @@ static int nfp_pcie_sriov_disable(struct pci_dev *pdev) static int nfp_pcie_sriov_configure(struct pci_dev *pdev, int num_vfs) { + if (!pci_get_drvdata(pdev)) + return -ENOENT; + if (num_vfs == 0) return nfp_pcie_sriov_disable(pdev); else @@ -720,9 +723,13 @@ err_pci_disable: return err; } -static void nfp_pci_remove(struct pci_dev *pdev) +static void __nfp_pci_shutdown(struct pci_dev *pdev, bool unload_fw) { - struct nfp_pf *pf = pci_get_drvdata(pdev); + struct nfp_pf *pf; + + pf = pci_get_drvdata(pdev); + if (!pf) + return; nfp_hwmon_unregister(pf); @@ -733,7 +740,7 @@ static void nfp_pci_remove(struct pci_dev *pdev) vfree(pf->dumpspec); kfree(pf->rtbl); nfp_mip_close(pf->mip); - if (pf->fw_loaded) + if (unload_fw && pf->fw_loaded) nfp_fw_unload(pf); destroy_workqueue(pf->wq); @@ -749,11 +756,22 @@ static void nfp_pci_remove(struct pci_dev *pdev) pci_disable_device(pdev); } +static void nfp_pci_remove(struct pci_dev *pdev) +{ + __nfp_pci_shutdown(pdev, true); +} + +static void nfp_pci_shutdown(struct pci_dev *pdev) +{ + __nfp_pci_shutdown(pdev, false); +} + static struct pci_driver nfp_pci_driver = { .name = nfp_driver_name, .id_table = nfp_pci_device_ids, .probe = nfp_pci_probe, .remove = nfp_pci_remove, + .shutdown = nfp_pci_shutdown, .sriov_configure = nfp_pcie_sriov_configure, }; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index be37c2d6151c..df9aff2684ed 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -539,12 +539,17 @@ struct nfp_net_dp { * @shared_handler: Handler for shared interrupts * @shared_name: Name for shared interrupt * @me_freq_mhz: ME clock_freq (MHz) - * @reconfig_lock: Protects HW reconfiguration request regs/machinery + * @reconfig_lock: Protects @reconfig_posted, @reconfig_timer_active, + * @reconfig_sync_present and HW reconfiguration request + * regs/machinery from async requests (sync must take + * @bar_lock) * @reconfig_posted: Pending reconfig bits coming from async sources * @reconfig_timer_active: Timer for reading reconfiguration results is pending * @reconfig_sync_present: Some thread is performing synchronous reconfig * @reconfig_timer: Timer for async reading of reconfig results * @reconfig_in_progress_update: Update FW is processing now (debug only) + * @bar_lock: vNIC config BAR access lock, protects: update, + * mailbox area * @link_up: Is the link up? 
* @link_status_lock: Protects @link_* and ensures atomicity with BAR reading * @rx_coalesce_usecs: RX interrupt moderation usecs delay parameter @@ -615,6 +620,8 @@ struct nfp_net { struct timer_list reconfig_timer; u32 reconfig_in_progress_update; + struct mutex bar_lock; + u32 rx_coalesce_usecs; u32 rx_coalesce_max_frames; u32 tx_coalesce_usecs; @@ -839,6 +846,16 @@ static inline void nfp_ctrl_unlock(struct nfp_net *nn) spin_unlock_bh(&nn->r_vecs[0].lock); } +static inline void nn_ctrl_bar_lock(struct nfp_net *nn) +{ + mutex_lock(&nn->bar_lock); +} + +static inline void nn_ctrl_bar_unlock(struct nfp_net *nn) +{ + mutex_unlock(&nn->bar_lock); +} + /* Globals */ extern const char nfp_driver_version[]; @@ -871,7 +888,9 @@ unsigned int nfp_net_rss_key_sz(struct nfp_net *nn); void nfp_net_rss_write_itbl(struct nfp_net *nn); void nfp_net_rss_write_key(struct nfp_net *nn); void nfp_net_coalesce_write_cfg(struct nfp_net *nn); -int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd); +int nfp_net_mbox_lock(struct nfp_net *nn, unsigned int data_size); +int nfp_net_mbox_reconfig(struct nfp_net *nn, u32 mbox_cmd); +int nfp_net_mbox_reconfig_and_unlock(struct nfp_net *nn, u32 mbox_cmd); unsigned int nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 6d1b8816552e..b82b684f52ce 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -23,6 +23,7 @@ #include <linux/interrupt.h> #include <linux/ip.h> #include <linux/ipv6.h> +#include <linux/lockdep.h> #include <linux/mm.h> #include <linux/overflow.h> #include <linux/page_ref.h> @@ -137,20 +138,37 @@ static bool nfp_net_reconfig_check_done(struct nfp_net *nn, bool last_check) return false; } -static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline) +static bool __nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline) { bool timed_out = false; + int i; + + /* Poll update field, waiting for NFP to ack the config. + * Do an opportunistic busy-wait loop first, then sleep. + */ + for (i = 0; i < 50; i++) { + if (nfp_net_reconfig_check_done(nn, false)) + return false; + udelay(4); + } - /* Poll update field, waiting for NFP to ack the config */ while (!nfp_net_reconfig_check_done(nn, timed_out)) { - msleep(1); + usleep_range(250, 500); timed_out = time_is_before_eq_jiffies(deadline); } + return timed_out; +} + +static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline) +{ + if (__nfp_net_reconfig_wait(nn, deadline)) + return -EIO; + if (nn_readl(nn, NFP_NET_CFG_UPDATE) & NFP_NET_CFG_UPDATE_ERR) return -EIO; - return timed_out ?
-EIO : 0; + return 0; } static void nfp_net_reconfig_timer(struct timer_list *t) @@ -243,7 +261,7 @@ static void nfp_net_reconfig_wait_posted(struct nfp_net *nn) } /** - * nfp_net_reconfig() - Reconfigure the firmware + * __nfp_net_reconfig() - Reconfigure the firmware * @nn: NFP Net device to reconfigure * @update: The value for the update field in the BAR config * @@ -253,10 +271,12 @@ static void nfp_net_reconfig_wait_posted(struct nfp_net *nn) * * Return: Negative errno on error, 0 on success */ -int nfp_net_reconfig(struct nfp_net *nn, u32 update) +static int __nfp_net_reconfig(struct nfp_net *nn, u32 update) { int ret; + lockdep_assert_held(&nn->bar_lock); + nfp_net_reconfig_sync_enter(nn); nfp_net_reconfig_start(nn, update); @@ -274,8 +294,31 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update) return ret; } +int nfp_net_reconfig(struct nfp_net *nn, u32 update) +{ + int ret; + + nn_ctrl_bar_lock(nn); + ret = __nfp_net_reconfig(nn, update); + nn_ctrl_bar_unlock(nn); + + return ret; +} + +int nfp_net_mbox_lock(struct nfp_net *nn, unsigned int data_size) +{ + if (nn->tlv_caps.mbox_len < NFP_NET_CFG_MBOX_SIMPLE_VAL + data_size) { + nn_err(nn, "mailbox too small for %u of data (%u)\n", + data_size, nn->tlv_caps.mbox_len); + return -EIO; + } + + nn_ctrl_bar_lock(nn); + return 0; +} + /** - * nfp_net_reconfig_mbox() - Reconfigure the firmware via the mailbox + * nfp_net_mbox_reconfig() - Reconfigure the firmware via the mailbox * @nn: NFP Net device to reconfigure * @mbox_cmd: The value for the mailbox command * @@ -283,19 +326,15 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update) * * Return: Negative errno on error, 0 on success */ -int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd) +int nfp_net_mbox_reconfig(struct nfp_net *nn, u32 mbox_cmd) { u32 mbox = nn->tlv_caps.mbox_off; int ret; - if (!nfp_net_has_mbox(&nn->tlv_caps)) { - nn_err(nn, "no mailbox present, command: %u\n", mbox_cmd); - return -EIO; - } - + lockdep_assert_held(&nn->bar_lock); nn_writeq(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd); - ret = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MBOX); + ret = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MBOX); if (ret) { nn_err(nn, "Mailbox update error\n"); return ret; @@ -304,6 +343,15 @@ int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd) return -nn_readl(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET); } +int nfp_net_mbox_reconfig_and_unlock(struct nfp_net *nn, u32 mbox_cmd) +{ + int ret; + + ret = nfp_net_mbox_reconfig(nn, mbox_cmd); + nn_ctrl_bar_unlock(nn); + return ret; +} + /* Interrupt configuration and handling */ @@ -909,7 +957,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev) nfp_net_tx_ring_stop(nd_q, tx_ring); tx_ring->wr_ptr_add += nr_frags + 1; - if (__netdev_tx_sent_queue(nd_q, txbuf->real_len, skb->xmit_more)) + if (__netdev_tx_sent_queue(nd_q, txbuf->real_len, netdev_xmit_more())) nfp_net_tx_xmit_more_flush(tx_ring); return NETDEV_TX_OK; @@ -1635,6 +1683,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) struct nfp_net_rx_buf *rxbuf; struct nfp_net_rx_desc *rxd; struct nfp_meta_parsed meta; + bool redir_egress = false; struct net_device *netdev; dma_addr_t new_dma_addr; u32 meta_len_xdp = 0; @@ -1770,13 +1819,16 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) struct nfp_net *nn; nn = netdev_priv(dp->netdev); - netdev = nfp_app_repr_get(nn->app, meta.portid); + netdev = nfp_app_dev_get(nn->app, meta.portid, + &redir_egress); if (unlikely(!netdev)) { nfp_net_rx_drop(dp, r_vec, 
rx_ring, rxbuf, NULL); continue; } - nfp_repr_inc_rx_stats(netdev, pkt_len); + + if (nfp_netdev_is_nfp_repr(netdev)) + nfp_repr_inc_rx_stats(netdev, pkt_len); } skb = build_skb(rxbuf->frag, true_bufsz); @@ -1811,7 +1863,13 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) if (meta_len_xdp) skb_metadata_set(skb, meta_len_xdp); - napi_gro_receive(&rx_ring->r_vec->napi, skb); + if (likely(!redir_egress)) { + napi_gro_receive(&rx_ring->r_vec->napi, skb); + } else { + skb->dev = netdev; + __skb_push(skb, ETH_HLEN); + dev_queue_xmit(skb); + } } if (xdp_prog) { @@ -3111,7 +3169,9 @@ static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu) static int nfp_net_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) { + const u32 cmd = NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD; struct nfp_net *nn = netdev_priv(netdev); + int err; /* Priority tagged packets with vlan id 0 are processed by the * NFP as untagged packets @@ -3119,17 +3179,23 @@ nfp_net_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) if (!vid) return 0; + err = nfp_net_mbox_lock(nn, NFP_NET_CFG_VLAN_FILTER_SZ); + if (err) + return err; + nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid); nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO, ETH_P_8021Q); - return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD); + return nfp_net_mbox_reconfig_and_unlock(nn, cmd); } static int nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) { + const u32 cmd = NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL; struct nfp_net *nn = netdev_priv(netdev); + int err; /* Priority tagged packets with vlan id 0 are processed by the * NFP as untagged packets @@ -3137,11 +3203,15 @@ nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) if (!vid) return 0; + err = nfp_net_mbox_lock(nn, NFP_NET_CFG_VLAN_FILTER_SZ); + if (err) + return err; + nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid); nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO, ETH_P_8021Q); - return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL); + return nfp_net_mbox_reconfig_and_unlock(nn, cmd); } static void nfp_net_stat64(struct net_device *netdev, @@ -3324,8 +3394,11 @@ nfp_net_get_phys_port_name(struct net_device *netdev, char *name, size_t len) struct nfp_net *nn = netdev_priv(netdev); int n; + /* If port is defined, devlink_port is registered and devlink core + * is taking care of name formatting. 
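+ * Returning -EOPNOTSUPP lets the core fall back to the devlink-provided + * port name via ndo_get_devlink_port.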
+ */ if (nn->port) - return nfp_port_get_phys_port_name(netdev, name, len); + return -EOPNOTSUPP; if (nn->dp.is_vf || nn->vnic_no_name) return -EOPNOTSUPP; @@ -3517,6 +3590,7 @@ const struct net_device_ops nfp_net_netdev_ops = { .ndo_set_vf_mac = nfp_app_set_vf_mac, .ndo_set_vf_vlan = nfp_app_set_vf_vlan, .ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk, + .ndo_set_vf_trust = nfp_app_set_vf_trust, .ndo_get_vf_config = nfp_app_get_vf_config, .ndo_set_vf_link_state = nfp_app_set_vf_link_state, .ndo_setup_tc = nfp_port_setup_tc, @@ -3530,8 +3604,7 @@ const struct net_device_ops nfp_net_netdev_ops = { .ndo_udp_tunnel_add = nfp_net_add_vxlan_port, .ndo_udp_tunnel_del = nfp_net_del_vxlan_port, .ndo_bpf = nfp_net_xdp, - .ndo_get_port_parent_id = nfp_port_get_port_parent_id, - .ndo_get_devlink = nfp_devlink_get_devlink, + .ndo_get_devlink_port = nfp_devlink_get_devlink_port, }; /** @@ -3548,7 +3621,7 @@ void nfp_net_info(struct nfp_net *nn) nn->fw_ver.resv, nn->fw_ver.class, nn->fw_ver.major, nn->fw_ver.minor, nn->max_mtu); - nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", + nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", nn->cap, nn->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "", nn->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "", @@ -3564,7 +3637,6 @@ void nfp_net_info(struct nfp_net *nn) nn->cap & NFP_NET_CFG_CTRL_RSS ? "RSS1 " : "", nn->cap & NFP_NET_CFG_CTRL_RSS2 ? "RSS2 " : "", nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER ? "CTAG_FILTER " : "", - nn->cap & NFP_NET_CFG_CTRL_L2SWITCH ? "L2SWITCH " : "", nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO ? "AUTOMASK " : "", nn->cap & NFP_NET_CFG_CTRL_IRQMOD ? "IRQMOD " : "", nn->cap & NFP_NET_CFG_CTRL_VXLAN ? "VXLAN " : "", @@ -3632,6 +3704,8 @@ nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev, nn->dp.txd_cnt = NFP_NET_TX_DESCS_DEFAULT; nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT; + mutex_init(&nn->bar_lock); + spin_lock_init(&nn->reconfig_lock); spin_lock_init(&nn->link_status_lock); @@ -3659,6 +3733,9 @@ err_free_nn: void nfp_net_free(struct nfp_net *nn) { WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted); + + mutex_destroy(&nn->bar_lock); + if (nn->dp.netdev) free_netdev(nn->dp.netdev); else @@ -3920,9 +3997,6 @@ int nfp_net_init(struct nfp_net *nn) nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD; } - if (nn->dp.netdev) - nfp_net_netdev_init(nn); - /* Stash the re-configuration queue away. 
First odd queue in TX Bar */ nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ; @@ -3935,6 +4009,9 @@ int nfp_net_init(struct nfp_net *nn) if (err) return err; + if (nn->dp.netdev) + nfp_net_netdev_init(nn); + nfp_net_vecs_init(nn); if (!nn->dp.netdev) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h index 372adea10e14..25919e338071 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h @@ -104,8 +104,6 @@ #define NFP_NET_CFG_CTRL_RINGPRIO (0x1 << 19) /* Ring priorities */ #define NFP_NET_CFG_CTRL_MSIXAUTO (0x1 << 20) /* MSI-X auto-masking */ #define NFP_NET_CFG_CTRL_TXRWB (0x1 << 21) /* Write-back of TX ring*/ -#define NFP_NET_CFG_CTRL_L2SWITCH (0x1 << 22) /* L2 Switch */ -#define NFP_NET_CFG_CTRL_L2SWITCH_LOCAL (0x1 << 23) /* Switch to local */ #define NFP_NET_CFG_CTRL_VXLAN (0x1 << 24) /* VXLAN tunnel support */ #define NFP_NET_CFG_CTRL_NVGRE (0x1 << 25) /* NVGRE tunnel support */ #define NFP_NET_CFG_CTRL_BPF (0x1 << 27) /* BPF offload capable */ @@ -130,7 +128,6 @@ #define NFP_NET_CFG_UPDATE_TXRPRIO (0x1 << 3) /* TX Ring prio change */ #define NFP_NET_CFG_UPDATE_RXRPRIO (0x1 << 4) /* RX Ring prio change */ #define NFP_NET_CFG_UPDATE_MSIX (0x1 << 5) /* MSI-X change */ -#define NFP_NET_CFG_UPDATE_L2SWITCH (0x1 << 6) /* Switch changes */ #define NFP_NET_CFG_UPDATE_RESET (0x1 << 7) /* Update due to FLR */ #define NFP_NET_CFG_UPDATE_IRQMOD (0x1 << 8) /* IRQ mod change */ #define NFP_NET_CFG_UPDATE_VXLAN (0x1 << 9) /* VXLAN port change */ @@ -392,7 +389,6 @@ #define NFP_NET_CFG_MBOX_SIMPLE_CMD 0x0 #define NFP_NET_CFG_MBOX_SIMPLE_RET 0x4 #define NFP_NET_CFG_MBOX_SIMPLE_VAL 0x8 -#define NFP_NET_CFG_MBOX_SIMPLE_LEN 12 #define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD 1 #define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL 2 @@ -498,10 +494,4 @@ struct nfp_net_tlv_caps { int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem, struct nfp_net_tlv_caps *caps); - -static inline bool nfp_net_has_mbox(struct nfp_net_tlv_caps *caps) -{ - return caps->mbox_len >= NFP_NET_CFG_MBOX_SIMPLE_LEN; -} - #endif /* _NFP_NET_CTRL_H_ */ diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 690b62718dbb..851e31e0ba8e 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -18,6 +18,7 @@ #include <linux/pci.h> #include <linux/ethtool.h> #include <linux/firmware.h> +#include <linux/sfp.h> #include "nfpcore/nfp.h" #include "nfpcore/nfp_nsp.h" @@ -152,6 +153,8 @@ static const struct nfp_et_stat nfp_mac_et_stats[] = { #define NN_RVEC_GATHER_STATS 9 #define NN_RVEC_PER_Q_STATS 3 +#define SFP_SFF_REV_COMPLIANCE 1 + static void nfp_net_get_nspinfo(struct nfp_app *app, char *version) { struct nfp_nsp *nsp; @@ -1096,6 +1099,130 @@ nfp_app_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump, buffer); } +static int +nfp_port_get_module_info(struct net_device *netdev, + struct ethtool_modinfo *modinfo) +{ + struct nfp_eth_table_port *eth_port; + struct nfp_port *port; + unsigned int read_len; + struct nfp_nsp *nsp; + int err = 0; + u8 data; + + port = nfp_port_from_netdev(netdev); + eth_port = nfp_port_get_eth_port(port); + if (!eth_port) + return -EOPNOTSUPP; + + nsp = nfp_nsp_open(port->app->cpp); + if (IS_ERR(nsp)) { + err = PTR_ERR(nsp); + netdev_err(netdev, "Failed to access the NSP: %d\n", err); + return err; + } + + if 
(!nfp_nsp_has_read_module_eeprom(nsp)) { + netdev_info(netdev, "reading module EEPROM not supported. Please update flash\n"); + err = -EOPNOTSUPP; + goto exit_close_nsp; + } + + switch (eth_port->interface) { + case NFP_INTERFACE_SFP: + case NFP_INTERFACE_SFP28: + err = nfp_nsp_read_module_eeprom(nsp, eth_port->eth_index, + SFP_SFF8472_COMPLIANCE, &data, + 1, &read_len); + if (err < 0) + goto exit_close_nsp; + + if (!data) { + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + } + break; + case NFP_INTERFACE_QSFP: + err = nfp_nsp_read_module_eeprom(nsp, eth_port->eth_index, + SFP_SFF_REV_COMPLIANCE, &data, + 1, &read_len); + if (err < 0) + goto exit_close_nsp; + + if (data < 0x3) { + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; + } + break; + case NFP_INTERFACE_QSFP28: + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; + break; + default: + netdev_err(netdev, "Unsupported module 0x%x detected\n", + eth_port->interface); + err = -EINVAL; + } + +exit_close_nsp: + nfp_nsp_close(nsp); + return err; +} + +static int +nfp_port_get_module_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct nfp_eth_table_port *eth_port; + struct nfp_port *port; + struct nfp_nsp *nsp; + int err; + + port = nfp_port_from_netdev(netdev); + eth_port = __nfp_port_get_eth_port(port); + if (!eth_port) + return -EOPNOTSUPP; + + nsp = nfp_nsp_open(port->app->cpp); + if (IS_ERR(nsp)) { + err = PTR_ERR(nsp); + netdev_err(netdev, "Failed to access the NSP: %d\n", err); + return err; + } + + if (!nfp_nsp_has_read_module_eeprom(nsp)) { + netdev_info(netdev, "reading module EEPROM not supported. 
Please update flash\n"); + err = -EOPNOTSUPP; + goto exit_close_nsp; + } + + err = nfp_nsp_read_module_eeprom(nsp, eth_port->eth_index, + eeprom->offset, data, eeprom->len, + &eeprom->len); + if (err < 0) { + if (eeprom->len) { + netdev_warn(netdev, + "Incomplete read from module EEPROM: %d\n", + err); + err = 0; + } else { + netdev_err(netdev, + "Reading from module EEPROM failed: %d\n", + err); + } + } + +exit_close_nsp: + nfp_nsp_close(nsp); + return err; +} + static int nfp_net_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec) { @@ -1253,6 +1380,8 @@ static const struct ethtool_ops nfp_net_ethtool_ops = { .set_dump = nfp_app_set_dump, .get_dump_flag = nfp_app_get_dump_flag, .get_dump_data = nfp_app_get_dump_data, + .get_module_info = nfp_port_get_module_info, + .get_module_eeprom = nfp_port_get_module_eeprom, .get_coalesce = nfp_net_get_coalesce, .set_coalesce = nfp_net_set_coalesce, .get_channels = nfp_net_get_channels, @@ -1272,6 +1401,8 @@ const struct ethtool_ops nfp_port_ethtool_ops = { .set_dump = nfp_app_set_dump, .get_dump_flag = nfp_app_get_dump_flag, .get_dump_data = nfp_app_get_dump_data, + .get_module_info = nfp_port_get_module_info, + .get_module_eeprom = nfp_port_get_module_eeprom, .get_link_ksettings = nfp_net_get_link_ksettings, .set_link_ksettings = nfp_net_set_link_ksettings, .get_fecparam = nfp_port_get_fecparam, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c index 08f5fdbd8e41..986464d4a206 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c @@ -150,34 +150,39 @@ nfp_net_pf_init_vnic(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id) nn->id = id; + if (nn->port) { + err = nfp_devlink_port_register(pf->app, nn->port); + if (err) + return err; + } + err = nfp_net_init(nn); if (err) - return err; + goto err_devlink_port_clean; nfp_net_debugfs_vnic_add(nn, pf->ddir); - if (nn->port) { - err = nfp_devlink_port_register(pf->app, nn->port); - if (err) - goto err_dfs_clean; - } + if (nn->port) + nfp_devlink_port_type_eth_set(nn->port); nfp_net_info(nn); if (nfp_net_is_data_vnic(nn)) { err = nfp_app_vnic_init(pf->app, nn); if (err) - goto err_devlink_port_clean; + goto err_devlink_port_type_clean; } return 0; -err_devlink_port_clean: +err_devlink_port_type_clean: if (nn->port) - nfp_devlink_port_unregister(nn->port); -err_dfs_clean: + nfp_devlink_port_type_clear(nn->port); nfp_net_debugfs_dir_clean(&nn->debugfs_dir); nfp_net_clean(nn); +err_devlink_port_clean: + if (nn->port) + nfp_devlink_port_unregister(nn->port); return err; } @@ -221,9 +226,11 @@ static void nfp_net_pf_clean_vnic(struct nfp_pf *pf, struct nfp_net *nn) if (nfp_net_is_data_vnic(nn)) nfp_app_vnic_clean(pf->app, nn); if (nn->port) - nfp_devlink_port_unregister(nn->port); + nfp_devlink_port_type_clear(nn->port); nfp_net_debugfs_dir_clean(&nn->debugfs_dir); nfp_net_clean(nn); + if (nn->port) + nfp_devlink_port_unregister(nn->port); } static int nfp_net_pf_alloc_irqs(struct nfp_pf *pf) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index 94d228c04496..036edcc1fa18 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c @@ -267,13 +267,13 @@ const struct net_device_ops nfp_repr_netdev_ops = { .ndo_set_vf_mac = nfp_app_set_vf_mac, .ndo_set_vf_vlan = nfp_app_set_vf_vlan, .ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk, + 
.ndo_set_vf_trust = nfp_app_set_vf_trust, .ndo_get_vf_config = nfp_app_get_vf_config, .ndo_set_vf_link_state = nfp_app_set_vf_link_state, .ndo_fix_features = nfp_repr_fix_features, .ndo_set_features = nfp_port_set_features, .ndo_set_mac_address = eth_mac_addr, - .ndo_get_port_parent_id = nfp_port_get_port_parent_id, - .ndo_get_devlink = nfp_devlink_get_devlink, + .ndo_get_devlink_port = nfp_devlink_get_devlink_port, }; void diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c index b6ec46ed0540..3fdaaf8ed2ba 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) -/* Copyright (C) 2017 Netronome Systems, Inc. */ +/* Copyright (C) 2017-2019 Netronome Systems, Inc. */ #include <linux/bitfield.h> #include <linux/errno.h> @@ -146,6 +146,30 @@ int nfp_app_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable) "spoofchk"); } +int nfp_app_set_vf_trust(struct net_device *netdev, int vf, bool enable) +{ + struct nfp_app *app = nfp_app_from_netdev(netdev); + unsigned int vf_offset; + u8 vf_ctrl; + int err; + + err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_TRUST, + "trust"); + if (err) + return err; + + /* Write trust control bit to VF entry in VF config symbol */ + vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ + + NFP_NET_VF_CFG_CTRL; + vf_ctrl = readb(app->pf->vfcfg_tbl2 + vf_offset); + vf_ctrl &= ~NFP_NET_VF_CFG_CTRL_TRUST; + vf_ctrl |= FIELD_PREP(NFP_NET_VF_CFG_CTRL_TRUST, enable); + writeb(vf_ctrl, app->pf->vfcfg_tbl2 + vf_offset); + + return nfp_net_sriov_update(app, vf, NFP_NET_VF_CFG_MB_UPD_TRUST, + "trust"); +} + int nfp_app_set_vf_link_state(struct net_device *netdev, int vf, int link_state) { @@ -213,6 +237,7 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf, ivi->qos = FIELD_GET(NFP_NET_VF_CFG_VLAN_QOS, vlan_tci); ivi->spoofchk = FIELD_GET(NFP_NET_VF_CFG_CTRL_SPOOF, flags); + ivi->trusted = FIELD_GET(NFP_NET_VF_CFG_CTRL_TRUST, flags); ivi->linkstate = FIELD_GET(NFP_NET_VF_CFG_CTRL_LINK_STATE, flags); return 0; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h index c9f09c5bb5ee..a3db0cbf6425 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ -/* Copyright (C) 2017 Netronome Systems, Inc. */ +/* Copyright (C) 2017-2019 Netronome Systems, Inc. 
*/ #ifndef _NFP_NET_SRIOV_H_ #define _NFP_NET_SRIOV_H_ @@ -19,12 +19,14 @@ #define NFP_NET_VF_CFG_MB_CAP_VLAN (0x1 << 1) #define NFP_NET_VF_CFG_MB_CAP_SPOOF (0x1 << 2) #define NFP_NET_VF_CFG_MB_CAP_LINK_STATE (0x1 << 3) +#define NFP_NET_VF_CFG_MB_CAP_TRUST (0x1 << 4) #define NFP_NET_VF_CFG_MB_RET 0x2 #define NFP_NET_VF_CFG_MB_UPD 0x4 #define NFP_NET_VF_CFG_MB_UPD_MAC (0x1 << 0) #define NFP_NET_VF_CFG_MB_UPD_VLAN (0x1 << 1) #define NFP_NET_VF_CFG_MB_UPD_SPOOF (0x1 << 2) #define NFP_NET_VF_CFG_MB_UPD_LINK_STATE (0x1 << 3) +#define NFP_NET_VF_CFG_MB_UPD_TRUST (0x1 << 4) #define NFP_NET_VF_CFG_MB_VF_NUM 0x7 /* VF config entry @@ -35,6 +37,7 @@ #define NFP_NET_VF_CFG_MAC_HI 0x0 #define NFP_NET_VF_CFG_MAC_LO 0x6 #define NFP_NET_VF_CFG_CTRL 0x4 +#define NFP_NET_VF_CFG_CTRL_TRUST 0x8 #define NFP_NET_VF_CFG_CTRL_SPOOF 0x4 #define NFP_NET_VF_CFG_CTRL_LINK_STATE 0x3 #define NFP_NET_VF_CFG_LS_MODE_AUTO 0 @@ -48,6 +51,7 @@ int nfp_app_set_vf_mac(struct net_device *netdev, int vf, u8 *mac); int nfp_app_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, __be16 vlan_proto); int nfp_app_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting); +int nfp_app_set_vf_trust(struct net_device *netdev, int vf, bool setting); int nfp_app_set_vf_link_state(struct net_device *netdev, int vf, int link_state); int nfp_app_get_vf_config(struct net_device *netdev, int vf, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c index 1145849ca7ba..e4977cdf7678 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c @@ -282,8 +282,14 @@ err_free_vf: static void nfp_netvf_pci_remove(struct pci_dev *pdev) { - struct nfp_net_vf *vf = pci_get_drvdata(pdev); - struct nfp_net *nn = vf->nn; + struct nfp_net_vf *vf; + struct nfp_net *nn; + + vf = pci_get_drvdata(pdev); + if (!vf) + return; + + nn = vf->nn; /* Note, the order is slightly different from above as we need * to keep the nn pointer around till we have freed everything. 
@@ -317,4 +323,5 @@ struct pci_driver nfp_netvf_pci_driver = { .id_table = nfp_netvf_pci_device_ids, .probe = nfp_netvf_pci_probe, .remove = nfp_netvf_pci_remove, + .shutdown = nfp_netvf_pci_remove, }; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.c b/drivers/net/ethernet/netronome/nfp/nfp_port.c index 93c5bfc0510b..fcd16877e6e0 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.c @@ -30,22 +30,6 @@ struct nfp_port *nfp_port_from_netdev(struct net_device *netdev) return NULL; } -int nfp_port_get_port_parent_id(struct net_device *netdev, - struct netdev_phys_item_id *ppid) -{ - struct nfp_port *port; - const u8 *serial; - - port = nfp_port_from_netdev(netdev); - if (!port) - return -EOPNOTSUPP; - - ppid->id_len = nfp_cpp_serial(port->app->cpp, &serial); - memcpy(&ppid->id, serial, ppid->id_len); - - return 0; -} - int nfp_port_setup_tc(struct net_device *netdev, enum tc_setup_type type, void *type_data) { diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h index 90ae053f5c07..d7fd203bb180 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h @@ -131,6 +131,8 @@ int nfp_net_refresh_port_table_sync(struct nfp_pf *pf); int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port); void nfp_devlink_port_unregister(struct nfp_port *port); +void nfp_devlink_port_type_eth_set(struct nfp_port *port); +void nfp_devlink_port_type_clear(struct nfp_port *port); /** * Mac stats (0x0000 - 0x0200) diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c index 3a4e224a64b7..42cf4fd875ea 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c @@ -79,6 +79,8 @@ #define NFP_VERSIONS_NCSI_OFF 22 #define NFP_VERSIONS_CFGR_OFF 26 +#define NSP_SFF_EEPROM_BLOCK_LEN 8 + enum nfp_nsp_cmd { SPCODE_NOOP = 0, /* No operation */ SPCODE_SOFT_RESET = 1, /* Soft reset the NFP */ @@ -95,6 +97,7 @@ enum nfp_nsp_cmd { SPCODE_FW_STORED = 16, /* If no FW loaded, load flash app FW */ SPCODE_HWINFO_LOOKUP = 17, /* Lookup HWinfo with overwrites etc. */ SPCODE_VERSIONS = 21, /* Report FW versions */ + SPCODE_READ_SFF_EEPROM = 22, /* Read module EEPROM */ }; struct nfp_nsp_dma_buf { @@ -965,3 +968,62 @@ const char *nfp_nsp_versions_get(enum nfp_nsp_versions id, bool flash, return (const char *)&buf[buf_off]; } + +static int +__nfp_nsp_module_eeprom(struct nfp_nsp *state, void *buf, unsigned int size) +{ + struct nfp_nsp_command_buf_arg module_eeprom = { + { + .code = SPCODE_READ_SFF_EEPROM, + .option = size, + }, + .in_buf = buf, + .in_size = size, + .out_buf = buf, + .out_size = size, + }; + + return nfp_nsp_command_buf(state, &module_eeprom); +} + +int nfp_nsp_read_module_eeprom(struct nfp_nsp *state, int eth_index, + unsigned int offset, void *data, + unsigned int len, unsigned int *read_len) +{ + struct eeprom_buf { + u8 metalen; + __le16 length; + __le16 offset; + __le16 readlen; + u8 eth_index; + u8 data[0]; + } __packed *buf; + int bufsz, ret; + + BUILD_BUG_ON(offsetof(struct eeprom_buf, data) % 8); + + /* Buffer must be large enough and rounded to the next block size. 
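+ * The NSP transfers EEPROM data in NSP_SFF_EEPROM_BLOCK_LEN (8 byte) + * blocks; metalen below is the header length in such blocks.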
*/ + bufsz = struct_size(buf, data, round_up(len, NSP_SFF_EEPROM_BLOCK_LEN)); + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + buf->metalen = + offsetof(struct eeprom_buf, data) / NSP_SFF_EEPROM_BLOCK_LEN; + buf->length = cpu_to_le16(len); + buf->offset = cpu_to_le16(offset); + buf->eth_index = eth_index; + + ret = __nfp_nsp_module_eeprom(state, buf, bufsz); + + *read_len = min_t(unsigned int, len, le16_to_cpu(buf->readlen)); + if (*read_len) + memcpy(data, buf->data, *read_len); + + if (!ret && *read_len < len) + ret = -EIO; + + kfree(buf); + + return ret; +} diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h index bd9c358c646f..22ee6985ee1c 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h @@ -22,6 +22,9 @@ int nfp_nsp_write_flash(struct nfp_nsp *state, const struct firmware *fw); int nfp_nsp_mac_reinit(struct nfp_nsp *state); int nfp_nsp_load_stored_fw(struct nfp_nsp *state); int nfp_nsp_hwinfo_lookup(struct nfp_nsp *state, void *buf, unsigned int size); +int nfp_nsp_read_module_eeprom(struct nfp_nsp *state, int eth_index, + unsigned int offset, void *data, + unsigned int len, unsigned int *read_len); static inline bool nfp_nsp_has_mac_reinit(struct nfp_nsp *state) { @@ -43,6 +46,11 @@ static inline bool nfp_nsp_has_versions(struct nfp_nsp *state) return nfp_nsp_get_abi_ver_minor(state) > 27; } +static inline bool nfp_nsp_has_read_module_eeprom(struct nfp_nsp *state) +{ + return nfp_nsp_get_abi_ver_minor(state) > 28; +} + enum nfp_eth_interface { NFP_INTERFACE_NONE = 0, NFP_INTERFACE_SFP = 1, diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c index a5bf46310f60..5ffaee9f53b1 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac.c +++ b/drivers/net/ethernet/pasemi/pasemi_mac.c @@ -1355,7 +1355,7 @@ static void pasemi_mac_queue_csdesc(const struct sk_buff *skb, const int nh_off = skb_network_offset(skb); const int nh_len = skb_network_header_len(skb); const int nfrags = skb_shinfo(skb)->nr_frags; - int cs_size, i, fill, hdr, cpyhdr, evt; + int cs_size, i, fill, hdr, evt; dma_addr_t csdma; fund = XCT_FUN_ST | XCT_FUN_RR_8BRES | @@ -1396,7 +1396,6 @@ static void pasemi_mac_queue_csdesc(const struct sk_buff *skb, fill++; /* Copy the result into the TCP packet */ - cpyhdr = fill; CS_DESC(csring, fill++) = XCT_FUN_O | XCT_FUN_FUN(csring->fun) | XCT_FUN_LLEN(2) | XCT_FUN_SE; CS_DESC(csring, fill++) = XCT_PTR_LEN(2) | XCT_PTR_ADDR(cs_dest) | XCT_PTR_T; @@ -1839,7 +1838,7 @@ static void __exit pasemi_mac_cleanup_module(void) pci_unregister_driver(&pasemi_mac_driver); } -int pasemi_mac_init_module(void) +static int pasemi_mac_init_module(void) { int err; diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 127c89b22ef0..c5e96ce20f59 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -496,6 +496,9 @@ enum qed_mf_mode_bit { /* Allow DSCP to TC mapping */ QED_MF_DSCP_TO_TC_MAP, + + /* Do not insert a vlan tag with id 0 */ + QED_MF_DONT_ADD_VLAN0_TAG, }; enum qed_ufp_mode { diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index 69966dfc6e3d..5c6a276f69ac 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -204,9 +204,7 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data, else 
p_data->arr[type].update = DONT_UPDATE_DCB_DSCP; - /* Do not add vlan tag 0 when DCB is enabled and port in UFP/OV mode */ - if ((test_bit(QED_MF_8021Q_TAGGING, &p_hwfn->cdev->mf_bits) || - test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits))) + if (test_bit(QED_MF_DONT_ADD_VLAN0_TAG, &p_hwfn->cdev->mf_bits)) p_data->arr[type].dont_add_vlan0 = true; /* QM reconf data */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 866cdc86a3f2..fccdb06fc5c5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -3140,12 +3140,14 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS) | BIT(QED_MF_LLH_PROTO_CLSS) | BIT(QED_MF_UFP_SPECIFIC) | - BIT(QED_MF_8021Q_TAGGING); + BIT(QED_MF_8021Q_TAGGING) | + BIT(QED_MF_DONT_ADD_VLAN0_TAG); break; case NVM_CFG1_GLOB_MF_MODE_BD: cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS) | BIT(QED_MF_LLH_PROTO_CLSS) | - BIT(QED_MF_8021AD_TAGGING); + BIT(QED_MF_8021AD_TAGGING) | + BIT(QED_MF_DONT_ADD_VLAN0_TAG); break; case NVM_CFG1_GLOB_MF_MODE_NPAR1_0: cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) | diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 63a78162cfaf..92fe226980fd 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -498,8 +498,7 @@ struct qede_reload_args { /* Datapath functions definition */ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev); u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb, - struct net_device *sb_dev, - select_queue_fallback_t fallback); + struct net_device *sb_dev); netdev_features_t qede_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features); diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index b4c8949933f1..f0a2ca23f63a 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -652,9 +652,9 @@ static void qede_get_drvinfo(struct net_device *ndev, { char mfw[ETHTOOL_FWVERS_LEN], storm[ETHTOOL_FWVERS_LEN]; struct qede_dev *edev = netdev_priv(ndev); + char mbi[ETHTOOL_FWVERS_LEN]; strlcpy(info->driver, "qede", sizeof(info->driver)); - strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); snprintf(storm, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d", edev->dev_info.common.fw_major, @@ -668,13 +668,27 @@ static void qede_get_drvinfo(struct net_device *ndev, (edev->dev_info.common.mfw_rev >> 8) & 0xFF, edev->dev_info.common.mfw_rev & 0xFF); - if ((strlen(storm) + strlen(mfw) + strlen("mfw storm ")) < - sizeof(info->fw_version)) { + if ((strlen(storm) + strlen(DRV_MODULE_VERSION) + strlen("[storm] ")) < + sizeof(info->version)) + snprintf(info->version, sizeof(info->version), + "%s [storm %s]", DRV_MODULE_VERSION, storm); + else + snprintf(info->version, sizeof(info->version), + "%s %s", DRV_MODULE_VERSION, storm); + + if (edev->dev_info.common.mbi_version) { + snprintf(mbi, ETHTOOL_FWVERS_LEN, "%d.%d.%d", + (edev->dev_info.common.mbi_version & + QED_MBI_VERSION_2_MASK) >> QED_MBI_VERSION_2_OFFSET, + (edev->dev_info.common.mbi_version & + QED_MBI_VERSION_1_MASK) >> QED_MBI_VERSION_1_OFFSET, + (edev->dev_info.common.mbi_version & + QED_MBI_VERSION_0_MASK) >> QED_MBI_VERSION_0_OFFSET); snprintf(info->fw_version, sizeof(info->fw_version), - "mfw %s storm %s", mfw, storm); + "mbi %s [mfw %s]", mbi, 
mfw); } else { snprintf(info->fw_version, sizeof(info->fw_version), - "%s %s", mfw, storm); + "mfw %s", mfw); } strlcpy(info->bus_info, pci_name(edev->pdev), sizeof(info->bus_info)); diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c index 31b046e24565..954015d2011a 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c @@ -1665,12 +1665,12 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev) txq->tx_db.data.bd_prod = cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl)); - if (!skb->xmit_more || netif_xmit_stopped(netdev_txq)) + if (!netdev_xmit_more() || netif_xmit_stopped(netdev_txq)) qede_update_tx_producer(txq); if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1))) { - if (skb->xmit_more) + if (netdev_xmit_more()) qede_update_tx_producer(txq); netif_tx_stop_queue(netdev_txq); @@ -1696,8 +1696,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev) } u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb, - struct net_device *sb_dev, - select_queue_fallback_t fallback) + struct net_device *sb_dev) { struct qede_dev *edev = netdev_priv(dev); int total_txq; @@ -1705,7 +1704,7 @@ u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb, total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc; return QEDE_TSS_COUNT(edev) ? - fallback(dev, skb, NULL) % total_txq : 0; + netdev_pick_tx(dev, skb, NULL) % total_txq : 0; } /* 8B udp header + 8B base tunnel header + 32B option length */ diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c index 04aa592f35c3..ad335bca3273 100644 --- a/drivers/net/ethernet/rdc/r6040.c +++ b/drivers/net/ethernet/rdc/r6040.c @@ -840,7 +840,7 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb, skb_tx_timestamp(skb); /* Trigger the MAC to check the TX descriptor */ - if (!skb->xmit_more || netif_queue_stopped(dev)) + if (!netdev_xmit_more() || netif_queue_stopped(dev)) iowrite16(TM2TX, ioaddr + MTPR); lp->tx_insert_ptr = descptr->vndescp; diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index ed651dde6ef9..122b9bf9dc70 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -491,10 +491,6 @@ enum rtl_register_content { PCIDAC = (1 << 4), PCIMulRW = (1 << 3), #define INTT_MASK GENMASK(1, 0) - INTT_0 = 0x0000, // 8168 - INTT_1 = 0x0001, // 8168 - INTT_2 = 0x0002, // 8168 - INTT_3 = 0x0003, // 8168 /* rtl8169_PHYstatus */ TBI_Enable = 0x80, @@ -703,6 +699,8 @@ struct rtl8169_private { u32 ocp_base; }; +typedef void (*rtl_generic_fct)(struct rtl8169_private *tp); + MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>"); MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver"); module_param_named(debug, debug.msg_enable, int, 0); @@ -1067,8 +1065,8 @@ DECLARE_RTL_COND(rtl_eriar_cond) return RTL_R32(tp, ERIAR) & ERIAR_FLAG; } -static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask, - u32 val, int type) +static void _rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask, + u32 val, int type) { BUG_ON((addr & 3) || (mask == 0)); RTL_W32(tp, ERIDR, val); @@ -1077,7 +1075,13 @@ static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask, rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100); } -static u32 rtl_eri_read(struct rtl8169_private *tp, int addr, int type) +static void rtl_eri_write(struct rtl8169_private *tp, 
int addr, u32 mask, + u32 val) +{ + _rtl_eri_write(tp, addr, mask, val, ERIAR_EXGMAC); +} + +static u32 _rtl_eri_read(struct rtl8169_private *tp, int addr, int type) { RTL_W32(tp, ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr); @@ -1085,13 +1089,30 @@ static u32 rtl_eri_read(struct rtl8169_private *tp, int addr, int type) RTL_R32(tp, ERIDR) : ~0; } +static u32 rtl_eri_read(struct rtl8169_private *tp, int addr) +{ + return _rtl_eri_read(tp, addr, ERIAR_EXGMAC); +} + static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p, - u32 m, int type) + u32 m) { u32 val; - val = rtl_eri_read(tp, addr, type); - rtl_eri_write(tp, addr, mask, (val & ~m) | p, type); + val = rtl_eri_read(tp, addr); + rtl_eri_write(tp, addr, mask, (val & ~m) | p); +} + +static void rtl_eri_set_bits(struct rtl8169_private *tp, int addr, u32 mask, + u32 p) +{ + rtl_w0w1_eri(tp, addr, mask, p, 0); +} + +static void rtl_eri_clear_bits(struct rtl8169_private *tp, int addr, u32 mask, + u32 m) +{ + rtl_w0w1_eri(tp, addr, mask, 0, m); } static u32 r8168dp_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg) @@ -1103,7 +1124,7 @@ static u32 r8168dp_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg) static u32 r8168ep_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg) { - return rtl_eri_read(tp, reg, ERIAR_OOB); + return _rtl_eri_read(tp, reg, ERIAR_OOB); } static void r8168dp_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, @@ -1117,13 +1138,13 @@ static void r8168dp_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, static void r8168ep_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data) { - rtl_eri_write(tp, reg, ((u32)mask & 0x0f) << ERIAR_MASK_SHIFT, - data, ERIAR_OOB); + _rtl_eri_write(tp, reg, ((u32)mask & 0x0f) << ERIAR_MASK_SHIFT, + data, ERIAR_OOB); } static void r8168dp_oob_notify(struct rtl8169_private *tp, u8 cmd) { - rtl_eri_write(tp, 0xe8, ERIAR_MASK_0001, cmd, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xe8, ERIAR_MASK_0001, cmd); r8168dp_ocp_write(tp, 0x1, 0x30, 0x00000001); } @@ -1259,6 +1280,12 @@ static bool r8168_check_dash(struct rtl8169_private *tp) } } +static void rtl_reset_packet_filter(struct rtl8169_private *tp) +{ + rtl_eri_clear_bits(tp, 0xdc, ERIAR_MASK_0001, BIT(0)); + rtl_eri_set_bits(tp, 0xdc, ERIAR_MASK_0001, BIT(0)); +} + struct exgmac_reg { u16 addr; u16 mask; @@ -1269,7 +1296,7 @@ static void rtl_write_exgmac_batch(struct rtl8169_private *tp, const struct exgmac_reg *r, int len) { while (len-- > 0) { - rtl_eri_write(tp, r->addr, r->mask, r->val, ERIAR_EXGMAC); + rtl_eri_write(tp, r->addr, r->mask, r->val); r++; } } @@ -1327,48 +1354,31 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp) if (tp->mac_version == RTL_GIGA_MAC_VER_34 || tp->mac_version == RTL_GIGA_MAC_VER_38) { if (phydev->speed == SPEED_1000) { - rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011, - ERIAR_EXGMAC); - rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005, - ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005); } else if (phydev->speed == SPEED_100) { - rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f, - ERIAR_EXGMAC); - rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005, - ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005); } else { - rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f, - ERIAR_EXGMAC); - rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f, - ERIAR_EXGMAC); + rtl_eri_write(tp, 
0x1bc, ERIAR_MASK_1111, 0x0000001f); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f); } - /* Reset packet filter */ - rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, - ERIAR_EXGMAC); - rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, - ERIAR_EXGMAC); + rtl_reset_packet_filter(tp); } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 || tp->mac_version == RTL_GIGA_MAC_VER_36) { if (phydev->speed == SPEED_1000) { - rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011, - ERIAR_EXGMAC); - rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005, - ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005); } else { - rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f, - ERIAR_EXGMAC); - rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f, - ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f); } } else if (tp->mac_version == RTL_GIGA_MAC_VER_37) { if (phydev->speed == SPEED_10) { - rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02, - ERIAR_EXGMAC); - rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060, - ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060a); } else { - rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, - ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000); } } } @@ -1409,19 +1419,11 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51: tmp = ARRAY_SIZE(cfg) - 1; if (wolopts & WAKE_MAGIC) - rtl_w0w1_eri(tp, - 0x0dc, - ERIAR_MASK_0100, - MagicPacket_v2, - 0x0000, - ERIAR_EXGMAC); + rtl_eri_set_bits(tp, 0x0dc, ERIAR_MASK_0100, + MagicPacket_v2); else - rtl_w0w1_eri(tp, - 0x0dc, - ERIAR_MASK_0100, - 0x0000, - MagicPacket_v2, - ERIAR_EXGMAC); + rtl_eri_clear_bits(tp, 0x0dc, ERIAR_MASK_0100, + MagicPacket_v2); break; default: tmp = ARRAY_SIZE(cfg); @@ -2564,7 +2566,7 @@ static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val) static void rtl8168_config_eee_mac(struct rtl8169_private *tp) { - rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0003, 0x0000, ERIAR_EXGMAC); + rtl_eri_set_bits(tp, 0x1b0, ERIAR_MASK_1111, 0x0003); } static void rtl8168f_config_eee_phy(struct rtl8169_private *tp) @@ -3961,7 +3963,7 @@ static void rtl8402_hw_phy_config(struct rtl8169_private *tp) rtl_apply_firmware(tp); /* EEE setting */ - rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000); rtl_writephy(tp, 0x1f, 0x0004); rtl_writephy(tp, 0x10, 0x401f); rtl_writephy(tp, 0x19, 0x7030); @@ -3984,139 +3986,73 @@ static void rtl8106e_hw_phy_config(struct rtl8169_private *tp) rtl_apply_firmware(tp); - rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000); rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); - rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000); } static void rtl_hw_phy_config(struct net_device *dev) { + static const rtl_generic_fct phy_configs[] = { + /* PCI devices. */ + [RTL_GIGA_MAC_VER_01] = NULL, + [RTL_GIGA_MAC_VER_02] = rtl8169s_hw_phy_config, + [RTL_GIGA_MAC_VER_03] = rtl8169s_hw_phy_config, + [RTL_GIGA_MAC_VER_04] = rtl8169sb_hw_phy_config, + [RTL_GIGA_MAC_VER_05] = rtl8169scd_hw_phy_config, + [RTL_GIGA_MAC_VER_06] = rtl8169sce_hw_phy_config, + /* PCI-E devices. 
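+ * (Indexed directly by tp->mac_version; a NULL entry means that + * chip version needs no PHY configuration.)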
*/ + [RTL_GIGA_MAC_VER_07] = rtl8102e_hw_phy_config, + [RTL_GIGA_MAC_VER_08] = rtl8102e_hw_phy_config, + [RTL_GIGA_MAC_VER_09] = rtl8102e_hw_phy_config, + [RTL_GIGA_MAC_VER_10] = NULL, + [RTL_GIGA_MAC_VER_11] = rtl8168bb_hw_phy_config, + [RTL_GIGA_MAC_VER_12] = rtl8168bef_hw_phy_config, + [RTL_GIGA_MAC_VER_13] = NULL, + [RTL_GIGA_MAC_VER_14] = NULL, + [RTL_GIGA_MAC_VER_15] = NULL, + [RTL_GIGA_MAC_VER_16] = NULL, + [RTL_GIGA_MAC_VER_17] = rtl8168bef_hw_phy_config, + [RTL_GIGA_MAC_VER_18] = rtl8168cp_1_hw_phy_config, + [RTL_GIGA_MAC_VER_19] = rtl8168c_1_hw_phy_config, + [RTL_GIGA_MAC_VER_20] = rtl8168c_2_hw_phy_config, + [RTL_GIGA_MAC_VER_21] = rtl8168c_3_hw_phy_config, + [RTL_GIGA_MAC_VER_22] = rtl8168c_4_hw_phy_config, + [RTL_GIGA_MAC_VER_23] = rtl8168cp_2_hw_phy_config, + [RTL_GIGA_MAC_VER_24] = rtl8168cp_2_hw_phy_config, + [RTL_GIGA_MAC_VER_25] = rtl8168d_1_hw_phy_config, + [RTL_GIGA_MAC_VER_26] = rtl8168d_2_hw_phy_config, + [RTL_GIGA_MAC_VER_27] = rtl8168d_3_hw_phy_config, + [RTL_GIGA_MAC_VER_28] = rtl8168d_4_hw_phy_config, + [RTL_GIGA_MAC_VER_29] = rtl8105e_hw_phy_config, + [RTL_GIGA_MAC_VER_30] = rtl8105e_hw_phy_config, + [RTL_GIGA_MAC_VER_31] = NULL, + [RTL_GIGA_MAC_VER_32] = rtl8168e_1_hw_phy_config, + [RTL_GIGA_MAC_VER_33] = rtl8168e_1_hw_phy_config, + [RTL_GIGA_MAC_VER_34] = rtl8168e_2_hw_phy_config, + [RTL_GIGA_MAC_VER_35] = rtl8168f_1_hw_phy_config, + [RTL_GIGA_MAC_VER_36] = rtl8168f_2_hw_phy_config, + [RTL_GIGA_MAC_VER_37] = rtl8402_hw_phy_config, + [RTL_GIGA_MAC_VER_38] = rtl8411_hw_phy_config, + [RTL_GIGA_MAC_VER_39] = rtl8106e_hw_phy_config, + [RTL_GIGA_MAC_VER_40] = rtl8168g_1_hw_phy_config, + [RTL_GIGA_MAC_VER_41] = NULL, + [RTL_GIGA_MAC_VER_42] = rtl8168g_2_hw_phy_config, + [RTL_GIGA_MAC_VER_43] = rtl8168g_2_hw_phy_config, + [RTL_GIGA_MAC_VER_44] = rtl8168g_2_hw_phy_config, + [RTL_GIGA_MAC_VER_45] = rtl8168h_1_hw_phy_config, + [RTL_GIGA_MAC_VER_46] = rtl8168h_2_hw_phy_config, + [RTL_GIGA_MAC_VER_47] = rtl8168h_1_hw_phy_config, + [RTL_GIGA_MAC_VER_48] = rtl8168h_2_hw_phy_config, + [RTL_GIGA_MAC_VER_49] = rtl8168ep_1_hw_phy_config, + [RTL_GIGA_MAC_VER_50] = rtl8168ep_2_hw_phy_config, + [RTL_GIGA_MAC_VER_51] = rtl8168ep_2_hw_phy_config, + }; struct rtl8169_private *tp = netdev_priv(dev); - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_01: - break; - case RTL_GIGA_MAC_VER_02: - case RTL_GIGA_MAC_VER_03: - rtl8169s_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_04: - rtl8169sb_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_05: - rtl8169scd_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_06: - rtl8169sce_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_07: - case RTL_GIGA_MAC_VER_08: - case RTL_GIGA_MAC_VER_09: - rtl8102e_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_11: - rtl8168bb_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_12: - rtl8168bef_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_17: - rtl8168bef_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_18: - rtl8168cp_1_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_19: - rtl8168c_1_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_20: - rtl8168c_2_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_21: - rtl8168c_3_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_22: - rtl8168c_4_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_23: - case RTL_GIGA_MAC_VER_24: - rtl8168cp_2_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_25: - rtl8168d_1_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_26: - rtl8168d_2_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_27: - 
rtl8168d_3_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_28: - rtl8168d_4_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_29: - case RTL_GIGA_MAC_VER_30: - rtl8105e_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_31: - /* None. */ - break; - case RTL_GIGA_MAC_VER_32: - case RTL_GIGA_MAC_VER_33: - rtl8168e_1_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_34: - rtl8168e_2_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_35: - rtl8168f_1_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_36: - rtl8168f_2_hw_phy_config(tp); - break; - - case RTL_GIGA_MAC_VER_37: - rtl8402_hw_phy_config(tp); - break; - - case RTL_GIGA_MAC_VER_38: - rtl8411_hw_phy_config(tp); - break; - - case RTL_GIGA_MAC_VER_39: - rtl8106e_hw_phy_config(tp); - break; - - case RTL_GIGA_MAC_VER_40: - rtl8168g_1_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_42: - case RTL_GIGA_MAC_VER_43: - case RTL_GIGA_MAC_VER_44: - rtl8168g_2_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_45: - case RTL_GIGA_MAC_VER_47: - rtl8168h_1_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_46: - case RTL_GIGA_MAC_VER_48: - rtl8168h_2_hw_phy_config(tp); - break; - - case RTL_GIGA_MAC_VER_49: - rtl8168ep_1_hw_phy_config(tp); - break; - case RTL_GIGA_MAC_VER_50: - case RTL_GIGA_MAC_VER_51: - rtl8168ep_2_hw_phy_config(tp); - break; - - case RTL_GIGA_MAC_VER_41: - default: - break; - } + if (phy_configs[tp->mac_version]) + phy_configs[tp->mac_version](tp); } static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag) @@ -4147,14 +4083,6 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) phy_speed_up(tp->phydev); genphy_soft_reset(tp->phydev); - - /* It was reported that several chips end up with 10MBit/Half on a - * 1GBit link after resuming from S3. For whatever reason the PHY on - * these chips doesn't properly start a renegotiation when soft-reset. - * Explicitly requesting a renegotiation fixes this. - */ - if (tp->phydev->autoneg == AUTONEG_ENABLE) - phy_restart_aneg(tp->phydev); } static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) @@ -4283,8 +4211,7 @@ static void r8168_pll_power_down(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_40: case RTL_GIGA_MAC_VER_41: case RTL_GIGA_MAC_VER_49: - rtl_w0w1_eri(tp, 0x1a8, ERIAR_MASK_1111, 0x00000000, - 0xfc000000, ERIAR_EXGMAC); + rtl_eri_clear_bits(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000); RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80); break; } @@ -4312,8 +4239,7 @@ static void r8168_pll_power_up(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_41: case RTL_GIGA_MAC_VER_49: RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0); - rtl_w0w1_eri(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000, - 0x00000000, ERIAR_EXGMAC); + rtl_eri_set_bits(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000); break; } @@ -4703,6 +4629,8 @@ static void rtl_hw_start(struct rtl8169_private *tp) rtl_set_rx_tx_desc_registers(tp); rtl_lock_config_regs(tp); + /* disable interrupt coalescing */ + RTL_W16(tp, IntrMitigate, 0x0000); /* Initially a 10 us delay. Turned it into a PCI commit. - FR */ RTL_R8(tp, IntrMask); RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); @@ -4735,12 +4663,6 @@ static void rtl_hw_start_8169(struct rtl8169_private *tp) rtl8169_set_magic_reg(tp, tp->mac_version); - /* - * Undocumented corner. 
Supposedly: - * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets - */ - RTL_W16(tp, IntrMitigate, 0x0000); - RTL_W32(tp, RxMissed, 0); } @@ -5068,14 +4990,14 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp) if (tp->dev->mtu <= ETH_DATA_LEN) rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); - rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC); - rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); - rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002); + rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006); + rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060); + rtl_eri_set_bits(tp, 0x1b0, ERIAR_MASK_0001, BIT(4)); + rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00); RTL_W8(tp, MaxTxPacketSize, EarlySize); @@ -5101,16 +5023,15 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp) rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); - rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); - rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); - rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); - rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); - rtl_w0w1_eri(tp, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002); + rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006); + rtl_reset_packet_filter(tp); + rtl_eri_set_bits(tp, 0x1b0, ERIAR_MASK_0001, BIT(4)); + rtl_eri_set_bits(tp, 0x1d0, ERIAR_MASK_0001, BIT(4)); + rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060); RTL_W8(tp, MaxTxPacketSize, EarlySize); @@ -5137,7 +5058,7 @@ static void rtl_hw_start_8168f_1(struct rtl8169_private *tp) rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1)); - rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00); /* Adjust EEE LED frequency */ RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07); @@ -5157,37 +5078,36 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp) rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1)); - rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, ERIAR_EXGMAC); + rtl_eri_set_bits(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00); } static void rtl_hw_start_8168g(struct rtl8169_private *tp) { - rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC); - 
rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002); + rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48); + rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006); rtl_set_def_aspm_entry_latency(tp); rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); - rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); - rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); - rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f, ERIAR_EXGMAC); + rtl_reset_packet_filter(tp); + rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f); RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); RTL_W8(tp, MaxTxPacketSize, EarlySize); - rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); /* Adjust EEE LED frequency */ RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07); rtl8168_config_eee_mac(tp); - rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC); - rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06); + rtl_eri_clear_bits(tp, 0x1b0, ERIAR_MASK_0011, BIT(12)); rtl_pcie_state_l2l3_disable(tp); } @@ -5261,29 +5181,28 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) rtl_hw_aspm_clkreq_enable(tp, false); rtl_ephy_init(tp, e_info_8168h_1, ARRAY_SIZE(e_info_8168h_1)); - rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002); + rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48); + rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006); rtl_set_def_aspm_entry_latency(tp); rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); - rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); - rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); + rtl_reset_packet_filter(tp); - rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_1111, 0x0010, 0x00, ERIAR_EXGMAC); + rtl_eri_set_bits(tp, 0xdc, ERIAR_MASK_1111, BIT(4)); - rtl_w0w1_eri(tp, 0xd4, ERIAR_MASK_1111, 0x1f00, 0x00, ERIAR_EXGMAC); + rtl_eri_set_bits(tp, 0xd4, ERIAR_MASK_1111, 0x1f00); - rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87, ERIAR_EXGMAC); + rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87); RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); RTL_W8(tp, MaxTxPacketSize, EarlySize); - rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); /* Adjust EEE LED frequency */ RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07); @@ -5295,7 +5214,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN); - rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC); + rtl_eri_clear_bits(tp, 0x1b0, ERIAR_MASK_0011, BIT(12)); rtl_pcie_state_l2l3_disable(tp); @@ -5345,34 +5264,33 @@ static void rtl_hw_start_8168ep(struct 
rtl8169_private *tp) { rtl8168ep_stop_cmac(tp); - rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x2f, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x5f, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002); + rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x2f); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x5f); + rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006); rtl_set_def_aspm_entry_latency(tp); rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); - rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); - rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); + rtl_reset_packet_filter(tp); - rtl_w0w1_eri(tp, 0xd4, ERIAR_MASK_1111, 0x1f80, 0x00, ERIAR_EXGMAC); + rtl_eri_set_bits(tp, 0xd4, ERIAR_MASK_1111, 0x1f80); - rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87, ERIAR_EXGMAC); + rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87); RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); RTL_W8(tp, MaxTxPacketSize, EarlySize); - rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); /* Adjust EEE LED frequency */ RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07); rtl8168_config_eee_mac(tp); - rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06); RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN); @@ -5453,128 +5371,6 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp) rtl_hw_aspm_clkreq_enable(tp, true); } -static void rtl_hw_start_8168(struct rtl8169_private *tp) -{ - RTL_W8(tp, MaxTxPacketSize, TxPacketMax); - - tp->cp_cmd &= ~INTT_MASK; - tp->cp_cmd |= PktCntrDisable | INTT_1; - RTL_W16(tp, CPlusCmd, tp->cp_cmd); - - RTL_W16(tp, IntrMitigate, 0x5100); - - /* Work around for RxFIFO overflow. 
*/ - if (tp->mac_version == RTL_GIGA_MAC_VER_11) { - tp->irq_mask |= RxFIFOOver; - tp->irq_mask &= ~RxOverflow; - } - - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_11: - rtl_hw_start_8168bb(tp); - break; - - case RTL_GIGA_MAC_VER_12: - case RTL_GIGA_MAC_VER_17: - rtl_hw_start_8168bef(tp); - break; - - case RTL_GIGA_MAC_VER_18: - rtl_hw_start_8168cp_1(tp); - break; - - case RTL_GIGA_MAC_VER_19: - rtl_hw_start_8168c_1(tp); - break; - - case RTL_GIGA_MAC_VER_20: - rtl_hw_start_8168c_2(tp); - break; - - case RTL_GIGA_MAC_VER_21: - rtl_hw_start_8168c_3(tp); - break; - - case RTL_GIGA_MAC_VER_22: - rtl_hw_start_8168c_4(tp); - break; - - case RTL_GIGA_MAC_VER_23: - rtl_hw_start_8168cp_2(tp); - break; - - case RTL_GIGA_MAC_VER_24: - rtl_hw_start_8168cp_3(tp); - break; - - case RTL_GIGA_MAC_VER_25: - case RTL_GIGA_MAC_VER_26: - case RTL_GIGA_MAC_VER_27: - rtl_hw_start_8168d(tp); - break; - - case RTL_GIGA_MAC_VER_28: - rtl_hw_start_8168d_4(tp); - break; - - case RTL_GIGA_MAC_VER_31: - rtl_hw_start_8168dp(tp); - break; - - case RTL_GIGA_MAC_VER_32: - case RTL_GIGA_MAC_VER_33: - rtl_hw_start_8168e_1(tp); - break; - case RTL_GIGA_MAC_VER_34: - rtl_hw_start_8168e_2(tp); - break; - - case RTL_GIGA_MAC_VER_35: - case RTL_GIGA_MAC_VER_36: - rtl_hw_start_8168f_1(tp); - break; - - case RTL_GIGA_MAC_VER_38: - rtl_hw_start_8411(tp); - break; - - case RTL_GIGA_MAC_VER_40: - case RTL_GIGA_MAC_VER_41: - rtl_hw_start_8168g_1(tp); - break; - case RTL_GIGA_MAC_VER_42: - rtl_hw_start_8168g_2(tp); - break; - - case RTL_GIGA_MAC_VER_44: - rtl_hw_start_8411_2(tp); - break; - - case RTL_GIGA_MAC_VER_45: - case RTL_GIGA_MAC_VER_46: - rtl_hw_start_8168h_1(tp); - break; - - case RTL_GIGA_MAC_VER_49: - rtl_hw_start_8168ep_1(tp); - break; - - case RTL_GIGA_MAC_VER_50: - rtl_hw_start_8168ep_2(tp); - break; - - case RTL_GIGA_MAC_VER_51: - rtl_hw_start_8168ep_3(tp); - break; - - default: - netif_err(tp, drv, tp->dev, - "unknown chipset (mac_version = %d)\n", - tp->mac_version); - break; - } -} - static void rtl_hw_start_8102e_1(struct rtl8169_private *tp) { static const struct ephy_info e_info_8102e_1[] = { @@ -5674,13 +5470,12 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp) rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); - rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC); - rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); - rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); - rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); - rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00000002); + rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00000006); + rtl_reset_packet_filter(tp); + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); + rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00); rtl_pcie_state_l2l3_disable(tp); } @@ -5700,6 +5495,73 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp) rtl_hw_aspm_clkreq_enable(tp, true); } +static void rtl_hw_config(struct rtl8169_private *tp) +{ + static const rtl_generic_fct hw_configs[] = { + [RTL_GIGA_MAC_VER_07] = rtl_hw_start_8102e_1, + [RTL_GIGA_MAC_VER_08] = rtl_hw_start_8102e_3, + [RTL_GIGA_MAC_VER_09] = rtl_hw_start_8102e_2, + [RTL_GIGA_MAC_VER_10] = NULL, + [RTL_GIGA_MAC_VER_11] = rtl_hw_start_8168bb, + [RTL_GIGA_MAC_VER_12] = 
rtl_hw_start_8168bef, + [RTL_GIGA_MAC_VER_13] = NULL, + [RTL_GIGA_MAC_VER_14] = NULL, + [RTL_GIGA_MAC_VER_15] = NULL, + [RTL_GIGA_MAC_VER_16] = NULL, + [RTL_GIGA_MAC_VER_17] = rtl_hw_start_8168bef, + [RTL_GIGA_MAC_VER_18] = rtl_hw_start_8168cp_1, + [RTL_GIGA_MAC_VER_19] = rtl_hw_start_8168c_1, + [RTL_GIGA_MAC_VER_20] = rtl_hw_start_8168c_2, + [RTL_GIGA_MAC_VER_21] = rtl_hw_start_8168c_3, + [RTL_GIGA_MAC_VER_22] = rtl_hw_start_8168c_4, + [RTL_GIGA_MAC_VER_23] = rtl_hw_start_8168cp_2, + [RTL_GIGA_MAC_VER_24] = rtl_hw_start_8168cp_3, + [RTL_GIGA_MAC_VER_25] = rtl_hw_start_8168d, + [RTL_GIGA_MAC_VER_26] = rtl_hw_start_8168d, + [RTL_GIGA_MAC_VER_27] = rtl_hw_start_8168d, + [RTL_GIGA_MAC_VER_28] = rtl_hw_start_8168d_4, + [RTL_GIGA_MAC_VER_29] = rtl_hw_start_8105e_1, + [RTL_GIGA_MAC_VER_30] = rtl_hw_start_8105e_2, + [RTL_GIGA_MAC_VER_31] = rtl_hw_start_8168dp, + [RTL_GIGA_MAC_VER_32] = rtl_hw_start_8168e_1, + [RTL_GIGA_MAC_VER_33] = rtl_hw_start_8168e_1, + [RTL_GIGA_MAC_VER_34] = rtl_hw_start_8168e_2, + [RTL_GIGA_MAC_VER_35] = rtl_hw_start_8168f_1, + [RTL_GIGA_MAC_VER_36] = rtl_hw_start_8168f_1, + [RTL_GIGA_MAC_VER_37] = rtl_hw_start_8402, + [RTL_GIGA_MAC_VER_38] = rtl_hw_start_8411, + [RTL_GIGA_MAC_VER_39] = rtl_hw_start_8106, + [RTL_GIGA_MAC_VER_40] = rtl_hw_start_8168g_1, + [RTL_GIGA_MAC_VER_41] = rtl_hw_start_8168g_1, + [RTL_GIGA_MAC_VER_42] = rtl_hw_start_8168g_2, + [RTL_GIGA_MAC_VER_43] = rtl_hw_start_8168g_2, + [RTL_GIGA_MAC_VER_44] = rtl_hw_start_8411_2, + [RTL_GIGA_MAC_VER_45] = rtl_hw_start_8168h_1, + [RTL_GIGA_MAC_VER_46] = rtl_hw_start_8168h_1, + [RTL_GIGA_MAC_VER_47] = rtl_hw_start_8168h_1, + [RTL_GIGA_MAC_VER_48] = rtl_hw_start_8168h_1, + [RTL_GIGA_MAC_VER_49] = rtl_hw_start_8168ep_1, + [RTL_GIGA_MAC_VER_50] = rtl_hw_start_8168ep_2, + [RTL_GIGA_MAC_VER_51] = rtl_hw_start_8168ep_3, + }; + + if (hw_configs[tp->mac_version]) + hw_configs[tp->mac_version](tp); +} + +static void rtl_hw_start_8168(struct rtl8169_private *tp) +{ + RTL_W8(tp, MaxTxPacketSize, TxPacketMax); + + /* Workaround for RxFIFO overflow. 
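+ * RTL_GIGA_MAC_VER_11 reports it through RxFIFOOver rather than + * RxOverflow, so the two bits are swapped in the interrupt mask.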
*/ + if (tp->mac_version == RTL_GIGA_MAC_VER_11) { + tp->irq_mask |= RxFIFOOver; + tp->irq_mask &= ~RxOverflow; + } + + rtl_hw_config(tp); +} + static void rtl_hw_start_8101(struct rtl8169_private *tp) { if (tp->mac_version >= RTL_GIGA_MAC_VER_30) @@ -5715,43 +5577,7 @@ static void rtl_hw_start_8101(struct rtl8169_private *tp) tp->cp_cmd &= CPCMD_QUIRK_MASK; RTL_W16(tp, CPlusCmd, tp->cp_cmd); - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_07: - rtl_hw_start_8102e_1(tp); - break; - - case RTL_GIGA_MAC_VER_08: - rtl_hw_start_8102e_3(tp); - break; - - case RTL_GIGA_MAC_VER_09: - rtl_hw_start_8102e_2(tp); - break; - - case RTL_GIGA_MAC_VER_29: - rtl_hw_start_8105e_1(tp); - break; - case RTL_GIGA_MAC_VER_30: - rtl_hw_start_8105e_2(tp); - break; - - case RTL_GIGA_MAC_VER_37: - rtl_hw_start_8402(tp); - break; - - case RTL_GIGA_MAC_VER_39: - rtl_hw_start_8106(tp); - break; - case RTL_GIGA_MAC_VER_43: - rtl_hw_start_8168g_2(tp); - break; - case RTL_GIGA_MAC_VER_47: - case RTL_GIGA_MAC_VER_48: - rtl_hw_start_8168h_1(tp); - break; - } - - RTL_W16(tp, IntrMitigate, 0x0000); + rtl_hw_config(tp); } static int rtl8169_change_mtu(struct net_device *dev, int new_mtu) @@ -6268,7 +6094,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, */ smp_mb(); if (rtl_tx_slots_avail(tp, MAX_SKB_FRAGS)) - netif_wake_queue(dev); + netif_start_queue(dev); } return NETDEV_TX_OK; @@ -6543,10 +6369,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags); } - if (status & (RTL_EVENT_NAPI | LinkChg)) { - rtl_irq_disable(tp); - napi_schedule_irqoff(&tp->napi); - } + rtl_irq_disable(tp); + napi_schedule_irqoff(&tp->napi); out: rtl_ack_events(tp, status); @@ -7123,13 +6947,13 @@ static void rtl_read_mac_address(struct rtl8169_private *tp, switch (tp->mac_version) { case RTL_GIGA_MAC_VER_35 ... RTL_GIGA_MAC_VER_38: case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51: - value = rtl_eri_read(tp, 0xe0, ERIAR_EXGMAC); + value = rtl_eri_read(tp, 0xe0); mac_addr[0] = (value >> 0) & 0xff; mac_addr[1] = (value >> 8) & 0xff; mac_addr[2] = (value >> 16) & 0xff; mac_addr[3] = (value >> 24) & 0xff; - value = rtl_eri_read(tp, 0xe4, ERIAR_EXGMAC); + value = rtl_eri_read(tp, 0xe4); mac_addr[4] = (value >> 0) & 0xff; mac_addr[5] = (value >> 8) & 0xff; break; diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 8154b38c08f7..9618c4881c83 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -1615,8 +1615,7 @@ drop: } static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb, - struct net_device *sb_dev, - select_queue_fallback_t fallback) + struct net_device *sb_dev) { /* If skb needs TX timestamp, it is handled in network control queue */ return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? 
RAVB_NC : @@ -1970,6 +1969,13 @@ static void ravb_set_config_mode(struct net_device *ndev) } } +static const struct soc_device_attribute ravb_delay_mode_quirk_match[] = { + { .soc_id = "r8a774c0" }, + { .soc_id = "r8a77990" }, + { .soc_id = "r8a77995" }, + { /* sentinel */ } +}; + /* Set tx and rx clock internal delay modes */ static void ravb_set_delay_mode(struct net_device *ndev) { @@ -1981,8 +1987,12 @@ static void ravb_set_delay_mode(struct net_device *ndev) set |= APSR_DM_RDM; if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || - priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) - set |= APSR_DM_TDM; + priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) { + if (!WARN(soc_device_match(ravb_delay_mode_quirk_match), + "phy-mode %s requires TX clock internal delay mode which is not supported by this hardware revision. Please update device tree", + phy_modes(priv->phy_interface))) + set |= APSR_DM_TDM; + } ravb_modify(ndev, APSR, APSR_DM, set); } diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index a71c900ca04f..7ae6c124bfe9 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -2207,6 +2207,15 @@ static int rocker_router_fib_event(struct notifier_block *nb, switch (event) { case FIB_EVENT_ENTRY_ADD: /* fall through */ case FIB_EVENT_ENTRY_DEL: + if (info->family == AF_INET) { + struct fib_entry_notifier_info *fen_info = ptr; + + if (fen_info->fi->fib_nh_is_v6) { + NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported"); + return notifier_from_errno(-EINVAL); + } + } + memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info)); /* Take referece on fib_info to prevent it from being * freed while work is queued. Release it afterwards. 
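 */

(The comment above is the usual FIB-notifier idiom: the notifier runs in an atomic context, so the handler only snapshots the event data and takes a reference before deferring the real work to a workqueue. A condensed sketch of the deferred side, reusing the driver's rocker_fib_event_work naming but with the hardware programming elided, would be:)

static void rocker_router_fib_event_work(struct work_struct *work)
{
	struct rocker_fib_event_work *fib_work =
		container_of(work, struct rocker_fib_event_work, work);

	/* ... program or remove the route in the switch ... */

	/* drop the reference taken when the work was queued */
	fib_info_put(fib_work->fen_info.fi);
	kfree(fib_work);
}
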
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c index fa296a7c255d..30a49802fb51 100644 --- a/drivers/net/ethernet/rocker/rocker_ofdpa.c +++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c @@ -2288,11 +2288,11 @@ static int ofdpa_port_fib_ipv4(struct ofdpa_port *ofdpa_port, __be32 dst, nh = fi->fib_nh; nh_on_port = (fi->fib_dev == ofdpa_port->dev); - has_gw = !!nh->nh_gw; + has_gw = !!nh->fib_nh_gw4; if (has_gw && nh_on_port) { err = ofdpa_port_ipv4_nh(ofdpa_port, flags, - nh->nh_gw, &index); + nh->fib_nh_gw4, &index); if (err) return err; @@ -2749,7 +2749,7 @@ static int ofdpa_fib4_add(struct rocker *rocker, fen_info->tb_id, 0); if (err) return err; - fen_info->fi->fib_nh->nh_flags |= RTNH_F_OFFLOAD; + fen_info->fi->fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD; return 0; } @@ -2764,7 +2764,7 @@ static int ofdpa_fib4_del(struct rocker *rocker, ofdpa_port = ofdpa_port_dev_lower_find(fen_info->fi->fib_dev, rocker); if (!ofdpa_port) return 0; - fen_info->fi->fib_nh->nh_flags &= ~RTNH_F_OFFLOAD; + fen_info->fi->fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD; return ofdpa_port_fib_ipv4(ofdpa_port, htonl(fen_info->dst), fen_info->dst_len, fen_info->fi, fen_info->tb_id, OFDPA_OP_FLAG_REMOVE); @@ -2791,7 +2791,7 @@ static void ofdpa_fib4_abort(struct rocker *rocker) rocker); if (!ofdpa_port) continue; - flow_entry->fi->fib_nh->nh_flags &= ~RTNH_F_OFFLOAD; + flow_entry->fi->fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD; ofdpa_flow_tbl_del(ofdpa_port, OFDPA_OP_FLAG_REMOVE, flow_entry); } diff --git a/drivers/net/ethernet/sfc/falcon/tx.c b/drivers/net/ethernet/sfc/falcon/tx.c index 3409bbf5b19f..c5059f456f37 100644 --- a/drivers/net/ethernet/sfc/falcon/tx.c +++ b/drivers/net/ethernet/sfc/falcon/tx.c @@ -321,7 +321,7 @@ netdev_tx_t ef4_enqueue_skb(struct ef4_tx_queue *tx_queue, struct sk_buff *skb) netdev_tx_sent_queue(tx_queue->core_txq, skb_len); /* Pass off to hardware */ - if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) { + if (!netdev_xmit_more() || netif_xmit_stopped(tx_queue->core_txq)) { struct ef4_tx_queue *txq2 = ef4_tx_queue_partner(tx_queue); /* There could be packets left on the partner queue if those @@ -333,7 +333,7 @@ netdev_tx_t ef4_enqueue_skb(struct ef4_tx_queue *tx_queue, struct sk_buff *skb) ef4_nic_push_buffers(tx_queue); } else { - tx_queue->xmit_more_available = skb->xmit_more; + tx_queue->xmit_more_available = netdev_xmit_more(); } tx_queue->tx_packets++; diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c index 9382bb0b4d5a..a4bbfebe3d64 100644 --- a/drivers/net/ethernet/sfc/mcdi_port.c +++ b/drivers/net/ethernet/sfc/mcdi_port.c @@ -342,6 +342,7 @@ static void efx_mcdi_phy_decode_link(struct efx_nic *efx, break; default: WARN_ON(1); + /* Fall through */ case MC_CMD_FCNTL_OFF: link_state->fc = 0; break; diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 06c8f282263f..e182055ec2eb 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -478,8 +478,6 @@ static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, next = skb->next; skb->next = NULL; - if (next) - skb->xmit_more = true; efx_enqueue_skb(tx_queue, skb); skb = next; } @@ -506,7 +504,7 @@ static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) { unsigned int old_insert_count = tx_queue->insert_count; - bool xmit_more = skb->xmit_more; + bool xmit_more = netdev_xmit_more(); bool data_mapped = 
false; unsigned int segments; unsigned int skb_len; @@ -533,7 +531,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) if (rc) goto err; #ifdef EFX_USE_PIO - } else if (skb_len <= efx_piobuf_size && !skb->xmit_more && + } else if (skb_len <= efx_piobuf_size && !xmit_more && efx_nic_may_tx_pio(tx_queue)) { /* Use PIO for short packets with an empty queue. */ if (efx_enqueue_skb_pio(tx_queue, skb)) @@ -559,8 +557,8 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more)) { struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue); - /* There could be packets left on the partner queue if those - * SKBs had skb->xmit_more set. If we do not push those they + /* There could be packets left on the partner queue if + * xmit_more was set. If we do not push those they * could be left for a long time and cause a netdev watchdog. */ if (txq2->xmit_more_available) @@ -568,7 +566,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) efx_nic_push_buffers(tx_queue); } else { - tx_queue->xmit_more_available = skb->xmit_more; + tx_queue->xmit_more_available = xmit_more; } if (segments) { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h index 37d5e6fe7473..085b700a4994 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h @@ -143,6 +143,11 @@ #define XGMAC_RSF BIT(5) #define XGMAC_RTC GENMASK(1, 0) #define XGMAC_RTC_SHIFT 0 +#define XGMAC_MTL_RXQ_FLOW_CONTROL(x) (0x00001150 + (0x80 * (x))) +#define XGMAC_RFD GENMASK(31, 17) +#define XGMAC_RFD_SHIFT 17 +#define XGMAC_RFA GENMASK(15, 1) +#define XGMAC_RFA_SHIFT 1 #define XGMAC_MTL_QINTEN(x) (0x00001170 + (0x80 * (x))) #define XGMAC_RXOIE BIT(16) #define XGMAC_MTL_QINT_STATUS(x) (0x00001174 + (0x80 * (x))) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c index 2ba712b48a89..e79037f511e1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c @@ -147,6 +147,52 @@ static void dwxgmac2_dma_rx_mode(void __iomem *ioaddr, int mode, value &= ~XGMAC_RQS; value |= (rqs << XGMAC_RQS_SHIFT) & XGMAC_RQS; + if ((fifosz >= 4096) && (qmode != MTL_QUEUE_AVB)) { + u32 flow = readl(ioaddr + XGMAC_MTL_RXQ_FLOW_CONTROL(channel)); + unsigned int rfd, rfa; + + value |= XGMAC_EHFC; + + /* Set Threshold for Activating Flow Control to min 2 frames, + * i.e. 1500 * 2 = 3000 bytes. + * + * Set Threshold for Deactivating Flow Control to min 1 frame, + * i.e. 1500 bytes. + */ + switch (fifosz) { + case 4096: + /* This violates the above formula because of FIFO size + * limit therefore overflow may occur in spite of this. 
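+ * (RFA/RFD encoding: 0 means Full-1K and each increment adds + * another 512 bytes, i.e. Full - (N + 2) * 512; hence the + * Full-xK annotations below.)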
+ */ + rfd = 0x03; /* Full-2.5K */ + rfa = 0x01; /* Full-1.5K */ + break; + + case 8192: + rfd = 0x06; /* Full-4K */ + rfa = 0x0a; /* Full-6K */ + break; + + case 16384: + rfd = 0x06; /* Full-4K */ + rfa = 0x12; /* Full-10K */ + break; + + default: + rfd = 0x06; /* Full-4K */ + rfa = 0x1e; /* Full-16K */ + break; + } + + flow &= ~XGMAC_RFD; + flow |= rfd << XGMAC_RFD_SHIFT; + + flow &= ~XGMAC_RFA; + flow |= rfa << XGMAC_RFA_SHIFT; + + writel(flow, ioaddr + XGMAC_MTL_RXQ_FLOW_CONTROL(channel)); + } + writel(value, ioaddr + XGMAC_MTL_RXQ_OPMODE(channel)); /* Enable MTL RX overflow */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 48712437d0da..5ab2733e15e2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -74,7 +74,7 @@ MODULE_PARM_DESC(phyaddr, "Physical device address"); #define STMMAC_TX_THRESH (DMA_TX_SIZE / 4) #define STMMAC_RX_THRESH (DMA_RX_SIZE / 4) -static int flow_ctrl = FLOW_OFF; +static int flow_ctrl = FLOW_AUTO; module_param(flow_ctrl, int, 0644); MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]"); diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c index 644e42c181ee..01ea0d6f8819 100644 --- a/drivers/net/ethernet/sun/ldmvsw.c +++ b/drivers/net/ethernet/sun/ldmvsw.c @@ -101,8 +101,7 @@ static struct vnet_port *vsw_tx_port_find(struct sk_buff *skb, } static u16 vsw_select_queue(struct net_device *dev, struct sk_buff *skb, - struct net_device *sb_dev, - select_queue_fallback_t fallback) + struct net_device *sb_dev) { struct vnet_port *port = netdev_priv(dev); diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 590172818b92..96b883f965f6 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -234,8 +234,7 @@ static struct vnet_port *vnet_tx_port_find(struct sk_buff *skb, } static u16 vnet_select_queue(struct net_device *dev, struct sk_buff *skb, - struct net_device *sb_dev, - select_queue_fallback_t fallback) + struct net_device *sb_dev) { struct vnet *vp = netdev_priv(dev); struct vnet_port *port = __tx_port_find(vp, skb); diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c index 99d86e39ff54..bf6c1c6779ff 100644 --- a/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c +++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c @@ -995,7 +995,7 @@ static void xlgmac_dev_xmit(struct xlgmac_channel *channel) smp_wmb(); ring->cur = cur_index + 1; - if (!pkt_info->skb->xmit_more || + if (!netdev_xmit_more() || netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev, channel->queue_index))) xlgmac_tx_start_xmit(channel, ring); diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index 8b21b40a9fe5..afbdc9744230 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -20,7 +20,6 @@ config TI_DAVINCI_EMAC tristate "TI DaVinci EMAC Support" depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 ) || COMPILE_TEST select TI_DAVINCI_MDIO - select TI_DAVINCI_CPDMA select PHYLIB ---help--- This driver supports TI's DaVinci Ethernet . @@ -38,16 +37,6 @@ config TI_DAVINCI_MDIO To compile this driver as a module, choose M here: the module will be called davinci_mdio. This is recommended. 
-config TI_DAVINCI_CPDMA - tristate "TI DaVinci CPDMA Support" - depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST - select GENERIC_ALLOCATOR - ---help--- - This driver supports TI's DaVinci CPDMA dma engine. - - To compile this driver as a module, choose M here: the module - will be called davinci_cpdma. This is recommended. - config TI_CPSW_PHY_SEL bool "TI CPSW Phy mode Selection (DEPRECATED)" default n @@ -55,17 +44,10 @@ config TI_CPSW_PHY_SEL This driver supports configuring of the phy mode connected to the CPSW. DEPRECATED: use PHY_TI_GMII_SEL. -config TI_CPSW_ALE - tristate "TI CPSW ALE Support" - ---help--- - This driver supports TI's CPSW ALE module. - config TI_CPSW tristate "TI CPSW Switch Support" depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST - select TI_DAVINCI_CPDMA select TI_DAVINCI_MDIO - select TI_CPSW_ALE select MFD_SYSCON select REGMAP ---help--- @@ -94,7 +76,6 @@ config TI_CPTS_MOD config TI_KEYSTONE_NETCP tristate "TI Keystone NETCP Core Support" - select TI_CPSW_ALE select TI_DAVINCI_MDIO depends on OF depends on KEYSTONE_NAVIGATOR_DMA && KEYSTONE_NAVIGATOR_QMSS diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile index 0be551de821c..c3f53a40b48f 100644 --- a/drivers/net/ethernet/ti/Makefile +++ b/drivers/net/ethernet/ti/Makefile @@ -8,16 +8,15 @@ obj-$(CONFIG_TI_DAVINCI_EMAC) += cpsw-common.o obj-$(CONFIG_TLAN) += tlan.o obj-$(CONFIG_CPMAC) += cpmac.o -obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o +obj-$(CONFIG_TI_DAVINCI_EMAC) += ti_davinci_emac.o +ti_davinci_emac-y := davinci_emac.o davinci_cpdma.o obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o -obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o -obj-$(CONFIG_TI_CPSW_ALE) += cpsw_ale.o obj-$(CONFIG_TI_CPTS_MOD) += cpts.o obj-$(CONFIG_TI_CPSW) += ti_cpsw.o -ti_cpsw-y := cpsw.o +ti_cpsw-y := cpsw.o davinci_cpdma.o cpsw_ale.o cpsw_priv.o cpsw_sl.o cpsw_ethtool.o obj-$(CONFIG_TI_KEYSTONE_NETCP) += keystone_netcp.o -keystone_netcp-y := netcp_core.o +keystone_netcp-y := netcp_core.o cpsw_ale.o obj-$(CONFIG_TI_KEYSTONE_NETCP_ETHSS) += keystone_netcp_ethss.o keystone_netcp_ethss-y := netcp_ethss.o netcp_sgmii.o netcp_xgbepcsr.o diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c index e2d47b24a869..3a655a4dc10e 100644 --- a/drivers/net/ethernet/ti/cpmac.c +++ b/drivers/net/ethernet/ti/cpmac.c @@ -1,19 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * Copyright (C) 2006, 2007 Eugene Konev * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/module.h> diff --git a/drivers/net/ethernet/ti/cpsw-common.c b/drivers/net/ethernet/ti/cpsw-common.c index 38d1cc557c11..bfa81bbfce3f 100644 --- a/drivers/net/ethernet/ti/cpsw-common.c +++ b/drivers/net/ethernet/ti/cpsw-common.c @@ -1,14 +1,4 @@ -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ +// SPDX-License-Identifier: GPL-2.0+ #include <linux/kernel.h> #include <linux/module.h> diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c index fec275e2208d..48e0924259f5 100644 --- a/drivers/net/ethernet/ti/cpsw-phy-sel.c +++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c @@ -1,17 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 /* Texas Instruments Ethernet Switch Driver * * Copyright (C) 2013 Texas Instruments * * Module Author: Mugunthan V N <mugunthanvnm@ti.com> * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #include <linux/platform_device.h> diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index a591583d120e..e37680654a13 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1,16 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Texas Instruments Ethernet Switch Driver * * Copyright (C) 2012 Texas Instruments * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #include <linux/kernel.h> @@ -44,138 +37,13 @@ #include "cpsw.h" #include "cpsw_ale.h" +#include "cpsw_priv.h" +#include "cpsw_sl.h" #include "cpts.h" #include "davinci_cpdma.h" #include <net/pkt_sched.h> -#define CPSW_DEBUG (NETIF_MSG_HW | NETIF_MSG_WOL | \ - NETIF_MSG_DRV | NETIF_MSG_LINK | \ - NETIF_MSG_IFUP | NETIF_MSG_INTR | \ - NETIF_MSG_PROBE | NETIF_MSG_TIMER | \ - NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR | \ - NETIF_MSG_TX_ERR | NETIF_MSG_TX_DONE | \ - NETIF_MSG_PKTDATA | NETIF_MSG_TX_QUEUED | \ - NETIF_MSG_RX_STATUS) - -#define cpsw_info(priv, type, format, ...) \ -do { \ - if (netif_msg_##type(priv) && net_ratelimit()) \ - dev_info(priv->dev, format, ## __VA_ARGS__); \ -} while (0) - -#define cpsw_err(priv, type, format, ...) 
\ -do { \ - if (netif_msg_##type(priv) && net_ratelimit()) \ - dev_err(priv->dev, format, ## __VA_ARGS__); \ -} while (0) - -#define cpsw_dbg(priv, type, format, ...) \ -do { \ - if (netif_msg_##type(priv) && net_ratelimit()) \ - dev_dbg(priv->dev, format, ## __VA_ARGS__); \ -} while (0) - -#define cpsw_notice(priv, type, format, ...) \ -do { \ - if (netif_msg_##type(priv) && net_ratelimit()) \ - dev_notice(priv->dev, format, ## __VA_ARGS__); \ -} while (0) - -#define ALE_ALL_PORTS 0x7 - -#define CPSW_MAJOR_VERSION(reg) (reg >> 8 & 0x7) -#define CPSW_MINOR_VERSION(reg) (reg & 0xff) -#define CPSW_RTL_VERSION(reg) ((reg >> 11) & 0x1f) - -#define CPSW_VERSION_1 0x19010a -#define CPSW_VERSION_2 0x19010c -#define CPSW_VERSION_3 0x19010f -#define CPSW_VERSION_4 0x190112 - -#define HOST_PORT_NUM 0 -#define CPSW_ALE_PORTS_NUM 3 -#define SLIVER_SIZE 0x40 - -#define CPSW1_HOST_PORT_OFFSET 0x028 -#define CPSW1_SLAVE_OFFSET 0x050 -#define CPSW1_SLAVE_SIZE 0x040 -#define CPSW1_CPDMA_OFFSET 0x100 -#define CPSW1_STATERAM_OFFSET 0x200 -#define CPSW1_HW_STATS 0x400 -#define CPSW1_CPTS_OFFSET 0x500 -#define CPSW1_ALE_OFFSET 0x600 -#define CPSW1_SLIVER_OFFSET 0x700 - -#define CPSW2_HOST_PORT_OFFSET 0x108 -#define CPSW2_SLAVE_OFFSET 0x200 -#define CPSW2_SLAVE_SIZE 0x100 -#define CPSW2_CPDMA_OFFSET 0x800 -#define CPSW2_HW_STATS 0x900 -#define CPSW2_STATERAM_OFFSET 0xa00 -#define CPSW2_CPTS_OFFSET 0xc00 -#define CPSW2_ALE_OFFSET 0xd00 -#define CPSW2_SLIVER_OFFSET 0xd80 -#define CPSW2_BD_OFFSET 0x2000 - -#define CPDMA_RXTHRESH 0x0c0 -#define CPDMA_RXFREE 0x0e0 -#define CPDMA_TXHDP 0x00 -#define CPDMA_RXHDP 0x20 -#define CPDMA_TXCP 0x40 -#define CPDMA_RXCP 0x60 - -#define CPSW_POLL_WEIGHT 64 -#define CPSW_RX_VLAN_ENCAP_HDR_SIZE 4 -#define CPSW_MIN_PACKET_SIZE (VLAN_ETH_ZLEN) -#define CPSW_MAX_PACKET_SIZE (VLAN_ETH_FRAME_LEN +\ - ETH_FCS_LEN +\ - CPSW_RX_VLAN_ENCAP_HDR_SIZE) - -#define RX_PRIORITY_MAPPING 0x76543210 -#define TX_PRIORITY_MAPPING 0x33221100 -#define CPDMA_TX_PRIORITY_MAP 0x76543210 - -#define CPSW_VLAN_AWARE BIT(1) -#define CPSW_RX_VLAN_ENCAP BIT(2) -#define CPSW_ALE_VLAN_AWARE 1 - -#define CPSW_FIFO_NORMAL_MODE (0 << 16) -#define CPSW_FIFO_DUAL_MAC_MODE (1 << 16) -#define CPSW_FIFO_RATE_LIMIT_MODE (2 << 16) - -#define CPSW_INTPACEEN (0x3f << 16) -#define CPSW_INTPRESCALE_MASK (0x7FF << 0) -#define CPSW_CMINTMAX_CNT 63 -#define CPSW_CMINTMIN_CNT 2 -#define CPSW_CMINTMAX_INTVL (1000 / CPSW_CMINTMIN_CNT) -#define CPSW_CMINTMIN_INTVL ((1000 / CPSW_CMINTMAX_CNT) + 1) - -#define cpsw_slave_index(cpsw, priv) \ - ((cpsw->data.dual_emac) ? 
priv->emac_port : \ - cpsw->data.active_slave) -#define IRQ_NUM 2 -#define CPSW_MAX_QUEUES 8 -#define CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT 256 -#define CPSW_FIFO_QUEUE_TYPE_SHIFT 16 -#define CPSW_FIFO_SHAPE_EN_SHIFT 16 -#define CPSW_FIFO_RATE_EN_SHIFT 20 -#define CPSW_TC_NUM 4 -#define CPSW_FIFO_SHAPERS_NUM (CPSW_TC_NUM - 1) -#define CPSW_PCT_MASK 0x7f - -#define CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT 29 -#define CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK GENMASK(2, 0) -#define CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT 16 -#define CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT 8 -#define CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK GENMASK(1, 0) -enum { - CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG = 0, - CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV, - CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG, - CPSW_RX_VLAN_ENCAP_HDR_PKT_UNTAG, -}; - static int debug_level; module_param(debug_level, int, 0); MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)"); @@ -192,369 +60,6 @@ static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT; module_param(descs_pool_size, int, 0444); MODULE_PARM_DESC(descs_pool_size, "Number of CPDMA CPPI descriptors in pool"); -struct cpsw_wr_regs { - u32 id_ver; - u32 soft_reset; - u32 control; - u32 int_control; - u32 rx_thresh_en; - u32 rx_en; - u32 tx_en; - u32 misc_en; - u32 mem_allign1[8]; - u32 rx_thresh_stat; - u32 rx_stat; - u32 tx_stat; - u32 misc_stat; - u32 mem_allign2[8]; - u32 rx_imax; - u32 tx_imax; - -}; - -struct cpsw_ss_regs { - u32 id_ver; - u32 control; - u32 soft_reset; - u32 stat_port_en; - u32 ptype; - u32 soft_idle; - u32 thru_rate; - u32 gap_thresh; - u32 tx_start_wds; - u32 flow_control; - u32 vlan_ltype; - u32 ts_ltype; - u32 dlr_ltype; -}; - -/* CPSW_PORT_V1 */ -#define CPSW1_MAX_BLKS 0x00 /* Maximum FIFO Blocks */ -#define CPSW1_BLK_CNT 0x04 /* FIFO Block Usage Count (Read Only) */ -#define CPSW1_TX_IN_CTL 0x08 /* Transmit FIFO Control */ -#define CPSW1_PORT_VLAN 0x0c /* VLAN Register */ -#define CPSW1_TX_PRI_MAP 0x10 /* Tx Header Priority to Switch Pri Mapping */ -#define CPSW1_TS_CTL 0x14 /* Time Sync Control */ -#define CPSW1_TS_SEQ_LTYPE 0x18 /* Time Sync Sequence ID Offset and Msg Type */ -#define CPSW1_TS_VLAN 0x1c /* Time Sync VLAN1 and VLAN2 */ - -/* CPSW_PORT_V2 */ -#define CPSW2_CONTROL 0x00 /* Control Register */ -#define CPSW2_MAX_BLKS 0x08 /* Maximum FIFO Blocks */ -#define CPSW2_BLK_CNT 0x0c /* FIFO Block Usage Count (Read Only) */ -#define CPSW2_TX_IN_CTL 0x10 /* Transmit FIFO Control */ -#define CPSW2_PORT_VLAN 0x14 /* VLAN Register */ -#define CPSW2_TX_PRI_MAP 0x18 /* Tx Header Priority to Switch Pri Mapping */ -#define CPSW2_TS_SEQ_MTYPE 0x1c /* Time Sync Sequence ID Offset and Msg Type */ - -/* CPSW_PORT_V1 and V2 */ -#define SA_LO 0x20 /* CPGMAC_SL Source Address Low */ -#define SA_HI 0x24 /* CPGMAC_SL Source Address High */ -#define SEND_PERCENT 0x28 /* Transmit Queue Send Percentages */ - -/* CPSW_PORT_V2 only */ -#define RX_DSCP_PRI_MAP0 0x30 /* Rx DSCP Priority to Rx Packet Mapping */ -#define RX_DSCP_PRI_MAP1 0x34 /* Rx DSCP Priority to Rx Packet Mapping */ -#define RX_DSCP_PRI_MAP2 0x38 /* Rx DSCP Priority to Rx Packet Mapping */ -#define RX_DSCP_PRI_MAP3 0x3c /* Rx DSCP Priority to Rx Packet Mapping */ -#define RX_DSCP_PRI_MAP4 0x40 /* Rx DSCP Priority to Rx Packet Mapping */ -#define RX_DSCP_PRI_MAP5 0x44 /* Rx DSCP Priority to Rx Packet Mapping */ -#define RX_DSCP_PRI_MAP6 0x48 /* Rx DSCP Priority to Rx Packet Mapping */ -#define RX_DSCP_PRI_MAP7 0x4c /* Rx DSCP Priority to Rx Packet Mapping */ - -/* Bit definitions for the CPSW2_CONTROL register */ -#define 
PASS_PRI_TAGGED BIT(24) /* Pass Priority Tagged */ -#define VLAN_LTYPE2_EN BIT(21) /* VLAN LTYPE 2 enable */ -#define VLAN_LTYPE1_EN BIT(20) /* VLAN LTYPE 1 enable */ -#define DSCP_PRI_EN BIT(16) /* DSCP Priority Enable */ -#define TS_107 BIT(15) /* Tyme Sync Dest IP Address 107 */ -#define TS_320 BIT(14) /* Time Sync Dest Port 320 enable */ -#define TS_319 BIT(13) /* Time Sync Dest Port 319 enable */ -#define TS_132 BIT(12) /* Time Sync Dest IP Addr 132 enable */ -#define TS_131 BIT(11) /* Time Sync Dest IP Addr 131 enable */ -#define TS_130 BIT(10) /* Time Sync Dest IP Addr 130 enable */ -#define TS_129 BIT(9) /* Time Sync Dest IP Addr 129 enable */ -#define TS_TTL_NONZERO BIT(8) /* Time Sync Time To Live Non-zero enable */ -#define TS_ANNEX_F_EN BIT(6) /* Time Sync Annex F enable */ -#define TS_ANNEX_D_EN BIT(4) /* Time Sync Annex D enable */ -#define TS_LTYPE2_EN BIT(3) /* Time Sync LTYPE 2 enable */ -#define TS_LTYPE1_EN BIT(2) /* Time Sync LTYPE 1 enable */ -#define TS_TX_EN BIT(1) /* Time Sync Transmit Enable */ -#define TS_RX_EN BIT(0) /* Time Sync Receive Enable */ - -#define CTRL_V2_TS_BITS \ - (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\ - TS_TTL_NONZERO | TS_ANNEX_D_EN | TS_LTYPE1_EN | VLAN_LTYPE1_EN) - -#define CTRL_V2_ALL_TS_MASK (CTRL_V2_TS_BITS | TS_TX_EN | TS_RX_EN) -#define CTRL_V2_TX_TS_BITS (CTRL_V2_TS_BITS | TS_TX_EN) -#define CTRL_V2_RX_TS_BITS (CTRL_V2_TS_BITS | TS_RX_EN) - - -#define CTRL_V3_TS_BITS \ - (TS_107 | TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\ - TS_TTL_NONZERO | TS_ANNEX_F_EN | TS_ANNEX_D_EN |\ - TS_LTYPE1_EN | VLAN_LTYPE1_EN) - -#define CTRL_V3_ALL_TS_MASK (CTRL_V3_TS_BITS | TS_TX_EN | TS_RX_EN) -#define CTRL_V3_TX_TS_BITS (CTRL_V3_TS_BITS | TS_TX_EN) -#define CTRL_V3_RX_TS_BITS (CTRL_V3_TS_BITS | TS_RX_EN) - -/* Bit definitions for the CPSW2_TS_SEQ_MTYPE register */ -#define TS_SEQ_ID_OFFSET_SHIFT (16) /* Time Sync Sequence ID Offset */ -#define TS_SEQ_ID_OFFSET_MASK (0x3f) -#define TS_MSG_TYPE_EN_SHIFT (0) /* Time Sync Message Type Enable */ -#define TS_MSG_TYPE_EN_MASK (0xffff) - -/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. 
*/ -#define EVENT_MSG_BITS ((1<<0) | (1<<1) | (1<<2) | (1<<3)) - -/* Bit definitions for the CPSW1_TS_CTL register */ -#define CPSW_V1_TS_RX_EN BIT(0) -#define CPSW_V1_TS_TX_EN BIT(4) -#define CPSW_V1_MSG_TYPE_OFS 16 - -/* Bit definitions for the CPSW1_TS_SEQ_LTYPE register */ -#define CPSW_V1_SEQ_ID_OFS_SHIFT 16 - -#define CPSW_MAX_BLKS_TX 15 -#define CPSW_MAX_BLKS_TX_SHIFT 4 -#define CPSW_MAX_BLKS_RX 5 - -struct cpsw_host_regs { - u32 max_blks; - u32 blk_cnt; - u32 tx_in_ctl; - u32 port_vlan; - u32 tx_pri_map; - u32 cpdma_tx_pri_map; - u32 cpdma_rx_chan_map; -}; - -struct cpsw_sliver_regs { - u32 id_ver; - u32 mac_control; - u32 mac_status; - u32 soft_reset; - u32 rx_maxlen; - u32 __reserved_0; - u32 rx_pause; - u32 tx_pause; - u32 __reserved_1; - u32 rx_pri_map; -}; - -struct cpsw_hw_stats { - u32 rxgoodframes; - u32 rxbroadcastframes; - u32 rxmulticastframes; - u32 rxpauseframes; - u32 rxcrcerrors; - u32 rxaligncodeerrors; - u32 rxoversizedframes; - u32 rxjabberframes; - u32 rxundersizedframes; - u32 rxfragments; - u32 __pad_0[2]; - u32 rxoctets; - u32 txgoodframes; - u32 txbroadcastframes; - u32 txmulticastframes; - u32 txpauseframes; - u32 txdeferredframes; - u32 txcollisionframes; - u32 txsinglecollframes; - u32 txmultcollframes; - u32 txexcessivecollisions; - u32 txlatecollisions; - u32 txunderrun; - u32 txcarriersenseerrors; - u32 txoctets; - u32 octetframes64; - u32 octetframes65t127; - u32 octetframes128t255; - u32 octetframes256t511; - u32 octetframes512t1023; - u32 octetframes1024tup; - u32 netoctets; - u32 rxsofoverruns; - u32 rxmofoverruns; - u32 rxdmaoverruns; -}; - -struct cpsw_slave_data { - struct device_node *phy_node; - char phy_id[MII_BUS_ID_SIZE]; - int phy_if; - u8 mac_addr[ETH_ALEN]; - u16 dual_emac_res_vlan; /* Reserved VLAN for DualEMAC */ - struct phy *ifphy; -}; - -struct cpsw_platform_data { - struct cpsw_slave_data *slave_data; - u32 ss_reg_ofs; /* Subsystem control register offset */ - u32 channels; /* number of cpdma channels (symmetric) */ - u32 slaves; /* number of slave cpgmac ports */ - u32 active_slave; /* time stamping, ethtool and SIOCGMIIPHY slave */ - u32 ale_entries; /* ale table size */ - u32 bd_ram_size; /*buffer descriptor ram size */ - u32 mac_control; /* Mac control register */ - u16 default_vlan; /* Def VLAN for ALE lookup in VLAN aware mode*/ - bool dual_emac; /* Enable Dual EMAC mode */ -}; - -struct cpsw_slave { - void __iomem *regs; - struct cpsw_sliver_regs __iomem *sliver; - int slave_num; - u32 mac_control; - struct cpsw_slave_data *data; - struct phy_device *phy; - struct net_device *ndev; - u32 port_vlan; -}; - -static inline u32 slave_read(struct cpsw_slave *slave, u32 offset) -{ - return readl_relaxed(slave->regs + offset); -} - -static inline void slave_write(struct cpsw_slave *slave, u32 val, u32 offset) -{ - writel_relaxed(val, slave->regs + offset); -} - -struct cpsw_vector { - struct cpdma_chan *ch; - int budget; -}; - -struct cpsw_common { - struct device *dev; - struct cpsw_platform_data data; - struct napi_struct napi_rx; - struct napi_struct napi_tx; - struct cpsw_ss_regs __iomem *regs; - struct cpsw_wr_regs __iomem *wr_regs; - u8 __iomem *hw_stats; - struct cpsw_host_regs __iomem *host_port_regs; - u32 version; - u32 coal_intvl; - u32 bus_freq_mhz; - int rx_packet_max; - struct cpsw_slave *slaves; - struct cpdma_ctlr *dma; - struct cpsw_vector txv[CPSW_MAX_QUEUES]; - struct cpsw_vector rxv[CPSW_MAX_QUEUES]; - struct cpsw_ale *ale; - bool quirk_irq; - bool rx_irq_disabled; - bool tx_irq_disabled; - u32 
irqs_table[IRQ_NUM]; - struct cpts *cpts; - int rx_ch_num, tx_ch_num; - int speed; - int usage_count; -}; - -struct cpsw_priv { - struct net_device *ndev; - struct device *dev; - u32 msg_enable; - u8 mac_addr[ETH_ALEN]; - bool rx_pause; - bool tx_pause; - bool mqprio_hw; - int fifo_bw[CPSW_TC_NUM]; - int shp_cfg_speed; - int tx_ts_enabled; - int rx_ts_enabled; - u32 emac_port; - struct cpsw_common *cpsw; -}; - -struct cpsw_stats { - char stat_string[ETH_GSTRING_LEN]; - int type; - int sizeof_stat; - int stat_offset; -}; - -enum { - CPSW_STATS, - CPDMA_RX_STATS, - CPDMA_TX_STATS, -}; - -#define CPSW_STAT(m) CPSW_STATS, \ - FIELD_SIZEOF(struct cpsw_hw_stats, m), \ - offsetof(struct cpsw_hw_stats, m) -#define CPDMA_RX_STAT(m) CPDMA_RX_STATS, \ - FIELD_SIZEOF(struct cpdma_chan_stats, m), \ - offsetof(struct cpdma_chan_stats, m) -#define CPDMA_TX_STAT(m) CPDMA_TX_STATS, \ - FIELD_SIZEOF(struct cpdma_chan_stats, m), \ - offsetof(struct cpdma_chan_stats, m) - -static const struct cpsw_stats cpsw_gstrings_stats[] = { - { "Good Rx Frames", CPSW_STAT(rxgoodframes) }, - { "Broadcast Rx Frames", CPSW_STAT(rxbroadcastframes) }, - { "Multicast Rx Frames", CPSW_STAT(rxmulticastframes) }, - { "Pause Rx Frames", CPSW_STAT(rxpauseframes) }, - { "Rx CRC Errors", CPSW_STAT(rxcrcerrors) }, - { "Rx Align/Code Errors", CPSW_STAT(rxaligncodeerrors) }, - { "Oversize Rx Frames", CPSW_STAT(rxoversizedframes) }, - { "Rx Jabbers", CPSW_STAT(rxjabberframes) }, - { "Undersize (Short) Rx Frames", CPSW_STAT(rxundersizedframes) }, - { "Rx Fragments", CPSW_STAT(rxfragments) }, - { "Rx Octets", CPSW_STAT(rxoctets) }, - { "Good Tx Frames", CPSW_STAT(txgoodframes) }, - { "Broadcast Tx Frames", CPSW_STAT(txbroadcastframes) }, - { "Multicast Tx Frames", CPSW_STAT(txmulticastframes) }, - { "Pause Tx Frames", CPSW_STAT(txpauseframes) }, - { "Deferred Tx Frames", CPSW_STAT(txdeferredframes) }, - { "Collisions", CPSW_STAT(txcollisionframes) }, - { "Single Collision Tx Frames", CPSW_STAT(txsinglecollframes) }, - { "Multiple Collision Tx Frames", CPSW_STAT(txmultcollframes) }, - { "Excessive Collisions", CPSW_STAT(txexcessivecollisions) }, - { "Late Collisions", CPSW_STAT(txlatecollisions) }, - { "Tx Underrun", CPSW_STAT(txunderrun) }, - { "Carrier Sense Errors", CPSW_STAT(txcarriersenseerrors) }, - { "Tx Octets", CPSW_STAT(txoctets) }, - { "Rx + Tx 64 Octet Frames", CPSW_STAT(octetframes64) }, - { "Rx + Tx 65-127 Octet Frames", CPSW_STAT(octetframes65t127) }, - { "Rx + Tx 128-255 Octet Frames", CPSW_STAT(octetframes128t255) }, - { "Rx + Tx 256-511 Octet Frames", CPSW_STAT(octetframes256t511) }, - { "Rx + Tx 512-1023 Octet Frames", CPSW_STAT(octetframes512t1023) }, - { "Rx + Tx 1024-Up Octet Frames", CPSW_STAT(octetframes1024tup) }, - { "Net Octets", CPSW_STAT(netoctets) }, - { "Rx Start of Frame Overruns", CPSW_STAT(rxsofoverruns) }, - { "Rx Middle of Frame Overruns", CPSW_STAT(rxmofoverruns) }, - { "Rx DMA Overruns", CPSW_STAT(rxdmaoverruns) }, -}; - -static const struct cpsw_stats cpsw_gstrings_ch_stats[] = { - { "head_enqueue", CPDMA_RX_STAT(head_enqueue) }, - { "tail_enqueue", CPDMA_RX_STAT(tail_enqueue) }, - { "pad_enqueue", CPDMA_RX_STAT(pad_enqueue) }, - { "misqueued", CPDMA_RX_STAT(misqueued) }, - { "desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) }, - { "pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) }, - { "runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) }, - { "runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) }, - { "empty_dequeue", CPDMA_RX_STAT(empty_dequeue) }, - { "busy_dequeue", CPDMA_RX_STAT(busy_dequeue) 
}, - { "good_dequeue", CPDMA_RX_STAT(good_dequeue) }, - { "requeue", CPDMA_RX_STAT(requeue) }, - { "teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) }, -}; - -#define CPSW_STATS_COMMON_LEN ARRAY_SIZE(cpsw_gstrings_stats) -#define CPSW_STATS_CH_LEN ARRAY_SIZE(cpsw_gstrings_ch_stats) - -#define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw) -#define napi_to_cpsw(napi) container_of(napi, struct cpsw_common, napi) #define for_each_slave(priv, func, arg...) \ do { \ struct cpsw_slave *slave; \ @@ -572,11 +77,6 @@ static const struct cpsw_stats cpsw_gstrings_ch_stats[] = { static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid); -static inline int cpsw_get_slave_port(u32 slave_num) -{ - return slave_num + 1; -} - static void cpsw_set_promiscious(struct net_device *ndev, bool enable) { struct cpsw_common *cpsw = ndev_to_cpsw(ndev); @@ -653,13 +153,6 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable) } } -struct addr_sync_ctx { - struct net_device *ndev; - const u8 *addr; /* address to be synched */ - int consumed; /* number of address instances */ - int flush; /* flush flag */ -}; - /** * cpsw_set_mc - adds multicast entry to the table if it's not added or deletes * if it's not deleted @@ -800,12 +293,17 @@ static int cpsw_purge_all_mc(struct net_device *ndev, const u8 *addr, int num) static void cpsw_ndo_set_rx_mode(struct net_device *ndev) { - struct cpsw_common *cpsw = ndev_to_cpsw(ndev); + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + int slave_port = -1; + + if (cpsw->data.dual_emac) + slave_port = priv->emac_port + 1; if (ndev->flags & IFF_PROMISC) { /* Enable promiscuous mode */ cpsw_set_promiscious(ndev, true); - cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI); + cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI, slave_port); return; } else { /* Disable promiscuous mode */ @@ -813,14 +311,15 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev) } /* Restore allmulti on vlans if necessary */ - cpsw_ale_set_allmulti(cpsw->ale, ndev->flags & IFF_ALLMULTI); + cpsw_ale_set_allmulti(cpsw->ale, + ndev->flags & IFF_ALLMULTI, slave_port); /* add/remove mcast address either for real netdev or for vlan */ __hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr, cpsw_del_mc_addr); } -static void cpsw_intr_enable(struct cpsw_common *cpsw) +void cpsw_intr_enable(struct cpsw_common *cpsw) { writel_relaxed(0xFF, &cpsw->wr_regs->tx_en); writel_relaxed(0xFF, &cpsw->wr_regs->rx_en); @@ -829,7 +328,7 @@ static void cpsw_intr_enable(struct cpsw_common *cpsw) return; } -static void cpsw_intr_disable(struct cpsw_common *cpsw) +void cpsw_intr_disable(struct cpsw_common *cpsw) { writel_relaxed(0, &cpsw->wr_regs->tx_en); writel_relaxed(0, &cpsw->wr_regs->rx_en); @@ -838,7 +337,7 @@ static void cpsw_intr_disable(struct cpsw_common *cpsw) return; } -static void cpsw_tx_handler(void *token, int len, int status) +void cpsw_tx_handler(void *token, int len, int status) { struct netdev_queue *txq; struct sk_buff *skb = token; @@ -970,11 +469,9 @@ requeue: dev_kfree_skb_any(new_skb); } -static void cpsw_split_res(struct net_device *ndev) +void cpsw_split_res(struct cpsw_common *cpsw) { - struct cpsw_priv *priv = netdev_priv(ndev); u32 consumed_rate = 0, bigest_rate = 0; - struct cpsw_common *cpsw = priv->cpsw; struct cpsw_vector *txv = cpsw->txv; int i, ch_weight, rlim_ch_num = 0; int budget, bigest_rate_ch = 0; @@ -1254,29 +751,32 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave, slave_port = 
cpsw_get_slave_port(slave->slave_num); if (phy->link) { - mac_control = cpsw->data.mac_control; - - /* enable forwarding */ - cpsw_ale_control_set(cpsw->ale, slave_port, - ALE_PORT_STATE, ALE_PORT_STATE_FORWARD); + mac_control = CPSW_SL_CTL_GMII_EN; if (phy->speed == 1000) - mac_control |= BIT(7); /* GIGABITEN */ + mac_control |= CPSW_SL_CTL_GIG; if (phy->duplex) - mac_control |= BIT(0); /* FULLDUPLEXEN */ + mac_control |= CPSW_SL_CTL_FULLDUPLEX; /* set speed_in input in case RMII mode is used in 100Mbps */ if (phy->speed == 100) - mac_control |= BIT(15); + mac_control |= CPSW_SL_CTL_IFCTL_A; /* in band mode only works in 10Mbps RGMII mode */ else if ((phy->speed == 10) && phy_interface_is_rgmii(phy)) - mac_control |= BIT(18); /* In Band mode */ + mac_control |= CPSW_SL_CTL_EXT_EN; /* In Band mode */ if (priv->rx_pause) - mac_control |= BIT(3); + mac_control |= CPSW_SL_CTL_RX_FLOW_EN; if (priv->tx_pause) - mac_control |= BIT(4); + mac_control |= CPSW_SL_CTL_TX_FLOW_EN; + + if (mac_control != slave->mac_control) + cpsw_sl_ctl_set(slave->mac_sl, mac_control); + + /* enable forwarding */ + cpsw_ale_control_set(cpsw->ale, slave_port, + ALE_PORT_STATE, ALE_PORT_STATE_FORWARD); *link = true; @@ -1290,12 +790,14 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave, /* disable forwarding */ cpsw_ale_control_set(cpsw->ale, slave_port, ALE_PORT_STATE, ALE_PORT_STATE_DISABLE); + + cpsw_sl_wait_for_idle(slave->mac_sl, 100); + + cpsw_sl_ctl_reset(slave->mac_sl); } - if (mac_control != slave->mac_control) { + if (mac_control != slave->mac_control) phy_print_status(phy); - writel_relaxed(mac_control, &slave->sliver->mac_control); - } slave->mac_control = mac_control; } @@ -1348,7 +850,7 @@ static void cpsw_adjust_link(struct net_device *ndev) if (link) { if (cpsw_need_resplit(cpsw)) - cpsw_split_res(ndev); + cpsw_split_res(cpsw); netif_carrier_on(ndev); if (netif_running(ndev)) @@ -1359,167 +861,6 @@ static void cpsw_adjust_link(struct net_device *ndev) } } -static int cpsw_get_coalesce(struct net_device *ndev, - struct ethtool_coalesce *coal) -{ - struct cpsw_common *cpsw = ndev_to_cpsw(ndev); - - coal->rx_coalesce_usecs = cpsw->coal_intvl; - return 0; -} - -static int cpsw_set_coalesce(struct net_device *ndev, - struct ethtool_coalesce *coal) -{ - struct cpsw_priv *priv = netdev_priv(ndev); - u32 int_ctrl; - u32 num_interrupts = 0; - u32 prescale = 0; - u32 addnl_dvdr = 1; - u32 coal_intvl = 0; - struct cpsw_common *cpsw = priv->cpsw; - - coal_intvl = coal->rx_coalesce_usecs; - - int_ctrl = readl(&cpsw->wr_regs->int_control); - prescale = cpsw->bus_freq_mhz * 4; - - if (!coal->rx_coalesce_usecs) { - int_ctrl &= ~(CPSW_INTPRESCALE_MASK | CPSW_INTPACEEN); - goto update_return; - } - - if (coal_intvl < CPSW_CMINTMIN_INTVL) - coal_intvl = CPSW_CMINTMIN_INTVL; - - if (coal_intvl > CPSW_CMINTMAX_INTVL) { - /* Interrupt pacer works with 4us Pulse, we can - * throttle further by dilating the 4us pulse. 
- */ - addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale; - - if (addnl_dvdr > 1) { - prescale *= addnl_dvdr; - if (coal_intvl > (CPSW_CMINTMAX_INTVL * addnl_dvdr)) - coal_intvl = (CPSW_CMINTMAX_INTVL - * addnl_dvdr); - } else { - addnl_dvdr = 1; - coal_intvl = CPSW_CMINTMAX_INTVL; - } - } - - num_interrupts = (1000 * addnl_dvdr) / coal_intvl; - writel(num_interrupts, &cpsw->wr_regs->rx_imax); - writel(num_interrupts, &cpsw->wr_regs->tx_imax); - - int_ctrl |= CPSW_INTPACEEN; - int_ctrl &= (~CPSW_INTPRESCALE_MASK); - int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK); - -update_return: - writel(int_ctrl, &cpsw->wr_regs->int_control); - - cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl); - cpsw->coal_intvl = coal_intvl; - - return 0; -} - -static int cpsw_get_sset_count(struct net_device *ndev, int sset) -{ - struct cpsw_common *cpsw = ndev_to_cpsw(ndev); - - switch (sset) { - case ETH_SS_STATS: - return (CPSW_STATS_COMMON_LEN + - (cpsw->rx_ch_num + cpsw->tx_ch_num) * - CPSW_STATS_CH_LEN); - default: - return -EOPNOTSUPP; - } -} - -static void cpsw_add_ch_strings(u8 **p, int ch_num, int rx_dir) -{ - int ch_stats_len; - int line; - int i; - - ch_stats_len = CPSW_STATS_CH_LEN * ch_num; - for (i = 0; i < ch_stats_len; i++) { - line = i % CPSW_STATS_CH_LEN; - snprintf(*p, ETH_GSTRING_LEN, - "%s DMA chan %ld: %s", rx_dir ? "Rx" : "Tx", - (long)(i / CPSW_STATS_CH_LEN), - cpsw_gstrings_ch_stats[line].stat_string); - *p += ETH_GSTRING_LEN; - } -} - -static void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data) -{ - struct cpsw_common *cpsw = ndev_to_cpsw(ndev); - u8 *p = data; - int i; - - switch (stringset) { - case ETH_SS_STATS: - for (i = 0; i < CPSW_STATS_COMMON_LEN; i++) { - memcpy(p, cpsw_gstrings_stats[i].stat_string, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } - - cpsw_add_ch_strings(&p, cpsw->rx_ch_num, 1); - cpsw_add_ch_strings(&p, cpsw->tx_ch_num, 0); - break; - } -} - -static void cpsw_get_ethtool_stats(struct net_device *ndev, - struct ethtool_stats *stats, u64 *data) -{ - u8 *p; - struct cpsw_common *cpsw = ndev_to_cpsw(ndev); - struct cpdma_chan_stats ch_stats; - int i, l, ch; - - /* Collect Davinci CPDMA stats for Rx and Tx Channel */ - for (l = 0; l < CPSW_STATS_COMMON_LEN; l++) - data[l] = readl(cpsw->hw_stats + - cpsw_gstrings_stats[l].stat_offset); - - for (ch = 0; ch < cpsw->rx_ch_num; ch++) { - cpdma_chan_get_stats(cpsw->rxv[ch].ch, &ch_stats); - for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) { - p = (u8 *)&ch_stats + - cpsw_gstrings_ch_stats[i].stat_offset; - data[l] = *(u32 *)p; - } - } - - for (ch = 0; ch < cpsw->tx_ch_num; ch++) { - cpdma_chan_get_stats(cpsw->txv[ch].ch, &ch_stats); - for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) { - p = (u8 *)&ch_stats + - cpsw_gstrings_ch_stats[i].stat_offset; - data[l] = *(u32 *)p; - } - } -} - -static inline int cpsw_tx_packet_submit(struct cpsw_priv *priv, - struct sk_buff *skb, - struct cpdma_chan *txch) -{ - struct cpsw_common *cpsw = priv->cpsw; - - skb_tx_timestamp(skb); - return cpdma_chan_submit(txch, skb, skb->data, skb->len, - priv->emac_port + cpsw->data.dual_emac); -} - static inline void cpsw_add_dual_emac_def_ale_entries( struct cpsw_priv *priv, struct cpsw_slave *slave, u32 slave_port) @@ -1542,24 +883,18 @@ static inline void cpsw_add_dual_emac_def_ale_entries( ALE_PORT_DROP_UNKNOWN_VLAN, 1); } -static void soft_reset_slave(struct cpsw_slave *slave) -{ - char name[32]; - - snprintf(name, sizeof(name), "slave-%d", slave->slave_num); - soft_reset(name, &slave->sliver->soft_reset); -} - static 
void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) { u32 slave_port; struct phy_device *phy; struct cpsw_common *cpsw = priv->cpsw; - soft_reset_slave(slave); + cpsw_sl_reset(slave->mac_sl, 100); + cpsw_sl_ctl_reset(slave->mac_sl); /* setup priority mapping */ - writel_relaxed(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map); + cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_PRI_MAP, + RX_PRIORITY_MAPPING); switch (cpsw->version) { case CPSW_VERSION_1: @@ -1585,7 +920,8 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) } /* setup max packet size, and mac address */ - writel_relaxed(cpsw->rx_packet_max, &slave->sliver->rx_maxlen); + cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_MAXLEN, + cpsw->rx_packet_max); cpsw_set_slave_mac(slave, priv); slave->mac_control = 0; /* no link yet */ @@ -1696,7 +1032,7 @@ static void cpsw_init_host_port(struct cpsw_priv *priv) } } -static int cpsw_fill_rx_channels(struct cpsw_priv *priv) +int cpsw_fill_rx_channels(struct cpsw_priv *priv) { struct cpsw_common *cpsw = priv->cpsw; struct sk_buff *skb; @@ -1748,7 +1084,8 @@ static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_common *cpsw) slave->phy = NULL; cpsw_ale_control_set(cpsw->ale, slave_port, ALE_PORT_STATE, ALE_PORT_STATE_DISABLE); - soft_reset_slave(slave); + cpsw_sl_reset(slave->mac_sl, 100); + cpsw_sl_ctl_reset(slave->mac_sl); } static int cpsw_tc_to_fifo(int tc, int num_tc) @@ -2114,7 +1451,7 @@ static int cpsw_ndo_stop(struct net_device *ndev) for_each_slave(priv, cpsw_slave_stop, cpsw); if (cpsw_need_resplit(cpsw)) - cpsw_split_res(ndev); + cpsw_split_res(cpsw); cpsw->usage_count--; pm_runtime_put_sync(cpsw->dev); @@ -2147,7 +1484,9 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb, txch = cpsw->txv[q_idx].ch; txq = netdev_get_tx_queue(ndev, q_idx); - ret = cpsw_tx_packet_submit(priv, skb, txch); + skb_tx_timestamp(skb); + ret = cpdma_chan_submit(txch, skb, skb->data, skb->len, + priv->emac_port + cpsw->data.dual_emac); if (unlikely(ret != 0)) { cpsw_err(priv, tx_err, "desc submit failed\n"); goto fail; @@ -2418,18 +1757,6 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) return 0; } -#ifdef CONFIG_NET_POLL_CONTROLLER -static void cpsw_ndo_poll_controller(struct net_device *ndev) -{ - struct cpsw_common *cpsw = ndev_to_cpsw(ndev); - - cpsw_intr_disable(cpsw); - cpsw_rx_interrupt(cpsw->irqs_table[0], cpsw); - cpsw_tx_interrupt(cpsw->irqs_table[1], cpsw); - cpsw_intr_enable(cpsw); -} -#endif - static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv, unsigned short vid) { @@ -2601,7 +1928,7 @@ static int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate) netdev_get_tx_queue(slave->ndev, queue)->tx_maxrate = rate; } - cpsw_split_res(ndev); + cpsw_split_res(cpsw); return ret; } @@ -2677,6 +2004,18 @@ static int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type, } } +#ifdef CONFIG_NET_POLL_CONTROLLER +static void cpsw_ndo_poll_controller(struct net_device *ndev) +{ + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); + + cpsw_intr_disable(cpsw); + cpsw_rx_interrupt(cpsw->irqs_table[0], cpsw); + cpsw_tx_interrupt(cpsw->irqs_table[1], cpsw); + cpsw_intr_enable(cpsw); +} +#endif + static const struct net_device_ops cpsw_netdev_ops = { .ndo_open = cpsw_ndo_open, .ndo_stop = cpsw_ndo_stop, @@ -2695,25 +2034,6 @@ static const struct net_device_ops cpsw_netdev_ops = { .ndo_setup_tc = cpsw_ndo_setup_tc, }; -static int cpsw_get_regs_len(struct net_device *ndev) -{ - struct 
cpsw_common *cpsw = ndev_to_cpsw(ndev); - - return cpsw->data.ale_entries * ALE_ENTRY_WORDS * sizeof(u32); -} - -static void cpsw_get_regs(struct net_device *ndev, - struct ethtool_regs *regs, void *p) -{ - u32 *reg = p; - struct cpsw_common *cpsw = ndev_to_cpsw(ndev); - - /* update CPSW IP version */ - regs->version = cpsw->version; - - cpsw_ale_dump(cpsw->ale, reg); -} - static void cpsw_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) { @@ -2725,119 +2045,6 @@ static void cpsw_get_drvinfo(struct net_device *ndev, strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info)); } -static u32 cpsw_get_msglevel(struct net_device *ndev) -{ - struct cpsw_priv *priv = netdev_priv(ndev); - return priv->msg_enable; -} - -static void cpsw_set_msglevel(struct net_device *ndev, u32 value) -{ - struct cpsw_priv *priv = netdev_priv(ndev); - priv->msg_enable = value; -} - -#if IS_ENABLED(CONFIG_TI_CPTS) -static int cpsw_get_ts_info(struct net_device *ndev, - struct ethtool_ts_info *info) -{ - struct cpsw_common *cpsw = ndev_to_cpsw(ndev); - - info->so_timestamping = - SOF_TIMESTAMPING_TX_HARDWARE | - SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_HARDWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | - SOF_TIMESTAMPING_RAW_HARDWARE; - info->phc_index = cpsw->cpts->phc_index; - info->tx_types = - (1 << HWTSTAMP_TX_OFF) | - (1 << HWTSTAMP_TX_ON); - info->rx_filters = - (1 << HWTSTAMP_FILTER_NONE) | - (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); - return 0; -} -#else -static int cpsw_get_ts_info(struct net_device *ndev, - struct ethtool_ts_info *info) -{ - info->so_timestamping = - SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE; - info->phc_index = -1; - info->tx_types = 0; - info->rx_filters = 0; - return 0; -} -#endif - -static int cpsw_get_link_ksettings(struct net_device *ndev, - struct ethtool_link_ksettings *ecmd) -{ - struct cpsw_priv *priv = netdev_priv(ndev); - struct cpsw_common *cpsw = priv->cpsw; - int slave_no = cpsw_slave_index(cpsw, priv); - - if (!cpsw->slaves[slave_no].phy) - return -EOPNOTSUPP; - - phy_ethtool_ksettings_get(cpsw->slaves[slave_no].phy, ecmd); - return 0; -} - -static int cpsw_set_link_ksettings(struct net_device *ndev, - const struct ethtool_link_ksettings *ecmd) -{ - struct cpsw_priv *priv = netdev_priv(ndev); - struct cpsw_common *cpsw = priv->cpsw; - int slave_no = cpsw_slave_index(cpsw, priv); - - if (cpsw->slaves[slave_no].phy) - return phy_ethtool_ksettings_set(cpsw->slaves[slave_no].phy, - ecmd); - else - return -EOPNOTSUPP; -} - -static void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) -{ - struct cpsw_priv *priv = netdev_priv(ndev); - struct cpsw_common *cpsw = priv->cpsw; - int slave_no = cpsw_slave_index(cpsw, priv); - - wol->supported = 0; - wol->wolopts = 0; - - if (cpsw->slaves[slave_no].phy) - phy_ethtool_get_wol(cpsw->slaves[slave_no].phy, wol); -} - -static int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) -{ - struct cpsw_priv *priv = netdev_priv(ndev); - struct cpsw_common *cpsw = priv->cpsw; - int slave_no = cpsw_slave_index(cpsw, priv); - - if (cpsw->slaves[slave_no].phy) - return phy_ethtool_set_wol(cpsw->slaves[slave_no].phy, wol); - else - return -EOPNOTSUPP; -} - -static void cpsw_get_pauseparam(struct net_device *ndev, - struct ethtool_pauseparam *pause) -{ - struct cpsw_priv *priv = netdev_priv(ndev); - - pause->autoneg = AUTONEG_DISABLE; - pause->rx_pause = priv->rx_pause ? 
true : false; - pause->tx_pause = priv->tx_pause ? true : false; -} - static int cpsw_set_pauseparam(struct net_device *ndev, struct ethtool_pauseparam *pause) { @@ -2851,316 +2058,10 @@ static int cpsw_set_pauseparam(struct net_device *ndev, return 0; } -static int cpsw_ethtool_op_begin(struct net_device *ndev) -{ - struct cpsw_priv *priv = netdev_priv(ndev); - struct cpsw_common *cpsw = priv->cpsw; - int ret; - - ret = pm_runtime_get_sync(cpsw->dev); - if (ret < 0) { - cpsw_err(priv, drv, "ethtool begin failed %d\n", ret); - pm_runtime_put_noidle(cpsw->dev); - } - - return ret; -} - -static void cpsw_ethtool_op_complete(struct net_device *ndev) -{ - struct cpsw_priv *priv = netdev_priv(ndev); - int ret; - - ret = pm_runtime_put(priv->cpsw->dev); - if (ret < 0) - cpsw_err(priv, drv, "ethtool complete failed %d\n", ret); -} - -static void cpsw_get_channels(struct net_device *ndev, - struct ethtool_channels *ch) -{ - struct cpsw_common *cpsw = ndev_to_cpsw(ndev); - - ch->max_rx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES; - ch->max_tx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES; - ch->max_combined = 0; - ch->max_other = 0; - ch->other_count = 0; - ch->rx_count = cpsw->rx_ch_num; - ch->tx_count = cpsw->tx_ch_num; - ch->combined_count = 0; -} - -static int cpsw_check_ch_settings(struct cpsw_common *cpsw, - struct ethtool_channels *ch) -{ - if (cpsw->quirk_irq) { - dev_err(cpsw->dev, "Maximum one tx/rx queue is allowed"); - return -EOPNOTSUPP; - } - - if (ch->combined_count) - return -EINVAL; - - /* verify we have at least one channel in each direction */ - if (!ch->rx_count || !ch->tx_count) - return -EINVAL; - - if (ch->rx_count > cpsw->data.channels || - ch->tx_count > cpsw->data.channels) - return -EINVAL; - - return 0; -} - -static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx) -{ - struct cpsw_common *cpsw = priv->cpsw; - void (*handler)(void *, int, int); - struct netdev_queue *queue; - struct cpsw_vector *vec; - int ret, *ch, vch; - - if (rx) { - ch = &cpsw->rx_ch_num; - vec = cpsw->rxv; - handler = cpsw_rx_handler; - } else { - ch = &cpsw->tx_ch_num; - vec = cpsw->txv; - handler = cpsw_tx_handler; - } - - while (*ch < ch_num) { - vch = rx ? *ch : 7 - *ch; - vec[*ch].ch = cpdma_chan_create(cpsw->dma, vch, handler, rx); - queue = netdev_get_tx_queue(priv->ndev, *ch); - queue->tx_maxrate = 0; - - if (IS_ERR(vec[*ch].ch)) - return PTR_ERR(vec[*ch].ch); - - if (!vec[*ch].ch) - return -EINVAL; - - cpsw_info(priv, ifup, "created new %d %s channel\n", *ch, - (rx ? "rx" : "tx")); - (*ch)++; - } - - while (*ch > ch_num) { - (*ch)--; - - ret = cpdma_chan_destroy(vec[*ch].ch); - if (ret) - return ret; - - cpsw_info(priv, ifup, "destroyed %d %s channel\n", *ch, - (rx ? "rx" : "tx")); - } - - return 0; -} - -static int cpsw_update_channels(struct cpsw_priv *priv, - struct ethtool_channels *ch) -{ - int ret; - - ret = cpsw_update_channels_res(priv, ch->rx_count, 1); - if (ret) - return ret; - - ret = cpsw_update_channels_res(priv, ch->tx_count, 0); - if (ret) - return ret; - - return 0; -} - -static void cpsw_suspend_data_pass(struct net_device *ndev) -{ - struct cpsw_common *cpsw = ndev_to_cpsw(ndev); - struct cpsw_slave *slave; - int i; - - /* Disable NAPI scheduling */ - cpsw_intr_disable(cpsw); - - /* Stop all transmit queues for every network device. - * Disable re-using rx descriptors with dormant_on. 
- */ - for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) { - if (!(slave->ndev && netif_running(slave->ndev))) - continue; - - netif_tx_stop_all_queues(slave->ndev); - netif_dormant_on(slave->ndev); - } - - /* Handle rest of tx packets and stop cpdma channels */ - cpdma_ctlr_stop(cpsw->dma); -} - -static int cpsw_resume_data_pass(struct net_device *ndev) -{ - struct cpsw_priv *priv = netdev_priv(ndev); - struct cpsw_common *cpsw = priv->cpsw; - struct cpsw_slave *slave; - int i, ret; - - /* Allow rx packets handling */ - for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) - if (slave->ndev && netif_running(slave->ndev)) - netif_dormant_off(slave->ndev); - - /* After this receive is started */ - if (cpsw->usage_count) { - ret = cpsw_fill_rx_channels(priv); - if (ret) - return ret; - - cpdma_ctlr_start(cpsw->dma); - cpsw_intr_enable(cpsw); - } - - /* Resume transmit for every affected interface */ - for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) - if (slave->ndev && netif_running(slave->ndev)) - netif_tx_start_all_queues(slave->ndev); - - return 0; -} - static int cpsw_set_channels(struct net_device *ndev, struct ethtool_channels *chs) { - struct cpsw_priv *priv = netdev_priv(ndev); - struct cpsw_common *cpsw = priv->cpsw; - struct cpsw_slave *slave; - int i, ret; - - ret = cpsw_check_ch_settings(cpsw, chs); - if (ret < 0) - return ret; - - cpsw_suspend_data_pass(ndev); - ret = cpsw_update_channels(priv, chs); - if (ret) - goto err; - - for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) { - if (!(slave->ndev && netif_running(slave->ndev))) - continue; - - /* Inform stack about new count of queues */ - ret = netif_set_real_num_tx_queues(slave->ndev, - cpsw->tx_ch_num); - if (ret) { - dev_err(priv->dev, "cannot set real number of tx queues\n"); - goto err; - } - - ret = netif_set_real_num_rx_queues(slave->ndev, - cpsw->rx_ch_num); - if (ret) { - dev_err(priv->dev, "cannot set real number of rx queues\n"); - goto err; - } - } - - if (cpsw->usage_count) - cpsw_split_res(ndev); - - ret = cpsw_resume_data_pass(ndev); - if (!ret) - return 0; -err: - dev_err(priv->dev, "cannot update channels number, closing device\n"); - dev_close(ndev); - return ret; -} - -static int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata) -{ - struct cpsw_priv *priv = netdev_priv(ndev); - struct cpsw_common *cpsw = priv->cpsw; - int slave_no = cpsw_slave_index(cpsw, priv); - - if (cpsw->slaves[slave_no].phy) - return phy_ethtool_get_eee(cpsw->slaves[slave_no].phy, edata); - else - return -EOPNOTSUPP; -} - -static int cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata) -{ - struct cpsw_priv *priv = netdev_priv(ndev); - struct cpsw_common *cpsw = priv->cpsw; - int slave_no = cpsw_slave_index(cpsw, priv); - - if (cpsw->slaves[slave_no].phy) - return phy_ethtool_set_eee(cpsw->slaves[slave_no].phy, edata); - else - return -EOPNOTSUPP; -} - -static int cpsw_nway_reset(struct net_device *ndev) -{ - struct cpsw_priv *priv = netdev_priv(ndev); - struct cpsw_common *cpsw = priv->cpsw; - int slave_no = cpsw_slave_index(cpsw, priv); - - if (cpsw->slaves[slave_no].phy) - return genphy_restart_aneg(cpsw->slaves[slave_no].phy); - else - return -EOPNOTSUPP; -} - -static void cpsw_get_ringparam(struct net_device *ndev, - struct ethtool_ringparam *ering) -{ - struct cpsw_priv *priv = netdev_priv(ndev); - struct cpsw_common *cpsw = priv->cpsw; - - /* not supported */ - ering->tx_max_pending = 0; - ering->tx_pending = 
cpdma_get_num_tx_descs(cpsw->dma); - ering->rx_max_pending = descs_pool_size - CPSW_MAX_QUEUES; - ering->rx_pending = cpdma_get_num_rx_descs(cpsw->dma); -} - -static int cpsw_set_ringparam(struct net_device *ndev, - struct ethtool_ringparam *ering) -{ - struct cpsw_priv *priv = netdev_priv(ndev); - struct cpsw_common *cpsw = priv->cpsw; - int ret; - - /* ignore ering->tx_pending - only rx_pending adjustment is supported */ - - if (ering->rx_mini_pending || ering->rx_jumbo_pending || - ering->rx_pending < CPSW_MAX_QUEUES || - ering->rx_pending > (descs_pool_size - CPSW_MAX_QUEUES)) - return -EINVAL; - - if (ering->rx_pending == cpdma_get_num_rx_descs(cpsw->dma)) - return 0; - - cpsw_suspend_data_pass(ndev); - - cpdma_set_num_rx_descs(cpsw->dma, ering->rx_pending); - - if (cpsw->usage_count) - cpdma_chan_split_pool(cpsw->dma); - - ret = cpsw_resume_data_pass(ndev); - if (!ret) - return 0; - - dev_err(&ndev->dev, "cannot set ring params, closing device\n"); - dev_close(ndev); - return ret; + return cpsw_set_channels_common(ndev, chs, cpsw_rx_handler); } static const struct ethtool_ops cpsw_ethtool_ops = { @@ -3193,19 +2094,6 @@ static const struct ethtool_ops cpsw_ethtool_ops = { .set_ringparam = cpsw_set_ringparam, }; -static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_common *cpsw, - u32 slave_reg_ofs, u32 sliver_reg_ofs) -{ - void __iomem *regs = cpsw->regs; - int slave_num = slave->slave_num; - struct cpsw_slave_data *data = cpsw->data.slave_data + slave_num; - - slave->data = data; - slave->regs = regs + slave_reg_ofs; - slave->sliver = regs + sliver_reg_ofs; - slave->port_vlan = data->dual_emac_res_vlan; -} - static int cpsw_probe_dt(struct cpsw_platform_data *data, struct platform_device *pdev) { @@ -3408,7 +2296,8 @@ static int cpsw_probe_dual_emac(struct cpsw_priv *priv) struct cpsw_priv *priv_sl2; int ret = 0; - ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES); + ndev = devm_alloc_etherdev_mqs(cpsw->dev, sizeof(struct cpsw_priv), + CPSW_MAX_QUEUES, CPSW_MAX_QUEUES); if (!ndev) { dev_err(cpsw->dev, "cpsw: error allocating net_device\n"); return -ENOMEM; @@ -3442,11 +2331,8 @@ static int cpsw_probe_dual_emac(struct cpsw_priv *priv) /* register the network device */ SET_NETDEV_DEV(ndev, cpsw->dev); ret = register_netdev(ndev); - if (ret) { + if (ret) dev_err(cpsw->dev, "cpsw: error registering net device\n"); - free_netdev(ndev); - ret = -ENODEV; - } return ret; } @@ -3467,63 +2353,74 @@ static const struct soc_device_attribute cpsw_soc_devices[] = { static int cpsw_probe(struct platform_device *pdev) { + struct device *dev = &pdev->dev; struct clk *clk; struct cpsw_platform_data *data; struct net_device *ndev; struct cpsw_priv *priv; - struct cpdma_params dma_params; - struct cpsw_ale_params ale_params; void __iomem *ss_regs; - void __iomem *cpts_regs; struct resource *res, *ss_res; struct gpio_descs *mode; - u32 slave_offset, sliver_offset, slave_size; const struct soc_device_attribute *soc; struct cpsw_common *cpsw; - int ret = 0, i, ch; + int ret = 0, ch; int irq; - cpsw = devm_kzalloc(&pdev->dev, sizeof(struct cpsw_common), GFP_KERNEL); + cpsw = devm_kzalloc(dev, sizeof(struct cpsw_common), GFP_KERNEL); if (!cpsw) return -ENOMEM; - cpsw->dev = &pdev->dev; + cpsw->dev = dev; - ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES); - if (!ndev) { - dev_err(&pdev->dev, "error allocating net_device\n"); - return -ENOMEM; - } - - platform_set_drvdata(pdev, ndev); - priv = netdev_priv(ndev); - priv->cpsw = cpsw; - priv->ndev = ndev; - 
priv->dev = &ndev->dev; - priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); - cpsw->rx_packet_max = max(rx_packet_max, 128); - - mode = devm_gpiod_get_array_optional(&pdev->dev, "mode", GPIOD_OUT_LOW); + mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW); if (IS_ERR(mode)) { ret = PTR_ERR(mode); - dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret); - goto clean_ndev_ret; + dev_err(dev, "gpio request failed, ret %d\n", ret); + return ret; } + clk = devm_clk_get(dev, "fck"); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); + dev_err(dev, "fck is not found %d\n", ret); + return ret; + } + cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000; + + ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ss_regs = devm_ioremap_resource(dev, ss_res); + if (IS_ERR(ss_regs)) + return PTR_ERR(ss_regs); + cpsw->regs = ss_regs; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + cpsw->wr_regs = devm_ioremap_resource(dev, res); + if (IS_ERR(cpsw->wr_regs)) + return PTR_ERR(cpsw->wr_regs); + + /* RX IRQ */ + irq = platform_get_irq(pdev, 1); + if (irq < 0) + return irq; + cpsw->irqs_table[0] = irq; + + /* TX IRQ */ + irq = platform_get_irq(pdev, 2); + if (irq < 0) + return irq; + cpsw->irqs_table[1] = irq; + /* * This may be required here for child devices. */ - pm_runtime_enable(&pdev->dev); - - /* Select default pin state */ - pinctrl_pm_select_default_state(&pdev->dev); + pm_runtime_enable(dev); /* Need to enable clocks with runtime PM api to access module * registers */ - ret = pm_runtime_get_sync(&pdev->dev); + ret = pm_runtime_get_sync(dev); if (ret < 0) { - pm_runtime_put_noidle(&pdev->dev); + pm_runtime_put_noidle(dev); goto clean_runtime_disable_ret; } @@ -3531,170 +2428,72 @@ static int cpsw_probe(struct platform_device *pdev) if (ret) goto clean_dt_ret; - data = &cpsw->data; - cpsw->rx_ch_num = 1; - cpsw->tx_ch_num = 1; - - if (is_valid_ether_addr(data->slave_data[0].mac_addr)) { - memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN); - dev_info(&pdev->dev, "Detected MACID = %pM\n", priv->mac_addr); - } else { - eth_random_addr(priv->mac_addr); - dev_info(&pdev->dev, "Random MACID = %pM\n", priv->mac_addr); - } - - memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN); + soc = soc_device_match(cpsw_soc_devices); + if (soc) + cpsw->quirk_irq = 1; - cpsw->slaves = devm_kcalloc(&pdev->dev, + data = &cpsw->data; + cpsw->slaves = devm_kcalloc(dev, data->slaves, sizeof(struct cpsw_slave), GFP_KERNEL); if (!cpsw->slaves) { ret = -ENOMEM; goto clean_dt_ret; } - for (i = 0; i < data->slaves; i++) - cpsw->slaves[i].slave_num = i; - - cpsw->slaves[0].ndev = ndev; - priv->emac_port = 0; - - clk = devm_clk_get(&pdev->dev, "fck"); - if (IS_ERR(clk)) { - dev_err(priv->dev, "fck is not found\n"); - ret = -ENODEV; - goto clean_dt_ret; - } - cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000; - - ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ss_regs = devm_ioremap_resource(&pdev->dev, ss_res); - if (IS_ERR(ss_regs)) { - ret = PTR_ERR(ss_regs); - goto clean_dt_ret; - } - cpsw->regs = ss_regs; - - cpsw->version = readl(&cpsw->regs->id_ver); - - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - cpsw->wr_regs = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(cpsw->wr_regs)) { - ret = PTR_ERR(cpsw->wr_regs); - goto clean_dt_ret; - } - memset(&dma_params, 0, sizeof(dma_params)); - memset(&ale_params, 0, sizeof(ale_params)); + cpsw->rx_packet_max = max(rx_packet_max, CPSW_MAX_PACKET_SIZE); + cpsw->descs_pool_size = descs_pool_size; - switch (cpsw->version) { - 
case CPSW_VERSION_1: - cpsw->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET; - cpts_regs = ss_regs + CPSW1_CPTS_OFFSET; - cpsw->hw_stats = ss_regs + CPSW1_HW_STATS; - dma_params.dmaregs = ss_regs + CPSW1_CPDMA_OFFSET; - dma_params.txhdp = ss_regs + CPSW1_STATERAM_OFFSET; - ale_params.ale_regs = ss_regs + CPSW1_ALE_OFFSET; - slave_offset = CPSW1_SLAVE_OFFSET; - slave_size = CPSW1_SLAVE_SIZE; - sliver_offset = CPSW1_SLIVER_OFFSET; - dma_params.desc_mem_phys = 0; - break; - case CPSW_VERSION_2: - case CPSW_VERSION_3: - case CPSW_VERSION_4: - cpsw->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET; - cpts_regs = ss_regs + CPSW2_CPTS_OFFSET; - cpsw->hw_stats = ss_regs + CPSW2_HW_STATS; - dma_params.dmaregs = ss_regs + CPSW2_CPDMA_OFFSET; - dma_params.txhdp = ss_regs + CPSW2_STATERAM_OFFSET; - ale_params.ale_regs = ss_regs + CPSW2_ALE_OFFSET; - slave_offset = CPSW2_SLAVE_OFFSET; - slave_size = CPSW2_SLAVE_SIZE; - sliver_offset = CPSW2_SLIVER_OFFSET; - dma_params.desc_mem_phys = - (u32 __force) ss_res->start + CPSW2_BD_OFFSET; - break; - default: - dev_err(priv->dev, "unknown version 0x%08x\n", cpsw->version); - ret = -ENODEV; - goto clean_dt_ret; - } - for (i = 0; i < cpsw->data.slaves; i++) { - struct cpsw_slave *slave = &cpsw->slaves[i]; - - cpsw_slave_init(slave, cpsw, slave_offset, sliver_offset); - slave_offset += slave_size; - sliver_offset += SLIVER_SIZE; - } - - dma_params.dev = &pdev->dev; - dma_params.rxthresh = dma_params.dmaregs + CPDMA_RXTHRESH; - dma_params.rxfree = dma_params.dmaregs + CPDMA_RXFREE; - dma_params.rxhdp = dma_params.txhdp + CPDMA_RXHDP; - dma_params.txcp = dma_params.txhdp + CPDMA_TXCP; - dma_params.rxcp = dma_params.txhdp + CPDMA_RXCP; - - dma_params.num_chan = data->channels; - dma_params.has_soft_reset = true; - dma_params.min_packet_size = CPSW_MIN_PACKET_SIZE; - dma_params.desc_mem_size = data->bd_ram_size; - dma_params.desc_align = 16; - dma_params.has_ext_regs = true; - dma_params.desc_hw_addr = dma_params.desc_mem_phys; - dma_params.bus_freq_mhz = cpsw->bus_freq_mhz; - dma_params.descs_pool_size = descs_pool_size; - - cpsw->dma = cpdma_ctlr_create(&dma_params); - if (!cpsw->dma) { - dev_err(priv->dev, "error initializing dma\n"); - ret = -ENOMEM; + ret = cpsw_init_common(cpsw, ss_regs, ale_ageout, + ss_res->start + CPSW2_BD_OFFSET, + descs_pool_size); + if (ret) goto clean_dt_ret; - } - - soc = soc_device_match(cpsw_soc_devices); - if (soc) - cpsw->quirk_irq = 1; ch = cpsw->quirk_irq ? 
0 : 7; cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, ch, cpsw_tx_handler, 0); if (IS_ERR(cpsw->txv[0].ch)) { - dev_err(priv->dev, "error initializing tx dma channel\n"); + dev_err(dev, "error initializing tx dma channel\n"); ret = PTR_ERR(cpsw->txv[0].ch); - goto clean_dma_ret; + goto clean_cpts; } cpsw->rxv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1); if (IS_ERR(cpsw->rxv[0].ch)) { - dev_err(priv->dev, "error initializing rx dma channel\n"); + dev_err(dev, "error initializing rx dma channel\n"); ret = PTR_ERR(cpsw->rxv[0].ch); - goto clean_dma_ret; + goto clean_cpts; } + cpsw_split_res(cpsw); - ale_params.dev = &pdev->dev; - ale_params.ale_ageout = ale_ageout; - ale_params.ale_entries = data->ale_entries; - ale_params.ale_ports = CPSW_ALE_PORTS_NUM; - - cpsw->ale = cpsw_ale_create(&ale_params); - if (!cpsw->ale) { - dev_err(priv->dev, "error initializing ale engine\n"); - ret = -ENODEV; - goto clean_dma_ret; + /* setup netdev */ + ndev = devm_alloc_etherdev_mqs(dev, sizeof(struct cpsw_priv), + CPSW_MAX_QUEUES, CPSW_MAX_QUEUES); + if (!ndev) { + dev_err(dev, "error allocating net_device\n"); + goto clean_cpts; } - cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpsw->dev->of_node); - if (IS_ERR(cpsw->cpts)) { - ret = PTR_ERR(cpsw->cpts); - goto clean_dma_ret; - } + platform_set_drvdata(pdev, ndev); + priv = netdev_priv(ndev); + priv->cpsw = cpsw; + priv->ndev = ndev; + priv->dev = dev; + priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); + priv->emac_port = 0; - ndev->irq = platform_get_irq(pdev, 1); - if (ndev->irq < 0) { - dev_err(priv->dev, "error getting irq resource\n"); - ret = ndev->irq; - goto clean_dma_ret; + if (is_valid_ether_addr(data->slave_data[0].mac_addr)) { + memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN); + dev_info(dev, "Detected MACID = %pM\n", priv->mac_addr); + } else { + eth_random_addr(priv->mac_addr); + dev_info(dev, "Random MACID = %pM\n", priv->mac_addr); } + memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN); + + cpsw->slaves[0].ndev = ndev; + ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX; ndev->netdev_ops = &cpsw_netdev_ops; @@ -3705,15 +2504,14 @@ static int cpsw_probe(struct platform_device *pdev) netif_tx_napi_add(ndev, &cpsw->napi_tx, cpsw->quirk_irq ? cpsw_tx_poll : cpsw_tx_mq_poll, CPSW_POLL_WEIGHT); - cpsw_split_res(ndev); /* register the network device */ - SET_NETDEV_DEV(ndev, &pdev->dev); + SET_NETDEV_DEV(ndev, dev); ret = register_netdev(ndev); if (ret) { - dev_err(priv->dev, "error registering net device\n"); + dev_err(dev, "error registering net device\n"); ret = -ENODEV; - goto clean_dma_ret; + goto clean_cpts; } if (cpsw->data.dual_emac) { @@ -3731,40 +2529,24 @@ static int cpsw_probe(struct platform_device *pdev) * If anyone wants to implement support for those, make sure to * first request and append them to irqs_table array. 
*/ - - /* RX IRQ */ - irq = platform_get_irq(pdev, 1); - if (irq < 0) { - ret = irq; - goto clean_dma_ret; - } - - cpsw->irqs_table[0] = irq; - ret = devm_request_irq(&pdev->dev, irq, cpsw_rx_interrupt, - 0, dev_name(&pdev->dev), cpsw); + ret = devm_request_irq(dev, cpsw->irqs_table[0], cpsw_rx_interrupt, + 0, dev_name(dev), cpsw); if (ret < 0) { - dev_err(priv->dev, "error attaching irq (%d)\n", ret); - goto clean_dma_ret; + dev_err(dev, "error attaching irq (%d)\n", ret); + goto clean_unregister_netdev_ret; } - /* TX IRQ */ - irq = platform_get_irq(pdev, 2); - if (irq < 0) { - ret = irq; - goto clean_dma_ret; - } - cpsw->irqs_table[1] = irq; - ret = devm_request_irq(&pdev->dev, irq, cpsw_tx_interrupt, + ret = devm_request_irq(dev, cpsw->irqs_table[1], cpsw_tx_interrupt, 0, dev_name(&pdev->dev), cpsw); if (ret < 0) { - dev_err(priv->dev, "error attaching irq (%d)\n", ret); - goto clean_dma_ret; + dev_err(dev, "error attaching irq (%d)\n", ret); + goto clean_unregister_netdev_ret; } cpsw_notice(priv, probe, "initialized device (regs %pa, irq %d, pool size %d)\n", - &ss_res->start, ndev->irq, dma_params.descs_pool_size); + &ss_res->start, cpsw->irqs_table[0], descs_pool_size); pm_runtime_put(&pdev->dev); @@ -3772,15 +2554,14 @@ static int cpsw_probe(struct platform_device *pdev) clean_unregister_netdev_ret: unregister_netdev(ndev); -clean_dma_ret: +clean_cpts: + cpts_release(cpsw->cpts); cpdma_ctlr_destroy(cpsw->dma); clean_dt_ret: cpsw_remove_dt(pdev); pm_runtime_put_sync(&pdev->dev); clean_runtime_disable_ret: pm_runtime_disable(&pdev->dev); -clean_ndev_ret: - free_netdev(priv->ndev); return ret; } @@ -3805,9 +2586,6 @@ static int cpsw_remove(struct platform_device *pdev) cpsw_remove_dt(pdev); pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); - if (cpsw->data.dual_emac) - free_netdev(cpsw->slaves[1].ndev); - free_netdev(ndev); return 0; } diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h index 907e05fc22e4..35d602f03281 100644 --- a/drivers/net/ethernet/ti/cpsw.h +++ b/drivers/net/ethernet/ti/cpsw.h @@ -1,15 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* Texas Instruments Ethernet Switch Driver * * Copyright (C) 2013 Texas Instruments * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #ifndef __CPSW_H__ #define __CPSW_H__ diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c index 798c989d5d93..84025dcc78d5 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.c +++ b/drivers/net/ethernet/ti/cpsw_ale.c @@ -1,16 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Texas Instruments N-Port Ethernet Switch Address Lookup Engine * * Copyright (C) 2012 Texas Instruments * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
*/ #include <linux/kernel.h> #include <linux/module.h> @@ -287,6 +280,9 @@ int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid) if (cpsw_ale_get_mcast(ale_entry)) { u8 addr[6]; + if (cpsw_ale_get_super(ale_entry)) + continue; + cpsw_ale_get_addr(ale_entry, addr); if (!is_broadcast_ether_addr(addr)) cpsw_ale_flush_mcast(ale, ale_entry, port_mask); @@ -296,7 +292,6 @@ int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid) } return 0; } -EXPORT_SYMBOL_GPL(cpsw_ale_flush_multicast); static inline void cpsw_ale_set_vlan_entry_type(u32 *ale_entry, int flags, u16 vid) @@ -334,7 +329,6 @@ int cpsw_ale_add_ucast(struct cpsw_ale *ale, const u8 *addr, int port, cpsw_ale_write(ale, idx, ale_entry); return 0; } -EXPORT_SYMBOL_GPL(cpsw_ale_add_ucast); int cpsw_ale_del_ucast(struct cpsw_ale *ale, const u8 *addr, int port, int flags, u16 vid) @@ -350,7 +344,6 @@ int cpsw_ale_del_ucast(struct cpsw_ale *ale, const u8 *addr, int port, cpsw_ale_write(ale, idx, ale_entry); return 0; } -EXPORT_SYMBOL_GPL(cpsw_ale_del_ucast); int cpsw_ale_add_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask, int flags, u16 vid, int mcast_state) @@ -365,7 +358,7 @@ int cpsw_ale_add_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask, cpsw_ale_set_vlan_entry_type(ale_entry, flags, vid); cpsw_ale_set_addr(ale_entry, addr); - cpsw_ale_set_super(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0); + cpsw_ale_set_super(ale_entry, (flags & ALE_SUPER) ? 1 : 0); cpsw_ale_set_mcast_state(ale_entry, mcast_state); mask = cpsw_ale_get_port_mask(ale_entry, @@ -384,7 +377,6 @@ int cpsw_ale_add_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask, cpsw_ale_write(ale, idx, ale_entry); return 0; } -EXPORT_SYMBOL_GPL(cpsw_ale_add_mcast); int cpsw_ale_del_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask, int flags, u16 vid) @@ -407,7 +399,6 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask, cpsw_ale_write(ale, idx, ale_entry); return 0; } -EXPORT_SYMBOL_GPL(cpsw_ale_del_mcast); /* ALE NetCP NU switch specific vlan functions */ static void cpsw_ale_set_vlan_mcast(struct cpsw_ale *ale, u32 *ale_entry, @@ -458,7 +449,6 @@ int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag, cpsw_ale_write(ale, idx, ale_entry); return 0; } -EXPORT_SYMBOL_GPL(cpsw_ale_add_vlan); int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask) { @@ -480,40 +470,39 @@ int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask) cpsw_ale_write(ale, idx, ale_entry); return 0; } -EXPORT_SYMBOL_GPL(cpsw_ale_del_vlan); -void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti) +void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti, int port) { u32 ale_entry[ALE_ENTRY_WORDS]; - int type, idx; int unreg_mcast = 0; - - /* Only bother doing the work if the setting is actually changing */ - if (ale->allmulti == allmulti) - return; - - /* Remember the new setting to check against next time */ - ale->allmulti = allmulti; + int type, idx; for (idx = 0; idx < ale->params.ale_entries; idx++) { + int vlan_members; + cpsw_ale_read(ale, idx, ale_entry); type = cpsw_ale_get_entry_type(ale_entry); if (type != ALE_TYPE_VLAN) continue; + vlan_members = + cpsw_ale_get_vlan_member_list(ale_entry, + ale->vlan_field_bits); + + if (port != -1 && !(vlan_members & BIT(port))) + continue; unreg_mcast = cpsw_ale_get_vlan_unreg_mcast(ale_entry, ale->vlan_field_bits); if (allmulti) - unreg_mcast |= 1; + unreg_mcast |= ALE_PORT_HOST; else - unreg_mcast &= ~1; + unreg_mcast 
&= ~ALE_PORT_HOST; cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast, ale->vlan_field_bits); cpsw_ale_write(ale, idx, ale_entry); } } -EXPORT_SYMBOL_GPL(cpsw_ale_set_allmulti); struct ale_control_info { const char *name; @@ -739,7 +728,6 @@ int cpsw_ale_control_set(struct cpsw_ale *ale, int port, int control, return 0; } -EXPORT_SYMBOL_GPL(cpsw_ale_control_set); int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control) { @@ -763,7 +751,6 @@ int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control) tmp = readl_relaxed(ale->params.ale_regs + offset) >> shift; return tmp & BITMASK(info->bits); } -EXPORT_SYMBOL_GPL(cpsw_ale_control_get); static void cpsw_ale_timer(struct timer_list *t) { @@ -788,14 +775,12 @@ void cpsw_ale_start(struct cpsw_ale *ale) add_timer(&ale->timer); } } -EXPORT_SYMBOL_GPL(cpsw_ale_start); void cpsw_ale_stop(struct cpsw_ale *ale) { del_timer_sync(&ale->timer); cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0); } -EXPORT_SYMBOL_GPL(cpsw_ale_stop); struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params) { @@ -879,7 +864,6 @@ struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params) return ale; } -EXPORT_SYMBOL_GPL(cpsw_ale_create); void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data) { @@ -890,8 +874,3 @@ void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data) data += ALE_ENTRY_WORDS; } } -EXPORT_SYMBOL_GPL(cpsw_ale_dump); - -MODULE_LICENSE("GPL v2"); -MODULE_DESCRIPTION("TI CPSW ALE driver"); -MODULE_AUTHOR("Texas Instruments"); diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h index cd07a3e96d57..370df254eb12 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.h +++ b/drivers/net/ethernet/ti/cpsw_ale.h @@ -1,16 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Texas Instruments N-Port Ethernet Switch Address Lookup Engine APIs * * Copyright (C) 2012 Texas Instruments * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
*/ #ifndef __TI_CPSW_ALE_H__ #define __TI_CPSW_ALE_H__ @@ -37,7 +30,6 @@ struct cpsw_ale { struct cpsw_ale_params params; struct timer_list timer; unsigned long ageout; - int allmulti; u32 version; /* These bits are different on NetCP NU Switch ALE */ u32 port_mask_bits; @@ -116,7 +108,7 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask, int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag, int reg_mcast, int unreg_mcast); int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port); -void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti); +void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti, int port); int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control); int cpsw_ale_control_set(struct cpsw_ale *ale, int port, diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c new file mode 100644 index 000000000000..a4a7ec0d2531 --- /dev/null +++ b/drivers/net/ethernet/ti/cpsw_ethtool.c @@ -0,0 +1,719 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Texas Instruments Ethernet Switch Driver ethtool intf + * + * Copyright (C) 2019 Texas Instruments + */ + +#include <linux/if_ether.h> +#include <linux/if_vlan.h> +#include <linux/kmemleak.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/net_tstamp.h> +#include <linux/phy.h> +#include <linux/pm_runtime.h> +#include <linux/skbuff.h> + +#include "cpsw.h" +#include "cpts.h" +#include "cpsw_ale.h" +#include "cpsw_priv.h" +#include "davinci_cpdma.h" + +struct cpsw_hw_stats { + u32 rxgoodframes; + u32 rxbroadcastframes; + u32 rxmulticastframes; + u32 rxpauseframes; + u32 rxcrcerrors; + u32 rxaligncodeerrors; + u32 rxoversizedframes; + u32 rxjabberframes; + u32 rxundersizedframes; + u32 rxfragments; + u32 __pad_0[2]; + u32 rxoctets; + u32 txgoodframes; + u32 txbroadcastframes; + u32 txmulticastframes; + u32 txpauseframes; + u32 txdeferredframes; + u32 txcollisionframes; + u32 txsinglecollframes; + u32 txmultcollframes; + u32 txexcessivecollisions; + u32 txlatecollisions; + u32 txunderrun; + u32 txcarriersenseerrors; + u32 txoctets; + u32 octetframes64; + u32 octetframes65t127; + u32 octetframes128t255; + u32 octetframes256t511; + u32 octetframes512t1023; + u32 octetframes1024tup; + u32 netoctets; + u32 rxsofoverruns; + u32 rxmofoverruns; + u32 rxdmaoverruns; +}; + +struct cpsw_stats { + char stat_string[ETH_GSTRING_LEN]; + int type; + int sizeof_stat; + int stat_offset; +}; + +enum { + CPSW_STATS, + CPDMA_RX_STATS, + CPDMA_TX_STATS, +}; + +#define CPSW_STAT(m) CPSW_STATS, \ + FIELD_SIZEOF(struct cpsw_hw_stats, m), \ + offsetof(struct cpsw_hw_stats, m) +#define CPDMA_RX_STAT(m) CPDMA_RX_STATS, \ + FIELD_SIZEOF(struct cpdma_chan_stats, m), \ + offsetof(struct cpdma_chan_stats, m) +#define CPDMA_TX_STAT(m) CPDMA_TX_STATS, \ + FIELD_SIZEOF(struct cpdma_chan_stats, m), \ + offsetof(struct cpdma_chan_stats, m) + +static const struct cpsw_stats cpsw_gstrings_stats[] = { + { "Good Rx Frames", CPSW_STAT(rxgoodframes) }, + { "Broadcast Rx Frames", CPSW_STAT(rxbroadcastframes) }, + { "Multicast Rx Frames", CPSW_STAT(rxmulticastframes) }, + { "Pause Rx Frames", CPSW_STAT(rxpauseframes) }, + { "Rx CRC Errors", CPSW_STAT(rxcrcerrors) }, + { "Rx Align/Code Errors", CPSW_STAT(rxaligncodeerrors) }, + { "Oversize Rx Frames", CPSW_STAT(rxoversizedframes) }, + { "Rx Jabbers", CPSW_STAT(rxjabberframes) }, + { "Undersize (Short) Rx Frames", CPSW_STAT(rxundersizedframes) }, + { "Rx Fragments", CPSW_STAT(rxfragments) }, + { "Rx Octets", 
CPSW_STAT(rxoctets) }, + { "Good Tx Frames", CPSW_STAT(txgoodframes) }, + { "Broadcast Tx Frames", CPSW_STAT(txbroadcastframes) }, + { "Multicast Tx Frames", CPSW_STAT(txmulticastframes) }, + { "Pause Tx Frames", CPSW_STAT(txpauseframes) }, + { "Deferred Tx Frames", CPSW_STAT(txdeferredframes) }, + { "Collisions", CPSW_STAT(txcollisionframes) }, + { "Single Collision Tx Frames", CPSW_STAT(txsinglecollframes) }, + { "Multiple Collision Tx Frames", CPSW_STAT(txmultcollframes) }, + { "Excessive Collisions", CPSW_STAT(txexcessivecollisions) }, + { "Late Collisions", CPSW_STAT(txlatecollisions) }, + { "Tx Underrun", CPSW_STAT(txunderrun) }, + { "Carrier Sense Errors", CPSW_STAT(txcarriersenseerrors) }, + { "Tx Octets", CPSW_STAT(txoctets) }, + { "Rx + Tx 64 Octet Frames", CPSW_STAT(octetframes64) }, + { "Rx + Tx 65-127 Octet Frames", CPSW_STAT(octetframes65t127) }, + { "Rx + Tx 128-255 Octet Frames", CPSW_STAT(octetframes128t255) }, + { "Rx + Tx 256-511 Octet Frames", CPSW_STAT(octetframes256t511) }, + { "Rx + Tx 512-1023 Octet Frames", CPSW_STAT(octetframes512t1023) }, + { "Rx + Tx 1024-Up Octet Frames", CPSW_STAT(octetframes1024tup) }, + { "Net Octets", CPSW_STAT(netoctets) }, + { "Rx Start of Frame Overruns", CPSW_STAT(rxsofoverruns) }, + { "Rx Middle of Frame Overruns", CPSW_STAT(rxmofoverruns) }, + { "Rx DMA Overruns", CPSW_STAT(rxdmaoverruns) }, +}; + +static const struct cpsw_stats cpsw_gstrings_ch_stats[] = { + { "head_enqueue", CPDMA_RX_STAT(head_enqueue) }, + { "tail_enqueue", CPDMA_RX_STAT(tail_enqueue) }, + { "pad_enqueue", CPDMA_RX_STAT(pad_enqueue) }, + { "misqueued", CPDMA_RX_STAT(misqueued) }, + { "desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) }, + { "pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) }, + { "runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) }, + { "runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) }, + { "empty_dequeue", CPDMA_RX_STAT(empty_dequeue) }, + { "busy_dequeue", CPDMA_RX_STAT(busy_dequeue) }, + { "good_dequeue", CPDMA_RX_STAT(good_dequeue) }, + { "requeue", CPDMA_RX_STAT(requeue) }, + { "teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) }, +}; + +#define CPSW_STATS_COMMON_LEN ARRAY_SIZE(cpsw_gstrings_stats) +#define CPSW_STATS_CH_LEN ARRAY_SIZE(cpsw_gstrings_ch_stats) + +u32 cpsw_get_msglevel(struct net_device *ndev) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + + return priv->msg_enable; +} + +void cpsw_set_msglevel(struct net_device *ndev, u32 value) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + + priv->msg_enable = value; +} + +int cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal) +{ + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); + + coal->rx_coalesce_usecs = cpsw->coal_intvl; + return 0; +} + +int cpsw_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + u32 int_ctrl; + u32 num_interrupts = 0; + u32 prescale = 0; + u32 addnl_dvdr = 1; + u32 coal_intvl = 0; + struct cpsw_common *cpsw = priv->cpsw; + + coal_intvl = coal->rx_coalesce_usecs; + + int_ctrl = readl(&cpsw->wr_regs->int_control); + prescale = cpsw->bus_freq_mhz * 4; + + if (!coal->rx_coalesce_usecs) { + int_ctrl &= ~(CPSW_INTPRESCALE_MASK | CPSW_INTPACEEN); + goto update_return; + } + + if (coal_intvl < CPSW_CMINTMIN_INTVL) + coal_intvl = CPSW_CMINTMIN_INTVL; + + if (coal_intvl > CPSW_CMINTMAX_INTVL) { + /* Interrupt pacer works with 4us Pulse, we can + * throttle further by dilating the 4us pulse. 
+ */ + addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale; + + if (addnl_dvdr > 1) { + prescale *= addnl_dvdr; + if (coal_intvl > (CPSW_CMINTMAX_INTVL * addnl_dvdr)) + coal_intvl = (CPSW_CMINTMAX_INTVL + * addnl_dvdr); + } else { + addnl_dvdr = 1; + coal_intvl = CPSW_CMINTMAX_INTVL; + } + } + + num_interrupts = (1000 * addnl_dvdr) / coal_intvl; + writel(num_interrupts, &cpsw->wr_regs->rx_imax); + writel(num_interrupts, &cpsw->wr_regs->tx_imax); + + int_ctrl |= CPSW_INTPACEEN; + int_ctrl &= (~CPSW_INTPRESCALE_MASK); + int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK); + +update_return: + writel(int_ctrl, &cpsw->wr_regs->int_control); + + cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl); + cpsw->coal_intvl = coal_intvl; + + return 0; +} + +int cpsw_get_sset_count(struct net_device *ndev, int sset) +{ + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); + + switch (sset) { + case ETH_SS_STATS: + return (CPSW_STATS_COMMON_LEN + + (cpsw->rx_ch_num + cpsw->tx_ch_num) * + CPSW_STATS_CH_LEN); + default: + return -EOPNOTSUPP; + } +} + +static void cpsw_add_ch_strings(u8 **p, int ch_num, int rx_dir) +{ + int ch_stats_len; + int line; + int i; + + ch_stats_len = CPSW_STATS_CH_LEN * ch_num; + for (i = 0; i < ch_stats_len; i++) { + line = i % CPSW_STATS_CH_LEN; + snprintf(*p, ETH_GSTRING_LEN, + "%s DMA chan %ld: %s", rx_dir ? "Rx" : "Tx", + (long)(i / CPSW_STATS_CH_LEN), + cpsw_gstrings_ch_stats[line].stat_string); + *p += ETH_GSTRING_LEN; + } +} + +void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data) +{ + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < CPSW_STATS_COMMON_LEN; i++) { + memcpy(p, cpsw_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + + cpsw_add_ch_strings(&p, cpsw->rx_ch_num, 1); + cpsw_add_ch_strings(&p, cpsw->tx_ch_num, 0); + break; + } +} + +void cpsw_get_ethtool_stats(struct net_device *ndev, + struct ethtool_stats *stats, u64 *data) +{ + u8 *p; + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); + struct cpdma_chan_stats ch_stats; + int i, l, ch; + + /* Collect Davinci CPDMA stats for Rx and Tx Channel */ + for (l = 0; l < CPSW_STATS_COMMON_LEN; l++) + data[l] = readl(cpsw->hw_stats + + cpsw_gstrings_stats[l].stat_offset); + + for (ch = 0; ch < cpsw->rx_ch_num; ch++) { + cpdma_chan_get_stats(cpsw->rxv[ch].ch, &ch_stats); + for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) { + p = (u8 *)&ch_stats + + cpsw_gstrings_ch_stats[i].stat_offset; + data[l] = *(u32 *)p; + } + } + + for (ch = 0; ch < cpsw->tx_ch_num; ch++) { + cpdma_chan_get_stats(cpsw->txv[ch].ch, &ch_stats); + for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) { + p = (u8 *)&ch_stats + + cpsw_gstrings_ch_stats[i].stat_offset; + data[l] = *(u32 *)p; + } + } +} + +void cpsw_get_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *pause) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + + pause->autoneg = AUTONEG_DISABLE; + pause->rx_pause = priv->rx_pause ? true : false; + pause->tx_pause = priv->tx_pause ? 
true : false; +} + +void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + int slave_no = cpsw_slave_index(cpsw, priv); + + wol->supported = 0; + wol->wolopts = 0; + + if (cpsw->slaves[slave_no].phy) + phy_ethtool_get_wol(cpsw->slaves[slave_no].phy, wol); +} + +int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + int slave_no = cpsw_slave_index(cpsw, priv); + + if (cpsw->slaves[slave_no].phy) + return phy_ethtool_set_wol(cpsw->slaves[slave_no].phy, wol); + else + return -EOPNOTSUPP; +} + +int cpsw_get_regs_len(struct net_device *ndev) +{ + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); + + return cpsw->data.ale_entries * ALE_ENTRY_WORDS * sizeof(u32); +} + +void cpsw_get_regs(struct net_device *ndev, struct ethtool_regs *regs, void *p) +{ + u32 *reg = p; + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); + + /* update CPSW IP version */ + regs->version = cpsw->version; + + cpsw_ale_dump(cpsw->ale, reg); +} + +int cpsw_ethtool_op_begin(struct net_device *ndev) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + int ret; + + ret = pm_runtime_get_sync(cpsw->dev); + if (ret < 0) { + cpsw_err(priv, drv, "ethtool begin failed %d\n", ret); + pm_runtime_put_noidle(cpsw->dev); + } + + return ret; +} + +void cpsw_ethtool_op_complete(struct net_device *ndev) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + int ret; + + ret = pm_runtime_put(priv->cpsw->dev); + if (ret < 0) + cpsw_err(priv, drv, "ethtool complete failed %d\n", ret); +} + +void cpsw_get_channels(struct net_device *ndev, struct ethtool_channels *ch) +{ + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); + + ch->max_rx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES; + ch->max_tx = cpsw->quirk_irq ? 
1 : CPSW_MAX_QUEUES; + ch->max_combined = 0; + ch->max_other = 0; + ch->other_count = 0; + ch->rx_count = cpsw->rx_ch_num; + ch->tx_count = cpsw->tx_ch_num; + ch->combined_count = 0; +} + +int cpsw_get_link_ksettings(struct net_device *ndev, + struct ethtool_link_ksettings *ecmd) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + int slave_no = cpsw_slave_index(cpsw, priv); + + if (!cpsw->slaves[slave_no].phy) + return -EOPNOTSUPP; + + phy_ethtool_ksettings_get(cpsw->slaves[slave_no].phy, ecmd); + return 0; +} + +int cpsw_set_link_ksettings(struct net_device *ndev, + const struct ethtool_link_ksettings *ecmd) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + int slave_no = cpsw_slave_index(cpsw, priv); + + if (!cpsw->slaves[slave_no].phy) + return -EOPNOTSUPP; + + return phy_ethtool_ksettings_set(cpsw->slaves[slave_no].phy, ecmd); +} + +int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + int slave_no = cpsw_slave_index(cpsw, priv); + + if (cpsw->slaves[slave_no].phy) + return phy_ethtool_get_eee(cpsw->slaves[slave_no].phy, edata); + else + return -EOPNOTSUPP; +} + +int cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + int slave_no = cpsw_slave_index(cpsw, priv); + + if (cpsw->slaves[slave_no].phy) + return phy_ethtool_set_eee(cpsw->slaves[slave_no].phy, edata); + else + return -EOPNOTSUPP; +} + +int cpsw_nway_reset(struct net_device *ndev) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + int slave_no = cpsw_slave_index(cpsw, priv); + + if (cpsw->slaves[slave_no].phy) + return genphy_restart_aneg(cpsw->slaves[slave_no].phy); + else + return -EOPNOTSUPP; +} + +static void cpsw_suspend_data_pass(struct net_device *ndev) +{ + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); + struct cpsw_slave *slave; + int i; + + /* Disable NAPI scheduling */ + cpsw_intr_disable(cpsw); + + /* Stop all transmit queues for every network device. + * Disable re-using rx descriptors with dormant_on. 
+ */ + for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) { + if (!(slave->ndev && netif_running(slave->ndev))) + continue; + + netif_tx_stop_all_queues(slave->ndev); + netif_dormant_on(slave->ndev); + } + + /* Handle rest of tx packets and stop cpdma channels */ + cpdma_ctlr_stop(cpsw->dma); +} + +static int cpsw_resume_data_pass(struct net_device *ndev) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_slave *slave; + int i, ret; + + /* Allow rx packets handling */ + for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) + if (slave->ndev && netif_running(slave->ndev)) + netif_dormant_off(slave->ndev); + + /* After this receive is started */ + if (cpsw->usage_count) { + ret = cpsw_fill_rx_channels(priv); + if (ret) + return ret; + + cpdma_ctlr_start(cpsw->dma); + cpsw_intr_enable(cpsw); + } + + /* Resume transmit for every affected interface */ + for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) + if (slave->ndev && netif_running(slave->ndev)) + netif_tx_start_all_queues(slave->ndev); + + return 0; +} + +static int cpsw_check_ch_settings(struct cpsw_common *cpsw, + struct ethtool_channels *ch) +{ + if (cpsw->quirk_irq) { + dev_err(cpsw->dev, "Maximum one tx/rx queue is allowed"); + return -EOPNOTSUPP; + } + + if (ch->combined_count) + return -EINVAL; + + /* verify we have at least one channel in each direction */ + if (!ch->rx_count || !ch->tx_count) + return -EINVAL; + + if (ch->rx_count > cpsw->data.channels || + ch->tx_count > cpsw->data.channels) + return -EINVAL; + + return 0; +} + +static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx, + cpdma_handler_fn rx_handler) +{ + struct cpsw_common *cpsw = priv->cpsw; + void (*handler)(void *, int, int); + struct netdev_queue *queue; + struct cpsw_vector *vec; + int ret, *ch, vch; + + if (rx) { + ch = &cpsw->rx_ch_num; + vec = cpsw->rxv; + handler = rx_handler; + } else { + ch = &cpsw->tx_ch_num; + vec = cpsw->txv; + handler = cpsw_tx_handler; + } + + while (*ch < ch_num) { + vch = rx ? *ch : 7 - *ch; + vec[*ch].ch = cpdma_chan_create(cpsw->dma, vch, handler, rx); + queue = netdev_get_tx_queue(priv->ndev, *ch); + queue->tx_maxrate = 0; + + if (IS_ERR(vec[*ch].ch)) + return PTR_ERR(vec[*ch].ch); + + if (!vec[*ch].ch) + return -EINVAL; + + cpsw_info(priv, ifup, "created new %d %s channel\n", *ch, + (rx ? "rx" : "tx")); + (*ch)++; + } + + while (*ch > ch_num) { + (*ch)--; + + ret = cpdma_chan_destroy(vec[*ch].ch); + if (ret) + return ret; + + cpsw_info(priv, ifup, "destroyed %d %s channel\n", *ch, + (rx ? 
"rx" : "tx")); + } + + return 0; +} + +int cpsw_set_channels_common(struct net_device *ndev, + struct ethtool_channels *chs, + cpdma_handler_fn rx_handler) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_slave *slave; + int i, ret; + + ret = cpsw_check_ch_settings(cpsw, chs); + if (ret < 0) + return ret; + + cpsw_suspend_data_pass(ndev); + + ret = cpsw_update_channels_res(priv, chs->rx_count, 1, rx_handler); + if (ret) + goto err; + + ret = cpsw_update_channels_res(priv, chs->tx_count, 0, rx_handler); + if (ret) + goto err; + + for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) { + if (!(slave->ndev && netif_running(slave->ndev))) + continue; + + /* Inform stack about new count of queues */ + ret = netif_set_real_num_tx_queues(slave->ndev, + cpsw->tx_ch_num); + if (ret) { + dev_err(priv->dev, "cannot set real number of tx queues\n"); + goto err; + } + + ret = netif_set_real_num_rx_queues(slave->ndev, + cpsw->rx_ch_num); + if (ret) { + dev_err(priv->dev, "cannot set real number of rx queues\n"); + goto err; + } + } + + if (cpsw->usage_count) + cpsw_split_res(cpsw); + + ret = cpsw_resume_data_pass(ndev); + if (!ret) + return 0; +err: + dev_err(priv->dev, "cannot update channels number, closing device\n"); + dev_close(ndev); + return ret; +} + +void cpsw_get_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ering) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + + /* not supported */ + ering->tx_max_pending = 0; + ering->tx_pending = cpdma_get_num_tx_descs(cpsw->dma); + ering->rx_max_pending = cpsw->descs_pool_size - CPSW_MAX_QUEUES; + ering->rx_pending = cpdma_get_num_rx_descs(cpsw->dma); +} + +int cpsw_set_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ering) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + int ret; + + /* ignore ering->tx_pending - only rx_pending adjustment is supported */ + + if (ering->rx_mini_pending || ering->rx_jumbo_pending || + ering->rx_pending < CPSW_MAX_QUEUES || + ering->rx_pending > (cpsw->descs_pool_size - CPSW_MAX_QUEUES)) + return -EINVAL; + + if (ering->rx_pending == cpdma_get_num_rx_descs(cpsw->dma)) + return 0; + + cpsw_suspend_data_pass(ndev); + + cpdma_set_num_rx_descs(cpsw->dma, ering->rx_pending); + + if (cpsw->usage_count) + cpdma_chan_split_pool(cpsw->dma); + + ret = cpsw_resume_data_pass(ndev); + if (!ret) + return 0; + + dev_err(cpsw->dev, "cannot set ring params, closing device\n"); + dev_close(ndev); + return ret; +} + +#if IS_ENABLED(CONFIG_TI_CPTS) +int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info) +{ + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); + + info->so_timestamping = + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + info->phc_index = cpsw->cpts->phc_index; + info->tx_types = + (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON); + info->rx_filters = + (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); + return 0; +} +#else +int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info) +{ + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + info->phc_index = -1; + info->tx_types = 0; + info->rx_filters = 0; + return 0; +} +#endif diff --git 
a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c new file mode 100644 index 000000000000..476d050a022c --- /dev/null +++ b/drivers/net/ethernet/ti/cpsw_priv.c @@ -0,0 +1,132 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Texas Instruments Ethernet Switch Driver + * + * Copyright (C) 2019 Texas Instruments + */ + +#include <linux/if_ether.h> +#include <linux/if_vlan.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/phy.h> +#include <linux/platform_device.h> +#include <linux/skbuff.h> + +#include "cpts.h" +#include "cpsw_ale.h" +#include "cpsw_priv.h" +#include "cpsw_sl.h" +#include "davinci_cpdma.h" + +int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs, + int ale_ageout, phys_addr_t desc_mem_phys, + int descs_pool_size) +{ + u32 slave_offset, sliver_offset, slave_size; + struct cpsw_ale_params ale_params; + struct cpsw_platform_data *data; + struct cpdma_params dma_params; + struct device *dev = cpsw->dev; + void __iomem *cpts_regs; + int ret = 0, i; + + data = &cpsw->data; + cpsw->rx_ch_num = 1; + cpsw->tx_ch_num = 1; + + cpsw->version = readl(&cpsw->regs->id_ver); + + memset(&dma_params, 0, sizeof(dma_params)); + memset(&ale_params, 0, sizeof(ale_params)); + + switch (cpsw->version) { + case CPSW_VERSION_1: + cpsw->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET; + cpts_regs = ss_regs + CPSW1_CPTS_OFFSET; + cpsw->hw_stats = ss_regs + CPSW1_HW_STATS; + dma_params.dmaregs = ss_regs + CPSW1_CPDMA_OFFSET; + dma_params.txhdp = ss_regs + CPSW1_STATERAM_OFFSET; + ale_params.ale_regs = ss_regs + CPSW1_ALE_OFFSET; + slave_offset = CPSW1_SLAVE_OFFSET; + slave_size = CPSW1_SLAVE_SIZE; + sliver_offset = CPSW1_SLIVER_OFFSET; + dma_params.desc_mem_phys = 0; + break; + case CPSW_VERSION_2: + case CPSW_VERSION_3: + case CPSW_VERSION_4: + cpsw->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET; + cpts_regs = ss_regs + CPSW2_CPTS_OFFSET; + cpsw->hw_stats = ss_regs + CPSW2_HW_STATS; + dma_params.dmaregs = ss_regs + CPSW2_CPDMA_OFFSET; + dma_params.txhdp = ss_regs + CPSW2_STATERAM_OFFSET; + ale_params.ale_regs = ss_regs + CPSW2_ALE_OFFSET; + slave_offset = CPSW2_SLAVE_OFFSET; + slave_size = CPSW2_SLAVE_SIZE; + sliver_offset = CPSW2_SLIVER_OFFSET; + dma_params.desc_mem_phys = desc_mem_phys; + break; + default: + dev_err(dev, "unknown version 0x%08x\n", cpsw->version); + return -ENODEV; + } + + for (i = 0; i < cpsw->data.slaves; i++) { + struct cpsw_slave *slave = &cpsw->slaves[i]; + void __iomem *regs = cpsw->regs; + + slave->slave_num = i; + slave->data = &cpsw->data.slave_data[i]; + slave->regs = regs + slave_offset; + slave->port_vlan = slave->data->dual_emac_res_vlan; + slave->mac_sl = cpsw_sl_get("cpsw", dev, regs + sliver_offset); + if (IS_ERR(slave->mac_sl)) + return PTR_ERR(slave->mac_sl); + + slave_offset += slave_size; + sliver_offset += SLIVER_SIZE; + } + + ale_params.dev = dev; + ale_params.ale_ageout = ale_ageout; + ale_params.ale_entries = data->ale_entries; + ale_params.ale_ports = CPSW_ALE_PORTS_NUM; + + cpsw->ale = cpsw_ale_create(&ale_params); + if (!cpsw->ale) { + dev_err(dev, "error initializing ale engine\n"); + return -ENODEV; + } + + dma_params.dev = dev; + dma_params.rxthresh = dma_params.dmaregs + CPDMA_RXTHRESH; + dma_params.rxfree = dma_params.dmaregs + CPDMA_RXFREE; + dma_params.rxhdp = dma_params.txhdp + CPDMA_RXHDP; + dma_params.txcp = dma_params.txhdp + CPDMA_TXCP; + dma_params.rxcp = dma_params.txhdp + CPDMA_RXCP; + + dma_params.num_chan = data->channels; + dma_params.has_soft_reset = true; + 
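/* [Editor's note -- not part of the original patch] rxthresh and rxfree
 * above are offsets into the main CPDMA register block, while rxhdp, txcp
 * and rxcp are derived from txhdp: the controller's state RAM lays out
 * TXHDP, RXHDP, TXCP and RXCP in consecutive 0x20-byte banks, matching
 * the CPDMA_RXHDP/CPDMA_TXCP/CPDMA_RXCP offsets in cpsw_priv.h below.
 */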
dma_params.min_packet_size = CPSW_MIN_PACKET_SIZE; + dma_params.desc_mem_size = data->bd_ram_size; + dma_params.desc_align = 16; + dma_params.has_ext_regs = true; + dma_params.desc_hw_addr = dma_params.desc_mem_phys; + dma_params.bus_freq_mhz = cpsw->bus_freq_mhz; + dma_params.descs_pool_size = descs_pool_size; + + cpsw->dma = cpdma_ctlr_create(&dma_params); + if (!cpsw->dma) { + dev_err(dev, "error initializing dma\n"); + return -ENOMEM; + } + + cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpsw->dev->of_node); + if (IS_ERR(cpsw->cpts)) { + ret = PTR_ERR(cpsw->cpts); + cpdma_ctlr_destroy(cpsw->dma); + } + + return ret; +} diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h new file mode 100644 index 000000000000..04795b97ee71 --- /dev/null +++ b/drivers/net/ethernet/ti/cpsw_priv.h @@ -0,0 +1,429 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Texas Instruments Ethernet Switch Driver + */ + +#ifndef DRIVERS_NET_ETHERNET_TI_CPSW_PRIV_H_ +#define DRIVERS_NET_ETHERNET_TI_CPSW_PRIV_H_ + +#include "davinci_cpdma.h" + +#define CPSW_DEBUG (NETIF_MSG_HW | NETIF_MSG_WOL | \ + NETIF_MSG_DRV | NETIF_MSG_LINK | \ + NETIF_MSG_IFUP | NETIF_MSG_INTR | \ + NETIF_MSG_PROBE | NETIF_MSG_TIMER | \ + NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR | \ + NETIF_MSG_TX_ERR | NETIF_MSG_TX_DONE | \ + NETIF_MSG_PKTDATA | NETIF_MSG_TX_QUEUED | \ + NETIF_MSG_RX_STATUS) + +#define cpsw_info(priv, type, format, ...) \ +do { \ + if (netif_msg_##type(priv) && net_ratelimit()) \ + dev_info(priv->dev, format, ## __VA_ARGS__); \ +} while (0) + +#define cpsw_err(priv, type, format, ...) \ +do { \ + if (netif_msg_##type(priv) && net_ratelimit()) \ + dev_err(priv->dev, format, ## __VA_ARGS__); \ +} while (0) + +#define cpsw_dbg(priv, type, format, ...) \ +do { \ + if (netif_msg_##type(priv) && net_ratelimit()) \ + dev_dbg(priv->dev, format, ## __VA_ARGS__); \ +} while (0) + +#define cpsw_notice(priv, type, format, ...) 
\ +do { \ + if (netif_msg_##type(priv) && net_ratelimit()) \ + dev_notice(priv->dev, format, ## __VA_ARGS__); \ +} while (0) + +#define ALE_ALL_PORTS 0x7 + +#define CPSW_MAJOR_VERSION(reg) (reg >> 8 & 0x7) +#define CPSW_MINOR_VERSION(reg) (reg & 0xff) +#define CPSW_RTL_VERSION(reg) ((reg >> 11) & 0x1f) + +#define CPSW_VERSION_1 0x19010a +#define CPSW_VERSION_2 0x19010c +#define CPSW_VERSION_3 0x19010f +#define CPSW_VERSION_4 0x190112 + +#define HOST_PORT_NUM 0 +#define CPSW_ALE_PORTS_NUM 3 +#define SLIVER_SIZE 0x40 + +#define CPSW1_HOST_PORT_OFFSET 0x028 +#define CPSW1_SLAVE_OFFSET 0x050 +#define CPSW1_SLAVE_SIZE 0x040 +#define CPSW1_CPDMA_OFFSET 0x100 +#define CPSW1_STATERAM_OFFSET 0x200 +#define CPSW1_HW_STATS 0x400 +#define CPSW1_CPTS_OFFSET 0x500 +#define CPSW1_ALE_OFFSET 0x600 +#define CPSW1_SLIVER_OFFSET 0x700 + +#define CPSW2_HOST_PORT_OFFSET 0x108 +#define CPSW2_SLAVE_OFFSET 0x200 +#define CPSW2_SLAVE_SIZE 0x100 +#define CPSW2_CPDMA_OFFSET 0x800 +#define CPSW2_HW_STATS 0x900 +#define CPSW2_STATERAM_OFFSET 0xa00 +#define CPSW2_CPTS_OFFSET 0xc00 +#define CPSW2_ALE_OFFSET 0xd00 +#define CPSW2_SLIVER_OFFSET 0xd80 +#define CPSW2_BD_OFFSET 0x2000 + +#define CPDMA_RXTHRESH 0x0c0 +#define CPDMA_RXFREE 0x0e0 +#define CPDMA_TXHDP 0x00 +#define CPDMA_RXHDP 0x20 +#define CPDMA_TXCP 0x40 +#define CPDMA_RXCP 0x60 + +#define CPSW_POLL_WEIGHT 64 +#define CPSW_RX_VLAN_ENCAP_HDR_SIZE 4 +#define CPSW_MIN_PACKET_SIZE (VLAN_ETH_ZLEN) +#define CPSW_MAX_PACKET_SIZE (VLAN_ETH_FRAME_LEN +\ + ETH_FCS_LEN +\ + CPSW_RX_VLAN_ENCAP_HDR_SIZE) + +#define RX_PRIORITY_MAPPING 0x76543210 +#define TX_PRIORITY_MAPPING 0x33221100 +#define CPDMA_TX_PRIORITY_MAP 0x76543210 + +#define CPSW_VLAN_AWARE BIT(1) +#define CPSW_RX_VLAN_ENCAP BIT(2) +#define CPSW_ALE_VLAN_AWARE 1 + +#define CPSW_FIFO_NORMAL_MODE (0 << 16) +#define CPSW_FIFO_DUAL_MAC_MODE (1 << 16) +#define CPSW_FIFO_RATE_LIMIT_MODE (2 << 16) + +#define CPSW_INTPACEEN (0x3f << 16) +#define CPSW_INTPRESCALE_MASK (0x7FF << 0) +#define CPSW_CMINTMAX_CNT 63 +#define CPSW_CMINTMIN_CNT 2 +#define CPSW_CMINTMAX_INTVL (1000 / CPSW_CMINTMIN_CNT) +#define CPSW_CMINTMIN_INTVL ((1000 / CPSW_CMINTMAX_CNT) + 1) + +#define IRQ_NUM 2 +#define CPSW_MAX_QUEUES 8 +#define CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT 256 +#define CPSW_FIFO_QUEUE_TYPE_SHIFT 16 +#define CPSW_FIFO_SHAPE_EN_SHIFT 16 +#define CPSW_FIFO_RATE_EN_SHIFT 20 +#define CPSW_TC_NUM 4 +#define CPSW_FIFO_SHAPERS_NUM (CPSW_TC_NUM - 1) +#define CPSW_PCT_MASK 0x7f + +#define CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT 29 +#define CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK GENMASK(2, 0) +#define CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT 16 +#define CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT 8 +#define CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK GENMASK(1, 0) +enum { + CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG = 0, + CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV, + CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG, + CPSW_RX_VLAN_ENCAP_HDR_PKT_UNTAG, +}; + +struct cpsw_wr_regs { + u32 id_ver; + u32 soft_reset; + u32 control; + u32 int_control; + u32 rx_thresh_en; + u32 rx_en; + u32 tx_en; + u32 misc_en; + u32 mem_allign1[8]; + u32 rx_thresh_stat; + u32 rx_stat; + u32 tx_stat; + u32 misc_stat; + u32 mem_allign2[8]; + u32 rx_imax; + u32 tx_imax; + +}; + +struct cpsw_ss_regs { + u32 id_ver; + u32 control; + u32 soft_reset; + u32 stat_port_en; + u32 ptype; + u32 soft_idle; + u32 thru_rate; + u32 gap_thresh; + u32 tx_start_wds; + u32 flow_control; + u32 vlan_ltype; + u32 ts_ltype; + u32 dlr_ltype; +}; + +/* CPSW_PORT_V1 */ +#define CPSW1_MAX_BLKS 0x00 /* Maximum FIFO Blocks */ +#define CPSW1_BLK_CNT 0x04 /* 
FIFO Block Usage Count (Read Only) */ +#define CPSW1_TX_IN_CTL 0x08 /* Transmit FIFO Control */ +#define CPSW1_PORT_VLAN 0x0c /* VLAN Register */ +#define CPSW1_TX_PRI_MAP 0x10 /* Tx Header Priority to Switch Pri Mapping */ +#define CPSW1_TS_CTL 0x14 /* Time Sync Control */ +#define CPSW1_TS_SEQ_LTYPE 0x18 /* Time Sync Sequence ID Offset and Msg Type */ +#define CPSW1_TS_VLAN 0x1c /* Time Sync VLAN1 and VLAN2 */ + +/* CPSW_PORT_V2 */ +#define CPSW2_CONTROL 0x00 /* Control Register */ +#define CPSW2_MAX_BLKS 0x08 /* Maximum FIFO Blocks */ +#define CPSW2_BLK_CNT 0x0c /* FIFO Block Usage Count (Read Only) */ +#define CPSW2_TX_IN_CTL 0x10 /* Transmit FIFO Control */ +#define CPSW2_PORT_VLAN 0x14 /* VLAN Register */ +#define CPSW2_TX_PRI_MAP 0x18 /* Tx Header Priority to Switch Pri Mapping */ +#define CPSW2_TS_SEQ_MTYPE 0x1c /* Time Sync Sequence ID Offset and Msg Type */ + +/* CPSW_PORT_V1 and V2 */ +#define SA_LO 0x20 /* CPGMAC_SL Source Address Low */ +#define SA_HI 0x24 /* CPGMAC_SL Source Address High */ +#define SEND_PERCENT 0x28 /* Transmit Queue Send Percentages */ + +/* CPSW_PORT_V2 only */ +#define RX_DSCP_PRI_MAP0 0x30 /* Rx DSCP Priority to Rx Packet Mapping */ +#define RX_DSCP_PRI_MAP1 0x34 /* Rx DSCP Priority to Rx Packet Mapping */ +#define RX_DSCP_PRI_MAP2 0x38 /* Rx DSCP Priority to Rx Packet Mapping */ +#define RX_DSCP_PRI_MAP3 0x3c /* Rx DSCP Priority to Rx Packet Mapping */ +#define RX_DSCP_PRI_MAP4 0x40 /* Rx DSCP Priority to Rx Packet Mapping */ +#define RX_DSCP_PRI_MAP5 0x44 /* Rx DSCP Priority to Rx Packet Mapping */ +#define RX_DSCP_PRI_MAP6 0x48 /* Rx DSCP Priority to Rx Packet Mapping */ +#define RX_DSCP_PRI_MAP7 0x4c /* Rx DSCP Priority to Rx Packet Mapping */ + +/* Bit definitions for the CPSW2_CONTROL register */ +#define PASS_PRI_TAGGED BIT(24) /* Pass Priority Tagged */ +#define VLAN_LTYPE2_EN BIT(21) /* VLAN LTYPE 2 enable */ +#define VLAN_LTYPE1_EN BIT(20) /* VLAN LTYPE 1 enable */ +#define DSCP_PRI_EN BIT(16) /* DSCP Priority Enable */ +#define TS_107 BIT(15) /* Time Sync Dest IP Address 107 */ +#define TS_320 BIT(14) /* Time Sync Dest Port 320 enable */ +#define TS_319 BIT(13) /* Time Sync Dest Port 319 enable */ +#define TS_132 BIT(12) /* Time Sync Dest IP Addr 132 enable */ +#define TS_131 BIT(11) /* Time Sync Dest IP Addr 131 enable */ +#define TS_130 BIT(10) /* Time Sync Dest IP Addr 130 enable */ +#define TS_129 BIT(9) /* Time Sync Dest IP Addr 129 enable */ +#define TS_TTL_NONZERO BIT(8) /* Time Sync Time To Live Non-zero enable */ +#define TS_ANNEX_F_EN BIT(6) /* Time Sync Annex F enable */ +#define TS_ANNEX_D_EN BIT(4) /* Time Sync Annex D enable */ +#define TS_LTYPE2_EN BIT(3) /* Time Sync LTYPE 2 enable */ +#define TS_LTYPE1_EN BIT(2) /* Time Sync LTYPE 1 enable */ +#define TS_TX_EN BIT(1) /* Time Sync Transmit Enable */ +#define TS_RX_EN BIT(0) /* Time Sync Receive Enable */ + +#define CTRL_V2_TS_BITS \ + (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\ + TS_TTL_NONZERO | TS_ANNEX_D_EN | TS_LTYPE1_EN | VLAN_LTYPE1_EN) + +#define CTRL_V2_ALL_TS_MASK (CTRL_V2_TS_BITS | TS_TX_EN | TS_RX_EN) +#define CTRL_V2_TX_TS_BITS (CTRL_V2_TS_BITS | TS_TX_EN) +#define CTRL_V2_RX_TS_BITS (CTRL_V2_TS_BITS | TS_RX_EN) + + +#define CTRL_V3_TS_BITS \ + (TS_107 | TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\ + TS_TTL_NONZERO | TS_ANNEX_F_EN | TS_ANNEX_D_EN |\ + TS_LTYPE1_EN | VLAN_LTYPE1_EN) + +#define CTRL_V3_ALL_TS_MASK (CTRL_V3_TS_BITS | TS_TX_EN | TS_RX_EN) +#define CTRL_V3_TX_TS_BITS (CTRL_V3_TS_BITS | TS_TX_EN) +#define CTRL_V3_RX_TS_BITS
(CTRL_V3_TS_BITS | TS_RX_EN) + +/* Bit definitions for the CPSW2_TS_SEQ_MTYPE register */ +#define TS_SEQ_ID_OFFSET_SHIFT (16) /* Time Sync Sequence ID Offset */ +#define TS_SEQ_ID_OFFSET_MASK (0x3f) +#define TS_MSG_TYPE_EN_SHIFT (0) /* Time Sync Message Type Enable */ +#define TS_MSG_TYPE_EN_MASK (0xffff) + +/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */ +#define EVENT_MSG_BITS ((1<<0) | (1<<1) | (1<<2) | (1<<3)) + +/* Bit definitions for the CPSW1_TS_CTL register */ +#define CPSW_V1_TS_RX_EN BIT(0) +#define CPSW_V1_TS_TX_EN BIT(4) +#define CPSW_V1_MSG_TYPE_OFS 16 + +/* Bit definitions for the CPSW1_TS_SEQ_LTYPE register */ +#define CPSW_V1_SEQ_ID_OFS_SHIFT 16 + +#define CPSW_MAX_BLKS_TX 15 +#define CPSW_MAX_BLKS_TX_SHIFT 4 +#define CPSW_MAX_BLKS_RX 5 + +struct cpsw_host_regs { + u32 max_blks; + u32 blk_cnt; + u32 tx_in_ctl; + u32 port_vlan; + u32 tx_pri_map; + u32 cpdma_tx_pri_map; + u32 cpdma_rx_chan_map; +}; + +struct cpsw_slave_data { + struct device_node *phy_node; + char phy_id[MII_BUS_ID_SIZE]; + int phy_if; + u8 mac_addr[ETH_ALEN]; + u16 dual_emac_res_vlan; /* Reserved VLAN for DualEMAC */ + struct phy *ifphy; +}; + +struct cpsw_platform_data { + struct cpsw_slave_data *slave_data; + u32 ss_reg_ofs; /* Subsystem control register offset */ + u32 channels; /* number of cpdma channels (symmetric) */ + u32 slaves; /* number of slave cpgmac ports */ + u32 active_slave; /* time stamping, ethtool and SIOCGMIIPHY slave */ + u32 ale_entries; /* ale table size */ + u32 bd_ram_size; /*buffer descriptor ram size */ + u32 mac_control; /* Mac control register */ + u16 default_vlan; /* Def VLAN for ALE lookup in VLAN aware mode*/ + bool dual_emac; /* Enable Dual EMAC mode */ +}; + +struct cpsw_slave { + void __iomem *regs; + int slave_num; + u32 mac_control; + struct cpsw_slave_data *data; + struct phy_device *phy; + struct net_device *ndev; + u32 port_vlan; + struct cpsw_sl *mac_sl; +}; + +static inline u32 slave_read(struct cpsw_slave *slave, u32 offset) +{ + return readl_relaxed(slave->regs + offset); +} + +static inline void slave_write(struct cpsw_slave *slave, u32 val, u32 offset) +{ + writel_relaxed(val, slave->regs + offset); +} + +struct cpsw_vector { + struct cpdma_chan *ch; + int budget; +}; + +struct cpsw_common { + struct device *dev; + struct cpsw_platform_data data; + struct napi_struct napi_rx; + struct napi_struct napi_tx; + struct cpsw_ss_regs __iomem *regs; + struct cpsw_wr_regs __iomem *wr_regs; + u8 __iomem *hw_stats; + struct cpsw_host_regs __iomem *host_port_regs; + u32 version; + u32 coal_intvl; + u32 bus_freq_mhz; + int rx_packet_max; + int descs_pool_size; + struct cpsw_slave *slaves; + struct cpdma_ctlr *dma; + struct cpsw_vector txv[CPSW_MAX_QUEUES]; + struct cpsw_vector rxv[CPSW_MAX_QUEUES]; + struct cpsw_ale *ale; + bool quirk_irq; + bool rx_irq_disabled; + bool tx_irq_disabled; + u32 irqs_table[IRQ_NUM]; + struct cpts *cpts; + int rx_ch_num, tx_ch_num; + int speed; + int usage_count; +}; + +struct cpsw_priv { + struct net_device *ndev; + struct device *dev; + u32 msg_enable; + u8 mac_addr[ETH_ALEN]; + bool rx_pause; + bool tx_pause; + bool mqprio_hw; + int fifo_bw[CPSW_TC_NUM]; + int shp_cfg_speed; + int tx_ts_enabled; + int rx_ts_enabled; + u32 emac_port; + struct cpsw_common *cpsw; +}; + +#define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw) +#define napi_to_cpsw(napi) container_of(napi, struct cpsw_common, napi) + +#define cpsw_slave_index(cpsw, priv) \ + ((cpsw->data.dual_emac) ? 
priv->emac_port : \ + cpsw->data.active_slave) + +static inline int cpsw_get_slave_port(u32 slave_num) +{ + return slave_num + 1; +} + +struct addr_sync_ctx { + struct net_device *ndev; + const u8 *addr; /* address to be synched */ + int consumed; /* number of address instances */ + int flush; /* flush flag */ +}; + +int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs, + int ale_ageout, phys_addr_t desc_mem_phys, + int descs_pool_size); +void cpsw_split_res(struct cpsw_common *cpsw); +int cpsw_fill_rx_channels(struct cpsw_priv *priv); +void cpsw_intr_enable(struct cpsw_common *cpsw); +void cpsw_intr_disable(struct cpsw_common *cpsw); +void cpsw_tx_handler(void *token, int len, int status); + +/* ethtool */ +u32 cpsw_get_msglevel(struct net_device *ndev); +void cpsw_set_msglevel(struct net_device *ndev, u32 value); +int cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal); +int cpsw_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal); +int cpsw_get_sset_count(struct net_device *ndev, int sset); +void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data); +void cpsw_get_ethtool_stats(struct net_device *ndev, + struct ethtool_stats *stats, u64 *data); +void cpsw_get_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *pause); +void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol); +int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol); +int cpsw_get_regs_len(struct net_device *ndev); +void cpsw_get_regs(struct net_device *ndev, struct ethtool_regs *regs, void *p); +int cpsw_ethtool_op_begin(struct net_device *ndev); +void cpsw_ethtool_op_complete(struct net_device *ndev); +void cpsw_get_channels(struct net_device *ndev, struct ethtool_channels *ch); +int cpsw_get_link_ksettings(struct net_device *ndev, + struct ethtool_link_ksettings *ecmd); +int cpsw_set_link_ksettings(struct net_device *ndev, + const struct ethtool_link_ksettings *ecmd); +int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata); +int cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata); +int cpsw_nway_reset(struct net_device *ndev); +void cpsw_get_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ering); +int cpsw_set_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ering); +int cpsw_set_channels_common(struct net_device *ndev, + struct ethtool_channels *chs, + cpdma_handler_fn rx_handler); +int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info); + +#endif /* DRIVERS_NET_ETHERNET_TI_CPSW_PRIV_H_ */ diff --git a/drivers/net/ethernet/ti/cpsw_sl.c b/drivers/net/ethernet/ti/cpsw_sl.c new file mode 100644 index 000000000000..0c7531cb0f39 --- /dev/null +++ b/drivers/net/ethernet/ti/cpsw_sl.c @@ -0,0 +1,328 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Texas Instruments Ethernet Switch media-access-controller (MAC) submodule/ + * Ethernet MAC Sliver (CPGMAC_SL) + * + * Copyright (C) 2019 Texas Instruments + * + */ + +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/kernel.h> + +#include "cpsw_sl.h" + +#define CPSW_SL_REG_NOTUSED U16_MAX + +static const u16 cpsw_sl_reg_map_cpsw[] = { + [CPSW_SL_IDVER] = 0x00, + [CPSW_SL_MACCONTROL] = 0x04, + [CPSW_SL_MACSTATUS] = 0x08, + [CPSW_SL_SOFT_RESET] = 0x0c, + [CPSW_SL_RX_MAXLEN] = 0x10, + [CPSW_SL_BOFFTEST] = 0x14, + [CPSW_SL_RX_PAUSE] = 0x18, + [CPSW_SL_TX_PAUSE] = 0x1c, + [CPSW_SL_EMCONTROL] = 0x20, + [CPSW_SL_RX_PRI_MAP] = 0x24, + [CPSW_SL_TX_GAP] = 0x28, +}; + +static const u16 
cpsw_sl_reg_map_66ak2hk[] = { + [CPSW_SL_IDVER] = 0x00, + [CPSW_SL_MACCONTROL] = 0x04, + [CPSW_SL_MACSTATUS] = 0x08, + [CPSW_SL_SOFT_RESET] = 0x0c, + [CPSW_SL_RX_MAXLEN] = 0x10, + [CPSW_SL_BOFFTEST] = CPSW_SL_REG_NOTUSED, + [CPSW_SL_RX_PAUSE] = 0x18, + [CPSW_SL_TX_PAUSE] = 0x1c, + [CPSW_SL_EMCONTROL] = 0x20, + [CPSW_SL_RX_PRI_MAP] = 0x24, + [CPSW_SL_TX_GAP] = CPSW_SL_REG_NOTUSED, +}; + +static const u16 cpsw_sl_reg_map_66ak2x_xgbe[] = { + [CPSW_SL_IDVER] = 0x00, + [CPSW_SL_MACCONTROL] = 0x04, + [CPSW_SL_MACSTATUS] = 0x08, + [CPSW_SL_SOFT_RESET] = 0x0c, + [CPSW_SL_RX_MAXLEN] = 0x10, + [CPSW_SL_BOFFTEST] = CPSW_SL_REG_NOTUSED, + [CPSW_SL_RX_PAUSE] = 0x18, + [CPSW_SL_TX_PAUSE] = 0x1c, + [CPSW_SL_EMCONTROL] = 0x20, + [CPSW_SL_RX_PRI_MAP] = CPSW_SL_REG_NOTUSED, + [CPSW_SL_TX_GAP] = 0x28, +}; + +static const u16 cpsw_sl_reg_map_66ak2elg_am65[] = { + [CPSW_SL_IDVER] = CPSW_SL_REG_NOTUSED, + [CPSW_SL_MACCONTROL] = 0x00, + [CPSW_SL_MACSTATUS] = 0x04, + [CPSW_SL_SOFT_RESET] = 0x08, + [CPSW_SL_RX_MAXLEN] = CPSW_SL_REG_NOTUSED, + [CPSW_SL_BOFFTEST] = 0x0c, + [CPSW_SL_RX_PAUSE] = 0x10, + [CPSW_SL_TX_PAUSE] = 0x40, + [CPSW_SL_EMCONTROL] = 0x70, + [CPSW_SL_RX_PRI_MAP] = CPSW_SL_REG_NOTUSED, + [CPSW_SL_TX_GAP] = 0x74, +}; + +#define CPSW_SL_SOFT_RESET_BIT BIT(0) + +#define CPSW_SL_STATUS_PN_IDLE BIT(31) +#define CPSW_SL_AM65_STATUS_PN_E_IDLE BIT(30) +#define CPSW_SL_AM65_STATUS_PN_P_IDLE BIT(29) +#define CPSW_SL_AM65_STATUS_PN_TX_IDLE BIT(28) + +#define CPSW_SL_STATUS_IDLE_MASK_BASE (CPSW_SL_STATUS_PN_IDLE) + +#define CPSW_SL_STATUS_IDLE_MASK_K3 \ + (CPSW_SL_STATUS_IDLE_MASK_BASE | CPSW_SL_AM65_STATUS_PN_E_IDLE | \ + CPSW_SL_AM65_STATUS_PN_P_IDLE | CPSW_SL_AM65_STATUS_PN_TX_IDLE) + +#define CPSW_SL_CTL_FUNC_BASE \ + (CPSW_SL_CTL_FULLDUPLEX |\ + CPSW_SL_CTL_LOOPBACK |\ + CPSW_SL_CTL_RX_FLOW_EN |\ + CPSW_SL_CTL_TX_FLOW_EN |\ + CPSW_SL_CTL_GMII_EN |\ + CPSW_SL_CTL_TX_PACE |\ + CPSW_SL_CTL_GIG |\ + CPSW_SL_CTL_CMD_IDLE |\ + CPSW_SL_CTL_IFCTL_A |\ + CPSW_SL_CTL_IFCTL_B |\ + CPSW_SL_CTL_GIG_FORCE |\ + CPSW_SL_CTL_EXT_EN |\ + CPSW_SL_CTL_RX_CEF_EN |\ + CPSW_SL_CTL_RX_CSF_EN |\ + CPSW_SL_CTL_RX_CMF_EN) + +struct cpsw_sl { + struct device *dev; + void __iomem *sl_base; + const u16 *regs; + u32 control_features; + u32 idle_mask; +}; + +struct cpsw_sl_dev_id { + const char *device_id; + const u16 *regs; + const u32 control_features; + const u32 regs_offset; + const u32 idle_mask; +}; + +static const struct cpsw_sl_dev_id cpsw_sl_id_match[] = { + { + .device_id = "cpsw", + .regs = cpsw_sl_reg_map_cpsw, + .control_features = CPSW_SL_CTL_FUNC_BASE | + CPSW_SL_CTL_MTEST | + CPSW_SL_CTL_TX_SHORT_GAP_EN | + CPSW_SL_CTL_TX_SG_LIM_EN, + .idle_mask = CPSW_SL_STATUS_IDLE_MASK_BASE, + }, + { + .device_id = "66ak2hk", + .regs = cpsw_sl_reg_map_66ak2hk, + .control_features = CPSW_SL_CTL_FUNC_BASE | + CPSW_SL_CTL_TX_SHORT_GAP_EN, + .idle_mask = CPSW_SL_STATUS_IDLE_MASK_BASE, + }, + { + .device_id = "66ak2x_xgbe", + .regs = cpsw_sl_reg_map_66ak2x_xgbe, + .control_features = CPSW_SL_CTL_FUNC_BASE | + CPSW_SL_CTL_XGIG | + CPSW_SL_CTL_TX_SHORT_GAP_EN | + CPSW_SL_CTL_CRC_TYPE | + CPSW_SL_CTL_XGMII_EN, + .idle_mask = CPSW_SL_STATUS_IDLE_MASK_BASE, + }, + { + .device_id = "66ak2el", + .regs = cpsw_sl_reg_map_66ak2elg_am65, + .regs_offset = 0x330, + .control_features = CPSW_SL_CTL_FUNC_BASE | + CPSW_SL_CTL_MTEST | + CPSW_SL_CTL_TX_SHORT_GAP_EN | + CPSW_SL_CTL_CRC_TYPE | + CPSW_SL_CTL_EXT_EN_RX_FLO | + CPSW_SL_CTL_EXT_EN_TX_FLO | + CPSW_SL_CTL_TX_SG_LIM_EN, + .idle_mask = CPSW_SL_STATUS_IDLE_MASK_BASE, + }, + { + .device_id = "66ak2g", + 
.regs = cpsw_sl_reg_map_66ak2elg_am65, + .regs_offset = 0x330, + .control_features = CPSW_SL_CTL_FUNC_BASE | + CPSW_SL_CTL_MTEST | + CPSW_SL_CTL_CRC_TYPE | + CPSW_SL_CTL_EXT_EN_RX_FLO | + CPSW_SL_CTL_EXT_EN_TX_FLO, + }, + { + .device_id = "am65", + .regs = cpsw_sl_reg_map_66ak2elg_am65, + .regs_offset = 0x330, + .control_features = CPSW_SL_CTL_FUNC_BASE | + CPSW_SL_CTL_MTEST | + CPSW_SL_CTL_XGIG | + CPSW_SL_CTL_TX_SHORT_GAP_EN | + CPSW_SL_CTL_CRC_TYPE | + CPSW_SL_CTL_XGMII_EN | + CPSW_SL_CTL_EXT_EN_RX_FLO | + CPSW_SL_CTL_EXT_EN_TX_FLO | + CPSW_SL_CTL_TX_SG_LIM_EN | + CPSW_SL_CTL_EXT_EN_XGIG, + .idle_mask = CPSW_SL_STATUS_IDLE_MASK_K3, + }, + { }, +}; + +u32 cpsw_sl_reg_read(struct cpsw_sl *sl, enum cpsw_sl_regs reg) +{ + int val; + + if (sl->regs[reg] == CPSW_SL_REG_NOTUSED) { + dev_err(sl->dev, "cpsw_sl: not sup r reg: %04X\n", + sl->regs[reg]); + return 0; + } + + val = readl(sl->sl_base + sl->regs[reg]); + dev_dbg(sl->dev, "cpsw_sl: reg: %04X r 0x%08X\n", sl->regs[reg], val); + return val; +} + +void cpsw_sl_reg_write(struct cpsw_sl *sl, enum cpsw_sl_regs reg, u32 val) +{ + if (sl->regs[reg] == CPSW_SL_REG_NOTUSED) { + dev_err(sl->dev, "cpsw_sl: not sup w reg: %04X\n", + sl->regs[reg]); + return; + } + + dev_dbg(sl->dev, "cpsw_sl: reg: %04X w 0x%08X\n", sl->regs[reg], val); + writel(val, sl->sl_base + sl->regs[reg]); +} + +static const struct cpsw_sl_dev_id *cpsw_sl_match_id( + const struct cpsw_sl_dev_id *id, + const char *device_id) +{ + if (!id || !device_id) + return NULL; + + while (id->device_id) { + if (strcmp(device_id, id->device_id) == 0) + return id; + id++; + } + return NULL; +} + +struct cpsw_sl *cpsw_sl_get(const char *device_id, struct device *dev, + void __iomem *sl_base) +{ + const struct cpsw_sl_dev_id *sl_dev_id; + struct cpsw_sl *sl; + + sl = devm_kzalloc(dev, sizeof(struct cpsw_sl), GFP_KERNEL); + if (!sl) + return ERR_PTR(-ENOMEM); + sl->dev = dev; + sl->sl_base = sl_base; + + sl_dev_id = cpsw_sl_match_id(cpsw_sl_id_match, device_id); + if (!sl_dev_id) { + dev_err(sl->dev, "cpsw_sl: dev_id %s not found.\n", device_id); + return ERR_PTR(-EINVAL); + } + sl->regs = sl_dev_id->regs; + sl->control_features = sl_dev_id->control_features; + sl->idle_mask = sl_dev_id->idle_mask; + sl->sl_base += sl_dev_id->regs_offset; + + return sl; +} + +void cpsw_sl_reset(struct cpsw_sl *sl, unsigned long tmo) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(tmo); + + /* Set the soft reset bit */ + cpsw_sl_reg_write(sl, CPSW_SL_SOFT_RESET, CPSW_SL_SOFT_RESET_BIT); + + /* Wait for the bit to clear */ + do { + usleep_range(100, 200); + } while ((cpsw_sl_reg_read(sl, CPSW_SL_SOFT_RESET) & + CPSW_SL_SOFT_RESET_BIT) && + time_after(timeout, jiffies)); + + if (cpsw_sl_reg_read(sl, CPSW_SL_SOFT_RESET) & CPSW_SL_SOFT_RESET_BIT) + dev_err(sl->dev, "cpsw_sl failed to soft-reset.\n"); +} + +u32 cpsw_sl_ctl_set(struct cpsw_sl *sl, u32 ctl_funcs) +{ + u32 val; + + if (ctl_funcs & ~sl->control_features) { + dev_err(sl->dev, "cpsw_sl: unsupported func 0x%08X\n", + ctl_funcs & (~sl->control_features)); + return -EINVAL; + } + + val = cpsw_sl_reg_read(sl, CPSW_SL_MACCONTROL); + val |= ctl_funcs; + cpsw_sl_reg_write(sl, CPSW_SL_MACCONTROL, val); + + return 0; +} + +u32 cpsw_sl_ctl_clr(struct cpsw_sl *sl, u32 ctl_funcs) +{ + u32 val; + + if (ctl_funcs & ~sl->control_features) { + dev_err(sl->dev, "cpsw_sl: unsupported func 0x%08X\n", + ctl_funcs & (~sl->control_features)); + return -EINVAL; + } + + val = cpsw_sl_reg_read(sl, CPSW_SL_MACCONTROL); + val &= ~ctl_funcs; + cpsw_sl_reg_write(sl, 
CPSW_SL_MACCONTROL, val); + + return 0; +} + +void cpsw_sl_ctl_reset(struct cpsw_sl *sl) +{ + cpsw_sl_reg_write(sl, CPSW_SL_MACCONTROL, 0); +} + +int cpsw_sl_wait_for_idle(struct cpsw_sl *sl, unsigned long tmo) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(tmo); + + do { + usleep_range(100, 200); + } while (!(cpsw_sl_reg_read(sl, CPSW_SL_MACSTATUS) & + sl->idle_mask) && time_after(timeout, jiffies)); + + if (!(cpsw_sl_reg_read(sl, CPSW_SL_MACSTATUS) & sl->idle_mask)) { + dev_err(sl->dev, "cpsw_sl failed to soft-reset.\n"); + return -ETIMEDOUT; + } + + return 0; +} diff --git a/drivers/net/ethernet/ti/cpsw_sl.h b/drivers/net/ethernet/ti/cpsw_sl.h new file mode 100644 index 000000000000..a6d06a5a420f --- /dev/null +++ b/drivers/net/ethernet/ti/cpsw_sl.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Texas Instruments Ethernet Switch media-access-controller (MAC) submodule/ + * Ethernet MAC Sliver (CPGMAC_SL) APIs + * + * Copyright (C) 2019 Texas Instruments + * + */ + +#ifndef __TI_CPSW_SL_H__ +#define __TI_CPSW_SL_H__ + +#include <linux/device.h> + +enum cpsw_sl_regs { + CPSW_SL_IDVER, + CPSW_SL_MACCONTROL, + CPSW_SL_MACSTATUS, + CPSW_SL_SOFT_RESET, + CPSW_SL_RX_MAXLEN, + CPSW_SL_BOFFTEST, + CPSW_SL_RX_PAUSE, + CPSW_SL_TX_PAUSE, + CPSW_SL_EMCONTROL, + CPSW_SL_RX_PRI_MAP, + CPSW_SL_TX_GAP, +}; + +enum { + CPSW_SL_CTL_FULLDUPLEX = BIT(0), /* Full Duplex mode */ + CPSW_SL_CTL_LOOPBACK = BIT(1), /* Loop Back Mode */ + CPSW_SL_CTL_MTEST = BIT(2), /* Manufacturing Test mode */ + CPSW_SL_CTL_RX_FLOW_EN = BIT(3), /* Receive Flow Control Enable */ + CPSW_SL_CTL_TX_FLOW_EN = BIT(4), /* Transmit Flow Control Enable */ + CPSW_SL_CTL_GMII_EN = BIT(5), /* GMII Enable */ + CPSW_SL_CTL_TX_PACE = BIT(6), /* Transmit Pacing Enable */ + CPSW_SL_CTL_GIG = BIT(7), /* Gigabit Mode */ + CPSW_SL_CTL_XGIG = BIT(8), /* 10 Gigabit Mode */ + CPSW_SL_CTL_TX_SHORT_GAP_EN = BIT(10), /* Transmit Short Gap Enable */ + CPSW_SL_CTL_CMD_IDLE = BIT(11), /* Command Idle */ + CPSW_SL_CTL_CRC_TYPE = BIT(12), /* Port CRC Type */ + CPSW_SL_CTL_XGMII_EN = BIT(13), /* XGMII Enable */ + CPSW_SL_CTL_IFCTL_A = BIT(15), /* Interface Control A */ + CPSW_SL_CTL_IFCTL_B = BIT(16), /* Interface Control B */ + CPSW_SL_CTL_GIG_FORCE = BIT(17), /* Gigabit Mode Force */ + CPSW_SL_CTL_EXT_EN = BIT(18), /* External Control Enable */ + CPSW_SL_CTL_EXT_EN_RX_FLO = BIT(19), /* Ext RX Flow Control Enable */ + CPSW_SL_CTL_EXT_EN_TX_FLO = BIT(20), /* Ext TX Flow Control Enable */ + CPSW_SL_CTL_TX_SG_LIM_EN = BIT(21), /* TX Short Gap Limit Enable */ + CPSW_SL_CTL_RX_CEF_EN = BIT(22), /* RX Copy Error Frames Enable */ + CPSW_SL_CTL_RX_CSF_EN = BIT(23), /* RX Copy Short Frames Enable */ + CPSW_SL_CTL_RX_CMF_EN = BIT(24), /* RX Copy MAC Control Frames Enable */ + CPSW_SL_CTL_EXT_EN_XGIG = BIT(25), /* Ext XGIG Control En, k3 only */ + + CPSW_SL_CTL_FUNCS_COUNT +}; + +struct cpsw_sl; + +struct cpsw_sl *cpsw_sl_get(const char *device_id, struct device *dev, + void __iomem *sl_base); + +void cpsw_sl_reset(struct cpsw_sl *sl, unsigned long tmo); + +u32 cpsw_sl_ctl_set(struct cpsw_sl *sl, u32 ctl_funcs); +u32 cpsw_sl_ctl_clr(struct cpsw_sl *sl, u32 ctl_funcs); +void cpsw_sl_ctl_reset(struct cpsw_sl *sl); +int cpsw_sl_wait_for_idle(struct cpsw_sl *sl, unsigned long tmo); + +u32 cpsw_sl_reg_read(struct cpsw_sl *sl, enum cpsw_sl_regs reg); +void cpsw_sl_reg_write(struct cpsw_sl *sl, enum cpsw_sl_regs reg, u32 val); + +#endif /* __TI_CPSW_SL_H__ */ diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c index
2a9ba4acd7fa..e257018ada71 100644 --- a/drivers/net/ethernet/ti/cpts.c +++ b/drivers/net/ethernet/ti/cpts.c @@ -1,21 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * TI Common Platform Time Sync * * Copyright (C) 2012 Richard Cochran <richardcochran@gmail.com> * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/err.h> #include <linux/if.h> diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h index d2c7decd59b6..024aab6af12f 100644 --- a/drivers/net/ethernet/ti/cpts.h +++ b/drivers/net/ethernet/ti/cpts.h @@ -1,21 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * TI Common Platform Time Sync * * Copyright (C) 2012 Richard Cochran <richardcochran@gmail.com> * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef _TI_CPTS_H_ #define _TI_CPTS_H_ diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index 4236dcdd5634..35bf14d8e7af 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -1,16 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Texas Instruments CPDMA Driver * * Copyright (C) 2010 Texas Instruments * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
*/ #include <linux/kernel.h> #include <linux/spinlock.h> @@ -527,7 +520,6 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params) ctlr->num_chan = CPDMA_MAX_CHANNELS; return ctlr; } -EXPORT_SYMBOL_GPL(cpdma_ctlr_create); int cpdma_ctlr_start(struct cpdma_ctlr *ctlr) { @@ -588,7 +580,6 @@ int cpdma_ctlr_start(struct cpdma_ctlr *ctlr) spin_unlock_irqrestore(&ctlr->lock, flags); return 0; } -EXPORT_SYMBOL_GPL(cpdma_ctlr_start); int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr) { @@ -621,7 +612,6 @@ int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr) spin_unlock_irqrestore(&ctlr->lock, flags); return 0; } -EXPORT_SYMBOL_GPL(cpdma_ctlr_stop); int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr) { @@ -639,7 +629,6 @@ int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr) cpdma_desc_pool_destroy(ctlr); return ret; } -EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy); int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable) { @@ -660,25 +649,21 @@ int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable) spin_unlock_irqrestore(&ctlr->lock, flags); return 0; } -EXPORT_SYMBOL_GPL(cpdma_ctlr_int_ctrl); void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value) { dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value); } -EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi); u32 cpdma_ctrl_rxchs_state(struct cpdma_ctlr *ctlr) { return dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED); } -EXPORT_SYMBOL_GPL(cpdma_ctrl_rxchs_state); u32 cpdma_ctrl_txchs_state(struct cpdma_ctlr *ctlr) { return dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED); } -EXPORT_SYMBOL_GPL(cpdma_ctrl_txchs_state); static void cpdma_chan_set_descs(struct cpdma_ctlr *ctlr, int rx, int desc_num, @@ -774,7 +759,6 @@ int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr) return 0; } -EXPORT_SYMBOL_GPL(cpdma_chan_split_pool); /* cpdma_chan_set_weight - set weight of a channel in percentage. @@ -807,7 +791,6 @@ int cpdma_chan_set_weight(struct cpdma_chan *ch, int weight) spin_unlock_irqrestore(&ctlr->lock, flags); return ret; } -EXPORT_SYMBOL_GPL(cpdma_chan_set_weight); /* cpdma_chan_get_min_rate - get minimum allowed rate for channel * Should be called before cpdma_chan_set_rate. @@ -822,7 +805,6 @@ u32 cpdma_chan_get_min_rate(struct cpdma_ctlr *ctlr) return DIV_ROUND_UP(divident, divisor); } -EXPORT_SYMBOL_GPL(cpdma_chan_get_min_rate); /* cpdma_chan_set_rate - limits bandwidth for transmit channel. * The bandwidth * limited channels have to be in order beginning from lowest. 
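[Editor's note] The hunks above and below un-export the cpdma channel-shaping helpers (cpdma_chan_set_weight(), cpdma_chan_get_min_rate(), cpdma_chan_set_rate()), and the end of this file diff also drops MODULE_LICENSE(), consistent with cpdma now being linked into the cpsw driver objects rather than built as a standalone module. For orientation, a minimal usage sketch of the shaper API follows. It is an editorial illustration, not part of the patch; it assumes the int cpdma_chan_set_rate(struct cpdma_chan *ch, u32 rate) signature and Kb/s rate units from davinci_cpdma, and the 10 Mb/s cap and helper name are invented example values.

	#include "davinci_cpdma.h"

	/* Illustrative only: cap one tx channel, honouring the controller's
	 * minimum allowed rate, which the comment in the hunk above says to
	 * query before calling cpdma_chan_set_rate().
	 */
	static int example_cap_tx_chan(struct cpdma_ctlr *ctlr,
				       struct cpdma_chan *ch)
	{
		u32 rate = 10000;	/* Kb/s, i.e. ~10 Mb/s (assumed units) */
		u32 min_rate = cpdma_chan_get_min_rate(ctlr);

		if (rate < min_rate)
			rate = min_rate;

		/* rate-limited channels have to be configured beginning
		 * from the lowest channel, per the comment above
		 */
		return cpdma_chan_set_rate(ch, rate);
	}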
@@ -867,7 +849,6 @@ err: spin_unlock_irqrestore(&ctlr->lock, flags); return ret; } -EXPORT_SYMBOL_GPL(cpdma_chan_set_rate); u32 cpdma_chan_get_rate(struct cpdma_chan *ch) { @@ -880,7 +861,6 @@ u32 cpdma_chan_get_rate(struct cpdma_chan *ch) return rate; } -EXPORT_SYMBOL_GPL(cpdma_chan_get_rate); struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num, cpdma_handler_fn handler, int rx_type) @@ -940,7 +920,6 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num, spin_unlock_irqrestore(&ctlr->lock, flags); return chan; } -EXPORT_SYMBOL_GPL(cpdma_chan_create); int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan) { @@ -953,7 +932,6 @@ int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan) return desc_num; } -EXPORT_SYMBOL_GPL(cpdma_chan_get_rx_buf_num); int cpdma_chan_destroy(struct cpdma_chan *chan) { @@ -975,7 +953,6 @@ int cpdma_chan_destroy(struct cpdma_chan *chan) spin_unlock_irqrestore(&ctlr->lock, flags); return 0; } -EXPORT_SYMBOL_GPL(cpdma_chan_destroy); int cpdma_chan_get_stats(struct cpdma_chan *chan, struct cpdma_chan_stats *stats) @@ -988,7 +965,6 @@ int cpdma_chan_get_stats(struct cpdma_chan *chan, spin_unlock_irqrestore(&chan->lock, flags); return 0; } -EXPORT_SYMBOL_GPL(cpdma_chan_get_stats); static void __cpdma_chan_submit(struct cpdma_chan *chan, struct cpdma_desc __iomem *desc) @@ -1095,7 +1071,6 @@ unlock_ret: spin_unlock_irqrestore(&chan->lock, flags); return ret; } -EXPORT_SYMBOL_GPL(cpdma_chan_submit); bool cpdma_check_free_tx_desc(struct cpdma_chan *chan) { @@ -1110,7 +1085,6 @@ bool cpdma_check_free_tx_desc(struct cpdma_chan *chan) spin_unlock_irqrestore(&chan->lock, flags); return free_tx_desc; } -EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc); static void __cpdma_chan_free(struct cpdma_chan *chan, struct cpdma_desc __iomem *desc, @@ -1204,7 +1178,6 @@ int cpdma_chan_process(struct cpdma_chan *chan, int quota) } return used; } -EXPORT_SYMBOL_GPL(cpdma_chan_process); int cpdma_chan_start(struct cpdma_chan *chan) { @@ -1224,7 +1197,6 @@ int cpdma_chan_start(struct cpdma_chan *chan) return 0; } -EXPORT_SYMBOL_GPL(cpdma_chan_start); int cpdma_chan_stop(struct cpdma_chan *chan) { @@ -1287,7 +1259,6 @@ int cpdma_chan_stop(struct cpdma_chan *chan) spin_unlock_irqrestore(&chan->lock, flags); return 0; } -EXPORT_SYMBOL_GPL(cpdma_chan_stop); int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable) { @@ -1329,25 +1300,19 @@ int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value) return ret; } -EXPORT_SYMBOL_GPL(cpdma_control_set); int cpdma_get_num_rx_descs(struct cpdma_ctlr *ctlr) { return ctlr->num_rx_desc; } -EXPORT_SYMBOL_GPL(cpdma_get_num_rx_descs); int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr) { return ctlr->num_tx_desc; } -EXPORT_SYMBOL_GPL(cpdma_get_num_tx_descs); void cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc) { ctlr->num_rx_desc = num_rx_desc; ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc; } -EXPORT_SYMBOL_GPL(cpdma_set_num_rx_descs); - -MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/ti/davinci_cpdma.h b/drivers/net/ethernet/ti/davinci_cpdma.h index d399af5389b8..10376062dafa 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.h +++ b/drivers/net/ethernet/ti/davinci_cpdma.h @@ -1,16 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Texas Instruments CPDMA Driver * * Copyright (C) 2010 Texas Instruments * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the 
Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #ifndef __DAVINCI_CPDMA_H__ #define __DAVINCI_CPDMA_H__ @@ -34,8 +27,8 @@ struct cpdma_params { int num_chan; bool has_soft_reset; int min_packet_size; - u32 desc_mem_phys; - u32 desc_hw_addr; + dma_addr_t desc_mem_phys; + dma_addr_t desc_hw_addr; int desc_mem_size; int desc_align; u32 bus_freq_mhz; diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 57450b174fc4..39075f5c73d5 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * DaVinci Ethernet Medium Access Controller * @@ -6,21 +7,6 @@ * Copyright (C) 2009 Texas Instruments. * * --------------------------------------------------------------------------- - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - * --------------------------------------------------------------------------- * History: * 0-5 A number of folks worked on this driver in bits and pieces but the major * contribution came from Suraj Iyer and Anant Gole diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index a98aedae1b41..11642721c123 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * DaVinci MDIO Module driver * @@ -7,22 +8,6 @@ * * Copyright (C) 2009 Texas Instruments. * - * --------------------------------------------------------------------------- - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- * --------------------------------------------------------------------------- */ #include <linux/module.h> #include <linux/kernel.h> @@ -140,7 +125,7 @@ static void davinci_mdio_init_clk(struct davinci_mdio_data *data) static void davinci_mdio_enable(struct davinci_mdio_data *data) { /* set enable and clock divider */ - __raw_writel(data->clk_div | CONTROL_ENABLE, &data->regs->control); + writel(data->clk_div | CONTROL_ENABLE, &data->regs->control); } static int davinci_mdio_reset(struct mii_bus *bus) @@ -159,7 +144,7 @@ static int davinci_mdio_reset(struct mii_bus *bus) msleep(PHY_MAX_ADDR * data->access_time); /* dump hardware version info */ - ver = __raw_readl(&data->regs->version); + ver = readl(&data->regs->version); dev_info(data->dev, "davinci mdio revision %d.%d, bus freq %ld\n", (ver >> 8) & 0xff, ver & 0xff, @@ -169,7 +154,7 @@ static int davinci_mdio_reset(struct mii_bus *bus) goto done; /* get phy mask from the alive register */ - phy_mask = __raw_readl(&data->regs->alive); + phy_mask = readl(&data->regs->alive); if (phy_mask) { /* restrict mdio bus to live phys only */ dev_info(data->dev, "detected phy mask %x\n", ~phy_mask); @@ -196,11 +181,11 @@ static inline int wait_for_user_access(struct davinci_mdio_data *data) u32 reg; while (time_after(timeout, jiffies)) { - reg = __raw_readl(®s->user[0].access); + reg = readl(®s->user[0].access); if ((reg & USERACCESS_GO) == 0) return 0; - reg = __raw_readl(®s->control); + reg = readl(®s->control); if ((reg & CONTROL_IDLE) == 0) { usleep_range(100, 200); continue; @@ -216,7 +201,7 @@ static inline int wait_for_user_access(struct davinci_mdio_data *data) return -EAGAIN; } - reg = __raw_readl(®s->user[0].access); + reg = readl(®s->user[0].access); if ((reg & USERACCESS_GO) == 0) return 0; @@ -263,7 +248,7 @@ static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg) if (ret < 0) break; - __raw_writel(reg, &data->regs->user[0].access); + writel(reg, &data->regs->user[0].access); ret = wait_for_user_access(data); if (ret == -EAGAIN) @@ -271,7 +256,7 @@ static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg) if (ret < 0) break; - reg = __raw_readl(&data->regs->user[0].access); + reg = readl(&data->regs->user[0].access); ret = (reg & USERACCESS_ACK) ? 
(reg & USERACCESS_DATA) : -EIO; break; } @@ -307,7 +292,7 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id, if (ret < 0) break; - __raw_writel(reg, &data->regs->user[0].access); + writel(reg, &data->regs->user[0].access); ret = wait_for_user_access(data); if (ret == -EAGAIN) @@ -412,7 +397,7 @@ static int davinci_mdio_probe(struct platform_device *pdev) data->dev = dev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - data->regs = devm_ioremap_resource(dev, res); + data->regs = devm_ioremap(dev, res->start, resource_size(res)); if (IS_ERR(data->regs)) return PTR_ERR(data->regs); @@ -472,9 +457,9 @@ static int davinci_mdio_runtime_suspend(struct device *dev) u32 ctrl; /* shutdown the scan state machine */ - ctrl = __raw_readl(&data->regs->control); + ctrl = readl(&data->regs->control); ctrl &= ~CONTROL_ENABLE; - __raw_writel(ctrl, &data->regs->control); + writel(ctrl, &data->regs->control); wait_for_idle(data); return 0; diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h index c4ffdf47bad5..43d5cd59b56b 100644 --- a/drivers/net/ethernet/ti/netcp.h +++ b/drivers/net/ethernet/ti/netcp.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * NetCP driver local header * @@ -8,15 +9,6 @@ * Santosh Shilimkar <santosh.shilimkar@ti.com> * Wingman Kwok <w-kwok2@ti.com> * Murali Karicheri <m-karicheri2@ti.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #ifndef __NETCP_H__ #define __NETCP_H__ diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index d847f672a705..01d4ca331f8c 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Keystone NetCP Core driver * @@ -8,15 +9,6 @@ * Santosh Shilimkar <santosh.shilimkar@ti.com> * Murali Karicheri <m-karicheri2@ti.com> * Wingman Kwok <w-kwok2@ti.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #include <linux/io.h> diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 0a920c5936b2..ec179700c184 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Keystone GBE and XGBE subsystem code * @@ -7,15 +8,6 @@ * Cyril Chemparathy <cyril@ti.com> * Santosh Shilimkar <santosh.shilimkar@ti.com> * Wingman Kwok <w-kwok2@ti.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. 
- * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #include <linux/io.h> diff --git a/drivers/net/ethernet/ti/netcp_sgmii.c b/drivers/net/ethernet/ti/netcp_sgmii.c index 5d8419f658d0..f7cf56d6351d 100644 --- a/drivers/net/ethernet/ti/netcp_sgmii.c +++ b/drivers/net/ethernet/ti/netcp_sgmii.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * SGMI module initialisation * @@ -6,14 +7,6 @@ * Sandeep Paulraj <s-paulraj@ti.com> * Wingman Kwok <w-kwok2@ti.com> * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #include "netcp.h" diff --git a/drivers/net/ethernet/ti/netcp_xgbepcsr.c b/drivers/net/ethernet/ti/netcp_xgbepcsr.c index 33571acc52b6..112778aedd8a 100644 --- a/drivers/net/ethernet/ti/netcp_xgbepcsr.c +++ b/drivers/net/ethernet/ti/netcp_xgbepcsr.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * XGE PCSR module initialisation * @@ -5,14 +6,6 @@ * Authors: Sandeep Nair <sandeep_n@ti.com> * WingMan Kwok <w-kwok2@ti.com> * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #include "netcp.h" diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig index da4ec575ccf9..db448fad621b 100644 --- a/drivers/net/ethernet/xilinx/Kconfig +++ b/drivers/net/ethernet/xilinx/Kconfig @@ -5,7 +5,7 @@ config NET_VENDOR_XILINX bool "Xilinx devices" default y - depends on PPC || PPC32 || MICROBLAZE || ARCH_ZYNQ || MIPS + depends on PPC || PPC32 || MICROBLAZE || ARCH_ZYNQ || MIPS || X86 || COMPILE_TEST ---help--- If you have a network (Ethernet) card belonging to this class, say Y. 
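The two Xilinx Kconfig hunks here add X86 and COMPILE_TEST to the dependency lists. That widening is only safe because the series as a whole replaces architecture-specific MMIO helpers (__raw_readl()/__raw_writel() in the davinci_mdio hunks above, in_be32()/out_be32() in the ll_temac hunks below) with portable accessors that exist on every architecture. A minimal sketch of the portable polling idiom — the demo_* names and the DEMO_GO bit are illustrative assumptions, not code from the patch:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/types.h>

#define DEMO_GO		BIT(31)	/* hypothetical "transfer in progress" flag */

/* Poll a device register until its (hypothetical) GO bit clears,
 * sleeping roughly 100us between reads and giving up after 100ms.
 */
static int demo_wait_idle(void __iomem *access_reg)
{
	u32 reg;

	return readl_poll_timeout(access_reg, reg, !(reg & DEMO_GO),
				  100, 100000);
}

readl() fixes the byte order to little-endian and includes the ordering barriers that the __raw_*() variants omit, so the same code builds and behaves consistently on any architecture COMPILE_TEST may compile it for.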
@@ -33,8 +33,7 @@ config XILINX_AXI_EMAC config XILINX_LL_TEMAC tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver" - depends on (PPC || MICROBLAZE) - depends on !64BIT || BROKEN + depends on PPC || MICROBLAZE || X86 || COMPILE_TEST select PHYLIB ---help--- This driver supports the Xilinx 10/100/1000 LocalLink TEMAC diff --git a/drivers/net/ethernet/xilinx/ll_temac.h b/drivers/net/ethernet/xilinx/ll_temac.h index 107575225383..1aeda084b8f1 100644 --- a/drivers/net/ethernet/xilinx/ll_temac.h +++ b/drivers/net/ethernet/xilinx/ll_temac.h @@ -334,6 +334,9 @@ struct temac_local { /* Connection to PHY device */ struct device_node *phy_node; + /* For non-device-tree devices */ + char phy_name[MII_BUS_ID_SIZE + 3]; + phy_interface_t phy_interface; /* MDIO bus data */ struct mii_bus *mii_bus; /* MII bus reference */ @@ -344,8 +347,10 @@ struct temac_local { #ifdef CONFIG_PPC_DCR dcr_host_t sdma_dcrs; #endif - u32 (*dma_in)(struct temac_local *, int); - void (*dma_out)(struct temac_local *, int, u32); + u32 (*temac_ior)(struct temac_local *lp, int offset); + void (*temac_iow)(struct temac_local *lp, int offset, u32 value); + u32 (*dma_in)(struct temac_local *lp, int reg); + void (*dma_out)(struct temac_local *lp, int reg, u32 value); int tx_irq; int rx_irq; @@ -353,7 +358,10 @@ struct temac_local { struct sk_buff **rx_skb; spinlock_t rx_lock; - struct mutex indirect_mutex; + /* For synchronization of indirect register access. Must be + * shared mutex between interfaces in same TEMAC block. + */ + struct mutex *indirect_mutex; u32 options; /* Current options word */ int last_link; unsigned int temac_features; @@ -367,18 +375,24 @@ struct temac_local { int tx_bd_next; int tx_bd_tail; int rx_bd_ci; + + /* DMA channel control setup */ + u32 tx_chnl_ctrl; + u32 rx_chnl_ctrl; }; +/* Wrappers for temac_ior()/temac_iow() function pointers above */ +#define temac_ior(lp, o) ((lp)->temac_ior(lp, o)) +#define temac_iow(lp, o, v) ((lp)->temac_iow(lp, o, v)) + /* xilinx_temac.c */ -u32 temac_ior(struct temac_local *lp, int offset); -void temac_iow(struct temac_local *lp, int offset, u32 value); int temac_indirect_busywait(struct temac_local *lp); u32 temac_indirect_in32(struct temac_local *lp, int reg); void temac_indirect_out32(struct temac_local *lp, int reg, u32 value); /* xilinx_temac_mdio.c */ -int temac_mdio_setup(struct temac_local *lp, struct device_node *np); +int temac_mdio_setup(struct temac_local *lp, struct platform_device *pdev); void temac_mdio_teardown(struct temac_local *lp); #endif /* XILINX_LL_TEMAC_H */ diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 44efffbe7970..ca95c726269a 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -33,6 +33,7 @@ #include <linux/module.h> #include <linux/mutex.h> #include <linux/netdevice.h> +#include <linux/if_ether.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_irq.h> @@ -51,6 +52,7 @@ #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> +#include <linux/platform_data/xilinx-ll-temac.h> #include "ll_temac.h" @@ -61,14 +63,24 @@ * Low level register access functions */ -u32 temac_ior(struct temac_local *lp, int offset) +u32 _temac_ior_be(struct temac_local *lp, int offset) { - return in_be32(lp->regs + offset); + return ioread32be(lp->regs + offset); } -void temac_iow(struct temac_local *lp, int offset, u32 value) +void _temac_iow_be(struct temac_local *lp, int offset, u32 
value) { - out_be32(lp->regs + offset, value); + return iowrite32be(value, lp->regs + offset); +} + +u32 _temac_ior_le(struct temac_local *lp, int offset) +{ + return ioread32(lp->regs + offset); +} + +void _temac_iow_le(struct temac_local *lp, int offset, u32 value) +{ + return iowrite32(value, lp->regs + offset); } int temac_indirect_busywait(struct temac_local *lp) @@ -80,7 +92,7 @@ int temac_indirect_busywait(struct temac_local *lp) WARN_ON(1); return -ETIMEDOUT; } - msleep(1); + usleep_range(500, 1000); } return 0; } @@ -119,23 +131,35 @@ void temac_indirect_out32(struct temac_local *lp, int reg, u32 value) } /** - * temac_dma_in32 - Memory mapped DMA read, this function expects a - * register input that is based on DCR word addresses which - * are then converted to memory mapped byte addresses + * temac_dma_in32_* - Memory mapped DMA read, these functions expect a + * register input that is based on DCR word addresses which are then + * converted to memory mapped byte addresses. To be assigned to + * lp->dma_in. */ -static u32 temac_dma_in32(struct temac_local *lp, int reg) { - return in_be32(lp->sdma_regs + (reg << 2)); +static u32 temac_dma_in32_be(struct temac_local *lp, int reg) +{ + return ioread32be(lp->sdma_regs + (reg << 2)); +} + +static u32 temac_dma_in32_le(struct temac_local *lp, int reg) +{ + return ioread32(lp->sdma_regs + (reg << 2)); } /** - * temac_dma_out32 - Memory mapped DMA read, this function expects a - * register input that is based on DCR word addresses which - * are then converted to memory mapped byte addresses + * temac_dma_out32_* - Memory mapped DMA write, these functions expect + * a register input that is based on DCR word addresses which are then + * converted to memory mapped byte addresses. To be assigned to + * lp->dma_out. */ -static void temac_dma_out32(struct temac_local *lp, int reg, u32 value) +static void temac_dma_out32_be(struct temac_local *lp, int reg, u32 value) +{ + iowrite32be(value, lp->sdma_regs + (reg << 2)); +} + +static void temac_dma_out32_le(struct temac_local *lp, int reg, u32 value) { - out_be32(lp->sdma_regs + (reg << 2), value); + iowrite32(value, lp->sdma_regs + (reg << 2)); } /* DMA register access functions can be DCR based or memory mapped. 
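The ll_temac hunks above introduce a pattern worth calling out: the endian-specific accessors (_temac_ior_le()/_temac_ior_be(), temac_dma_in32_le()/temac_dma_in32_be(), and friends) are selected once and stored behind function pointers in the private struct, so the hot path never branches on register endianness. A minimal self-contained sketch of that pattern, using hypothetical demo_* names rather than the driver's own:

#include <linux/io.h>
#include <linux/types.h>

struct demo_priv {
	void __iomem *sdma_regs;
	u32 (*dma_in)(struct demo_priv *priv, int reg);
	void (*dma_out)(struct demo_priv *priv, int reg, u32 value);
};

static u32 demo_dma_in_be(struct demo_priv *priv, int reg)
{
	/* DCR word address converted to a memory-mapped byte address */
	return ioread32be(priv->sdma_regs + (reg << 2));
}

static u32 demo_dma_in_le(struct demo_priv *priv, int reg)
{
	return ioread32(priv->sdma_regs + (reg << 2));
}

static void demo_dma_out_be(struct demo_priv *priv, int reg, u32 value)
{
	iowrite32be(value, priv->sdma_regs + (reg << 2));
}

static void demo_dma_out_le(struct demo_priv *priv, int reg, u32 value)
{
	iowrite32(value, priv->sdma_regs + (reg << 2));
}

/* Called once at probe time; every later access goes through the
 * stored pointers with no per-access endianness branch.
 */
static void demo_select_dma_accessors(struct demo_priv *priv,
				      bool little_endian)
{
	priv->dma_in = little_endian ? demo_dma_in_le : demo_dma_in_be;
	priv->dma_out = little_endian ? demo_dma_out_le : demo_dma_out_be;
}

In the probe hunks that follow, the endianness is decided exactly this way — from a "little-endian" device-tree property or a platform-data flag — and the chosen accessors are assigned once to lp->dma_in/lp->dma_out.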
@@ -187,7 +211,7 @@ static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op, /* * temac_dcr_setup - This is a stub for when DCR is not supported, - * such as with MicroBlaze + * such as with MicroBlaze and x86 */ static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op, struct device_node *np) @@ -225,7 +249,6 @@ static void temac_dma_bd_release(struct net_device *ndev) dma_free_coherent(ndev->dev.parent, sizeof(*lp->tx_bd_v) * TX_BD_NUM, lp->tx_bd_v, lp->tx_bd_p); - kfree(lp->rx_skb); } /** @@ -235,9 +258,11 @@ static int temac_dma_bd_init(struct net_device *ndev) { struct temac_local *lp = netdev_priv(ndev); struct sk_buff *skb; + dma_addr_t skb_dma_addr; int i; - lp->rx_skb = kcalloc(RX_BD_NUM, sizeof(*lp->rx_skb), GFP_KERNEL); + lp->rx_skb = devm_kcalloc(&ndev->dev, RX_BD_NUM, sizeof(*lp->rx_skb), + GFP_KERNEL); if (!lp->rx_skb) goto out; @@ -256,13 +281,13 @@ static int temac_dma_bd_init(struct net_device *ndev) goto out; for (i = 0; i < TX_BD_NUM; i++) { - lp->tx_bd_v[i].next = lp->tx_bd_p + - sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM); + lp->tx_bd_v[i].next = cpu_to_be32(lp->tx_bd_p + + sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM)); } for (i = 0; i < RX_BD_NUM; i++) { - lp->rx_bd_v[i].next = lp->rx_bd_p + - sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM); + lp->rx_bd_v[i].next = cpu_to_be32(lp->rx_bd_p + + sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM)); skb = netdev_alloc_skb_ip_align(ndev, XTE_MAX_JUMBO_FRAME_SIZE); @@ -271,31 +296,23 @@ static int temac_dma_bd_init(struct net_device *ndev) lp->rx_skb[i] = skb; /* returns physical address of skb->data */ - lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent, - skb->data, - XTE_MAX_JUMBO_FRAME_SIZE, - DMA_FROM_DEVICE); - lp->rx_bd_v[i].len = XTE_MAX_JUMBO_FRAME_SIZE; - lp->rx_bd_v[i].app0 = STS_CTRL_APP0_IRQONEND; + skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data, + XTE_MAX_JUMBO_FRAME_SIZE, + DMA_FROM_DEVICE); + lp->rx_bd_v[i].phys = cpu_to_be32(skb_dma_addr); + lp->rx_bd_v[i].len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE); + lp->rx_bd_v[i].app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND); } - lp->dma_out(lp, TX_CHNL_CTRL, 0x10220400 | - CHNL_CTRL_IRQ_EN | - CHNL_CTRL_IRQ_DLY_EN | - CHNL_CTRL_IRQ_COAL_EN); - /* 0x10220483 */ - /* 0x00100483 */ - lp->dma_out(lp, RX_CHNL_CTRL, 0xff070000 | - CHNL_CTRL_IRQ_EN | - CHNL_CTRL_IRQ_DLY_EN | - CHNL_CTRL_IRQ_COAL_EN | - CHNL_CTRL_IRQ_IOE); - /* 0xff010283 */ - - lp->dma_out(lp, RX_CURDESC_PTR, lp->rx_bd_p); - lp->dma_out(lp, RX_TAILDESC_PTR, - lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1))); - lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p); + /* Configure DMA channel (irq setup) */ + lp->dma_out(lp, TX_CHNL_CTRL, lp->tx_chnl_ctrl | + 0x00000400 | // Use 1 Bit Wide Counters. Currently Not Used! 
+ CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN | + CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN); + lp->dma_out(lp, RX_CHNL_CTRL, lp->rx_chnl_ctrl | + CHNL_CTRL_IRQ_IOE | + CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN | + CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN); /* Init descriptor indexes */ lp->tx_bd_ci = 0; @@ -303,6 +320,15 @@ static int temac_dma_bd_init(struct net_device *ndev) lp->tx_bd_tail = 0; lp->rx_bd_ci = 0; + /* Enable RX DMA transfers */ + wmb(); + lp->dma_out(lp, RX_CURDESC_PTR, lp->rx_bd_p); + lp->dma_out(lp, RX_TAILDESC_PTR, + lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1))); + + /* Prepare for TX DMA transfer */ + lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p); + return 0; out: @@ -319,7 +345,7 @@ static void temac_do_set_mac_address(struct net_device *ndev) struct temac_local *lp = netdev_priv(ndev); /* set up unicast MAC address filter set its mac address */ - mutex_lock(&lp->indirect_mutex); + mutex_lock(lp->indirect_mutex); temac_indirect_out32(lp, XTE_UAW0_OFFSET, (ndev->dev_addr[0]) | (ndev->dev_addr[1] << 8) | @@ -330,7 +356,7 @@ static void temac_do_set_mac_address(struct net_device *ndev) temac_indirect_out32(lp, XTE_UAW1_OFFSET, (ndev->dev_addr[4] & 0x000000ff) | (ndev->dev_addr[5] << 8)); - mutex_unlock(&lp->indirect_mutex); + mutex_unlock(lp->indirect_mutex); } static int temac_init_mac_address(struct net_device *ndev, const void *address) @@ -359,7 +385,7 @@ static void temac_set_multicast_list(struct net_device *ndev) u32 multi_addr_msw, multi_addr_lsw, val; int i; - mutex_lock(&lp->indirect_mutex); + mutex_lock(lp->indirect_mutex); if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) || netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM) { /* @@ -398,7 +424,7 @@ static void temac_set_multicast_list(struct net_device *ndev) temac_indirect_out32(lp, XTE_MAW1_OFFSET, 0); dev_info(&ndev->dev, "Promiscuous mode disabled.\n"); } - mutex_unlock(&lp->indirect_mutex); + mutex_unlock(lp->indirect_mutex); } static struct temac_option { @@ -490,7 +516,7 @@ static u32 temac_setoptions(struct net_device *ndev, u32 options) struct temac_option *tp = &temac_options[0]; int reg; - mutex_lock(&lp->indirect_mutex); + mutex_lock(lp->indirect_mutex); while (tp->opt) { reg = temac_indirect_in32(lp, tp->reg) & ~tp->m_or; if (options & tp->opt) @@ -499,7 +525,7 @@ static u32 temac_setoptions(struct net_device *ndev, u32 options) tp++; } lp->options |= options; - mutex_unlock(&lp->indirect_mutex); + mutex_unlock(lp->indirect_mutex); return 0; } @@ -518,7 +544,7 @@ static void temac_device_reset(struct net_device *ndev) dev_dbg(&ndev->dev, "%s()\n", __func__); - mutex_lock(&lp->indirect_mutex); + mutex_lock(lp->indirect_mutex); /* Reset the receiver and wait for it to finish reset */ temac_indirect_out32(lp, XTE_RXC1_OFFSET, XTE_RXC1_RXRST_MASK); timeout = 1000; @@ -570,7 +596,7 @@ static void temac_device_reset(struct net_device *ndev) temac_indirect_out32(lp, XTE_TXC_OFFSET, 0); temac_indirect_out32(lp, XTE_FCC_OFFSET, XTE_FCC_RXFLO_MASK); - mutex_unlock(&lp->indirect_mutex); + mutex_unlock(lp->indirect_mutex); /* Sync default options with HW * but leave receiver and transmitter disabled. 
*/ @@ -598,7 +624,7 @@ static void temac_adjust_link(struct net_device *ndev) /* hash together the state values to decide if something has changed */ link_state = phy->speed | (phy->duplex << 1) | phy->link; - mutex_lock(&lp->indirect_mutex); + mutex_lock(lp->indirect_mutex); if (lp->last_link != link_state) { mii_speed = temac_indirect_in32(lp, XTE_EMCFG_OFFSET); mii_speed &= ~XTE_EMCFG_LINKSPD_MASK; @@ -614,23 +640,52 @@ static void temac_adjust_link(struct net_device *ndev) lp->last_link = link_state; phy_print_status(phy); } - mutex_unlock(&lp->indirect_mutex); + mutex_unlock(lp->indirect_mutex); +} + +#ifdef CONFIG_64BIT + +void ptr_to_txbd(void *p, struct cdmac_bd *bd) +{ + bd->app3 = (u32)(((u64)p) >> 32); + bd->app4 = (u32)((u64)p & 0xFFFFFFFF); +} + +void *ptr_from_txbd(struct cdmac_bd *bd) +{ + return (void *)(((u64)(bd->app3) << 32) | bd->app4); } +#else + +void ptr_to_txbd(void *p, struct cdmac_bd *bd) +{ + bd->app4 = (u32)p; +} + +void *ptr_from_txbd(struct cdmac_bd *bd) +{ + return (void *)(bd->app4); +} + +#endif + static void temac_start_xmit_done(struct net_device *ndev) { struct temac_local *lp = netdev_priv(ndev); struct cdmac_bd *cur_p; unsigned int stat = 0; + struct sk_buff *skb; cur_p = &lp->tx_bd_v[lp->tx_bd_ci]; - stat = cur_p->app0; + stat = be32_to_cpu(cur_p->app0); while (stat & STS_CTRL_APP0_CMPLT) { - dma_unmap_single(ndev->dev.parent, cur_p->phys, cur_p->len, - DMA_TO_DEVICE); - if (cur_p->app4) - dev_consume_skb_irq((struct sk_buff *)cur_p->app4); + dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys), + be32_to_cpu(cur_p->len), DMA_TO_DEVICE); + skb = (struct sk_buff *)ptr_from_txbd(cur_p); + if (skb) + dev_consume_skb_irq(skb); cur_p->app0 = 0; cur_p->app1 = 0; cur_p->app2 = 0; @@ -638,14 +693,14 @@ static void temac_start_xmit_done(struct net_device *ndev) cur_p->app4 = 0; ndev->stats.tx_packets++; - ndev->stats.tx_bytes += cur_p->len; + ndev->stats.tx_bytes += be32_to_cpu(cur_p->len); lp->tx_bd_ci++; if (lp->tx_bd_ci >= TX_BD_NUM) lp->tx_bd_ci = 0; cur_p = &lp->tx_bd_v[lp->tx_bd_ci]; - stat = cur_p->app0; + stat = be32_to_cpu(cur_p->app0); } netif_wake_queue(ndev); @@ -679,7 +734,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct temac_local *lp = netdev_priv(ndev); struct cdmac_bd *cur_p; - dma_addr_t start_p, tail_p; + dma_addr_t start_p, tail_p, skb_dma_addr; int ii; unsigned long num_frag; skb_frag_t *frag; @@ -689,7 +744,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) start_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail; cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; - if (temac_check_tx_bd_space(lp, num_frag)) { + if (temac_check_tx_bd_space(lp, num_frag + 1)) { if (!netif_queue_stopped(ndev)) netif_stop_queue(ndev); return NETDEV_TX_BUSY; @@ -700,16 +755,18 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) unsigned int csum_start_off = skb_checksum_start_offset(skb); unsigned int csum_index_off = csum_start_off + skb->csum_offset; - cur_p->app0 |= 1; /* TX Checksum Enabled */ - cur_p->app1 = (csum_start_off << 16) | csum_index_off; + cur_p->app0 |= cpu_to_be32(0x000001); /* TX Checksum Enabled */ + cur_p->app1 = cpu_to_be32((csum_start_off << 16) + | csum_index_off); cur_p->app2 = 0; /* initial checksum seed */ } - cur_p->app0 |= STS_CTRL_APP0_SOP; - cur_p->len = skb_headlen(skb); - cur_p->phys = dma_map_single(ndev->dev.parent, skb->data, - skb_headlen(skb), DMA_TO_DEVICE); - cur_p->app4 = (unsigned long)skb; + cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_SOP); + skb_dma_addr = 
dma_map_single(ndev->dev.parent, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + cur_p->len = cpu_to_be32(skb_headlen(skb)); + cur_p->phys = cpu_to_be32(skb_dma_addr); + ptr_to_txbd((void *)skb, cur_p); for (ii = 0; ii < num_frag; ii++) { lp->tx_bd_tail++; @@ -717,14 +774,16 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) lp->tx_bd_tail = 0; cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; - cur_p->phys = dma_map_single(ndev->dev.parent, - skb_frag_address(frag), - skb_frag_size(frag), DMA_TO_DEVICE); - cur_p->len = skb_frag_size(frag); + skb_dma_addr = dma_map_single(ndev->dev.parent, + skb_frag_address(frag), + skb_frag_size(frag), + DMA_TO_DEVICE); + cur_p->phys = cpu_to_be32(skb_dma_addr); + cur_p->len = cpu_to_be32(skb_frag_size(frag)); cur_p->app0 = 0; frag++; } - cur_p->app0 |= STS_CTRL_APP0_EOP; + cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_EOP); tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail; lp->tx_bd_tail++; @@ -734,6 +793,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) skb_tx_timestamp(skb); /* Kick off the transfer */ + wmb(); lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */ return NETDEV_TX_OK; @@ -746,7 +806,7 @@ static void ll_temac_recv(struct net_device *ndev) struct sk_buff *skb, *new_skb; unsigned int bdstat; struct cdmac_bd *cur_p; - dma_addr_t tail_p; + dma_addr_t tail_p, skb_dma_addr; int length; unsigned long flags; @@ -755,14 +815,14 @@ static void ll_temac_recv(struct net_device *ndev) tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci; cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; - bdstat = cur_p->app0; + bdstat = be32_to_cpu(cur_p->app0); while ((bdstat & STS_CTRL_APP0_CMPLT)) { skb = lp->rx_skb[lp->rx_bd_ci]; - length = cur_p->app4 & 0x3FFF; + length = be32_to_cpu(cur_p->app4) & 0x3FFF; - dma_unmap_single(ndev->dev.parent, cur_p->phys, length, - DMA_FROM_DEVICE); + dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys), + XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE); skb_put(skb, length); skb->protocol = eth_type_trans(skb, ndev); @@ -773,7 +833,12 @@ static void ll_temac_recv(struct net_device *ndev) (skb->protocol == htons(ETH_P_IP)) && (skb->len > 64)) { - skb->csum = cur_p->app3 & 0xFFFF; + /* Convert from device endianness (be32) to cpu + * endiannes, and if necessary swap the bytes + * (back) for proper IP checksum byte order + * (be16). 
+ */ + skb->csum = htons(be32_to_cpu(cur_p->app3) & 0xFFFF); skb->ip_summed = CHECKSUM_COMPLETE; } @@ -790,11 +855,12 @@ static void ll_temac_recv(struct net_device *ndev) return; } - cur_p->app0 = STS_CTRL_APP0_IRQONEND; - cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data, - XTE_MAX_JUMBO_FRAME_SIZE, - DMA_FROM_DEVICE); - cur_p->len = XTE_MAX_JUMBO_FRAME_SIZE; + cur_p->app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND); + skb_dma_addr = dma_map_single(ndev->dev.parent, new_skb->data, + XTE_MAX_JUMBO_FRAME_SIZE, + DMA_FROM_DEVICE); + cur_p->phys = cpu_to_be32(skb_dma_addr); + cur_p->len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE); lp->rx_skb[lp->rx_bd_ci] = new_skb; lp->rx_bd_ci++; @@ -802,7 +868,7 @@ static void ll_temac_recv(struct net_device *ndev) lp->rx_bd_ci = 0; cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; - bdstat = cur_p->app0; + bdstat = be32_to_cpu(cur_p->app0); } lp->dma_out(lp, RX_TAILDESC_PTR, tail_p); @@ -857,7 +923,14 @@ static int temac_open(struct net_device *ndev) dev_err(lp->dev, "of_phy_connect() failed\n"); return -ENODEV; } - + phy_start(phydev); + } else if (strlen(lp->phy_name) > 0) { + phydev = phy_connect(lp->ndev, lp->phy_name, temac_adjust_link, + lp->phy_interface); + if (!phydev) { + dev_err(lp->dev, "phy_connect() failed\n"); + return -ENODEV; + } phy_start(phydev); } @@ -977,22 +1050,25 @@ static const struct ethtool_ops temac_ethtool_ops = { .set_link_ksettings = phy_ethtool_set_link_ksettings, }; -static int temac_of_probe(struct platform_device *op) +static int temac_probe(struct platform_device *pdev) { - struct device_node *np; + struct ll_temac_platform_data *pdata = dev_get_platdata(&pdev->dev); + struct device_node *temac_np = dev_of_node(&pdev->dev), *dma_np; struct temac_local *lp; struct net_device *ndev; + struct resource *res; const void *addr; __be32 *p; + bool little_endian; int rc = 0; /* Init network device structure */ - ndev = alloc_etherdev(sizeof(*lp)); + ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*lp)); if (!ndev) return -ENOMEM; - platform_set_drvdata(op, ndev); - SET_NETDEV_DEV(ndev, &op->dev); + platform_set_drvdata(pdev, ndev); + SET_NETDEV_DEV(ndev, &pdev->dev); ndev->flags &= ~IFF_MULTICAST; /* clear multicast */ ndev->features = NETIF_F_SG; ndev->netdev_ops = &temac_netdev_ops; @@ -1014,89 +1090,196 @@ static int temac_of_probe(struct platform_device *op) /* setup temac private info structure */ lp = netdev_priv(ndev); lp->ndev = ndev; - lp->dev = &op->dev; + lp->dev = &pdev->dev; lp->options = XTE_OPTION_DEFAULTS; spin_lock_init(&lp->rx_lock); - mutex_init(&lp->indirect_mutex); + + /* Setup mutex for synchronization of indirect register access */ + if (pdata) { + if (!pdata->indirect_mutex) { + dev_err(&pdev->dev, + "indirect_mutex missing in platform_data\n"); + return -EINVAL; + } + lp->indirect_mutex = pdata->indirect_mutex; + } else { + lp->indirect_mutex = devm_kmalloc(&pdev->dev, + sizeof(*lp->indirect_mutex), + GFP_KERNEL); + mutex_init(lp->indirect_mutex); + } /* map device registers */ - lp->regs = of_iomap(op->dev.of_node, 0); - if (!lp->regs) { - dev_err(&op->dev, "could not map temac regs.\n"); - rc = -ENOMEM; - goto nodev; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + lp->regs = devm_ioremap_nocache(&pdev->dev, res->start, + resource_size(res)); + if (IS_ERR(lp->regs)) { + dev_err(&pdev->dev, "could not map TEMAC registers\n"); + return PTR_ERR(lp->regs); + } + + /* Select register access functions with the specified + * endianness mode. Default for OF devices is big-endian. 
+ */ + little_endian = false; + if (temac_np) { + if (of_get_property(temac_np, "little-endian", NULL)) + little_endian = true; + } else if (pdata) { + little_endian = pdata->reg_little_endian; + } + if (little_endian) { + lp->temac_ior = _temac_ior_le; + lp->temac_iow = _temac_iow_le; + } else { + lp->temac_ior = _temac_ior_be; + lp->temac_iow = _temac_iow_be; } /* Setup checksum offload, but default to off if not specified */ lp->temac_features = 0; - p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,txcsum", NULL); - if (p && be32_to_cpu(*p)) { - lp->temac_features |= TEMAC_FEATURE_TX_CSUM; + if (temac_np) { + p = (__be32 *)of_get_property(temac_np, "xlnx,txcsum", NULL); + if (p && be32_to_cpu(*p)) + lp->temac_features |= TEMAC_FEATURE_TX_CSUM; + p = (__be32 *)of_get_property(temac_np, "xlnx,rxcsum", NULL); + if (p && be32_to_cpu(*p)) + lp->temac_features |= TEMAC_FEATURE_RX_CSUM; + } else if (pdata) { + if (pdata->txcsum) + lp->temac_features |= TEMAC_FEATURE_TX_CSUM; + if (pdata->rxcsum) + lp->temac_features |= TEMAC_FEATURE_RX_CSUM; + } + if (lp->temac_features & TEMAC_FEATURE_TX_CSUM) /* Can checksum TCP/UDP over IPv4. */ ndev->features |= NETIF_F_IP_CSUM; - } - p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,rxcsum", NULL); - if (p && be32_to_cpu(*p)) - lp->temac_features |= TEMAC_FEATURE_RX_CSUM; - - /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */ - np = of_parse_phandle(op->dev.of_node, "llink-connected", 0); - if (!np) { - dev_err(&op->dev, "could not find DMA node\n"); - rc = -ENODEV; - goto err_iounmap; - } - /* Setup the DMA register accesses, could be DCR or memory mapped */ - if (temac_dcr_setup(lp, op, np)) { + /* Setup LocalLink DMA */ + if (temac_np) { + /* Find the DMA node, map the DMA registers, and + * decode the DMA IRQs. + */ + dma_np = of_parse_phandle(temac_np, "llink-connected", 0); + if (!dma_np) { + dev_err(&pdev->dev, "could not find DMA node\n"); + return -ENODEV; + } - /* no DCR in the device tree, try non-DCR */ - lp->sdma_regs = of_iomap(np, 0); - if (lp->sdma_regs) { - lp->dma_in = temac_dma_in32; - lp->dma_out = temac_dma_out32; - dev_dbg(&op->dev, "MEM base: %p\n", lp->sdma_regs); - } else { - dev_err(&op->dev, "unable to map DMA registers\n"); - of_node_put(np); - goto err_iounmap; + /* Setup the DMA register accesses, could be DCR or + * memory mapped. + */ + if (temac_dcr_setup(lp, pdev, dma_np)) { + /* no DCR in the device tree, try non-DCR */ + lp->sdma_regs = devm_of_iomap(&pdev->dev, dma_np, 0, + NULL); + if (IS_ERR(lp->sdma_regs)) { + dev_err(&pdev->dev, + "unable to map DMA registers\n"); + of_node_put(dma_np); + return PTR_ERR(lp->sdma_regs); + } + if (of_get_property(dma_np, "little-endian", NULL)) { + lp->dma_in = temac_dma_in32_le; + lp->dma_out = temac_dma_out32_le; + } else { + lp->dma_in = temac_dma_in32_be; + lp->dma_out = temac_dma_out32_be; + } + dev_dbg(&pdev->dev, "MEM base: %p\n", lp->sdma_regs); } - } - lp->rx_irq = irq_of_parse_and_map(np, 0); - lp->tx_irq = irq_of_parse_and_map(np, 1); + /* Get DMA RX and TX interrupts */ + lp->rx_irq = irq_of_parse_and_map(dma_np, 0); + lp->tx_irq = irq_of_parse_and_map(dma_np, 1); - of_node_put(np); /* Finished with the DMA node; drop the reference */ + /* Use defaults for IRQ delay/coalescing setup. These + * are configuration values, so does not belong in + * device-tree. 
+ */ + lp->tx_chnl_ctrl = 0x10220000; + lp->rx_chnl_ctrl = 0xff070000; + + /* Finished with the DMA node; drop the reference */ + of_node_put(dma_np); + } else if (pdata) { + /* 2nd memory resource specifies DMA registers */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + lp->sdma_regs = devm_ioremap_nocache(&pdev->dev, res->start, + resource_size(res)); + if (IS_ERR(lp->sdma_regs)) { + dev_err(&pdev->dev, + "could not map DMA registers\n"); + return PTR_ERR(lp->sdma_regs); + } + if (pdata->dma_little_endian) { + lp->dma_in = temac_dma_in32_le; + lp->dma_out = temac_dma_out32_le; + } else { + lp->dma_in = temac_dma_in32_be; + lp->dma_out = temac_dma_out32_be; + } - if (!lp->rx_irq || !lp->tx_irq) { - dev_err(&op->dev, "could not determine irqs\n"); - rc = -ENOMEM; - goto err_iounmap_2; + /* Get DMA RX and TX interrupts */ + lp->rx_irq = platform_get_irq(pdev, 0); + lp->tx_irq = platform_get_irq(pdev, 1); + + /* IRQ delay/coalescing setup */ + if (pdata->tx_irq_timeout || pdata->tx_irq_count) + lp->tx_chnl_ctrl = (pdata->tx_irq_timeout << 24) | + (pdata->tx_irq_count << 16); + else + lp->tx_chnl_ctrl = 0x10220000; + if (pdata->rx_irq_timeout || pdata->rx_irq_count) + lp->rx_chnl_ctrl = (pdata->rx_irq_timeout << 24) | + (pdata->rx_irq_count << 16); + else + lp->rx_chnl_ctrl = 0xff070000; } + /* Error handle returned DMA RX and TX interrupts */ + if (lp->rx_irq < 0) { + if (lp->rx_irq != -EPROBE_DEFER) + dev_err(&pdev->dev, "could not get DMA RX irq\n"); + return lp->rx_irq; + } + if (lp->tx_irq < 0) { + if (lp->tx_irq != -EPROBE_DEFER) + dev_err(&pdev->dev, "could not get DMA TX irq\n"); + return lp->tx_irq; + } - /* Retrieve the MAC address */ - addr = of_get_mac_address(op->dev.of_node); - if (!addr) { - dev_err(&op->dev, "could not find MAC address\n"); - rc = -ENODEV; - goto err_iounmap_2; + if (temac_np) { + /* Retrieve the MAC address */ + addr = of_get_mac_address(temac_np); + if (!addr) { + dev_err(&pdev->dev, "could not find MAC address\n"); + return -ENODEV; + } + temac_init_mac_address(ndev, addr); + } else if (pdata) { + temac_init_mac_address(ndev, pdata->mac_addr); } - temac_init_mac_address(ndev, addr); - rc = temac_mdio_setup(lp, op->dev.of_node); + rc = temac_mdio_setup(lp, pdev); if (rc) - dev_warn(&op->dev, "error registering MDIO bus\n"); - - lp->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0); - if (lp->phy_node) - dev_dbg(lp->dev, "using PHY node %pOF (%p)\n", np, np); + dev_warn(&pdev->dev, "error registering MDIO bus\n"); + + if (temac_np) { + lp->phy_node = of_parse_phandle(temac_np, "phy-handle", 0); + if (lp->phy_node) + dev_dbg(lp->dev, "using PHY node %pOF\n", temac_np); + } else if (pdata) { + snprintf(lp->phy_name, sizeof(lp->phy_name), + PHY_ID_FMT, lp->mii_bus->id, pdata->phy_addr); + lp->phy_interface = pdata->phy_interface; + } /* Add the device attributes */ rc = sysfs_create_group(&lp->dev->kobj, &temac_attr_group); if (rc) { dev_err(lp->dev, "Error creating sysfs files\n"); - goto err_iounmap_2; + goto err_sysfs_create; } rc = register_netdev(lp->ndev); @@ -1107,33 +1290,25 @@ static int temac_of_probe(struct platform_device *op) return 0; - err_register_ndev: +err_register_ndev: sysfs_remove_group(&lp->dev->kobj, &temac_attr_group); - err_iounmap_2: - if (lp->sdma_regs) - iounmap(lp->sdma_regs); - err_iounmap: - iounmap(lp->regs); - nodev: - free_netdev(ndev); - ndev = NULL; +err_sysfs_create: + if (lp->phy_node) + of_node_put(lp->phy_node); + temac_mdio_teardown(lp); return rc; } -static int temac_of_remove(struct 
platform_device *op) +static int temac_remove(struct platform_device *pdev) { - struct net_device *ndev = platform_get_drvdata(op); + struct net_device *ndev = platform_get_drvdata(pdev); struct temac_local *lp = netdev_priv(ndev); - temac_mdio_teardown(lp); unregister_netdev(ndev); sysfs_remove_group(&lp->dev->kobj, &temac_attr_group); - of_node_put(lp->phy_node); - lp->phy_node = NULL; - iounmap(lp->regs); - if (lp->sdma_regs) - iounmap(lp->sdma_regs); - free_netdev(ndev); + if (lp->phy_node) + of_node_put(lp->phy_node); + temac_mdio_teardown(lp); return 0; } @@ -1146,16 +1321,16 @@ static const struct of_device_id temac_of_match[] = { }; MODULE_DEVICE_TABLE(of, temac_of_match); -static struct platform_driver temac_of_driver = { - .probe = temac_of_probe, - .remove = temac_of_remove, +static struct platform_driver temac_driver = { + .probe = temac_probe, + .remove = temac_remove, .driver = { .name = "xilinx_temac", .of_match_table = temac_of_match, }, }; -module_platform_driver(temac_of_driver); +module_platform_driver(temac_driver); MODULE_DESCRIPTION("Xilinx LL_TEMAC Ethernet driver"); MODULE_AUTHOR("Yoshio Kashiwagi"); diff --git a/drivers/net/ethernet/xilinx/ll_temac_mdio.c b/drivers/net/ethernet/xilinx/ll_temac_mdio.c index f5e83ac6f7e2..c2a11703bc6d 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_mdio.c +++ b/drivers/net/ethernet/xilinx/ll_temac_mdio.c @@ -14,6 +14,7 @@ #include <linux/of_address.h> #include <linux/slab.h> #include <linux/of_mdio.h> +#include <linux/platform_data/xilinx-ll-temac.h> #include "ll_temac.h" @@ -28,10 +29,10 @@ static int temac_mdio_read(struct mii_bus *bus, int phy_id, int reg) /* Write the PHY address to the MIIM Access Initiator register. * When the transfer completes, the PHY register value will appear * in the LSW0 register */ - mutex_lock(&lp->indirect_mutex); + mutex_lock(lp->indirect_mutex); temac_iow(lp, XTE_LSW0_OFFSET, (phy_id << 5) | reg); rc = temac_indirect_in32(lp, XTE_MIIMAI_OFFSET); - mutex_unlock(&lp->indirect_mutex); + mutex_unlock(lp->indirect_mutex); dev_dbg(lp->dev, "temac_mdio_read(phy_id=%i, reg=%x) == %x\n", phy_id, reg, rc); @@ -49,25 +50,34 @@ static int temac_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val) /* First write the desired value into the write data register * and then write the address into the access initiator register */ - mutex_lock(&lp->indirect_mutex); + mutex_lock(lp->indirect_mutex); temac_indirect_out32(lp, XTE_MGTDR_OFFSET, val); temac_indirect_out32(lp, XTE_MIIMAI_OFFSET, (phy_id << 5) | reg); - mutex_unlock(&lp->indirect_mutex); + mutex_unlock(lp->indirect_mutex); return 0; } -int temac_mdio_setup(struct temac_local *lp, struct device_node *np) +int temac_mdio_setup(struct temac_local *lp, struct platform_device *pdev) { + struct ll_temac_platform_data *pdata = dev_get_platdata(&pdev->dev); + struct device_node *np = dev_of_node(&pdev->dev); struct mii_bus *bus; u32 bus_hz; int clk_div; int rc; struct resource res; + /* Get MDIO bus frequency (if specified) */ + bus_hz = 0; + if (np) + of_property_read_u32(np, "clock-frequency", &bus_hz); + else if (pdata) + bus_hz = pdata->mdio_clk_freq; + /* Calculate a reasonable divisor for the clock rate */ clk_div = 0x3f; /* worst-case default setting */ - if (of_property_read_u32(np, "clock-frequency", &bus_hz) == 0) { + if (bus_hz != 0) { clk_div = bus_hz / (2500 * 1000 * 2) - 1; if (clk_div < 1) clk_div = 1; @@ -77,17 +87,23 @@ int temac_mdio_setup(struct temac_local *lp, struct device_node *np) /* Enable the MDIO bus by asserting the enable bit 
and writing * in the clock config */ - mutex_lock(&lp->indirect_mutex); + mutex_lock(lp->indirect_mutex); temac_indirect_out32(lp, XTE_MC_OFFSET, 1 << 6 | clk_div); - mutex_unlock(&lp->indirect_mutex); + mutex_unlock(lp->indirect_mutex); - bus = mdiobus_alloc(); + bus = devm_mdiobus_alloc(&pdev->dev); if (!bus) return -ENOMEM; - of_address_to_resource(np, 0, &res); - snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx", - (unsigned long long)res.start); + if (np) { + of_address_to_resource(np, 0, &res); + snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx", + (unsigned long long)res.start); + } else if (pdata && pdata->mdio_bus_id >= 0) { + snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx", + pdata->mdio_bus_id); + } + bus->priv = lp; bus->name = "Xilinx TEMAC MDIO"; bus->read = temac_mdio_read; @@ -98,23 +114,16 @@ int temac_mdio_setup(struct temac_local *lp, struct device_node *np) rc = of_mdiobus_register(bus, np); if (rc) - goto err_register; + return rc; - mutex_lock(&lp->indirect_mutex); + mutex_lock(lp->indirect_mutex); dev_dbg(lp->dev, "MDIO bus registered; MC:%x\n", temac_indirect_in32(lp, XTE_MC_OFFSET)); - mutex_unlock(&lp->indirect_mutex); + mutex_unlock(lp->indirect_mutex); return 0; - - err_register: - mdiobus_free(bus); - return rc; } void temac_mdio_teardown(struct temac_local *lp) { mdiobus_unregister(lp->mii_bus); - mdiobus_free(lp->mii_bus); - lp->mii_bus = NULL; } - diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index b03a417d0073..fc38692da71e 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -17,6 +17,7 @@ #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> +#include <linux/ethtool.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/of_address.h> @@ -1078,6 +1079,27 @@ static bool get_bool(struct platform_device *ofdev, const char *s) return (bool)*p; } +/** + * xemaclite_ethtools_get_drvinfo - Get various Axi Emac Lite driver info + * @ndev: Pointer to net_device structure + * @ed: Pointer to ethtool_drvinfo structure + * + * This implements ethtool command for getting the driver information. + * Issue "ethtool -i ethX" under linux prompt to execute this function. 
+ */ +static void xemaclite_ethtools_get_drvinfo(struct net_device *ndev, + struct ethtool_drvinfo *ed) +{ + strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver)); +} + +static const struct ethtool_ops xemaclite_ethtool_ops = { + .get_drvinfo = xemaclite_ethtools_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, +}; + static const struct net_device_ops xemaclite_netdev_ops; /** @@ -1164,6 +1186,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev) dev_info(dev, "MAC address is now %pM\n", ndev->dev_addr); ndev->netdev_ops = &xemaclite_netdev_ops; + ndev->ethtool_ops = &xemaclite_ethtool_ops; ndev->flags &= ~IFF_MULTICAST; ndev->watchdog_timeo = TX_TIMEOUT; @@ -1229,12 +1252,29 @@ xemaclite_poll_controller(struct net_device *ndev) } #endif +/* Ioctl MII Interface */ +static int xemaclite_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + if (!dev->phydev || !netif_running(dev)) + return -EINVAL; + + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return phy_mii_ioctl(dev->phydev, rq, cmd); + default: + return -EOPNOTSUPP; + } +} + static const struct net_device_ops xemaclite_netdev_ops = { .ndo_open = xemaclite_open, .ndo_stop = xemaclite_close, .ndo_start_xmit = xemaclite_send, .ndo_set_mac_address = xemaclite_set_mac_address, .ndo_tx_timeout = xemaclite_tx_timeout, + .ndo_do_ioctl = xemaclite_ioctl, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = xemaclite_poll_controller, #endif diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 5583d993480d..98d1a45c0606 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -14,6 +14,7 @@ #include <linux/module.h> #include <linux/etherdevice.h> #include <linux/hash.h> +#include <net/ipv6_stubs.h> #include <net/dst_metadata.h> #include <net/gro_cells.h> #include <net/rtnetlink.h> @@ -22,8 +23,6 @@ #define GENEVE_NETDEV_VER "0.6" -#define GENEVE_UDP_PORT 6081 - #define GENEVE_N_VID (1u << 24) #define GENEVE_VID_MASK (GENEVE_N_VID - 1) diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index 7a145172d503..eaf4311b4004 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -1270,21 +1270,21 @@ static const struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = { static const struct genl_ops gtp_genl_ops[] = { { .cmd = GTP_CMD_NEWPDP, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = gtp_genl_new_pdp, - .policy = gtp_genl_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = GTP_CMD_DELPDP, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = gtp_genl_del_pdp, - .policy = gtp_genl_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = GTP_CMD_GETPDP, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = gtp_genl_get_pdp, .dumpit = gtp_genl_dump_pdp, - .policy = gtp_genl_policy, .flags = GENL_ADMIN_PERM, }, }; @@ -1294,6 +1294,7 @@ static struct genl_family gtp_genl_family __ro_after_init = { .version = 0, .hdrsize = 0, .maxattr = GTPA_MAX, + .policy = gtp_genl_policy, .netnsok = true, .module = THIS_MODULE, .ops = gtp_genl_ops, diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c index 029206e4da3b..0f7025f3a384 100644 --- a/drivers/net/hippi/rrunner.c +++ b/drivers/net/hippi/rrunner.c @@ -1298,11 +1298,11 @@ static void rr_dump(struct net_device *dev) if (rrpriv->tx_skbuff[cons]){ len = min_t(int, 0x80, rrpriv->tx_skbuff[cons]->len); printk("skbuff for cons %i is valid - dumping data (0x%x bytes - 
skbuff len 0x%x)\n", cons, len, rrpriv->tx_skbuff[cons]->len); - printk("mode 0x%x, size 0x%x,\n phys %08Lx, skbuff-addr %08lx, truesize 0x%x\n", + printk("mode 0x%x, size 0x%x,\n phys %08Lx, skbuff-addr %p, truesize 0x%x\n", rrpriv->tx_ring[cons].mode, rrpriv->tx_ring[cons].size, (unsigned long long) rrpriv->tx_ring[cons].addr.addrlo, - (unsigned long)rrpriv->tx_skbuff[cons]->data, + rrpriv->tx_skbuff[cons]->data, (unsigned int)rrpriv->tx_skbuff[cons]->truesize); for (i = 0; i < len; i++){ if (!(i & 7)) diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index e0dce373cdd9..fdbeb7070d42 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -966,7 +966,7 @@ int netvsc_send(struct net_device *ndev, /* Keep aggregating only if stack says more data is coming * and not doing mixed modes send and not flow blocked */ - xmit_more = skb->xmit_more && + xmit_more = netdev_xmit_more() && !packet->cp_partial && !netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx)); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index b20fb0fb595b..06393b215102 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -328,7 +328,7 @@ static inline int netvsc_get_tx_queue(struct net_device *ndev, * If a valid queue has already been assigned, then use that. * Otherwise compute tx queue based on hash and the send table. * - * This is basically similar to default (__netdev_pick_tx) with the added step + * This is basically similar to default (netdev_pick_tx) with the added step * of using the host send_table when no other queue has been assigned. * * TODO support XPS - but get_xps_queue not exported @@ -351,8 +351,7 @@ static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb) } static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, - struct net_device *sb_dev, - select_queue_fallback_t fallback) + struct net_device *sb_dev) { struct net_device_context *ndc = netdev_priv(ndev); struct net_device *vf_netdev; @@ -364,10 +363,9 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, const struct net_device_ops *vf_ops = vf_netdev->netdev_ops; if (vf_ops->ndo_select_queue) - txq = vf_ops->ndo_select_queue(vf_netdev, skb, - sb_dev, fallback); + txq = vf_ops->ndo_select_queue(vf_netdev, skb, sb_dev); else - txq = fallback(vf_netdev, skb, NULL); + txq = netdev_pick_tx(vf_netdev, skb, NULL); /* Record the queue selected by VF so that it can be * used for common case where VF has more queues than diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c index 3b88846de31b..b187ae1a6bd6 100644 --- a/drivers/net/ieee802154/mac802154_hwsim.c +++ b/drivers/net/ieee802154/mac802154_hwsim.c @@ -227,14 +227,16 @@ static int append_radio_msg(struct sk_buff *skb, struct hwsim_phy *phy) return 0; } - nl_edges = nla_nest_start(skb, MAC802154_HWSIM_ATTR_RADIO_EDGES); + nl_edges = nla_nest_start_noflag(skb, + MAC802154_HWSIM_ATTR_RADIO_EDGES); if (!nl_edges) { rcu_read_unlock(); return -ENOBUFS; } list_for_each_entry_rcu(e, &phy->edges, list) { - nl_edge = nla_nest_start(skb, MAC802154_HWSIM_ATTR_RADIO_EDGE); + nl_edge = nla_nest_start_noflag(skb, + MAC802154_HWSIM_ATTR_RADIO_EDGE); if (!nl_edge) { rcu_read_unlock(); nla_nest_cancel(skb, nl_edges); @@ -428,9 +430,7 @@ static int hwsim_new_edge_nl(struct sk_buff *msg, struct genl_info *info) !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE]) return -EINVAL; - if (nla_parse_nested(edge_attrs, 
MAC802154_HWSIM_EDGE_ATTR_MAX, - info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE], - hwsim_edge_policy, NULL)) + if (nla_parse_nested_deprecated(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX, info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE], hwsim_edge_policy, NULL)) return -EINVAL; if (!edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID]) @@ -492,9 +492,7 @@ static int hwsim_del_edge_nl(struct sk_buff *msg, struct genl_info *info) !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE]) return -EINVAL; - if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX, - info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE], - hwsim_edge_policy, NULL)) + if (nla_parse_nested_deprecated(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX, info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE], hwsim_edge_policy, NULL)) return -EINVAL; if (!edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID]) @@ -542,9 +540,7 @@ static int hwsim_set_edge_lqi(struct sk_buff *msg, struct genl_info *info) !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE]) return -EINVAL; - if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX, - info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE], - hwsim_edge_policy, NULL)) + if (nla_parse_nested_deprecated(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX, info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE], hwsim_edge_policy, NULL)) return -EINVAL; if (!edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID] && @@ -598,37 +594,37 @@ static const struct nla_policy hwsim_genl_policy[MAC802154_HWSIM_ATTR_MAX + 1] = static const struct genl_ops hwsim_nl_ops[] = { { .cmd = MAC802154_HWSIM_CMD_NEW_RADIO, - .policy = hwsim_genl_policy, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = hwsim_new_radio_nl, .flags = GENL_UNS_ADMIN_PERM, }, { .cmd = MAC802154_HWSIM_CMD_DEL_RADIO, - .policy = hwsim_genl_policy, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = hwsim_del_radio_nl, .flags = GENL_UNS_ADMIN_PERM, }, { .cmd = MAC802154_HWSIM_CMD_GET_RADIO, - .policy = hwsim_genl_policy, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = hwsim_get_radio_nl, .dumpit = hwsim_dump_radio_nl, }, { .cmd = MAC802154_HWSIM_CMD_NEW_EDGE, - .policy = hwsim_genl_policy, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = hwsim_new_edge_nl, .flags = GENL_UNS_ADMIN_PERM, }, { .cmd = MAC802154_HWSIM_CMD_DEL_EDGE, - .policy = hwsim_genl_policy, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = hwsim_del_edge_nl, .flags = GENL_UNS_ADMIN_PERM, }, { .cmd = MAC802154_HWSIM_CMD_SET_EDGE, - .policy = hwsim_genl_policy, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = hwsim_set_edge_lqi, .flags = GENL_UNS_ADMIN_PERM, }, @@ -638,6 +634,7 @@ static struct genl_family hwsim_genl_family __ro_after_init = { .name = "MAC802154_HWSIM", .version = 1, .maxattr = MAC802154_HWSIM_ATTR_MAX, + .policy = hwsim_genl_policy, .module = THIS_MODULE, .ops = hwsim_nl_ops, .n_ops = ARRAY_SIZE(hwsim_nl_ops), diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index 2df7f60fe052..857e4bf99883 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -128,21 +128,9 @@ static u32 always_on(struct net_device *dev) return 1; } -static int loopback_get_ts_info(struct net_device *netdev, - struct ethtool_ts_info *ts_info) -{ - ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE; - - ts_info->phc_index = -1; - - return 0; -}; - static const struct ethtool_ops loopback_ethtool_ops = { 
.get_link = always_on, - .get_ts_info = loopback_get_ts_info, + .get_ts_info = ethtool_op_get_ts_info, }; static int loopback_dev_init(struct net_device *dev) diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 64a982563d59..009b2902c9d3 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -1611,9 +1611,7 @@ static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa) if (!attrs[MACSEC_ATTR_SA_CONFIG]) return -EINVAL; - if (nla_parse_nested(tb_sa, MACSEC_SA_ATTR_MAX, - attrs[MACSEC_ATTR_SA_CONFIG], - macsec_genl_sa_policy, NULL)) + if (nla_parse_nested_deprecated(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG], macsec_genl_sa_policy, NULL)) return -EINVAL; return 0; @@ -1624,9 +1622,7 @@ static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc) if (!attrs[MACSEC_ATTR_RXSC_CONFIG]) return -EINVAL; - if (nla_parse_nested(tb_rxsc, MACSEC_RXSC_ATTR_MAX, - attrs[MACSEC_ATTR_RXSC_CONFIG], - macsec_genl_rxsc_policy, NULL)) + if (nla_parse_nested_deprecated(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG], macsec_genl_rxsc_policy, NULL)) return -EINVAL; return 0; @@ -2175,8 +2171,9 @@ static int copy_tx_sa_stats(struct sk_buff *skb, return 0; } -static int copy_rx_sa_stats(struct sk_buff *skb, - struct macsec_rx_sa_stats __percpu *pstats) +static noinline_for_stack int +copy_rx_sa_stats(struct sk_buff *skb, + struct macsec_rx_sa_stats __percpu *pstats) { struct macsec_rx_sa_stats sum = {0, }; int cpu; @@ -2201,8 +2198,8 @@ static int copy_rx_sa_stats(struct sk_buff *skb, return 0; } -static int copy_rx_sc_stats(struct sk_buff *skb, - struct pcpu_rx_sc_stats __percpu *pstats) +static noinline_for_stack int +copy_rx_sc_stats(struct sk_buff *skb, struct pcpu_rx_sc_stats __percpu *pstats) { struct macsec_rx_sc_stats sum = {0, }; int cpu; @@ -2265,8 +2262,8 @@ static int copy_rx_sc_stats(struct sk_buff *skb, return 0; } -static int copy_tx_sc_stats(struct sk_buff *skb, - struct pcpu_tx_sc_stats __percpu *pstats) +static noinline_for_stack int +copy_tx_sc_stats(struct sk_buff *skb, struct pcpu_tx_sc_stats __percpu *pstats) { struct macsec_tx_sc_stats sum = {0, }; int cpu; @@ -2305,8 +2302,8 @@ static int copy_tx_sc_stats(struct sk_buff *skb, return 0; } -static int copy_secy_stats(struct sk_buff *skb, - struct pcpu_secy_stats __percpu *pstats) +static noinline_for_stack int +copy_secy_stats(struct sk_buff *skb, struct pcpu_secy_stats __percpu *pstats) { struct macsec_dev_stats sum = {0, }; int cpu; @@ -2364,7 +2361,8 @@ static int copy_secy_stats(struct sk_buff *skb, static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb) { struct macsec_tx_sc *tx_sc = &secy->tx_sc; - struct nlattr *secy_nest = nla_nest_start(skb, MACSEC_ATTR_SECY); + struct nlattr *secy_nest = nla_nest_start_noflag(skb, + MACSEC_ATTR_SECY); u64 csid; if (!secy_nest) @@ -2410,8 +2408,9 @@ cancel: return 1; } -static int dump_secy(struct macsec_secy *secy, struct net_device *dev, - struct sk_buff *skb, struct netlink_callback *cb) +static noinline_for_stack int +dump_secy(struct macsec_secy *secy, struct net_device *dev, + struct sk_buff *skb, struct netlink_callback *cb) { struct macsec_rx_sc *rx_sc; struct macsec_tx_sc *tx_sc = &secy->tx_sc; @@ -2433,7 +2432,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev, if (nla_put_secy(secy, skb)) goto nla_put_failure; - attr = nla_nest_start(skb, MACSEC_ATTR_TXSC_STATS); + attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS); if (!attr) goto nla_put_failure; if 
(copy_tx_sc_stats(skb, tx_sc->stats)) { @@ -2442,7 +2441,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev, } nla_nest_end(skb, attr); - attr = nla_nest_start(skb, MACSEC_ATTR_SECY_STATS); + attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS); if (!attr) goto nla_put_failure; if (copy_secy_stats(skb, macsec_priv(dev)->stats)) { @@ -2451,7 +2450,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev, } nla_nest_end(skb, attr); - txsa_list = nla_nest_start(skb, MACSEC_ATTR_TXSA_LIST); + txsa_list = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSA_LIST); if (!txsa_list) goto nla_put_failure; for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) { @@ -2461,7 +2460,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev, if (!tx_sa) continue; - txsa_nest = nla_nest_start(skb, j++); + txsa_nest = nla_nest_start_noflag(skb, j++); if (!txsa_nest) { nla_nest_cancel(skb, txsa_list); goto nla_put_failure; @@ -2476,7 +2475,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev, goto nla_put_failure; } - attr = nla_nest_start(skb, MACSEC_SA_ATTR_STATS); + attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS); if (!attr) { nla_nest_cancel(skb, txsa_nest); nla_nest_cancel(skb, txsa_list); @@ -2494,7 +2493,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev, } nla_nest_end(skb, txsa_list); - rxsc_list = nla_nest_start(skb, MACSEC_ATTR_RXSC_LIST); + rxsc_list = nla_nest_start_noflag(skb, MACSEC_ATTR_RXSC_LIST); if (!rxsc_list) goto nla_put_failure; @@ -2502,7 +2501,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev, for_each_rxsc_rtnl(secy, rx_sc) { int k; struct nlattr *rxsa_list; - struct nlattr *rxsc_nest = nla_nest_start(skb, j++); + struct nlattr *rxsc_nest = nla_nest_start_noflag(skb, j++); if (!rxsc_nest) { nla_nest_cancel(skb, rxsc_list); @@ -2517,7 +2516,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev, goto nla_put_failure; } - attr = nla_nest_start(skb, MACSEC_RXSC_ATTR_STATS); + attr = nla_nest_start_noflag(skb, MACSEC_RXSC_ATTR_STATS); if (!attr) { nla_nest_cancel(skb, rxsc_nest); nla_nest_cancel(skb, rxsc_list); @@ -2531,7 +2530,8 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev, } nla_nest_end(skb, attr); - rxsa_list = nla_nest_start(skb, MACSEC_RXSC_ATTR_SA_LIST); + rxsa_list = nla_nest_start_noflag(skb, + MACSEC_RXSC_ATTR_SA_LIST); if (!rxsa_list) { nla_nest_cancel(skb, rxsc_nest); nla_nest_cancel(skb, rxsc_list); @@ -2545,7 +2545,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev, if (!rx_sa) continue; - rxsa_nest = nla_nest_start(skb, k++); + rxsa_nest = nla_nest_start_noflag(skb, k++); if (!rxsa_nest) { nla_nest_cancel(skb, rxsa_list); nla_nest_cancel(skb, rxsc_nest); @@ -2553,7 +2553,8 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev, goto nla_put_failure; } - attr = nla_nest_start(skb, MACSEC_SA_ATTR_STATS); + attr = nla_nest_start_noflag(skb, + MACSEC_SA_ATTR_STATS); if (!attr) { nla_nest_cancel(skb, rxsa_list); nla_nest_cancel(skb, rxsc_nest); @@ -2636,61 +2637,61 @@ done: static const struct genl_ops macsec_genl_ops[] = { { .cmd = MACSEC_CMD_GET_TXSC, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .dumpit = macsec_dump_txsc, - .policy = macsec_genl_policy, }, { .cmd = MACSEC_CMD_ADD_RXSC, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = macsec_add_rxsc, - .policy = macsec_genl_policy, .flags = GENL_ADMIN_PERM, }, { .cmd 
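The nesting changes running through dump_secy() above swap nla_nest_start() for nla_nest_start_noflag(). With the strict-validation series, nla_nest_start() now ORs NLA_F_NESTED into the attribute type, so families whose wire format never carried that flag must move to the _noflag variant to stay ABI-compatible. The basic shape, with placeholder attribute ids:

#include <net/netlink.h>

static int demo_fill_list(struct sk_buff *skb)
{
	/* _noflag emits the attribute type exactly as given, without
	 * NLA_F_NESTED, preserving the family's existing wire format. */
	struct nlattr *nest = nla_nest_start_noflag(skb, 1 /* placeholder */);

	if (!nest)
		return -EMSGSIZE;

	if (nla_put_u32(skb, 2 /* placeholder */, 42)) {
		nla_nest_cancel(skb, nest);	/* roll back a partial nest */
		return -EMSGSIZE;
	}

	nla_nest_end(skb, nest);
	return 0;
}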
= MACSEC_CMD_DEL_RXSC, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = macsec_del_rxsc, - .policy = macsec_genl_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = MACSEC_CMD_UPD_RXSC, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = macsec_upd_rxsc, - .policy = macsec_genl_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = MACSEC_CMD_ADD_TXSA, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = macsec_add_txsa, - .policy = macsec_genl_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = MACSEC_CMD_DEL_TXSA, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = macsec_del_txsa, - .policy = macsec_genl_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = MACSEC_CMD_UPD_TXSA, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = macsec_upd_txsa, - .policy = macsec_genl_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = MACSEC_CMD_ADD_RXSA, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = macsec_add_rxsa, - .policy = macsec_genl_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = MACSEC_CMD_DEL_RXSA, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = macsec_del_rxsa, - .policy = macsec_genl_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = MACSEC_CMD_UPD_RXSA, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = macsec_upd_rxsa, - .policy = macsec_genl_policy, .flags = GENL_ADMIN_PERM, }, }; @@ -2700,6 +2701,7 @@ static struct genl_family macsec_fam __ro_after_init = { .hdrsize = 0, .version = MACSEC_GENL_VERSION, .maxattr = MACSEC_ATTR_MAX, + .policy = macsec_genl_policy, .netnsok = true, .module = THIS_MODULE, .ops = macsec_genl_ops, diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 0c0f105657d3..b395423b19bc 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -24,6 +24,7 @@ #include <linux/notifier.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> +#include <linux/net_tstamp.h> #include <linux/ethtool.h> #include <linux/if_arp.h> #include <linux/if_vlan.h> @@ -34,6 +35,7 @@ #include <net/rtnetlink.h> #include <net/xfrm.h> #include <linux/netpoll.h> +#include <linux/phy.h> #define MACVLAN_HASH_BITS 8 #define MACVLAN_HASH_SIZE (1<<MACVLAN_HASH_BITS) @@ -822,6 +824,30 @@ static int macvlan_change_mtu(struct net_device *dev, int new_mtu) return 0; } +static int macvlan_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct net_device *real_dev = macvlan_dev_real_dev(dev); + const struct net_device_ops *ops = real_dev->netdev_ops; + struct ifreq ifrr; + int err = -EOPNOTSUPP; + + strncpy(ifrr.ifr_name, real_dev->name, IFNAMSIZ); + ifrr.ifr_ifru = ifr->ifr_ifru; + + switch (cmd) { + case SIOCSHWTSTAMP: + case SIOCGHWTSTAMP: + if (netif_device_present(real_dev) && ops->ndo_do_ioctl) + err = ops->ndo_do_ioctl(real_dev, &ifrr, cmd); + break; + } + + if (!err) + ifr->ifr_ifru = ifrr.ifr_ifru; + + return err; +} + /* * macvlan network devices have devices nesting below it and are a special * "super class" of normal network devices; split their locks off into a @@ -1020,6 +1046,26 @@ static int macvlan_ethtool_get_link_ksettings(struct net_device *dev, return __ethtool_get_link_ksettings(vlan->lowerdev, cmd); } +static int macvlan_ethtool_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct net_device *real_dev = macvlan_dev_real_dev(dev); + const struct ethtool_ops *ops = real_dev->ethtool_ops; + struct phy_device *phydev = real_dev->phydev; + + if (phydev && 
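The macvlan_do_ioctl() addition above is the timestamping-passthrough pattern for stacked devices: a virtual device has no clock of its own, so SIOCSHWTSTAMP/SIOCGHWTSTAMP are forwarded to the lower device, with the ifreq rewritten to carry the lower device's name. A trimmed sketch of the same idea for a generic upper device; demo_get_lower_dev() is an assumed helper, not a kernel API:

#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int demo_upper_do_ioctl(struct net_device *dev, struct ifreq *ifr,
			       int cmd)
{
	struct net_device *lower = demo_get_lower_dev(dev); /* assumed */
	const struct net_device_ops *ops = lower->netdev_ops;
	struct ifreq ifrr;
	int err = -EOPNOTSUPP;

	/* Re-aim the request at the lower device, keeping the user payload. */
	strncpy(ifrr.ifr_name, lower->name, IFNAMSIZ);
	ifrr.ifr_ifru = ifr->ifr_ifru;

	switch (cmd) {
	case SIOCSHWTSTAMP:
	case SIOCGHWTSTAMP:
		if (netif_device_present(lower) && ops->ndo_do_ioctl)
			err = ops->ndo_do_ioctl(lower, &ifrr, cmd);
		break;
	}

	if (!err)	/* copy any updated config back for the caller */
		ifr->ifr_ifru = ifrr.ifr_ifru;

	return err;
}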
phydev->drv && phydev->drv->ts_info) { + return phydev->drv->ts_info(phydev, info); + } else if (ops->get_ts_info) { + return ops->get_ts_info(real_dev, info); + } else { + info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + info->phc_index = -1; + } + + return 0; +} + static netdev_features_t macvlan_fix_features(struct net_device *dev, netdev_features_t features) { @@ -1094,6 +1140,7 @@ static const struct ethtool_ops macvlan_ethtool_ops = { .get_link = ethtool_op_get_link, .get_link_ksettings = macvlan_ethtool_get_link_ksettings, .get_drvinfo = macvlan_ethtool_get_drvinfo, + .get_ts_info = macvlan_ethtool_get_ts_info, }; static const struct net_device_ops macvlan_netdev_ops = { @@ -1103,6 +1150,7 @@ static const struct net_device_ops macvlan_netdev_ops = { .ndo_stop = macvlan_stop, .ndo_start_xmit = macvlan_start_xmit, .ndo_change_mtu = macvlan_change_mtu, + .ndo_do_ioctl = macvlan_do_ioctl, .ndo_fix_features = macvlan_fix_features, .ndo_change_rx_flags = macvlan_change_rx_flags, .ndo_set_mac_address = macvlan_set_mac_address, @@ -1576,7 +1624,7 @@ static int macvlan_fill_info(struct sk_buff *skb, if (nla_put_u32(skb, IFLA_MACVLAN_MACADDR_COUNT, vlan->macaddr_count)) goto nla_put_failure; if (vlan->macaddr_count > 0) { - nest = nla_nest_start(skb, IFLA_MACVLAN_MACADDR_DATA); + nest = nla_nest_start_noflag(skb, IFLA_MACVLAN_MACADDR_DATA); if (nest == NULL) goto nla_put_failure; diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c index ed1166adaa2f..b16a1221d19b 100644 --- a/drivers/net/net_failover.c +++ b/drivers/net/net_failover.c @@ -115,8 +115,7 @@ static netdev_tx_t net_failover_start_xmit(struct sk_buff *skb, static u16 net_failover_select_queue(struct net_device *dev, struct sk_buff *skb, - struct net_device *sb_dev, - select_queue_fallback_t fallback) + struct net_device *sb_dev) { struct net_failover_info *nfo_info = netdev_priv(dev); struct net_device *primary_dev; @@ -127,10 +126,9 @@ static u16 net_failover_select_queue(struct net_device *dev, const struct net_device_ops *ops = primary_dev->netdev_ops; if (ops->ndo_select_queue) - txq = ops->ndo_select_queue(primary_dev, skb, - sb_dev, fallback); + txq = ops->ndo_select_queue(primary_dev, skb, sb_dev); else - txq = fallback(primary_dev, skb, NULL); + txq = netdev_pick_tx(primary_dev, skb, NULL); qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping; diff --git a/drivers/net/netdevsim/Makefile b/drivers/net/netdevsim/Makefile index 0fee1d06c084..09f1315d2f2a 100644 --- a/drivers/net/netdevsim/Makefile +++ b/drivers/net/netdevsim/Makefile @@ -3,17 +3,13 @@ obj-$(CONFIG_NETDEVSIM) += netdevsim.o netdevsim-objs := \ - netdev.o \ + netdev.o dev.o fib.o bus.o ifeq ($(CONFIG_BPF_SYSCALL),y) netdevsim-objs += \ bpf.o endif -ifneq ($(CONFIG_NET_DEVLINK),) -netdevsim-objs += devlink.o fib.o -endif - ifneq ($(CONFIG_XFRM_OFFLOAD),) netdevsim-objs += ipsec.o endif diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c index f92c43453ec6..2b74425822ab 100644 --- a/drivers/net/netdevsim/bpf.c +++ b/drivers/net/netdevsim/bpf.c @@ -27,7 +27,7 @@ bpf_verifier_log_write(env, "[netdevsim] " fmt, ##__VA_ARGS__) struct nsim_bpf_bound_prog { - struct netdevsim *ns; + struct nsim_dev *nsim_dev; struct bpf_prog *prog; struct dentry *ddir; const char *state; @@ -65,8 +65,8 @@ nsim_bpf_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn) struct nsim_bpf_bound_prog *state; state = env->prog->aux->offload->dev_priv; - if (state->ns->bpf_bind_verifier_delay && 
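The net_failover change above reflects an ndo API cleanup carried in this tree: ndo_select_queue no longer receives a select_queue_fallback_t, and drivers that used to call fallback(dev, skb, NULL) now call netdev_pick_tx() directly. For a driver with no queue-selection policy of its own, the minimal form would be something like:

#include <linux/netdevice.h>

static u16 demo_select_queue(struct net_device *dev, struct sk_buff *skb,
			     struct net_device *sb_dev)
{
	/* netdev_pick_tx() is the core's default policy, which the
	 * removed "fallback" callback used to point at. */
	return netdev_pick_tx(dev, skb, sb_dev);
}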
!insn_idx) - msleep(state->ns->bpf_bind_verifier_delay); + if (state->nsim_dev->bpf_bind_verifier_delay && !insn_idx) + msleep(state->nsim_dev->bpf_bind_verifier_delay); if (insn_idx == env->prog->len - 1) pr_vlog(env, "Hello from netdevsim!\n"); @@ -213,7 +213,8 @@ nsim_xdp_set_prog(struct netdevsim *ns, struct netdev_bpf *bpf, return 0; } -static int nsim_bpf_create_prog(struct netdevsim *ns, struct bpf_prog *prog) +static int nsim_bpf_create_prog(struct nsim_dev *nsim_dev, + struct bpf_prog *prog) { struct nsim_bpf_bound_prog *state; char name[16]; @@ -222,13 +223,13 @@ static int nsim_bpf_create_prog(struct netdevsim *ns, struct bpf_prog *prog) if (!state) return -ENOMEM; - state->ns = ns; + state->nsim_dev = nsim_dev; state->prog = prog; state->state = "verify"; /* Program id is not populated yet when we create the state. */ - sprintf(name, "%u", ns->sdev->prog_id_gen++); - state->ddir = debugfs_create_dir(name, ns->sdev->ddir_bpf_bound_progs); + sprintf(name, "%u", nsim_dev->prog_id_gen++); + state->ddir = debugfs_create_dir(name, nsim_dev->ddir_bpf_bound_progs); if (IS_ERR_OR_NULL(state->ddir)) { kfree(state); return -ENOMEM; @@ -239,7 +240,7 @@ static int nsim_bpf_create_prog(struct netdevsim *ns, struct bpf_prog *prog) &state->state, &nsim_bpf_string_fops); debugfs_create_bool("loaded", 0400, state->ddir, &state->is_loaded); - list_add_tail(&state->l, &ns->sdev->bpf_bound_progs); + list_add_tail(&state->l, &nsim_dev->bpf_bound_progs); prog->aux->offload->dev_priv = state; @@ -248,12 +249,13 @@ static int nsim_bpf_create_prog(struct netdevsim *ns, struct bpf_prog *prog) static int nsim_bpf_verifier_prep(struct bpf_prog *prog) { - struct netdevsim *ns = bpf_offload_dev_priv(prog->aux->offload->offdev); + struct nsim_dev *nsim_dev = + bpf_offload_dev_priv(prog->aux->offload->offdev); - if (!ns->bpf_bind_accept) + if (!nsim_dev->bpf_bind_accept) return -EOPNOTSUPP; - return nsim_bpf_create_prog(ns, prog); + return nsim_bpf_create_prog(nsim_dev, prog); } static int nsim_bpf_translate(struct bpf_prog *prog) @@ -512,7 +514,7 @@ nsim_bpf_map_alloc(struct netdevsim *ns, struct bpf_offloaded_map *offmap) } offmap->dev_ops = &nsim_bpf_map_ops; - list_add_tail(&nmap->l, &ns->sdev->bpf_bound_maps); + list_add_tail(&nmap->l, &ns->nsim_dev->bpf_bound_maps); return 0; @@ -576,61 +578,68 @@ int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf) } } -int nsim_bpf_init(struct netdevsim *ns) +int nsim_bpf_dev_init(struct nsim_dev *nsim_dev) { int err; - if (ns->sdev->refcnt == 1) { - INIT_LIST_HEAD(&ns->sdev->bpf_bound_progs); - INIT_LIST_HEAD(&ns->sdev->bpf_bound_maps); + INIT_LIST_HEAD(&nsim_dev->bpf_bound_progs); + INIT_LIST_HEAD(&nsim_dev->bpf_bound_maps); - ns->sdev->ddir_bpf_bound_progs = - debugfs_create_dir("bpf_bound_progs", ns->sdev->ddir); - if (IS_ERR_OR_NULL(ns->sdev->ddir_bpf_bound_progs)) - return -ENOMEM; + nsim_dev->ddir_bpf_bound_progs = debugfs_create_dir("bpf_bound_progs", + nsim_dev->ddir); + if (IS_ERR_OR_NULL(nsim_dev->ddir_bpf_bound_progs)) + return -ENOMEM; - ns->sdev->bpf_dev = bpf_offload_dev_create(&nsim_bpf_dev_ops, - ns); - err = PTR_ERR_OR_ZERO(ns->sdev->bpf_dev); - if (err) - return err; - } + nsim_dev->bpf_dev = bpf_offload_dev_create(&nsim_bpf_dev_ops, nsim_dev); + err = PTR_ERR_OR_ZERO(nsim_dev->bpf_dev); + if (err) + return err; + + nsim_dev->bpf_bind_accept = true; + debugfs_create_bool("bpf_bind_accept", 0600, nsim_dev->ddir, + &nsim_dev->bpf_bind_accept); + debugfs_create_u32("bpf_bind_verifier_delay", 0600, nsim_dev->ddir, + 
&nsim_dev->bpf_bind_verifier_delay); + return 0; +} + +void nsim_bpf_dev_exit(struct nsim_dev *nsim_dev) +{ + WARN_ON(!list_empty(&nsim_dev->bpf_bound_progs)); + WARN_ON(!list_empty(&nsim_dev->bpf_bound_maps)); + bpf_offload_dev_destroy(nsim_dev->bpf_dev); +} + +int nsim_bpf_init(struct netdevsim *ns) +{ + struct dentry *ddir = ns->nsim_dev_port->ddir; + int err; - err = bpf_offload_dev_netdev_register(ns->sdev->bpf_dev, ns->netdev); + err = bpf_offload_dev_netdev_register(ns->nsim_dev->bpf_dev, + ns->netdev); if (err) - goto err_destroy_bdev; + return err; - debugfs_create_u32("bpf_offloaded_id", 0400, ns->ddir, + debugfs_create_u32("bpf_offloaded_id", 0400, ddir, &ns->bpf_offloaded_id); - ns->bpf_bind_accept = true; - debugfs_create_bool("bpf_bind_accept", 0600, ns->ddir, - &ns->bpf_bind_accept); - debugfs_create_u32("bpf_bind_verifier_delay", 0600, ns->ddir, - &ns->bpf_bind_verifier_delay); - ns->bpf_tc_accept = true; - debugfs_create_bool("bpf_tc_accept", 0600, ns->ddir, + debugfs_create_bool("bpf_tc_accept", 0600, ddir, &ns->bpf_tc_accept); - debugfs_create_bool("bpf_tc_non_bound_accept", 0600, ns->ddir, + debugfs_create_bool("bpf_tc_non_bound_accept", 0600, ddir, &ns->bpf_tc_non_bound_accept); ns->bpf_xdpdrv_accept = true; - debugfs_create_bool("bpf_xdpdrv_accept", 0600, ns->ddir, + debugfs_create_bool("bpf_xdpdrv_accept", 0600, ddir, &ns->bpf_xdpdrv_accept); ns->bpf_xdpoffload_accept = true; - debugfs_create_bool("bpf_xdpoffload_accept", 0600, ns->ddir, + debugfs_create_bool("bpf_xdpoffload_accept", 0600, ddir, &ns->bpf_xdpoffload_accept); ns->bpf_map_accept = true; - debugfs_create_bool("bpf_map_accept", 0600, ns->ddir, + debugfs_create_bool("bpf_map_accept", 0600, ddir, &ns->bpf_map_accept); return 0; - -err_destroy_bdev: - if (ns->sdev->refcnt == 1) - bpf_offload_dev_destroy(ns->sdev->bpf_dev); - return err; } void nsim_bpf_uninit(struct netdevsim *ns) @@ -638,11 +647,5 @@ void nsim_bpf_uninit(struct netdevsim *ns) WARN_ON(ns->xdp.prog); WARN_ON(ns->xdp_hw.prog); WARN_ON(ns->bpf_offloaded); - bpf_offload_dev_netdev_unregister(ns->sdev->bpf_dev, ns->netdev); - - if (ns->sdev->refcnt == 1) { - WARN_ON(!list_empty(&ns->sdev->bpf_bound_progs)); - WARN_ON(!list_empty(&ns->sdev->bpf_bound_maps)); - bpf_offload_dev_destroy(ns->sdev->bpf_dev); - } + bpf_offload_dev_netdev_unregister(ns->nsim_dev->bpf_dev, ns->netdev); } diff --git a/drivers/net/netdevsim/bus.c b/drivers/net/netdevsim/bus.c new file mode 100644 index 000000000000..fd68eeac574c --- /dev/null +++ b/drivers/net/netdevsim/bus.c @@ -0,0 +1,341 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2017 Netronome Systems, Inc. + * Copyright (C) 2019 Mellanox Technologies. 
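The bpf.c reshuffle above splits BPF offload state between the device and its ports: bpf_offload_dev_create() is now called once per simulated device in nsim_bpf_dev_init(), and each port's netdev merely attaches to that shared offload device in nsim_bpf_init(). Roughly, under hypothetical demo_* names (demo_bpf_dev_ops is an assumed struct bpf_prog_offload_ops, not shown):

#include <linux/bpf.h>

/* One offload device per "ASIC", created once ... */
struct demo_asic {
	struct bpf_offload_dev *bpf_dev;
};

static int demo_asic_bpf_init(struct demo_asic *asic)
{
	asic->bpf_dev = bpf_offload_dev_create(&demo_bpf_dev_ops, asic);
	return PTR_ERR_OR_ZERO(asic->bpf_dev);
}

/* ... and each port netdev registered against it afterwards. */
static int demo_port_bpf_init(struct demo_asic *asic, struct net_device *netdev)
{
	return bpf_offload_dev_netdev_register(asic->bpf_dev, netdev);
}

static void demo_port_bpf_exit(struct demo_asic *asic, struct net_device *netdev)
{
	bpf_offload_dev_netdev_unregister(asic->bpf_dev, netdev);
}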
All rights reserved + */ + +#include <linux/device.h> +#include <linux/idr.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/rtnetlink.h> +#include <linux/slab.h> +#include <linux/sysfs.h> + +#include "netdevsim.h" + +static DEFINE_IDA(nsim_bus_dev_ids); +static LIST_HEAD(nsim_bus_dev_list); +static DEFINE_MUTEX(nsim_bus_dev_list_lock); + +static struct nsim_bus_dev *to_nsim_bus_dev(struct device *dev) +{ + return container_of(dev, struct nsim_bus_dev, dev); +} + +static int nsim_bus_dev_vfs_enable(struct nsim_bus_dev *nsim_bus_dev, + unsigned int num_vfs) +{ + nsim_bus_dev->vfconfigs = kcalloc(num_vfs, + sizeof(struct nsim_vf_config), + GFP_KERNEL); + if (!nsim_bus_dev->vfconfigs) + return -ENOMEM; + nsim_bus_dev->num_vfs = num_vfs; + + return 0; +} + +static void nsim_bus_dev_vfs_disable(struct nsim_bus_dev *nsim_bus_dev) +{ + kfree(nsim_bus_dev->vfconfigs); + nsim_bus_dev->vfconfigs = NULL; + nsim_bus_dev->num_vfs = 0; +} + +static ssize_t +nsim_bus_dev_numvfs_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev); + unsigned int num_vfs; + int ret; + + ret = kstrtouint(buf, 0, &num_vfs); + if (ret) + return ret; + + rtnl_lock(); + if (nsim_bus_dev->num_vfs == num_vfs) + goto exit_good; + if (nsim_bus_dev->num_vfs && num_vfs) { + ret = -EBUSY; + goto exit_unlock; + } + + if (num_vfs) { + ret = nsim_bus_dev_vfs_enable(nsim_bus_dev, num_vfs); + if (ret) + goto exit_unlock; + } else { + nsim_bus_dev_vfs_disable(nsim_bus_dev); + } +exit_good: + ret = count; +exit_unlock: + rtnl_unlock(); + + return ret; +} + +static ssize_t +nsim_bus_dev_numvfs_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev); + + return sprintf(buf, "%u\n", nsim_bus_dev->num_vfs); +} + +static struct device_attribute nsim_bus_dev_numvfs_attr = + __ATTR(sriov_numvfs, 0664, nsim_bus_dev_numvfs_show, + nsim_bus_dev_numvfs_store); + +static ssize_t +new_port_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev); + unsigned int port_index; + int ret; + + ret = kstrtouint(buf, 0, &port_index); + if (ret) + return ret; + ret = nsim_dev_port_add(nsim_bus_dev, port_index); + return ret ? ret : count; +} + +static struct device_attribute nsim_bus_dev_new_port_attr = __ATTR_WO(new_port); + +static ssize_t +del_port_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev); + unsigned int port_index; + int ret; + + ret = kstrtouint(buf, 0, &port_index); + if (ret) + return ret; + ret = nsim_dev_port_del(nsim_bus_dev, port_index); + return ret ? 
ret : count; +} + +static struct device_attribute nsim_bus_dev_del_port_attr = __ATTR_WO(del_port); + +static struct attribute *nsim_bus_dev_attrs[] = { + &nsim_bus_dev_numvfs_attr.attr, + &nsim_bus_dev_new_port_attr.attr, + &nsim_bus_dev_del_port_attr.attr, + NULL, +}; + +static const struct attribute_group nsim_bus_dev_attr_group = { + .attrs = nsim_bus_dev_attrs, +}; + +static const struct attribute_group *nsim_bus_dev_attr_groups[] = { + &nsim_bus_dev_attr_group, + NULL, +}; + +static void nsim_bus_dev_release(struct device *dev) +{ + struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev); + + nsim_bus_dev_vfs_disable(nsim_bus_dev); +} + +static struct device_type nsim_bus_dev_type = { + .groups = nsim_bus_dev_attr_groups, + .release = nsim_bus_dev_release, +}; + +static struct nsim_bus_dev * +nsim_bus_dev_new(unsigned int id, unsigned int port_count); + +static ssize_t +new_device_store(struct bus_type *bus, const char *buf, size_t count) +{ + struct nsim_bus_dev *nsim_bus_dev; + unsigned int port_count; + unsigned int id; + int err; + + err = sscanf(buf, "%u %u", &id, &port_count); + switch (err) { + case 1: + port_count = 1; + /* fall through */ + case 2: + if (id > INT_MAX) { + pr_err("Value of \"id\" is too big.\n"); + return -EINVAL; + } + break; + default: + pr_err("Format for adding new device is \"id port_count\" (uint uint).\n"); + return -EINVAL; + } + nsim_bus_dev = nsim_bus_dev_new(id, port_count); + if (IS_ERR(nsim_bus_dev)) + return PTR_ERR(nsim_bus_dev); + + mutex_lock(&nsim_bus_dev_list_lock); + list_add_tail(&nsim_bus_dev->list, &nsim_bus_dev_list); + mutex_unlock(&nsim_bus_dev_list_lock); + + return count; +} +static BUS_ATTR_WO(new_device); + +static void nsim_bus_dev_del(struct nsim_bus_dev *nsim_bus_dev); + +static ssize_t +del_device_store(struct bus_type *bus, const char *buf, size_t count) +{ + struct nsim_bus_dev *nsim_bus_dev, *tmp; + unsigned int id; + int err; + + err = sscanf(buf, "%u", &id); + switch (err) { + case 1: + if (id > INT_MAX) { + pr_err("Value of \"id\" is too big.\n"); + return -EINVAL; + } + break; + default: + pr_err("Format for deleting device is \"id\" (uint).\n"); + return -EINVAL; + } + + err = -ENOENT; + mutex_lock(&nsim_bus_dev_list_lock); + list_for_each_entry_safe(nsim_bus_dev, tmp, &nsim_bus_dev_list, list) { + if (nsim_bus_dev->dev.id != id) + continue; + list_del(&nsim_bus_dev->list); + nsim_bus_dev_del(nsim_bus_dev); + err = 0; + break; + } + mutex_unlock(&nsim_bus_dev_list_lock); + return !err ? 
count : err; +} +static BUS_ATTR_WO(del_device); + +static struct attribute *nsim_bus_attrs[] = { + &bus_attr_new_device.attr, + &bus_attr_del_device.attr, + NULL +}; +ATTRIBUTE_GROUPS(nsim_bus); + +static int nsim_bus_probe(struct device *dev) +{ + struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev); + + return nsim_dev_probe(nsim_bus_dev); +} + +static int nsim_bus_remove(struct device *dev) +{ + struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev); + + nsim_dev_remove(nsim_bus_dev); + return 0; +} + +int nsim_num_vf(struct device *dev) +{ + struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev); + + return nsim_bus_dev->num_vfs; +} + +static struct bus_type nsim_bus = { + .name = DRV_NAME, + .dev_name = DRV_NAME, + .bus_groups = nsim_bus_groups, + .probe = nsim_bus_probe, + .remove = nsim_bus_remove, + .num_vf = nsim_num_vf, +}; + +static struct nsim_bus_dev * +nsim_bus_dev_new(unsigned int id, unsigned int port_count) +{ + struct nsim_bus_dev *nsim_bus_dev; + int err; + + nsim_bus_dev = kzalloc(sizeof(*nsim_bus_dev), GFP_KERNEL); + if (!nsim_bus_dev) + return ERR_PTR(-ENOMEM); + + err = ida_alloc_range(&nsim_bus_dev_ids, id, id, GFP_KERNEL); + if (err < 0) + goto err_nsim_bus_dev_free; + nsim_bus_dev->dev.id = err; + nsim_bus_dev->dev.bus = &nsim_bus; + nsim_bus_dev->dev.type = &nsim_bus_dev_type; + nsim_bus_dev->port_count = port_count; + + err = device_register(&nsim_bus_dev->dev); + if (err) + goto err_nsim_bus_dev_id_free; + return nsim_bus_dev; + +err_nsim_bus_dev_id_free: + ida_free(&nsim_bus_dev_ids, nsim_bus_dev->dev.id); +err_nsim_bus_dev_free: + kfree(nsim_bus_dev); + return ERR_PTR(err); +} + +static void nsim_bus_dev_del(struct nsim_bus_dev *nsim_bus_dev) +{ + device_unregister(&nsim_bus_dev->dev); + ida_free(&nsim_bus_dev_ids, nsim_bus_dev->dev.id); + kfree(nsim_bus_dev); +} + +static struct device_driver nsim_driver = { + .name = DRV_NAME, + .bus = &nsim_bus, + .owner = THIS_MODULE, +}; + +int nsim_bus_init(void) +{ + int err; + + err = bus_register(&nsim_bus); + if (err) + return err; + err = driver_register(&nsim_driver); + if (err) + goto err_bus_unregister; + return 0; + +err_bus_unregister: + bus_unregister(&nsim_bus); + return err; +} + +void nsim_bus_exit(void) +{ + struct nsim_bus_dev *nsim_bus_dev, *tmp; + + mutex_lock(&nsim_bus_dev_list_lock); + list_for_each_entry_safe(nsim_bus_dev, tmp, &nsim_bus_dev_list, list) { + list_del(&nsim_bus_dev->list); + nsim_bus_dev_del(nsim_bus_dev); + } + mutex_unlock(&nsim_bus_dev_list_lock); + driver_unregister(&nsim_driver); + bus_unregister(&nsim_bus); +} diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c new file mode 100644 index 000000000000..b509b941d5ca --- /dev/null +++ b/drivers/net/netdevsim/dev.c @@ -0,0 +1,447 @@ +/* + * Copyright (c) 2018 Cumulus Networks. All rights reserved. + * Copyright (c) 2018 David Ahern <dsa@cumulusnetworks.com> + * Copyright (c) 2019 Mellanox Technologies. All rights reserved. + * + * This software is licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree. + * + * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" + * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, + * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE + * OF THE PROGRAM IS WITH YOU. 
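The new bus.c above gives netdevsim a real (if virtual) bus: devices are created from user input through a new_device bus attribute, ids are kept unique with an IDA, and the driver core's probe/remove path hands each device to nsim_dev_probe()/nsim_dev_remove(). A condensed sketch of the moving parts (demo names, sysfs attributes and error paths trimmed):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/slab.h>

struct demo_device {
	struct device dev;
};

static DEFINE_IDA(demo_dev_ids);

static int demo_probe(struct device *dev)
{
	return 0;	/* hand off to the driver side here */
}

static int demo_remove(struct device *dev)
{
	return 0;
}

static struct bus_type demo_bus = {
	.name = "demo",
	.probe = demo_probe,
	.remove = demo_remove,
};

static struct demo_device *demo_device_new(unsigned int id)
{
	struct demo_device *d;
	int err;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return ERR_PTR(-ENOMEM);

	/* Reserve exactly the requested id; fails if it is taken. */
	err = ida_alloc_range(&demo_dev_ids, id, id, GFP_KERNEL);
	if (err < 0)
		goto err_free;
	d->dev.id = err;
	d->dev.bus = &demo_bus;

	err = device_register(&d->dev);
	if (err)
		goto err_ida;
	return d;

err_ida:
	ida_free(&demo_dev_ids, d->dev.id);
err_free:
	kfree(d);	/* mirrors the driver above; with a release()
			 * callback wired up, put_device() would free it */
	return ERR_PTR(err);
}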
SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME + * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + */ + +#include <linux/debugfs.h> +#include <linux/device.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/random.h> +#include <linux/rtnetlink.h> +#include <net/devlink.h> + +#include "netdevsim.h" + +static struct dentry *nsim_dev_ddir; + +static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev) +{ + char dev_ddir_name[16]; + + sprintf(dev_ddir_name, DRV_NAME "%u", nsim_dev->nsim_bus_dev->dev.id); + nsim_dev->ddir = debugfs_create_dir(dev_ddir_name, nsim_dev_ddir); + if (IS_ERR_OR_NULL(nsim_dev->ddir)) + return PTR_ERR_OR_ZERO(nsim_dev->ddir) ?: -EINVAL; + nsim_dev->ports_ddir = debugfs_create_dir("ports", nsim_dev->ddir); + if (IS_ERR_OR_NULL(nsim_dev->ports_ddir)) + return PTR_ERR_OR_ZERO(nsim_dev->ports_ddir) ?: -EINVAL; + return 0; +} + +static void nsim_dev_debugfs_exit(struct nsim_dev *nsim_dev) +{ + debugfs_remove_recursive(nsim_dev->ports_ddir); + debugfs_remove_recursive(nsim_dev->ddir); +} + +static int nsim_dev_port_debugfs_init(struct nsim_dev *nsim_dev, + struct nsim_dev_port *nsim_dev_port) +{ + char port_ddir_name[16]; + char dev_link_name[32]; + + sprintf(port_ddir_name, "%u", nsim_dev_port->port_index); + nsim_dev_port->ddir = debugfs_create_dir(port_ddir_name, + nsim_dev->ports_ddir); + if (IS_ERR_OR_NULL(nsim_dev_port->ddir)) + return -ENOMEM; + + sprintf(dev_link_name, "../../../" DRV_NAME "%u", + nsim_dev->nsim_bus_dev->dev.id); + debugfs_create_symlink("dev", nsim_dev_port->ddir, dev_link_name); + + return 0; +} + +static void nsim_dev_port_debugfs_exit(struct nsim_dev_port *nsim_dev_port) +{ + debugfs_remove_recursive(nsim_dev_port->ddir); +} + +static u64 nsim_dev_ipv4_fib_resource_occ_get(void *priv) +{ + struct nsim_dev *nsim_dev = priv; + + return nsim_fib_get_val(nsim_dev->fib_data, + NSIM_RESOURCE_IPV4_FIB, false); +} + +static u64 nsim_dev_ipv4_fib_rules_res_occ_get(void *priv) +{ + struct nsim_dev *nsim_dev = priv; + + return nsim_fib_get_val(nsim_dev->fib_data, + NSIM_RESOURCE_IPV4_FIB_RULES, false); +} + +static u64 nsim_dev_ipv6_fib_resource_occ_get(void *priv) +{ + struct nsim_dev *nsim_dev = priv; + + return nsim_fib_get_val(nsim_dev->fib_data, + NSIM_RESOURCE_IPV6_FIB, false); +} + +static u64 nsim_dev_ipv6_fib_rules_res_occ_get(void *priv) +{ + struct nsim_dev *nsim_dev = priv; + + return nsim_fib_get_val(nsim_dev->fib_data, + NSIM_RESOURCE_IPV6_FIB_RULES, false); +} + +static int nsim_dev_resources_register(struct devlink *devlink) +{ + struct nsim_dev *nsim_dev = devlink_priv(devlink); + struct devlink_resource_size_params params = { + .size_max = (u64)-1, + .size_granularity = 1, + .unit = DEVLINK_RESOURCE_UNIT_ENTRY + }; + int err; + u64 n; + + /* Resources for IPv4 */ + err = devlink_resource_register(devlink, "IPv4", (u64)-1, + NSIM_RESOURCE_IPV4, + DEVLINK_RESOURCE_ID_PARENT_TOP, + &params); + if (err) { + pr_err("Failed to register IPv4 top resource\n"); + goto out; + } + + n = nsim_fib_get_val(nsim_dev->fib_data, + NSIM_RESOURCE_IPV4_FIB, true); + err = devlink_resource_register(devlink, "fib", n, + NSIM_RESOURCE_IPV4_FIB, + NSIM_RESOURCE_IPV4, &params); + if (err) { + pr_err("Failed to register IPv4 FIB resource\n"); + return err; + } + + n = nsim_fib_get_val(nsim_dev->fib_data, + NSIM_RESOURCE_IPV4_FIB_RULES, true); + err = devlink_resource_register(devlink, "fib-rules", n, + NSIM_RESOURCE_IPV4_FIB_RULES, + NSIM_RESOURCE_IPV4, &params); + if (err) { + pr_err("Failed to register IPv4 FIB rules resource\n"); + return
err; + } + + /* Resources for IPv6 */ + err = devlink_resource_register(devlink, "IPv6", (u64)-1, + NSIM_RESOURCE_IPV6, + DEVLINK_RESOURCE_ID_PARENT_TOP, + &params); + if (err) { + pr_err("Failed to register IPv6 top resource\n"); + goto out; + } + + n = nsim_fib_get_val(nsim_dev->fib_data, + NSIM_RESOURCE_IPV6_FIB, true); + err = devlink_resource_register(devlink, "fib", n, + NSIM_RESOURCE_IPV6_FIB, + NSIM_RESOURCE_IPV6, &params); + if (err) { + pr_err("Failed to register IPv6 FIB resource\n"); + return err; + } + + n = nsim_fib_get_val(nsim_dev->fib_data, + NSIM_RESOURCE_IPV6_FIB_RULES, true); + err = devlink_resource_register(devlink, "fib-rules", n, + NSIM_RESOURCE_IPV6_FIB_RULES, + NSIM_RESOURCE_IPV6, &params); + if (err) { + pr_err("Failed to register IPv6 FIB rules resource\n"); + return err; + } + + devlink_resource_occ_get_register(devlink, + NSIM_RESOURCE_IPV4_FIB, + nsim_dev_ipv4_fib_resource_occ_get, + nsim_dev); + devlink_resource_occ_get_register(devlink, + NSIM_RESOURCE_IPV4_FIB_RULES, + nsim_dev_ipv4_fib_rules_res_occ_get, + nsim_dev); + devlink_resource_occ_get_register(devlink, + NSIM_RESOURCE_IPV6_FIB, + nsim_dev_ipv6_fib_resource_occ_get, + nsim_dev); + devlink_resource_occ_get_register(devlink, + NSIM_RESOURCE_IPV6_FIB_RULES, + nsim_dev_ipv6_fib_rules_res_occ_get, + nsim_dev); +out: + return err; +} + +static int nsim_dev_reload(struct devlink *devlink, + struct netlink_ext_ack *extack) +{ + struct nsim_dev *nsim_dev = devlink_priv(devlink); + enum nsim_resource_id res_ids[] = { + NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4_FIB_RULES, + NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6_FIB_RULES + }; + int i; + + for (i = 0; i < ARRAY_SIZE(res_ids); ++i) { + int err; + u64 val; + + err = devlink_resource_size_get(devlink, res_ids[i], &val); + if (!err) { + err = nsim_fib_set_max(nsim_dev->fib_data, + res_ids[i], val, extack); + if (err) + return err; + } + } + + return 0; +} + +static const struct devlink_ops nsim_dev_devlink_ops = { + .reload = nsim_dev_reload, +}; + +static struct nsim_dev * +nsim_dev_create(struct nsim_bus_dev *nsim_bus_dev, unsigned int port_count) +{ + struct nsim_dev *nsim_dev; + struct devlink *devlink; + int err; + + devlink = devlink_alloc(&nsim_dev_devlink_ops, sizeof(*nsim_dev)); + if (!devlink) + return ERR_PTR(-ENOMEM); + nsim_dev = devlink_priv(devlink); + nsim_dev->nsim_bus_dev = nsim_bus_dev; + nsim_dev->switch_id.id_len = sizeof(nsim_dev->switch_id.id); + get_random_bytes(nsim_dev->switch_id.id, nsim_dev->switch_id.id_len); + INIT_LIST_HEAD(&nsim_dev->port_list); + mutex_init(&nsim_dev->port_list_lock); + + nsim_dev->fib_data = nsim_fib_create(); + if (IS_ERR(nsim_dev->fib_data)) { + err = PTR_ERR(nsim_dev->fib_data); + goto err_devlink_free; + } + + err = nsim_dev_resources_register(devlink); + if (err) + goto err_fib_destroy; + + err = devlink_register(devlink, &nsim_bus_dev->dev); + if (err) + goto err_resources_unregister; + + err = nsim_dev_debugfs_init(nsim_dev); + if (err) + goto err_dl_unregister; + + err = nsim_bpf_dev_init(nsim_dev); + if (err) + goto err_debugfs_exit; + + return nsim_dev; + +err_debugfs_exit: + nsim_dev_debugfs_exit(nsim_dev); +err_dl_unregister: + devlink_unregister(devlink); +err_resources_unregister: + devlink_resources_unregister(devlink, NULL); +err_fib_destroy: + nsim_fib_destroy(nsim_dev->fib_data); +err_devlink_free: + devlink_free(devlink); + return ERR_PTR(err); +} + +static void nsim_dev_destroy(struct nsim_dev *nsim_dev) +{ + struct devlink *devlink = priv_to_devlink(nsim_dev); + + nsim_bpf_dev_exit(nsim_dev); +
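nsim_dev_create() above uses the standard devlink embedding pattern: the driver's per-device state is allocated as the priv area of the devlink instance itself, so devlink_priv() and priv_to_devlink() convert between the two without a second allocation or back-pointer. In miniature (demo names, resource and debugfs setup omitted):

#include <linux/err.h>
#include <net/devlink.h>

struct demo_dev {
	struct device *parent;
	/* ... driver state ... */
};

static const struct devlink_ops demo_devlink_ops = {
	/* .reload etc. would go here */
};

static struct demo_dev *demo_dev_create(struct device *parent)
{
	struct devlink *devlink;
	struct demo_dev *demo;
	int err;

	devlink = devlink_alloc(&demo_devlink_ops, sizeof(*demo));
	if (!devlink)
		return ERR_PTR(-ENOMEM);
	demo = devlink_priv(devlink);	/* state lives inside devlink */
	demo->parent = parent;

	err = devlink_register(devlink, parent);
	if (err) {
		devlink_free(devlink);
		return ERR_PTR(err);
	}
	return demo;
}

static void demo_dev_destroy(struct demo_dev *demo)
{
	struct devlink *devlink = priv_to_devlink(demo);

	devlink_unregister(devlink);
	devlink_free(devlink);
}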
nsim_dev_debugfs_exit(nsim_dev); + devlink_unregister(devlink); + devlink_resources_unregister(devlink, NULL); + nsim_fib_destroy(nsim_dev->fib_data); + mutex_destroy(&nsim_dev->port_list_lock); + devlink_free(devlink); +} + +static int __nsim_dev_port_add(struct nsim_dev *nsim_dev, + unsigned int port_index) +{ + struct nsim_dev_port *nsim_dev_port; + struct devlink_port *devlink_port; + int err; + + nsim_dev_port = kzalloc(sizeof(*nsim_dev_port), GFP_KERNEL); + if (!nsim_dev_port) + return -ENOMEM; + nsim_dev_port->port_index = port_index; + + devlink_port = &nsim_dev_port->devlink_port; + devlink_port_attrs_set(devlink_port, DEVLINK_PORT_FLAVOUR_PHYSICAL, + port_index + 1, 0, 0, + nsim_dev->switch_id.id, + nsim_dev->switch_id.id_len); + err = devlink_port_register(priv_to_devlink(nsim_dev), devlink_port, + port_index); + if (err) + goto err_port_free; + + err = nsim_dev_port_debugfs_init(nsim_dev, nsim_dev_port); + if (err) + goto err_dl_port_unregister; + + nsim_dev_port->ns = nsim_create(nsim_dev, nsim_dev_port); + if (IS_ERR(nsim_dev_port->ns)) { + err = PTR_ERR(nsim_dev_port->ns); + goto err_port_debugfs_exit; + } + + devlink_port_type_eth_set(devlink_port, nsim_dev_port->ns->netdev); + list_add(&nsim_dev_port->list, &nsim_dev->port_list); + + return 0; + +err_port_debugfs_exit: + nsim_dev_port_debugfs_exit(nsim_dev_port); +err_dl_port_unregister: + devlink_port_unregister(devlink_port); +err_port_free: + kfree(nsim_dev_port); + return err; +} + +static void __nsim_dev_port_del(struct nsim_dev_port *nsim_dev_port) +{ + struct devlink_port *devlink_port = &nsim_dev_port->devlink_port; + + list_del(&nsim_dev_port->list); + devlink_port_type_clear(devlink_port); + nsim_destroy(nsim_dev_port->ns); + nsim_dev_port_debugfs_exit(nsim_dev_port); + devlink_port_unregister(devlink_port); + kfree(nsim_dev_port); +} + +static void nsim_dev_port_del_all(struct nsim_dev *nsim_dev) +{ + struct nsim_dev_port *nsim_dev_port, *tmp; + + list_for_each_entry_safe(nsim_dev_port, tmp, + &nsim_dev->port_list, list) + __nsim_dev_port_del(nsim_dev_port); +} + +int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev) +{ + struct nsim_dev *nsim_dev; + int i; + int err; + + nsim_dev = nsim_dev_create(nsim_bus_dev, nsim_bus_dev->port_count); + if (IS_ERR(nsim_dev)) + return PTR_ERR(nsim_dev); + dev_set_drvdata(&nsim_bus_dev->dev, nsim_dev); + + for (i = 0; i < nsim_bus_dev->port_count; i++) { + err = __nsim_dev_port_add(nsim_dev, i); + if (err) + goto err_port_del_all; + } + return 0; + +err_port_del_all: + nsim_dev_port_del_all(nsim_dev); + nsim_dev_destroy(nsim_dev); + return err; +} + +void nsim_dev_remove(struct nsim_bus_dev *nsim_bus_dev) +{ + struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev); + + nsim_dev_port_del_all(nsim_dev); + nsim_dev_destroy(nsim_dev); +} + +static struct nsim_dev_port * +__nsim_dev_port_lookup(struct nsim_dev *nsim_dev, unsigned int port_index) +{ + struct nsim_dev_port *nsim_dev_port; + + list_for_each_entry(nsim_dev_port, &nsim_dev->port_list, list) + if (nsim_dev_port->port_index == port_index) + return nsim_dev_port; + return NULL; +} + +int nsim_dev_port_add(struct nsim_bus_dev *nsim_bus_dev, + unsigned int port_index) +{ + struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev); + int err; + + mutex_lock(&nsim_dev->port_list_lock); + if (__nsim_dev_port_lookup(nsim_dev, port_index)) + err = -EEXIST; + else + err = __nsim_dev_port_add(nsim_dev, port_index); + mutex_unlock(&nsim_dev->port_list_lock); + return err; +} + +int nsim_dev_port_del(struct 
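__nsim_dev_port_add() above shows the devlink port lifecycle this refactor adopts: set the port attributes (flavour plus switch id), register the port, then bind the netdev with devlink_port_type_eth_set(); with ndo_get_devlink_port wired up (see the netdev.c hunk further down), the port rather than the driver then answers switch-id and port-name queries. Condensed into a sketch with demo names:

#include <net/devlink.h>

struct demo_port {
	struct devlink_port dl_port;
	struct net_device *netdev;
};

static int demo_port_add(struct devlink *devlink, struct demo_port *port,
			 unsigned int index,
			 const unsigned char *switch_id, u8 id_len)
{
	int err;

	/* flavour + switch_id make the port show up as one front-panel
	 * port of this (virtual) switch in "devlink port" output */
	devlink_port_attrs_set(&port->dl_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
			       index + 1, false, 0, switch_id, id_len);
	err = devlink_port_register(devlink, &port->dl_port, index);
	if (err)
		return err;

	/* bind the netdev once it exists; cleared before unregistering */
	devlink_port_type_eth_set(&port->dl_port, port->netdev);
	return 0;
}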
nsim_bus_dev *nsim_bus_dev, + unsigned int port_index) +{ + struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev); + struct nsim_dev_port *nsim_dev_port; + int err = 0; + + mutex_lock(&nsim_dev->port_list_lock); + nsim_dev_port = __nsim_dev_port_lookup(nsim_dev, port_index); + if (!nsim_dev_port) + err = -ENOENT; + else + __nsim_dev_port_del(nsim_dev_port); + mutex_unlock(&nsim_dev->port_list_lock); + return err; +} + +int nsim_dev_init(void) +{ + nsim_dev_ddir = debugfs_create_dir(DRV_NAME, NULL); + if (IS_ERR_OR_NULL(nsim_dev_ddir)) + return -ENOMEM; + return 0; +} + +void nsim_dev_exit(void) +{ + debugfs_remove_recursive(nsim_dev_ddir); +} diff --git a/drivers/net/netdevsim/devlink.c b/drivers/net/netdevsim/devlink.c deleted file mode 100644 index 5135fc371f01..000000000000 --- a/drivers/net/netdevsim/devlink.c +++ /dev/null @@ -1,295 +0,0 @@ -/* - * Copyright (c) 2018 Cumulus Networks. All rights reserved. - * Copyright (c) 2018 David Ahern <dsa@cumulusnetworks.com> - * - * This software is licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree. - * - * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" - * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, - * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS - * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE - * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME - * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - */ - -#include <linux/device.h> -#include <net/devlink.h> -#include <net/netns/generic.h> - -#include "netdevsim.h" - -static unsigned int nsim_devlink_id; - -/* place holder until devlink and namespaces is sorted out */ -static struct net *nsim_devlink_net(struct devlink *devlink) -{ - return &init_net; -} - -/* IPv4 - */ -static u64 nsim_ipv4_fib_resource_occ_get(void *priv) -{ - struct net *net = priv; - - return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, false); -} - -static u64 nsim_ipv4_fib_rules_res_occ_get(void *priv) -{ - struct net *net = priv; - - return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, false); -} - -/* IPv6 - */ -static u64 nsim_ipv6_fib_resource_occ_get(void *priv) -{ - struct net *net = priv; - - return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, false); -} - -static u64 nsim_ipv6_fib_rules_res_occ_get(void *priv) -{ - struct net *net = priv; - - return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, false); -} - -static int devlink_resources_register(struct devlink *devlink) -{ - struct devlink_resource_size_params params = { - .size_max = (u64)-1, - .size_granularity = 1, - .unit = DEVLINK_RESOURCE_UNIT_ENTRY - }; - struct net *net = nsim_devlink_net(devlink); - int err; - u64 n; - - /* Resources for IPv4 */ - err = devlink_resource_register(devlink, "IPv4", (u64)-1, - NSIM_RESOURCE_IPV4, - DEVLINK_RESOURCE_ID_PARENT_TOP, - &params); - if (err) { - pr_err("Failed to register IPv4 top resource\n"); - goto out; - } - - n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, true); - err = devlink_resource_register(devlink, "fib", n, - NSIM_RESOURCE_IPV4_FIB, - NSIM_RESOURCE_IPV4, &params); - if (err) { - pr_err("Failed to register IPv4 FIB resource\n"); - return err; - } - - n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, true); - err = devlink_resource_register(devlink, "fib-rules", n, - NSIM_RESOURCE_IPV4_FIB_RULES, - NSIM_RESOURCE_IPV4, &params); - if (err) { -
pr_err("Failed to register IPv4 FIB rules resource\n"); - return err; - } - - /* Resources for IPv6 */ - err = devlink_resource_register(devlink, "IPv6", (u64)-1, - NSIM_RESOURCE_IPV6, - DEVLINK_RESOURCE_ID_PARENT_TOP, - ¶ms); - if (err) { - pr_err("Failed to register IPv6 top resource\n"); - goto out; - } - - n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, true); - err = devlink_resource_register(devlink, "fib", n, - NSIM_RESOURCE_IPV6_FIB, - NSIM_RESOURCE_IPV6, ¶ms); - if (err) { - pr_err("Failed to register IPv6 FIB resource\n"); - return err; - } - - n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, true); - err = devlink_resource_register(devlink, "fib-rules", n, - NSIM_RESOURCE_IPV6_FIB_RULES, - NSIM_RESOURCE_IPV6, ¶ms); - if (err) { - pr_err("Failed to register IPv6 FIB rules resource\n"); - return err; - } - - devlink_resource_occ_get_register(devlink, - NSIM_RESOURCE_IPV4_FIB, - nsim_ipv4_fib_resource_occ_get, - net); - devlink_resource_occ_get_register(devlink, - NSIM_RESOURCE_IPV4_FIB_RULES, - nsim_ipv4_fib_rules_res_occ_get, - net); - devlink_resource_occ_get_register(devlink, - NSIM_RESOURCE_IPV6_FIB, - nsim_ipv6_fib_resource_occ_get, - net); - devlink_resource_occ_get_register(devlink, - NSIM_RESOURCE_IPV6_FIB_RULES, - nsim_ipv6_fib_rules_res_occ_get, - net); -out: - return err; -} - -static int nsim_devlink_reload(struct devlink *devlink, - struct netlink_ext_ack *extack) -{ - enum nsim_resource_id res_ids[] = { - NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4_FIB_RULES, - NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6_FIB_RULES - }; - struct net *net = nsim_devlink_net(devlink); - int i; - - for (i = 0; i < ARRAY_SIZE(res_ids); ++i) { - int err; - u64 val; - - err = devlink_resource_size_get(devlink, res_ids[i], &val); - if (!err) { - err = nsim_fib_set_max(net, res_ids[i], val, extack); - if (err) - return err; - } - } - - return 0; -} - -static void nsim_devlink_net_reset(struct net *net) -{ - enum nsim_resource_id res_ids[] = { - NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4_FIB_RULES, - NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6_FIB_RULES - }; - int i; - - for (i = 0; i < ARRAY_SIZE(res_ids); ++i) { - if (nsim_fib_set_max(net, res_ids[i], (u64)-1, NULL)) { - pr_err("Failed to reset limit for resource %u\n", - res_ids[i]); - } - } -} - -static const struct devlink_ops nsim_devlink_ops = { - .reload = nsim_devlink_reload, -}; - -/* once devlink / namespace issues are sorted out - * this needs to be net in which a devlink instance - * is to be created. 
e.g., dev_net(ns->netdev) - */ -static struct net *nsim_to_net(struct netdevsim *ns) -{ - return &init_net; -} - -void nsim_devlink_teardown(struct netdevsim *ns) -{ - if (ns->devlink) { - struct net *net = nsim_to_net(ns); - bool *reg_devlink = net_generic(net, nsim_devlink_id); - - devlink_resources_unregister(ns->devlink, NULL); - devlink_unregister(ns->devlink); - devlink_free(ns->devlink); - ns->devlink = NULL; - - nsim_devlink_net_reset(net); - *reg_devlink = true; - } -} - -int nsim_devlink_setup(struct netdevsim *ns) -{ - struct net *net = nsim_to_net(ns); - bool *reg_devlink = net_generic(net, nsim_devlink_id); - struct devlink *devlink; - int err; - - /* only one device per namespace controls devlink */ - if (!*reg_devlink) { - ns->devlink = NULL; - return 0; - } - - devlink = devlink_alloc(&nsim_devlink_ops, 0); - if (!devlink) - return -ENOMEM; - - err = devlink_register(devlink, &ns->dev); - if (err) - goto err_devlink_free; - - err = devlink_resources_register(devlink); - if (err) - goto err_dl_unregister; - - ns->devlink = devlink; - - *reg_devlink = false; - - return 0; - -err_dl_unregister: - devlink_unregister(devlink); -err_devlink_free: - devlink_free(devlink); - - return err; -} - -/* Initialize per network namespace state */ -static int __net_init nsim_devlink_netns_init(struct net *net) -{ - bool *reg_devlink = net_generic(net, nsim_devlink_id); - - *reg_devlink = true; - - return 0; -} - -static struct pernet_operations nsim_devlink_net_ops = { - .init = nsim_devlink_netns_init, - .id = &nsim_devlink_id, - .size = sizeof(bool), -}; - -void nsim_devlink_exit(void) -{ - unregister_pernet_subsys(&nsim_devlink_net_ops); - nsim_fib_exit(); -} - -int nsim_devlink_init(void) -{ - int err; - - err = nsim_fib_init(); - if (err) - goto err_out; - - err = register_pernet_subsys(&nsim_devlink_net_ops); - if (err) - nsim_fib_exit(); - -err_out: - return err; -} diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c index f61d094746c0..8c57ba747772 100644 --- a/drivers/net/netdevsim/fib.c +++ b/drivers/net/netdevsim/fib.c @@ -18,7 +18,6 @@ #include <net/ip_fib.h> #include <net/ip6_fib.h> #include <net/fib_rules.h> -#include <net/netns/generic.h> #include "netdevsim.h" @@ -33,15 +32,14 @@ struct nsim_per_fib_data { }; struct nsim_fib_data { + struct notifier_block fib_nb; struct nsim_per_fib_data ipv4; struct nsim_per_fib_data ipv6; }; -static unsigned int nsim_fib_net_id; - -u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max) +u64 nsim_fib_get_val(struct nsim_fib_data *fib_data, + enum nsim_resource_id res_id, bool max) { - struct nsim_fib_data *fib_data = net_generic(net, nsim_fib_net_id); struct nsim_fib_entry *entry; switch (res_id) { @@ -64,10 +62,10 @@ u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max) return max ? 
entry->max : entry->num; } -int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val, +int nsim_fib_set_max(struct nsim_fib_data *fib_data, + enum nsim_resource_id res_id, u64 val, struct netlink_ext_ack *extack) { - struct nsim_fib_data *fib_data = net_generic(net, nsim_fib_net_id); struct nsim_fib_entry *entry; int err = 0; @@ -120,9 +118,9 @@ static int nsim_fib_rule_account(struct nsim_fib_entry *entry, bool add, return err; } -static int nsim_fib_rule_event(struct fib_notifier_info *info, bool add) +static int nsim_fib_rule_event(struct nsim_fib_data *data, + struct fib_notifier_info *info, bool add) { - struct nsim_fib_data *data = net_generic(info->net, nsim_fib_net_id); struct netlink_ext_ack *extack = info->extack; int err = 0; @@ -157,9 +155,9 @@ static int nsim_fib_account(struct nsim_fib_entry *entry, bool add, return err; } -static int nsim_fib_event(struct fib_notifier_info *info, bool add) +static int nsim_fib_event(struct nsim_fib_data *data, + struct fib_notifier_info *info, bool add) { - struct nsim_fib_data *data = net_generic(info->net, nsim_fib_net_id); struct netlink_ext_ack *extack = info->extack; int err = 0; @@ -178,18 +176,22 @@ static int nsim_fib_event(struct fib_notifier_info *info, bool add) static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event, void *ptr) { + struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data, + fib_nb); struct fib_notifier_info *info = ptr; int err = 0; switch (event) { case FIB_EVENT_RULE_ADD: /* fall through */ case FIB_EVENT_RULE_DEL: - err = nsim_fib_rule_event(info, event == FIB_EVENT_RULE_ADD); + err = nsim_fib_rule_event(data, info, + event == FIB_EVENT_RULE_ADD); break; case FIB_EVENT_ENTRY_ADD: /* fall through */ case FIB_EVENT_ENTRY_DEL: - err = nsim_fib_event(info, event == FIB_EVENT_ENTRY_ADD); + err = nsim_fib_event(data, info, + event == FIB_EVENT_ENTRY_ADD); break; } @@ -199,30 +201,23 @@ static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event, /* inconsistent dump, trying again */ static void nsim_fib_dump_inconsistent(struct notifier_block *nb) { - struct nsim_fib_data *data; - struct net *net; - - rcu_read_lock(); - for_each_net_rcu(net) { - data = net_generic(net, nsim_fib_net_id); - - data->ipv4.fib.num = 0ULL; - data->ipv4.rules.num = 0ULL; + struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data, + fib_nb); - data->ipv6.fib.num = 0ULL; - data->ipv6.rules.num = 0ULL; - } - rcu_read_unlock(); + data->ipv4.fib.num = 0ULL; + data->ipv4.rules.num = 0ULL; + data->ipv6.fib.num = 0ULL; + data->ipv6.rules.num = 0ULL; } -static struct notifier_block nsim_fib_nb = { - .notifier_call = nsim_fib_event_nb, -}; - -/* Initialize per network namespace state */ -static int __net_init nsim_fib_netns_init(struct net *net) +struct nsim_fib_data *nsim_fib_create(void) { - struct nsim_fib_data *data = net_generic(net, nsim_fib_net_id); + struct nsim_fib_data *data; + int err; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return ERR_PTR(-ENOMEM); data->ipv4.fib.max = (u64)-1; data->ipv4.rules.max = (u64)-1; @@ -230,37 +225,22 @@ static int __net_init nsim_fib_netns_init(struct net *net) data->ipv6.fib.max = (u64)-1; data->ipv6.rules.max = (u64)-1; - return 0; -} - -static struct pernet_operations nsim_fib_net_ops = { - .init = nsim_fib_netns_init, - .id = &nsim_fib_net_id, - .size = sizeof(struct nsim_fib_data), -}; - -void nsim_fib_exit(void) -{ - unregister_pernet_subsys(&nsim_fib_net_ops); - unregister_fib_notifier(&nsim_fib_nb); -} - 
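The fib.c conversion above drops the pernet lookup entirely: the notifier_block now lives inside nsim_fib_data, and the callback recovers its state with container_of() instead of net_generic(). The generic shape of that idiom, under demo names:

#include <linux/notifier.h>
#include <net/fib_notifier.h>

struct demo_fib_data {
	struct notifier_block fib_nb;
	u64 num_entries;
};

static int demo_fib_event_nb(struct notifier_block *nb, unsigned long event,
			     void *ptr)
{
	/* No global or pernet lookup: the embedded member points back
	 * at the owning structure. */
	struct demo_fib_data *data =
		container_of(nb, struct demo_fib_data, fib_nb);

	if (event == FIB_EVENT_ENTRY_ADD)
		data->num_entries++;
	else if (event == FIB_EVENT_ENTRY_DEL)
		data->num_entries--;

	return NOTIFY_DONE;
}

As in nsim_fib_create() above, the owner would set data->fib_nb.notifier_call = demo_fib_event_nb and pass &data->fib_nb to register_fib_notifier(), so each instance carries its own private counters.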
-int nsim_fib_init(void) -{ - int err; - - err = register_pernet_subsys(&nsim_fib_net_ops); - if (err < 0) { - pr_err("Failed to register pernet subsystem\n"); - goto err_out; - } - - err = register_fib_notifier(&nsim_fib_nb, nsim_fib_dump_inconsistent); - if (err < 0) { + data->fib_nb.notifier_call = nsim_fib_event_nb; + err = register_fib_notifier(&data->fib_nb, nsim_fib_dump_inconsistent); + if (err) { pr_err("Failed to register fib notifier\n"); goto err_out; } + return data; + err_out: - return err; + kfree(data); + return ERR_PTR(err); +} + +void nsim_fib_destroy(struct nsim_fib_data *data) +{ + unregister_fib_notifier(&data->fib_nb); + kfree(data); } diff --git a/drivers/net/netdevsim/ipsec.c b/drivers/net/netdevsim/ipsec.c index 76e11d889bb6..e27fc1a4516d 100644 --- a/drivers/net/netdevsim/ipsec.c +++ b/drivers/net/netdevsim/ipsec.c @@ -283,7 +283,8 @@ void nsim_ipsec_init(struct netdevsim *ns) ns->netdev->features |= NSIM_ESP_FEATURES; ns->netdev->hw_enc_features |= NSIM_ESP_FEATURES; - ns->ipsec.pfile = debugfs_create_file("ipsec", 0400, ns->ddir, ns, + ns->ipsec.pfile = debugfs_create_file("ipsec", 0400, + ns->nsim_dev_port->ddir, ns, &ipsec_dbg_fops); } diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c index 75a50b59cb8f..e5c8aa08e1cd 100644 --- a/drivers/net/netdevsim/netdev.c +++ b/drivers/net/netdevsim/netdev.c @@ -25,230 +25,6 @@ #include "netdevsim.h" -struct nsim_vf_config { - int link_state; - u16 min_tx_rate; - u16 max_tx_rate; - u16 vlan; - __be16 vlan_proto; - u16 qos; - u8 vf_mac[ETH_ALEN]; - bool spoofchk_enabled; - bool trusted; - bool rss_query_enabled; -}; - -static u32 nsim_dev_id; - -static struct dentry *nsim_ddir; -static struct dentry *nsim_sdev_ddir; - -static int nsim_num_vf(struct device *dev) -{ - struct netdevsim *ns = to_nsim(dev); - - return ns->num_vfs; -} - -static struct bus_type nsim_bus = { - .name = DRV_NAME, - .dev_name = DRV_NAME, - .num_vf = nsim_num_vf, -}; - -static int nsim_vfs_enable(struct netdevsim *ns, unsigned int num_vfs) -{ - ns->vfconfigs = kcalloc(num_vfs, sizeof(struct nsim_vf_config), - GFP_KERNEL); - if (!ns->vfconfigs) - return -ENOMEM; - ns->num_vfs = num_vfs; - - return 0; -} - -static void nsim_vfs_disable(struct netdevsim *ns) -{ - kfree(ns->vfconfigs); - ns->vfconfigs = NULL; - ns->num_vfs = 0; -} - -static ssize_t -nsim_numvfs_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct netdevsim *ns = to_nsim(dev); - unsigned int num_vfs; - int ret; - - ret = kstrtouint(buf, 0, &num_vfs); - if (ret) - return ret; - - rtnl_lock(); - if (ns->num_vfs == num_vfs) - goto exit_good; - if (ns->num_vfs && num_vfs) { - ret = -EBUSY; - goto exit_unlock; - } - - if (num_vfs) { - ret = nsim_vfs_enable(ns, num_vfs); - if (ret) - goto exit_unlock; - } else { - nsim_vfs_disable(ns); - } -exit_good: - ret = count; -exit_unlock: - rtnl_unlock(); - - return ret; -} - -static ssize_t -nsim_numvfs_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct netdevsim *ns = to_nsim(dev); - - return sprintf(buf, "%u\n", ns->num_vfs); -} - -static struct device_attribute nsim_numvfs_attr = - __ATTR(sriov_numvfs, 0664, nsim_numvfs_show, nsim_numvfs_store); - -static struct attribute *nsim_dev_attrs[] = { - &nsim_numvfs_attr.attr, - NULL, -}; - -static const struct attribute_group nsim_dev_attr_group = { - .attrs = nsim_dev_attrs, -}; - -static const struct attribute_group *nsim_dev_attr_groups[] = { - &nsim_dev_attr_group, - NULL, -}; - -static void 
nsim_dev_release(struct device *dev) -{ - struct netdevsim *ns = to_nsim(dev); - - nsim_vfs_disable(ns); - free_netdev(ns->netdev); -} - -static struct device_type nsim_dev_type = { - .groups = nsim_dev_attr_groups, - .release = nsim_dev_release, -}; - -static int nsim_get_port_parent_id(struct net_device *dev, - struct netdev_phys_item_id *ppid) -{ - struct netdevsim *ns = netdev_priv(dev); - - ppid->id_len = sizeof(ns->sdev->switch_id); - memcpy(&ppid->id, &ns->sdev->switch_id, ppid->id_len); - return 0; -} - -static int nsim_init(struct net_device *dev) -{ - char sdev_ddir_name[10], sdev_link_name[32]; - struct netdevsim *ns = netdev_priv(dev); - int err; - - ns->netdev = dev; - ns->ddir = debugfs_create_dir(netdev_name(dev), nsim_ddir); - if (IS_ERR_OR_NULL(ns->ddir)) - return -ENOMEM; - - if (!ns->sdev) { - ns->sdev = kzalloc(sizeof(*ns->sdev), GFP_KERNEL); - if (!ns->sdev) { - err = -ENOMEM; - goto err_debugfs_destroy; - } - ns->sdev->refcnt = 1; - ns->sdev->switch_id = nsim_dev_id; - sprintf(sdev_ddir_name, "%u", ns->sdev->switch_id); - ns->sdev->ddir = debugfs_create_dir(sdev_ddir_name, - nsim_sdev_ddir); - if (IS_ERR_OR_NULL(ns->sdev->ddir)) { - err = PTR_ERR_OR_ZERO(ns->sdev->ddir) ?: -EINVAL; - goto err_sdev_free; - } - } else { - sprintf(sdev_ddir_name, "%u", ns->sdev->switch_id); - ns->sdev->refcnt++; - } - - sprintf(sdev_link_name, "../../" DRV_NAME "_sdev/%s", sdev_ddir_name); - debugfs_create_symlink("sdev", ns->ddir, sdev_link_name); - - err = nsim_bpf_init(ns); - if (err) - goto err_sdev_destroy; - - ns->dev.id = nsim_dev_id++; - ns->dev.bus = &nsim_bus; - ns->dev.type = &nsim_dev_type; - err = device_register(&ns->dev); - if (err) - goto err_bpf_uninit; - - SET_NETDEV_DEV(dev, &ns->dev); - - err = nsim_devlink_setup(ns); - if (err) - goto err_unreg_dev; - - nsim_ipsec_init(ns); - - return 0; - -err_unreg_dev: - device_unregister(&ns->dev); -err_bpf_uninit: - nsim_bpf_uninit(ns); -err_sdev_destroy: - if (!--ns->sdev->refcnt) { - debugfs_remove_recursive(ns->sdev->ddir); -err_sdev_free: - kfree(ns->sdev); - } -err_debugfs_destroy: - debugfs_remove_recursive(ns->ddir); - return err; -} - -static void nsim_uninit(struct net_device *dev) -{ - struct netdevsim *ns = netdev_priv(dev); - - nsim_ipsec_teardown(ns); - nsim_devlink_teardown(ns); - debugfs_remove_recursive(ns->ddir); - nsim_bpf_uninit(ns); - if (!--ns->sdev->refcnt) { - debugfs_remove_recursive(ns->sdev->ddir); - kfree(ns->sdev); - } -} - -static void nsim_free(struct net_device *dev) -{ - struct netdevsim *ns = netdev_priv(dev); - - device_unregister(&ns->dev); - /* netdev and vf state will be freed out of device_release() */ -} - static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct netdevsim *ns = netdev_priv(dev); @@ -325,11 +101,12 @@ nsim_setup_tc_block(struct net_device *dev, struct tc_block_offload *f) static int nsim_set_vf_mac(struct net_device *dev, int vf, u8 *mac) { struct netdevsim *ns = netdev_priv(dev); + struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev; /* Only refuse multicast addresses, zero address can mean unset/any. 
*/ - if (vf >= ns->num_vfs || is_multicast_ether_addr(mac)) + if (vf >= nsim_bus_dev->num_vfs || is_multicast_ether_addr(mac)) return -EINVAL; - memcpy(ns->vfconfigs[vf].vf_mac, mac, ETH_ALEN); + memcpy(nsim_bus_dev->vfconfigs[vf].vf_mac, mac, ETH_ALEN); return 0; } @@ -338,13 +115,14 @@ static int nsim_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos, __be16 vlan_proto) { struct netdevsim *ns = netdev_priv(dev); + struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev; - if (vf >= ns->num_vfs || vlan > 4095 || qos > 7) + if (vf >= nsim_bus_dev->num_vfs || vlan > 4095 || qos > 7) return -EINVAL; - ns->vfconfigs[vf].vlan = vlan; - ns->vfconfigs[vf].qos = qos; - ns->vfconfigs[vf].vlan_proto = vlan_proto; + nsim_bus_dev->vfconfigs[vf].vlan = vlan; + nsim_bus_dev->vfconfigs[vf].qos = qos; + nsim_bus_dev->vfconfigs[vf].vlan_proto = vlan_proto; return 0; } @@ -352,12 +130,13 @@ static int nsim_set_vf_vlan(struct net_device *dev, int vf, static int nsim_set_vf_rate(struct net_device *dev, int vf, int min, int max) { struct netdevsim *ns = netdev_priv(dev); + struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev; - if (vf >= ns->num_vfs) + if (vf >= nsim_bus_dev->num_vfs) return -EINVAL; - ns->vfconfigs[vf].min_tx_rate = min; - ns->vfconfigs[vf].max_tx_rate = max; + nsim_bus_dev->vfconfigs[vf].min_tx_rate = min; + nsim_bus_dev->vfconfigs[vf].max_tx_rate = max; return 0; } @@ -365,10 +144,11 @@ static int nsim_set_vf_rate(struct net_device *dev, int vf, int min, int max) static int nsim_set_vf_spoofchk(struct net_device *dev, int vf, bool val) { struct netdevsim *ns = netdev_priv(dev); + struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev; - if (vf >= ns->num_vfs) + if (vf >= nsim_bus_dev->num_vfs) return -EINVAL; - ns->vfconfigs[vf].spoofchk_enabled = val; + nsim_bus_dev->vfconfigs[vf].spoofchk_enabled = val; return 0; } @@ -376,10 +156,11 @@ static int nsim_set_vf_spoofchk(struct net_device *dev, int vf, bool val) static int nsim_set_vf_rss_query_en(struct net_device *dev, int vf, bool val) { struct netdevsim *ns = netdev_priv(dev); + struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev; - if (vf >= ns->num_vfs) + if (vf >= nsim_bus_dev->num_vfs) return -EINVAL; - ns->vfconfigs[vf].rss_query_enabled = val; + nsim_bus_dev->vfconfigs[vf].rss_query_enabled = val; return 0; } @@ -387,10 +168,11 @@ static int nsim_set_vf_rss_query_en(struct net_device *dev, int vf, bool val) static int nsim_set_vf_trust(struct net_device *dev, int vf, bool val) { struct netdevsim *ns = netdev_priv(dev); + struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev; - if (vf >= ns->num_vfs) + if (vf >= nsim_bus_dev->num_vfs) return -EINVAL; - ns->vfconfigs[vf].trusted = val; + nsim_bus_dev->vfconfigs[vf].trusted = val; return 0; } @@ -399,21 +181,22 @@ static int nsim_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi) { struct netdevsim *ns = netdev_priv(dev); + struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev; - if (vf >= ns->num_vfs) + if (vf >= nsim_bus_dev->num_vfs) return -EINVAL; ivi->vf = vf; - ivi->linkstate = ns->vfconfigs[vf].link_state; - ivi->min_tx_rate = ns->vfconfigs[vf].min_tx_rate; - ivi->max_tx_rate = ns->vfconfigs[vf].max_tx_rate; - ivi->vlan = ns->vfconfigs[vf].vlan; - ivi->vlan_proto = ns->vfconfigs[vf].vlan_proto; - ivi->qos = ns->vfconfigs[vf].qos; - memcpy(&ivi->mac, ns->vfconfigs[vf].vf_mac, ETH_ALEN); - ivi->spoofchk = ns->vfconfigs[vf].spoofchk_enabled; - ivi->trusted = ns->vfconfigs[vf].trusted; - ivi->rss_query_en = ns->vfconfigs[vf].rss_query_enabled; + 
ivi->linkstate = nsim_bus_dev->vfconfigs[vf].link_state; + ivi->min_tx_rate = nsim_bus_dev->vfconfigs[vf].min_tx_rate; + ivi->max_tx_rate = nsim_bus_dev->vfconfigs[vf].max_tx_rate; + ivi->vlan = nsim_bus_dev->vfconfigs[vf].vlan; + ivi->vlan_proto = nsim_bus_dev->vfconfigs[vf].vlan_proto; + ivi->qos = nsim_bus_dev->vfconfigs[vf].qos; + memcpy(&ivi->mac, nsim_bus_dev->vfconfigs[vf].vf_mac, ETH_ALEN); + ivi->spoofchk = nsim_bus_dev->vfconfigs[vf].spoofchk_enabled; + ivi->trusted = nsim_bus_dev->vfconfigs[vf].trusted; + ivi->rss_query_en = nsim_bus_dev->vfconfigs[vf].rss_query_enabled; return 0; } @@ -421,8 +204,9 @@ nsim_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi) static int nsim_set_vf_link_state(struct net_device *dev, int vf, int state) { struct netdevsim *ns = netdev_priv(dev); + struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev; - if (vf >= ns->num_vfs) + if (vf >= nsim_bus_dev->num_vfs) return -EINVAL; switch (state) { @@ -434,7 +218,7 @@ static int nsim_set_vf_link_state(struct net_device *dev, int vf, int state) return -EINVAL; } - ns->vfconfigs[vf].link_state = state; + nsim_bus_dev->vfconfigs[vf].link_state = state; return 0; } @@ -461,9 +245,14 @@ nsim_set_features(struct net_device *dev, netdev_features_t features) return 0; } +static struct devlink_port *nsim_get_devlink_port(struct net_device *dev) +{ + struct netdevsim *ns = netdev_priv(dev); + + return &ns->nsim_dev_port->devlink_port; +} + static const struct net_device_ops nsim_netdev_ops = { - .ndo_init = nsim_init, - .ndo_uninit = nsim_uninit, .ndo_start_xmit = nsim_start_xmit, .ndo_set_rx_mode = nsim_set_rx_mode, .ndo_set_mac_address = eth_mac_addr, @@ -481,7 +270,7 @@ static const struct net_device_ops nsim_netdev_ops = { .ndo_setup_tc = nsim_setup_tc, .ndo_set_features = nsim_set_features, .ndo_bpf = nsim_bpf, - .ndo_get_port_parent_id = nsim_get_port_parent_id, + .ndo_get_devlink_port = nsim_get_devlink_port, }; static void nsim_setup(struct net_device *dev) @@ -489,9 +278,6 @@ static void nsim_setup(struct net_device *dev) ether_setup(dev); eth_hw_addr_random(dev); - dev->netdev_ops = &nsim_netdev_ops; - dev->priv_destructor = nsim_free; - dev->tx_queue_len = 0; dev->flags |= IFF_NOARP; dev->flags &= ~IFF_MULTICAST; @@ -506,104 +292,102 @@ static void nsim_setup(struct net_device *dev) dev->max_mtu = ETH_MAX_MTU; } -static int nsim_validate(struct nlattr *tb[], struct nlattr *data[], - struct netlink_ext_ack *extack) +struct netdevsim * +nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port) { - if (tb[IFLA_ADDRESS]) { - if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) - return -EINVAL; - if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) - return -EADDRNOTAVAIL; - } - return 0; + struct net_device *dev; + struct netdevsim *ns; + int err; + + dev = alloc_netdev(sizeof(*ns), "eth%d", NET_NAME_UNKNOWN, nsim_setup); + if (!dev) + return ERR_PTR(-ENOMEM); + + ns = netdev_priv(dev); + ns->netdev = dev; + ns->nsim_dev = nsim_dev; + ns->nsim_dev_port = nsim_dev_port; + ns->nsim_bus_dev = nsim_dev->nsim_bus_dev; + SET_NETDEV_DEV(dev, &ns->nsim_bus_dev->dev); + dev->netdev_ops = &nsim_netdev_ops; + + rtnl_lock(); + err = nsim_bpf_init(ns); + if (err) + goto err_free_netdev; + + nsim_ipsec_init(ns); + + err = register_netdevice(dev); + if (err) + goto err_ipsec_teardown; + rtnl_unlock(); + + return ns; + +err_ipsec_teardown: + nsim_ipsec_teardown(ns); + nsim_bpf_uninit(ns); + rtnl_unlock(); +err_free_netdev: + free_netdev(dev); + return ERR_PTR(err); } -static int 
nsim_newlink(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], - struct netlink_ext_ack *extack) +void nsim_destroy(struct netdevsim *ns) { - struct netdevsim *ns = netdev_priv(dev); - - if (tb[IFLA_LINK]) { - struct net_device *joindev; - struct netdevsim *joinns; - - joindev = __dev_get_by_index(src_net, - nla_get_u32(tb[IFLA_LINK])); - if (!joindev) - return -ENODEV; - if (joindev->netdev_ops != &nsim_netdev_ops) - return -EINVAL; - - joinns = netdev_priv(joindev); - if (!joinns->sdev || !joinns->sdev->refcnt) - return -EINVAL; - ns->sdev = joinns->sdev; - } + struct net_device *dev = ns->netdev; - return register_netdevice(dev); + rtnl_lock(); + unregister_netdevice(dev); + nsim_ipsec_teardown(ns); + nsim_bpf_uninit(ns); + rtnl_unlock(); + free_netdev(dev); } -static void nsim_dellink(struct net_device *dev, struct list_head *head) +static int nsim_validate(struct nlattr *tb[], struct nlattr *data[], + struct netlink_ext_ack *extack) { - unregister_netdevice_queue(dev, head); + NL_SET_ERR_MSG_MOD(extack, "Please use: echo \"[ID] [PORT_COUNT]\" > /sys/bus/netdevsim/new_device"); + return -EOPNOTSUPP; } static struct rtnl_link_ops nsim_link_ops __read_mostly = { .kind = DRV_NAME, - .priv_size = sizeof(struct netdevsim), - .setup = nsim_setup, .validate = nsim_validate, - .newlink = nsim_newlink, - .dellink = nsim_dellink, }; static int __init nsim_module_init(void) { int err; - nsim_ddir = debugfs_create_dir(DRV_NAME, NULL); - if (IS_ERR_OR_NULL(nsim_ddir)) - return -ENOMEM; - - nsim_sdev_ddir = debugfs_create_dir(DRV_NAME "_sdev", NULL); - if (IS_ERR_OR_NULL(nsim_sdev_ddir)) { - err = -ENOMEM; - goto err_debugfs_destroy; - } - - err = bus_register(&nsim_bus); + err = nsim_dev_init(); if (err) - goto err_sdir_destroy; + return err; - err = nsim_devlink_init(); + err = nsim_bus_init(); if (err) - goto err_unreg_bus; + goto err_dev_exit; err = rtnl_link_register(&nsim_link_ops); if (err) - goto err_dl_fini; + goto err_bus_exit; return 0; -err_dl_fini: - nsim_devlink_exit(); -err_unreg_bus: - bus_unregister(&nsim_bus); -err_sdir_destroy: - debugfs_remove_recursive(nsim_sdev_ddir); -err_debugfs_destroy: - debugfs_remove_recursive(nsim_ddir); +err_bus_exit: + nsim_bus_exit(); +err_dev_exit: + nsim_dev_exit(); return err; } static void __exit nsim_module_exit(void) { rtnl_link_unregister(&nsim_link_ops); - nsim_devlink_exit(); - bus_unregister(&nsim_bus); - debugfs_remove_recursive(nsim_sdev_ddir); - debugfs_remove_recursive(nsim_ddir); + nsim_bus_exit(); + nsim_dev_exit(); } module_init(nsim_module_init); diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h index 384c254fafc5..3f398797c2bc 100644 --- a/drivers/net/netdevsim/netdevsim.h +++ b/drivers/net/netdevsim/netdevsim.h @@ -18,6 +18,7 @@ #include <linux/list.h> #include <linux/netdevice.h> #include <linux/u64_stats_sync.h> +#include <net/devlink.h> #include <net/xdp.h> #define DRV_NAME "netdevsim" @@ -26,26 +27,6 @@ #define NSIM_EA(extack, msg) NL_SET_ERR_MSG_MOD((extack), msg) -struct bpf_prog; -struct bpf_offload_dev; -struct dentry; -struct nsim_vf_config; - -struct netdevsim_shared_dev { - unsigned int refcnt; - u32 switch_id; - - struct dentry *ddir; - - struct bpf_offload_dev *bpf_dev; - - struct dentry *ddir_bpf_bound_progs; - u32 prog_id_gen; - - struct list_head bpf_bound_progs; - struct list_head bpf_bound_maps; -}; - #define NSIM_IPSEC_MAX_SA_COUNT 33 #define NSIM_IPSEC_VALID BIT(31) @@ -69,18 +50,14 @@ struct nsim_ipsec { struct netdevsim { struct 
net_device *netdev; + struct nsim_dev *nsim_dev; + struct nsim_dev_port *nsim_dev_port; u64 tx_packets; u64 tx_bytes; struct u64_stats_sync syncp; - struct device dev; - struct netdevsim_shared_dev *sdev; - - struct dentry *ddir; - - unsigned int num_vfs; - struct nsim_vf_config *vfconfigs; + struct nsim_bus_dev *nsim_bus_dev; struct bpf_prog *bpf_offloaded; u32 bpf_offloaded_id; @@ -88,22 +65,22 @@ struct netdevsim { struct xdp_attachment_info xdp; struct xdp_attachment_info xdp_hw; - bool bpf_bind_accept; - u32 bpf_bind_verifier_delay; - bool bpf_tc_accept; bool bpf_tc_non_bound_accept; bool bpf_xdpdrv_accept; bool bpf_xdpoffload_accept; bool bpf_map_accept; -#if IS_ENABLED(CONFIG_NET_DEVLINK) - struct devlink *devlink; -#endif struct nsim_ipsec ipsec; }; +struct netdevsim * +nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port); +void nsim_destroy(struct netdevsim *ns); + #ifdef CONFIG_BPF_SYSCALL +int nsim_bpf_dev_init(struct nsim_dev *nsim_dev); +void nsim_bpf_dev_exit(struct nsim_dev *nsim_dev); int nsim_bpf_init(struct netdevsim *ns); void nsim_bpf_uninit(struct netdevsim *ns); int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf); @@ -111,6 +88,15 @@ int nsim_bpf_disable_tc(struct netdevsim *ns); int nsim_bpf_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv); #else + +static inline int nsim_bpf_dev_init(struct nsim_dev *nsim_dev) +{ + return 0; +} + +static inline void nsim_bpf_dev_exit(struct nsim_dev *nsim_dev) +{ +} static inline int nsim_bpf_init(struct netdevsim *ns) { return 0; @@ -138,7 +124,6 @@ nsim_bpf_setup_tc_block_cb(enum tc_setup_type type, void *type_data, } #endif -#if IS_ENABLED(CONFIG_NET_DEVLINK) enum nsim_resource_id { NSIM_RESOURCE_NONE, /* DEVLINK_RESOURCE_ID_PARENT_TOP */ NSIM_RESOURCE_IPV4, @@ -149,36 +134,47 @@ enum nsim_resource_id { NSIM_RESOURCE_IPV6_FIB_RULES, }; -int nsim_devlink_setup(struct netdevsim *ns); -void nsim_devlink_teardown(struct netdevsim *ns); +struct nsim_dev_port { + struct list_head list; + struct devlink_port devlink_port; + unsigned int port_index; + struct dentry *ddir; + struct netdevsim *ns; +}; -int nsim_devlink_init(void); -void nsim_devlink_exit(void); +struct nsim_dev { + struct nsim_bus_dev *nsim_bus_dev; + struct nsim_fib_data *fib_data; + struct dentry *ddir; + struct dentry *ports_ddir; + struct bpf_offload_dev *bpf_dev; + bool bpf_bind_accept; + u32 bpf_bind_verifier_delay; + struct dentry *ddir_bpf_bound_progs; + u32 prog_id_gen; + struct list_head bpf_bound_progs; + struct list_head bpf_bound_maps; + struct netdev_phys_item_id switch_id; + struct list_head port_list; + struct mutex port_list_lock; /* protects port list */ +}; -int nsim_fib_init(void); -void nsim_fib_exit(void); -u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max); -int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val, +int nsim_dev_init(void); +void nsim_dev_exit(void); +int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev); +void nsim_dev_remove(struct nsim_bus_dev *nsim_bus_dev); +int nsim_dev_port_add(struct nsim_bus_dev *nsim_bus_dev, + unsigned int port_index); +int nsim_dev_port_del(struct nsim_bus_dev *nsim_bus_dev, + unsigned int port_index); + +struct nsim_fib_data *nsim_fib_create(void); +void nsim_fib_destroy(struct nsim_fib_data *fib_data); +u64 nsim_fib_get_val(struct nsim_fib_data *fib_data, + enum nsim_resource_id res_id, bool max); +int nsim_fib_set_max(struct nsim_fib_data *fib_data, + enum nsim_resource_id res_id, u64 val, struct 
netlink_ext_ack *extack); -#else -static inline int nsim_devlink_setup(struct netdevsim *ns) -{ - return 0; -} - -static inline void nsim_devlink_teardown(struct netdevsim *ns) -{ -} - -static inline int nsim_devlink_init(void) -{ - return 0; -} - -static inline void nsim_devlink_exit(void) -{ -} -#endif #if IS_ENABLED(CONFIG_XFRM_OFFLOAD) void nsim_ipsec_init(struct netdevsim *ns); @@ -199,7 +195,26 @@ static inline bool nsim_ipsec_tx(struct netdevsim *ns, struct sk_buff *skb) } #endif -static inline struct netdevsim *to_nsim(struct device *ptr) -{ - return container_of(ptr, struct netdevsim, dev); -} +struct nsim_vf_config { + int link_state; + u16 min_tx_rate; + u16 max_tx_rate; + u16 vlan; + __be16 vlan_proto; + u16 qos; + u8 vf_mac[ETH_ALEN]; + bool spoofchk_enabled; + bool trusted; + bool rss_query_enabled; +}; + +struct nsim_bus_dev { + struct device dev; + struct list_head list; + unsigned int port_count; + unsigned int num_vfs; + struct nsim_vf_config *vfconfigs; +}; + +int nsim_bus_init(void); +void nsim_bus_exit(void); diff --git a/drivers/net/netdevsim/sdev.c b/drivers/net/netdevsim/sdev.c new file mode 100644 index 000000000000..6712da3340d6 --- /dev/null +++ b/drivers/net/netdevsim/sdev.c @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Mellanox Technologies. All rights reserved */ + +#include <linux/debugfs.h> +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/slab.h> + +#include "netdevsim.h" + +static struct dentry *nsim_sdev_ddir; + +static u32 nsim_sdev_id; + +struct netdevsim_shared_dev *nsim_sdev_get(struct netdevsim *joinns) +{ + struct netdevsim_shared_dev *sdev; + char sdev_ddir_name[10]; + int err; + + if (joinns) { + if (WARN_ON(!joinns->sdev)) + return ERR_PTR(-EINVAL); + sdev = joinns->sdev; + sdev->refcnt++; + return sdev; + } + + sdev = kzalloc(sizeof(*sdev), GFP_KERNEL); + if (!sdev) + return ERR_PTR(-ENOMEM); + sdev->refcnt = 1; + sdev->switch_id = nsim_sdev_id++; + + sprintf(sdev_ddir_name, "%u", sdev->switch_id); + sdev->ddir = debugfs_create_dir(sdev_ddir_name, nsim_sdev_ddir); + if (IS_ERR_OR_NULL(sdev->ddir)) { + err = PTR_ERR_OR_ZERO(sdev->ddir) ?: -EINVAL; + goto err_sdev_free; + } + + return sdev; + +err_sdev_free: + nsim_sdev_id--; + kfree(sdev); + return ERR_PTR(err); +} + +void nsim_sdev_put(struct netdevsim_shared_dev *sdev) +{ + if (--sdev->refcnt) + return; + debugfs_remove_recursive(sdev->ddir); + kfree(sdev); +} + +int nsim_sdev_init(void) +{ + nsim_sdev_ddir = debugfs_create_dir(DRV_NAME "_sdev", NULL); + if (IS_ERR_OR_NULL(nsim_sdev_ddir)) + return -ENOMEM; + return 0; +} + +void nsim_sdev_exit(void) +{ + debugfs_remove_recursive(nsim_sdev_ddir); +} diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 520657945b82..d6299710d634 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -76,6 +76,17 @@ config MDIO_BUS_MUX_GPIO several child MDIO busses to a parent bus. Child bus selection is under the control of GPIO lines. +config MDIO_BUS_MUX_MESON_G12A + tristate "Amlogic G12a based MDIO bus multiplexer" + depends on ARCH_MESON || COMPILE_TEST + depends on OF_MDIO && HAS_IOMEM && COMMON_CLK + select MDIO_BUS_MUX + default m if ARCH_MESON + help + This module provides a driver for the MDIO multiplexer/glue of + the amlogic g12a SoC. The multiplexers connects either the external + or the internal MDIO bus to the parent bus. 
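For readers unfamiliar with these glue drivers: an MDIO bus multiplexer registers with the common mdio-mux framework, which creates one child mii_bus per DT child node and calls back into the driver whenever accesses must be routed to a different child bus. Below is a minimal sketch of that pattern under stated assumptions: g12a_mux_priv, SEL_REG, and SEL_INTERNAL are hypothetical names and register layout, not the actual mdio-mux-meson-g12a.c internals, while mdio_mux_init() and mdio_mux_uninit() are the real framework entry points.

#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/mdio-mux.h>
#include <linux/platform_device.h>

#define SEL_REG		0x0	/* hypothetical bus-select glue register */
#define SEL_INTERNAL	BIT(0)	/* hypothetical "internal PHY bus" bit */

struct g12a_mux_priv {
	void __iomem *regs;
	void *mux_handle;
};

/* Invoked by the mdio-mux core before it touches a child bus. */
static int g12a_mdio_switch_fn(int current_child, int desired_child,
			       void *data)
{
	struct g12a_mux_priv *priv = data;

	if (current_child == desired_child)
		return 0;

	writel(desired_child ? SEL_INTERNAL : 0, priv->regs + SEL_REG);
	return 0;
}

static int g12a_mdio_mux_probe(struct platform_device *pdev)
{
	struct g12a_mux_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->regs))
		return PTR_ERR(priv->regs);

	/* One child mii_bus is created per DT child node; the core routes
	 * accesses through g12a_mdio_switch_fn().
	 */
	return mdio_mux_init(&pdev->dev, pdev->dev.of_node,
			     g12a_mdio_switch_fn, &priv->mux_handle,
			     priv, NULL);
}

On remove, such a driver would undo this with mdio_mux_uninit(priv->mux_handle); the actual G12A driver additionally has to manage the clocks implied by the COMMON_CLK dependency above.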
+ config MDIO_BUS_MUX_MMIOREG tristate "MMIO device-controlled MDIO bus multiplexers" depends on OF_MDIO && HAS_IOMEM @@ -273,13 +284,13 @@ config BCM87XX_PHY Currently supports the BCM8706 and BCM8727 10G Ethernet PHYs. config BCM_CYGNUS_PHY - tristate "Broadcom Cygnus SoC internal PHY" - depends on ARCH_BCM_CYGNUS || COMPILE_TEST + tristate "Broadcom Cygnus/Omega SoC internal PHY" + depends on ARCH_BCM_IPROC || COMPILE_TEST depends on MDIO_BCM_IPROC select BCM_NET_PHYLIB ---help--- This PHY driver is for the 1G internal PHYs of the Broadcom - Cygnus Family SoC. + Cygnus and Omega Family SoC. Currently supports internal PHY's used in the BCM11300, BCM11320, BCM11350, BCM11360, BCM58300, BCM58302, @@ -397,7 +408,7 @@ config MICROCHIP_T1_PHY config MICROSEMI_PHY tristate "Microsemi PHYs" ---help--- - Currently supports VSC8530, VSC8531, VSC8540 and VSC8541 PHYs + Currently supports VSC8514, VSC8530, VSC8531, VSC8540 and VSC8541 PHYs config NATIONAL_PHY tristate "National Semiconductor PHYs" diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index ece5dae67174..27d7f9f3b0de 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -28,6 +28,7 @@ obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o obj-$(CONFIG_MDIO_BUS_MUX_BCM_IPROC) += mdio-mux-bcm-iproc.o obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o +obj-$(CONFIG_MDIO_BUS_MUX_MESON_G12A) += mdio-mux-meson-g12a.o obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o obj-$(CONFIG_MDIO_BUS_MUX_MULTIPLEXER) += mdio-mux-multiplexer.o obj-$(CONFIG_MDIO_CAVIUM) += mdio-cavium.o diff --git a/drivers/net/phy/amd.c b/drivers/net/phy/amd.c index 65b4b0960b1e..eef35f8c8d45 100644 --- a/drivers/net/phy/amd.c +++ b/drivers/net/phy/amd.c @@ -60,7 +60,7 @@ static struct phy_driver am79c_driver[] = { { .phy_id = PHY_ID_AM79C874, .name = "AM79C874", .phy_id_mask = 0xfffffff0, - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .config_init = am79c_config_init, .ack_interrupt = am79c_ack_interrupt, .config_intr = am79c_config_intr, diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c index 37218e5d7cc9..eed4fe3d871f 100644 --- a/drivers/net/phy/aquantia_main.c +++ b/drivers/net/phy/aquantia_main.c @@ -10,6 +10,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/delay.h> +#include <linux/bitfield.h> #include <linux/phy.h> #include "aquantia.h" @@ -22,20 +23,33 @@ #define PHY_ID_AQCS109 0x03a1b5c2 #define PHY_ID_AQR405 0x03a1b4b0 +#define MDIO_PHYXS_VEND_IF_STATUS 0xe812 +#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK GENMASK(7, 3) +#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_KR 0 +#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_XFI 2 +#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_SGMII 6 +#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_OCSGMII 10 + #define MDIO_AN_VEND_PROV 0xc400 #define MDIO_AN_VEND_PROV_1000BASET_FULL BIT(15) #define MDIO_AN_VEND_PROV_1000BASET_HALF BIT(14) +#define MDIO_AN_VEND_PROV_DOWNSHIFT_EN BIT(4) +#define MDIO_AN_VEND_PROV_DOWNSHIFT_MASK GENMASK(3, 0) +#define MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT 4 #define MDIO_AN_TX_VEND_STATUS1 0xc800 -#define MDIO_AN_TX_VEND_STATUS1_10BASET (0x0 << 1) -#define MDIO_AN_TX_VEND_STATUS1_100BASETX (0x1 << 1) -#define MDIO_AN_TX_VEND_STATUS1_1000BASET (0x2 << 1) -#define MDIO_AN_TX_VEND_STATUS1_10GBASET (0x3 << 1) -#define MDIO_AN_TX_VEND_STATUS1_2500BASET (0x4 << 1) -#define MDIO_AN_TX_VEND_STATUS1_5000BASET (0x5 << 1) -#define MDIO_AN_TX_VEND_STATUS1_RATE_MASK (0x7 << 1) +#define 
MDIO_AN_TX_VEND_STATUS1_RATE_MASK GENMASK(3, 1) +#define MDIO_AN_TX_VEND_STATUS1_10BASET 0 +#define MDIO_AN_TX_VEND_STATUS1_100BASETX 1 +#define MDIO_AN_TX_VEND_STATUS1_1000BASET 2 +#define MDIO_AN_TX_VEND_STATUS1_10GBASET 3 +#define MDIO_AN_TX_VEND_STATUS1_2500BASET 4 +#define MDIO_AN_TX_VEND_STATUS1_5000BASET 5 #define MDIO_AN_TX_VEND_STATUS1_FULL_DUPLEX BIT(0) +#define MDIO_AN_TX_VEND_INT_STATUS1 0xcc00 +#define MDIO_AN_TX_VEND_INT_STATUS1_DOWNSHIFT BIT(1) + #define MDIO_AN_TX_VEND_INT_STATUS2 0xcc01 #define MDIO_AN_TX_VEND_INT_MASK2 0xd401 @@ -44,8 +58,42 @@ #define MDIO_AN_RX_LP_STAT1 0xe820 #define MDIO_AN_RX_LP_STAT1_1000BASET_FULL BIT(15) #define MDIO_AN_RX_LP_STAT1_1000BASET_HALF BIT(14) +#define MDIO_AN_RX_LP_STAT1_SHORT_REACH BIT(13) +#define MDIO_AN_RX_LP_STAT1_AQRATE_DOWNSHIFT BIT(12) +#define MDIO_AN_RX_LP_STAT1_AQ_PHY BIT(2) + +#define MDIO_AN_RX_LP_STAT4 0xe823 +#define MDIO_AN_RX_LP_STAT4_FW_MAJOR GENMASK(15, 8) +#define MDIO_AN_RX_LP_STAT4_FW_MINOR GENMASK(7, 0) + +#define MDIO_AN_RX_VEND_STAT3 0xe832 +#define MDIO_AN_RX_VEND_STAT3_AFR BIT(0) + +/* MDIO_MMD_C22EXT */ +#define MDIO_C22EXT_STAT_SGMII_RX_GOOD_FRAMES 0xd292 +#define MDIO_C22EXT_STAT_SGMII_RX_BAD_FRAMES 0xd294 +#define MDIO_C22EXT_STAT_SGMII_RX_FALSE_CARRIER 0xd297 +#define MDIO_C22EXT_STAT_SGMII_TX_GOOD_FRAMES 0xd313 +#define MDIO_C22EXT_STAT_SGMII_TX_BAD_FRAMES 0xd315 +#define MDIO_C22EXT_STAT_SGMII_TX_FALSE_CARRIER 0xd317 +#define MDIO_C22EXT_STAT_SGMII_TX_COLLISIONS 0xd318 +#define MDIO_C22EXT_STAT_SGMII_TX_LINE_COLLISIONS 0xd319 +#define MDIO_C22EXT_STAT_SGMII_TX_FRAME_ALIGN_ERR 0xd31a +#define MDIO_C22EXT_STAT_SGMII_TX_RUNT_FRAMES 0xd31b /* Vendor specific 1, MDIO_MMD_VEND1 */ +#define VEND1_GLOBAL_FW_ID 0x0020 +#define VEND1_GLOBAL_FW_ID_MAJOR GENMASK(15, 8) +#define VEND1_GLOBAL_FW_ID_MINOR GENMASK(7, 0) + +#define VEND1_GLOBAL_RSVD_STAT1 0xc885 +#define VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID GENMASK(7, 4) +#define VEND1_GLOBAL_RSVD_STAT1_PROV_ID GENMASK(3, 0) + +#define VEND1_GLOBAL_RSVD_STAT9 0xc88d +#define VEND1_GLOBAL_RSVD_STAT9_MODE GENMASK(7, 0) +#define VEND1_GLOBAL_RSVD_STAT9_1000BT2 0x23 + #define VEND1_GLOBAL_INT_STD_STATUS 0xfc00 #define VEND1_GLOBAL_INT_VEND_STATUS 0xfc01 @@ -72,6 +120,88 @@ #define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL2 BIT(1) #define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 BIT(0) +struct aqr107_hw_stat { + const char *name; + int reg; + int size; +}; + +#define SGMII_STAT(n, r, s) { n, MDIO_C22EXT_STAT_SGMII_ ## r, s } +static const struct aqr107_hw_stat aqr107_hw_stats[] = { + SGMII_STAT("sgmii_rx_good_frames", RX_GOOD_FRAMES, 26), + SGMII_STAT("sgmii_rx_bad_frames", RX_BAD_FRAMES, 26), + SGMII_STAT("sgmii_rx_false_carrier_events", RX_FALSE_CARRIER, 8), + SGMII_STAT("sgmii_tx_good_frames", TX_GOOD_FRAMES, 26), + SGMII_STAT("sgmii_tx_bad_frames", TX_BAD_FRAMES, 26), + SGMII_STAT("sgmii_tx_false_carrier_events", TX_FALSE_CARRIER, 8), + SGMII_STAT("sgmii_tx_collisions", TX_COLLISIONS, 8), + SGMII_STAT("sgmii_tx_line_collisions", TX_LINE_COLLISIONS, 8), + SGMII_STAT("sgmii_tx_frame_alignment_err", TX_FRAME_ALIGN_ERR, 16), + SGMII_STAT("sgmii_tx_runt_frames", TX_RUNT_FRAMES, 22), +}; +#define AQR107_SGMII_STAT_SZ ARRAY_SIZE(aqr107_hw_stats) + +struct aqr107_priv { + u64 sgmii_stats[AQR107_SGMII_STAT_SZ]; +}; + +static int aqr107_get_sset_count(struct phy_device *phydev) +{ + return AQR107_SGMII_STAT_SZ; +} + +static void aqr107_get_strings(struct phy_device *phydev, u8 *data) +{ + int i; + + for (i = 0; i < AQR107_SGMII_STAT_SZ; i++) + strscpy(data + i * ETH_GSTRING_LEN, 
aqr107_hw_stats[i].name, + ETH_GSTRING_LEN); +} + +static u64 aqr107_get_stat(struct phy_device *phydev, int index) +{ + const struct aqr107_hw_stat *stat = aqr107_hw_stats + index; + int len_l = min(stat->size, 16); + int len_h = stat->size - len_l; + u64 ret; + int val; + + val = phy_read_mmd(phydev, MDIO_MMD_C22EXT, stat->reg); + if (val < 0) + return U64_MAX; + + ret = val & GENMASK(len_l - 1, 0); + if (len_h) { + val = phy_read_mmd(phydev, MDIO_MMD_C22EXT, stat->reg + 1); + if (val < 0) + return U64_MAX; + + ret += (val & GENMASK(len_h - 1, 0)) << 16; + } + + return ret; +} + +static void aqr107_get_stats(struct phy_device *phydev, + struct ethtool_stats *stats, u64 *data) +{ + struct aqr107_priv *priv = phydev->priv; + u64 val; + int i; + + for (i = 0; i < AQR107_SGMII_STAT_SZ; i++) { + val = aqr107_get_stat(phydev, i); + if (val == U64_MAX) + phydev_err(phydev, "Reading HW Statistics failed for %s\n", + aqr107_hw_stats[i].name); + else + priv->sgmii_stats[i] += val; + + data[i] = priv->sgmii_stats[i]; + } +} + static int aqr_config_aneg(struct phy_device *phydev) { bool changed = false; @@ -112,41 +242,22 @@ static int aqr_config_aneg(struct phy_device *phydev) static int aqr_config_intr(struct phy_device *phydev) { + bool en = phydev->interrupts == PHY_INTERRUPT_ENABLED; int err; - if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { - err = phy_write_mmd(phydev, MDIO_MMD_AN, - MDIO_AN_TX_VEND_INT_MASK2, - MDIO_AN_TX_VEND_INT_MASK2_LINK); - if (err < 0) - return err; - - err = phy_write_mmd(phydev, MDIO_MMD_VEND1, - VEND1_GLOBAL_INT_STD_MASK, - VEND1_GLOBAL_INT_STD_MASK_ALL); - if (err < 0) - return err; - - err = phy_write_mmd(phydev, MDIO_MMD_VEND1, - VEND1_GLOBAL_INT_VEND_MASK, - VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 | - VEND1_GLOBAL_INT_VEND_MASK_AN); - } else { - err = phy_write_mmd(phydev, MDIO_MMD_AN, - MDIO_AN_TX_VEND_INT_MASK2, 0); - if (err < 0) - return err; - - err = phy_write_mmd(phydev, MDIO_MMD_VEND1, - VEND1_GLOBAL_INT_STD_MASK, 0); - if (err < 0) - return err; - - err = phy_write_mmd(phydev, MDIO_MMD_VEND1, - VEND1_GLOBAL_INT_VEND_MASK, 0); - } + err = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_INT_MASK2, + en ? MDIO_AN_TX_VEND_INT_MASK2_LINK : 0); + if (err < 0) + return err; + + err = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_INT_STD_MASK, + en ? VEND1_GLOBAL_INT_STD_MASK_ALL : 0); + if (err < 0) + return err; - return err; + return phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_INT_VEND_MASK, + en ? 
VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 | + VEND1_GLOBAL_INT_VEND_MASK_AN : 0); } static int aqr_ack_interrupt(struct phy_device *phydev) @@ -178,21 +289,315 @@ static int aqr_read_status(struct phy_device *phydev) return genphy_c45_read_status(phydev); } +static int aqr107_read_downshift_event(struct phy_device *phydev) +{ + int val; + + val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_INT_STATUS1); + if (val < 0) + return val; + + return !!(val & MDIO_AN_TX_VEND_INT_STATUS1_DOWNSHIFT); +} + +static int aqr107_read_rate(struct phy_device *phydev) +{ + int val; + + val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_STATUS1); + if (val < 0) + return val; + + switch (FIELD_GET(MDIO_AN_TX_VEND_STATUS1_RATE_MASK, val)) { + case MDIO_AN_TX_VEND_STATUS1_10BASET: + phydev->speed = SPEED_10; + break; + case MDIO_AN_TX_VEND_STATUS1_100BASETX: + phydev->speed = SPEED_100; + break; + case MDIO_AN_TX_VEND_STATUS1_1000BASET: + phydev->speed = SPEED_1000; + break; + case MDIO_AN_TX_VEND_STATUS1_2500BASET: + phydev->speed = SPEED_2500; + break; + case MDIO_AN_TX_VEND_STATUS1_5000BASET: + phydev->speed = SPEED_5000; + break; + case MDIO_AN_TX_VEND_STATUS1_10GBASET: + phydev->speed = SPEED_10000; + break; + default: + phydev->speed = SPEED_UNKNOWN; + break; + } + + if (val & MDIO_AN_TX_VEND_STATUS1_FULL_DUPLEX) + phydev->duplex = DUPLEX_FULL; + else + phydev->duplex = DUPLEX_HALF; + + return 0; +} + +static int aqr107_read_status(struct phy_device *phydev) +{ + int val, ret; + + ret = aqr_read_status(phydev); + if (ret) + return ret; + + if (!phydev->link || phydev->autoneg == AUTONEG_DISABLE) + return 0; + + val = phy_read_mmd(phydev, MDIO_MMD_PHYXS, MDIO_PHYXS_VEND_IF_STATUS); + if (val < 0) + return val; + + switch (FIELD_GET(MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK, val)) { + case MDIO_PHYXS_VEND_IF_STATUS_TYPE_KR: + case MDIO_PHYXS_VEND_IF_STATUS_TYPE_XFI: + phydev->interface = PHY_INTERFACE_MODE_10GKR; + break; + case MDIO_PHYXS_VEND_IF_STATUS_TYPE_SGMII: + phydev->interface = PHY_INTERFACE_MODE_SGMII; + break; + case MDIO_PHYXS_VEND_IF_STATUS_TYPE_OCSGMII: + phydev->interface = PHY_INTERFACE_MODE_2500BASEX; + break; + default: + phydev->interface = PHY_INTERFACE_MODE_NA; + break; + } + + val = aqr107_read_downshift_event(phydev); + if (val <= 0) + return val; + + phydev_warn(phydev, "Downshift occurred! Cabling may be defective.\n"); + + /* Read downshifted rate from vendor register */ + return aqr107_read_rate(phydev); +} + +static int aqr107_get_downshift(struct phy_device *phydev, u8 *data) +{ + int val, cnt, enable; + + val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_VEND_PROV); + if (val < 0) + return val; + + enable = FIELD_GET(MDIO_AN_VEND_PROV_DOWNSHIFT_EN, val); + cnt = FIELD_GET(MDIO_AN_VEND_PROV_DOWNSHIFT_MASK, val); + + *data = enable && cnt ? 
cnt : DOWNSHIFT_DEV_DISABLE; + + return 0; +} + +static int aqr107_set_downshift(struct phy_device *phydev, u8 cnt) +{ + int val = 0; + + if (!FIELD_FIT(MDIO_AN_VEND_PROV_DOWNSHIFT_MASK, cnt)) + return -E2BIG; + + if (cnt != DOWNSHIFT_DEV_DISABLE) { + val = MDIO_AN_VEND_PROV_DOWNSHIFT_EN; + val |= FIELD_PREP(MDIO_AN_VEND_PROV_DOWNSHIFT_MASK, cnt); + } + + return phy_modify_mmd(phydev, MDIO_MMD_AN, MDIO_AN_VEND_PROV, + MDIO_AN_VEND_PROV_DOWNSHIFT_EN | + MDIO_AN_VEND_PROV_DOWNSHIFT_MASK, val); +} + +static int aqr107_get_tunable(struct phy_device *phydev, + struct ethtool_tunable *tuna, void *data) +{ + switch (tuna->id) { + case ETHTOOL_PHY_DOWNSHIFT: + return aqr107_get_downshift(phydev, data); + default: + return -EOPNOTSUPP; + } +} + +static int aqr107_set_tunable(struct phy_device *phydev, + struct ethtool_tunable *tuna, const void *data) +{ + switch (tuna->id) { + case ETHTOOL_PHY_DOWNSHIFT: + return aqr107_set_downshift(phydev, *(const u8 *)data); + default: + return -EOPNOTSUPP; + } +} + +/* If we configure settings whilst firmware is still initializing the chip, + * then these settings may be overwritten. Therefore make sure chip + * initialization has completed. Use presence of the firmware ID as + * indicator for initialization having completed. + * The chip also provides a "reset completed" bit, but it's cleared after + * read. Therefore function would time out if called again. + */ +static int aqr107_wait_reset_complete(struct phy_device *phydev) +{ + int val, retries = 100; + + do { + val = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_FW_ID); + if (val < 0) + return val; + msleep(20); + } while (!val && --retries); + + return val ? 0 : -ETIMEDOUT; +} + +static void aqr107_chip_info(struct phy_device *phydev) +{ + u8 fw_major, fw_minor, build_id, prov_id; + int val; + + val = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_FW_ID); + if (val < 0) + return; + + fw_major = FIELD_GET(VEND1_GLOBAL_FW_ID_MAJOR, val); + fw_minor = FIELD_GET(VEND1_GLOBAL_FW_ID_MINOR, val); + + val = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_RSVD_STAT1); + if (val < 0) + return; + + build_id = FIELD_GET(VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID, val); + prov_id = FIELD_GET(VEND1_GLOBAL_RSVD_STAT1_PROV_ID, val); + + phydev_dbg(phydev, "FW %u.%u, Build %u, Provisioning %u\n", + fw_major, fw_minor, build_id, prov_id); +} + +static int aqr107_config_init(struct phy_device *phydev) +{ + int ret; + + /* Check that the PHY interface type is compatible */ + if (phydev->interface != PHY_INTERFACE_MODE_SGMII && + phydev->interface != PHY_INTERFACE_MODE_2500BASEX && + phydev->interface != PHY_INTERFACE_MODE_10GKR) + return -ENODEV; + + ret = aqr107_wait_reset_complete(phydev); + if (!ret) + aqr107_chip_info(phydev); + + /* ensure that a latched downshift event is cleared */ + aqr107_read_downshift_event(phydev); + + return aqr107_set_downshift(phydev, MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT); +} + static int aqcs109_config_init(struct phy_device *phydev) { + int ret; + + /* Check that the PHY interface type is compatible */ + if (phydev->interface != PHY_INTERFACE_MODE_SGMII && + phydev->interface != PHY_INTERFACE_MODE_2500BASEX) + return -ENODEV; + + ret = aqr107_wait_reset_complete(phydev); + if (!ret) + aqr107_chip_info(phydev); + /* AQCS109 belongs to a chip family partially supporting 10G and 5G. * PMA speed ability bits are the same for all members of the family, * AQCS109 however supports speeds up to 2.5G only. 
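	 * Clamping the maximum speed trims the supported and advertised
	 * linkmode masks, so phylib never tries to negotiate the 5G/10G
	 * modes that the shared PMA ability bits would otherwise suggest.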
*/ - return phy_set_max_speed(phydev, SPEED_2500); + ret = phy_set_max_speed(phydev, SPEED_2500); + if (ret) + return ret; + + /* ensure that a latched downshift event is cleared */ + aqr107_read_downshift_event(phydev); + + return aqr107_set_downshift(phydev, MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT); +} + +static void aqr107_link_change_notify(struct phy_device *phydev) +{ + u8 fw_major, fw_minor; + bool downshift, short_reach, afr; + int mode, val; + + if (phydev->state != PHY_RUNNING || phydev->autoneg == AUTONEG_DISABLE) + return; + + val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_RX_LP_STAT1); + /* call failed or link partner is no Aquantia PHY */ + if (val < 0 || !(val & MDIO_AN_RX_LP_STAT1_AQ_PHY)) + return; + + short_reach = val & MDIO_AN_RX_LP_STAT1_SHORT_REACH; + downshift = val & MDIO_AN_RX_LP_STAT1_AQRATE_DOWNSHIFT; + + val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_RX_LP_STAT4); + if (val < 0) + return; + + fw_major = FIELD_GET(MDIO_AN_RX_LP_STAT4_FW_MAJOR, val); + fw_minor = FIELD_GET(MDIO_AN_RX_LP_STAT4_FW_MINOR, val); + + val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_RX_VEND_STAT3); + if (val < 0) + return; + + afr = val & MDIO_AN_RX_VEND_STAT3_AFR; + + phydev_dbg(phydev, "Link partner is Aquantia PHY, FW %u.%u%s%s%s\n", + fw_major, fw_minor, + short_reach ? ", short reach mode" : "", + downshift ? ", fast-retrain downshift advertised" : "", + afr ? ", fast reframe advertised" : ""); + + val = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_RSVD_STAT9); + if (val < 0) + return; + + mode = FIELD_GET(VEND1_GLOBAL_RSVD_STAT9_MODE, val); + if (mode == VEND1_GLOBAL_RSVD_STAT9_1000BT2) + phydev_info(phydev, "Aquantia 1000Base-T2 mode active\n"); +} + +static int aqr107_suspend(struct phy_device *phydev) +{ + return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1, + MDIO_CTRL1_LPOWER); +} + +static int aqr107_resume(struct phy_device *phydev) +{ + return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1, + MDIO_CTRL1_LPOWER); +} + +static int aqr107_probe(struct phy_device *phydev) +{ + phydev->priv = devm_kzalloc(&phydev->mdio.dev, + sizeof(struct aqr107_priv), GFP_KERNEL); + if (!phydev->priv) + return -ENOMEM; + + return aqr_hwmon_probe(phydev); } static struct phy_driver aqr_driver[] = { { PHY_ID_MATCH_MODEL(PHY_ID_AQ1202), .name = "Aquantia AQ1202", - .aneg_done = genphy_c45_aneg_done, - .get_features = genphy_c45_pma_read_abilities, .config_aneg = aqr_config_aneg, .config_intr = aqr_config_intr, .ack_interrupt = aqr_ack_interrupt, @@ -201,8 +606,6 @@ static struct phy_driver aqr_driver[] = { { PHY_ID_MATCH_MODEL(PHY_ID_AQ2104), .name = "Aquantia AQ2104", - .aneg_done = genphy_c45_aneg_done, - .get_features = genphy_c45_pma_read_abilities, .config_aneg = aqr_config_aneg, .config_intr = aqr_config_intr, .ack_interrupt = aqr_ack_interrupt, @@ -211,8 +614,6 @@ static struct phy_driver aqr_driver[] = { { PHY_ID_MATCH_MODEL(PHY_ID_AQR105), .name = "Aquantia AQR105", - .aneg_done = genphy_c45_aneg_done, - .get_features = genphy_c45_pma_read_abilities, .config_aneg = aqr_config_aneg, .config_intr = aqr_config_intr, .ack_interrupt = aqr_ack_interrupt, @@ -221,8 +622,6 @@ static struct phy_driver aqr_driver[] = { { PHY_ID_MATCH_MODEL(PHY_ID_AQR106), .name = "Aquantia AQR106", - .aneg_done = genphy_c45_aneg_done, - .get_features = genphy_c45_pma_read_abilities, .config_aneg = aqr_config_aneg, .config_intr = aqr_config_intr, .ack_interrupt = aqr_ack_interrupt, @@ -231,31 +630,42 @@ static struct phy_driver aqr_driver[] = { { PHY_ID_MATCH_MODEL(PHY_ID_AQR107), .name = "Aquantia 
AQR107", - .aneg_done = genphy_c45_aneg_done, - .get_features = genphy_c45_pma_read_abilities, - .probe = aqr_hwmon_probe, + .probe = aqr107_probe, + .config_init = aqr107_config_init, .config_aneg = aqr_config_aneg, .config_intr = aqr_config_intr, .ack_interrupt = aqr_ack_interrupt, - .read_status = aqr_read_status, + .read_status = aqr107_read_status, + .get_tunable = aqr107_get_tunable, + .set_tunable = aqr107_set_tunable, + .suspend = aqr107_suspend, + .resume = aqr107_resume, + .get_sset_count = aqr107_get_sset_count, + .get_strings = aqr107_get_strings, + .get_stats = aqr107_get_stats, + .link_change_notify = aqr107_link_change_notify, }, { PHY_ID_MATCH_MODEL(PHY_ID_AQCS109), .name = "Aquantia AQCS109", - .aneg_done = genphy_c45_aneg_done, - .get_features = genphy_c45_pma_read_abilities, - .probe = aqr_hwmon_probe, + .probe = aqr107_probe, .config_init = aqcs109_config_init, .config_aneg = aqr_config_aneg, .config_intr = aqr_config_intr, .ack_interrupt = aqr_ack_interrupt, - .read_status = aqr_read_status, + .read_status = aqr107_read_status, + .get_tunable = aqr107_get_tunable, + .set_tunable = aqr107_set_tunable, + .suspend = aqr107_suspend, + .resume = aqr107_resume, + .get_sset_count = aqr107_get_sset_count, + .get_strings = aqr107_get_strings, + .get_stats = aqr107_get_stats, + .link_change_notify = aqr107_link_change_notify, }, { PHY_ID_MATCH_MODEL(PHY_ID_AQR405), .name = "Aquantia AQR405", - .aneg_done = genphy_c45_aneg_done, - .get_features = genphy_c45_pma_read_abilities, .config_aneg = aqr_config_aneg, .config_intr = aqr_config_intr, .ack_interrupt = aqr_ack_interrupt, diff --git a/drivers/net/phy/asix.c b/drivers/net/phy/asix.c index f14ba5366b91..79bf7ef1fcfd 100644 --- a/drivers/net/phy/asix.c +++ b/drivers/net/phy/asix.c @@ -43,7 +43,7 @@ static struct phy_driver asix_driver[] = { { .phy_id = PHY_ID_ASIX_AX88796B, .name = "Asix Electronics AX88796B", .phy_id_mask = 0xfffffff0, - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .soft_reset = asix_soft_reset, } }; diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index f3e96191eb6f..222ccd9ecfce 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -324,8 +324,6 @@ static int at803x_config_intr(struct phy_device *phydev) static void at803x_link_change_notify(struct phy_device *phydev) { - struct at803x_priv *priv = phydev->priv; - /* * Conduct a hardware reset for AT8030 every time a link loss is * signalled. This is necessary to circumvent a hardware bug that @@ -333,25 +331,19 @@ static void at803x_link_change_notify(struct phy_device *phydev) * in the FIFO. In such cases, the FIFO enters an error mode it * cannot recover from by software. 
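	 * The workaround is now attempted only when a reset GPIO is wired
	 * up (see the phydev->mdio.reset_gpio check below), since only a
	 * hardware reset can clear the stuck FIFO condition.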
*/ - if (phydev->state == PHY_NOLINK) { - if (phydev->mdio.reset && !priv->phy_reset) { - struct at803x_context context; + if (phydev->state == PHY_NOLINK && phydev->mdio.reset_gpio) { + struct at803x_context context; - at803x_context_save(phydev, &context); + at803x_context_save(phydev, &context); - phy_device_reset(phydev, 1); - msleep(1); - phy_device_reset(phydev, 0); - msleep(1); + phy_device_reset(phydev, 1); + msleep(1); + phy_device_reset(phydev, 0); + msleep(1); - at803x_context_restore(phydev, &context); + at803x_context_restore(phydev, &context); - phydev_dbg(phydev, "%s(): phy was reset\n", - __func__); - priv->phy_reset = true; - } - } else { - priv->phy_reset = false; + phydev_dbg(phydev, "%s(): phy was reset\n", __func__); } } @@ -397,7 +389,7 @@ static struct phy_driver at803x_driver[] = { .get_wol = at803x_get_wol, .suspend = at803x_suspend, .resume = at803x_resume, - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .ack_interrupt = at803x_ack_interrupt, .config_intr = at803x_config_intr, }, { @@ -412,7 +404,7 @@ static struct phy_driver at803x_driver[] = { .get_wol = at803x_get_wol, .suspend = at803x_suspend, .resume = at803x_resume, - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .ack_interrupt = at803x_ack_interrupt, .config_intr = at803x_config_intr, }, { @@ -426,7 +418,7 @@ static struct phy_driver at803x_driver[] = { .get_wol = at803x_get_wol, .suspend = at803x_suspend, .resume = at803x_resume, - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .aneg_done = at803x_aneg_done, .ack_interrupt = &at803x_ack_interrupt, .config_intr = &at803x_config_intr, diff --git a/drivers/net/phy/bcm-cygnus.c b/drivers/net/phy/bcm-cygnus.c index ab8e12922bf9..9ccf28b0a04d 100644 --- a/drivers/net/phy/bcm-cygnus.c +++ b/drivers/net/phy/bcm-cygnus.c @@ -10,6 +10,10 @@ #include <linux/netdevice.h> #include <linux/phy.h> +struct bcm_omega_phy_priv { + u64 *stats; +}; + /* Broadcom Cygnus Phy specific registers */ #define MII_BCM_CYGNUS_AFE_VDAC_ICTRL_0 0x91E5 /* VDAL Control register */ @@ -121,21 +125,162 @@ static int bcm_cygnus_resume(struct phy_device *phydev) return genphy_config_aneg(phydev); } +static int bcm_omega_config_init(struct phy_device *phydev) +{ + u8 count, rev; + int ret = 0; + + rev = phydev->phy_id & ~phydev->drv->phy_id_mask; + + pr_info_once("%s: %s PHY revision: 0x%02x\n", + phydev_name(phydev), phydev->drv->name, rev); + + /* Dummy read to a register to workaround an issue upon reset where the + * internal inverter may not allow the first MDIO transaction to pass + * the MDIO management controller and make us return 0xffff for such + * reads. + */ + phy_read(phydev, MII_BMSR); + + switch (rev) { + case 0x00: + ret = bcm_phy_28nm_a0b0_afe_config_init(phydev); + break; + default: + break; + } + + if (ret) + return ret; + + ret = bcm_phy_downshift_get(phydev, &count); + if (ret) + return ret; + + /* Only enable EEE if Wirespeed/downshift is disabled */ + ret = bcm_phy_set_eee(phydev, count == DOWNSHIFT_DEV_DISABLE); + if (ret) + return ret; + + return bcm_phy_enable_apd(phydev, true); +} + +static int bcm_omega_resume(struct phy_device *phydev) +{ + int ret; + + /* Re-apply workarounds coming out suspend/resume */ + ret = bcm_omega_config_init(phydev); + if (ret) + return ret; + + /* 28nm Gigabit PHYs come out of reset without any half-duplex + * or "hub" compliant advertised mode, fix that. This does not + * cause any problems with the PHY library since genphy_config_aneg() + * gracefully handles auto-negotiated and forced modes. 
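+	 * (genphy_config_aneg() rebuilds the advertisement registers from
+	 * phydev->advertising, so the half-duplex modes reappear without
+	 * any driver-specific fixup.)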
+ */ + return genphy_config_aneg(phydev); +} + +static int bcm_omega_get_tunable(struct phy_device *phydev, + struct ethtool_tunable *tuna, void *data) +{ + switch (tuna->id) { + case ETHTOOL_PHY_DOWNSHIFT: + return bcm_phy_downshift_get(phydev, (u8 *)data); + default: + return -EOPNOTSUPP; + } +} + +static int bcm_omega_set_tunable(struct phy_device *phydev, + struct ethtool_tunable *tuna, + const void *data) +{ + u8 count = *(u8 *)data; + int ret; + + switch (tuna->id) { + case ETHTOOL_PHY_DOWNSHIFT: + ret = bcm_phy_downshift_set(phydev, count); + break; + default: + return -EOPNOTSUPP; + } + + if (ret) + return ret; + + /* Disable EEE advertisement since this prevents the PHY + * from successfully linking up, trigger auto-negotiation restart + * to let the MAC decide what to do. + */ + ret = bcm_phy_set_eee(phydev, count == DOWNSHIFT_DEV_DISABLE); + if (ret) + return ret; + + return genphy_restart_aneg(phydev); +} + +static void bcm_omega_get_phy_stats(struct phy_device *phydev, + struct ethtool_stats *stats, u64 *data) +{ + struct bcm_omega_phy_priv *priv = phydev->priv; + + bcm_phy_get_stats(phydev, priv->stats, stats, data); +} + +static int bcm_omega_probe(struct phy_device *phydev) +{ + struct bcm_omega_phy_priv *priv; + + priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + phydev->priv = priv; + + priv->stats = devm_kcalloc(&phydev->mdio.dev, + bcm_phy_get_sset_count(phydev), sizeof(u64), + GFP_KERNEL); + if (!priv->stats) + return -ENOMEM; + + return 0; +} + static struct phy_driver bcm_cygnus_phy_driver[] = { { .phy_id = PHY_ID_BCM_CYGNUS, .phy_id_mask = 0xfffffff0, .name = "Broadcom Cygnus PHY", - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .config_init = bcm_cygnus_config_init, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, .suspend = genphy_suspend, .resume = bcm_cygnus_resume, -} }; +}, { + .phy_id = PHY_ID_BCM_OMEGA, + .phy_id_mask = 0xfffffff0, + .name = "Broadcom Omega Combo GPHY", + /* PHY_GBIT_FEATURES */ + .flags = PHY_IS_INTERNAL, + .config_init = bcm_omega_config_init, + .suspend = genphy_suspend, + .resume = bcm_omega_resume, + .get_tunable = bcm_omega_get_tunable, + .set_tunable = bcm_omega_set_tunable, + .get_sset_count = bcm_phy_get_sset_count, + .get_strings = bcm_phy_get_strings, + .get_stats = bcm_omega_get_phy_stats, + .probe = bcm_omega_probe, +} +}; static struct mdio_device_id __maybe_unused bcm_cygnus_phy_tbl[] = { { PHY_ID_BCM_CYGNUS, 0xfffffff0, }, + { PHY_ID_BCM_OMEGA, 0xfffffff0, }, { } }; MODULE_DEVICE_TABLE(mdio, bcm_cygnus_phy_tbl); diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c index a75642051b8b..e0d3310957ff 100644 --- a/drivers/net/phy/bcm-phy-lib.c +++ b/drivers/net/phy/bcm-phy-lib.c @@ -371,6 +371,58 @@ void bcm_phy_get_stats(struct phy_device *phydev, u64 *shadow, } EXPORT_SYMBOL_GPL(bcm_phy_get_stats); +void bcm_phy_r_rc_cal_reset(struct phy_device *phydev) +{ + /* Reset R_CAL/RC_CAL Engine */ + bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0010); + + /* Disable Reset R_AL/RC_CAL Engine */ + bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0000); +} +EXPORT_SYMBOL_GPL(bcm_phy_r_rc_cal_reset); + +int bcm_phy_28nm_a0b0_afe_config_init(struct phy_device *phydev) +{ + /* Increase VCO range to prevent unlocking problem of PLL at low + * temp + */ + bcm_phy_write_misc(phydev, PLL_PLLCTRL_1, 0x0048); + + /* Change Ki to 011 */ + bcm_phy_write_misc(phydev, PLL_PLLCTRL_2, 0x021b); + + /* Disable loading of TVCO buffer to bandgap, set bandgap trim + * 
to 111 + */ + bcm_phy_write_misc(phydev, PLL_PLLCTRL_4, 0x0e20); + + /* Adjust bias current trim by -3 */ + bcm_phy_write_misc(phydev, DSP_TAP10, 0x690b); + + /* Switch to CORE_BASE1E */ + phy_write(phydev, MII_BRCM_CORE_BASE1E, 0xd); + + bcm_phy_r_rc_cal_reset(phydev); + + /* write AFE_RXCONFIG_0 */ + bcm_phy_write_misc(phydev, AFE_RXCONFIG_0, 0xeb19); + + /* write AFE_RXCONFIG_1 */ + bcm_phy_write_misc(phydev, AFE_RXCONFIG_1, 0x9a3f); + + /* write AFE_RX_LP_COUNTER */ + bcm_phy_write_misc(phydev, AFE_RX_LP_COUNTER, 0x7fc0); + + /* write AFE_HPF_TRIM_OTHERS */ + bcm_phy_write_misc(phydev, AFE_HPF_TRIM_OTHERS, 0x000b); + + /* write AFTE_TX_CONFIG */ + bcm_phy_write_misc(phydev, AFE_TX_CONFIG, 0x0800); + + return 0; +} +EXPORT_SYMBOL_GPL(bcm_phy_28nm_a0b0_afe_config_init); + MODULE_DESCRIPTION("Broadcom PHY Library"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Broadcom Corporation"); diff --git a/drivers/net/phy/bcm-phy-lib.h b/drivers/net/phy/bcm-phy-lib.h index 17faaefcfd60..5ecacb4e64f0 100644 --- a/drivers/net/phy/bcm-phy-lib.h +++ b/drivers/net/phy/bcm-phy-lib.h @@ -9,6 +9,24 @@ #include <linux/brcmphy.h> #include <linux/phy.h> +/* 28nm only register definitions */ +#define MISC_ADDR(base, channel) base, channel + +#define DSP_TAP10 MISC_ADDR(0x0a, 0) +#define PLL_PLLCTRL_1 MISC_ADDR(0x32, 1) +#define PLL_PLLCTRL_2 MISC_ADDR(0x32, 2) +#define PLL_PLLCTRL_4 MISC_ADDR(0x33, 0) + +#define AFE_RXCONFIG_0 MISC_ADDR(0x38, 0) +#define AFE_RXCONFIG_1 MISC_ADDR(0x38, 1) +#define AFE_RXCONFIG_2 MISC_ADDR(0x38, 2) +#define AFE_RX_LP_COUNTER MISC_ADDR(0x38, 3) +#define AFE_TX_CONFIG MISC_ADDR(0x39, 0) +#define AFE_VDCA_ICTRL_0 MISC_ADDR(0x39, 1) +#define AFE_VDAC_OTHERS_0 MISC_ADDR(0x39, 3) +#define AFE_HPF_TRIM_OTHERS MISC_ADDR(0x3a, 0) + + int bcm_phy_write_exp(struct phy_device *phydev, u16 reg, u16 val); int bcm_phy_read_exp(struct phy_device *phydev, u16 reg); @@ -45,5 +63,7 @@ int bcm_phy_get_sset_count(struct phy_device *phydev); void bcm_phy_get_strings(struct phy_device *phydev, u8 *data); void bcm_phy_get_stats(struct phy_device *phydev, u64 *shadow, struct ethtool_stats *stats, u64 *data); +void bcm_phy_r_rc_cal_reset(struct phy_device *phydev); +int bcm_phy_28nm_a0b0_afe_config_init(struct phy_device *phydev); #endif /* _LINUX_BCM_PHY_LIB_H */ diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c index 44e6cff419a0..23f1958ba6ad 100644 --- a/drivers/net/phy/bcm63xx.c +++ b/drivers/net/phy/bcm63xx.c @@ -64,7 +64,7 @@ static struct phy_driver bcm63xx_driver[] = { .phy_id = 0x00406000, .phy_id_mask = 0xfffffc00, .name = "Broadcom BCM63XX (1)", - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .flags = PHY_IS_INTERNAL, .config_init = bcm63xx_config_init, .ack_interrupt = bcm_phy_ack_intr, @@ -73,7 +73,7 @@ static struct phy_driver bcm63xx_driver[] = { /* same phy as above, with just a different OUI */ .phy_id = 0x002bdc00, .phy_id_mask = 0xfffffc00, - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .flags = PHY_IS_INTERNAL, .config_init = bcm63xx_config_init, .ack_interrupt = bcm_phy_ack_intr, diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c index b8415f8fae14..8fc33867e524 100644 --- a/drivers/net/phy/bcm7xxx.c +++ b/drivers/net/phy/bcm7xxx.c @@ -37,77 +37,10 @@ #define MII_BCM7XXX_SHD_3_TL4 0x23 #define MII_BCM7XXX_TL4_RST_MSK (BIT(2) | BIT(1)) -/* 28nm only register definitions */ -#define MISC_ADDR(base, channel) base, channel - -#define DSP_TAP10 MISC_ADDR(0x0a, 0) -#define PLL_PLLCTRL_1 MISC_ADDR(0x32, 1) -#define PLL_PLLCTRL_2 
MISC_ADDR(0x32, 2) -#define PLL_PLLCTRL_4 MISC_ADDR(0x33, 0) - -#define AFE_RXCONFIG_0 MISC_ADDR(0x38, 0) -#define AFE_RXCONFIG_1 MISC_ADDR(0x38, 1) -#define AFE_RXCONFIG_2 MISC_ADDR(0x38, 2) -#define AFE_RX_LP_COUNTER MISC_ADDR(0x38, 3) -#define AFE_TX_CONFIG MISC_ADDR(0x39, 0) -#define AFE_VDCA_ICTRL_0 MISC_ADDR(0x39, 1) -#define AFE_VDAC_OTHERS_0 MISC_ADDR(0x39, 3) -#define AFE_HPF_TRIM_OTHERS MISC_ADDR(0x3a, 0) - struct bcm7xxx_phy_priv { u64 *stats; }; -static void r_rc_cal_reset(struct phy_device *phydev) -{ - /* Reset R_CAL/RC_CAL Engine */ - bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0010); - - /* Disable Reset R_AL/RC_CAL Engine */ - bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0000); -} - -static int bcm7xxx_28nm_b0_afe_config_init(struct phy_device *phydev) -{ - /* Increase VCO range to prevent unlocking problem of PLL at low - * temp - */ - bcm_phy_write_misc(phydev, PLL_PLLCTRL_1, 0x0048); - - /* Change Ki to 011 */ - bcm_phy_write_misc(phydev, PLL_PLLCTRL_2, 0x021b); - - /* Disable loading of TVCO buffer to bandgap, set bandgap trim - * to 111 - */ - bcm_phy_write_misc(phydev, PLL_PLLCTRL_4, 0x0e20); - - /* Adjust bias current trim by -3 */ - bcm_phy_write_misc(phydev, DSP_TAP10, 0x690b); - - /* Switch to CORE_BASE1E */ - phy_write(phydev, MII_BRCM_CORE_BASE1E, 0xd); - - r_rc_cal_reset(phydev); - - /* write AFE_RXCONFIG_0 */ - bcm_phy_write_misc(phydev, AFE_RXCONFIG_0, 0xeb19); - - /* write AFE_RXCONFIG_1 */ - bcm_phy_write_misc(phydev, AFE_RXCONFIG_1, 0x9a3f); - - /* write AFE_RX_LP_COUNTER */ - bcm_phy_write_misc(phydev, AFE_RX_LP_COUNTER, 0x7fc0); - - /* write AFE_HPF_TRIM_OTHERS */ - bcm_phy_write_misc(phydev, AFE_HPF_TRIM_OTHERS, 0x000b); - - /* write AFTE_TX_CONFIG */ - bcm_phy_write_misc(phydev, AFE_TX_CONFIG, 0x0800); - - return 0; -} - static int bcm7xxx_28nm_d0_afe_config_init(struct phy_device *phydev) { /* AFE_RXCONFIG_0 */ @@ -143,7 +76,7 @@ static int bcm7xxx_28nm_d0_afe_config_init(struct phy_device *phydev) bcm_phy_write_misc(phydev, DSP_TAP10, 0x011b); /* Reset R_CAL/RC_CAL engine */ - r_rc_cal_reset(phydev); + bcm_phy_r_rc_cal_reset(phydev); return 0; } @@ -171,7 +104,7 @@ static int bcm7xxx_28nm_e0_plus_afe_config_init(struct phy_device *phydev) bcm_phy_write_misc(phydev, DSP_TAP10, 0x011b); /* Reset R_CAL/RC_CAL engine */ - r_rc_cal_reset(phydev); + bcm_phy_r_rc_cal_reset(phydev); return 0; } @@ -196,7 +129,7 @@ static int bcm7xxx_28nm_a0_patch_afe_config_init(struct phy_device *phydev) /* Enable ffe zero detection for Vitesse interoperability */ bcm_phy_write_misc(phydev, 0x26, 0x2, 0x0015); - r_rc_cal_reset(phydev); + bcm_phy_r_rc_cal_reset(phydev); return 0; } @@ -227,7 +160,7 @@ static int bcm7xxx_28nm_config_init(struct phy_device *phydev) switch (rev) { case 0xa0: case 0xb0: - ret = bcm7xxx_28nm_b0_afe_config_init(phydev); + ret = bcm_phy_28nm_a0b0_afe_config_init(phydev); break; case 0xd0: ret = bcm7xxx_28nm_d0_afe_config_init(phydev); @@ -605,7 +538,7 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev) .phy_id = (_oui), \ .phy_id_mask = 0xfffffff0, \ .name = _name, \ - .features = PHY_GBIT_FEATURES, \ + /* PHY_GBIT_FEATURES */ \ .flags = PHY_IS_INTERNAL, \ .config_init = bcm7xxx_28nm_config_init, \ .resume = bcm7xxx_28nm_resume, \ @@ -622,7 +555,7 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev) .phy_id = (_oui), \ .phy_id_mask = 0xfffffff0, \ .name = _name, \ - .features = PHY_BASIC_FEATURES, \ + /* PHY_BASIC_FEATURES */ \ .flags = PHY_IS_INTERNAL, \ .config_init = bcm7xxx_28nm_ephy_config_init, \ .resume = bcm7xxx_28nm_ephy_resume, \ 
@@ -637,7 +570,7 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev) .phy_id = (_oui), \ .phy_id_mask = 0xfffffff0, \ .name = _name, \ - .features = PHY_BASIC_FEATURES, \ + /* PHY_BASIC_FEATURES */ \ .flags = PHY_IS_INTERNAL, \ .config_init = bcm7xxx_config_init, \ .suspend = bcm7xxx_suspend, \ @@ -657,7 +590,6 @@ static struct phy_driver bcm7xxx_driver[] = { BCM7XXX_28NM_GPHY(PHY_ID_BCM7439, "Broadcom BCM7439"), BCM7XXX_28NM_GPHY(PHY_ID_BCM7439_2, "Broadcom BCM7439 (2)"), BCM7XXX_28NM_GPHY(PHY_ID_BCM7445, "Broadcom BCM7445"), - BCM7XXX_28NM_GPHY(PHY_ID_BCM_OMEGA, "Broadcom Omega Combo GPHY"), BCM7XXX_40NM_EPHY(PHY_ID_BCM7346, "Broadcom BCM7346"), BCM7XXX_40NM_EPHY(PHY_ID_BCM7362, "Broadcom BCM7362"), BCM7XXX_40NM_EPHY(PHY_ID_BCM7425, "Broadcom BCM7425"), diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index cb86a3e90c7d..67fa05d67523 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -610,7 +610,7 @@ static struct phy_driver broadcom_drivers[] = { .phy_id = PHY_ID_BCM5411, .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM5411", - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .config_init = bcm54xx_config_init, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, @@ -618,7 +618,7 @@ static struct phy_driver broadcom_drivers[] = { .phy_id = PHY_ID_BCM5421, .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM5421", - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .config_init = bcm54xx_config_init, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, @@ -626,7 +626,7 @@ static struct phy_driver broadcom_drivers[] = { .phy_id = PHY_ID_BCM54210E, .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM54210E", - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .config_init = bcm54xx_config_init, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, @@ -634,7 +634,7 @@ static struct phy_driver broadcom_drivers[] = { .phy_id = PHY_ID_BCM5461, .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM5461", - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .config_init = bcm54xx_config_init, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, @@ -642,7 +642,7 @@ static struct phy_driver broadcom_drivers[] = { .phy_id = PHY_ID_BCM54612E, .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM54612E", - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .config_init = bcm54xx_config_init, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, @@ -650,7 +650,7 @@ static struct phy_driver broadcom_drivers[] = { .phy_id = PHY_ID_BCM54616S, .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM54616S", - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .config_init = bcm54xx_config_init, .config_aneg = bcm54616s_config_aneg, .ack_interrupt = bcm_phy_ack_intr, @@ -659,7 +659,7 @@ static struct phy_driver broadcom_drivers[] = { .phy_id = PHY_ID_BCM5464, .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM5464", - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .config_init = bcm54xx_config_init, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, @@ -667,7 +667,7 @@ static struct phy_driver broadcom_drivers[] = { .phy_id = PHY_ID_BCM5481, .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM5481", - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .config_init = bcm54xx_config_init, .config_aneg = bcm5481_config_aneg, .ack_interrupt = bcm_phy_ack_intr, @@ -676,7 +676,7 @@ static struct phy_driver broadcom_drivers[] = { 
.phy_id = PHY_ID_BCM54810, .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM54810", - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .config_init = bcm54xx_config_init, .config_aneg = bcm5481_config_aneg, .ack_interrupt = bcm_phy_ack_intr, @@ -685,7 +685,7 @@ static struct phy_driver broadcom_drivers[] = { .phy_id = PHY_ID_BCM5482, .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM5482", - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .config_init = bcm5482_config_init, .read_status = bcm5482_read_status, .ack_interrupt = bcm_phy_ack_intr, @@ -694,7 +694,7 @@ static struct phy_driver broadcom_drivers[] = { .phy_id = PHY_ID_BCM50610, .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM50610", - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .config_init = bcm54xx_config_init, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, @@ -702,7 +702,7 @@ static struct phy_driver broadcom_drivers[] = { .phy_id = PHY_ID_BCM50610M, .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM50610M", - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .config_init = bcm54xx_config_init, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, @@ -710,7 +710,7 @@ static struct phy_driver broadcom_drivers[] = { .phy_id = PHY_ID_BCM57780, .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM57780", - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .config_init = bcm54xx_config_init, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, @@ -718,7 +718,7 @@ static struct phy_driver broadcom_drivers[] = { .phy_id = PHY_ID_BCMAC131, .phy_id_mask = 0xfffffff0, .name = "Broadcom BCMAC131", - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .config_init = brcm_fet_config_init, .ack_interrupt = brcm_fet_ack_interrupt, .config_intr = brcm_fet_config_intr, @@ -726,7 +726,7 @@ static struct phy_driver broadcom_drivers[] = { .phy_id = PHY_ID_BCM5241, .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM5241", - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .config_init = brcm_fet_config_init, .ack_interrupt = brcm_fet_ack_interrupt, .config_intr = brcm_fet_config_intr, @@ -735,7 +735,7 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM5395", .flags = PHY_IS_INTERNAL, - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .get_sset_count = bcm_phy_get_sset_count, .get_strings = bcm_phy_get_strings, .get_stats = bcm53xx_phy_get_stats, @@ -744,7 +744,7 @@ static struct phy_driver broadcom_drivers[] = { .phy_id = PHY_ID_BCM89610, .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM89610", - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .config_init = bcm54xx_config_init, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c index 108ed24f8489..9d1612a4d7e6 100644 --- a/drivers/net/phy/cicada.c +++ b/drivers/net/phy/cicada.c @@ -102,7 +102,7 @@ static struct phy_driver cis820x_driver[] = { .phy_id = 0x000fc410, .name = "Cicada Cis8201", .phy_id_mask = 0x000ffff0, - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .config_init = &cis820x_config_init, .ack_interrupt = &cis820x_ack_interrupt, .config_intr = &cis820x_config_intr, @@ -110,7 +110,7 @@ static struct phy_driver cis820x_driver[] = { .phy_id = 0x000fc440, .name = "Cicada Cis8204", .phy_id_mask = 0x000fffc0, - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .config_init = &cis820x_config_init, .ack_interrupt = 
&cis820x_ack_interrupt, .config_intr = &cis820x_config_intr, diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c index bf39baa7f2c8..942f277463a4 100644 --- a/drivers/net/phy/davicom.c +++ b/drivers/net/phy/davicom.c @@ -144,7 +144,7 @@ static struct phy_driver dm91xx_driver[] = { .phy_id = 0x0181b880, .name = "Davicom DM9161E", .phy_id_mask = 0x0ffffff0, - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .config_init = dm9161_config_init, .config_aneg = dm9161_config_aneg, .ack_interrupt = dm9161_ack_interrupt, @@ -153,7 +153,7 @@ static struct phy_driver dm91xx_driver[] = { .phy_id = 0x0181b8b0, .name = "Davicom DM9161B/C", .phy_id_mask = 0x0ffffff0, - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .config_init = dm9161_config_init, .config_aneg = dm9161_config_aneg, .ack_interrupt = dm9161_ack_interrupt, @@ -162,7 +162,7 @@ static struct phy_driver dm91xx_driver[] = { .phy_id = 0x0181b8a0, .name = "Davicom DM9161A", .phy_id_mask = 0x0ffffff0, - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .config_init = dm9161_config_init, .config_aneg = dm9161_config_aneg, .ack_interrupt = dm9161_ack_interrupt, @@ -171,7 +171,7 @@ static struct phy_driver dm91xx_driver[] = { .phy_id = 0x00181b80, .name = "Davicom DM9131", .phy_id_mask = 0x0ffffff0, - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .ack_interrupt = dm9161_ack_interrupt, .config_intr = dm9161_config_intr, } }; diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index 2fe2ebaf62d1..6580094161a9 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -1514,7 +1514,7 @@ static struct phy_driver dp83640_driver = { .phy_id = DP83640_PHY_ID, .phy_id_mask = 0xfffffff0, .name = "NatSemi DP83640", - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .probe = dp83640_probe, .remove = dp83640_remove, .soft_reset = dp83640_soft_reset, diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c index 97d45bd5b38e..7ed4760fb155 100644 --- a/drivers/net/phy/dp83822.c +++ b/drivers/net/phy/dp83822.c @@ -310,7 +310,7 @@ static int dp83822_resume(struct phy_device *phydev) { \ PHY_ID_MATCH_MODEL(_id), \ .name = (_name), \ - .features = PHY_BASIC_FEATURES, \ + /* PHY_BASIC_FEATURES */ \ .soft_reset = dp83822_phy_reset, \ .config_init = dp83822_config_init, \ .get_wol = dp83822_get_wol, \ diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c index f55dc907c2f3..6f9bc7d91f17 100644 --- a/drivers/net/phy/dp83848.c +++ b/drivers/net/phy/dp83848.c @@ -99,7 +99,7 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl); .phy_id = _id, \ .phy_id_mask = 0xfffffff0, \ .name = _name, \ - .features = PHY_BASIC_FEATURES, \ + /* PHY_BASIC_FEATURES */ \ \ .soft_reset = genphy_soft_reset, \ .config_init = _config_init, \ diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index 8448d01819ef..fd35131a0c39 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -315,7 +315,7 @@ static struct phy_driver dp83867_driver[] = { .phy_id = DP83867_PHY_ID, .phy_id_mask = 0xfffffff0, .name = "TI DP83867", - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .config_init = dp83867_config_init, .soft_reset = dp83867_phy_reset, diff --git a/drivers/net/phy/dp83tc811.c b/drivers/net/phy/dp83tc811.c index e9704af1d239..ac27da16824d 100644 --- a/drivers/net/phy/dp83tc811.c +++ b/drivers/net/phy/dp83tc811.c @@ -338,7 +338,7 @@ static struct phy_driver dp83811_driver[] = { .phy_id = DP83TC811_PHY_ID, .phy_id_mask = 0xfffffff0, .name = "TI 
DP83TC811", - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .config_init = dp83811_config_init, .config_aneg = dp83811_config_aneg, .soft_reset = dp83811_phy_reset, diff --git a/drivers/net/phy/et1011c.c b/drivers/net/phy/et1011c.c index 2aa367c04a8e..09e07b902d3a 100644 --- a/drivers/net/phy/et1011c.c +++ b/drivers/net/phy/et1011c.c @@ -86,7 +86,7 @@ static struct phy_driver et1011c_driver[] = { { .phy_id = 0x0282f014, .name = "ET1011C", .phy_id_mask = 0xfffffff0, - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .config_aneg = et1011c_config_aneg, .read_status = et1011c_read_status, } }; diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c index ebef8354bc81..d6e8516cd146 100644 --- a/drivers/net/phy/icplus.c +++ b/drivers/net/phy/icplus.c @@ -311,7 +311,7 @@ static struct phy_driver icplus_driver[] = { .phy_id = 0x02430d80, .name = "ICPlus IP175C", .phy_id_mask = 0x0ffffff0, - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .config_init = &ip175c_config_init, .config_aneg = &ip175c_config_aneg, .read_status = &ip175c_read_status, @@ -321,7 +321,7 @@ static struct phy_driver icplus_driver[] = { .phy_id = 0x02430d90, .name = "ICPlus IP1001", .phy_id_mask = 0x0ffffff0, - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .config_init = &ip1001_config_init, .suspend = genphy_suspend, .resume = genphy_resume, @@ -329,7 +329,7 @@ static struct phy_driver icplus_driver[] = { .phy_id = 0x02430c54, .name = "ICPlus IP101A/G", .phy_id_mask = 0x0ffffff0, - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .probe = ip101a_g_probe, .config_intr = ip101a_g_config_intr, .did_interrupt = ip101a_g_did_interrupt, diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c index 02d9713318b6..b7875b36097f 100644 --- a/drivers/net/phy/intel-xway.c +++ b/drivers/net/phy/intel-xway.c @@ -232,7 +232,7 @@ static struct phy_driver xway_gphy[] = { .phy_id = PHY_ID_PHY11G_1_3, .phy_id_mask = 0xffffffff, .name = "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.3", - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .config_init = xway_gphy_config_init, .config_aneg = xway_gphy14_config_aneg, .ack_interrupt = xway_gphy_ack_interrupt, @@ -244,7 +244,7 @@ static struct phy_driver xway_gphy[] = { .phy_id = PHY_ID_PHY22F_1_3, .phy_id_mask = 0xffffffff, .name = "Intel XWAY PHY22F (PEF 7061) v1.3", - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .config_init = xway_gphy_config_init, .config_aneg = xway_gphy14_config_aneg, .ack_interrupt = xway_gphy_ack_interrupt, @@ -256,7 +256,7 @@ static struct phy_driver xway_gphy[] = { .phy_id = PHY_ID_PHY11G_1_4, .phy_id_mask = 0xffffffff, .name = "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.4", - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .config_init = xway_gphy_config_init, .config_aneg = xway_gphy14_config_aneg, .ack_interrupt = xway_gphy_ack_interrupt, @@ -268,7 +268,7 @@ static struct phy_driver xway_gphy[] = { .phy_id = PHY_ID_PHY22F_1_4, .phy_id_mask = 0xffffffff, .name = "Intel XWAY PHY22F (PEF 7061) v1.4", - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .config_init = xway_gphy_config_init, .config_aneg = xway_gphy14_config_aneg, .ack_interrupt = xway_gphy_ack_interrupt, @@ -280,7 +280,7 @@ static struct phy_driver xway_gphy[] = { .phy_id = PHY_ID_PHY11G_1_5, .phy_id_mask = 0xffffffff, .name = "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.5 / v1.6", - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .config_init = xway_gphy_config_init, .ack_interrupt = 
xway_gphy_ack_interrupt, .did_interrupt = xway_gphy_did_interrupt, @@ -291,7 +291,7 @@ static struct phy_driver xway_gphy[] = { .phy_id = PHY_ID_PHY22F_1_5, .phy_id_mask = 0xffffffff, .name = "Intel XWAY PHY22F (PEF 7061) v1.5 / v1.6", - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .config_init = xway_gphy_config_init, .ack_interrupt = xway_gphy_ack_interrupt, .did_interrupt = xway_gphy_did_interrupt, @@ -302,7 +302,7 @@ static struct phy_driver xway_gphy[] = { .phy_id = PHY_ID_PHY11G_VR9_1_1, .phy_id_mask = 0xffffffff, .name = "Intel XWAY PHY11G (xRX v1.1 integrated)", - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .config_init = xway_gphy_config_init, .ack_interrupt = xway_gphy_ack_interrupt, .did_interrupt = xway_gphy_did_interrupt, @@ -313,7 +313,7 @@ static struct phy_driver xway_gphy[] = { .phy_id = PHY_ID_PHY22F_VR9_1_1, .phy_id_mask = 0xffffffff, .name = "Intel XWAY PHY22F (xRX v1.1 integrated)", - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .config_init = xway_gphy_config_init, .ack_interrupt = xway_gphy_ack_interrupt, .did_interrupt = xway_gphy_did_interrupt, @@ -324,7 +324,7 @@ static struct phy_driver xway_gphy[] = { .phy_id = PHY_ID_PHY11G_VR9_1_2, .phy_id_mask = 0xffffffff, .name = "Intel XWAY PHY11G (xRX v1.2 integrated)", - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .config_init = xway_gphy_config_init, .ack_interrupt = xway_gphy_ack_interrupt, .did_interrupt = xway_gphy_did_interrupt, @@ -335,7 +335,7 @@ static struct phy_driver xway_gphy[] = { .phy_id = PHY_ID_PHY22F_VR9_1_2, .phy_id_mask = 0xffffffff, .name = "Intel XWAY PHY22F (xRX v1.2 integrated)", - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .config_init = xway_gphy_config_init, .ack_interrupt = xway_gphy_ack_interrupt, .did_interrupt = xway_gphy_did_interrupt, diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c index a93d673baf35..314486288119 100644 --- a/drivers/net/phy/lxt.c +++ b/drivers/net/phy/lxt.c @@ -251,7 +251,7 @@ static struct phy_driver lxt97x_driver[] = { .phy_id = 0x78100000, .name = "LXT970", .phy_id_mask = 0xfffffff0, - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .config_init = lxt970_config_init, .ack_interrupt = lxt970_ack_interrupt, .config_intr = lxt970_config_intr, @@ -259,14 +259,14 @@ static struct phy_driver lxt97x_driver[] = { .phy_id = 0x001378e0, .name = "LXT971", .phy_id_mask = 0xfffffff0, - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .ack_interrupt = lxt971_ack_interrupt, .config_intr = lxt971_config_intr, }, { .phy_id = 0x00137a10, .name = "LXT973-A2", .phy_id_mask = 0xffffffff, - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .flags = 0, .probe = lxt973_probe, .config_aneg = lxt973_config_aneg, @@ -275,7 +275,7 @@ static struct phy_driver lxt97x_driver[] = { .phy_id = 0x00137a10, .name = "LXT973", .phy_id_mask = 0xfffffff0, - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .flags = 0, .probe = lxt973_probe, .config_aneg = lxt973_config_aneg, diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index f76c4048b978..a7796134e3be 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -29,6 +29,7 @@ #include <linux/ethtool.h> #include <linux/phy.h> #include <linux/marvell_phy.h> +#include <linux/bitfield.h> #include <linux/of.h> #include <linux/io.h> @@ -91,6 +92,14 @@ #define MII_88E1510_TEMP_SENSOR 0x1b #define MII_88E1510_TEMP_SENSOR_MASK 0xff +#define MII_88E1540_COPPER_CTRL3 0x1a +#define 
MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_MASK GENMASK(11, 10) +#define MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_00MS 0 +#define MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_10MS 1 +#define MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_20MS 2 +#define MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_40MS 3 +#define MII_88E1540_COPPER_CTRL3_FAST_LINK_DOWN BIT(9) + #define MII_88E6390_MISC_TEST 0x1b #define MII_88E6390_MISC_TEST_SAMPLE_1S 0 #define MII_88E6390_MISC_TEST_SAMPLE_10MS BIT(14) @@ -128,6 +137,7 @@ #define MII_PHY_LED_CTRL 16 #define MII_88E1121_PHY_LED_DEF 0x0030 #define MII_88E1510_PHY_LED_DEF 0x1177 +#define MII_88E1510_PHY_LED0_LINK_LED1_ACTIVE 0x1040 #define MII_M1011_PHY_STATUS 0x11 #define MII_M1011_PHY_STATUS_1000 0x8000 @@ -624,7 +634,10 @@ static void marvell_config_led(struct phy_device *phydev) * LED[2] .. Blink, Activity */ case MARVELL_PHY_FAMILY_ID(MARVELL_PHY_ID_88E1510): - def_config = MII_88E1510_PHY_LED_DEF; + if (phydev->dev_flags & MARVELL_PHY_LED0_LINK_LED1_ACTIVE) + def_config = MII_88E1510_PHY_LED0_LINK_LED1_ACTIVE; + else + def_config = MII_88E1510_PHY_LED_DEF; break; default: return; @@ -1025,6 +1038,101 @@ static int m88e1145_config_init(struct phy_device *phydev) return 0; } +static int m88e1540_get_fld(struct phy_device *phydev, u8 *msecs) +{ + int val; + + val = phy_read(phydev, MII_88E1540_COPPER_CTRL3); + if (val < 0) + return val; + + if (!(val & MII_88E1540_COPPER_CTRL3_FAST_LINK_DOWN)) { + *msecs = ETHTOOL_PHY_FAST_LINK_DOWN_OFF; + return 0; + } + + val = FIELD_GET(MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_MASK, val); + + switch (val) { + case MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_00MS: + *msecs = 0; + break; + case MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_10MS: + *msecs = 10; + break; + case MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_20MS: + *msecs = 20; + break; + case MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_40MS: + *msecs = 40; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int m88e1540_set_fld(struct phy_device *phydev, const u8 *msecs) +{ + struct ethtool_eee eee; + int val, ret; + + if (*msecs == ETHTOOL_PHY_FAST_LINK_DOWN_OFF) + return phy_clear_bits(phydev, MII_88E1540_COPPER_CTRL3, + MII_88E1540_COPPER_CTRL3_FAST_LINK_DOWN); + + /* According to the Marvell data sheet EEE must be disabled for + * Fast Link Down detection to work properly + */ + ret = phy_ethtool_get_eee(phydev, &eee); + if (!ret && eee.eee_enabled) { + phydev_warn(phydev, "Fast Link Down detection requires EEE to be disabled!\n"); + return -EBUSY; + } + + if (*msecs <= 5) + val = MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_00MS; + else if (*msecs <= 15) + val = MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_10MS; + else if (*msecs <= 30) + val = MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_20MS; + else + val = MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_40MS; + + val = FIELD_PREP(MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_MASK, val); + + ret = phy_modify(phydev, MII_88E1540_COPPER_CTRL3, + MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_MASK, val); + if (ret) + return ret; + + return phy_set_bits(phydev, MII_88E1540_COPPER_CTRL3, + MII_88E1540_COPPER_CTRL3_FAST_LINK_DOWN); +} + +static int m88e1540_get_tunable(struct phy_device *phydev, + struct ethtool_tunable *tuna, void *data) +{ + switch (tuna->id) { + case ETHTOOL_PHY_FAST_LINK_DOWN: + return m88e1540_get_fld(phydev, data); + default: + return -EOPNOTSUPP; + } +} + +static int m88e1540_set_tunable(struct phy_device *phydev, + struct ethtool_tunable *tuna, const void *data) +{ + switch (tuna->id) { + case ETHTOOL_PHY_FAST_LINK_DOWN: + 
return m88e1540_set_fld(phydev, data); + default: + return -EOPNOTSUPP; + } +} + /* The VOD can be out of specification on link up. Poke an * undocumented register, in an undocumented page, with a magic value * to fix this. @@ -2024,7 +2132,7 @@ static struct phy_driver marvell_drivers[] = { .phy_id = MARVELL_PHY_ID_88E1101, .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1101", - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .probe = marvell_probe, .config_init = &marvell_config_init, .config_aneg = &m88e1101_config_aneg, @@ -2042,7 +2150,7 @@ static struct phy_driver marvell_drivers[] = { .phy_id = MARVELL_PHY_ID_88E1112, .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1112", - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .probe = marvell_probe, .config_init = &m88e1111_config_init, .config_aneg = &marvell_config_aneg, @@ -2060,7 +2168,7 @@ static struct phy_driver marvell_drivers[] = { .phy_id = MARVELL_PHY_ID_88E1111, .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1111", - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .probe = marvell_probe, .config_init = &m88e1111_config_init, .config_aneg = &marvell_config_aneg, @@ -2079,7 +2187,7 @@ static struct phy_driver marvell_drivers[] = { .phy_id = MARVELL_PHY_ID_88E1118, .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1118", - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .probe = marvell_probe, .config_init = &m88e1118_config_init, .config_aneg = &m88e1118_config_aneg, @@ -2097,7 +2205,7 @@ static struct phy_driver marvell_drivers[] = { .phy_id = MARVELL_PHY_ID_88E1121R, .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1121R", - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .probe = &m88e1121_probe, .config_init = &marvell_config_init, .config_aneg = &m88e1121_config_aneg, @@ -2117,7 +2225,7 @@ static struct phy_driver marvell_drivers[] = { .phy_id = MARVELL_PHY_ID_88E1318S, .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1318S", - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .probe = marvell_probe, .config_init = &m88e1318_config_init, .config_aneg = &m88e1318_config_aneg, @@ -2139,7 +2247,7 @@ static struct phy_driver marvell_drivers[] = { .phy_id = MARVELL_PHY_ID_88E1145, .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1145", - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .probe = marvell_probe, .config_init = &m88e1145_config_init, .config_aneg = &m88e1101_config_aneg, @@ -2158,7 +2266,7 @@ static struct phy_driver marvell_drivers[] = { .phy_id = MARVELL_PHY_ID_88E1149R, .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1149R", - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .probe = marvell_probe, .config_init = &m88e1149_config_init, .config_aneg = &m88e1118_config_aneg, @@ -2176,7 +2284,7 @@ static struct phy_driver marvell_drivers[] = { .phy_id = MARVELL_PHY_ID_88E1240, .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1240", - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .probe = marvell_probe, .config_init = &m88e1111_config_init, .config_aneg = &marvell_config_aneg, @@ -2194,7 +2302,7 @@ static struct phy_driver marvell_drivers[] = { .phy_id = MARVELL_PHY_ID_88E1116R, .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1116R", - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .probe = marvell_probe, .config_init = &m88e1116r_config_init, .ack_interrupt = &marvell_ack_interrupt, @@ -2234,7 +2342,7 @@ static struct phy_driver marvell_drivers[] = { 
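The new Fast Link Down tunable defined above is wired into .get_tunable/.set_tunable in the driver-table hunks that follow, and is reachable from userspace through the ETHTOOL_PHY_STUNABLE ioctl. A hedged standalone sketch — "eth0" is a placeholder interface name, and the request layout is assumed from the uapi <linux/ethtool.h>; newer ethtool binaries expose the same knob via --set-phy-tunable:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>

int main(void)
{
	struct ethtool_tunable *tuna;
	struct ifreq ifr;
	int fd, ret;

	/* one trailing byte carries the requested u8 delay in msecs */
	tuna = calloc(1, sizeof(*tuna) + 1);
	if (!tuna)
		return EXIT_FAILURE;

	tuna->cmd = ETHTOOL_PHY_STUNABLE;
	tuna->id = ETHTOOL_PHY_FAST_LINK_DOWN;
	tuna->type_id = ETHTOOL_TUNABLE_U8;
	tuna->len = 1;
	/* maps onto the 20ms hardware step in m88e1540_set_fld() */
	((__u8 *)tuna->data)[0] = 20;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1); /* placeholder netdev */
	ifr.ifr_data = (char *)tuna;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		free(tuna);
		return EXIT_FAILURE;
	}

	ret = ioctl(fd, SIOCETHTOOL, &ifr);
	if (ret < 0)
		perror("ETHTOOL_PHY_STUNABLE");

	close(fd);
	free(tuna);
	return ret < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
}

Note that m88e1540_set_fld() refuses the request with -EBUSY while EEE is enabled, per the Marvell datasheet requirement quoted above, and rounds the requested delay to the 0/10/20/40ms steps the hardware supports.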
.phy_id = MARVELL_PHY_ID_88E1540, .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1540", - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .probe = m88e1510_probe, .config_init = &marvell_config_init, .config_aneg = &m88e1510_config_aneg, @@ -2249,13 +2357,15 @@ static struct phy_driver marvell_drivers[] = { .get_sset_count = marvell_get_sset_count, .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, + .get_tunable = m88e1540_get_tunable, + .set_tunable = m88e1540_set_tunable, }, { .phy_id = MARVELL_PHY_ID_88E1545, .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1545", .probe = m88e1510_probe, - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .config_init = &marvell_config_init, .config_aneg = &m88e1510_config_aneg, .read_status = &marvell_read_status, @@ -2274,7 +2384,7 @@ static struct phy_driver marvell_drivers[] = { .phy_id = MARVELL_PHY_ID_88E3016, .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E3016", - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .probe = marvell_probe, .config_init = &m88e3016_config_init, .aneg_done = &marvell_aneg_done, @@ -2294,7 +2404,7 @@ static struct phy_driver marvell_drivers[] = { .phy_id = MARVELL_PHY_ID_88E6390, .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E6390", - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .probe = m88e6390_probe, .config_init = &marvell_config_init, .config_aneg = &m88e6390_config_aneg, @@ -2309,6 +2419,8 @@ static struct phy_driver marvell_drivers[] = { .get_sset_count = marvell_get_sset_count, .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, + .get_tunable = m88e1540_get_tunable, + .set_tunable = m88e1540_set_tunable, }, }; diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c index 100b401b1f4a..238a20e13d6a 100644 --- a/drivers/net/phy/marvell10g.c +++ b/drivers/net/phy/marvell10g.c @@ -48,6 +48,8 @@ enum { MV_AN_STAT1000 = 0x8001, /* 1000base-T status register */ /* Vendor2 MMD registers */ + MV_V2_PORT_CTRL = 0xf001, + MV_V2_PORT_CTRL_PWRDOWN = 0x0800, MV_V2_TEMP_CTRL = 0xf08a, MV_V2_TEMP_CTRL_MASK = 0xc000, MV_V2_TEMP_CTRL_SAMPLE = 0x0000, @@ -226,11 +228,19 @@ static int mv3310_probe(struct phy_device *phydev) static int mv3310_suspend(struct phy_device *phydev) { - return 0; + return phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL, + MV_V2_PORT_CTRL_PWRDOWN); } static int mv3310_resume(struct phy_device *phydev) { + int ret; + + ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL, + MV_V2_PORT_CTRL_PWRDOWN); + if (ret) + return ret; + return mv3310_hwmon_config(phydev, true); } @@ -472,8 +482,9 @@ static struct phy_driver mv3310_drivers[] = { .phy_id = MARVELL_PHY_ID_88E2110, .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "mv88x2110", - .get_features = genphy_c45_pma_read_abilities, .probe = mv3310_probe, + .suspend = mv3310_suspend, + .resume = mv3310_resume, .soft_reset = genphy_no_soft_reset, .config_init = mv3310_config_init, .config_aneg = mv3310_config_aneg, diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c index 8295bc7c8c20..4a28fb29adaa 100644 --- a/drivers/net/phy/mdio-bcm-unimac.c +++ b/drivers/net/phy/mdio-bcm-unimac.c @@ -92,10 +92,7 @@ static int unimac_mdio_poll(void *wait_func_data) usleep_range(1000, 2000); } while (--timeout); - if (!timeout) - return -ETIMEDOUT; - - return 0; + return -ETIMEDOUT; } static int unimac_mdio_read(struct mii_bus *bus, int phy_id, int reg) @@ -292,7 +289,7 @@ static int unimac_mdio_probe(struct 
platform_device *pdev) platform_set_drvdata(pdev, priv); - dev_info(&pdev->dev, "Broadcom UniMAC MDIO bus at 0x%p\n", priv->base); + dev_info(&pdev->dev, "Broadcom UniMAC MDIO bus\n"); return 0; diff --git a/drivers/net/phy/mdio-mux-meson-g12a.c b/drivers/net/phy/mdio-mux-meson-g12a.c new file mode 100644 index 000000000000..6fa29ea8e2a3 --- /dev/null +++ b/drivers/net/phy/mdio-mux-meson-g12a.c @@ -0,0 +1,380 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Baylibre, SAS. + * Author: Jerome Brunet <jbrunet@baylibre.com> + */ + +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/mdio-mux.h> +#include <linux/module.h> +#include <linux/phy.h> +#include <linux/platform_device.h> + +#define ETH_PLL_STS 0x40 +#define ETH_PLL_CTL0 0x44 +#define PLL_CTL0_LOCK_DIG BIT(30) +#define PLL_CTL0_RST BIT(29) +#define PLL_CTL0_EN BIT(28) +#define PLL_CTL0_SEL BIT(23) +#define PLL_CTL0_N GENMASK(14, 10) +#define PLL_CTL0_M GENMASK(8, 0) +#define PLL_LOCK_TIMEOUT 1000000 +#define PLL_MUX_NUM_PARENT 2 +#define ETH_PLL_CTL1 0x48 +#define ETH_PLL_CTL2 0x4c +#define ETH_PLL_CTL3 0x50 +#define ETH_PLL_CTL4 0x54 +#define ETH_PLL_CTL5 0x58 +#define ETH_PLL_CTL6 0x5c +#define ETH_PLL_CTL7 0x60 + +#define ETH_PHY_CNTL0 0x80 +#define EPHY_G12A_ID 0x33000180 +#define ETH_PHY_CNTL1 0x84 +#define PHY_CNTL1_ST_MODE GENMASK(2, 0) +#define PHY_CNTL1_ST_PHYADD GENMASK(7, 3) +#define EPHY_DFLT_ADD 8 +#define PHY_CNTL1_MII_MODE GENMASK(15, 14) +#define EPHY_MODE_RMII 0x1 +#define PHY_CNTL1_CLK_EN BIT(16) +#define PHY_CNTL1_CLKFREQ BIT(17) +#define PHY_CNTL1_PHY_ENB BIT(18) +#define ETH_PHY_CNTL2 0x88 +#define PHY_CNTL2_USE_INTERNAL BIT(5) +#define PHY_CNTL2_SMI_SRC_MAC BIT(6) +#define PHY_CNTL2_RX_CLK_EPHY BIT(9) + +#define MESON_G12A_MDIO_EXTERNAL_ID 0 +#define MESON_G12A_MDIO_INTERNAL_ID 1 + +struct g12a_mdio_mux { + bool pll_is_enabled; + void __iomem *regs; + void *mux_handle; + struct clk *pclk; + struct clk *pll; +}; + +struct g12a_ephy_pll { + void __iomem *base; + struct clk_hw hw; +}; + +#define g12a_ephy_pll_to_dev(_hw) \ + container_of(_hw, struct g12a_ephy_pll, hw) + +static unsigned long g12a_ephy_pll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct g12a_ephy_pll *pll = g12a_ephy_pll_to_dev(hw); + u32 val, m, n; + + val = readl(pll->base + ETH_PLL_CTL0); + m = FIELD_GET(PLL_CTL0_M, val); + n = FIELD_GET(PLL_CTL0_N, val); + + return parent_rate * m / n; +} + +static int g12a_ephy_pll_enable(struct clk_hw *hw) +{ + struct g12a_ephy_pll *pll = g12a_ephy_pll_to_dev(hw); + u32 val = readl(pll->base + ETH_PLL_CTL0); + + /* Apply both enable and reset */ + val |= PLL_CTL0_RST | PLL_CTL0_EN; + writel(val, pll->base + ETH_PLL_CTL0); + + /* Clear the reset to let PLL lock */ + val &= ~PLL_CTL0_RST; + writel(val, pll->base + ETH_PLL_CTL0); + + /* Poll on the digital lock instead of the usual analog lock + * This is done because bit 31 is unreliable on some SoC.
Bit + * 31 may indicate that the PLL is not lock eventhough the clock + * is actually running + */ + return readl_poll_timeout(pll->base + ETH_PLL_CTL0, val, + val & PLL_CTL0_LOCK_DIG, 0, PLL_LOCK_TIMEOUT); +} + +static void g12a_ephy_pll_disable(struct clk_hw *hw) +{ + struct g12a_ephy_pll *pll = g12a_ephy_pll_to_dev(hw); + u32 val; + + val = readl(pll->base + ETH_PLL_CTL0); + val &= ~PLL_CTL0_EN; + val |= PLL_CTL0_RST; + writel(val, pll->base + ETH_PLL_CTL0); +} + +static int g12a_ephy_pll_is_enabled(struct clk_hw *hw) +{ + struct g12a_ephy_pll *pll = g12a_ephy_pll_to_dev(hw); + unsigned int val; + + val = readl(pll->base + ETH_PLL_CTL0); + + return (val & PLL_CTL0_LOCK_DIG) ? 1 : 0; +} + +static void g12a_ephy_pll_init(struct clk_hw *hw) +{ + struct g12a_ephy_pll *pll = g12a_ephy_pll_to_dev(hw); + + /* Apply PLL HW settings */ + writel(0x29c0040a, pll->base + ETH_PLL_CTL0); + writel(0x927e0000, pll->base + ETH_PLL_CTL1); + writel(0xac5f49e5, pll->base + ETH_PLL_CTL2); + writel(0x00000000, pll->base + ETH_PLL_CTL3); + writel(0x00000000, pll->base + ETH_PLL_CTL4); + writel(0x20200000, pll->base + ETH_PLL_CTL5); + writel(0x0000c002, pll->base + ETH_PLL_CTL6); + writel(0x00000023, pll->base + ETH_PLL_CTL7); +} + +static const struct clk_ops g12a_ephy_pll_ops = { + .recalc_rate = g12a_ephy_pll_recalc_rate, + .is_enabled = g12a_ephy_pll_is_enabled, + .enable = g12a_ephy_pll_enable, + .disable = g12a_ephy_pll_disable, + .init = g12a_ephy_pll_init, +}; + +static int g12a_enable_internal_mdio(struct g12a_mdio_mux *priv) +{ + int ret; + + /* Enable the phy clock */ + if (!priv->pll_is_enabled) { + ret = clk_prepare_enable(priv->pll); + if (ret) + return ret; + } + + priv->pll_is_enabled = true; + + /* Initialize ephy control */ + writel(EPHY_G12A_ID, priv->regs + ETH_PHY_CNTL0); + writel(FIELD_PREP(PHY_CNTL1_ST_MODE, 3) | + FIELD_PREP(PHY_CNTL1_ST_PHYADD, EPHY_DFLT_ADD) | + FIELD_PREP(PHY_CNTL1_MII_MODE, EPHY_MODE_RMII) | + PHY_CNTL1_CLK_EN | + PHY_CNTL1_CLKFREQ | + PHY_CNTL1_PHY_ENB, + priv->regs + ETH_PHY_CNTL1); + writel(PHY_CNTL2_USE_INTERNAL | + PHY_CNTL2_SMI_SRC_MAC | + PHY_CNTL2_RX_CLK_EPHY, + priv->regs + ETH_PHY_CNTL2); + + return 0; +} + +static int g12a_enable_external_mdio(struct g12a_mdio_mux *priv) +{ + /* Reset the mdio bus mux */ + writel_relaxed(0x0, priv->regs + ETH_PHY_CNTL2); + + /* Disable the phy clock if enabled */ + if (priv->pll_is_enabled) { + clk_disable_unprepare(priv->pll); + priv->pll_is_enabled = false; + } + + return 0; +} + +static int g12a_mdio_switch_fn(int current_child, int desired_child, + void *data) +{ + struct g12a_mdio_mux *priv = dev_get_drvdata(data); + + if (current_child == desired_child) + return 0; + + switch (desired_child) { + case MESON_G12A_MDIO_EXTERNAL_ID: + return g12a_enable_external_mdio(priv); + case MESON_G12A_MDIO_INTERNAL_ID: + return g12a_enable_internal_mdio(priv); + default: + return -EINVAL; + } +} + +static const struct of_device_id g12a_mdio_mux_match[] = { + { .compatible = "amlogic,g12a-mdio-mux", }, + {}, +}; +MODULE_DEVICE_TABLE(of, g12a_mdio_mux_match); + +static int g12a_ephy_glue_clk_register(struct device *dev) +{ + struct g12a_mdio_mux *priv = dev_get_drvdata(dev); + const char *parent_names[PLL_MUX_NUM_PARENT]; + struct clk_init_data init; + struct g12a_ephy_pll *pll; + struct clk_mux *mux; + struct clk *clk; + char *name; + int i; + + /* get the mux parents */ + for (i = 0; i < PLL_MUX_NUM_PARENT; i++) { + char in_name[8]; + + snprintf(in_name, sizeof(in_name), "clkin%d", i); + clk = devm_clk_get(dev, in_name); + if 
(IS_ERR(clk)) { + if (PTR_ERR(clk) != -EPROBE_DEFER) + dev_err(dev, "Missing clock %s\n", in_name); + return PTR_ERR(clk); + } + + parent_names[i] = __clk_get_name(clk); + } + + /* create the input mux */ + mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL); + if (!mux) + return -ENOMEM; + + name = kasprintf(GFP_KERNEL, "%s#mux", dev_name(dev)); + if (!name) + return -ENOMEM; + + init.name = name; + init.ops = &clk_mux_ro_ops; + init.flags = 0; + init.parent_names = parent_names; + init.num_parents = PLL_MUX_NUM_PARENT; + + mux->reg = priv->regs + ETH_PLL_CTL0; + mux->shift = __ffs(PLL_CTL0_SEL); + mux->mask = PLL_CTL0_SEL >> mux->shift; + mux->hw.init = &init; + + clk = devm_clk_register(dev, &mux->hw); + kfree(name); + if (IS_ERR(clk)) { + dev_err(dev, "failed to register input mux\n"); + return PTR_ERR(clk); + } + + /* create the pll */ + pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL); + if (!pll) + return -ENOMEM; + + name = kasprintf(GFP_KERNEL, "%s#pll", dev_name(dev)); + if (!name) + return -ENOMEM; + + init.name = name; + init.ops = &g12a_ephy_pll_ops; + init.flags = 0; + parent_names[0] = __clk_get_name(clk); + init.parent_names = parent_names; + init.num_parents = 1; + + pll->base = priv->regs; + pll->hw.init = &init; + + clk = devm_clk_register(dev, &pll->hw); + kfree(name); + if (IS_ERR(clk)) { + dev_err(dev, "failed to register PLL\n"); + return PTR_ERR(clk); + } + + priv->pll = clk; + + return 0; +} + +static int g12a_mdio_mux_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct g12a_mdio_mux *priv; + struct resource *res; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + platform_set_drvdata(pdev, priv); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->regs = devm_ioremap_resource(dev, res); + if (IS_ERR(priv->regs)) + return PTR_ERR(priv->regs); + + priv->pclk = devm_clk_get(dev, "pclk"); + if (IS_ERR(priv->pclk)) { + ret = PTR_ERR(priv->pclk); + if (ret != -EPROBE_DEFER) + dev_err(dev, "failed to get peripheral clock\n"); + return ret; + } + + /* Make sure the device registers are clocked */ + ret = clk_prepare_enable(priv->pclk); + if (ret) { + dev_err(dev, "failed to enable peripheral clock\n"); + return ret; + } + + /* Register PLL in CCF */ + ret = g12a_ephy_glue_clk_register(dev); + if (ret) + goto err; + + ret = mdio_mux_init(dev, dev->of_node, g12a_mdio_switch_fn, + &priv->mux_handle, dev, NULL); + if (ret) { + if (ret != -EPROBE_DEFER) + dev_err(dev, "mdio multiplexer init failed: %d\n", ret); + goto err; + } + + return 0; + +err: + clk_disable_unprepare(priv->pclk); + return ret; +} + +static int g12a_mdio_mux_remove(struct platform_device *pdev) +{ + struct g12a_mdio_mux *priv = platform_get_drvdata(pdev); + + mdio_mux_uninit(priv->mux_handle); + + if (priv->pll_is_enabled) + clk_disable_unprepare(priv->pll); + + clk_disable_unprepare(priv->pclk); + + return 0; +} + +static struct platform_driver g12a_mdio_mux_driver = { + .probe = g12a_mdio_mux_probe, + .remove = g12a_mdio_mux_remove, + .driver = { + .name = "g12a-mdio_mux", + .of_match_table = g12a_mdio_mux_match, + }, +}; +module_platform_driver(g12a_mdio_mux_driver); + +MODULE_DESCRIPTION("Amlogic G12a MDIO multiplexer driver"); +MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 4be4cc09eb90..bd04fe762056 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -24,6 +24,7 @@
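Stepping back to the mdio-mux-meson-g12a.c driver just added: g12a_ephy_pll_recalc_rate() derives the rate as parent_rate * m / n from the M/N fields of ETH_PLL_CTL0. Applied to the value 0x29c0040a that g12a_ephy_pll_init() programs, this yields m = 10 and n = 1, i.e. a 10x multiply of the selected input clock. An illustrative standalone check of that arithmetic (not kernel code):

#include <stdio.h>

#define PLL_CTL0_N_SHIFT	10
#define PLL_CTL0_N_MASK		(0x1fu << PLL_CTL0_N_SHIFT)	/* GENMASK(14, 10) */
#define PLL_CTL0_M_MASK		0x1ffu				/* GENMASK(8, 0) */

int main(void)
{
	unsigned int val = 0x29c0040a;	/* value written by g12a_ephy_pll_init() */
	unsigned int m = val & PLL_CTL0_M_MASK;				/* 0x0a = 10 */
	unsigned int n = (val & PLL_CTL0_N_MASK) >> PLL_CTL0_N_SHIFT;	/* 1 */

	printf("m=%u n=%u -> rate = parent * %u / %u\n", m, n, m, n);
	return 0;
}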
#include <linux/of_gpio.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> +#include <linux/reset.h> #include <linux/skbuff.h> #include <linux/spinlock.h> #include <linux/mm.h> @@ -55,10 +56,25 @@ static int mdiobus_register_gpiod(struct mdio_device *mdiodev) return PTR_ERR(gpiod); } - mdiodev->reset = gpiod; + mdiodev->reset_gpio = gpiod; - /* Assert the reset signal again */ - mdio_device_reset(mdiodev, 1); + return 0; +} + +static int mdiobus_register_reset(struct mdio_device *mdiodev) +{ + struct reset_control *reset = NULL; + + if (mdiodev->dev.of_node) + reset = devm_reset_control_get_exclusive(&mdiodev->dev, + "phy"); + if (PTR_ERR(reset) == -ENOENT || + PTR_ERR(reset) == -ENOTSUPP) + reset = NULL; + else if (IS_ERR(reset)) + return PTR_ERR(reset); + + mdiodev->reset_ctrl = reset; return 0; } @@ -74,6 +90,13 @@ int mdiobus_register_device(struct mdio_device *mdiodev) err = mdiobus_register_gpiod(mdiodev); if (err) return err; + + err = mdiobus_register_reset(mdiodev); + if (err) + return err; + + /* Assert the reset signal */ + mdio_device_reset(mdiodev, 1); } mdiodev->bus->mdio_map[mdiodev->addr] = mdiodev; @@ -446,8 +469,8 @@ void mdiobus_unregister(struct mii_bus *bus) if (!mdiodev) continue; - if (mdiodev->reset) - gpiod_put(mdiodev->reset); + if (mdiodev->reset_gpio) + gpiod_put(mdiodev->reset_gpio); mdiodev->device_remove(mdiodev); mdiodev->device_free(mdiodev); diff --git a/drivers/net/phy/mdio_device.c b/drivers/net/phy/mdio_device.c index 887076292e50..e282600bd83e 100644 --- a/drivers/net/phy/mdio_device.c +++ b/drivers/net/phy/mdio_device.c @@ -16,6 +16,7 @@ #include <linux/mii.h> #include <linux/module.h> #include <linux/phy.h> +#include <linux/reset.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/unistd.h> @@ -116,10 +117,18 @@ void mdio_device_reset(struct mdio_device *mdiodev, int value) { unsigned int d; - if (!mdiodev->reset) + if (!mdiodev->reset_gpio && !mdiodev->reset_ctrl) return; - gpiod_set_value(mdiodev->reset, value); + if (mdiodev->reset_gpio) + gpiod_set_value(mdiodev->reset_gpio, value); + + if (mdiodev->reset_ctrl) { + if (value) + reset_control_assert(mdiodev->reset_ctrl); + else + reset_control_deassert(mdiodev->reset_ctrl); + } d = value ? 
mdiodev->reset_assert_delay : mdiodev->reset_deassert_delay; if (d) diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c index 0eec2913c289..fa80d6dce8ee 100644 --- a/drivers/net/phy/meson-gxl.c +++ b/drivers/net/phy/meson-gxl.c @@ -224,24 +224,33 @@ static int meson_gxl_config_intr(struct phy_device *phydev) static struct phy_driver meson_gxl_phy[] = { { - .phy_id = 0x01814400, - .phy_id_mask = 0xfffffff0, + PHY_ID_MATCH_EXACT(0x01814400), .name = "Meson GXL Internal PHY", - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .flags = PHY_IS_INTERNAL, .soft_reset = genphy_soft_reset, .config_init = meson_gxl_config_init, - .aneg_done = genphy_aneg_done, .read_status = meson_gxl_read_status, .ack_interrupt = meson_gxl_ack_interrupt, .config_intr = meson_gxl_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, + }, { + PHY_ID_MATCH_EXACT(0x01803301), + .name = "Meson G12A Internal PHY", + /* PHY_BASIC_FEATURES */ + .flags = PHY_IS_INTERNAL, + .soft_reset = genphy_soft_reset, + .ack_interrupt = meson_gxl_ack_interrupt, + .config_intr = meson_gxl_config_intr, + .suspend = genphy_suspend, + .resume = genphy_resume, }, }; static struct mdio_device_id __maybe_unused meson_gxl_tbl[] = { - { 0x01814400, 0xfffffff0 }, + { PHY_ID_MATCH_VENDOR(0x01814400) }, + { PHY_ID_MATCH_VENDOR(0x01803301) }, { } }; diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 352da24f1f33..3c8186f269f9 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -28,6 +28,7 @@ /* Operation Mode Strap Override */ #define MII_KSZPHY_OMSO 0x16 +#define KSZPHY_OMSO_FACTORY_TEST BIT(15) #define KSZPHY_OMSO_B_CAST_OFF BIT(9) #define KSZPHY_OMSO_NAND_TREE_ON BIT(5) #define KSZPHY_OMSO_RMII_OVERRIDE BIT(1) @@ -340,6 +341,18 @@ static int ksz8041_config_aneg(struct phy_device *phydev) return genphy_config_aneg(phydev); } +static int ksz8081_config_init(struct phy_device *phydev) +{ + /* KSZPHY_OMSO_FACTORY_TEST is set at de-assertion of the reset line + * based on the RXER (KSZ8081RNA/RND) or TXC (KSZ8081MNX/RNB) pin. If a + * pull-down is missing, the factory test mode should be cleared by + * manually writing a 0. + */ + phy_clear_bits(phydev, MII_KSZPHY_OMSO, KSZPHY_OMSO_FACTORY_TEST); + + return kszphy_config_init(phydev); +} + static int ksz8061_config_init(struct phy_device *phydev) { int ret; @@ -738,6 +751,31 @@ static int ksz8873mll_read_status(struct phy_device *phydev) return 0; } +static int ksz9031_get_features(struct phy_device *phydev) +{ + int ret; + + ret = genphy_read_abilities(phydev); + if (ret < 0) + return ret; + + /* Silicon Errata Sheet (DS80000691D or DS80000692D): + * Whenever the device's Asymmetric Pause capability is set to 1, + * link-up may fail after a link-up to link-down transition. + * + * Workaround: + * Do not enable the Asymmetric Pause capability bit. + */ + linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->supported); + + /* We force setting the Pause capability as the core will force the + * Asymmetric Pause capability to 1 otherwise. 
+ */ + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported); + + return 0; +} + static int ksz9031_read_status(struct phy_device *phydev) { int err; @@ -908,7 +946,7 @@ static struct phy_driver ksphy_driver[] = { .phy_id = PHY_ID_KS8737, .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Micrel KS8737", - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .driver_data = &ks8737_type, .config_init = kszphy_config_init, .ack_interrupt = kszphy_ack_interrupt, @@ -919,7 +957,7 @@ static struct phy_driver ksphy_driver[] = { .phy_id = PHY_ID_KSZ8021, .phy_id_mask = 0x00ffffff, .name = "Micrel KSZ8021 or KSZ8031", - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .driver_data = &ksz8021_type, .probe = kszphy_probe, .config_init = kszphy_config_init, @@ -934,7 +972,7 @@ static struct phy_driver ksphy_driver[] = { .phy_id = PHY_ID_KSZ8031, .phy_id_mask = 0x00ffffff, .name = "Micrel KSZ8031", - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .driver_data = &ksz8021_type, .probe = kszphy_probe, .config_init = kszphy_config_init, @@ -949,7 +987,7 @@ static struct phy_driver ksphy_driver[] = { .phy_id = PHY_ID_KSZ8041, .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Micrel KSZ8041", - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .driver_data = &ksz8041_type, .probe = kszphy_probe, .config_init = ksz8041_config_init, @@ -965,7 +1003,7 @@ static struct phy_driver ksphy_driver[] = { .phy_id = PHY_ID_KSZ8041RNLI, .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Micrel KSZ8041RNLI", - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .driver_data = &ksz8041_type, .probe = kszphy_probe, .config_init = kszphy_config_init, @@ -980,7 +1018,7 @@ static struct phy_driver ksphy_driver[] = { .phy_id = PHY_ID_KSZ8051, .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Micrel KSZ8051", - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .driver_data = &ksz8051_type, .probe = kszphy_probe, .config_init = kszphy_config_init, @@ -995,7 +1033,7 @@ static struct phy_driver ksphy_driver[] = { .phy_id = PHY_ID_KSZ8001, .name = "Micrel KSZ8001 or KS8721", .phy_id_mask = 0x00fffffc, - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .driver_data = &ksz8041_type, .probe = kszphy_probe, .config_init = kszphy_config_init, @@ -1010,10 +1048,10 @@ static struct phy_driver ksphy_driver[] = { .phy_id = PHY_ID_KSZ8081, .name = "Micrel KSZ8081 or KSZ8091", .phy_id_mask = MICREL_PHY_ID_MASK, - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .driver_data = &ksz8081_type, .probe = kszphy_probe, - .config_init = kszphy_config_init, + .config_init = ksz8081_config_init, .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, .get_sset_count = kszphy_get_sset_count, @@ -1025,7 +1063,7 @@ static struct phy_driver ksphy_driver[] = { .phy_id = PHY_ID_KSZ8061, .name = "Micrel KSZ8061", .phy_id_mask = MICREL_PHY_ID_MASK, - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .config_init = ksz8061_config_init, .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, @@ -1035,7 +1073,7 @@ static struct phy_driver ksphy_driver[] = { .phy_id = PHY_ID_KSZ9021, .phy_id_mask = 0x000ffffe, .name = "Micrel KSZ9021 Gigabit PHY", - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .driver_data = &ksz9021_type, .probe = kszphy_probe, .config_init = ksz9021_config_init, @@ -1052,9 +1090,9 @@ static struct phy_driver ksphy_driver[] = { .phy_id = PHY_ID_KSZ9031, .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Micrel KSZ9031 Gigabit PHY", - 
.features = PHY_GBIT_FEATURES, .driver_data = &ksz9021_type, .probe = kszphy_probe, + .get_features = ksz9031_get_features, .config_init = ksz9031_config_init, .soft_reset = genphy_soft_reset, .read_status = ksz9031_read_status, @@ -1069,7 +1107,7 @@ static struct phy_driver ksphy_driver[] = { .phy_id = PHY_ID_KSZ9131, .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Microchip KSZ9131 Gigabit PHY", - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .driver_data = &ksz9021_type, .probe = kszphy_probe, .config_init = ksz9131_config_init, @@ -1085,7 +1123,7 @@ static struct phy_driver ksphy_driver[] = { .phy_id = PHY_ID_KSZ8873MLL, .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Micrel KSZ8873MLL Switch", - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .config_init = kszphy_config_init, .config_aneg = ksz8873mll_config_aneg, .read_status = ksz8873mll_read_status, @@ -1095,7 +1133,7 @@ static struct phy_driver ksphy_driver[] = { .phy_id = PHY_ID_KSZ886X, .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Micrel KSZ886X Switch", - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .config_init = kszphy_config_init, .suspend = genphy_suspend, .resume = genphy_resume, @@ -1103,7 +1141,7 @@ static struct phy_driver ksphy_driver[] = { .phy_id = PHY_ID_KSZ8795, .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Micrel KSZ8795", - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .config_init = kszphy_config_init, .config_aneg = ksz8873mll_config_aneg, .read_status = ksz8873mll_read_status, @@ -1113,7 +1151,7 @@ static struct phy_driver ksphy_driver[] = { .phy_id = PHY_ID_KSZ9477, .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Microchip KSZ9477", - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .config_init = kszphy_config_init, .suspend = genphy_suspend, .resume = genphy_resume, diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c index c6cbb3aa8ae0..eb1b3287fe08 100644 --- a/drivers/net/phy/microchip.c +++ b/drivers/net/phy/microchip.c @@ -333,7 +333,7 @@ static struct phy_driver microchip_phy_driver[] = { .phy_id_mask = 0xfffffff0, .name = "Microchip LAN88xx", - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .probe = lan88xx_probe, .remove = lan88xx_remove, diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c index db50efb30df5..28676af97b42 100644 --- a/drivers/net/phy/mscc.c +++ b/drivers/net/phy/mscc.c @@ -85,12 +85,49 @@ enum rgmii_rx_clock_delay { #define LED_MODE_SEL_MASK(x) (GENMASK(3, 0) << LED_MODE_SEL_POS(x)) #define LED_MODE_SEL(x, mode) (((mode) << LED_MODE_SEL_POS(x)) & LED_MODE_SEL_MASK(x)) +#define MSCC_EXT_PAGE_CSR_CNTL_17 17 +#define MSCC_EXT_PAGE_CSR_CNTL_18 18 + +#define MSCC_EXT_PAGE_CSR_CNTL_19 19 +#define MSCC_PHY_CSR_CNTL_19_REG_ADDR(x) (x) +#define MSCC_PHY_CSR_CNTL_19_TARGET(x) ((x) << 12) +#define MSCC_PHY_CSR_CNTL_19_READ BIT(14) +#define MSCC_PHY_CSR_CNTL_19_CMD BIT(15) + +#define MSCC_EXT_PAGE_CSR_CNTL_20 20 +#define MSCC_PHY_CSR_CNTL_20_TARGET(x) (x) + +#define PHY_MCB_TARGET 0x07 +#define PHY_MCB_S6G_WRITE BIT(31) +#define PHY_MCB_S6G_READ BIT(30) + +#define PHY_S6G_PLL5G_CFG0 0x06 +#define PHY_S6G_LCPLL_CFG 0x11 +#define PHY_S6G_PLL_CFG 0x2b +#define PHY_S6G_COMMON_CFG 0x2c +#define PHY_S6G_GPC_CFG 0x2e +#define PHY_S6G_MISC_CFG 0x3b +#define PHY_MCB_S6G_CFG 0x3f +#define PHY_S6G_DFT_CFG2 0x3e +#define PHY_S6G_PLL_STATUS 0x31 +#define PHY_S6G_IB_STATUS0 0x2f + +#define PHY_S6G_SYS_RST_POS 31 +#define PHY_S6G_ENA_LANE_POS 18 +#define PHY_S6G_ENA_LOOP_POS 8 +#define PHY_S6G_QRATE_POS 6 +#define 
PHY_S6G_IF_MODE_POS 4 +#define PHY_S6G_PLL_ENA_OFFS_POS 21 +#define PHY_S6G_PLL_FSM_CTRL_DATA_POS 8 +#define PHY_S6G_PLL_FSM_ENA_POS 7 + #define MSCC_EXT_PAGE_ACCESS 31 #define MSCC_PHY_PAGE_STANDARD 0x0000 /* Standard registers */ #define MSCC_PHY_PAGE_EXTENDED 0x0001 /* Extended registers */ #define MSCC_PHY_PAGE_EXTENDED_2 0x0002 /* Extended reg - page 2 */ #define MSCC_PHY_PAGE_EXTENDED_3 0x0003 /* Extended reg - page 3 */ #define MSCC_PHY_PAGE_EXTENDED_4 0x0004 /* Extended reg - page 4 */ +#define MSCC_PHY_PAGE_CSR_CNTL MSCC_PHY_PAGE_EXTENDED_4 /* Extended reg - GPIO; this is a bank of registers that are shared for all PHYs * in the same package. */ @@ -216,6 +253,7 @@ enum rgmii_rx_clock_delay { #define MSCC_PHY_TR_MSB 18 /* Microsemi PHY ID's */ +#define PHY_ID_VSC8514 0x00070670 #define PHY_ID_VSC8530 0x00070560 #define PHY_ID_VSC8531 0x00070570 #define PHY_ID_VSC8540 0x00070760 @@ -1742,6 +1780,386 @@ static int vsc8584_did_interrupt(struct phy_device *phydev) return (rc < 0) ? 0 : rc & MII_VSC85XX_INT_MASK_MASK; } +static int vsc8514_config_pre_init(struct phy_device *phydev) +{ + /* These are the settings to override the silicon default + * values to handle hardware performance of PHY. They + * are set at Power-On state and remain until PHY Reset. + */ + const struct reg_val pre_init1[] = { + {0x0f90, 0x00688980}, + {0x0786, 0x00000003}, + {0x07fa, 0x0050100f}, + {0x0f82, 0x0012b002}, + {0x1686, 0x00000004}, + {0x168c, 0x00d2c46f}, + {0x17a2, 0x00000620}, + {0x16a0, 0x00eeffdd}, + {0x16a6, 0x00071448}, + {0x16a4, 0x0013132f}, + {0x16a8, 0x00000000}, + {0x0ffc, 0x00c0a028}, + {0x0fe8, 0x0091b06c}, + {0x0fea, 0x00041600}, + {0x0f80, 0x00fffaff}, + {0x0fec, 0x00901809}, + {0x0ffe, 0x00b01007}, + {0x16b0, 0x00eeff00}, + {0x16b2, 0x00007000}, + {0x16b4, 0x00000814}, + }; + unsigned int i; + u16 reg; + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD); + + /* all writes below are broadcasted to all PHYs in the same package */ + reg = phy_base_read(phydev, MSCC_PHY_EXT_CNTL_STATUS); + reg |= SMI_BROADCAST_WR_EN; + phy_base_write(phydev, MSCC_PHY_EXT_CNTL_STATUS, reg); + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TEST); + + reg = phy_base_read(phydev, MSCC_PHY_TEST_PAGE_8); + reg |= BIT(15); + phy_base_write(phydev, MSCC_PHY_TEST_PAGE_8, reg); + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TR); + + for (i = 0; i < ARRAY_SIZE(pre_init1); i++) + vsc8584_csr_write(phydev, pre_init1[i].reg, pre_init1[i].val); + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TEST); + + reg = phy_base_read(phydev, MSCC_PHY_TEST_PAGE_8); + reg &= ~BIT(15); + phy_base_write(phydev, MSCC_PHY_TEST_PAGE_8, reg); + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD); + + reg = phy_base_read(phydev, MSCC_PHY_EXT_CNTL_STATUS); + reg &= ~SMI_BROADCAST_WR_EN; + phy_base_write(phydev, MSCC_PHY_EXT_CNTL_STATUS, reg); + + return 0; +} + +static u32 vsc85xx_csr_ctrl_phy_read(struct phy_device *phydev, + u32 target, u32 reg) +{ + unsigned long deadline; + u32 val, val_l, val_h; + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_CSR_CNTL); + + /* CSR registers are grouped under different Target IDs. + * 6-bit Target_ID is split between MSCC_EXT_PAGE_CSR_CNTL_20 and + * MSCC_EXT_PAGE_CSR_CNTL_19 registers. + * Target_ID[5:2] maps to bits[3:0] of MSCC_EXT_PAGE_CSR_CNTL_20 + * and Target_ID[1:0] maps to bits[13:12] of MSCC_EXT_PAGE_CSR_CNTL_19. 
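The Target_ID split documented in this comment is easy to get wrong, so here is a small illustrative check of the arithmetic, using PHY_MCB_TARGET (0x07) as the 6-bit ID:

#include <stdio.h>

int main(void)
{
	unsigned int target = 0x07;	/* PHY_MCB_TARGET */

	/* Target_ID[5:2] -> MSCC_EXT_PAGE_CSR_CNTL_20 bits[3:0] */
	unsigned int cntl20 = target >> 2;		/* 0x1 */

	/* Target_ID[1:0] -> MSCC_EXT_PAGE_CSR_CNTL_19 bits[13:12] */
	unsigned int cntl19 = (target & 0x3) << 12;	/* 0x3000 */

	printf("CNTL_20 target field = 0x%x, CNTL_19 target field = 0x%x\n",
	       cntl20, cntl19);
	return 0;
}

This matches the MSCC_PHY_CSR_CNTL_20_TARGET(target >> 2) and MSCC_PHY_CSR_CNTL_19_TARGET(target & 0x3) usage in the accessors below.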
+ */ + + /* Setup the Target ID */ + phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_20, + MSCC_PHY_CSR_CNTL_20_TARGET(target >> 2)); + + /* Trigger CSR Action - Read into the CSR's */ + phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_19, + MSCC_PHY_CSR_CNTL_19_CMD | MSCC_PHY_CSR_CNTL_19_READ | + MSCC_PHY_CSR_CNTL_19_REG_ADDR(reg) | + MSCC_PHY_CSR_CNTL_19_TARGET(target & 0x3)); + + /* Wait for register access */ + deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS); + do { + usleep_range(500, 1000); + val = phy_base_read(phydev, MSCC_EXT_PAGE_CSR_CNTL_19); + } while (time_before(jiffies, deadline) && + !(val & MSCC_PHY_CSR_CNTL_19_CMD)); + + if (!(val & MSCC_PHY_CSR_CNTL_19_CMD)) + return 0xffffffff; + + /* Read the Least Significant Word (LSW) (17) */ + val_l = phy_base_read(phydev, MSCC_EXT_PAGE_CSR_CNTL_17); + + /* Read the Most Significant Word (MSW) (18) */ + val_h = phy_base_read(phydev, MSCC_EXT_PAGE_CSR_CNTL_18); + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, + MSCC_PHY_PAGE_STANDARD); + + return (val_h << 16) | val_l; +} + +static int vsc85xx_csr_ctrl_phy_write(struct phy_device *phydev, + u32 target, u32 reg, u32 val) +{ + unsigned long deadline; + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_CSR_CNTL); + + /* CSR registers are grouped under different Target IDs. + * 6-bit Target_ID is split between MSCC_EXT_PAGE_CSR_CNTL_20 and + * MSCC_EXT_PAGE_CSR_CNTL_19 registers. + * Target_ID[5:2] maps to bits[3:0] of MSCC_EXT_PAGE_CSR_CNTL_20 + * and Target_ID[1:0] maps to bits[13:12] of MSCC_EXT_PAGE_CSR_CNTL_19. + */ + + /* Setup the Target ID */ + phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_20, + MSCC_PHY_CSR_CNTL_20_TARGET(target >> 2)); + + /* Write the Least Significant Word (LSW) (17) */ + phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_17, (u16)val); + + /* Write the Most Significant Word (MSW) (18) */ + phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_18, (u16)(val >> 16)); + + /* Trigger CSR Action - Write into the CSR's */ + phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_19, + MSCC_PHY_CSR_CNTL_19_CMD | + MSCC_PHY_CSR_CNTL_19_REG_ADDR(reg) | + MSCC_PHY_CSR_CNTL_19_TARGET(target & 0x3)); + + /* Wait for register access */ + deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS); + do { + usleep_range(500, 1000); + val = phy_base_read(phydev, MSCC_EXT_PAGE_CSR_CNTL_19); + } while (time_before(jiffies, deadline) && + !(val & MSCC_PHY_CSR_CNTL_19_CMD)); + + if (!(val & MSCC_PHY_CSR_CNTL_19_CMD)) + return -ETIMEDOUT; + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, + MSCC_PHY_PAGE_STANDARD); + + return 0; +} + +static int __phy_write_mcb_s6g(struct phy_device *phydev, u32 reg, u8 mcb, + u32 op) +{ + unsigned long deadline; + u32 val; + int ret; + + ret = vsc85xx_csr_ctrl_phy_write(phydev, PHY_MCB_TARGET, reg, + op | (1 << mcb)); + if (ret) + return -EINVAL; + + deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS); + do { + usleep_range(500, 1000); + val = vsc85xx_csr_ctrl_phy_read(phydev, PHY_MCB_TARGET, reg); + + if (val == 0xffffffff) + return -EIO; + + } while (time_before(jiffies, deadline) && (val & op)); + + if (val & op) + return -ETIMEDOUT; + + return 0; +} + +/* Trigger a read to the specified MCB */ +static int phy_update_mcb_s6g(struct phy_device *phydev, u32 reg, u8 mcb) +{ + return __phy_write_mcb_s6g(phydev, reg, mcb, PHY_MCB_S6G_READ); +} + +/* Trigger a write to the specified MCB */ +static int phy_commit_mcb_s6g(struct phy_device *phydev, u32 reg, u8 mcb) +{ + return __phy_write_mcb_s6g(phydev, reg, mcb,
PHY_MCB_S6G_WRITE); +} + +static int vsc8514_config_init(struct phy_device *phydev) +{ + struct vsc8531_private *vsc8531 = phydev->priv; + unsigned long deadline; + u16 val, addr; + int ret, i; + u32 reg; + + phydev->mdix_ctrl = ETH_TP_MDI_AUTO; + + mutex_lock(&phydev->mdio.bus->mdio_lock); + + __phy_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED); + + addr = __phy_read(phydev, MSCC_PHY_EXT_PHY_CNTL_4); + addr >>= PHY_CNTL_4_ADDR_POS; + + val = __phy_read(phydev, MSCC_PHY_ACTIPHY_CNTL); + + if (val & PHY_ADDR_REVERSED) + vsc8531->base_addr = phydev->mdio.addr + addr; + else + vsc8531->base_addr = phydev->mdio.addr - addr; + + /* Some parts of the init sequence are identical for every PHY in the + * package. Some parts are modifying the GPIO register bank which is a + * set of registers that are affecting all PHYs, a few resetting the + * microprocessor common to all PHYs. + * All PHYs' interrupts mask register has to be zeroed before enabling + * any PHY's interrupt in this register. + * For all these reasons, we need to do the init sequence once and only + * once whatever is the first PHY in the package that is initialized and + * do the correct init sequence for all PHYs that are package-critical + * in this pre-init function. + */ + if (!vsc8584_is_pkg_init(phydev, val & PHY_ADDR_REVERSED ? 1 : 0)) + vsc8514_config_pre_init(phydev); + + vsc8531->pkg_init = true; + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, + MSCC_PHY_PAGE_EXTENDED_GPIO); + + val = phy_base_read(phydev, MSCC_PHY_MAC_CFG_FASTLINK); + + val &= ~MAC_CFG_MASK; + val |= MAC_CFG_QSGMII; + ret = phy_base_write(phydev, MSCC_PHY_MAC_CFG_FASTLINK, val); + + if (ret) + goto err; + + ret = vsc8584_cmd(phydev, + PROC_CMD_MCB_ACCESS_MAC_CONF | + PROC_CMD_RST_CONF_PORT | + PROC_CMD_READ_MOD_WRITE_PORT | PROC_CMD_QSGMII_MAC); + if (ret) + goto err; + + /* 6g mcb */ + phy_update_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0); + /* lcpll mcb */ + phy_update_mcb_s6g(phydev, PHY_S6G_LCPLL_CFG, 0); + /* pll5gcfg0 */ + ret = vsc85xx_csr_ctrl_phy_write(phydev, PHY_MCB_TARGET, + PHY_S6G_PLL5G_CFG0, 0x7036f145); + if (ret) + goto err; + + phy_commit_mcb_s6g(phydev, PHY_S6G_LCPLL_CFG, 0); + /* pllcfg */ + ret = vsc85xx_csr_ctrl_phy_write(phydev, PHY_MCB_TARGET, + PHY_S6G_PLL_CFG, + (3 << PHY_S6G_PLL_ENA_OFFS_POS) | + (120 << PHY_S6G_PLL_FSM_CTRL_DATA_POS) + | (0 << PHY_S6G_PLL_FSM_ENA_POS)); + if (ret) + goto err; + + /* commoncfg */ + ret = vsc85xx_csr_ctrl_phy_write(phydev, PHY_MCB_TARGET, + PHY_S6G_COMMON_CFG, + (0 << PHY_S6G_SYS_RST_POS) | + (0 << PHY_S6G_ENA_LANE_POS) | + (0 << PHY_S6G_ENA_LOOP_POS) | + (0 << PHY_S6G_QRATE_POS) | + (3 << PHY_S6G_IF_MODE_POS)); + if (ret) + goto err; + + /* misccfg */ + ret = vsc85xx_csr_ctrl_phy_write(phydev, PHY_MCB_TARGET, + PHY_S6G_MISC_CFG, 1); + if (ret) + goto err; + + /* gpcfg */ + ret = vsc85xx_csr_ctrl_phy_write(phydev, PHY_MCB_TARGET, + PHY_S6G_GPC_CFG, 768); + if (ret) + goto err; + + phy_commit_mcb_s6g(phydev, PHY_S6G_DFT_CFG2, 0); + + deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS); + do { + usleep_range(500, 1000); + phy_update_mcb_s6g(phydev, PHY_MCB_S6G_CFG, + 0); /* read 6G MCB into CSRs */ + reg = vsc85xx_csr_ctrl_phy_read(phydev, PHY_MCB_TARGET, + PHY_S6G_PLL_STATUS); + if (reg == 0xffffffff) { + mutex_unlock(&phydev->mdio.bus->mdio_lock); + return -EIO; + } + + } while (time_before(jiffies, deadline) && (reg & BIT(12))); + + if (reg & BIT(12)) { + mutex_unlock(&phydev->mdio.bus->mdio_lock); + return -ETIMEDOUT; + } + + /* misccfg */ + ret = 
vsc85xx_csr_ctrl_phy_write(phydev, PHY_MCB_TARGET, + PHY_S6G_MISC_CFG, 0); + if (ret) + goto err; + + phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0); + + deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS); + do { + usleep_range(500, 1000); + phy_update_mcb_s6g(phydev, PHY_MCB_S6G_CFG, + 0); /* read 6G MCB into CSRs */ + reg = vsc85xx_csr_ctrl_phy_read(phydev, PHY_MCB_TARGET, + PHY_S6G_IB_STATUS0); + if (reg == 0xffffffff) { + mutex_unlock(&phydev->mdio.bus->mdio_lock); + return -EIO; + } + + } while (time_before(jiffies, deadline) && !(reg & BIT(8))); + + if (!(reg & BIT(8))) { + mutex_unlock(&phydev->mdio.bus->mdio_lock); + return -ETIMEDOUT; + } + + mutex_unlock(&phydev->mdio.bus->mdio_lock); + + ret = phy_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD); + + if (ret) + return ret; + + ret = phy_modify(phydev, MSCC_PHY_EXT_PHY_CNTL_1, MEDIA_OP_MODE_MASK, + MEDIA_OP_MODE_COPPER); + + if (ret) + return ret; + + ret = genphy_soft_reset(phydev); + + if (ret) + return ret; + + for (i = 0; i < vsc8531->nleds; i++) { + ret = vsc85xx_led_cntl_set(phydev, i, vsc8531->leds_mode[i]); + if (ret) + return ret; + } + + return ret; + +err: + mutex_unlock(&phydev->mdio.bus->mdio_lock); + return ret; +} + static int vsc85xx_ack_interrupt(struct phy_device *phydev) { int rc = 0; @@ -1791,6 +2209,31 @@ static int vsc85xx_read_status(struct phy_device *phydev) return genphy_read_status(phydev); } +static int vsc8514_probe(struct phy_device *phydev) +{ + struct vsc8531_private *vsc8531; + u32 default_mode[4] = {VSC8531_LINK_1000_ACTIVITY, + VSC8531_LINK_100_ACTIVITY, VSC8531_LINK_ACTIVITY, + VSC8531_DUPLEX_COLLISION}; + + vsc8531 = devm_kzalloc(&phydev->mdio.dev, sizeof(*vsc8531), GFP_KERNEL); + if (!vsc8531) + return -ENOMEM; + + phydev->priv = vsc8531; + + vsc8531->nleds = 4; + vsc8531->supp_led_modes = VSC85XX_SUPP_LED_MODES; + vsc8531->hw_stats = vsc85xx_hw_stats; + vsc8531->nstats = ARRAY_SIZE(vsc85xx_hw_stats); + vsc8531->stats = devm_kmalloc_array(&phydev->mdio.dev, vsc8531->nstats, + sizeof(u64), GFP_KERNEL); + if (!vsc8531->stats) + return -ENOMEM; + + return vsc85xx_dt_led_modes_get(phydev, default_mode); +} + static int vsc8574_probe(struct phy_device *phydev) { struct vsc8531_private *vsc8531; @@ -1879,10 +2322,33 @@ static int vsc85xx_probe(struct phy_device *phydev) /* Microsemi VSC85xx PHYs */ static struct phy_driver vsc85xx_driver[] = { { + .phy_id = PHY_ID_VSC8514, + .name = "Microsemi GE VSC8514 SyncE", + .phy_id_mask = 0xfffffff0, + .soft_reset = &genphy_soft_reset, + .config_init = &vsc8514_config_init, + .config_aneg = &vsc85xx_config_aneg, + .read_status = &vsc85xx_read_status, + .ack_interrupt = &vsc85xx_ack_interrupt, + .config_intr = &vsc85xx_config_intr, + .suspend = &genphy_suspend, + .resume = &genphy_resume, + .probe = &vsc8514_probe, + .set_wol = &vsc85xx_wol_set, + .get_wol = &vsc85xx_wol_get, + .get_tunable = &vsc85xx_get_tunable, + .set_tunable = &vsc85xx_set_tunable, + .read_page = &vsc85xx_phy_read_page, + .write_page = &vsc85xx_phy_write_page, + .get_sset_count = &vsc85xx_get_sset_count, + .get_strings = &vsc85xx_get_strings, + .get_stats = &vsc85xx_get_stats, +}, +{ .phy_id = PHY_ID_VSC8530, .name = "Microsemi FE VSC8530", .phy_id_mask = 0xfffffff0, - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .soft_reset = &genphy_soft_reset, .config_init = &vsc85xx_config_init, .config_aneg = &vsc85xx_config_aneg, @@ -1907,7 +2373,7 @@ static struct phy_driver vsc85xx_driver[] = { .phy_id = PHY_ID_VSC8531, .name = "Microsemi VSC8531", 
.phy_id_mask = 0xfffffff0, - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .soft_reset = &genphy_soft_reset, .config_init = &vsc85xx_config_init, .config_aneg = &vsc85xx_config_aneg, @@ -1932,7 +2398,7 @@ static struct phy_driver vsc85xx_driver[] = { .phy_id = PHY_ID_VSC8540, .name = "Microsemi FE VSC8540 SyncE", .phy_id_mask = 0xfffffff0, - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .soft_reset = &genphy_soft_reset, .config_init = &vsc85xx_config_init, .config_aneg = &vsc85xx_config_aneg, @@ -1957,7 +2423,7 @@ static struct phy_driver vsc85xx_driver[] = { .phy_id = PHY_ID_VSC8541, .name = "Microsemi VSC8541 SyncE", .phy_id_mask = 0xfffffff0, - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .soft_reset = &genphy_soft_reset, .config_init = &vsc85xx_config_init, .config_aneg = &vsc85xx_config_aneg, @@ -1982,7 +2448,7 @@ static struct phy_driver vsc85xx_driver[] = { .phy_id = PHY_ID_VSC8574, .name = "Microsemi GE VSC8574 SyncE", .phy_id_mask = 0xfffffff0, - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .soft_reset = &genphy_soft_reset, .config_init = &vsc8584_config_init, .config_aneg = &vsc85xx_config_aneg, @@ -2008,7 +2474,7 @@ static struct phy_driver vsc85xx_driver[] = { .phy_id = PHY_ID_VSC8584, .name = "Microsemi GE VSC8584 SyncE", .phy_id_mask = 0xfffffff0, - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .soft_reset = &genphy_soft_reset, .config_init = &vsc8584_config_init, .config_aneg = &vsc85xx_config_aneg, @@ -2034,6 +2500,7 @@ static struct phy_driver vsc85xx_driver[] = { module_phy_driver(vsc85xx_driver); static struct mdio_device_id __maybe_unused vsc85xx_tbl[] = { + { PHY_ID_VSC8514, 0xfffffff0, }, { PHY_ID_VSC8530, 0xfffffff0, }, { PHY_ID_VSC8531, 0xfffffff0, }, { PHY_ID_VSC8540, 0xfffffff0, }, diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c index 42282a86b680..a221dd552c3c 100644 --- a/drivers/net/phy/national.c +++ b/drivers/net/phy/national.c @@ -128,7 +128,7 @@ static struct phy_driver dp83865_driver[] = { { .phy_id = DP83865_PHY_ID, .phy_id_mask = 0xfffffff0, .name = "NatSemi DP83865", - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .config_init = ns_config_init, .ack_interrupt = ns_ack_interrupt, .config_intr = ns_config_intr, diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c index 9e24d9569424..abe13dfe50ad 100644 --- a/drivers/net/phy/phy-c45.c +++ b/drivers/net/phy/phy-c45.c @@ -262,12 +262,30 @@ int genphy_c45_read_lpa(struct phy_device *phydev) { int val; + val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); + if (val < 0) + return val; + + if (!(val & MDIO_AN_STAT1_COMPLETE)) { + linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, + phydev->lp_advertising); + mii_10gbt_stat_mod_linkmode_lpa_t(phydev->lp_advertising, 0); + mii_adv_mod_linkmode_adv_t(phydev->lp_advertising, 0); + phydev->pause = 0; + phydev->asym_pause = 0; + + return 0; + } + + linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->lp_advertising, + val & MDIO_AN_STAT1_LPABLE); + /* Read the link partner's base page advertisement */ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA); if (val < 0) return val; - mii_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, val); + mii_adv_mod_linkmode_adv_t(phydev->lp_advertising, val); phydev->pause = val & LPA_PAUSE_CAP ? 1 : 0; phydev->asym_pause = val & LPA_PAUSE_ASYM ? 
1 : 0; @@ -498,21 +516,10 @@ int gen10g_config_aneg(struct phy_device *phydev) } EXPORT_SYMBOL_GPL(gen10g_config_aneg); -static int gen10g_read_status(struct phy_device *phydev) -{ - /* For now just lie and say it's 10G all the time */ - phydev->speed = SPEED_10000; - phydev->duplex = DUPLEX_FULL; - - return genphy_c45_read_link(phydev); -} - -struct phy_driver genphy_10g_driver = { +struct phy_driver genphy_c45_driver = { .phy_id = 0xffffffff, .phy_id_mask = 0xffffffff, - .name = "Generic 10G PHY", + .name = "Generic Clause 45 PHY", .soft_reset = genphy_no_soft_reset, - .features = PHY_10GBIT_FEATURES, - .config_aneg = gen10g_config_aneg, - .read_status = gen10g_read_status, + .read_status = genphy_c45_read_status, }; diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c index 5016cd5fd7c7..12ce671020a5 100644 --- a/drivers/net/phy/phy-core.c +++ b/drivers/net/phy/phy-core.c @@ -8,6 +8,11 @@ const char *phy_speed_to_str(int speed) { + BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 67, + "Enum ethtool_link_mode_bit_indices and phylib are out of sync. " + "If a speed or mode has been added please update phy_speed_to_str " + "and the PHY settings array.\n"); + switch (speed) { case SPEED_10: return "10Mbps"; @@ -35,6 +40,8 @@ const char *phy_speed_to_str(int speed) return "56Gbps"; case SPEED_100000: return "100Gbps"; + case SPEED_200000: + return "200Gbps"; case SPEED_UNKNOWN: return "Unknown"; default: @@ -58,222 +65,81 @@ EXPORT_SYMBOL_GPL(phy_duplex_to_str); /* A mapping of all SUPPORTED settings to speed/duplex. This table * must be grouped by speed and sorted in descending match priority * - iow, descending speed. */ + +#define PHY_SETTING(s, d, b) { .speed = SPEED_ ## s, .duplex = DUPLEX_ ## d, \ + .bit = ETHTOOL_LINK_MODE_ ## b ## _BIT} + static const struct phy_setting settings[] = { + /* 200G */ + PHY_SETTING( 200000, FULL, 200000baseCR4_Full ), + PHY_SETTING( 200000, FULL, 200000baseKR4_Full ), + PHY_SETTING( 200000, FULL, 200000baseLR4_ER4_FR4_Full ), + PHY_SETTING( 200000, FULL, 200000baseDR4_Full ), + PHY_SETTING( 200000, FULL, 200000baseSR4_Full ), /* 100G */ - { - .speed = SPEED_100000, - .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, - }, - { - .speed = SPEED_100000, - .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, - }, - { - .speed = SPEED_100000, - .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, - }, - { - .speed = SPEED_100000, - .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, - }, + PHY_SETTING( 100000, FULL, 100000baseCR4_Full ), + PHY_SETTING( 100000, FULL, 100000baseKR4_Full ), + PHY_SETTING( 100000, FULL, 100000baseLR4_ER4_Full ), + PHY_SETTING( 100000, FULL, 100000baseSR4_Full ), + PHY_SETTING( 100000, FULL, 100000baseCR2_Full ), + PHY_SETTING( 100000, FULL, 100000baseKR2_Full ), + PHY_SETTING( 100000, FULL, 100000baseLR2_ER2_FR2_Full ), + PHY_SETTING( 100000, FULL, 100000baseDR2_Full ), + PHY_SETTING( 100000, FULL, 100000baseSR2_Full ), /* 56G */ - { - .speed = SPEED_56000, - .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT, - }, - { - .speed = SPEED_56000, - .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT, - }, - { - .speed = SPEED_56000, - .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT, - }, - { - .speed = SPEED_56000, - .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT, - }, + PHY_SETTING( 56000, FULL, 56000baseCR4_Full ), + PHY_SETTING( 
56000, FULL, 56000baseKR4_Full ), + PHY_SETTING( 56000, FULL, 56000baseLR4_Full ), + PHY_SETTING( 56000, FULL, 56000baseSR4_Full ), /* 50G */ - { - .speed = SPEED_50000, - .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, - }, - { - .speed = SPEED_50000, - .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, - }, - { - .speed = SPEED_50000, - .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, - }, + PHY_SETTING( 50000, FULL, 50000baseCR2_Full ), + PHY_SETTING( 50000, FULL, 50000baseKR2_Full ), + PHY_SETTING( 50000, FULL, 50000baseSR2_Full ), + PHY_SETTING( 50000, FULL, 50000baseCR_Full ), + PHY_SETTING( 50000, FULL, 50000baseKR_Full ), + PHY_SETTING( 50000, FULL, 50000baseLR_ER_FR_Full ), + PHY_SETTING( 50000, FULL, 50000baseDR_Full ), + PHY_SETTING( 50000, FULL, 50000baseSR_Full ), /* 40G */ - { - .speed = SPEED_40000, - .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, - }, - { - .speed = SPEED_40000, - .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, - }, - { - .speed = SPEED_40000, - .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, - }, - { - .speed = SPEED_40000, - .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, - }, + PHY_SETTING( 40000, FULL, 40000baseCR4_Full ), + PHY_SETTING( 40000, FULL, 40000baseKR4_Full ), + PHY_SETTING( 40000, FULL, 40000baseLR4_Full ), + PHY_SETTING( 40000, FULL, 40000baseSR4_Full ), /* 25G */ - { - .speed = SPEED_25000, - .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, - }, - { - .speed = SPEED_25000, - .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, - }, - { - .speed = SPEED_25000, - .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, - }, - + PHY_SETTING( 25000, FULL, 25000baseCR_Full ), + PHY_SETTING( 25000, FULL, 25000baseKR_Full ), + PHY_SETTING( 25000, FULL, 25000baseSR_Full ), /* 20G */ - { - .speed = SPEED_20000, - .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT, - }, - { - .speed = SPEED_20000, - .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT, - }, + PHY_SETTING( 20000, FULL, 20000baseKR2_Full ), + PHY_SETTING( 20000, FULL, 20000baseMLD2_Full ), /* 10G */ - { - .speed = SPEED_10000, - .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, - }, - { - .speed = SPEED_10000, - .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_10000baseER_Full_BIT, - }, - { - .speed = SPEED_10000, - .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, - }, - { - .speed = SPEED_10000, - .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, - }, - { - .speed = SPEED_10000, - .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, - }, - { - .speed = SPEED_10000, - .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, - }, - { - .speed = SPEED_10000, - .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, - }, - { - .speed = SPEED_10000, - .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, - }, - { - .speed = SPEED_10000, - .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_10000baseT_Full_BIT, - }, + PHY_SETTING( 10000, FULL, 10000baseCR_Full ), + PHY_SETTING( 10000, FULL, 10000baseER_Full ), + PHY_SETTING( 10000, FULL, 10000baseKR_Full ), + PHY_SETTING( 10000, FULL, 10000baseKX4_Full ), + PHY_SETTING( 10000, FULL, 10000baseLR_Full ), + PHY_SETTING( 10000, FULL, 
10000baseLRM_Full ), + PHY_SETTING( 10000, FULL, 10000baseR_FEC ), + PHY_SETTING( 10000, FULL, 10000baseSR_Full ), + PHY_SETTING( 10000, FULL, 10000baseT_Full ), /* 5G */ - { - .speed = SPEED_5000, - .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_5000baseT_Full_BIT, - }, - + PHY_SETTING( 5000, FULL, 5000baseT_Full ), /* 2.5G */ - { - .speed = SPEED_2500, - .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_2500baseT_Full_BIT, - }, - { - .speed = SPEED_2500, - .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_2500baseX_Full_BIT, - }, + PHY_SETTING( 2500, FULL, 2500baseT_Full ), + PHY_SETTING( 2500, FULL, 2500baseX_Full ), /* 1G */ - { - .speed = SPEED_1000, - .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, - }, - { - .speed = SPEED_1000, - .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_1000baseT_Full_BIT, - }, - { - .speed = SPEED_1000, - .duplex = DUPLEX_HALF, - .bit = ETHTOOL_LINK_MODE_1000baseT_Half_BIT, - }, - { - .speed = SPEED_1000, - .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_1000baseX_Full_BIT, - }, + PHY_SETTING( 1000, FULL, 1000baseKX_Full ), + PHY_SETTING( 1000, FULL, 1000baseT_Full ), + PHY_SETTING( 1000, HALF, 1000baseT_Half ), + PHY_SETTING( 1000, FULL, 1000baseX_Full ), /* 100M */ - { - .speed = SPEED_100, - .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_100baseT_Full_BIT, - }, - { - .speed = SPEED_100, - .duplex = DUPLEX_HALF, - .bit = ETHTOOL_LINK_MODE_100baseT_Half_BIT, - }, + PHY_SETTING( 100, FULL, 100baseT_Full ), + PHY_SETTING( 100, HALF, 100baseT_Half ), /* 10M */ - { - .speed = SPEED_10, - .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_10baseT_Full_BIT, - }, - { - .speed = SPEED_10, - .duplex = DUPLEX_HALF, - .bit = ETHTOOL_LINK_MODE_10baseT_Half_BIT, - }, + PHY_SETTING( 10, FULL, 10baseT_Full ), + PHY_SETTING( 10, HALF, 10baseT_Half ), }; +#undef PHY_SETTING /** * phy_lookup_setting - lookup a PHY setting diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 3745220c5c98..984de987241c 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -214,10 +214,6 @@ static void phy_sanitize_settings(struct phy_device *phydev) { const struct phy_setting *setting; - /* Sanitize settings based on PHY capabilities */ - if (linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported)) - phydev->autoneg = AUTONEG_DISABLE; - setting = phy_find_valid(phydev->speed, phydev->duplex, phydev->supported); if (setting) { @@ -891,9 +887,6 @@ void phy_state_machine(struct work_struct *work) old_state = phydev->state; - if (phydev->drv && phydev->drv->link_change_notify) - phydev->drv->link_change_notify(phydev); - switch (phydev->state) { case PHY_DOWN: case PHY_READY: @@ -940,10 +933,13 @@ void phy_state_machine(struct work_struct *work) if (err < 0) phy_error(phydev); - if (old_state != phydev->state) + if (old_state != phydev->state) { phydev_dbg(phydev, "PHY state change %s -> %s\n", phy_state_to_str(old_state), phy_state_to_str(phydev->state)); + if (phydev->drv && phydev->drv->link_change_notify) + phydev->drv->link_change_notify(phydev); + } /* Only re-schedule a PHY state machine change if we are polling the * PHY, if PHY_IGNORE_INTERRUPT is set, then we will be moving diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 77068c545de0..2a2aaa5f3e74 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -225,7 +225,7 @@ static void phy_mdio_device_remove(struct mdio_device *mdiodev) } static struct phy_driver genphy_driver; -extern struct phy_driver 
genphy_10g_driver; +extern struct phy_driver genphy_c45_driver; static LIST_HEAD(phy_fixup_list); static DEFINE_MUTEX(phy_fixup_lock); @@ -1174,7 +1174,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, */ if (!d->driver) { if (phydev->is_c45) - d->driver = &genphy_10g_driver.mdiodrv.driver; + d->driver = &genphy_c45_driver.mdiodrv.driver; else d->driver = &genphy_driver.mdiodrv.driver; @@ -1335,7 +1335,7 @@ EXPORT_SYMBOL_GPL(phy_driver_is_genphy); bool phy_driver_is_genphy_10g(struct phy_device *phydev) { return phy_driver_is_genphy_kind(phydev, - &genphy_10g_driver.mdiodrv.driver); + &genphy_c45_driver.mdiodrv.driver); } EXPORT_SYMBOL_GPL(phy_driver_is_genphy_10g); @@ -1710,23 +1710,19 @@ int genphy_update_link(struct phy_device *phydev) */ if (!phy_polling_mode(phydev)) { status = phy_read(phydev, MII_BMSR); - if (status < 0) { + if (status < 0) return status; - } else if (status & BMSR_LSTATUS) { - phydev->link = 1; - return 0; - } + else if (status & BMSR_LSTATUS) + goto done; } /* Read link and autonegotiation status */ status = phy_read(phydev, MII_BMSR); if (status < 0) return status; - - if ((status & BMSR_LSTATUS) == 0) - phydev->link = 0; - else - phydev->link = 1; +done: + phydev->link = status & BMSR_LSTATUS ? 1 : 0; + phydev->autoneg_complete = status & BMSR_ANEGCOMPLETE ? 1 : 0; return 0; } @@ -1743,23 +1739,26 @@ EXPORT_SYMBOL(genphy_update_link); */ int genphy_read_status(struct phy_device *phydev) { - int adv; - int err; - int lpa; - int lpagb = 0; + int adv, lpa, lpagb, err, old_link = phydev->link; /* Update the link, but return if there was an error */ err = genphy_update_link(phydev); if (err) return err; + /* why bother the PHY if nothing can have changed */ + if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link) + return 0; + + phydev->speed = SPEED_UNKNOWN; + phydev->duplex = DUPLEX_UNKNOWN; + phydev->pause = 0; + phydev->asym_pause = 0; + linkmode_zero(phydev->lp_advertising); - if (AUTONEG_ENABLE == phydev->autoneg) { - if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, - phydev->supported) || - linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, - phydev->supported)) { + if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete) { + if (phydev->is_gigabit_capable) { lpagb = phy_read(phydev, MII_STAT1000); if (lpagb < 0) return lpagb; @@ -1785,14 +1784,8 @@ int genphy_read_status(struct phy_device *phydev) return lpa; mii_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, lpa); - - phydev->speed = SPEED_UNKNOWN; - phydev->duplex = DUPLEX_UNKNOWN; - phydev->pause = 0; - phydev->asym_pause = 0; - phy_resolve_aneg_linkmode(phydev); - } else { + } else if (phydev->autoneg == AUTONEG_DISABLE) { int bmcr = phy_read(phydev, MII_BMCR); if (bmcr < 0) @@ -1809,9 +1802,6 @@ int genphy_read_status(struct phy_device *phydev) phydev->speed = SPEED_100; else phydev->speed = SPEED_10; - - phydev->pause = 0; - phydev->asym_pause = 0; } return 0; @@ -1829,13 +1819,25 @@ EXPORT_SYMBOL(genphy_read_status); */ int genphy_soft_reset(struct phy_device *phydev) { + u16 res = BMCR_RESET; int ret; - ret = phy_set_bits(phydev, MII_BMCR, BMCR_RESET); + if (phydev->autoneg == AUTONEG_ENABLE) + res |= BMCR_ANRESTART; + + ret = phy_modify(phydev, MII_BMCR, BMCR_ISOLATE, res); if (ret < 0) return ret; - return phy_poll_reset(phydev); + ret = phy_poll_reset(phydev); + if (ret) + return ret; + + /* BMCR may be reset to defaults */ + if (phydev->autoneg == AUTONEG_DISABLE) + ret = genphy_setup_forced(phydev); + + return ret; } 
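For illustration, a minimal sketch of what the reworked genphy_soft_reset() above now guarantees; the helper below is hypothetical, while genphy_soft_reset(), genphy_setup_forced() and the AUTONEG_/SPEED_/DUPLEX_ constants are existing phylib API. Writing BMCR_RESET returns the remaining BMCR bits to their hardware defaults, so a PHY forced to a fixed speed/duplex would otherwise come out of reset with autonegotiation re-enabled.

static int example_force_100_full(struct phy_device *phydev)
{
	phydev->autoneg = AUTONEG_DISABLE;
	phydev->speed = SPEED_100;
	phydev->duplex = DUPLEX_FULL;

	/* BMCR_RESET restores BMCR defaults; genphy_soft_reset() now
	 * reapplies the forced configuration via genphy_setup_forced()
	 * when autoneg is disabled, so the caller no longer has to redo
	 * the setup by hand after the reset.
	 */
	return genphy_soft_reset(phydev);
}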
EXPORT_SYMBOL(genphy_soft_reset); @@ -1887,6 +1889,54 @@ int genphy_config_init(struct phy_device *phydev) } EXPORT_SYMBOL(genphy_config_init); +/** + * genphy_read_abilities - read PHY abilities from Clause 22 registers + * @phydev: target phy_device struct + * + * Description: Reads the PHY's abilities and populates + * phydev->supported accordingly. + * + * Returns: 0 on success, < 0 on failure + */ +int genphy_read_abilities(struct phy_device *phydev) +{ + int val; + + linkmode_set_bit_array(phy_basic_ports_array, + ARRAY_SIZE(phy_basic_ports_array), + phydev->supported); + + val = phy_read(phydev, MII_BMSR); + if (val < 0) + return val; + + linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported, + val & BMSR_ANEGCAPABLE); + + linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, phydev->supported, + val & BMSR_100FULL); + linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, phydev->supported, + val & BMSR_100HALF); + linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, phydev->supported, + val & BMSR_10FULL); + linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, phydev->supported, + val & BMSR_10HALF); + + if (val & BMSR_ESTATEN) { + val = phy_read(phydev, MII_ESTATUS); + if (val < 0) + return val; + + linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + phydev->supported, val & ESTATUS_1000_TFULL); + linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + phydev->supported, val & ESTATUS_1000_THALF); + } + + return 0; +} +EXPORT_SYMBOL(genphy_read_abilities); + /* This is used for the phy device which doesn't support the MMD extended * register access, but it does have side effect when we are trying to access * the MMD register via indirect method. @@ -2104,12 +2154,28 @@ static int phy_probe(struct device *dev) */ if (phydrv->features) { linkmode_copy(phydev->supported, phydrv->features); - } else { + } else if (phydrv->get_features) { err = phydrv->get_features(phydev); - if (err) - goto out; + } else if (phydev->is_c45) { + err = genphy_c45_pma_read_abilities(phydev); + } else { + err = genphy_read_abilities(phydev); } + if (err) + goto out; + + if (!linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, + phydev->supported)) + phydev->autoneg = 0; + + if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + phydev->supported)) + phydev->is_gigabit_capable = 1; + if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + phydev->supported)) + phydev->is_gigabit_capable = 1; + of_set_phy_supported(phydev); linkmode_copy(phydev->advertising, phydev->supported); @@ -2177,11 +2243,11 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner) int retval; /* Either the features are hard coded, or dynamically - * determine. It cannot be both or neither + * determined. It cannot be both. 
*/ - if (WARN_ON((!new_driver->features && !new_driver->get_features) || - (new_driver->features && new_driver->get_features))) { - pr_err("%s: Driver features are missing\n", new_driver->name); + if (WARN_ON(new_driver->features && new_driver->get_features)) { + pr_err("%s: features and get_features must not both be set\n", + new_driver->name); return -EINVAL; } @@ -2243,8 +2309,7 @@ static struct phy_driver genphy_driver = { .phy_id_mask = 0xffffffff, .name = "Generic PHY", .soft_reset = genphy_no_soft_reset, - .config_init = genphy_config_init, - .features = PHY_GBIT_ALL_PORTS_FEATURES, + .get_features = genphy_read_abilities, .aneg_done = genphy_aneg_done, .suspend = genphy_suspend, .resume = genphy_resume, @@ -2261,14 +2326,14 @@ static int __init phy_init(void) features_init(); - rc = phy_driver_register(&genphy_10g_driver, THIS_MODULE); + rc = phy_driver_register(&genphy_c45_driver, THIS_MODULE); if (rc) - goto err_10g; + goto err_c45; rc = phy_driver_register(&genphy_driver, THIS_MODULE); if (rc) { - phy_driver_unregister(&genphy_10g_driver); -err_10g: + phy_driver_unregister(&genphy_c45_driver); +err_c45: mdio_bus_exit(); } @@ -2277,7 +2342,7 @@ err_10g: static void __exit phy_exit(void) { - phy_driver_unregister(&genphy_10g_driver); + phy_driver_unregister(&genphy_c45_driver); phy_driver_unregister(&genphy_driver); mdio_bus_exit(); } diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c index 5486f6fb2ab2..1b15a991ee06 100644 --- a/drivers/net/phy/qsemi.c +++ b/drivers/net/phy/qsemi.c @@ -110,7 +110,7 @@ static struct phy_driver qs6612_driver[] = { { .phy_id = 0x00181440, .name = "QS6612", .phy_id_mask = 0xfffffff0, - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .config_init = qs6612_config_init, .ack_interrupt = qs6612_ack_interrupt, .config_intr = qs6612_config_intr, diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index 10df52ccddfe..d6a10f323117 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -151,21 +151,14 @@ static int rtl8211_config_aneg(struct phy_device *phydev) static int rtl8211c_config_init(struct phy_device *phydev) { /* RTL8211C has an issue when operating in Gigabit slave mode */ - phy_set_bits(phydev, MII_CTRL1000, - CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER); - - return genphy_config_init(phydev); + return phy_set_bits(phydev, MII_CTRL1000, + CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER); } static int rtl8211f_config_init(struct phy_device *phydev) { - int ret; u16 val = 0; - ret = genphy_config_init(phydev); - if (ret < 0) - return ret; - /* enable TX-delay for rgmii-id and rgmii-txid, otherwise disable it */ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) @@ -192,10 +185,6 @@ static int rtl8366rb_config_init(struct phy_device *phydev) { int ret; - ret = genphy_config_init(phydev); - if (ret < 0) - return ret; - ret = phy_set_bits(phydev, RTL8366RB_POWER_SAVE, RTL8366RB_POWER_SAVE_ON); if (ret) { @@ -210,11 +199,9 @@ static struct phy_driver realtek_drvs[] = { { PHY_ID_MATCH_EXACT(0x00008201), .name = "RTL8201CP Ethernet", - .features = PHY_BASIC_FEATURES, }, { PHY_ID_MATCH_EXACT(0x001cc816), .name = "RTL8201F Fast Ethernet", - .features = PHY_BASIC_FEATURES, .ack_interrupt = &rtl8201_ack_interrupt, .config_intr = &rtl8201_config_intr, .suspend = genphy_suspend, @@ -224,14 +211,12 @@ static struct phy_driver realtek_drvs[] = { }, { PHY_ID_MATCH_EXACT(0x001cc910), .name = "RTL8211 Gigabit Ethernet", - .features = PHY_GBIT_FEATURES, 
.config_aneg = rtl8211_config_aneg, .read_mmd = &genphy_read_mmd_unsupported, .write_mmd = &genphy_write_mmd_unsupported, }, { PHY_ID_MATCH_EXACT(0x001cc912), .name = "RTL8211B Gigabit Ethernet", - .features = PHY_GBIT_FEATURES, .ack_interrupt = &rtl821x_ack_interrupt, .config_intr = &rtl8211b_config_intr, .read_mmd = &genphy_read_mmd_unsupported, @@ -241,14 +226,12 @@ static struct phy_driver realtek_drvs[] = { }, { PHY_ID_MATCH_EXACT(0x001cc913), .name = "RTL8211C Gigabit Ethernet", - .features = PHY_GBIT_FEATURES, .config_init = rtl8211c_config_init, .read_mmd = &genphy_read_mmd_unsupported, .write_mmd = &genphy_write_mmd_unsupported, }, { PHY_ID_MATCH_EXACT(0x001cc914), .name = "RTL8211DN Gigabit Ethernet", - .features = PHY_GBIT_FEATURES, .ack_interrupt = rtl821x_ack_interrupt, .config_intr = rtl8211e_config_intr, .suspend = genphy_suspend, @@ -256,7 +239,6 @@ static struct phy_driver realtek_drvs[] = { }, { PHY_ID_MATCH_EXACT(0x001cc915), .name = "RTL8211E Gigabit Ethernet", - .features = PHY_GBIT_FEATURES, .ack_interrupt = &rtl821x_ack_interrupt, .config_intr = &rtl8211e_config_intr, .suspend = genphy_suspend, @@ -264,7 +246,6 @@ static struct phy_driver realtek_drvs[] = { }, { PHY_ID_MATCH_EXACT(0x001cc916), .name = "RTL8211F Gigabit Ethernet", - .features = PHY_GBIT_FEATURES, .config_init = &rtl8211f_config_init, .ack_interrupt = &rtl8211f_ack_interrupt, .config_intr = &rtl8211f_config_intr, @@ -275,8 +256,6 @@ static struct phy_driver realtek_drvs[] = { }, { PHY_ID_MATCH_EXACT(0x001cc800), .name = "Generic Realtek PHY", - .features = PHY_GBIT_FEATURES, - .config_init = genphy_config_init, .suspend = genphy_suspend, .resume = genphy_resume, .read_page = rtl821x_read_page, @@ -284,7 +263,6 @@ static struct phy_driver realtek_drvs[] = { }, { PHY_ID_MATCH_EXACT(0x001cc961), .name = "RTL8366RB Gigabit Ethernet", - .features = PHY_GBIT_FEATURES, .config_init = &rtl8366rb_config_init, /* These interrupts are handled by the irq controller * embedded inside the RTL8366RB, they get unmasked when the diff --git a/drivers/net/phy/rockchip.c b/drivers/net/phy/rockchip.c index 95abf7072f32..52f1f65320fe 100644 --- a/drivers/net/phy/rockchip.c +++ b/drivers/net/phy/rockchip.c @@ -104,41 +104,14 @@ static int rockchip_integrated_phy_config_init(struct phy_device *phydev) static void rockchip_link_change_notify(struct phy_device *phydev) { - int speed = SPEED_10; - - if (phydev->autoneg == AUTONEG_ENABLE) { - int reg = phy_read(phydev, MII_SPECIAL_CONTROL_STATUS); - - if (reg < 0) { - phydev_err(phydev, "phy_read err: %d.\n", reg); - return; - } - - if (reg & MII_SPEED_100) - speed = SPEED_100; - else if (reg & MII_SPEED_10) - speed = SPEED_10; - } else { - int bmcr = phy_read(phydev, MII_BMCR); - - if (bmcr < 0) { - phydev_err(phydev, "phy_read err: %d.\n", bmcr); - return; - } - - if (bmcr & BMCR_SPEED100) - speed = SPEED_100; - else - speed = SPEED_10; - } - /* * If mode switch happens from 10BT to 100BT, all DSP/AFE * registers are set to default values. So any AFE/DSP * registers have to be re-initialized in this case. 
*/ - if ((phydev->speed == SPEED_10) && (speed == SPEED_100)) { + if (phydev->state == PHY_RUNNING && phydev->speed == SPEED_100) { int ret = rockchip_integrated_phy_analog_init(phydev); + if (ret) phydev_err(phydev, "rockchip_integrated_phy_analog_init err: %d.\n", ret); @@ -202,7 +175,7 @@ static struct phy_driver rockchip_phy_driver[] = { .phy_id = INTERNAL_EPHY_ID, .phy_id_mask = 0xfffffff0, .name = "Rockchip integrated EPHY", - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .flags = 0, .link_change_notify = rockchip_link_change_notify, .soft_reset = genphy_soft_reset, diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index c94d3bfbc772..dc3d92d340c4 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c @@ -214,7 +214,7 @@ static struct phy_driver smsc_phy_driver[] = { .phy_id_mask = 0xfffffff0, .name = "SMSC LAN83C185", - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .probe = smsc_phy_probe, @@ -233,7 +233,7 @@ static struct phy_driver smsc_phy_driver[] = { .phy_id_mask = 0xfffffff0, .name = "SMSC LAN8187", - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .probe = smsc_phy_probe, @@ -257,7 +257,7 @@ static struct phy_driver smsc_phy_driver[] = { .phy_id_mask = 0xfffffff0, .name = "SMSC LAN8700", - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .probe = smsc_phy_probe, @@ -282,7 +282,7 @@ static struct phy_driver smsc_phy_driver[] = { .phy_id_mask = 0xfffffff0, .name = "SMSC LAN911x Internal PHY", - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .probe = smsc_phy_probe, @@ -300,7 +300,7 @@ static struct phy_driver smsc_phy_driver[] = { .phy_id_mask = 0xfffffff0, .name = "SMSC LAN8710/LAN8720", - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .flags = PHY_RST_AFTER_CLK_EN, .probe = smsc_phy_probe, @@ -326,7 +326,7 @@ static struct phy_driver smsc_phy_driver[] = { .phy_id_mask = 0xfffffff0, .name = "SMSC LAN8740", - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .probe = smsc_phy_probe, diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c index 5b6acf431f98..d735a01380ed 100644 --- a/drivers/net/phy/ste10Xp.c +++ b/drivers/net/phy/ste10Xp.c @@ -81,7 +81,7 @@ static struct phy_driver ste10xp_pdriver[] = { .phy_id = STE101P_PHY_ID, .phy_id_mask = 0xfffffff0, .name = "STe101p", - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .config_init = ste10Xp_config_init, .ack_interrupt = ste10Xp_ack_interrupt, .config_intr = ste10Xp_config_intr, @@ -91,7 +91,7 @@ static struct phy_driver ste10xp_pdriver[] = { .phy_id = STE100P_PHY_ID, .phy_id_mask = 0xffffffff, .name = "STe100p", - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .config_init = ste10Xp_config_init, .ack_interrupt = ste10Xp_ack_interrupt, .config_intr = ste10Xp_config_intr, diff --git a/drivers/net/phy/uPD60620.c b/drivers/net/phy/uPD60620.c index 219fc7cdc2b3..a32b3fd8a370 100644 --- a/drivers/net/phy/uPD60620.c +++ b/drivers/net/phy/uPD60620.c @@ -87,7 +87,7 @@ static struct phy_driver upd60620_driver[1] = { { .phy_id = UPD60620_PHY_ID, .phy_id_mask = 0xfffffffe, .name = "Renesas uPD60620", - .features = PHY_BASIC_FEATURES, + /* PHY_BASIC_FEATURES */ .flags = 0, .config_init = upd60620_config_init, .read_status = upd60620_read_status, diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c index dc0dd87a6694..43691b1acfd9 100644 --- a/drivers/net/phy/vitesse.c +++ b/drivers/net/phy/vitesse.c @@ -61,7 +61,6 @@ #define PHY_ID_VSC8234 0x000fc620 #define PHY_ID_VSC8244 0x000fc6c0 
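For context, a minimal sketch (function name hypothetical, kernel types assumed) of how the phy_id/phy_id_mask pairs in these driver tables are compared, mirroring the check phylib's phy_bus_match() performs: under a 0xfffffff0 mask the low four revision bits are ignored, which is why the single VSC8514 entry removed just below, re-added in the mscc driver earlier in this diff, covers every silicon revision.

static bool example_phy_id_match(u32 dev_phy_id, u32 drv_phy_id, u32 drv_mask)
{
	/* a PHY binds to a driver entry when the IDs agree under the mask */
	return (dev_phy_id & drv_mask) == (drv_phy_id & drv_mask);
}

/* e.g. example_phy_id_match(0x00070671, 0x00070670, 0xfffffff0) is true,
 * so a rev-1 VSC8514 still matches the rev-0 table entry.
 */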
-#define PHY_ID_VSC8514 0x00070670 #define PHY_ID_VSC8572 0x000704d0 #define PHY_ID_VSC8601 0x00070420 #define PHY_ID_VSC7385 0x00070450 @@ -293,7 +292,6 @@ static int vsc82xx_config_intr(struct phy_device *phydev) err = phy_write(phydev, MII_VSC8244_IMASK, (phydev->drv->phy_id == PHY_ID_VSC8234 || phydev->drv->phy_id == PHY_ID_VSC8244 || - phydev->drv->phy_id == PHY_ID_VSC8514 || phydev->drv->phy_id == PHY_ID_VSC8572 || phydev->drv->phy_id == PHY_ID_VSC8601) ? MII_VSC8244_IMASK_MASK : @@ -389,7 +387,7 @@ static struct phy_driver vsc82xx_driver[] = { .phy_id = PHY_ID_VSC8234, .name = "Vitesse VSC8234", .phy_id_mask = 0x000ffff0, - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .config_init = &vsc824x_config_init, .config_aneg = &vsc82x4_config_aneg, .ack_interrupt = &vsc824x_ack_interrupt, @@ -398,16 +396,7 @@ static struct phy_driver vsc82xx_driver[] = { .phy_id = PHY_ID_VSC8244, .name = "Vitesse VSC8244", .phy_id_mask = 0x000fffc0, - .features = PHY_GBIT_FEATURES, - .config_init = &vsc824x_config_init, - .config_aneg = &vsc82x4_config_aneg, - .ack_interrupt = &vsc824x_ack_interrupt, - .config_intr = &vsc82xx_config_intr, -}, { - .phy_id = PHY_ID_VSC8514, - .name = "Vitesse VSC8514", - .phy_id_mask = 0x000ffff0, - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .config_init = &vsc824x_config_init, .config_aneg = &vsc82x4_config_aneg, .ack_interrupt = &vsc824x_ack_interrupt, @@ -416,7 +405,7 @@ static struct phy_driver vsc82xx_driver[] = { .phy_id = PHY_ID_VSC8572, .name = "Vitesse VSC8572", .phy_id_mask = 0x000ffff0, - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .config_init = &vsc824x_config_init, .config_aneg = &vsc82x4_config_aneg, .ack_interrupt = &vsc824x_ack_interrupt, @@ -425,7 +414,7 @@ static struct phy_driver vsc82xx_driver[] = { .phy_id = PHY_ID_VSC8601, .name = "Vitesse VSC8601", .phy_id_mask = 0x000ffff0, - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .config_init = &vsc8601_config_init, .ack_interrupt = &vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, @@ -433,7 +422,7 @@ static struct phy_driver vsc82xx_driver[] = { .phy_id = PHY_ID_VSC7385, .name = "Vitesse VSC7385", .phy_id_mask = 0x000ffff0, - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .config_init = vsc738x_config_init, .config_aneg = vsc73xx_config_aneg, .read_page = vsc73xx_read_page, @@ -442,7 +431,7 @@ static struct phy_driver vsc82xx_driver[] = { .phy_id = PHY_ID_VSC7388, .name = "Vitesse VSC7388", .phy_id_mask = 0x000ffff0, - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .config_init = vsc738x_config_init, .config_aneg = vsc73xx_config_aneg, .read_page = vsc73xx_read_page, @@ -451,7 +440,7 @@ static struct phy_driver vsc82xx_driver[] = { .phy_id = PHY_ID_VSC7395, .name = "Vitesse VSC7395", .phy_id_mask = 0x000ffff0, - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .config_init = vsc739x_config_init, .config_aneg = vsc73xx_config_aneg, .read_page = vsc73xx_read_page, @@ -460,7 +449,7 @@ static struct phy_driver vsc82xx_driver[] = { .phy_id = PHY_ID_VSC7398, .name = "Vitesse VSC7398", .phy_id_mask = 0x000ffff0, - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .config_init = vsc739x_config_init, .config_aneg = vsc73xx_config_aneg, .read_page = vsc73xx_read_page, @@ -469,7 +458,7 @@ static struct phy_driver vsc82xx_driver[] = { .phy_id = PHY_ID_VSC8662, .name = "Vitesse VSC8662", .phy_id_mask = 0x000ffff0, - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .config_init = &vsc824x_config_init, .config_aneg = 
&vsc82x4_config_aneg, .ack_interrupt = &vsc824x_ack_interrupt, @@ -479,7 +468,7 @@ static struct phy_driver vsc82xx_driver[] = { .phy_id = PHY_ID_VSC8221, .phy_id_mask = 0x000ffff0, .name = "Vitesse VSC8221", - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .config_init = &vsc8221_config_init, .ack_interrupt = &vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, @@ -488,7 +477,7 @@ static struct phy_driver vsc82xx_driver[] = { .phy_id = PHY_ID_VSC8211, .phy_id_mask = 0x000ffff0, .name = "Vitesse VSC8211", - .features = PHY_GBIT_FEATURES, + /* PHY_GBIT_FEATURES */ .config_init = &vsc8221_config_init, .ack_interrupt = &vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, @@ -499,7 +488,6 @@ module_phy_driver(vsc82xx_driver); static struct mdio_device_id __maybe_unused vitesse_tbl[] = { { PHY_ID_VSC8234, 0x000ffff0 }, { PHY_ID_VSC8244, 0x000fffc0 }, - { PHY_ID_VSC8514, 0x000ffff0 }, { PHY_ID_VSC8572, 0x000ffff0 }, { PHY_ID_VSC7385, 0x000ffff0 }, { PHY_ID_VSC7388, 0x000ffff0 }, diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c index 941cfa8f1c2a..627b3a4405ad 100644 --- a/drivers/net/sb1000.c +++ b/drivers/net/sb1000.c @@ -316,7 +316,7 @@ static int card_send_command(const int ioaddr[], const char* name, const unsigned char out[], unsigned char in[]) { - int status, x; + int status; if ((status = card_wait_for_busy_clear(ioaddr, name))) return status; @@ -345,9 +345,7 @@ card_send_command(const int ioaddr[], const char* name, out[0], out[1], out[2], out[3], out[4], out[5]); } - if (out[1] == 0x1b) { - x = (out[2] == 0x02); - } else { + if (out[1] != 0x1b) { if (out[0] >= 0x80 && in[0] != (out[1] | 0x80)) return -EIO; } @@ -490,14 +488,13 @@ sb1000_check_CRC(const int ioaddr[], const char* name) static const unsigned char Command0[6] = {0x80, 0x1f, 0x00, 0x00, 0x00, 0x00}; unsigned char st[7]; - int crc, status; + int status; /* check CRC */ if ((status = card_send_command(ioaddr, name, Command0, st))) return status; if (st[1] != st[3] || st[2] != st[4]) return -EIO; - crc = st[1] << 8 | st[2]; return 0; } diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 16963f7a88f7..2106045b3e16 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -38,13 +38,11 @@ * Helpers **********/ -#define team_port_exists(dev) (dev->priv_flags & IFF_TEAM_PORT) - static struct team_port *team_port_get_rtnl(const struct net_device *dev) { struct team_port *port = rtnl_dereference(dev->rx_handler_data); - return team_port_exists(dev) ? port : NULL; + return netif_is_team_port(dev) ? 
port : NULL; } /* @@ -1143,7 +1141,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev, return -EINVAL; } - if (team_port_exists(port_dev)) { + if (netif_is_team_port(port_dev)) { NL_SET_ERR_MSG(extack, "Device is already a port of a team device"); netdev_err(dev, "Device %s is already a port " "of a team device\n", portname); @@ -1724,8 +1722,7 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev) } static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb, - struct net_device *sb_dev, - select_queue_fallback_t fallback) + struct net_device *sb_dev) { /* * This helper function exists to help dev_pick_tx get the correct @@ -2293,7 +2290,7 @@ static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team, if (err) return err; - option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION); + option_item = nla_nest_start_noflag(skb, TEAM_ATTR_ITEM_OPTION); if (!option_item) return -EMSGSIZE; @@ -2407,7 +2404,7 @@ start_again: if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex)) goto nla_put_failure; - option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION); + option_list = nla_nest_start_noflag(skb, TEAM_ATTR_LIST_OPTION); if (!option_list) goto nla_put_failure; @@ -2513,9 +2510,11 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) err = -EINVAL; goto team_put; } - err = nla_parse_nested(opt_attrs, TEAM_ATTR_OPTION_MAX, - nl_option, team_nl_option_policy, - info->extack); + err = nla_parse_nested_deprecated(opt_attrs, + TEAM_ATTR_OPTION_MAX, + nl_option, + team_nl_option_policy, + info->extack); if (err) goto team_put; if (!opt_attrs[TEAM_ATTR_OPTION_NAME] || @@ -2629,7 +2628,7 @@ static int team_nl_fill_one_port_get(struct sk_buff *skb, { struct nlattr *port_item; - port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT); + port_item = nla_nest_start_noflag(skb, TEAM_ATTR_ITEM_PORT); if (!port_item) goto nest_cancel; if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex)) @@ -2684,7 +2683,7 @@ start_again: if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex)) goto nla_put_failure; - port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT); + port_list = nla_nest_start_noflag(skb, TEAM_ATTR_LIST_PORT); if (!port_list) goto nla_put_failure; @@ -2758,25 +2757,25 @@ static int team_nl_cmd_port_list_get(struct sk_buff *skb, static const struct genl_ops team_nl_ops[] = { { .cmd = TEAM_CMD_NOOP, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = team_nl_cmd_noop, - .policy = team_nl_policy, }, { .cmd = TEAM_CMD_OPTIONS_SET, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = team_nl_cmd_options_set, - .policy = team_nl_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = TEAM_CMD_OPTIONS_GET, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = team_nl_cmd_options_get, - .policy = team_nl_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = TEAM_CMD_PORT_LIST_GET, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = team_nl_cmd_port_list_get, - .policy = team_nl_policy, .flags = GENL_ADMIN_PERM, }, }; @@ -2789,6 +2788,7 @@ static struct genl_family team_nl_family __ro_after_init = { .name = TEAM_GENL_NAME, .version = TEAM_GENL_VERSION, .maxattr = TEAM_ATTR_MAX, + .policy = team_nl_policy, .netnsok = true, .module = THIS_MODULE, .ops = team_nl_ops, diff --git a/drivers/net/tun.c b/drivers/net/tun.c index e9ca1c088d0b..9d72f8c76c15 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ 
-606,8 +606,7 @@ static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb) } static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb, - struct net_device *sb_dev, - select_queue_fallback_t fallback) + struct net_device *sb_dev) { struct tun_struct *tun = netdev_priv(dev); u16 ret; @@ -1043,7 +1042,7 @@ static int tun_net_close(struct net_device *dev) static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb) { #ifdef CONFIG_RPS - if (tun->numqueues == 1 && static_key_false(&rps_needed)) { + if (tun->numqueues == 1 && static_branch_unlikely(&rps_needed)) { /* Select queue was not called for the skbuff, so we extract the * RPS hash and save it into the flow_table here. */ @@ -1966,7 +1965,8 @@ drop: if (frags) { /* Exercise flow dissector code path. */ - u32 headlen = eth_get_headlen(skb->data, skb_headlen(skb)); + u32 headlen = eth_get_headlen(tun->dev, skb->data, + skb_headlen(skb)); if (unlikely(headlen > skb_headlen(skb))) { this_cpu_inc(tun->pcpu_stats->rx_dropped); @@ -2873,8 +2873,7 @@ err_free_dev: return err; } -static void tun_get_iff(struct net *net, struct tun_struct *tun, - struct ifreq *ifr) +static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr) { tun_debug(KERN_INFO, tun, "tun_get_iff\n"); @@ -3103,10 +3102,11 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd); + net = dev_net(tun->dev); ret = 0; switch (cmd) { case TUNGETIFF: - tun_get_iff(current->nsproxy->net_ns, tun, &ifr); + tun_get_iff(tun, &ifr); if (tfile->detached) ifr.ifr_flags |= IFF_DETACH_QUEUE; @@ -3328,6 +3328,13 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, ret = tun_net_change_carrier(tun->dev, (bool)carrier); break; + case TUNGETDEVNETNS: + ret = -EPERM; + if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) + goto unlock; + ret = open_related_ns(&net->ns, get_net_ns); + break; + default: ret = -EINVAL; break; @@ -3457,7 +3464,7 @@ static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file) rtnl_lock(); tun = tun_get(tfile); if (tun) - tun_get_iff(current->nsproxy->net_ns, tun, &ifr); + tun_get_iff(tun, &ifr); rtnl_unlock(); if (tun) diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c index 0362acd5cdca..28321aca48fe 100644 --- a/drivers/net/usb/cdc_mbim.c +++ b/drivers/net/usb/cdc_mbim.c @@ -23,6 +23,7 @@ #include <linux/usb/cdc_ncm.h> #include <net/ipv6.h> #include <net/addrconf.h> +#include <net/ipv6_stubs.h> /* alternative VLAN for IP session 0 if not untagged */ #define MBIM_IPS0_VID 4094 diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c index 3d8a70d3ea9b..a01a71a7e48d 100644 --- a/drivers/net/usb/ipheth.c +++ b/drivers/net/usb/ipheth.c @@ -54,17 +54,6 @@ #include <linux/workqueue.h> #define USB_VENDOR_APPLE 0x05ac -#define USB_PRODUCT_IPHONE 0x1290 -#define USB_PRODUCT_IPHONE_3G 0x1292 -#define USB_PRODUCT_IPHONE_3GS 0x1294 -#define USB_PRODUCT_IPHONE_4 0x1297 -#define USB_PRODUCT_IPAD 0x129a -#define USB_PRODUCT_IPAD_2 0x12a2 -#define USB_PRODUCT_IPAD_3 0x12a6 -#define USB_PRODUCT_IPAD_MINI 0x12ab -#define USB_PRODUCT_IPHONE_4_VZW 0x129c -#define USB_PRODUCT_IPHONE_4S 0x12a0 -#define USB_PRODUCT_IPHONE_5 0x12a8 #define IPHETH_USBINTF_CLASS 255 #define IPHETH_USBINTF_SUBCLASS 253 @@ -88,50 +77,9 @@ #define IPHETH_CARRIER_ON 0x04 static const struct usb_device_id ipheth_table[] = { - { USB_DEVICE_AND_INTERFACE_INFO( - USB_VENDOR_APPLE, USB_PRODUCT_IPHONE, - IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, - 
IPHETH_USBINTF_PROTO) }, - { USB_DEVICE_AND_INTERFACE_INFO( - USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_3G, - IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, - IPHETH_USBINTF_PROTO) }, - { USB_DEVICE_AND_INTERFACE_INFO( - USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_3GS, - IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, - IPHETH_USBINTF_PROTO) }, - { USB_DEVICE_AND_INTERFACE_INFO( - USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4, - IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, - IPHETH_USBINTF_PROTO) }, - { USB_DEVICE_AND_INTERFACE_INFO( - USB_VENDOR_APPLE, USB_PRODUCT_IPAD, - IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, - IPHETH_USBINTF_PROTO) }, - { USB_DEVICE_AND_INTERFACE_INFO( - USB_VENDOR_APPLE, USB_PRODUCT_IPAD_2, - IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, - IPHETH_USBINTF_PROTO) }, - { USB_DEVICE_AND_INTERFACE_INFO( - USB_VENDOR_APPLE, USB_PRODUCT_IPAD_3, - IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, - IPHETH_USBINTF_PROTO) }, - { USB_DEVICE_AND_INTERFACE_INFO( - USB_VENDOR_APPLE, USB_PRODUCT_IPAD_MINI, - IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, - IPHETH_USBINTF_PROTO) }, - { USB_DEVICE_AND_INTERFACE_INFO( - USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW, - IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, - IPHETH_USBINTF_PROTO) }, - { USB_DEVICE_AND_INTERFACE_INFO( - USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4S, - IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, - IPHETH_USBINTF_PROTO) }, - { USB_DEVICE_AND_INTERFACE_INFO( - USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_5, - IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, - IPHETH_USBINTF_PROTO) }, + { USB_VENDOR_AND_INTERFACE_INFO(USB_VENDOR_APPLE, IPHETH_USBINTF_CLASS, + IPHETH_USBINTF_SUBCLASS, + IPHETH_USBINTF_PROTO) }, { } }; MODULE_DEVICE_TABLE(usb, ipheth_table); diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 679e404a5224..5c3ac97519b7 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -63,6 +63,7 @@ enum qmi_wwan_flags { enum qmi_wwan_quirks { QMI_WWAN_QUIRK_DTR = 1 << 0, /* needs "set DTR" request */ + QMI_WWAN_QUIRK_QUECTEL_DYNCFG = 1 << 1, /* check num. endpoints */ }; struct qmimux_hdr { @@ -845,6 +846,16 @@ static const struct driver_info qmi_wwan_info_quirk_dtr = { .data = QMI_WWAN_QUIRK_DTR, }; +static const struct driver_info qmi_wwan_info_quirk_quectel_dyncfg = { + .description = "WWAN/QMI device", + .flags = FLAG_WWAN | FLAG_SEND_ZLP, + .bind = qmi_wwan_bind, + .unbind = qmi_wwan_unbind, + .manage_power = qmi_wwan_manage_power, + .rx_fixup = qmi_wwan_rx_fixup, + .data = QMI_WWAN_QUIRK_DTR | QMI_WWAN_QUIRK_QUECTEL_DYNCFG, +}; + #define HUAWEI_VENDOR_ID 0x12D1 /* map QMI/wwan function by a fixed interface number */ @@ -865,6 +876,15 @@ static const struct driver_info qmi_wwan_info_quirk_dtr = { #define QMI_GOBI_DEVICE(vend, prod) \ QMI_FIXED_INTF(vend, prod, 0) +/* Quectel does not use fixed interface numbers on at least some of their + * devices. We need to check the number of endpoints to ensure that we bind to + * the correct interface. + */ +#define QMI_QUIRK_QUECTEL_DYNCFG(vend, prod) \ + USB_DEVICE_AND_INTERFACE_INFO(vend, prod, USB_CLASS_VENDOR_SPEC, \ + USB_SUBCLASS_VENDOR_SPEC, 0xff), \ + .driver_info = (unsigned long)&qmi_wwan_info_quirk_quectel_dyncfg + static const struct usb_device_id products[] = { /* 1. CDC ECM like devices match on the control interface */ { /* Huawei E392, E398 and possibly others sharing both device id and more... 
*/ @@ -969,20 +989,9 @@ static const struct usb_device_id products[] = { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7), .driver_info = (unsigned long)&qmi_wwan_info, }, - { /* Quectel EP06/EG06/EM06 */ - USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0306, - USB_CLASS_VENDOR_SPEC, - USB_SUBCLASS_VENDOR_SPEC, - 0xff), - .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr, - }, - { /* Quectel EG12/EM12 */ - USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0512, - USB_CLASS_VENDOR_SPEC, - USB_SUBCLASS_VENDOR_SPEC, - 0xff), - .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr, - }, + {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */ + {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */ + {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */ /* 3. Combined interface devices matching on interface number */ {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */ @@ -1281,7 +1290,6 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */ {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */ {QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */ - {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */ {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */ @@ -1361,27 +1369,12 @@ static bool quectel_ec20_detected(struct usb_interface *intf) return false; } -static bool quectel_diag_detected(struct usb_interface *intf) -{ - struct usb_device *dev = interface_to_usbdev(intf); - struct usb_interface_descriptor intf_desc = intf->cur_altsetting->desc; - u16 id_vendor = le16_to_cpu(dev->descriptor.idVendor); - u16 id_product = le16_to_cpu(dev->descriptor.idProduct); - - if (id_vendor != 0x2c7c || intf_desc.bNumEndpoints != 2) - return false; - - if (id_product == 0x0306 || id_product == 0x0512) - return true; - else - return false; -} - static int qmi_wwan_probe(struct usb_interface *intf, const struct usb_device_id *prod) { struct usb_device_id *id = (struct usb_device_id *)prod; struct usb_interface_descriptor *desc = &intf->cur_altsetting->desc; + const struct driver_info *info; /* Workaround to enable dynamic IDs. This disables usbnet * blacklisting functionality. Which, if required, can be @@ -1415,10 +1408,14 @@ static int qmi_wwan_probe(struct usb_interface *intf, * we need to match on class/subclass/protocol. These values are * identical for the diagnostic- and QMI-interface, but bNumEndpoints is * different. Ignore the current interface if the number of endpoints - * the number for the diag interface (two). + * equals the number for the diag interface (two). 
*/ - if (quectel_diag_detected(intf)) - return -ENODEV; + info = (void *)&id->driver_info; + + if (info->data & QMI_WWAN_QUIRK_QUECTEL_DYNCFG) { + if (desc->bNumEndpoints == 2) + return -ENODEV; + } return usbnet_probe(intf, id); } diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 86c8c64fbb0f..b01bfa63860d 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -1212,7 +1212,6 @@ static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa) goto amacout; } memcpy(sa->sa_data, buf, 6); - ether_addr_copy(tp->netdev->dev_addr, sa->sa_data); netif_info(tp, probe, tp->netdev, "Using pass-thru MAC addr %pM\n", sa->sa_data); @@ -1221,43 +1220,57 @@ amacout: return ret; } -static int set_ethernet_addr(struct r8152 *tp) +static int determine_ethernet_addr(struct r8152 *tp, struct sockaddr *sa) { struct net_device *dev = tp->netdev; - struct sockaddr sa; int ret; + sa->sa_family = dev->type; + if (tp->version == RTL_VER_01) { - ret = pla_ocp_read(tp, PLA_IDR, 8, sa.sa_data); + ret = pla_ocp_read(tp, PLA_IDR, 8, sa->sa_data); } else { /* if device doesn't support MAC pass through this will * be expected to be non-zero */ - ret = vendor_mac_passthru_addr_read(tp, &sa); + ret = vendor_mac_passthru_addr_read(tp, sa); if (ret < 0) - ret = pla_ocp_read(tp, PLA_BACKUP, 8, sa.sa_data); + ret = pla_ocp_read(tp, PLA_BACKUP, 8, sa->sa_data); } if (ret < 0) { netif_err(tp, probe, dev, "Get ether addr fail\n"); - } else if (!is_valid_ether_addr(sa.sa_data)) { + } else if (!is_valid_ether_addr(sa->sa_data)) { netif_err(tp, probe, dev, "Invalid ether addr %pM\n", - sa.sa_data); + sa->sa_data); eth_hw_addr_random(dev); - ether_addr_copy(sa.sa_data, dev->dev_addr); - ret = rtl8152_set_mac_address(dev, &sa); + ether_addr_copy(sa->sa_data, dev->dev_addr); netif_info(tp, probe, dev, "Random ether addr %pM\n", - sa.sa_data); - } else { - if (tp->version == RTL_VER_01) - ether_addr_copy(dev->dev_addr, sa.sa_data); - else - ret = rtl8152_set_mac_address(dev, &sa); + sa->sa_data); + return 0; } return ret; } +static int set_ethernet_addr(struct r8152 *tp) +{ + struct net_device *dev = tp->netdev; + struct sockaddr sa; + int ret; + + ret = determine_ethernet_addr(tp, &sa); + if (ret < 0) + return ret; + + if (tp->version == RTL_VER_01) + ether_addr_copy(dev->dev_addr, sa.sa_data); + else + ret = rtl8152_set_mac_address(dev, &sa); + + return ret; +} + static void read_bulk_callback(struct urb *urb) { struct net_device *netdev; @@ -4264,10 +4277,18 @@ static int rtl8152_post_reset(struct usb_interface *intf) { struct r8152 *tp = usb_get_intfdata(intf); struct net_device *netdev; + struct sockaddr sa; if (!tp) return 0; + /* reset the MAC address in case of policy change */ + if (determine_ethernet_addr(tp, &sa) >= 0) { + rtnl_lock(); + dev_set_mac_address(tp->netdev, &sa, NULL); + rtnl_unlock(); + } + netdev = tp->netdev; if (!netif_running(netdev)) return 0; diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 569e87a51a33..09a1433b0833 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -162,18 +162,6 @@ static void veth_get_ethtool_stats(struct net_device *dev, } } -static int veth_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) -{ - info->so_timestamping = - SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE; - info->phc_index = -1; - - return 0; -} - static const struct ethtool_ops veth_ethtool_ops = { .get_drvinfo = veth_get_drvinfo, .get_link = ethtool_op_get_link, @@ -181,7 +169,7 @@ static 
const struct ethtool_ops veth_ethtool_ops = { .get_sset_count = veth_get_sset_count, .get_ethtool_stats = veth_get_ethtool_stats, .get_link_ksettings = veth_get_link_ksettings, - .get_ts_info = veth_get_ts_info, + .get_ts_info = ethtool_op_get_ts_info, }; /* general routines */ diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 7eb38ea9ba56..559c48e66afc 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -31,7 +31,6 @@ #include <linux/average.h> #include <linux/filter.h> #include <linux/kernel.h> -#include <linux/pci.h> #include <net/route.h> #include <net/xdp.h> #include <net/net_failover.h> @@ -1568,7 +1567,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) struct send_queue *sq = &vi->sq[qnum]; int err; struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum); - bool kick = !skb->xmit_more; + bool kick = !netdev_xmit_more(); bool use_napi = sq->napi.weight; /* Free up any pending old buffers before queueing new ones. */ @@ -1588,7 +1587,8 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) dev->stats.tx_fifo_errors++; if (net_ratelimit()) dev_warn(&dev->dev, - "Unexpected TXQ (%d) queue failure: %d\n", qnum, err); + "Unexpected TXQ (%d) queue failure: %d\n", + qnum, err); dev->stats.tx_dropped++; dev_kfree_skb_any(skb); return NETDEV_TX_OK; @@ -1925,7 +1925,7 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev, return 0; } -static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu) +static void virtnet_clean_affinity(struct virtnet_info *vi) { int i; @@ -1949,7 +1949,7 @@ static void virtnet_set_affinity(struct virtnet_info *vi) int stride; if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) { - virtnet_clean_affinity(vi, -1); + virtnet_clean_affinity(vi); return; } @@ -1999,7 +1999,7 @@ static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node) struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, node); - virtnet_clean_affinity(vi, cpu); + virtnet_clean_affinity(vi); return 0; } @@ -2384,7 +2384,7 @@ static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads) if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS, VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) { - dev_warn(&vi->dev->dev, "Fail to set guest offload. \n"); + dev_warn(&vi->dev->dev, "Fail to set guest offload.\n"); return -EINVAL; } @@ -2735,7 +2735,7 @@ static void virtnet_del_vqs(struct virtnet_info *vi) { struct virtio_device *vdev = vi->vdev; - virtnet_clean_affinity(vi, -1); + virtnet_clean_affinity(vi); vdev->config->del_vqs(vdev); @@ -3115,8 +3115,9 @@ static int virtnet_probe(struct virtio_device *vdev) /* Should never trigger: MTU was previously validated * in virtnet_validate. 
*/ - dev_err(&vdev->dev, "device MTU appears to have changed " "it is now %d < %d", mtu, dev->min_mtu); + dev_err(&vdev->dev, + "device MTU appears to have changed, it is now %d < %d", + mtu, dev->min_mtu); goto free; } diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 9ee4d7402ca2..cf7e6a92e73c 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -370,7 +370,7 @@ static int vrf_finish_output6(struct net *net, struct sock *sk, neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false); if (!IS_ERR(neigh)) { sock_confirm_neigh(skb, neigh); - ret = neigh_output(neigh, skb); + ret = neigh_output(neigh, skb, false); rcu_read_unlock_bh(); return ret; } @@ -549,7 +549,7 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s struct net_device *dev = dst->dev; unsigned int hh_len = LL_RESERVED_SPACE(dev); struct neighbour *neigh; - u32 nexthop; + bool is_v6gw = false; int ret = -EINVAL; nf_reset(skb); @@ -572,13 +572,11 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s rcu_read_lock_bh(); - nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr); - neigh = __ipv4_neigh_lookup_noref(dev, nexthop); - if (unlikely(!neigh)) - neigh = __neigh_create(&arp_tbl, &nexthop, dev, false); + neigh = ip_neigh_for_gw(rt, skb, &is_v6gw); if (!IS_ERR(neigh)) { sock_confirm_neigh(skb, neigh); - ret = neigh_output(neigh, skb); + /* if crossing protocols, cannot use the cached header */ + ret = neigh_output(neigh, skb, is_v6gw); rcu_read_unlock_bh(); return ret; } diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index d76dfed8d9bb..5994d5415a03 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -20,6 +20,7 @@ #include <linux/ethtool.h> #include <net/arp.h> #include <net/ndisc.h> +#include <net/ipv6_stubs.h> #include <net/ip.h> #include <net/icmp.h> #include <net/rtnetlink.h> diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c index ef298d8525c5..4fe7c7e132c4 100644 --- a/drivers/net/wimax/i2400m/control.c +++ b/drivers/net/wimax/i2400m/control.c @@ -352,6 +352,7 @@ void i2400m_report_tlv_system_state(struct i2400m *i2400m, case I2400M_SS_IDLE: d_printf(1, dev, "entering BS-negotiated idle mode\n"); + /* Fall through */ case I2400M_SS_DISCONNECTING: case I2400M_SS_DATA_PATH_CONNECTED: wimax_state_change(wimax_dev, WIMAX_ST_CONNECTED); diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c index 6433ff10d80e..a29cfb9c72c2 100644 --- a/drivers/net/wireless/ath/ath10k/testmode.c +++ b/drivers/net/wireless/ath/ath10k/testmode.c @@ -416,8 +416,8 @@ int ath10k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct nlattr *tb[ATH10K_TM_ATTR_MAX + 1]; int ret; - ret = nla_parse(tb, ATH10K_TM_ATTR_MAX, data, len, ath10k_tm_policy, - NULL); + ret = nla_parse_deprecated(tb, ATH10K_TM_ATTR_MAX, data, len, + ath10k_tm_policy, NULL); if (ret) return ret; diff --git a/drivers/net/wireless/ath/ath6kl/testmode.c b/drivers/net/wireless/ath/ath6kl/testmode.c index d8dcacda9add..f3906dbe5495 100644 --- a/drivers/net/wireless/ath/ath6kl/testmode.c +++ b/drivers/net/wireless/ath/ath6kl/testmode.c @@ -74,8 +74,8 @@ int ath6kl_tm_cmd(struct wiphy *wiphy, struct wireless_dev *wdev, int err, buf_len; void *buf; - err = nla_parse(tb, ATH6KL_TM_ATTR_MAX, data, len, ath6kl_tm_policy, - NULL); + err = nla_parse_deprecated(tb, ATH6KL_TM_ATTR_MAX, data, len, + ath6kl_tm_policy, NULL); if (err) return err; diff --git a/drivers/net/wireless/ath/wcn36xx/testmode.c 
b/drivers/net/wireless/ath/wcn36xx/testmode.c index 51a038022c8b..7ae14b4d2d0e 100644 --- a/drivers/net/wireless/ath/wcn36xx/testmode.c +++ b/drivers/net/wireless/ath/wcn36xx/testmode.c @@ -132,8 +132,8 @@ int wcn36xx_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif, unsigned short attr; wcn36xx_dbg_dump(WCN36XX_DBG_TESTMODE_DUMP, "Data:", data, len); - ret = nla_parse(tb, WCN36XX_TM_ATTR_MAX, data, len, - wcn36xx_tm_policy, NULL); + ret = nla_parse_deprecated(tb, WCN36XX_TM_ATTR_MAX, data, len, + wcn36xx_tm_policy, NULL); if (ret) return ret; diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index a1e226652b4a..9a67ad2a589c 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -2620,8 +2620,8 @@ static int wil_rf_sector_get_cfg(struct wiphy *wiphy, if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities)) return -EOPNOTSUPP; - rc = nla_parse(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, data_len, - wil_rf_sector_policy, NULL); + rc = nla_parse_deprecated(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, + data_len, wil_rf_sector_policy, NULL); if (rc) { wil_err(wil, "Invalid rf sector ATTR\n"); return rc; @@ -2679,13 +2679,13 @@ static int wil_rf_sector_get_cfg(struct wiphy *wiphy, QCA_ATTR_PAD)) goto nla_put_failure; - nl_cfgs = nla_nest_start(msg, QCA_ATTR_DMG_RF_SECTOR_CFG); + nl_cfgs = nla_nest_start_noflag(msg, QCA_ATTR_DMG_RF_SECTOR_CFG); if (!nl_cfgs) goto nla_put_failure; for (i = 0; i < WMI_MAX_RF_MODULES_NUM; i++) { if (!(rf_modules_vec & BIT(i))) continue; - nl_cfg = nla_nest_start(msg, i); + nl_cfg = nla_nest_start_noflag(msg, i); if (!nl_cfg) goto nla_put_failure; si = &reply.evt.sectors_info[i]; @@ -2740,8 +2740,8 @@ static int wil_rf_sector_set_cfg(struct wiphy *wiphy, if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities)) return -EOPNOTSUPP; - rc = nla_parse(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, data_len, - wil_rf_sector_policy, NULL); + rc = nla_parse_deprecated(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, + data_len, wil_rf_sector_policy, NULL); if (rc) { wil_err(wil, "Invalid rf sector ATTR\n"); return rc; @@ -2773,9 +2773,11 @@ static int wil_rf_sector_set_cfg(struct wiphy *wiphy, cmd.sector_type = sector_type; nla_for_each_nested(nl_cfg, tb[QCA_ATTR_DMG_RF_SECTOR_CFG], tmp) { - rc = nla_parse_nested(tb2, QCA_ATTR_DMG_RF_SECTOR_CFG_MAX, - nl_cfg, wil_rf_sector_cfg_policy, - NULL); + rc = nla_parse_nested_deprecated(tb2, + QCA_ATTR_DMG_RF_SECTOR_CFG_MAX, + nl_cfg, + wil_rf_sector_cfg_policy, + NULL); if (rc) { wil_err(wil, "invalid sector cfg\n"); return -EINVAL; @@ -2847,8 +2849,8 @@ static int wil_rf_sector_get_selected(struct wiphy *wiphy, if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities)) return -EOPNOTSUPP; - rc = nla_parse(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, data_len, - wil_rf_sector_policy, NULL); + rc = nla_parse_deprecated(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, + data_len, wil_rf_sector_policy, NULL); if (rc) { wil_err(wil, "Invalid rf sector ATTR\n"); return rc; @@ -2955,8 +2957,8 @@ static int wil_rf_sector_set_selected(struct wiphy *wiphy, if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities)) return -EOPNOTSUPP; - rc = nla_parse(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, data_len, - wil_rf_sector_policy, NULL); + rc = nla_parse_deprecated(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, + data_len, wil_rf_sector_policy, NULL); if (rc) { wil_err(wil, "Invalid rf sector ATTR\n"); return rc; diff --git 
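[Editor's aside: the nla_parse() to nla_parse_deprecated() renames across these testmode and vendor-command handlers are behaviour-preserving. The _deprecated variants keep the old liberal validation so existing userspace keeps working, while plain nla_parse() became strict. Roughly, as a simplified sketch of the netlink.h wrapper rather than its exact text:

static inline int example_parse_deprecated(struct nlattr **tb, int maxtype,
					   const struct nlattr *head, int len,
					   const struct nla_policy *policy,
					   struct netlink_ext_ack *extack)
{
	/* liberal mode: unknown attribute types and loose lengths tolerated */
	return __nla_parse(tb, maxtype, head, len, policy,
			   NL_VALIDATE_LIBERAL, extack);
}
]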
a/drivers/net/wireless/broadcom/b43/phy_lp.c b/drivers/net/wireless/broadcom/b43/phy_lp.c index 46408a560814..aedee026c5e2 100644 --- a/drivers/net/wireless/broadcom/b43/phy_lp.c +++ b/drivers/net/wireless/broadcom/b43/phy_lp.c @@ -1835,7 +1835,7 @@ static void lpphy_papd_cal(struct b43_wldev *dev, struct lpphy_tx_gains gains, static void lpphy_papd_cal_txpwr(struct b43_wldev *dev) { struct b43_phy_lp *lpphy = dev->phy.lp; - struct lpphy_tx_gains gains, oldgains; + struct lpphy_tx_gains oldgains; int old_txpctl, old_afe_ovr, old_rf, old_bbmult; lpphy_read_tx_pctl_mode_from_hardware(dev); @@ -1849,9 +1849,9 @@ static void lpphy_papd_cal_txpwr(struct b43_wldev *dev) lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_OFF); if (dev->dev->chip_id == 0x4325 && dev->dev->chip_rev == 0) - lpphy_papd_cal(dev, gains, 0, 1, 30); + lpphy_papd_cal(dev, oldgains, 0, 1, 30); else - lpphy_papd_cal(dev, gains, 0, 1, 65); + lpphy_papd_cal(dev, oldgains, 0, 1, 65); if (old_afe_ovr) lpphy_set_tx_gains(dev, oldgains); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c index 73d3c1a0a7c9..98b168736df0 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c @@ -490,11 +490,18 @@ fail: return -ENOMEM; } -void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr) +void brcmf_proto_bcdc_detach_pre_delif(struct brcmf_pub *drvr) +{ + struct brcmf_bcdc *bcdc = drvr->proto->pd; + + brcmf_fws_detach_pre_delif(bcdc->fws); +} + +void brcmf_proto_bcdc_detach_post_delif(struct brcmf_pub *drvr) { struct brcmf_bcdc *bcdc = drvr->proto->pd; drvr->proto->pd = NULL; - brcmf_fws_detach(bcdc->fws); + brcmf_fws_detach_post_delif(bcdc->fws); kfree(bcdc); } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h index 3b0e9eff21b5..4bc52240ccea 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h @@ -18,14 +18,16 @@ #ifdef CONFIG_BRCMFMAC_PROTO_BCDC int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr); -void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr); +void brcmf_proto_bcdc_detach_pre_delif(struct brcmf_pub *drvr); +void brcmf_proto_bcdc_detach_post_delif(struct brcmf_pub *drvr); void brcmf_proto_bcdc_txflowblock(struct device *dev, bool state); void brcmf_proto_bcdc_txcomplete(struct device *dev, struct sk_buff *txp, bool success); struct brcmf_fws_info *drvr_to_fws(struct brcmf_pub *drvr); #else static inline int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr) { return 0; } -static inline void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr) {} +static void brcmf_proto_bcdc_detach_pre_delif(struct brcmf_pub *drvr) {}; +static inline void brcmf_proto_bcdc_detach_post_delif(struct brcmf_pub *drvr) {} #endif #endif /* BRCMFMAC_BCDC_H */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c index ec129864cc9c..60aede5abb4d 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c @@ -628,15 +628,13 @@ int brcmf_sdiod_send_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes) err = brcmf_sdiod_set_backplane_window(sdiodev, addr); if (err) - return err; + goto out; addr &= SBSDIO_SB_OFT_ADDR_MASK; addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; - if (!err) - err = brcmf_sdiod_skbuff_write(sdiodev, 
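[Editor's aside: the b43 hunk above fixes an uninitialized-variable bug: lpphy_papd_cal() was handed a never-written struct lpphy_tx_gains by value, so calibration ran on stack garbage; reusing oldgains makes the input deterministic. The bug class in miniature, all names hypothetical:

struct example_gains { int pad, pga, dac; };

static void example_cal_txpwr(struct example_dev *dev)
{
	struct example_gains gains;	/* never written: indeterminate */
	struct example_gains oldgains = example_read_gains_from_hw(dev);

	/* BUG: passes indeterminate stack contents by value */
	example_papd_cal(dev, gains, 0, 1, 30);

	/* FIX (what the hunk does): use values read back from hardware */
	example_papd_cal(dev, oldgains, 0, 1, 30);
}
]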
sdiodev->func2, addr, - mypkt); - + err = brcmf_sdiod_skbuff_write(sdiodev, sdiodev->func2, addr, mypkt); +out: brcmu_pkt_buf_free_skb(mypkt); return err; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h index 3d441c5c745c..2fe167eae22c 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h @@ -91,6 +91,7 @@ struct brcmf_bus_ops { int (*get_fwname)(struct device *dev, const char *ext, unsigned char *fw_name); void (*debugfs_create)(struct device *dev); + int (*reset)(struct device *dev); }; @@ -245,6 +246,15 @@ void brcmf_bus_debugfs_create(struct brcmf_bus *bus) return bus->ops->debugfs_create(bus->dev); } +static inline +int brcmf_bus_reset(struct brcmf_bus *bus) +{ + if (!bus->ops->reset) + return -EOPNOTSUPP; + + return bus->ops->reset(bus->dev); +} + /* * interface functions from common layer */ @@ -262,6 +272,8 @@ void brcmf_detach(struct device *dev); void brcmf_dev_reset(struct device *dev); /* Request from bus module to initiate a coredump */ void brcmf_dev_coredump(struct device *dev); +/* Indication that firmware has halted or crashed */ +void brcmf_fw_crashed(struct device *dev); /* Configure the "global" bus state used by upper layers */ void brcmf_bus_change_state(struct brcmf_bus *bus, enum brcmf_bus_state state); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index e92f6351bd22..8ee8af4e7ec4 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -5464,6 +5464,8 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg, conn_info->req_ie = kmemdup(cfg->extra_buf, conn_info->req_ie_len, GFP_KERNEL); + if (!conn_info->req_ie) + conn_info->req_ie_len = 0; } else { conn_info->req_ie_len = 0; conn_info->req_ie = NULL; @@ -5480,6 +5482,8 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg, conn_info->resp_ie = kmemdup(cfg->extra_buf, conn_info->resp_ie_len, GFP_KERNEL); + if (!conn_info->resp_ie) + conn_info->resp_ie_len = 0; } else { conn_info->resp_ie_len = 0; conn_info->resp_ie = NULL; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index 4fbe8791f674..7d6a08779693 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -841,17 +841,17 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx, bool rtnl_locked) { struct brcmf_if *ifp; + int ifidx; ifp = drvr->iflist[bsscfgidx]; - drvr->iflist[bsscfgidx] = NULL; if (!ifp) { bphy_err(drvr, "Null interface, bsscfgidx=%d\n", bsscfgidx); return; } brcmf_dbg(TRACE, "Enter, bsscfgidx=%d, ifidx=%d\n", bsscfgidx, ifp->ifidx); - if (drvr->if2bss[ifp->ifidx] == bsscfgidx) - drvr->if2bss[ifp->ifidx] = BRCMF_BSSIDX_INVALID; + ifidx = ifp->ifidx; + if (ifp->ndev) { if (bsscfgidx == 0) { if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) { @@ -879,6 +879,10 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx, brcmf_p2p_ifp_removed(ifp, rtnl_locked); kfree(ifp); } + + drvr->iflist[bsscfgidx] = NULL; + if (drvr->if2bss[ifidx] == bsscfgidx) + drvr->if2bss[ifidx] = BRCMF_BSSIDX_INVALID; } void brcmf_remove_interface(struct brcmf_if *ifp, bool rtnl_locked) @@ -1084,6 +1088,14 @@ static int brcmf_revinfo_read(struct seq_file *s, 
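[Editor's aside: the bcmsdh.c hunk just above is a leak fix: the early return on a set_backplane_window() failure skipped brcmu_pkt_buf_free_skb(). Reshaped onto a single exit label, the packet is freed on every path; condensed sketch using the driver's own names:

	err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
	if (err)
		goto out;	/* old code: return err (leaked mypkt) */

	addr &= SBSDIO_SB_OFT_ADDR_MASK;
	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

	err = brcmf_sdiod_skbuff_write(sdiodev, sdiodev->func2, addr, mypkt);
out:
	brcmu_pkt_buf_free_skb(mypkt);	/* reached on success and failure */
	return err;
]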
void *data) return 0; } +static void brcmf_core_bus_reset(struct work_struct *work) +{ + struct brcmf_pub *drvr = container_of(work, struct brcmf_pub, + bus_reset); + + brcmf_bus_reset(drvr->bus_if); +} + static int brcmf_bus_started(struct brcmf_pub *drvr, struct cfg80211_ops *ops) { int ret = -1; @@ -1155,6 +1167,8 @@ static int brcmf_bus_started(struct brcmf_pub *drvr, struct cfg80211_ops *ops) #endif #endif /* CONFIG_INET */ + INIT_WORK(&drvr->bus_reset, brcmf_core_bus_reset); + /* populate debugfs */ brcmf_debugfs_add_entry(drvr, "revinfo", brcmf_revinfo_read); brcmf_feat_debugfs_create(drvr); @@ -1273,6 +1287,18 @@ void brcmf_dev_coredump(struct device *dev) brcmf_dbg(TRACE, "failed to create coredump\n"); } +void brcmf_fw_crashed(struct device *dev) +{ + struct brcmf_bus *bus_if = dev_get_drvdata(dev); + struct brcmf_pub *drvr = bus_if->drvr; + + bphy_err(drvr, "Firmware has halted or crashed\n"); + + brcmf_dev_coredump(dev); + + schedule_work(&drvr->bus_reset); +} + void brcmf_detach(struct device *dev) { s32 i; @@ -1299,6 +1325,8 @@ void brcmf_detach(struct device *dev) brcmf_bus_change_state(bus_if, BRCMF_BUS_DOWN); + brcmf_proto_detach_pre_delif(drvr); + /* make sure primary interface removed last */ for (i = BRCMF_MAX_IFS-1; i > -1; i--) brcmf_remove_interface(drvr->iflist[i], false); @@ -1308,7 +1336,7 @@ void brcmf_detach(struct device *dev) brcmf_bus_stop(drvr->bus_if); - brcmf_proto_detach(drvr); + brcmf_proto_detach_post_delif(drvr); bus_if->drvr = NULL; wiphy_free(drvr->wiphy); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h index d8085ce579f4..9f09aa31eeda 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h @@ -143,6 +143,8 @@ struct brcmf_pub { struct notifier_block inet6addr_notifier; struct brcmf_mp_device *settings; + struct work_struct bus_reset; + u8 clmver[BRCMF_DCMD_SMLEN]; }; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c index 8209a42dea72..6a333dd80b2d 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c @@ -711,7 +711,6 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev, size_t mp_path_len; u32 i, j; char end = '\0'; - size_t reqsz; for (i = 0; i < table_size; i++) { if (mapping_table[i].chipid == chip && @@ -726,8 +725,7 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev, return NULL; } - reqsz = sizeof(*fwreq) + n_fwnames * sizeof(struct brcmf_fw_item); - fwreq = kzalloc(reqsz, GFP_KERNEL); + fwreq = kzalloc(struct_size(fwreq, items, n_fwnames), GFP_KERNEL); if (!fwreq) return NULL; @@ -743,6 +741,7 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev, for (j = 0; j < n_fwnames; j++) { fwreq->items[j].path = fwnames[j].path; + fwnames[j].path[0] = '\0'; /* check if firmware path is provided by module parameter */ if (brcmf_mp_global.firmware_path[0] != '\0') { strlcpy(fwnames[j].path, mp_path, diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c index abeb305492e0..c22c49ae552e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c @@ -580,24 +580,6 @@ static bool brcmf_fws_ifidx_match(struct sk_buff *skb, void *arg) return ifidx == *(int *)arg; } -static void 
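[Editor's aside: brcmf_fw_crashed() above is invoked from bus notification paths (its PCIe and SDIO callers appear later in this patch) where tearing the whole bus down in place would be unsafe, so it captures a coredump and punts the actual reset to process context via a work item. The pattern in miniature, with hypothetical scaffolding names:

struct example_drvr {
	struct work_struct bus_reset;
};

static void example_bus_reset_worker(struct work_struct *work)
{
	struct example_drvr *drvr = container_of(work, struct example_drvr,
						 bus_reset);

	example_do_bus_reset(drvr);	/* may sleep, reload firmware, etc. */
}

static void example_attach(struct example_drvr *drvr)
{
	INIT_WORK(&drvr->bus_reset, example_bus_reset_worker);
}

static void example_fw_crashed(struct example_drvr *drvr)
{
	example_take_coredump(drvr);
	schedule_work(&drvr->bus_reset);	/* runs later, in a kworker */
}
]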
brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q, - int ifidx) -{ - bool (*matchfn)(struct sk_buff *, void *) = NULL; - struct sk_buff *skb; - int prec; - - if (ifidx != -1) - matchfn = brcmf_fws_ifidx_match; - for (prec = 0; prec < q->num_prec; prec++) { - skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx); - while (skb) { - brcmu_pkt_buf_free_skb(skb); - skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx); - } - } -} - static void brcmf_fws_hanger_init(struct brcmf_fws_hanger *hanger) { int i; @@ -669,6 +651,28 @@ static inline int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h, return 0; } +static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q, + int ifidx) +{ + bool (*matchfn)(struct sk_buff *, void *) = NULL; + struct sk_buff *skb; + int prec; + u32 hslot; + + if (ifidx != -1) + matchfn = brcmf_fws_ifidx_match; + for (prec = 0; prec < q->num_prec; prec++) { + skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx); + while (skb) { + hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT); + brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb, + true); + brcmu_pkt_buf_free_skb(skb); + skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx); + } + } +} + static int brcmf_fws_hanger_mark_suppressed(struct brcmf_fws_hanger *h, u32 slot_id) { @@ -2200,6 +2204,8 @@ void brcmf_fws_del_interface(struct brcmf_if *ifp) brcmf_fws_lock(fws); ifp->fws_desc = NULL; brcmf_dbg(TRACE, "deleting %s\n", entry->name); + brcmf_fws_macdesc_cleanup(fws, &fws->desc.iface[ifp->ifidx], + ifp->ifidx); brcmf_fws_macdesc_deinit(entry); brcmf_fws_cleanup(fws, ifp->ifidx); brcmf_fws_unlock(fws); @@ -2437,17 +2443,25 @@ struct brcmf_fws_info *brcmf_fws_attach(struct brcmf_pub *drvr) return fws; fail: - brcmf_fws_detach(fws); + brcmf_fws_detach_pre_delif(fws); + brcmf_fws_detach_post_delif(fws); return ERR_PTR(rc); } -void brcmf_fws_detach(struct brcmf_fws_info *fws) +void brcmf_fws_detach_pre_delif(struct brcmf_fws_info *fws) { if (!fws) return; - - if (fws->fws_wq) + if (fws->fws_wq) { destroy_workqueue(fws->fws_wq); + fws->fws_wq = NULL; + } +} + +void brcmf_fws_detach_post_delif(struct brcmf_fws_info *fws) +{ + if (!fws) + return; /* cleanup */ brcmf_fws_lock(fws); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h index 4e6835766d5d..749c06dcdc17 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h @@ -19,7 +19,8 @@ #define FWSIGNAL_H_ struct brcmf_fws_info *brcmf_fws_attach(struct brcmf_pub *drvr); -void brcmf_fws_detach(struct brcmf_fws_info *fws); +void brcmf_fws_detach_pre_delif(struct brcmf_fws_info *fws); +void brcmf_fws_detach_post_delif(struct brcmf_fws_info *fws); void brcmf_fws_debugfs_create(struct brcmf_pub *drvr); bool brcmf_fws_queue_skbs(struct brcmf_fws_info *fws); bool brcmf_fws_fc_active(struct brcmf_fws_info *fws); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index 58a6bc379358..fd3968fd158e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -345,6 +345,10 @@ static const u32 brcmf_ring_itemsize[BRCMF_NROF_COMMON_MSGRINGS] = { BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE }; +static void brcmf_pcie_setup(struct device *dev, int ret, + struct brcmf_fw_request *fwreq); +static struct brcmf_fw_request * +brcmf_pcie_prepare_fw_request(struct 
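[Editor's aside: the brcmf_fws_detach() split above encodes an ordering constraint: the fwsignal workqueue must be destroyed before the interfaces it services are deleted, while the remaining protocol state may only be freed afterwards. (The moved psq_flush also now pops each packet's hanger slot before freeing it, so the skb cannot be freed a second time from the hanger.) Condensed call order as a sketch; example_remove_interfaces() is a stand-in:

static void example_detach(struct brcmf_pub *drvr)
{
	/* phase 1: stop async work while the interfaces are still valid */
	brcmf_proto_detach_pre_delif(drvr);	/* destroy_workqueue() */

	/* delete interfaces, primary one last */
	example_remove_interfaces(drvr);

	/* phase 2: nothing references the state anymore, free it */
	brcmf_proto_detach_post_delif(drvr);	/* cleanup + kfree() */
}
]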
brcmf_pciedev_info *devinfo); static u32 brcmf_pcie_read_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset) @@ -730,7 +734,7 @@ static void brcmf_pcie_handle_mb_data(struct brcmf_pciedev_info *devinfo) } if (dtoh_mb_data & BRCMF_D2H_DEV_FWHALT) { brcmf_dbg(PCIE, "D2H_MB_DATA: FW HALT\n"); - brcmf_dev_coredump(&devinfo->pdev->dev); + brcmf_fw_crashed(&devinfo->pdev->dev); } } @@ -1409,6 +1413,36 @@ int brcmf_pcie_get_fwname(struct device *dev, const char *ext, u8 *fw_name) return 0; } +static int brcmf_pcie_reset(struct device *dev) +{ + struct brcmf_bus *bus_if = dev_get_drvdata(dev); + struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie; + struct brcmf_pciedev_info *devinfo = buspub->devinfo; + struct brcmf_fw_request *fwreq; + int err; + + brcmf_detach(dev); + + brcmf_pcie_release_irq(devinfo); + brcmf_pcie_release_scratchbuffers(devinfo); + brcmf_pcie_release_ringbuffers(devinfo); + brcmf_pcie_reset_device(devinfo); + + fwreq = brcmf_pcie_prepare_fw_request(devinfo); + if (!fwreq) { + dev_err(dev, "Failed to prepare FW request\n"); + return -ENOMEM; + } + + err = brcmf_fw_get_firmwares(dev, fwreq, brcmf_pcie_setup); + if (err) { + dev_err(dev, "Failed to prepare FW request\n"); + kfree(fwreq); + } + + return err; +} + static const struct brcmf_bus_ops brcmf_pcie_bus_ops = { .txdata = brcmf_pcie_tx, .stop = brcmf_pcie_down, @@ -1418,6 +1452,7 @@ static const struct brcmf_bus_ops brcmf_pcie_bus_ops = { .get_ramsize = brcmf_pcie_get_ramsize, .get_memdump = brcmf_pcie_get_memdump, .get_fwname = brcmf_pcie_get_fwname, + .reset = brcmf_pcie_reset, }; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c index 024c643052bc..c7964ccdda69 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c @@ -67,16 +67,22 @@ fail: return -ENOMEM; } -void brcmf_proto_detach(struct brcmf_pub *drvr) +void brcmf_proto_detach_post_delif(struct brcmf_pub *drvr) { brcmf_dbg(TRACE, "Enter\n"); if (drvr->proto) { if (drvr->bus_if->proto_type == BRCMF_PROTO_BCDC) - brcmf_proto_bcdc_detach(drvr); + brcmf_proto_bcdc_detach_post_delif(drvr); else if (drvr->bus_if->proto_type == BRCMF_PROTO_MSGBUF) brcmf_proto_msgbuf_detach(drvr); kfree(drvr->proto); drvr->proto = NULL; } } + +void brcmf_proto_detach_pre_delif(struct brcmf_pub *drvr) +{ + if (drvr->proto && drvr->bus_if->proto_type == BRCMF_PROTO_BCDC) + brcmf_proto_bcdc_detach_pre_delif(drvr); +} diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h index d3c3b9a815ad..72355aea9028 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h @@ -54,7 +54,8 @@ struct brcmf_proto { int brcmf_proto_attach(struct brcmf_pub *drvr); -void brcmf_proto_detach(struct brcmf_pub *drvr); +void brcmf_proto_detach_pre_delif(struct brcmf_pub *drvr); +void brcmf_proto_detach_post_delif(struct brcmf_pub *drvr); static inline int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, struct sk_buff *skb, diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index 4d104ab80fd8..22b73da42822 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -622,6 +622,7 @@ BRCMF_FW_DEF(43430A0, "brcmfmac43430a0-sdio"); /* Note the names are not 
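[Editor's aside: the new .reset bus op above (reached through the brcmf_bus_reset() inline, which returns -EOPNOTSUPP for buses that do not implement it) follows a teardown-then-reprobe shape: unregister the upper layers, quiesce the device, then restart the asynchronous firmware request so probe-time setup runs again. Approximate outline with example_* stand-ins:

static int example_pcie_reset(struct device *dev)
{
	struct example_fw_request *fwreq;
	int err;

	example_detach(dev);		/* drop netdevs and wiphy */
	example_release_resources(dev);	/* irq, rings, scratch buffers */
	example_reset_device(dev);

	fwreq = example_prepare_fw_request(dev);
	if (!fwreq)
		return -ENOMEM;

	/* completion callback re-runs the normal setup path */
	err = example_get_firmwares(dev, fwreq, example_setup);
	if (err)
		kfree(fwreq);
	return err;
}
]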
postfixed with a1 for backward compatibility */ BRCMF_FW_DEF(43430A1, "brcmfmac43430-sdio"); BRCMF_FW_DEF(43455, "brcmfmac43455-sdio"); +BRCMF_FW_DEF(43456, "brcmfmac43456-sdio"); BRCMF_FW_DEF(4354, "brcmfmac4354-sdio"); BRCMF_FW_DEF(4356, "brcmfmac4356-sdio"); BRCMF_FW_DEF(4373, "brcmfmac4373-sdio"); @@ -642,7 +643,8 @@ static const struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = { BRCMF_FW_ENTRY(BRCM_CC_4339_CHIP_ID, 0xFFFFFFFF, 4339), BRCMF_FW_ENTRY(BRCM_CC_43430_CHIP_ID, 0x00000001, 43430A0), BRCMF_FW_ENTRY(BRCM_CC_43430_CHIP_ID, 0xFFFFFFFE, 43430A1), - BRCMF_FW_ENTRY(BRCM_CC_4345_CHIP_ID, 0xFFFFFFC0, 43455), + BRCMF_FW_ENTRY(BRCM_CC_4345_CHIP_ID, 0x00000200, 43456), + BRCMF_FW_ENTRY(BRCM_CC_4345_CHIP_ID, 0xFFFFFDC0, 43455), BRCMF_FW_ENTRY(BRCM_CC_4354_CHIP_ID, 0xFFFFFFFF, 4354), BRCMF_FW_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356), BRCMF_FW_ENTRY(CY_CC_4373_CHIP_ID, 0xFFFFFFFF, 4373), @@ -1090,8 +1092,8 @@ static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus) /* dongle indicates the firmware has halted/crashed */ if (hmb_data & HMB_DATA_FWHALT) { - brcmf_err("mailbox indicates firmware halted\n"); - brcmf_dev_coredump(&sdiod->func1->dev); + brcmf_dbg(SDIO, "mailbox indicates firmware halted\n"); + brcmf_fw_crashed(&sdiod->func1->dev); } /* Dongle recomposed rx frames, accept them again */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c index e9cbfd077710..75fcd6752edc 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c @@ -160,7 +160,7 @@ struct brcmf_usbdev_info { struct usb_device *usbdev; struct device *dev; - struct mutex dev_init_lock; + struct completion dev_init_done; int ctl_in_pipe, ctl_out_pipe; struct urb *ctl_urb; /* URB for control endpoint */ @@ -445,22 +445,17 @@ fail: } -static void brcmf_usb_free_q(struct list_head *q, bool pending) +static void brcmf_usb_free_q(struct list_head *q) { struct brcmf_usbreq *req, *next; - int i = 0; + list_for_each_entry_safe(req, next, q, list) { if (!req->urb) { brcmf_err("bad req\n"); break; } - i++; - if (pending) { - usb_kill_urb(req->urb); - } else { - usb_free_urb(req->urb); - list_del_init(&req->list); - } + usb_free_urb(req->urb); + list_del_init(&req->list); } } @@ -682,12 +677,18 @@ static int brcmf_usb_up(struct device *dev) static void brcmf_cancel_all_urbs(struct brcmf_usbdev_info *devinfo) { + int i; + if (devinfo->ctl_urb) usb_kill_urb(devinfo->ctl_urb); if (devinfo->bulk_urb) usb_kill_urb(devinfo->bulk_urb); - brcmf_usb_free_q(&devinfo->tx_postq, true); - brcmf_usb_free_q(&devinfo->rx_postq, true); + if (devinfo->tx_reqs) + for (i = 0; i < devinfo->bus_pub.ntxq; i++) + usb_kill_urb(devinfo->tx_reqs[i].urb); + if (devinfo->rx_reqs) + for (i = 0; i < devinfo->bus_pub.nrxq; i++) + usb_kill_urb(devinfo->rx_reqs[i].urb); } static void brcmf_usb_down(struct device *dev) @@ -1023,8 +1024,8 @@ static void brcmf_usb_detach(struct brcmf_usbdev_info *devinfo) brcmf_dbg(USB, "Enter, devinfo %p\n", devinfo); /* free the URBS */ - brcmf_usb_free_q(&devinfo->rx_freeq, false); - brcmf_usb_free_q(&devinfo->tx_freeq, false); + brcmf_usb_free_q(&devinfo->rx_freeq); + brcmf_usb_free_q(&devinfo->tx_freeq); usb_free_urb(devinfo->ctl_urb); usb_free_urb(devinfo->bulk_urb); @@ -1193,11 +1194,11 @@ static void brcmf_usb_probe_phase2(struct device *dev, int ret, if (ret) goto error; - mutex_unlock(&devinfo->dev_init_lock); + complete(&devinfo->dev_init_done); return; error: 
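[Editor's aside: the table change above is how the 43456 gets its own image. Each BRCMF_FW_ENTRY carries a bitmask of supported chip revisions, so 0x00000200 (BIT(9)) claims revision 9 of the 4345 family while the 43455 mask drops that bit (0xFFFFFFC0 becomes 0xFFFFFDC0). The matching loop in brcmf_fw_alloc_request() (partially visible earlier in this patch) reduces to roughly the following, with the revmask field name as used by the driver's mapping table:

	for (i = 0; i < table_size; i++) {
		if (mapping_table[i].chipid == chip &&
		    mapping_table[i].revmask & BIT(chiprev))
			break;	/* first entry covering this revision wins */
	}
]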
brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), ret); - mutex_unlock(&devinfo->dev_init_lock); + complete(&devinfo->dev_init_done); device_release_driver(dev); } @@ -1265,7 +1266,7 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo) if (ret) goto fail; /* we are done */ - mutex_unlock(&devinfo->dev_init_lock); + complete(&devinfo->dev_init_done); return 0; } bus->chip = bus_pub->devid; @@ -1325,11 +1326,10 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) devinfo->usbdev = usb; devinfo->dev = &usb->dev; - /* Take an init lock, to protect for disconnect while still loading. + /* Init completion, to protect for disconnect while still loading. * Necessary because of the asynchronous firmware load construction */ - mutex_init(&devinfo->dev_init_lock); - mutex_lock(&devinfo->dev_init_lock); + init_completion(&devinfo->dev_init_done); usb_set_intfdata(intf, devinfo); @@ -1407,7 +1407,7 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) return 0; fail: - mutex_unlock(&devinfo->dev_init_lock); + complete(&devinfo->dev_init_done); kfree(devinfo); usb_set_intfdata(intf, NULL); return ret; @@ -1422,7 +1422,7 @@ brcmf_usb_disconnect(struct usb_interface *intf) devinfo = (struct brcmf_usbdev_info *)usb_get_intfdata(intf); if (devinfo) { - mutex_lock(&devinfo->dev_init_lock); + wait_for_completion(&devinfo->dev_init_done); /* Make sure that devinfo still exists. Firmware probe routines * may have released the device and cleared the intfdata. */ diff --git a/drivers/net/wireless/intel/iwlegacy/4965.c b/drivers/net/wireless/intel/iwlegacy/4965.c index ce4144a89217..a20b6c885047 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965.c +++ b/drivers/net/wireless/intel/iwlegacy/4965.c @@ -577,7 +577,6 @@ il4965_math_div_round(s32 num, s32 denom, s32 * res) sign = -sign; denom = -denom; } - *res = 1; *res = ((num * 2 + denom) / (denom * 2)) * sign; return 1; diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c index 0a87d87fbb4f..17b34f6e4515 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c @@ -89,6 +89,7 @@ #define IWL_22000_SO_A_HR_B_FW_PRE "iwlwifi-so-a0-hr-b0-" #define IWL_22000_SO_A_GF_A_FW_PRE "iwlwifi-so-a0-gf-a0-" #define IWL_22000_TY_A_GF_A_FW_PRE "iwlwifi-ty-a0-gf-a0-" +#define IWL_22000_SO_A_GF4_A_FW_PRE "iwlwifi-so-a0-gf4-a0-" #define IWL_22000_HR_MODULE_FIRMWARE(api) \ IWL_22000_HR_FW_PRE __stringify(api) ".ucode" @@ -180,7 +181,11 @@ static const struct iwl_ht_params iwl_22000_ht_params = { .dbgc_supported = true, \ .min_umac_error_event_table = 0x400000, \ .d3_debug_data_base_addr = 0x401000, \ - .d3_debug_data_length = 60 * 1024 + .d3_debug_data_length = 60 * 1024, \ + .fw_mon_smem_write_ptr_addr = 0xa0c16c, \ + .fw_mon_smem_write_ptr_msk = 0xfffff, \ + .fw_mon_smem_cycle_cnt_ptr_addr = 0xa0c174, \ + .fw_mon_smem_cycle_cnt_ptr_msk = 0xfffff #define IWL_DEVICE_AX200_COMMON \ IWL_DEVICE_22000_COMMON, \ @@ -190,7 +195,8 @@ static const struct iwl_ht_params iwl_22000_ht_params = { IWL_DEVICE_22000_COMMON, \ .device_family = IWL_DEVICE_FAMILY_22000, \ .base_params = &iwl_22000_base_params, \ - .csr = &iwl_csr_v1 + .csr = &iwl_csr_v1, \ + .gp2_reg_addr = 0xa02c68 #define IWL_DEVICE_22560 \ IWL_DEVICE_22000_COMMON, \ @@ -203,7 +209,9 @@ static const struct iwl_ht_params iwl_22000_ht_params = { .device_family = IWL_DEVICE_FAMILY_AX210, \ .base_params = &iwl_22560_base_params, \ .csr = &iwl_csr_v1, \ - 
.min_txq_size = 128 + .min_txq_size = 128, \ + .gp2_reg_addr = 0xd02c68, \ + .min_256_ba_txq_size = 512 const struct iwl_cfg iwl22000_2ac_cfg_hr = { .name = "Intel(R) Dual Band Wireless AC 22000", @@ -440,12 +448,20 @@ const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0 = { const struct iwl_cfg iwlax210_2ax_cfg_so_gf_a0 = { .name = "Intel(R) Wi-Fi 7 AX211 160MHz", .fw_name_pre = IWL_22000_SO_A_GF_A_FW_PRE, + .uhb_supported = true, IWL_DEVICE_AX210, }; const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0 = { .name = "Intel(R) Wi-Fi 7 AX210 160MHz", .fw_name_pre = IWL_22000_TY_A_GF_A_FW_PRE, + .uhb_supported = true, + IWL_DEVICE_AX210, +}; + +const struct iwl_cfg iwlax210_2ax_cfg_so_gf4_a0 = { + .name = "Intel(R) Wi-Fi 7 AX210 160MHz", + .fw_name_pre = IWL_22000_SO_A_GF4_A_FW_PRE, IWL_DEVICE_AX210, }; diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c index 3225b64eb845..41bdd0eaf62c 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018 Intel Corporation + * Copyright (C) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -20,7 +20,7 @@ * BSD LICENSE * * Copyright(c) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018 Intel Corporation + * Copyright (C) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -148,7 +148,11 @@ static const struct iwl_tt_params iwl9000_tt_params = { .d3_debug_data_length = 92 * 1024, \ .ht_params = &iwl9000_ht_params, \ .nvm_ver = IWL9000_NVM_VERSION, \ - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ + .fw_mon_smem_write_ptr_addr = 0xa0476c, \ + .fw_mon_smem_write_ptr_msk = 0xfffff, \ + .fw_mon_smem_cycle_cnt_ptr_addr = 0xa04774, \ + .fw_mon_smem_cycle_cnt_ptr_msk = 0xfffff const struct iwl_cfg iwl9160_2ac_cfg = { diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h index 33858787817b..af1e3d08c179 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h @@ -60,12 +60,13 @@ #include <linux/bitops.h> -/* +/** * struct iwl_fw_ini_header: Common Header for all debug group TLV's structures + * * @tlv_version: version info * @apply_point: &enum iwl_fw_ini_apply_point * @data: TLV data followed - **/ + */ struct iwl_fw_ini_header { __le32 tlv_version; __le32 apply_point; @@ -73,7 +74,7 @@ struct iwl_fw_ini_header { } __packed; /* FW_DEBUG_TLV_HEADER_S */ /** - * struct iwl_fw_ini_allocation_tlv - (IWL_FW_INI_TLV_TYPE_BUFFER_ALLOCATION) + * struct iwl_fw_ini_allocation_tlv - (IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION) * buffer allocation TLV - for debug * * @iwl_fw_ini_header: header @@ -84,7 +85,7 @@ struct iwl_fw_ini_header { * @max_fragments: the maximum allowed fragmentation in the desired memory * allocation above * @min_frag_size: the minimum allowed fragmentation size in bytes -*/ + */ struct iwl_fw_ini_allocation_tlv { struct iwl_fw_ini_header header; __le32 allocation_id; @@ -95,33 +96,52 @@ struct iwl_fw_ini_allocation_tlv { } __packed; /* FW_DEBUG_TLV_BUFFER_ALLOCATION_TLV_S_VER_1 */ /** - * struct iwl_fw_ini_hcmd (IWL_FW_INI_TLV_TYPE_HCMD) - * Generic Host 
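[Editor's aside: many of the dbg-tlv.h hunks that follow are pure kernel-doc repairs: scripts/kernel-doc only recognizes comments that open with /** and close with */, and every struct member needs an @name line. The canonical form being restored, on a hypothetical struct:

/**
 * struct example_tlv - one-line summary of the structure
 * @version: TLV version number
 * @data: payload, a trailing flexible array
 */
struct example_tlv {
	__le32 version;
	u8 data[];
} __packed;
]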
command pass through TLV + * enum iwl_fw_ini_dbg_domain - debug domains + * allows to send host cmd or collect memory region if a given domain is enabled + * + * @IWL_FW_INI_DBG_DOMAIN_ALWAYS_ON: the default domain, always on + * @IWL_FW_INI_DBG_DOMAIN_REPORT_PS: power save domain + */ +enum iwl_fw_ini_dbg_domain { + IWL_FW_INI_DBG_DOMAIN_ALWAYS_ON = 0, + IWL_FW_INI_DBG_DOMAIN_REPORT_PS, +}; /* FW_DEBUG_TLV_DOMAIN_API_E_VER_1 */ + +/** + * struct iwl_fw_ini_hcmd * * @id: the debug configuration command type for instance: 0xf6 / 0xf5 / DHC * @group: the desired cmd group - * @padding: all zeros for dword alignment - * @data: all of the relevant command (0xf6/0xf5) to be sent -*/ + * @reserved: to align to FW struct + * @data: all of the relevant command data to be sent + */ struct iwl_fw_ini_hcmd { u8 id; u8 group; - __le16 padding; + __le16 reserved; u8 data[0]; -} __packed; /* FW_DEBUG_TLV_HCMD_DATA_S */ +} __packed; /* FW_DEBUG_TLV_HCMD_DATA_API_S_VER_1 */ /** - * struct iwl_fw_ini_hcmd_tlv + * struct iwl_fw_ini_hcmd_tlv - (IWL_UCODE_TLV_TYPE_HCMD) + * Generic Host command pass through TLV + * * @header: header + * @domain: send command only if the specific domain is enabled + * &enum iwl_fw_ini_dbg_domain + * @period_msec: period in which the hcmd will be sent to FW. Measured in msec + * (0 = one time command). * @hcmd: a variable length host-command to be sent to apply the configuration. */ struct iwl_fw_ini_hcmd_tlv { struct iwl_fw_ini_header header; + __le32 domain; + __le32 period_msec; struct iwl_fw_ini_hcmd hcmd; -} __packed; /* FW_DEBUG_TLV_HCMD_S_VER_1 */ +} __packed; /* FW_DEBUG_TLV_HCMD_API_S_VER_1 */ -/* - * struct iwl_fw_ini_debug_flow_tlv (IWL_FW_INI_TLV_TYPE_DEBUG_FLOW) +/** + * struct iwl_fw_ini_debug_flow_tlv - (IWL_UCODE_TLV_TYPE_DEBUG_FLOW) * * @header: header * @debug_flow_cfg: &enum iwl_fw_ini_debug_flow @@ -135,7 +155,19 @@ struct iwl_fw_ini_debug_flow_tlv { #define IWL_FW_INI_MAX_NAME 32 /** + * struct iwl_fw_ini_region_cfg_dhc - defines dhc response to dump. + * + * @id_and_grp: id and group of dhc response. + * @desc: dhc response descriptor. + */ +struct iwl_fw_ini_region_cfg_dhc { + __le32 id_and_grp; + __le32 desc; +} __packed; /* FW_DEBUG_TLV_REGION_DHC_API_S_VER_1 */ + +/** * struct iwl_fw_ini_region_cfg_internal - meta data of internal memory region + * * @num_of_range: the amount of ranges in the region * @range_data_size: size of the data to read per range, in bytes. */ @@ -146,6 +178,7 @@ struct iwl_fw_ini_region_cfg_internal { /** * struct iwl_fw_ini_region_cfg_fifos - meta data of fifos region + * * @fid1: fifo id 1 - bitmap of lmac tx/rx fifos to include in the region * @fid2: fifo id 2 - bitmap of umac rx fifos to include in the region. * It is unused for tx. @@ -163,34 +196,43 @@ struct iwl_fw_ini_region_cfg_fifos { /** * struct iwl_fw_ini_region_cfg + * * @region_id: ID of this dump configuration * @region_type: &enum iwl_fw_ini_region_type - * @num_regions: amount of regions in the address array. + * @domain: dump this region only if the specific domain is enabled + * &enum iwl_fw_ini_dbg_domain * @name_len: name length * @name: file name to use for this region * @internal: used in case the region uses internal memory. * @allocation_id: For DRAM type field substitutes for allocation_id * @fifos: used in case of fifos region. + * @dhc_desc: dhc response descriptor. + * @notif_id_and_grp: dump this region only if the specific notification + * occurred. * @offset: offset to use for each memory base address * @start_addr: array of addresses. 
*/ struct iwl_fw_ini_region_cfg { __le32 region_id; __le32 region_type; + __le32 domain; __le32 name_len; u8 name[IWL_FW_INI_MAX_NAME]; union { struct iwl_fw_ini_region_cfg_internal internal; __le32 allocation_id; struct iwl_fw_ini_region_cfg_fifos fifos; - }; + struct iwl_fw_ini_region_cfg_dhc dhc_desc; + __le32 notif_id_and_grp; + }; /* FW_DEBUG_TLV_REGION_EXT_INT_PARAMS_API_U_VER_1 */ __le32 offset; __le32 start_addr[]; -} __packed; /* FW_DEBUG_TLV_REGION_CONFIG_S */ +} __packed; /* FW_DEBUG_TLV_REGION_CONFIG_API_S_VER_1 */ /** - * struct iwl_fw_ini_region_tlv - (IWL_FW_INI_TLV_TYPE_REGION_CFG) - * DUMP sections define IDs and triggers that use those IDs TLV + * struct iwl_fw_ini_region_tlv - (IWL_UCODE_TLV_TYPE_REGIONS) + * defines memory regions to dump + * * @header: header * @num_regions: how many different region section and IDs are coming next * @region_config: list of dump configurations @@ -199,13 +241,12 @@ struct iwl_fw_ini_region_tlv { struct iwl_fw_ini_header header; __le32 num_regions; struct iwl_fw_ini_region_cfg region_config[]; -} __packed; /* FW_DEBUG_TLV_REGIONS_S_VER_1 */ +} __packed; /* FW_DEBUG_TLV_REGIONS_API_S_VER_1 */ /** - * struct iwl_fw_ini_trigger - (IWL_FW_INI_TLV_TYPE_DUMP_CFG) - * Region sections define IDs and triggers that use those IDs TLV + * struct iwl_fw_ini_trigger * - * @trigger_id: enum &iwl_fw_ini_tigger_id + * @trigger_id: &enum iwl_fw_ini_trigger_id * @override_trig: determines how apply trigger in case a trigger with the * same id is already in use. Using the first 2 bytes: * Byte 0: if 0, override trigger configuration, otherwise use the @@ -214,6 +255,7 @@ struct iwl_fw_ini_region_tlv { * existing trigger. * @dump_delay: delay from trigger fire to dump, in usec * @occurrences: max amount of times to be fired + * @reserved: to align to FW struct * @ignore_consec: ignore consecutive triggers, in usec * @force_restart: force FW restart * @multi_dut: initiate debug dump data on several DUTs @@ -226,17 +268,18 @@ struct iwl_fw_ini_trigger { __le32 override_trig; __le32 dump_delay; __le32 occurrences; + __le32 reserved; __le32 ignore_consec; __le32 force_restart; __le32 multi_dut; __le32 trigger_data; __le32 num_regions; __le32 data[]; -} __packed; /* FW_TLV_DEBUG_TRIGGER_CONFIG_S */ +} __packed; /* FW_TLV_DEBUG_TRIGGER_CONFIG_API_S_VER_1 */ /** - * struct iwl_fw_ini_trigger_tlv - (IWL_FW_INI_TLV_TYPE_TRIGGERS_CFG) - * DUMP sections define IDs and triggers that use those IDs TLV + * struct iwl_fw_ini_trigger_tlv - (IWL_UCODE_TLV_TYPE_TRIGGERS) + * Triggers that hold memory regions to dump in case a trigger fires * * @header: header * @num_triggers: how many different triggers section and IDs are coming next @@ -246,16 +289,18 @@ struct iwl_fw_ini_trigger_tlv { struct iwl_fw_ini_header header; __le32 num_triggers; struct iwl_fw_ini_trigger trigger_config[]; -} __packed; /* FW_TLV_DEBUG_TRIGGERS_S_VER_1 */ +} __packed; /* FW_TLV_DEBUG_TRIGGERS_API_S_VER_1 */ /** * enum iwl_fw_ini_trigger_id + * * @IWL_FW_TRIGGER_ID_FW_ASSERT: FW assert * @IWL_FW_TRIGGER_ID_FW_HW_ERROR: HW assert * @IWL_FW_TRIGGER_ID_FW_TFD_Q_HANG: TFD queue hang * @IWL_FW_TRIGGER_ID_FW_DEBUG_HOST_TRIGGER: FW debug notification - * @IWL_FW_TRIGGER_ID_FW_GENERIC_NOTIFOCATION: FW generic notification + * @IWL_FW_TRIGGER_ID_FW_GENERIC_NOTIFICATION: FW generic notification * @IWL_FW_TRIGGER_ID_USER_TRIGGER: User trigger + * @IWL_FW_TRIGGER_ID_PERIODIC_TRIGGER: triggers periodically * @IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_INACTIVITY: peer inactivity * 
@IWL_FW_TRIGGER_ID_HOST_TX_LATENCY_THRESHOLD_CROSSED: TX latency * threshold was crossed @@ -299,47 +344,51 @@ enum iwl_fw_ini_trigger_id { /* FW triggers */ IWL_FW_TRIGGER_ID_FW_DEBUG_HOST_TRIGGER = 4, - IWL_FW_TRIGGER_ID_FW_GENERIC_NOTIFOCATION = 5, + IWL_FW_TRIGGER_ID_FW_GENERIC_NOTIFICATION = 5, /* User trigger */ IWL_FW_TRIGGER_ID_USER_TRIGGER = 6, + /* periodic uses the data field for the interval time */ + IWL_FW_TRIGGER_ID_PERIODIC_TRIGGER = 7, + /* Host triggers */ - IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_INACTIVITY = 7, - IWL_FW_TRIGGER_ID_HOST_TX_LATENCY_THRESHOLD_CROSSED = 8, - IWL_FW_TRIGGER_ID_HOST_TX_RESPONSE_STATUS_FAILED = 9, - IWL_FW_TRIGGER_ID_HOST_OS_REQ_DEAUTH_PEER = 10, - IWL_FW_TRIGGER_ID_HOST_STOP_GO_REQUEST = 11, - IWL_FW_TRIGGER_ID_HOST_START_GO_REQUEST = 12, - IWL_FW_TRIGGER_ID_HOST_JOIN_GROUP_REQUEST = 13, - IWL_FW_TRIGGER_ID_HOST_SCAN_START = 14, - IWL_FW_TRIGGER_ID_HOST_SCAN_SUBMITTED = 15, - IWL_FW_TRIGGER_ID_HOST_SCAN_PARAMS = 16, - IWL_FW_TRIGGER_ID_HOST_CHECK_FOR_HANG = 17, - IWL_FW_TRIGGER_ID_HOST_BAR_RECEIVED = 18, - IWL_FW_TRIGGER_ID_HOST_AGG_TX_RESPONSE_STATUS_FAILED = 19, - IWL_FW_TRIGGER_ID_HOST_EAPOL_TX_RESPONSE_FAILED = 20, - IWL_FW_TRIGGER_ID_HOST_FAKE_TX_RESPONSE_SUSPECTED = 21, - IWL_FW_TRIGGER_ID_HOST_AUTH_REQ_FROM_ASSOC_CLIENT = 22, - IWL_FW_TRIGGER_ID_HOST_ROAM_COMPLETE = 23, - IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAST_FAILED = 24, - IWL_FW_TRIGGER_ID_HOST_D3_START = 25, - IWL_FW_TRIGGER_ID_HOST_D3_END = 26, - IWL_FW_TRIGGER_ID_HOST_BSS_MISSED_BEACONS = 27, - IWL_FW_TRIGGER_ID_HOST_P2P_CLIENT_MISSED_BEACONS = 28, - IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_TX_FAILURES = 29, - IWL_FW_TRIGGER_ID_HOST_TX_WFD_ACTION_FRAME_FAILED = 30, - IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAILED = 31, - IWL_FW_TRIGGER_ID_HOST_SCAN_COMPLETE = 32, - IWL_FW_TRIGGER_ID_HOST_SCAN_ABORT = 33, - IWL_FW_TRIGGER_ID_HOST_NIC_ALIVE = 34, - IWL_FW_TRIGGER_ID_HOST_CHANNEL_SWITCH_COMPLETE = 35, + IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_INACTIVITY = 8, + IWL_FW_TRIGGER_ID_HOST_TX_LATENCY_THRESHOLD_CROSSED = 9, + IWL_FW_TRIGGER_ID_HOST_TX_RESPONSE_STATUS_FAILED = 10, + IWL_FW_TRIGGER_ID_HOST_OS_REQ_DEAUTH_PEER = 11, + IWL_FW_TRIGGER_ID_HOST_STOP_GO_REQUEST = 12, + IWL_FW_TRIGGER_ID_HOST_START_GO_REQUEST = 13, + IWL_FW_TRIGGER_ID_HOST_JOIN_GROUP_REQUEST = 14, + IWL_FW_TRIGGER_ID_HOST_SCAN_START = 15, + IWL_FW_TRIGGER_ID_HOST_SCAN_SUBMITTED = 16, + IWL_FW_TRIGGER_ID_HOST_SCAN_PARAMS = 17, + IWL_FW_TRIGGER_ID_HOST_CHECK_FOR_HANG = 18, + IWL_FW_TRIGGER_ID_HOST_BAR_RECEIVED = 19, + IWL_FW_TRIGGER_ID_HOST_AGG_TX_RESPONSE_STATUS_FAILED = 20, + IWL_FW_TRIGGER_ID_HOST_EAPOL_TX_RESPONSE_FAILED = 21, + IWL_FW_TRIGGER_ID_HOST_FAKE_TX_RESPONSE_SUSPECTED = 22, + IWL_FW_TRIGGER_ID_HOST_AUTH_REQ_FROM_ASSOC_CLIENT = 23, + IWL_FW_TRIGGER_ID_HOST_ROAM_COMPLETE = 24, + IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAST_FAILED = 25, + IWL_FW_TRIGGER_ID_HOST_D3_START = 26, + IWL_FW_TRIGGER_ID_HOST_D3_END = 27, + IWL_FW_TRIGGER_ID_HOST_BSS_MISSED_BEACONS = 28, + IWL_FW_TRIGGER_ID_HOST_P2P_CLIENT_MISSED_BEACONS = 29, + IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_TX_FAILURES = 30, + IWL_FW_TRIGGER_ID_HOST_TX_WFD_ACTION_FRAME_FAILED = 31, + IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAILED = 32, + IWL_FW_TRIGGER_ID_HOST_SCAN_COMPLETE = 33, + IWL_FW_TRIGGER_ID_HOST_SCAN_ABORT = 34, + IWL_FW_TRIGGER_ID_HOST_NIC_ALIVE = 35, + IWL_FW_TRIGGER_ID_HOST_CHANNEL_SWITCH_COMPLETE = 36, IWL_FW_TRIGGER_ID_NUM, }; /* FW_DEBUG_TLV_TRIGGER_ID_E_VER_1 */ /** * enum iwl_fw_ini_apply_point + * * @IWL_FW_INI_APPLY_INVALID: invalid * @IWL_FW_INI_APPLY_EARLY: pre loading FW * 
@IWL_FW_INI_APPLY_AFTER_ALIVE: first cmd from host after alive @@ -360,6 +409,7 @@ enum iwl_fw_ini_apply_point { /** * enum iwl_fw_ini_allocation_id + * * @IWL_FW_INI_ALLOCATION_INVALID: invalid * @IWL_FW_INI_ALLOCATION_ID_DBGC1: allocation meant for DBGC1 configuration * @IWL_FW_INI_ALLOCATION_ID_DBGC2: allocation meant for DBGC2 configuration @@ -380,18 +430,22 @@ enum iwl_fw_ini_allocation_id { /** * enum iwl_fw_ini_buffer_location + * * @IWL_FW_INI_LOCATION_INVALID: invalid * @IWL_FW_INI_LOCATION_SRAM_PATH: SRAM location * @IWL_FW_INI_LOCATION_DRAM_PATH: DRAM location + * @IWL_FW_INI_LOCATION_NPK_PATH: NPK location */ enum iwl_fw_ini_buffer_location { IWL_FW_INI_LOCATION_INVALID, IWL_FW_INI_LOCATION_SRAM_PATH, IWL_FW_INI_LOCATION_DRAM_PATH, + IWL_FW_INI_LOCATION_NPK_PATH, }; /* FW_DEBUG_TLV_BUFFER_LOCATION_E_VER_1 */ /** * enum iwl_fw_ini_debug_flow + * * @IWL_FW_INI_DEBUG_INVALID: invalid * @IWL_FW_INI_DEBUG_DBTR_FLOW: undefined * @IWL_FW_INI_DEBUG_TB2DTF_FLOW: undefined @@ -404,6 +458,7 @@ enum iwl_fw_ini_debug_flow { /** * enum iwl_fw_ini_region_type + * * @IWL_FW_INI_REGION_INVALID: invalid * @IWL_FW_INI_REGION_DEVICE_MEMORY: device internal memory * @IWL_FW_INI_REGION_PERIPHERY_MAC: periphery registers of MAC @@ -416,6 +471,8 @@ enum iwl_fw_ini_debug_flow { * @IWL_FW_INI_REGION_RXF: RX fifo * @IWL_FW_INI_REGION_PAGING: paging memory * @IWL_FW_INI_REGION_CSR: CSR registers + * @IWL_FW_INI_REGION_NOTIFICATION: FW notification data + * @IWL_FW_INI_REGION_DHC: dhc response to dump * @IWL_FW_INI_REGION_NUM: number of region types */ enum iwl_fw_ini_region_type { @@ -431,6 +488,8 @@ enum iwl_fw_ini_region_type { IWL_FW_INI_REGION_RXF, IWL_FW_INI_REGION_PAGING, IWL_FW_INI_REGION_CSR, + IWL_FW_INI_REGION_NOTIFICATION, + IWL_FW_INI_REGION_DHC, IWL_FW_INI_REGION_NUM }; /* FW_DEBUG_TLV_REGION_TYPE_E_VER_1 */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h index 941c50477003..85c5e367cbf1 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h @@ -542,6 +542,66 @@ enum iwl_he_htc_flags { #define IWL_HE_HTC_LINK_ADAP_BOTH (3 << IWL_HE_HTC_LINK_ADAP_POS) /** + * struct iwl_he_sta_context_cmd_v1 - configure FW to work with HE AP + * @sta_id: STA id + * @tid_limit: max num of TIDs in TX HE-SU multi-TID agg + * 0 - bad value, 1 - multi-tid not supported, 2..8 - tid limit + * @reserved1: reserved byte for future use + * @reserved2: reserved byte for future use + * @flags: see %iwl_11ax_sta_ctxt_flags + * @ref_bssid_addr: reference BSSID used by the AP + * @reserved0: reserved 2 bytes for aligning the ref_bssid_addr field to 8 bytes + * @htc_flags: which features are supported in HTC + * @frag_flags: frag support in A-MSDU + * @frag_level: frag support level + * @frag_max_num: max num of "open" MSDUs in the receiver (in power of 2) + * @frag_min_size: min frag size (except last frag) + * @pkt_ext: optional, exists according to PPE-present bit in the HE-PHY capa + * @bss_color: 11ax AP ID that is used in the HE SIG-A to mark inter BSS frame + * @htc_trig_based_pkt_ext: default PE in 4us units + * @frame_time_rts_th: HE duration RTS threshold, in units of 32us + * @rand_alloc_ecwmin: random CWmin = 2**ECWmin-1 + * @rand_alloc_ecwmax: random CWmax = 2**ECWmax-1 + * @reserved3: reserved byte for future use + * @trig_based_txf: MU EDCA Parameter set for the trigger based traffic queues + */ +struct iwl_he_sta_context_cmd_v1 { + u8 sta_id; + u8 tid_limit; + u8 reserved1; + u8 
reserved2; + __le32 flags; + + /* The below fields are set via Multiple BSSID IE */ + u8 ref_bssid_addr[6]; + __le16 reserved0; + + /* The below fields are set via HE-capabilities IE */ + __le32 htc_flags; + + u8 frag_flags; + u8 frag_level; + u8 frag_max_num; + u8 frag_min_size; + + /* The below fields are set via PPE thresholds element */ + struct iwl_he_pkt_ext pkt_ext; + + /* The below fields are set via HE-Operation IE */ + u8 bss_color; + u8 htc_trig_based_pkt_ext; + __le16 frame_time_rts_th; + + /* Random access parameter set (i.e. RAPS) */ + u8 rand_alloc_ecwmin; + u8 rand_alloc_ecwmax; + __le16 reserved3; + + /* The below fields are set via MU EDCA parameter set element */ + struct iwl_he_backoff_conf trig_based_txf[AC_NUM]; +} __packed; /* STA_CONTEXT_DOT11AX_API_S_VER_1 */ + +/** * struct iwl_he_sta_context_cmd - configure FW to work with HE AP * @sta_id: STA id * @tid_limit: max num of TIDs in TX HE-SU multi-TID agg @@ -564,6 +624,14 @@ enum iwl_he_htc_flags { * @rand_alloc_ecwmax: random CWmax = 2**ECWmax-1 * @reserved3: reserved byte for future use * @trig_based_txf: MU EDCA Parameter set for the trigger based traffic queues + * @max_bssid_indicator: indicator of the max bssid supported on the associated + * bss + * @bssid_index: index of the associated VAP + * @ema_ap: AP supports enhanced Multi BSSID advertisement + * @profile_periodicity: number of Beacon periods that are needed to receive the + * complete VAPs info + * @bssid_count: actual number of VAPs in the MultiBSS Set + * @reserved4: alignment */ struct iwl_he_sta_context_cmd { u8 sta_id; @@ -599,7 +667,14 @@ struct iwl_he_sta_context_cmd { /* The below fields are set via MU EDCA parameter set element */ struct iwl_he_backoff_conf trig_based_txf[AC_NUM]; -} __packed; /* STA_CONTEXT_DOT11AX_API_S */ + + u8 max_bssid_indicator; + u8 bssid_index; + u8 ema_ap; + u8 profile_periodicity; + u8 bssid_count; + u8 reserved4[3]; +} __packed; /* STA_CONTEXT_DOT11AX_API_S_VER_2 */ /** * struct iwl_he_monitor_cmd - configure air sniffer for HE diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h index 93b392f0c6a4..97b49843e318 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h @@ -8,7 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 Intel Corporation + * Copyright(C) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 Intel Corporation + * Copyright(C) 2018 - 2019 Intel Corporation * All rights reserved. 
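[Editor's aside: the _v1 copy of iwl_he_sta_context_cmd above freezes the wire layout that older firmware expects, while the current struct only appends the MBSSID fields. A driver then sizes the command by the API version the firmware advertises; hedged sketch, where the helper names and the version check are illustrative only, and just the two struct names come from this header:

static int example_send_he_ctxt(struct example_mvm *mvm, void *cmd_payload)
{
	/* v1 firmware must not be sent the appended MBSSID fields */
	size_t len = example_fw_api_ver(mvm) >= 2 ?
		     sizeof(struct iwl_he_sta_context_cmd) :
		     sizeof(struct iwl_he_sta_context_cmd_v1);

	return example_send_cmd(mvm, cmd_payload, len);
}
]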
* * Redistribution and use in source and binary forms, with or without @@ -233,7 +233,8 @@ struct iwl_nvm_get_info_phy { __le32 rx_chains; } __packed; /* REGULATORY_NVM_GET_INFO_PHY_SKU_SECTION_S_VER_1 */ -#define IWL_NUM_CHANNELS (51) +#define IWL_NUM_CHANNELS_V1 51 +#define IWL_NUM_CHANNELS 110 /** * struct iwl_nvm_get_info_regulatory - regulatory information @@ -241,13 +242,39 @@ struct iwl_nvm_get_info_phy { * @channel_profile: regulatory data of this channel * @reserved: reserved */ -struct iwl_nvm_get_info_regulatory { +struct iwl_nvm_get_info_regulatory_v1 { __le32 lar_enabled; - __le16 channel_profile[IWL_NUM_CHANNELS]; + __le16 channel_profile[IWL_NUM_CHANNELS_V1]; __le16 reserved; } __packed; /* REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_1 */ /** + * struct iwl_nvm_get_info_regulatory - regulatory information + * @lar_enabled: is LAR enabled + * @n_channels: number of valid channels in the array + * @channel_profile: regulatory data of this channel + */ +struct iwl_nvm_get_info_regulatory { + __le32 lar_enabled; + __le32 n_channels; + __le32 channel_profile[IWL_NUM_CHANNELS]; +} __packed; /* REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_2 */ + +/** + * struct iwl_nvm_get_info_rsp_v3 - response to get NVM data + * @general: general NVM data + * @mac_sku: data relating to MAC sku + * @phy_sku: data relating to PHY sku + * @regulatory: regulatory data + */ +struct iwl_nvm_get_info_rsp_v3 { + struct iwl_nvm_get_info_general general; + struct iwl_nvm_get_info_sku mac_sku; + struct iwl_nvm_get_info_phy phy_sku; + struct iwl_nvm_get_info_regulatory_v1 regulatory; +} __packed; /* REGULATORY_NVM_GET_INFO_RSP_API_S_VER_3 */ + +/** * struct iwl_nvm_get_info_rsp - response to get NVM data * @general: general NVM data * @mac_sku: data relating to MAC sku @@ -259,7 +286,7 @@ struct iwl_nvm_get_info_rsp { struct iwl_nvm_get_info_sku mac_sku; struct iwl_nvm_get_info_phy phy_sku; struct iwl_nvm_get_info_regulatory regulatory; -} __packed; /* REGULATORY_NVM_GET_INFO_RSP_API_S_VER_3 */ +} __packed; /* REGULATORY_NVM_GET_INFO_RSP_API_S_VER_4 */ /** * struct iwl_nvm_access_complete_cmd - NVM_ACCESS commands are completed diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h index 6e8224ce8906..d55312ef58c9 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h @@ -8,7 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. 
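[Editor's aside: the regulatory response rework above is not just a longer array: IWL_NUM_CHANNELS grows from 51 to 110, the per-channel word widens from __le16 to __le32, and an explicit n_channels count appears, so a parser has to branch on the response version rather than on length alone. Rough sketch; the v2 flag and helper shape are assumptions, only the two structs come from this header:

static void example_parse_regulatory(const void *rsp, bool v2,
				     u32 *profile, u32 *n_channels)
{
	u32 i;

	if (v2) {
		const struct iwl_nvm_get_info_regulatory *r = rsp;

		*n_channels = le32_to_cpu(r->n_channels);
		for (i = 0; i < *n_channels; i++)
			profile[i] = le32_to_cpu(r->channel_profile[i]);
	} else {
		const struct iwl_nvm_get_info_regulatory_v1 *r = rsp;

		*n_channels = IWL_NUM_CHANNELS_V1;
		for (i = 0; i < *n_channels; i++)
			profile[i] = le16_to_cpu(r->channel_profile[i]);
	}
}
]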
* * Redistribution and use in source and binary forms, with or without @@ -688,13 +688,6 @@ struct iwl_rx_mpdu_desc { #define IWL_RX_DESC_SIZE_V1 offsetofend(struct iwl_rx_mpdu_desc, v1) -#define IWL_CD_STTS_OPTIMIZED_POS 0 -#define IWL_CD_STTS_OPTIMIZED_MSK 0x01 -#define IWL_CD_STTS_TRANSFER_STATUS_POS 1 -#define IWL_CD_STTS_TRANSFER_STATUS_MSK 0x0E -#define IWL_CD_STTS_WIFI_STATUS_POS 4 -#define IWL_CD_STTS_WIFI_STATUS_MSK 0xF0 - #define RX_NO_DATA_CHAIN_A_POS 0 #define RX_NO_DATA_CHAIN_A_MSK (0xff << RX_NO_DATA_CHAIN_A_POS) #define RX_NO_DATA_CHAIN_B_POS 8 @@ -747,62 +740,6 @@ struct iwl_rx_no_data { __le32 rx_vec[2]; } __packed; /* RX_NO_DATA_NTFY_API_S_VER_1 */ -/** - * enum iwl_completion_desc_transfer_status - transfer status (bits 1-3) - * @IWL_CD_STTS_UNUSED: unused - * @IWL_CD_STTS_UNUSED_2: unused - * @IWL_CD_STTS_END_TRANSFER: successful transfer complete. - * In sniffer mode, when split is used, set in last CD completion. (RX) - * @IWL_CD_STTS_OVERFLOW: In sniffer mode, when using split - used for - * all CD completion. (RX) - * @IWL_CD_STTS_ABORTED: CR abort / close flow. (RX) - * @IWL_CD_STTS_ERROR: general error (RX) - */ -enum iwl_completion_desc_transfer_status { - IWL_CD_STTS_UNUSED, - IWL_CD_STTS_UNUSED_2, - IWL_CD_STTS_END_TRANSFER, - IWL_CD_STTS_OVERFLOW, - IWL_CD_STTS_ABORTED, - IWL_CD_STTS_ERROR, -}; - -/** - * enum iwl_completion_desc_wifi_status - wifi status (bits 4-7) - * @IWL_CD_STTS_VALID: the packet is valid (RX) - * @IWL_CD_STTS_FCS_ERR: frame check sequence error (RX) - * @IWL_CD_STTS_SEC_KEY_ERR: error handling the security key of rx (RX) - * @IWL_CD_STTS_DECRYPTION_ERR: error decrypting the frame (RX) - * @IWL_CD_STTS_DUP: duplicate packet (RX) - * @IWL_CD_STTS_ICV_MIC_ERR: MIC error (RX) - * @IWL_CD_STTS_INTERNAL_SNAP_ERR: problems removing the snap (RX) - * @IWL_CD_STTS_SEC_PORT_FAIL: security port fail (RX) - * @IWL_CD_STTS_BA_OLD_SN: block ack received old SN (RX) - * @IWL_CD_STTS_QOS_NULL: QoS null packet (RX) - * @IWL_CD_STTS_MAC_HDR_ERR: MAC header conversion error (RX) - * @IWL_CD_STTS_MAX_RETRANS: reached max number of retransmissions (TX) - * @IWL_CD_STTS_EX_LIFETIME: exceeded lifetime (TX) - * @IWL_CD_STTS_NOT_USED: completed but not used (RX) - * @IWL_CD_STTS_REPLAY_ERR: pn check failed, replay error (RX) - */ -enum iwl_completion_desc_wifi_status { - IWL_CD_STTS_VALID, - IWL_CD_STTS_FCS_ERR, - IWL_CD_STTS_SEC_KEY_ERR, - IWL_CD_STTS_DECRYPTION_ERR, - IWL_CD_STTS_DUP, - IWL_CD_STTS_ICV_MIC_ERR, - IWL_CD_STTS_INTERNAL_SNAP_ERR, - IWL_CD_STTS_SEC_PORT_FAIL, - IWL_CD_STTS_BA_OLD_SN, - IWL_CD_STTS_QOS_NULL, - IWL_CD_STTS_MAC_HDR_ERR, - IWL_CD_STTS_MAX_RETRANS, - IWL_CD_STTS_EX_LIFETIME, - IWL_CD_STTS_NOT_USED, - IWL_CD_STTS_REPLAY_ERR, -}; - struct iwl_frame_release { u8 baid; u8 reserved; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h index 890a939c463d..1a67a2a439ab 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h @@ -788,7 +788,53 @@ struct iwl_umac_scan_complete { __le32 reserved; } __packed; /* SCAN_COMPLETE_NTF_UMAC_API_S_VER_1 */ -#define SCAN_OFFLOAD_MATCHING_CHANNELS_LEN 5 +#define SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1 5 +#define SCAN_OFFLOAD_MATCHING_CHANNELS_LEN 7 + +/** + * struct iwl_scan_offload_profile_match_v1 - match information + * @bssid: matched bssid + * @reserved: reserved + * @channel: channel where the match occurred + * @energy: energy + * @matching_feature: feature matches + * 
@matching_channels: bitmap of channels that matched, referencing + * the channels passed in the scan offload request. + */ +struct iwl_scan_offload_profile_match_v1 { + u8 bssid[ETH_ALEN]; + __le16 reserved; + u8 channel; + u8 energy; + u8 matching_feature; + u8 matching_channels[SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1]; +} __packed; /* SCAN_OFFLOAD_PROFILE_MATCH_RESULTS_S_VER_1 */ + +/** + * struct iwl_scan_offload_profiles_query_v1 - match results query response + * @matched_profiles: bitmap of matched profiles, referencing the + * matches passed in the scan offload request + * @last_scan_age: age of the last offloaded scan + * @n_scans_done: number of offloaded scans done + * @gp2_d0u: GP2 when D0U occurred + * @gp2_invoked: GP2 when scan offload was invoked + * @resume_while_scanning: not used + * @self_recovery: obsolete + * @reserved: reserved + * @matches: array of match information, one for each match + */ +struct iwl_scan_offload_profiles_query_v1 { + __le32 matched_profiles; + __le32 last_scan_age; + __le32 n_scans_done; + __le32 gp2_d0u; + __le32 gp2_invoked; + u8 resume_while_scanning; + u8 self_recovery; + __le16 reserved; + struct iwl_scan_offload_profile_match_v1 matches[IWL_SCAN_MAX_PROFILES]; +} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_2 */ + /** * struct iwl_scan_offload_profile_match - match information * @bssid: matched bssid @@ -797,7 +843,7 @@ struct iwl_umac_scan_complete { * @energy: energy * @matching_feature: feature matches * @matching_channels: bitmap of channels that matched, referencing - * the channels passed in tue scan offload request + * the channels passed in the scan offload request. */ struct iwl_scan_offload_profile_match { u8 bssid[ETH_ALEN]; @@ -806,7 +852,7 @@ struct iwl_scan_offload_profile_match { u8 energy; u8 matching_feature; u8 matching_channels[SCAN_OFFLOAD_MATCHING_CHANNELS_LEN]; -} __packed; /* SCAN_OFFLOAD_PROFILE_MATCH_RESULTS_S_VER_1 */ +} __packed; /* SCAN_OFFLOAD_PROFILE_MATCH_RESULTS_S_VER_2 */ /** * struct iwl_scan_offload_profiles_query - match results query response @@ -831,7 +877,7 @@ struct iwl_scan_offload_profiles_query { u8 self_recovery; __le16 reserved; struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES]; -} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_2 */ +} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_3 */ /** * struct iwl_umac_scan_iter_complete_notif - notifies end of scanning iteration diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index d7380016f1c0..be72529cc789 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -804,8 +804,8 @@ static void iwl_dump_paging(struct iwl_fw_runtime *fwrt, } static struct iwl_fw_error_dump_file * -_iwl_fw_error_dump(struct iwl_fw_runtime *fwrt, - struct iwl_fw_dump_ptrs *fw_error_dump) +iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt, + struct iwl_fw_dump_ptrs *fw_error_dump) { struct iwl_fw_error_dump_file *dump_file; struct iwl_fw_error_dump_data *dump_data; @@ -967,10 +967,11 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt, if (fifo_len) { iwl_fw_dump_rxf(fwrt, &dump_data); iwl_fw_dump_txf(fwrt, &dump_data); - if (radio_len) - iwl_read_radio_regs(fwrt, &dump_data); } + if (radio_len) + iwl_read_radio_regs(fwrt, &dump_data); + if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_ERROR_INFO) && fwrt->dump.desc) { dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO); @@ -1049,14 +1050,14 @@ static int iwl_dump_ini_prph_iter(struct 
iwl_fw_runtime *fwrt, { struct iwl_fw_ini_error_dump_range *range = range_ptr; __le32 *val = range->data; - u32 addr, prph_val, offset = le32_to_cpu(reg->offset); + u32 prph_val; + u32 addr = le32_to_cpu(reg->start_addr[idx]) + le32_to_cpu(reg->offset); int i; - range->start_addr = reg->start_addr[idx]; + range->start_addr = cpu_to_le64(addr); range->range_data_size = reg->internal.range_data_size; for (i = 0; i < le32_to_cpu(reg->internal.range_data_size); i += 4) { - addr = le32_to_cpu(range->start_addr) + i; - prph_val = iwl_read_prph(fwrt->trans, addr + offset); + prph_val = iwl_read_prph(fwrt->trans, addr + i); if (prph_val == 0x5a5a5a5a) return -EBUSY; *val++ = cpu_to_le32(prph_val); @@ -1071,16 +1072,13 @@ static int iwl_dump_ini_csr_iter(struct iwl_fw_runtime *fwrt, { struct iwl_fw_ini_error_dump_range *range = range_ptr; __le32 *val = range->data; - u32 addr, offset = le32_to_cpu(reg->offset); + u32 addr = le32_to_cpu(reg->start_addr[idx]) + le32_to_cpu(reg->offset); int i; - range->start_addr = reg->start_addr[idx]; + range->start_addr = cpu_to_le64(addr); range->range_data_size = reg->internal.range_data_size; - for (i = 0; i < le32_to_cpu(reg->internal.range_data_size); i += 4) { - addr = le32_to_cpu(range->start_addr) + i; - *val++ = cpu_to_le32(iwl_trans_read32(fwrt->trans, - addr + offset)); - } + for (i = 0; i < le32_to_cpu(reg->internal.range_data_size); i += 4) + *val++ = cpu_to_le32(iwl_trans_read32(fwrt->trans, addr + i)); return sizeof(*range) + le32_to_cpu(range->range_data_size); } @@ -1090,12 +1088,11 @@ static int iwl_dump_ini_dev_mem_iter(struct iwl_fw_runtime *fwrt, void *range_ptr, int idx) { struct iwl_fw_ini_error_dump_range *range = range_ptr; - u32 addr = le32_to_cpu(range->start_addr); - u32 offset = le32_to_cpu(reg->offset); + u32 addr = le32_to_cpu(reg->start_addr[idx]) + le32_to_cpu(reg->offset); - range->start_addr = reg->start_addr[idx]; + range->start_addr = cpu_to_le64(addr); range->range_data_size = reg->internal.range_data_size; - iwl_trans_read_mem_bytes(fwrt->trans, addr + offset, range->data, + iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data, le32_to_cpu(reg->internal.range_data_size)); return sizeof(*range) + le32_to_cpu(range->range_data_size); @@ -1109,7 +1106,7 @@ iwl_dump_ini_paging_gen2_iter(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_error_dump_range *range = range_ptr; u32 page_size = fwrt->trans->init_dram.paging[idx].size; - range->start_addr = cpu_to_le32(idx); + range->start_addr = cpu_to_le64(idx); range->range_data_size = cpu_to_le32(page_size); memcpy(range->data, fwrt->trans->init_dram.paging[idx].block, page_size); @@ -1129,7 +1126,7 @@ static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt, dma_addr_t addr = fwrt->fw_paging_db[idx].fw_paging_phys; u32 page_size = fwrt->fw_paging_db[idx].fw_paging_size; - range->start_addr = cpu_to_le32(idx); + range->start_addr = cpu_to_le64(idx); range->range_data_size = cpu_to_le32(page_size); dma_sync_single_for_cpu(fwrt->trans->dev, addr, page_size, DMA_BIDIRECTIONAL); @@ -1152,7 +1149,7 @@ iwl_dump_ini_mon_dram_iter(struct iwl_fw_runtime *fwrt, if (start_addr == 0x5a5a5a5a) return -EBUSY; - range->start_addr = cpu_to_le32(start_addr); + range->start_addr = cpu_to_le64(start_addr); range->range_data_size = cpu_to_le32(fwrt->trans->fw_mon[idx].size); memcpy(range->data, fwrt->trans->fw_mon[idx].block, @@ -1228,10 +1225,11 @@ static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime *fwrt, { struct iwl_fw_ini_fifo_error_dump_range *range = range_ptr; struct 
iwl_ini_txf_iter_data *iter; + struct iwl_fw_ini_error_dump_register *reg_dump = (void *)range->data; u32 offs = le32_to_cpu(reg->offset), addr; u32 registers_size = - le32_to_cpu(reg->fifos.num_of_registers) * sizeof(__le32); - __le32 *val = range->data; + le32_to_cpu(reg->fifos.num_of_registers) * sizeof(*reg_dump); + __le32 *data; unsigned long flags; int i; @@ -1249,11 +1247,18 @@ static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime *fwrt, iwl_write_prph_no_grab(fwrt->trans, TXF_LARC_NUM + offs, iter->fifo); - /* read txf registers */ + /* + * read txf registers. for each register, write to the dump the + * register address and its value + */ for (i = 0; i < le32_to_cpu(reg->fifos.num_of_registers); i++) { addr = le32_to_cpu(reg->start_addr[i]) + offs; - *val++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr)); + reg_dump->addr = cpu_to_le32(addr); + reg_dump->data = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, + addr)); + + reg_dump++; } if (reg->fifos.header_only) { @@ -1270,8 +1275,9 @@ static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime *fwrt, /* Read FIFO */ addr = TXF_READ_MODIFY_DATA + offs; - for (i = 0; i < iter->fifo_size; i += sizeof(__le32)) - *val++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr)); + data = (void *)reg_dump; + for (i = 0; i < iter->fifo_size; i += sizeof(*data)) + *data++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr)); out: iwl_trans_release_nic_access(fwrt->trans, &flags); @@ -1327,10 +1333,11 @@ static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt, { struct iwl_fw_ini_fifo_error_dump_range *range = range_ptr; struct iwl_ini_rxf_data rxf_data; + struct iwl_fw_ini_error_dump_register *reg_dump = (void *)range->data; u32 offs = le32_to_cpu(reg->offset), addr; u32 registers_size = - le32_to_cpu(reg->fifos.num_of_registers) * sizeof(__le32); - __le32 *val = range->data; + le32_to_cpu(reg->fifos.num_of_registers) * sizeof(*reg_dump); + __le32 *data; unsigned long flags; int i; @@ -1341,17 +1348,22 @@ static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt, if (!iwl_trans_grab_nic_access(fwrt->trans, &flags)) return -EBUSY; - offs += rxf_data.offset; - range->fifo_num = cpu_to_le32(rxf_data.fifo_num); range->num_of_registers = reg->fifos.num_of_registers; range->range_data_size = cpu_to_le32(rxf_data.size + registers_size); - /* read rxf registers */ + /* + * read rxf registers. 
for each register, write to the dump the + * register address and its value + */ for (i = 0; i < le32_to_cpu(reg->fifos.num_of_registers); i++) { addr = le32_to_cpu(reg->start_addr[i]) + offs; - *val++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr)); + reg_dump->addr = cpu_to_le32(addr); + reg_dump->data = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, + addr)); + + reg_dump++; } if (reg->fifos.header_only) { @@ -1359,6 +1371,12 @@ goto out; } + /* + * region registers have absolute values so apply rxf offset after + * reading the registers + */ + offs += rxf_data.offset; + /* Lock fence */ iwl_write_prph_no_grab(fwrt->trans, RXF_SET_FENCE_MODE + offs, 0x1); /* Set fence pointer to the same place like WR pointer */ @@ -1369,8 +1387,9 @@ /* Read FIFO */ addr = RXF_FIFO_RD_FENCE_INC + offs; - for (i = 0; i < rxf_data.size; i += sizeof(__le32)) - *val++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr)); + data = (void *)reg_dump; + for (i = 0; i < rxf_data.size; i += sizeof(*data)) + *data++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr)); out: iwl_trans_release_nic_access(fwrt->trans, &flags); @@ -1384,32 +1403,86 @@ static void *iwl_dump_ini_mem_fill_header(struct iwl_fw_runtime *fwrt, { struct iwl_fw_ini_error_dump *dump = data; + dump->header.version = cpu_to_le32(IWL_INI_DUMP_MEM_VER); + return dump->ranges; } static void -*iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt, - struct iwl_fw_ini_region_cfg *reg, - void *data) +*iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt, + struct iwl_fw_ini_region_cfg *reg, + struct iwl_fw_ini_monitor_dump *data, + u32 write_ptr_addr, u32 write_ptr_msk, + u32 cycle_cnt_addr, u32 cycle_cnt_msk) { - struct iwl_fw_ini_monitor_dram_dump *mon_dump = (void *)data; u32 write_ptr, cycle_cnt; unsigned long flags; if (!iwl_trans_grab_nic_access(fwrt->trans, &flags)) { - IWL_ERR(fwrt, "Failed to get DRAM monitor header\n"); + IWL_ERR(fwrt, "Failed to get monitor header\n"); return NULL; } - write_ptr = iwl_read_umac_prph_no_grab(fwrt->trans, - MON_BUFF_WRPTR_VER2); - cycle_cnt = iwl_read_umac_prph_no_grab(fwrt->trans, - MON_BUFF_CYCLE_CNT_VER2); + + write_ptr = iwl_read_prph_no_grab(fwrt->trans, write_ptr_addr); + cycle_cnt = iwl_read_prph_no_grab(fwrt->trans, cycle_cnt_addr); + iwl_trans_release_nic_access(fwrt->trans, &flags); - mon_dump->write_ptr = cpu_to_le32(write_ptr); - mon_dump->cycle_cnt = cpu_to_le32(cycle_cnt); + data->header.version = cpu_to_le32(IWL_INI_DUMP_MONITOR_VER); + data->write_ptr = cpu_to_le32(write_ptr & write_ptr_msk); + data->cycle_cnt = cpu_to_le32(cycle_cnt & cycle_cnt_msk); + + return data->ranges; +} + +static void +*iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt, + struct iwl_fw_ini_region_cfg *reg, + void *data) +{ + struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data; + u32 write_ptr_addr, write_ptr_msk, cycle_cnt_addr, cycle_cnt_msk; + + switch (fwrt->trans->cfg->device_family) { + case IWL_DEVICE_FAMILY_9000: + case IWL_DEVICE_FAMILY_22000: + write_ptr_addr = MON_BUFF_WRPTR_VER2; + write_ptr_msk = -1; + cycle_cnt_addr = MON_BUFF_CYCLE_CNT_VER2; + cycle_cnt_msk = -1; + break; + default: + IWL_ERR(fwrt, "Unsupported device family %d\n", + fwrt->trans->cfg->device_family); + return NULL; + } + + return iwl_dump_ini_mon_fill_header(fwrt, reg, mon_dump, write_ptr_addr, + write_ptr_msk, cycle_cnt_addr, + cycle_cnt_msk); +} + +static void 
+*iwl_dump_ini_mon_smem_fill_header(struct iwl_fw_runtime *fwrt, + struct iwl_fw_ini_region_cfg *reg, + void *data) +{ + struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data; + const struct iwl_cfg *cfg = fwrt->trans->cfg; + + if (fwrt->trans->cfg->device_family != IWL_DEVICE_FAMILY_9000 && + fwrt->trans->cfg->device_family != IWL_DEVICE_FAMILY_22000) { + IWL_ERR(fwrt, "Unsupported device family %d\n", + fwrt->trans->cfg->device_family); + return NULL; + } + + return iwl_dump_ini_mon_fill_header(fwrt, reg, mon_dump, + cfg->fw_mon_smem_write_ptr_addr, + cfg->fw_mon_smem_write_ptr_msk, + cfg->fw_mon_smem_cycle_cnt_ptr_addr, + cfg->fw_mon_smem_cycle_cnt_ptr_msk); - return mon_dump->ranges; } static void *iwl_dump_ini_fifo_fill_header(struct iwl_fw_runtime *fwrt, @@ -1418,6 +1491,8 @@ static void *iwl_dump_ini_fifo_fill_header(struct iwl_fw_runtime *fwrt, { struct iwl_fw_ini_fifo_error_dump *dump = data; + dump->header.version = cpu_to_le32(IWL_INI_DUMP_FIFO_VER); + return dump->ranges; } @@ -1509,7 +1584,8 @@ static u32 iwl_dump_ini_paging_get_size(struct iwl_fw_runtime *fwrt, static u32 iwl_dump_ini_mon_dram_get_size(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_region_cfg *reg) { - u32 size = sizeof(struct iwl_fw_ini_monitor_dram_dump); + u32 size = sizeof(struct iwl_fw_ini_monitor_dump) + + sizeof(struct iwl_fw_ini_error_dump_range); if (fwrt->trans->num_blocks) size += fwrt->trans->fw_mon[0].size; @@ -1517,6 +1593,15 @@ static u32 iwl_dump_ini_mon_dram_get_size(struct iwl_fw_runtime *fwrt, return size; } +static u32 iwl_dump_ini_mon_smem_get_size(struct iwl_fw_runtime *fwrt, + struct iwl_fw_ini_region_cfg *reg) +{ + return sizeof(struct iwl_fw_ini_monitor_dump) + + iwl_dump_ini_mem_ranges(fwrt, reg) * + (sizeof(struct iwl_fw_ini_error_dump_range) + + le32_to_cpu(reg->internal.range_data_size)); +} + static u32 iwl_dump_ini_txf_get_size(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_region_cfg *reg) { @@ -1524,7 +1609,7 @@ static u32 iwl_dump_ini_txf_get_size(struct iwl_fw_runtime *fwrt, void *fifo_iter = fwrt->dump.fifo_iter; u32 size = 0; u32 fifo_hdr = sizeof(struct iwl_fw_ini_fifo_error_dump_range) + - le32_to_cpu(reg->fifos.num_of_registers) * sizeof(__le32); + le32_to_cpu(reg->fifos.num_of_registers) * sizeof(__le32) * 2; fwrt->dump.fifo_iter = &iter; while (iwl_ini_txf_iter(fwrt, reg)) { @@ -1547,7 +1632,7 @@ static u32 iwl_dump_ini_rxf_get_size(struct iwl_fw_runtime *fwrt, struct iwl_ini_rxf_data rx_data; u32 size = sizeof(struct iwl_fw_ini_fifo_error_dump) + sizeof(struct iwl_fw_ini_fifo_error_dump_range) + - le32_to_cpu(reg->fifos.num_of_registers) * sizeof(__le32); + le32_to_cpu(reg->fifos.num_of_registers) * sizeof(__le32) * 2; if (reg->fifos.header_only) return size; @@ -1584,17 +1669,17 @@ struct iwl_dump_ini_mem_ops { * @fwrt: fw runtime struct. * @data: dump memory data. * @reg: region to copy to the dump. + * @ops: memory dump operations. 
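Both fill-header paths above funnel into iwl_dump_ini_mon_fill_header(), which reads the write pointer and cycle count under NIC access and masks them before storing them little-endian. A minimal standalone sketch of that select-read-mask pattern, with simplified stand-in types and a fake read_prph() in place of the real iwlwifi helpers:

#include <stdint.h>
#include <stdio.h>

enum device_family { FAMILY_9000, FAMILY_22000, FAMILY_OTHER };

struct mon_dump_header {
	uint32_t write_ptr;	/* kept CPU-endian here for brevity */
	uint32_t cycle_cnt;
};

/* stand-in for iwl_read_prph_no_grab(); returns fake register content */
static uint32_t read_prph(uint32_t addr)
{
	return addr ^ 0x5a5a5a5a;
}

static int mon_fill_header(enum device_family fam, struct mon_dump_header *hdr)
{
	uint32_t wr_addr, wr_msk, cnt_addr, cnt_msk;

	switch (fam) {
	case FAMILY_9000:
	case FAMILY_22000:
		wr_addr = 0xa03c24;	/* MON_BUFF_WRPTR_VER2 */
		wr_msk = 0xffffffff;	/* -1 in the driver: keep all bits */
		cnt_addr = 0xa03c28;	/* MON_BUFF_CYCLE_CNT_VER2 */
		cnt_msk = 0xffffffff;
		break;
	default:
		return -1;		/* unsupported family */
	}

	hdr->write_ptr = read_prph(wr_addr) & wr_msk;
	hdr->cycle_cnt = read_prph(cnt_addr) & cnt_msk;
	return 0;
}

int main(void)
{
	struct mon_dump_header hdr;

	if (!mon_fill_header(FAMILY_22000, &hdr))
		printf("wr=0x%08x cnt=0x%08x\n", hdr.write_ptr, hdr.cycle_cnt);
	return 0;
}

The SMEM variant differs only in taking the addresses and masks from the per-device iwl_cfg fields instead of the hard-coded VER2 registers.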
*/ static void iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, - enum iwl_fw_ini_region_type type, struct iwl_fw_error_dump_data **data, struct iwl_fw_ini_region_cfg *reg, struct iwl_dump_ini_mem_ops *ops) { struct iwl_fw_ini_error_dump_header *header = (void *)(*data)->data; + u32 num_of_ranges, i, type = le32_to_cpu(reg->region_type); void *range; - u32 num_of_ranges, i; if (WARN_ON(!ops || !ops->get_num_of_ranges || !ops->get_size || !ops->fill_mem_hdr || !ops->fill_range)) @@ -1605,6 +1690,7 @@ iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, (*data)->type = cpu_to_le32(type | INI_DUMP_BIT); (*data)->len = cpu_to_le32(ops->get_size(fwrt, reg)); + header->region_id = reg->region_id; header->num_of_ranges = cpu_to_le32(num_of_ranges); header->name_len = cpu_to_le32(min_t(int, IWL_FW_INI_MAX_NAME, le32_to_cpu(reg->name_len))); @@ -1643,7 +1729,6 @@ static int iwl_fw_ini_get_trigger_len(struct iwl_fw_runtime *fwrt, for (i = 0; i < le32_to_cpu(trigger->num_regions); i++) { u32 reg_id = le32_to_cpu(trigger->data[i]); struct iwl_fw_ini_region_cfg *reg; - enum iwl_fw_ini_region_type type; if (WARN_ON(reg_id >= ARRAY_SIZE(fwrt->dump.active_regs))) continue; @@ -1652,13 +1737,11 @@ static int iwl_fw_ini_get_trigger_len(struct iwl_fw_runtime *fwrt, if (WARN(!reg, "Unassigned region %d\n", reg_id)) continue; - type = le32_to_cpu(reg->region_type); - switch (type) { + switch (le32_to_cpu(reg->region_type)) { case IWL_FW_INI_REGION_DEVICE_MEMORY: case IWL_FW_INI_REGION_PERIPHERY_MAC: case IWL_FW_INI_REGION_PERIPHERY_PHY: case IWL_FW_INI_REGION_PERIPHERY_AUX: - case IWL_FW_INI_REGION_INTERNAL_BUFFER: case IWL_FW_INI_REGION_CSR: size += hdr_len + iwl_dump_ini_mem_get_size(fwrt, reg); break; @@ -1668,7 +1751,7 @@ static int iwl_fw_ini_get_trigger_len(struct iwl_fw_runtime *fwrt, case IWL_FW_INI_REGION_RXF: size += hdr_len + iwl_dump_ini_rxf_get_size(fwrt, reg); break; - case IWL_FW_INI_REGION_PAGING: { + case IWL_FW_INI_REGION_PAGING: size += hdr_len; if (iwl_fw_dbg_is_paging_enabled(fwrt)) { size += iwl_dump_ini_paging_get_size(fwrt, reg); @@ -1677,13 +1760,16 @@ static int iwl_fw_ini_get_trigger_len(struct iwl_fw_runtime *fwrt, reg); } break; - } case IWL_FW_INI_REGION_DRAM_BUFFER: if (!fwrt->trans->num_blocks) break; size += hdr_len + iwl_dump_ini_mon_dram_get_size(fwrt, reg); break; + case IWL_FW_INI_REGION_INTERNAL_BUFFER: + size += hdr_len + + iwl_dump_ini_mon_smem_get_size(fwrt, reg); + break; case IWL_FW_INI_REGION_DRAM_IMR: /* Undefined yet */ default: @@ -1701,7 +1787,6 @@ static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt, for (i = 0; i < num; i++) { u32 reg_id = le32_to_cpu(trigger->data[i]); - enum iwl_fw_ini_region_type type; struct iwl_fw_ini_region_cfg *reg; struct iwl_dump_ini_mem_ops ops; @@ -1713,15 +1798,17 @@ static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt, if (!reg) continue; - type = le32_to_cpu(reg->region_type); - switch (type) { + /* currently the driver supports always on domain only */ + if (le32_to_cpu(reg->domain) != IWL_FW_INI_DBG_DOMAIN_ALWAYS_ON) + continue; + + switch (le32_to_cpu(reg->region_type)) { case IWL_FW_INI_REGION_DEVICE_MEMORY: - case IWL_FW_INI_REGION_INTERNAL_BUFFER: ops.get_num_of_ranges = iwl_dump_ini_mem_ranges; ops.get_size = iwl_dump_ini_mem_get_size; ops.fill_mem_hdr = iwl_dump_ini_mem_fill_header; ops.fill_range = iwl_dump_ini_dev_mem_iter; - iwl_dump_ini_mem(fwrt, type, data, reg, &ops); + iwl_dump_ini_mem(fwrt, data, reg, &ops); break; case IWL_FW_INI_REGION_PERIPHERY_MAC: case IWL_FW_INI_REGION_PERIPHERY_PHY: @@ -1730,16 
+1817,23 @@ static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt, ops.get_size = iwl_dump_ini_mem_get_size; ops.fill_mem_hdr = iwl_dump_ini_mem_fill_header; ops.fill_range = iwl_dump_ini_prph_iter; - iwl_dump_ini_mem(fwrt, type, data, reg, &ops); + iwl_dump_ini_mem(fwrt, data, reg, &ops); break; case IWL_FW_INI_REGION_DRAM_BUFFER: ops.get_num_of_ranges = iwl_dump_ini_mon_dram_ranges; ops.get_size = iwl_dump_ini_mon_dram_get_size; ops.fill_mem_hdr = iwl_dump_ini_mon_dram_fill_header; ops.fill_range = iwl_dump_ini_mon_dram_iter; - iwl_dump_ini_mem(fwrt, type, data, reg, &ops); + iwl_dump_ini_mem(fwrt, data, reg, &ops); + break; + case IWL_FW_INI_REGION_INTERNAL_BUFFER: + ops.get_num_of_ranges = iwl_dump_ini_mem_ranges; + ops.get_size = iwl_dump_ini_mon_smem_get_size; + ops.fill_mem_hdr = iwl_dump_ini_mon_smem_fill_header; + ops.fill_range = iwl_dump_ini_dev_mem_iter; + iwl_dump_ini_mem(fwrt, data, reg, &ops); break; - case IWL_FW_INI_REGION_PAGING: { + case IWL_FW_INI_REGION_PAGING: ops.fill_mem_hdr = iwl_dump_ini_mem_fill_header; if (iwl_fw_dbg_is_paging_enabled(fwrt)) { ops.get_num_of_ranges = @@ -1754,9 +1848,8 @@ static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt, ops.fill_range = iwl_dump_ini_paging_gen2_iter; } - iwl_dump_ini_mem(fwrt, type, data, reg, &ops); + iwl_dump_ini_mem(fwrt, data, reg, &ops); break; - } case IWL_FW_INI_REGION_TXF: { struct iwl_ini_txf_iter_data iter = { .init = true }; void *fifo_iter = fwrt->dump.fifo_iter; @@ -1766,7 +1859,7 @@ static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt, ops.get_size = iwl_dump_ini_txf_get_size; ops.fill_mem_hdr = iwl_dump_ini_fifo_fill_header; ops.fill_range = iwl_dump_ini_txf_iter; - iwl_dump_ini_mem(fwrt, type, data, reg, &ops); + iwl_dump_ini_mem(fwrt, data, reg, &ops); fwrt->dump.fifo_iter = fifo_iter; break; } @@ -1775,14 +1868,14 @@ static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt, ops.get_size = iwl_dump_ini_rxf_get_size; ops.fill_mem_hdr = iwl_dump_ini_fifo_fill_header; ops.fill_range = iwl_dump_ini_rxf_iter; - iwl_dump_ini_mem(fwrt, type, data, reg, &ops); + iwl_dump_ini_mem(fwrt, data, reg, &ops); break; case IWL_FW_INI_REGION_CSR: ops.get_num_of_ranges = iwl_dump_ini_mem_ranges; ops.get_size = iwl_dump_ini_mem_get_size; ops.fill_mem_hdr = iwl_dump_ini_mem_fill_header; ops.fill_range = iwl_dump_ini_csr_iter; - iwl_dump_ini_mem(fwrt, type, data, reg, &ops); + iwl_dump_ini_mem(fwrt, data, reg, &ops); break; case IWL_FW_INI_REGION_DRAM_IMR: /* This is undefined yet */ @@ -1793,16 +1886,13 @@ static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt, } static struct iwl_fw_error_dump_file * -_iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt, - struct iwl_fw_dump_ptrs *fw_error_dump) +iwl_fw_error_ini_dump_file(struct iwl_fw_runtime *fwrt) { - int size, id = le32_to_cpu(fwrt->dump.desc->trig_desc.type); + int size; struct iwl_fw_error_dump_data *dump_data; struct iwl_fw_error_dump_file *dump_file; struct iwl_fw_ini_trigger *trigger; - - if (id == FW_DBG_TRIGGER_FW_ASSERT) - id = IWL_FW_TRIGGER_ID_FW_ASSERT; + enum iwl_fw_ini_trigger_id id = fwrt->dump.ini_trig_id; if (!iwl_fw_ini_trigger_on(fwrt, id)) return NULL; @@ -1819,8 +1909,6 @@ _iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt, if (!dump_file) return NULL; - fw_error_dump->fwrt_ptr = dump_file; - dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER); dump_data = (void *)dump_file->data; dump_file->file_len = cpu_to_le32(size); @@ -1830,47 +1918,27 @@ _iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt, 
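Every branch of the switch above fills the same four-slot vtable and then calls iwl_dump_ini_mem(), so the writer itself stays region-agnostic: count the ranges, size the block, emit the header, append each range. A reduced sketch of that flow, using hypothetical simplified types (the real header also carries version, region id and name):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct region_cfg { uint32_t id; uint32_t num_ranges; uint32_t range_size; };

struct dump_ops {
	uint32_t (*get_num_of_ranges)(const struct region_cfg *reg);
	uint32_t (*get_size)(const struct region_cfg *reg);
	void *(*fill_mem_hdr)(const struct region_cfg *reg, void *data);
	int (*fill_range)(const struct region_cfg *reg, void *range, int idx);
};

static uint32_t mem_ranges(const struct region_cfg *r) { return r->num_ranges; }
static uint32_t mem_size(const struct region_cfg *r)
{
	return r->num_ranges * r->range_size;
}
static void *mem_hdr(const struct region_cfg *r, void *data)
{
	(void)r;	/* the driver writes version/id/name here */
	return data;
}
static int mem_range(const struct region_cfg *r, void *range, int idx)
{
	memset(range, idx, r->range_size);	/* fake payload */
	return (int)r->range_size;
}

/* generic walker: header first, then each range appended in turn */
static int dump_region(const struct region_cfg *reg,
		       const struct dump_ops *ops, void *buf)
{
	void *range = ops->fill_mem_hdr(reg, buf);
	uint32_t i, n = ops->get_num_of_ranges(reg);
	int written = 0;

	for (i = 0; i < n; i++) {
		int len = ops->fill_range(reg, range, (int)i);

		if (len < 0)
			return len;	/* e.g. -EBUSY on a dead register */
		range = (char *)range + len;
		written += len;
	}
	return written;
}

int main(void)
{
	static const struct dump_ops ops = {
		mem_ranges, mem_size, mem_hdr, mem_range,
	};
	struct region_cfg reg = { .id = 1, .num_ranges = 3, .range_size = 16 };
	char buf[64];

	if (ops.get_size(&reg) > sizeof(buf))
		return 1;
	printf("dumped %d bytes\n", dump_region(&reg, &ops, buf));
	return 0;
}

Dropping the explicit type argument from iwl_dump_ini_mem() works because the region config already knows its own type; the callers only differ in which operations they plug in.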
return dump_file; } -void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) +static void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) { - struct iwl_fw_dump_ptrs *fw_error_dump; + struct iwl_fw_dump_ptrs fw_error_dump = {}; struct iwl_fw_error_dump_file *dump_file; struct scatterlist *sg_dump_data; u32 file_len; u32 dump_mask = fwrt->fw->dbg.dump_mask; - IWL_DEBUG_INFO(fwrt, "WRT dump start\n"); - - /* there's no point in fw dump if the bus is dead */ - if (test_bit(STATUS_TRANS_DEAD, &fwrt->trans->status)) { - IWL_ERR(fwrt, "Skip fw error dump since bus is dead\n"); - goto out; - } - - fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL); - if (!fw_error_dump) - goto out; - - if (fwrt->trans->ini_valid) - dump_file = _iwl_fw_error_ini_dump(fwrt, fw_error_dump); - else - dump_file = _iwl_fw_error_dump(fwrt, fw_error_dump); - - if (!dump_file) { - kfree(fw_error_dump); + dump_file = iwl_fw_error_dump_file(fwrt, &fw_error_dump); + if (!dump_file) goto out; - } if (!fwrt->trans->ini_valid && fwrt->dump.monitor_only) dump_mask &= IWL_FW_ERROR_DUMP_FW_MONITOR; - if (!fwrt->trans->ini_valid) - fw_error_dump->trans_ptr = - iwl_trans_dump_data(fwrt->trans, dump_mask); - + fw_error_dump.trans_ptr = iwl_trans_dump_data(fwrt->trans, dump_mask); file_len = le32_to_cpu(dump_file->file_len); - fw_error_dump->fwrt_len = file_len; - if (fw_error_dump->trans_ptr) { - file_len += fw_error_dump->trans_ptr->len; + fw_error_dump.fwrt_len = file_len; + + if (fw_error_dump.trans_ptr) { + file_len += fw_error_dump.trans_ptr->len; dump_file->file_len = cpu_to_le32(file_len); } @@ -1878,27 +1946,49 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) if (sg_dump_data) { sg_pcopy_from_buffer(sg_dump_data, sg_nents(sg_dump_data), - fw_error_dump->fwrt_ptr, - fw_error_dump->fwrt_len, 0); - if (fw_error_dump->trans_ptr) + fw_error_dump.fwrt_ptr, + fw_error_dump.fwrt_len, 0); + if (fw_error_dump.trans_ptr) sg_pcopy_from_buffer(sg_dump_data, sg_nents(sg_dump_data), - fw_error_dump->trans_ptr->data, - fw_error_dump->trans_ptr->len, - fw_error_dump->fwrt_len); + fw_error_dump.trans_ptr->data, + fw_error_dump.trans_ptr->len, + fw_error_dump.fwrt_len); dev_coredumpsg(fwrt->trans->dev, sg_dump_data, file_len, GFP_KERNEL); } - vfree(fw_error_dump->fwrt_ptr); - vfree(fw_error_dump->trans_ptr); - kfree(fw_error_dump); + vfree(fw_error_dump.fwrt_ptr); + vfree(fw_error_dump.trans_ptr); out: iwl_fw_free_dump_desc(fwrt); clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status); - IWL_DEBUG_INFO(fwrt, "WRT dump done\n"); } -IWL_EXPORT_SYMBOL(iwl_fw_error_dump); + +static void iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt) +{ + struct iwl_fw_error_dump_file *dump_file; + struct scatterlist *sg_dump_data; + u32 file_len; + + dump_file = iwl_fw_error_ini_dump_file(fwrt); + if (!dump_file) + goto out; + + file_len = le32_to_cpu(dump_file->file_len); + + sg_dump_data = alloc_sgtable(file_len); + if (sg_dump_data) { + sg_pcopy_from_buffer(sg_dump_data, sg_nents(sg_dump_data), + dump_file, file_len, 0); + dev_coredumpsg(fwrt->trans->dev, sg_dump_data, file_len, + GFP_KERNEL); + } + vfree(dump_file); +out: + fwrt->dump.ini_trig_id = IWL_FW_TRIGGER_ID_INVALID; + clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status); +} const struct iwl_fw_dump_desc iwl_dump_desc_assert = { .trig_desc = { @@ -1912,6 +2002,17 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt, bool monitor_only, unsigned int delay) { + u32 trig_type = le32_to_cpu(desc->trig_desc.type); + int ret; + + if (fwrt->trans->ini_valid) { + ret = iwl_fw_dbg_ini_collect(fwrt, 
trig_type); + if (!ret) + iwl_fw_free_dump_desc(fwrt); + + return ret; + } + if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status)) return -EBUSY; @@ -1953,10 +2054,10 @@ int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt, } IWL_EXPORT_SYMBOL(iwl_fw_dbg_error_collect); -int _iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, - enum iwl_fw_dbg_trigger trig, - const char *str, size_t len, - struct iwl_fw_dbg_trigger_tlv *trigger) +int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, + enum iwl_fw_dbg_trigger trig, + const char *str, size_t len, + struct iwl_fw_dbg_trigger_tlv *trigger) { struct iwl_fw_dump_desc *desc; unsigned int delay = 0; @@ -1993,50 +2094,64 @@ int _iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, return iwl_fw_dbg_collect_desc(fwrt, desc, monitor_only, delay); } -IWL_EXPORT_SYMBOL(_iwl_fw_dbg_collect); +IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect); -int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, - u32 id, const char *str, size_t len) +int _iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt, + enum iwl_fw_ini_trigger_id id) { - struct iwl_fw_dump_desc *desc; struct iwl_fw_ini_active_triggers *active; u32 occur, delay; - if (!fwrt->trans->ini_valid) - return _iwl_fw_dbg_collect(fwrt, id, str, len, NULL); + if (WARN_ON(!iwl_fw_ini_trigger_on(fwrt, id))) + return -EINVAL; - if (id == FW_DBG_TRIGGER_USER) - id = IWL_FW_TRIGGER_ID_USER_TRIGGER; + if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status)) + return -EBUSY; active = &fwrt->dump.active_trigs[id]; - - if (WARN_ON(!active->active)) - return -EINVAL; - delay = le32_to_cpu(active->trig->dump_delay); occur = le32_to_cpu(active->trig->occurrences); if (!occur) return 0; + active->trig->occurrences = cpu_to_le32(--occur); + if (le32_to_cpu(active->trig->force_restart)) { IWL_WARN(fwrt, "Force restart: trigger %d fired.\n", id); iwl_force_nmi(fwrt->trans); return 0; } - desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC); - if (!desc) - return -ENOMEM; + fwrt->dump.ini_trig_id = id; - active->trig->occurrences = cpu_to_le32(--occur); + IWL_WARN(fwrt, "Collecting data: ini trigger %d fired.\n", id); - desc->len = len; - desc->trig_desc.type = cpu_to_le32(id); - memcpy(desc->trig_desc.data, str, len); + schedule_delayed_work(&fwrt->dump.wk, usecs_to_jiffies(delay)); - return iwl_fw_dbg_collect_desc(fwrt, desc, true, delay); + return 0; } -IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect); +IWL_EXPORT_SYMBOL(_iwl_fw_dbg_ini_collect); + +int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt, u32 legacy_trigger_id) +{ + int id; + + switch (legacy_trigger_id) { + case FW_DBG_TRIGGER_FW_ASSERT: + case FW_DBG_TRIGGER_ALIVE_TIMEOUT: + case FW_DBG_TRIGGER_DRIVER: + id = IWL_FW_TRIGGER_ID_FW_ASSERT; + break; + case FW_DBG_TRIGGER_USER: + id = IWL_FW_TRIGGER_ID_USER_TRIGGER; + break; + default: + return -EIO; + } + + return _iwl_fw_dbg_ini_collect(fwrt, id); +} +IWL_EXPORT_SYMBOL(iwl_fw_dbg_ini_collect); int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt, struct iwl_fw_dbg_trigger_tlv *trigger, @@ -2064,8 +2179,8 @@ int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt, len = strlen(buf) + 1; } - ret = _iwl_fw_dbg_collect(fwrt, le32_to_cpu(trigger->id), buf, len, - trigger); + ret = iwl_fw_dbg_collect(fwrt, le32_to_cpu(trigger->id), buf, len, + trigger); if (ret) return ret; @@ -2139,9 +2254,20 @@ void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt) return; } + /* there's no point in fw dump if the bus is dead */ + if (test_bit(STATUS_TRANS_DEAD, &fwrt->trans->status)) { + IWL_ERR(fwrt, "Skip fw error dump since bus is 
dead\n"); + return; + } + iwl_fw_dbg_stop_recording(fwrt, ¶ms); - iwl_fw_error_dump(fwrt); + IWL_DEBUG_INFO(fwrt, "WRT dump start\n"); + if (fwrt->trans->ini_valid) + iwl_fw_error_ini_dump(fwrt); + else + iwl_fw_error_dump(fwrt); + IWL_DEBUG_INFO(fwrt, "WRT dump done\n"); /* start recording again if the firmware is not crashed */ if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status) && @@ -2285,6 +2411,10 @@ static void iwl_fw_dbg_send_hcmd(struct iwl_fw_runtime *fwrt, .data = { data->data, }, }; + /* currently the driver supports always on domain only */ + if (le32_to_cpu(hcmd_tlv->domain) != IWL_FW_INI_DBG_DOMAIN_ALWAYS_ON) + return; + iwl_trans_send_cmd(fwrt->trans, &hcmd); } diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h index a199056234d3..cccf91db74c4 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h @@ -108,18 +108,17 @@ static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt) fwrt->dump.umac_err_id = 0; } -void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt); int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt, const struct iwl_fw_dump_desc *desc, bool monitor_only, unsigned int delay); int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt, enum iwl_fw_dbg_trigger trig_type); -int _iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, - enum iwl_fw_dbg_trigger trig, - const char *str, size_t len, - struct iwl_fw_dbg_trigger_tlv *trigger); +int _iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt, + enum iwl_fw_ini_trigger_id id); +int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt, u32 legacy_trigger_id); int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, - u32 id, const char *str, size_t len); + enum iwl_fw_dbg_trigger trig, const char *str, + size_t len, struct iwl_fw_dbg_trigger_tlv *trigger); int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt, struct iwl_fw_dbg_trigger_tlv *trigger, const char *fmt, ...) 
__printf(3, 4); @@ -229,10 +228,8 @@ iwl_fw_ini_trigger_on(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_trigger *trig; u32 usec; - - - if (!fwrt->trans->ini_valid || id >= IWL_FW_TRIGGER_ID_NUM || - !fwrt->dump.active_trigs[id].active) + if (!fwrt->trans->ini_valid || id == IWL_FW_TRIGGER_ID_INVALID || + id >= IWL_FW_TRIGGER_ID_NUM || !fwrt->dump.active_trigs[id].active) return false; trig = fwrt->dump.active_trigs[id].trig; @@ -461,4 +458,14 @@ static inline void iwl_fw_umac_set_alive_err_table(struct iwl_trans *trans, /* This bit is used to differentiate the legacy dump from the ini dump */ #define INI_DUMP_BIT BIT(31) +static inline void iwl_fw_error_collect(struct iwl_fw_runtime *fwrt) +{ + if (fwrt->trans->ini_valid && fwrt->trans->hw_error) { + _iwl_fw_dbg_ini_collect(fwrt, IWL_FW_TRIGGER_ID_FW_HW_ERROR); + fwrt->trans->hw_error = false; + } else { + iwl_fw_dbg_collect_desc(fwrt, &iwl_dump_desc_assert, false, 0); + } +} + #endif /* __iwl_fw_dbg_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h index 9b5077bd46c3..260097c75427 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h @@ -211,6 +211,9 @@ struct iwl_fw_error_dump_info { * @fw_mon_wr_ptr: the position of the write pointer in the cyclic buffer * @fw_mon_base_ptr: base pointer of the data * @fw_mon_cycle_cnt: number of wraparounds + * @fw_mon_base_high_ptr: used in AX210 devices, the base address is 64 bit + * so fw_mon_base_ptr holds LSB 32 bits and fw_mon_base_high_ptr holds + * MSB 32 bits * @reserved: for future use * @data: captured data */ @@ -218,7 +221,8 @@ struct iwl_fw_error_dump_fw_mon { __le32 fw_mon_wr_ptr; __le32 fw_mon_base_ptr; __le32 fw_mon_cycle_cnt; - __le32 reserved[3]; + __le32 fw_mon_base_high_ptr; + __le32 reserved[2]; u8 data[]; } __packed; @@ -274,25 +278,33 @@ struct iwl_fw_error_dump_mem { u8 data[]; }; +#define IWL_INI_DUMP_MEM_VER 1 +#define IWL_INI_DUMP_MONITOR_VER 1 +#define IWL_INI_DUMP_FIFO_VER 1 + /** * struct iwl_fw_ini_error_dump_range - range of memory - * @start_addr: the start address of this range * @range_data_size: the size of this range, in bytes + * @start_addr: the start address of this range * @data: the actual memory */ struct iwl_fw_ini_error_dump_range { - __le32 start_addr; __le32 range_data_size; + __le64 start_addr; __le32 data[]; } __packed; /** * struct iwl_fw_ini_error_dump_header - ini region dump header + * @version: dump version + * @region_id: id of the region * @num_of_ranges: number of ranges in this region * @name_len: number of bytes allocated to the name string of this region * @name: name of the region */ struct iwl_fw_ini_error_dump_header { + __le32 version; + __le32 region_id; __le32 num_of_ranges; __le32 name_len; u8 name[IWL_FW_INI_MAX_NAME]; @@ -312,12 +324,23 @@ struct iwl_fw_ini_error_dump { #define IWL_RXF_UMAC_BIT BIT(31) /** + * struct iwl_fw_ini_error_dump_register - ini register dump + * @addr: address of the register + * @data: data of the register + */ +struct iwl_fw_ini_error_dump_register { + __le32 addr; + __le32 data; +} __packed; + +/** * struct iwl_fw_ini_fifo_error_dump_range - ini fifo range dump * @fifo_num: the fifo num.
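With fw_mon_base_high_ptr added for AX210, a consumer of the dump rebuilds the 64-bit monitor base address from the two 32-bit halves. A sketch of that recombination; endian conversion is elided here, where the kernel side would use le32_to_cpu():

#include <stdint.h>
#include <stdio.h>

/* low word from fw_mon_base_ptr, high word from fw_mon_base_high_ptr */
static uint64_t mon_base(uint32_t base_lsb, uint32_t base_msb)
{
	return ((uint64_t)base_msb << 32) | base_lsb;
}

int main(void)
{
	/* example values, assumed already converted from little endian */
	printf("base = 0x%016llx\n",
	       (unsigned long long)mon_base(0xdead0000u, 0x00000001u));
	return 0;
}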
In case of rxf and umac rxf, set BIT(31) to * distinguish between lmac and umac * @num_of_registers: num of registers to dump, dword size each - * @range_data_size: the size of the registers and fifo data - * @data: fifo data + * @range_data_size: the size of the data + * @data: consists of + * num_of_registers * (register address + register value) + fifo data */ struct iwl_fw_ini_fifo_error_dump_range { __le32 fifo_num; @@ -351,13 +374,13 @@ struct iwl_fw_error_dump_rb { }; /** - * struct iwl_fw_ini_monitor_dram_dump - ini dram monitor dump + * struct iwl_fw_ini_monitor_dump - ini monitor dump * @header - header of the region - * @write_ptr - write pointer position in the dram + * @write_ptr - write pointer position in the buffer * @cycle_cnt - cycles count * @ranges - the memory ranges of this region */ -struct iwl_fw_ini_monitor_dram_dump { +struct iwl_fw_ini_monitor_dump { struct iwl_fw_ini_error_dump_header header; __le32 write_ptr; __le32 cycle_cnt; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h index e06407dc088b..cd622af90077 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/file.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h @@ -275,8 +275,15 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t; * version of the beacon notification. * @IWL_UCODE_TLV_API_BEACON_FILTER_V4: This ucode supports v4 of * BEACON_FILTER_CONFIG_API_S_VER_4. + * @IWL_UCODE_TLV_API_REGULATORY_NVM_INFO: This ucode supports v4 of + * REGULATORY_NVM_GET_INFO_RSP_API_S. * @IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ: This ucode supports v7 of * LOCATION_RANGE_REQ_CMD_API_S and v6 of LOCATION_RANGE_RESP_NTFY_API_S. + * @IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS: This ucode supports v2 of + * SCAN_OFFLOAD_PROFILE_MATCH_RESULTS_S and v3 of + * SCAN_OFFLOAD_PROFILES_QUERY_RSP_S. + * @IWL_UCODE_TLV_API_MBSSID_HE: This ucode supports v2 of + * STA_CONTEXT_DOT11AX_API_S * * @NUM_IWL_UCODE_TLV_API: number of bits used */ @@ -303,7 +310,10 @@ enum iwl_ucode_tlv_api { IWL_UCODE_TLV_API_REDUCE_TX_POWER = (__force iwl_ucode_tlv_api_t)45, IWL_UCODE_TLV_API_SHORT_BEACON_NOTIF = (__force iwl_ucode_tlv_api_t)46, IWL_UCODE_TLV_API_BEACON_FILTER_V4 = (__force iwl_ucode_tlv_api_t)47, + IWL_UCODE_TLV_API_REGULATORY_NVM_INFO = (__force iwl_ucode_tlv_api_t)48, IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ = (__force iwl_ucode_tlv_api_t)49, + IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS = (__force iwl_ucode_tlv_api_t)50, + IWL_UCODE_TLV_API_MBSSID_HE = (__force iwl_ucode_tlv_api_t)52, NUM_IWL_UCODE_TLV_API #ifdef __CHECKER__ @@ -353,6 +363,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t; * IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD: firmware supports CSA command * @IWL_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS: firmware supports ultra high band * (6 GHz).
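The reworked fifo range described above interleaves (address, value) register pairs ahead of the raw fifo payload, which is also why the size accounting earlier in this patch doubles num_of_registers * sizeof(__le32). A hypothetical parser for that layout, with endian conversion omitted (the real structs are little endian):

#include <stdint.h>
#include <stdio.h>

/* mirrors iwl_fw_ini_error_dump_register: one (address, value) pair */
struct reg_dump { uint32_t addr; uint32_t data; };

static const uint8_t *parse_fifo_range(const uint8_t *p, uint32_t num_regs,
				       uint32_t range_data_size)
{
	const struct reg_dump *regs = (const void *)p;
	uint32_t i, fifo_len = range_data_size - num_regs * sizeof(*regs);

	for (i = 0; i < num_regs; i++)
		printf("reg 0x%08x = 0x%08x\n", (unsigned)regs[i].addr,
		       (unsigned)regs[i].data);

	p += num_regs * sizeof(*regs);
	printf("fifo payload: %u bytes\n", (unsigned)fifo_len);
	return p + fifo_len;	/* start of the next range */
}

int main(void)
{
	/* two register pairs followed by 8 bytes of fifo payload */
	uint32_t buf[6] = { 0xa03c24, 0x11, 0xa03c28, 0x22, 0, 0 };

	parse_fifo_range((const uint8_t *)buf, 2, sizeof(buf));
	return 0;
}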
+ * @IWL_UCODE_TLV_CAPA_CS_MODIFY: firmware supports modify action CSA command * @IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE: extended DTS measurement * @IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS: supports short PM timeouts * @IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT: supports bt-coex Multi-priority LUT @@ -423,6 +434,7 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD = (__force iwl_ucode_tlv_capa_t)46, IWL_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS = (__force iwl_ucode_tlv_capa_t)48, IWL_UCODE_TLV_CAPA_FTM_CALIBRATED = (__force iwl_ucode_tlv_capa_t)47, + IWL_UCODE_TLV_CAPA_CS_MODIFY = (__force iwl_ucode_tlv_capa_t)49, /* set 2 */ IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h index a5fe1a8ca426..88a558e082a3 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h @@ -145,6 +145,7 @@ struct iwl_fw_runtime { u32 lmac_err_id[MAX_NUM_LMAC]; u32 umac_err_id; void *fifo_iter; + enum iwl_fw_ini_trigger_id ini_trig_id; } dump; #ifdef CONFIG_IWLWIFI_DEBUGFS struct { diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index 93070848280a..0a93383791f3 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -383,6 +383,9 @@ struct iwl_csr_params { * @bisr_workaround: BISR hardware workaround (for 22260 series devices) * @min_txq_size: minimum number of slots required in a TX queue * @umac_prph_offset: offset to add to UMAC periphery address + * @uhb_supported: ultra high band channels supported + * @min_256_ba_txq_size: minimum number of slots required in a TX queue which + * supports 256 BA aggregation * * We enable the driver to be backward compatible wrt. hardware features. * API differences in uCode shouldn't be handled here but through TLVs @@ -433,7 +436,8 @@ struct iwl_cfg { gen2:1, cdb:1, dbgc_supported:1, - bisr_workaround:1; + bisr_workaround:1, + uhb_supported:1; u8 valid_tx_ant; u8 valid_rx_ant; u8 non_shared_ant; @@ -450,6 +454,12 @@ struct iwl_cfg { u32 d3_debug_data_length; u32 min_txq_size; u32 umac_prph_offset; + u32 fw_mon_smem_write_ptr_addr; + u32 fw_mon_smem_write_ptr_msk; + u32 fw_mon_smem_cycle_cnt_ptr_addr; + u32 fw_mon_smem_cycle_cnt_ptr_msk; + u32 gp2_reg_addr; + u32 min_256_ba_txq_size; }; extern const struct iwl_csr_params iwl_csr_v1; @@ -573,6 +583,7 @@ extern const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0; extern const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0; extern const struct iwl_cfg iwlax210_2ax_cfg_so_gf_a0; extern const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0; +extern const struct iwl_cfg iwlax210_2ax_cfg_so_gf4_a0; #endif /* CPTCFG_IWLMVM || CPTCFG_IWLFMAC */ #endif /* __IWL_CONFIG_H__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index e539bc94eff7..2b98ecdcf301 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h @@ -8,7 +8,7 @@ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 
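New TLV bits such as 48, 50 and 52 above are consumed through fw_has_api()-style checks, which at bottom are bit tests against an array of capability words. A self-contained sketch under that assumption, using plain uint32_t words instead of the driver's __bitwise types:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* bit indices as in the enum above */
#define API_REGULATORY_NVM_INFO	48
#define API_SCAN_OFFLOAD_CHANS	50
#define API_MBSSID_HE		52

/* bit N lives in word N / 32, position N % 32 */
static bool has_api(const uint32_t *api_words, unsigned int bit)
{
	return api_words[bit / 32] & (1u << (bit % 32));
}

int main(void)
{
	uint32_t api[4] = { 0 };

	api[API_SCAN_OFFLOAD_CHANS / 32] |= 1u << (API_SCAN_OFFLOAD_CHANS % 32);
	printf("scan offload chans: %d\n", has_api(api, API_SCAN_OFFLOAD_CHANS));
	printf("mbssid he: %d\n", has_api(api, API_MBSSID_HE));
	return 0;
}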
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2016 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,7 +30,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -337,6 +337,7 @@ enum { #define CSR_HW_RF_ID_TYPE_HR (0x0010A000) #define CSR_HW_RF_ID_TYPE_HRCDB (0x00109F00) #define CSR_HW_RF_ID_TYPE_GF (0x0010D000) +#define CSR_HW_RF_ID_TYPE_GF4 (0x0010E000) /* HW_RF CHIP ID */ #define CSR_HW_RF_ID_TYPE_CHIP_ID(_val) (((_val) >> 12) & 0xFFF) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c index c7070760a10a..0e8664375298 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright (C) 2018 Intel Corporation + * Copyright (C) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -28,7 +28,7 @@ * * BSD LICENSE * - * Copyright (C) 2018 Intel Corporation + * Copyright (C) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -73,6 +73,9 @@ void iwl_fw_dbg_copy_tlv(struct iwl_trans *trans, struct iwl_ucode_tlv *tlv, int copy_size = le32_to_cpu(tlv->length) + sizeof(*tlv); int offset_size = copy_size; + if (le32_to_cpu(header->tlv_version) != 1) + return; + if (WARN_ONCE(apply_point >= IWL_FW_INI_APPLY_NUM, "Invalid apply point id %d\n", apply_point)) return; @@ -133,6 +136,9 @@ void iwl_alloc_dbg_tlv(struct iwl_trans *trans, size_t len, const u8 *data, hdr = (void *)&tlv->data[0]; apply = le32_to_cpu(hdr->apply_point); + if (le32_to_cpu(hdr->tlv_version) != 1) + continue; + IWL_DEBUG_FW(trans, "Read TLV %x, apply point %d\n", le32_to_cpu(tlv->type), apply); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 87d6de7efdd2..40985dc552b8 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -130,7 +130,7 @@ enum nvm_sku_bits { /* * These are the channel numbers in the order that they are stored in the NVM */ -static const u8 iwl_nvm_channels[] = { +static const u16 iwl_nvm_channels[] = { /* 2.4 GHz */ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, /* 5 GHz */ @@ -139,7 +139,7 @@ static const u8 iwl_nvm_channels[] = { 149, 153, 157, 161, 165 }; -static const u8 iwl_ext_nvm_channels[] = { +static const u16 iwl_ext_nvm_channels[] = { /* 2.4 GHz */ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, /* 5 GHz */ @@ -148,14 +148,27 @@ static const u8 iwl_ext_nvm_channels[] = { 149, 153, 157, 161, 165, 169, 173, 177, 181 }; +static const u16 iwl_uhb_nvm_channels[] = { + /* 2.4 GHz */ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + /* 5 GHz */ + 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92, + 96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144, + 149, 153, 157, 161, 165, 169, 173, 177, 
181, + /* 6-7 GHz */ + 189, 193, 197, 201, 205, 209, 213, 217, 221, 225, 229, 233, 237, 241, + 245, 249, 253, 257, 261, 265, 269, 273, 277, 281, 285, 289, 293, 297, + 301, 305, 309, 313, 317, 321, 325, 329, 333, 337, 341, 345, 349, 353, + 357, 361, 365, 369, 373, 377, 381, 385, 389, 393, 397, 401, 405, 409, + 413, 417, 421 +}; + #define IWL_NVM_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels) #define IWL_NVM_NUM_CHANNELS_EXT ARRAY_SIZE(iwl_ext_nvm_channels) +#define IWL_NVM_NUM_CHANNELS_UHB ARRAY_SIZE(iwl_uhb_nvm_channels) #define NUM_2GHZ_CHANNELS 14 -#define NUM_2GHZ_CHANNELS_EXT 14 #define FIRST_2GHZ_HT_MINUS 5 #define LAST_2GHZ_HT_PLUS 9 -#define LAST_5GHZ_HT 165 -#define LAST_5GHZ_HT_FAMILY_8000 181 #define N_HW_ADDR_MASK 0xF /* rate data (static) */ @@ -213,7 +226,7 @@ enum iwl_nvm_channel_flags { }; static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level, - int chan, u16 flags) + int chan, u32 flags) { #define CHECK_AND_PRINT_I(x) \ ((flags & NVM_CHANNEL_##x) ? " " #x : "") @@ -244,20 +257,16 @@ static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level, } static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz, - u16 nvm_flags, const struct iwl_cfg *cfg) + u32 nvm_flags, const struct iwl_cfg *cfg) { u32 flags = IEEE80211_CHAN_NO_HT40; - u32 last_5ghz_ht = LAST_5GHZ_HT; - - if (cfg->nvm_type == IWL_NVM_EXT) - last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000; if (!is_5ghz && (nvm_flags & NVM_CHANNEL_40MHZ)) { if (ch_num <= LAST_2GHZ_HT_PLUS) flags &= ~IEEE80211_CHAN_NO_HT40PLUS; if (ch_num >= FIRST_2GHZ_HT_MINUS) flags &= ~IEEE80211_CHAN_NO_HT40MINUS; - } else if (ch_num <= last_5ghz_ht && (nvm_flags & NVM_CHANNEL_40MHZ)) { + } else if (nvm_flags & NVM_CHANNEL_40MHZ) { if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0) flags &= ~IEEE80211_CHAN_NO_HT40PLUS; else @@ -292,30 +301,36 @@ static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz, static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, struct iwl_nvm_data *data, - const __le16 * const nvm_ch_flags, - u32 sbands_flags) + const void * const nvm_ch_flags, + u32 sbands_flags, bool v4) { int ch_idx; int n_channels = 0; struct ieee80211_channel *channel; - u16 ch_flags; - int num_of_ch, num_2ghz_channels; - const u8 *nvm_chan; - - if (cfg->nvm_type != IWL_NVM_EXT) { - num_of_ch = IWL_NVM_NUM_CHANNELS; - nvm_chan = &iwl_nvm_channels[0]; - num_2ghz_channels = NUM_2GHZ_CHANNELS; - } else { + u32 ch_flags; + int num_of_ch, num_2ghz_channels = NUM_2GHZ_CHANNELS; + const u16 *nvm_chan; + + if (cfg->uhb_supported) { + num_of_ch = IWL_NVM_NUM_CHANNELS_UHB; + nvm_chan = iwl_uhb_nvm_channels; + } else if (cfg->nvm_type == IWL_NVM_EXT) { num_of_ch = IWL_NVM_NUM_CHANNELS_EXT; - nvm_chan = &iwl_ext_nvm_channels[0]; - num_2ghz_channels = NUM_2GHZ_CHANNELS_EXT; + nvm_chan = iwl_ext_nvm_channels; + } else { + num_of_ch = IWL_NVM_NUM_CHANNELS; + nvm_chan = iwl_nvm_channels; } for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) { bool is_5ghz = (ch_idx >= num_2ghz_channels); - ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx); + if (v4) + ch_flags = + __le32_to_cpup((__le32 *)nvm_ch_flags + ch_idx); + else + ch_flags = + __le16_to_cpup((__le16 *)nvm_ch_flags + ch_idx); if (is_5ghz && !data->sku_cap_band_52ghz_enable) continue; @@ -636,12 +651,7 @@ static struct ieee80211_sband_iftype_data iwl_he_capa[] = { static void iwl_init_he_hw_capab(struct ieee80211_supported_band *sband, u8 tx_chains, u8 rx_chains) { - if (sband->band == NL80211_BAND_2GHZ || - sband->band == NL80211_BAND_5GHZ) - 
sband->iftype_data = iwl_he_capa; - else - return; - + sband->iftype_data = iwl_he_capa; sband->n_iftype_data = ARRAY_SIZE(iwl_he_capa); /* If not 2x2, we need to indicate 1x1 in the Midamble RX Max NSTS */ @@ -661,15 +671,15 @@ static void iwl_init_he_hw_capab(struct ieee80211_supported_band *sband, static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, struct iwl_nvm_data *data, - const __le16 *nvm_ch_flags, u8 tx_chains, - u8 rx_chains, u32 sbands_flags) + const void *nvm_ch_flags, u8 tx_chains, + u8 rx_chains, u32 sbands_flags, bool v4) { int n_channels; int n_used = 0; struct ieee80211_supported_band *sband; n_channels = iwl_init_channel_map(dev, cfg, data, nvm_ch_flags, - sbands_flags); + sbands_flags, v4); sband = &data->bands[NL80211_BAND_2GHZ]; sband->band = NL80211_BAND_2GHZ; sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS]; @@ -1006,22 +1016,18 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, sbands_flags |= IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ; iwl_init_sbands(dev, cfg, data, ch_section, tx_chains, rx_chains, - sbands_flags); + sbands_flags, false); data->calib_version = 255; return data; } IWL_EXPORT_SYMBOL(iwl_parse_nvm_data); -static u32 iwl_nvm_get_regdom_bw_flags(const u8 *nvm_chan, +static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan, int ch_idx, u16 nvm_flags, const struct iwl_cfg *cfg) { u32 flags = NL80211_RRF_NO_HT40; - u32 last_5ghz_ht = LAST_5GHZ_HT; - - if (cfg->nvm_type == IWL_NVM_EXT) - last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000; if (ch_idx < NUM_2GHZ_CHANNELS && (nvm_flags & NVM_CHANNEL_40MHZ)) { @@ -1029,8 +1035,7 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u8 *nvm_chan, flags &= ~NL80211_RRF_NO_HT40PLUS; if (nvm_chan[ch_idx] >= FIRST_2GHZ_HT_MINUS) flags &= ~NL80211_RRF_NO_HT40MINUS; - } else if (nvm_chan[ch_idx] <= last_5ghz_ht && - (nvm_flags & NVM_CHANNEL_40MHZ)) { + } else if (nvm_flags & NVM_CHANNEL_40MHZ) { if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0) flags &= ~NL80211_RRF_NO_HT40PLUS; else @@ -1074,18 +1079,26 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, int ch_idx; u16 ch_flags; u32 reg_rule_flags, prev_reg_rule_flags = 0; - const u8 *nvm_chan = cfg->nvm_type == IWL_NVM_EXT ? - iwl_ext_nvm_channels : iwl_nvm_channels; + const u16 *nvm_chan; struct ieee80211_regdomain *regd, *copy_rd; - int size_of_regd, regd_to_copy; struct ieee80211_reg_rule *rule; struct regdb_ptrs *regdb_ptrs; enum nl80211_band band; int center_freq, prev_center_freq = 0; int valid_rules = 0; bool new_rule; - int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ? 
- IWL_NVM_NUM_CHANNELS_EXT : IWL_NVM_NUM_CHANNELS; + int max_num_ch; + + if (cfg->uhb_supported) { + max_num_ch = IWL_NVM_NUM_CHANNELS_UHB; + nvm_chan = iwl_uhb_nvm_channels; + } else if (cfg->nvm_type == IWL_NVM_EXT) { + max_num_ch = IWL_NVM_NUM_CHANNELS_EXT; + nvm_chan = iwl_ext_nvm_channels; + } else { + max_num_ch = IWL_NVM_NUM_CHANNELS; + nvm_chan = iwl_nvm_channels; + } if (WARN_ON(num_of_ch > max_num_ch)) num_of_ch = max_num_ch; @@ -1097,11 +1110,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, num_of_ch); /* build a regdomain rule for every valid channel */ - size_of_regd = - sizeof(struct ieee80211_regdomain) + - num_of_ch * sizeof(struct ieee80211_reg_rule); - - regd = kzalloc(size_of_regd, GFP_KERNEL); + regd = kzalloc(struct_size(regd, reg_rules, num_of_ch), GFP_KERNEL); if (!regd) return ERR_PTR(-ENOMEM); @@ -1177,14 +1186,10 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, * Narrow down regdom for unused regulatory rules to prevent hole * between reg rules to wmm rules. */ - regd_to_copy = sizeof(struct ieee80211_regdomain) + - valid_rules * sizeof(struct ieee80211_reg_rule); - - copy_rd = kmemdup(regd, regd_to_copy, GFP_KERNEL); - if (!copy_rd) { + copy_rd = kmemdup(regd, struct_size(regd, reg_rules, valid_rules), + GFP_KERNEL); + if (!copy_rd) copy_rd = ERR_PTR(-ENOMEM); - goto out; - } out: kfree(regdb_ptrs); @@ -1393,7 +1398,6 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans, const struct iwl_fw *fw) { struct iwl_nvm_get_info cmd = {}; - struct iwl_nvm_get_info_rsp *rsp; struct iwl_nvm_data *nvm; struct iwl_host_cmd hcmd = { .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL, @@ -1408,12 +1412,24 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans, bool empty_otp; u32 mac_flags; u32 sbands_flags = 0; + /* + * All the values in iwl_nvm_get_info_rsp v4 are the same as + * in v3, except for the channel profile part of the + * regulatory. So we can just access the new struct, with the + * exception of the latter. + */ + struct iwl_nvm_get_info_rsp *rsp; + struct iwl_nvm_get_info_rsp_v3 *rsp_v3; + bool v4 = fw_has_api(&fw->ucode_capa, + IWL_UCODE_TLV_API_REGULATORY_NVM_INFO); + size_t rsp_size = v4 ? sizeof(*rsp) : sizeof(*rsp_v3); + void *channel_profile; ret = iwl_trans_send_cmd(trans, &hcmd); if (ret) return ERR_PTR(ret); - if (WARN(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp), + if (WARN(iwl_rx_packet_payload_len(hcmd.resp_pkt) != rsp_size, "Invalid payload len in NVM response from FW %d", iwl_rx_packet_payload_len(hcmd.resp_pkt))) { ret = -EINVAL; @@ -1475,11 +1491,15 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans, sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR; } + rsp_v3 = (void *)rsp; + channel_profile = v4 ? 
(void *)rsp->regulatory.channel_profile : + (void *)rsp_v3->regulatory.channel_profile; + iwl_init_sbands(trans->dev, trans->cfg, nvm, rsp->regulatory.channel_profile, nvm->valid_tx_ant & fw->valid_tx_ant, nvm->valid_rx_ant & fw->valid_rx_ant, - sbands_flags); + sbands_flags, v4); iwl_free_resp(&hcmd); return nvm; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h index 1af9f9e1ecd4..8e6a0c363c0d 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h @@ -368,6 +368,12 @@ #define MON_BUFF_WRPTR_VER2 (0xa03c24) #define MON_BUFF_CYCLE_CNT_VER2 (0xa03c28) #define MON_BUFF_SHIFT_VER2 (0x8) +/* FW monitor family AX210 and on */ +#define DBGC_CUR_DBGBUF_BASE_ADDR_LSB (0xd03c20) +#define DBGC_CUR_DBGBUF_BASE_ADDR_MSB (0xd03c24) +#define DBGC_CUR_DBGBUF_STATUS (0xd03c1c) +#define DBGC_DBGBUF_WRAP_AROUND (0xd03c2c) +#define DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK (0x00ffffff) #define MON_DMARB_RD_CTL_ADDR (0xa03c60) #define MON_DMARB_RD_DATA_ADDR (0xa03c5c) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index d8690acee40c..1e4c9ef548cc 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -274,7 +274,6 @@ struct iwl_rx_cmd_buffer { bool _page_stolen; u32 _rx_page_order; unsigned int truesize; - u8 status; }; static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r) @@ -768,6 +767,7 @@ struct iwl_self_init_dram { * @umac_error_event_table: addr of umac error table * @error_event_table_tlv_status: bitmap that indicates what error table * pointers were received via TLV. use enum &iwl_error_event_table_status + * @hw_error: equals true if hw error interrupt was received from the FW */ struct iwl_trans { const struct iwl_trans_ops *ops; @@ -830,6 +830,7 @@ struct iwl_trans { u32 lmac_error_event_table[2]; u32 umac_error_event_table; unsigned int error_event_table_tlv_status; + bool hw_error; /* pointer to trans specific struct */ /*Ensure that this pointer will always be aligned to sizeof pointer */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 808bc6f363d0..83fd7f93d9f5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -8,7 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved.
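Since v4 of the NVM-info response widens the per-channel flags from 16 to 32 bits, the channel map code above takes a void pointer plus a v4 flag and picks the element width at read time. A stripped-down illustration of that dual-width access, with a hypothetical helper and endian handling omitted:

#include <stdint.h>
#include <stdio.h>

/* v4 profiles store 32-bit flags per channel, older ones 16-bit */
static uint32_t channel_flags(const void *profile, int idx, int v4)
{
	if (v4)
		return ((const uint32_t *)profile)[idx];
	return ((const uint16_t *)profile)[idx];
}

int main(void)
{
	uint16_t v3_profile[] = { 0x0001, 0x0200 };
	uint32_t v4_profile[] = { 0x00010000, 0x00000200 };

	printf("v3 ch1 flags: 0x%x\n", channel_flags(v3_profile, 1, 0));
	printf("v4 ch0 flags: 0x%x\n", channel_flags(v4_profile, 0, 1));
	return 0;
}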
* * Redistribution and use in source and binary forms, with or without @@ -1728,9 +1728,12 @@ void iwl_mvm_d0i3_update_keys(struct iwl_mvm *mvm, iwl_mvm_iter_d0i3_ap_keys(mvm, vif, iwl_mvm_d3_update_keys, >kdata); } +#define ND_QUERY_BUF_LEN (sizeof(struct iwl_scan_offload_profile_match) * \ + IWL_SCAN_MAX_PROFILES) + struct iwl_mvm_nd_query_results { u32 matched_profiles; - struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES]; + u8 matches[ND_QUERY_BUF_LEN]; }; static int @@ -1743,6 +1746,7 @@ iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm, .flags = CMD_WANT_SKB, }; int ret, len; + size_t query_len, matches_len; ret = iwl_mvm_send_cmd(mvm, &cmd); if (ret) { @@ -1750,8 +1754,19 @@ iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm, return ret; } + if (fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) { + query_len = sizeof(struct iwl_scan_offload_profiles_query); + matches_len = sizeof(struct iwl_scan_offload_profile_match) * + IWL_SCAN_MAX_PROFILES; + } else { + query_len = sizeof(struct iwl_scan_offload_profiles_query_v1); + matches_len = sizeof(struct iwl_scan_offload_profile_match_v1) * + IWL_SCAN_MAX_PROFILES; + } + len = iwl_rx_packet_payload_len(cmd.resp_pkt); - if (len < sizeof(*query)) { + if (len < query_len) { IWL_ERR(mvm, "Invalid scan offload profiles query response!\n"); ret = -EIO; goto out_free_resp; @@ -1760,7 +1775,7 @@ iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm, query = (void *)cmd.resp_pkt->data; results->matched_profiles = le32_to_cpu(query->matched_profiles); - memcpy(results->matches, query->matches, sizeof(results->matches)); + memcpy(results->matches, query->matches, matches_len); #ifdef CONFIG_IWLWIFI_DEBUGFS mvm->last_netdetect_scans = le32_to_cpu(query->n_scans_done); @@ -1771,6 +1786,57 @@ out_free_resp: return ret; } +static int iwl_mvm_query_num_match_chans(struct iwl_mvm *mvm, + struct iwl_mvm_nd_query_results *query, + int idx) +{ + int n_chans = 0, i; + + if (fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) { + struct iwl_scan_offload_profile_match *matches = + (struct iwl_scan_offload_profile_match *)query->matches; + + for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; i++) + n_chans += hweight8(matches[idx].matching_channels[i]); + } else { + struct iwl_scan_offload_profile_match_v1 *matches = + (struct iwl_scan_offload_profile_match_v1 *)query->matches; + + for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1; i++) + n_chans += hweight8(matches[idx].matching_channels[i]); + } + + return n_chans; +} + +static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm, + struct iwl_mvm_nd_query_results *query, + struct cfg80211_wowlan_nd_match *match, + int idx) +{ + int i; + + if (fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) { + struct iwl_scan_offload_profile_match *matches = + (struct iwl_scan_offload_profile_match *)query->matches; + + for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; i++) + if (matches[idx].matching_channels[i / 8] & (BIT(i % 8))) + match->channels[match->n_channels++] = + mvm->nd_channels[i]->center_freq; + } else { + struct iwl_scan_offload_profile_match_v1 *matches = + (struct iwl_scan_offload_profile_match_v1 *)query->matches; + + for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1 * 8; i++) + if (matches[idx].matching_channels[i / 8] & (BIT(i % 8))) + match->channels[match->n_channels++] = + mvm->nd_channels[i]->center_freq; + } +} + static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm, struct ieee80211_vif 
*vif) { @@ -1783,7 +1849,7 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm, struct iwl_wowlan_status *fw_status; unsigned long matched_profiles; u32 reasons = 0; - int i, j, n_matches, ret; + int i, n_matches, ret; fw_status = iwl_mvm_get_wakeup_status(mvm); if (!IS_ERR_OR_NULL(fw_status)) { @@ -1817,14 +1883,10 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm, goto out_report_nd; for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) { - struct iwl_scan_offload_profile_match *fw_match; struct cfg80211_wowlan_nd_match *match; int idx, n_channels = 0; - fw_match = &query.matches[i]; - - for (j = 0; j < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; j++) - n_channels += hweight8(fw_match->matching_channels[j]); + n_channels = iwl_mvm_query_num_match_chans(mvm, &query, i); match = kzalloc(struct_size(match, channels, n_channels), GFP_KERNEL); @@ -1844,10 +1906,7 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm, if (mvm->n_nd_channels < n_channels) continue; - for (j = 0; j < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; j++) - if (fw_match->matching_channels[j / 8] & (BIT(j % 8))) - match->channels[match->n_channels++] = - mvm->nd_channels[j]->center_freq; + iwl_mvm_query_set_freqs(mvm, &query, match, i); } out_report_nd: @@ -2030,7 +2089,6 @@ out: * 2. We are using a unified image but had an error while exiting D3 */ set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status); - set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status); /* * When switching images we return 1, which causes mac80211 * to do a reconfig with IEEE80211_RECONFIG_TYPE_RESTART. diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c index 6925527d8457..f043eefabb4e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c @@ -743,9 +743,8 @@ static ssize_t iwl_dbgfs_quota_min_read(struct file *file, #define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \ _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif) #define MVM_DEBUGFS_ADD_FILE_VIF(name, parent, mode) do { \ - if (!debugfs_create_file(#name, mode, parent, vif, \ - &iwl_dbgfs_##name##_ops)) \ - goto err; \ + debugfs_create_file(#name, mode, parent, vif, \ + &iwl_dbgfs_##name##_ops); \ } while (0) MVM_DEBUGFS_READ_FILE_OPS(mac_params); @@ -811,12 +810,6 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) mvmvif->dbgfs_slink = debugfs_create_symlink(dbgfs_dir->d_name.name, mvm->debugfs_dir, buf); - if (!mvmvif->dbgfs_slink) - IWL_ERR(mvm, "Can't create debugfs symbolic link under %pd\n", - dbgfs_dir); - return; -err: - IWL_ERR(mvm, "Can't create debugfs entity\n"); } void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 776b24f54200..d4ff6b44de2c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -1349,7 +1349,7 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm, return 0; iwl_fw_dbg_collect(&mvm->fwrt, FW_DBG_TRIGGER_USER, buf, - (count - 1)); + (count - 1), NULL); iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE); @@ -1696,9 +1696,8 @@ static ssize_t iwl_dbgfs_d0i3_refs_write(struct iwl_mvm *mvm, char *buf, #define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \ _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm) #define 
MVM_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) do { \ - if (!debugfs_create_file(alias, mode, parent, mvm, \ - &iwl_dbgfs_##name##_ops)) \ - goto err; \ + debugfs_create_file(alias, mode, parent, mvm, \ + &iwl_dbgfs_##name##_ops); \ } while (0) #define MVM_DEBUGFS_ADD_FILE(name, parent, mode) \ MVM_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode) @@ -1709,9 +1708,8 @@ static ssize_t iwl_dbgfs_d0i3_refs_write(struct iwl_mvm *mvm, char *buf, _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct ieee80211_sta) #define MVM_DEBUGFS_ADD_STA_FILE_ALIAS(alias, name, parent, mode) do { \ - if (!debugfs_create_file(alias, mode, parent, sta, \ - &iwl_dbgfs_##name##_ops)) \ - goto err; \ + debugfs_create_file(alias, mode, parent, sta, \ + &iwl_dbgfs_##name##_ops); \ } while (0) #define MVM_DEBUGFS_ADD_STA_FILE(name, parent, mode) \ MVM_DEBUGFS_ADD_STA_FILE_ALIAS(#name, name, parent, mode) @@ -2092,13 +2090,9 @@ void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw, if (iwl_mvm_has_tlc_offload(mvm)) MVM_DEBUGFS_ADD_STA_FILE(rs_data, dir, 0400); - - return; -err: - IWL_ERR(mvm, "Can't create the mvm station debugfs entry\n"); } -int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) +void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) { struct dentry *bcast_dir __maybe_unused; char buf[100]; @@ -2142,14 +2136,10 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) #endif MVM_DEBUGFS_ADD_FILE(he_sniffer_params, mvm->debugfs_dir, 0600); - if (!debugfs_create_bool("enable_scan_iteration_notif", - 0600, - mvm->debugfs_dir, - &mvm->scan_iter_notif_enabled)) - goto err; - if (!debugfs_create_bool("drop_bcn_ap_mode", 0600, - mvm->debugfs_dir, &mvm->drop_bcn_ap_mode)) - goto err; + debugfs_create_bool("enable_scan_iteration_notif", 0600, + mvm->debugfs_dir, &mvm->scan_iter_notif_enabled); + debugfs_create_bool("drop_bcn_ap_mode", 0600, mvm->debugfs_dir, + &mvm->drop_bcn_ap_mode); MVM_DEBUGFS_ADD_FILE(uapsd_noagg_bssids, mvm->debugfs_dir, S_IRUSR); @@ -2157,13 +2147,9 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) { bcast_dir = debugfs_create_dir("bcast_filtering", mvm->debugfs_dir); - if (!bcast_dir) - goto err; - if (!debugfs_create_bool("override", 0600, - bcast_dir, - &mvm->dbgfs_bcast_filtering.override)) - goto err; + debugfs_create_bool("override", 0600, bcast_dir, + &mvm->dbgfs_bcast_filtering.override); MVM_DEBUGFS_ADD_FILE_ALIAS("filters", bcast_filters, bcast_dir, 0600); @@ -2175,35 +2161,26 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) #ifdef CONFIG_PM_SLEEP MVM_DEBUGFS_ADD_FILE(d3_sram, mvm->debugfs_dir, 0600); MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, 0400); - if (!debugfs_create_bool("d3_wake_sysassert", 0600, - mvm->debugfs_dir, &mvm->d3_wake_sysassert)) - goto err; - if (!debugfs_create_u32("last_netdetect_scans", 0400, - mvm->debugfs_dir, &mvm->last_netdetect_scans)) - goto err; + debugfs_create_bool("d3_wake_sysassert", 0600, mvm->debugfs_dir, + &mvm->d3_wake_sysassert); + debugfs_create_u32("last_netdetect_scans", 0400, mvm->debugfs_dir, + &mvm->last_netdetect_scans); #endif - if (!debugfs_create_u8("ps_disabled", 0400, - mvm->debugfs_dir, &mvm->ps_disabled)) - goto err; - if (!debugfs_create_blob("nvm_hw", 0400, - mvm->debugfs_dir, &mvm->nvm_hw_blob)) - goto err; - if (!debugfs_create_blob("nvm_sw", 0400, - mvm->debugfs_dir, &mvm->nvm_sw_blob)) - goto err; - if 
(!debugfs_create_blob("nvm_calib", 0400, - mvm->debugfs_dir, &mvm->nvm_calib_blob)) - goto err; - if (!debugfs_create_blob("nvm_prod", 0400, - mvm->debugfs_dir, &mvm->nvm_prod_blob)) - goto err; - if (!debugfs_create_blob("nvm_phy_sku", 0400, - mvm->debugfs_dir, &mvm->nvm_phy_sku_blob)) - goto err; - if (!debugfs_create_blob("nvm_reg", S_IRUSR, - mvm->debugfs_dir, &mvm->nvm_reg_blob)) - goto err; + debugfs_create_u8("ps_disabled", 0400, mvm->debugfs_dir, + &mvm->ps_disabled); + debugfs_create_blob("nvm_hw", 0400, mvm->debugfs_dir, + &mvm->nvm_hw_blob); + debugfs_create_blob("nvm_sw", 0400, mvm->debugfs_dir, + &mvm->nvm_sw_blob); + debugfs_create_blob("nvm_calib", 0400, mvm->debugfs_dir, + &mvm->nvm_calib_blob); + debugfs_create_blob("nvm_prod", 0400, mvm->debugfs_dir, + &mvm->nvm_prod_blob); + debugfs_create_blob("nvm_phy_sku", 0400, mvm->debugfs_dir, + &mvm->nvm_phy_sku_blob); + debugfs_create_blob("nvm_reg", S_IRUSR, + mvm->debugfs_dir, &mvm->nvm_reg_blob); debugfs_create_file("mem", 0600, dbgfs_dir, mvm, &iwl_dbgfs_mem_ops); @@ -2212,11 +2189,5 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) * exists (before the opmode exists which removes the target.) */ snprintf(buf, 100, "../../%pd2", dbgfs_dir->d_parent); - if (!debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir, buf)) - goto err; - - return 0; -err: - IWL_ERR(mvm, "Can't create the mvm debugfs directory\n"); - return -ENOMEM; + debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir, buf); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index 6a70dece447d..fcec25b7b679 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -8,7 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. 
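The debugfs.c conversion above follows the kernel-wide rule that debugfs creation failures are non-fatal and should not be checked, which is what lets iwl_mvm_dbgfs_register() become void and drop every goto err path. A kernel-style sketch of the resulting shape, using an invented private struct and invented file names:

    #include <linux/debugfs.h>

    struct my_priv {
            bool verbose;
            u32 last_event;
            struct dentry *dir;
    };

    /* no error handling: debugfs_create_*() failures are ignored by design */
    static void my_dbgfs_register(struct my_priv *priv, struct dentry *parent)
    {
            priv->dir = debugfs_create_dir("my_device", parent);
            debugfs_create_bool("verbose", 0600, priv->dir, &priv->verbose);
            debugfs_create_u32("last_event", 0400, priv->dir, &priv->last_event);
    }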
* * Redistribution and use in source and binary forms, with or without @@ -262,9 +262,7 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) .preferred_tsf = NUM_TSF_IDS, .found_vif = false, }; - u32 ac; - int ret, i, queue_limit; - unsigned long used_hw_queues; + int ret, i; lockdep_assert_held(&mvm->mutex); @@ -341,37 +339,9 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) INIT_LIST_HEAD(&mvmvif->time_event_data.list); mvmvif->time_event_data.id = TE_MAX; - /* No need to allocate data queues to P2P Device MAC.*/ - if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { - for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) - vif->hw_queue[ac] = IEEE80211_INVAL_HW_QUEUE; - + /* No need to allocate data queues to P2P Device MAC and NAN.*/ + if (vif->type == NL80211_IFTYPE_P2P_DEVICE) return 0; - } - - /* - * queues in mac80211 almost entirely independent of - * the ones here - no real limit - */ - queue_limit = IEEE80211_MAX_QUEUES; - - /* - * Find available queues, and allocate them to the ACs. When in - * DQA-mode they aren't really used, and this is done only so the - * mac80211 ieee80211_check_queues() function won't fail - */ - for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { - u8 queue = find_first_zero_bit(&used_hw_queues, queue_limit); - - if (queue >= queue_limit) { - IWL_ERR(mvm, "Failed to allocate queue\n"); - ret = -EIO; - goto exit_fail; - } - - __set_bit(queue, &used_hw_queues); - vif->hw_queue[ac] = queue; - } /* Allocate the CAB queue for softAP and GO interfaces */ if (vif->type == NL80211_IFTYPE_AP || @@ -1143,9 +1113,7 @@ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm, ieee80211_tu_to_usec(data.beacon_int * rand / 100); } else { - mvmvif->ap_beacon_time = - iwl_read_prph(mvm->trans, - DEVICE_SYSTEM_TIME_REG); + mvmvif->ap_beacon_time = iwl_mvm_get_systime(mvm); } } @@ -1573,6 +1541,7 @@ void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm, rcu_read_lock(); vif = rcu_dereference(mvm->vif_id_to_mac[mac_id]); + mvmvif = iwl_mvm_vif_from_mac80211(vif); switch (vif->type) { case NL80211_IFTYPE_AP: @@ -1581,7 +1550,6 @@ void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm, csa_vif != vif)) goto out_unlock; - mvmvif = iwl_mvm_vif_from_mac80211(csa_vif); csa_id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color); if (WARN(csa_id != id_n_color, "channel switch noa notification on unexpected vif (csa_vif=%d, notif=%d)", @@ -1602,6 +1570,7 @@ void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm, return; case NL80211_IFTYPE_STATION: iwl_mvm_csa_client_absent(mvm, vif); + cancel_delayed_work_sync(&mvmvif->csa_work); ieee80211_chswitch_done(vif, true); break; default: diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 6a3b11dd2edf..5c52469288be 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -8,7 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -420,6 +420,7 @@ int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm) const static u8 he_if_types_ext_capa_sta[] = { [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, + [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT, [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, [9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT, }; @@ -597,6 +598,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) BIT(NL80211_IFTYPE_ADHOC); hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; + wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS); + hw->wiphy->features |= NL80211_FEATURE_HT_IBSS; + hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR; if (iwl_mvm_is_lar_supported(mvm)) hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED; @@ -732,6 +736,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->wiphy->iftype_ext_capab = he_iftypes_ext_capa; hw->wiphy->num_iftype_ext_capab = ARRAY_SIZE(he_iftypes_ext_capa); + + ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID); + ieee80211_hw_set(hw, SUPPORTS_ONLY_HE_MULTI_BSSID); } mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; @@ -1191,15 +1198,6 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac, static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) { - /* clear the D3 reconfig, we only need it to avoid dumping a - * firmware coredump on reconfiguration, we shouldn't do that - * on D3->D0 transition - */ - if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status)) { - mvm->fwrt.dump.desc = &iwl_dump_desc_assert; - iwl_fw_error_dump(&mvm->fwrt); - } - /* cleanup all stale references (scan, roc), but keep the * ucode_down ref until reconfig is complete */ @@ -1500,6 +1498,91 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd); } +static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + int ret; + + mutex_lock(&mvm->mutex); + + if (mvmvif->csa_failed) { + mvmvif->csa_failed = false; + ret = -EIO; + goto out_unlock; + } + + if (vif->type == NL80211_IFTYPE_STATION) { + struct iwl_mvm_sta *mvmsta; + + mvmvif->csa_bcn_pending = false; + mvmsta = iwl_mvm_sta_from_staid_protected(mvm, + mvmvif->ap_sta_id); + + if (WARN_ON(!mvmsta)) { + ret = -EIO; + goto out_unlock; + } + + iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false); + + iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); + + ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0); + if (ret) + goto out_unlock; + + iwl_mvm_stop_session_protection(mvm, vif); + } + + mvmvif->ps_disabled = false; + + ret = iwl_mvm_power_update_ps(mvm); + +out_unlock: + mutex_unlock(&mvm->mutex); + + return ret; +} + +static void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_chan_switch_te_cmd cmd = { + .mac_id = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, + mvmvif->color)), + .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE), + }; + + IWL_DEBUG_MAC80211(mvm, "Abort CSA on mac %d\n", mvmvif->id); + + mutex_lock(&mvm->mutex); + 
WARN_ON(iwl_mvm_send_cmd_pdu(mvm, + WIDE_ID(MAC_CONF_GROUP, + CHANNEL_SWITCH_TIME_EVENT_CMD), + 0, sizeof(cmd), &cmd)); + mutex_unlock(&mvm->mutex); + + WARN_ON(iwl_mvm_post_channel_switch(hw, vif)); +} + +static void iwl_mvm_channel_switch_disconnect_wk(struct work_struct *wk) +{ + struct iwl_mvm *mvm; + struct iwl_mvm_vif *mvmvif; + struct ieee80211_vif *vif; + + mvmvif = container_of(wk, struct iwl_mvm_vif, csa_work.work); + vif = container_of((void *)mvmvif, struct ieee80211_vif, drv_priv); + mvm = mvmvif->mvm; + + iwl_mvm_abort_channel_switch(mvm->hw, vif); + ieee80211_chswitch_done(vif, false); +} + static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { @@ -1626,6 +1709,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, } iwl_mvm_tcm_add_vif(mvm, vif); + INIT_DELAYED_WORK(&mvmvif->csa_work, + iwl_mvm_channel_switch_disconnect_wk); if (vif->type == NL80211_IFTYPE_MONITOR) mvm->monitor_on = true; @@ -2127,6 +2212,10 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, .frame_time_rts_th = cpu_to_le16(vif->bss_conf.frame_time_rts_th), }; + int size = fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_MBSSID_HE) ? + sizeof(sta_ctxt_cmd) : + sizeof(struct iwl_he_sta_context_cmd_v1); struct ieee80211_sta *sta; u32 flags; int i; @@ -2254,16 +2343,18 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, /* Set the PPE thresholds accordingly */ if (low_th >= 0 && high_th >= 0) { - u8 ***pkt_ext_qam = - (void *)sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th; + struct iwl_he_pkt_ext *pkt_ext = + (struct iwl_he_pkt_ext *)&sta_ctxt_cmd.pkt_ext; for (i = 0; i < MAX_HE_SUPP_NSS; i++) { u8 bw; for (bw = 0; bw < MAX_HE_CHANNEL_BW_INDX; bw++) { - pkt_ext_qam[i][bw][0] = low_th; - pkt_ext_qam[i][bw][1] = high_th; + pkt_ext->pkt_ext_qam_th[i][bw][0] = + low_th; + pkt_ext->pkt_ext_qam_th[i][bw][1] = + high_th; } } @@ -2308,13 +2399,23 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, (vif->bss_conf.uora_ocw_range >> 3) & 0x7; } - /* TODO: support Multi BSSID IE */ + if (vif->bss_conf.nontransmitted) { + flags |= STA_CTXT_HE_REF_BSSID_VALID; + ether_addr_copy(sta_ctxt_cmd.ref_bssid_addr, + vif->bss_conf.transmitter_bssid); + sta_ctxt_cmd.max_bssid_indicator = + vif->bss_conf.bssid_indicator; + sta_ctxt_cmd.bssid_index = vif->bss_conf.bssid_index; + sta_ctxt_cmd.ema_ap = vif->bss_conf.ema_ap; + sta_ctxt_cmd.profile_periodicity = + vif->bss_conf.profile_periodicity; + } sta_ctxt_cmd.flags = cpu_to_le32(flags); if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(STA_HE_CTXT_CMD, DATA_PATH_GROUP, 0), - 0, sizeof(sta_ctxt_cmd), &sta_ctxt_cmd)) + 0, size, &sta_ctxt_cmd)) IWL_ERR(mvm, "Failed to config FW to work HE!\n"); } @@ -3612,7 +3713,7 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, int duration) { - int res, time_reg = DEVICE_SYSTEM_TIME_REG; + int res; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data; static const u16 time_event_response[] = { HOT_SPOT_CMD }; @@ -3638,7 +3739,7 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, 0); /* Set the time and duration */ - tail->apply_time = cpu_to_le32(iwl_read_prph(mvm->trans, time_reg)); + tail->apply_time = cpu_to_le32(iwl_mvm_get_systime(mvm)); delay = AUX_ROC_MIN_DELAY; req_dur = MSEC_TO_TU(duration); @@ -4364,8 +4465,8 @@ static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm, int err; u32 noa_duration; - err = nla_parse(tb, IWL_MVM_TM_ATTR_MAX, data, len, iwl_mvm_tm_policy, - NULL); 
+ err = nla_parse_deprecated(tb, IWL_MVM_TM_ATTR_MAX, data, len, + iwl_mvm_tm_policy, NULL); if (err) return err; @@ -4442,16 +4543,22 @@ static int iwl_mvm_schedule_client_csa(struct iwl_mvm *mvm, .action = cpu_to_le32(FW_CTXT_ACTION_ADD), .tsf = cpu_to_le32(chsw->timestamp), .cs_count = chsw->count, + .cs_mode = chsw->block_tx, }; lockdep_assert_held(&mvm->mutex); + if (chsw->delay) + cmd.cs_delayed_bcn_count = + DIV_ROUND_UP(chsw->delay, vif->bss_conf.beacon_int); + return iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, CHANNEL_SWITCH_TIME_EVENT_CMD), 0, sizeof(cmd), &cmd); } +#define IWL_MAX_CSA_BLOCK_TX 1500 static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_channel_switch *chsw) @@ -4516,8 +4623,18 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, ((vif->bss_conf.beacon_int * (chsw->count - 1) - IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024); - if (chsw->block_tx) + if (chsw->block_tx) { iwl_mvm_csa_client_absent(mvm, vif); + /* + * If the CS count is undetermined or the quiet period + * would be too long, schedule work to disconnect gracefully + */ + if (!chsw->count || + chsw->count * vif->bss_conf.beacon_int > + IWL_MAX_CSA_BLOCK_TX) + schedule_delayed_work(&mvmvif->csa_work, + msecs_to_jiffies(IWL_MAX_CSA_BLOCK_TX)); + } if (mvmvif->bf_data.bf_enabled) { ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); @@ -4532,6 +4649,9 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, iwl_mvm_schedule_csa_period(mvm, vif, vif->bss_conf.beacon_int, apply_time); + + mvmvif->csa_count = chsw->count; + mvmvif->csa_misbehave = false; break; default: break; @@ -4552,52 +4672,42 @@ out_unlock: return ret; } -static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) +static void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_channel_switch *chsw) { - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - int ret; - - mutex_lock(&mvm->mutex); - - if (mvmvif->csa_failed) { - mvmvif->csa_failed = false; - ret = -EIO; - goto out_unlock; - } - - if (vif->type == NL80211_IFTYPE_STATION) { - struct iwl_mvm_sta *mvmsta; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_chan_switch_te_cmd cmd = { + .mac_id = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, + mvmvif->color)), + .action = cpu_to_le32(FW_CTXT_ACTION_MODIFY), + .tsf = cpu_to_le32(chsw->timestamp), + .cs_count = chsw->count, + .cs_mode = chsw->block_tx, + }; - mvmvif->csa_bcn_pending = false; - mvmsta = iwl_mvm_sta_from_staid_protected(mvm, - mvmvif->ap_sta_id); + if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CS_MODIFY)) + return; - if (WARN_ON(!mvmsta)) { - ret = -EIO; - goto out_unlock; + if (chsw->count >= mvmvif->csa_count && chsw->block_tx) { + if (mvmvif->csa_misbehave) { + /* Second time, give up on this AP */ + iwl_mvm_abort_channel_switch(hw, vif); + ieee80211_chswitch_done(vif, false); + mvmvif->csa_misbehave = false; + return; } - - iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false); - - iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); - - ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0); - if (ret) - goto out_unlock; - - iwl_mvm_stop_session_protection(mvm, vif); + mvmvif->csa_misbehave = true; } + mvmvif->csa_count = chsw->count; - mvmvif->ps_disabled = false; - - ret = iwl_mvm_power_update_ps(mvm); - -out_unlock: - mutex_unlock(&mvm->mutex); + IWL_DEBUG_MAC80211(mvm, "Modify CSA on mac 
%d\n", mvmvif->id); - return ret; + WARN_ON(iwl_mvm_send_cmd_pdu(mvm, + WIDE_ID(MAC_CONF_GROUP, + CHANNEL_SWITCH_TIME_EVENT_CMD), + CMD_ASYNC, sizeof(cmd), &cmd)); } static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop) @@ -5056,6 +5166,8 @@ const struct ieee80211_ops iwl_mvm_hw_ops = { .channel_switch = iwl_mvm_channel_switch, .pre_channel_switch = iwl_mvm_pre_channel_switch, .post_channel_switch = iwl_mvm_post_channel_switch, + .abort_channel_switch = iwl_mvm_abort_channel_switch, + .channel_switch_rx_beacon = iwl_mvm_channel_switch_rx_beacon, .tdls_channel_switch = iwl_mvm_tdls_channel_switch, .tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index a50dc53df086..8dc2a9850bc5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -8,7 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -490,6 +490,9 @@ struct iwl_mvm_vif { bool csa_countdown; bool csa_failed; u16 csa_target_freq; + u16 csa_count; + u16 csa_misbehave; + struct delayed_work csa_work; /* Indicates that we are waiting for a beacon on a new channel */ bool csa_bcn_pending; @@ -1199,7 +1202,6 @@ struct iwl_mvm { * @IWL_MVM_STATUS_IN_HW_RESTART: HW restart is active * @IWL_MVM_STATUS_IN_D0I3: NIC is in D0i3 * @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running - * @IWL_MVM_STATUS_D3_RECONFIG: D3 reconfiguration is being done * @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running * @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA */ @@ -1211,7 +1213,6 @@ enum iwl_mvm_status { IWL_MVM_STATUS_IN_HW_RESTART, IWL_MVM_STATUS_IN_D0I3, IWL_MVM_STATUS_ROC_AUX_RUNNING, - IWL_MVM_STATUS_D3_RECONFIG, IWL_MVM_STATUS_FIRMWARE_RUNNING, IWL_MVM_STATUS_NEED_FLUSH_P2P, }; @@ -1537,6 +1538,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm); u8 first_antenna(u8 mask); u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx); void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime); +u32 iwl_mvm_get_systime(struct iwl_mvm *mvm); /* Tx / Host Commands */ int __must_check iwl_mvm_send_cmd(struct iwl_mvm *mvm, @@ -1649,8 +1651,8 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue); -void iwl_mvm_rx_monitor_ndp(struct iwl_mvm *mvm, struct napi_struct *napi, - struct iwl_rx_cmd_buffer *rxb, int queue); +void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi, + struct iwl_rx_cmd_buffer *rxb, int queue); void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, 
struct iwl_rx_cmd_buffer *rxb, int queue); int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask, @@ -1784,14 +1786,13 @@ void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, /* MVM debugfs */ #ifdef CONFIG_IWLWIFI_DEBUGFS -int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir); +void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir); void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif); #else -static inline int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, - struct dentry *dbgfs_dir) +static inline void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, + struct dentry *dbgfs_dir) { - return 0; } static inline void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) @@ -2023,17 +2024,6 @@ static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm) static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm) { lockdep_assert_held(&mvm->mutex); - /* If IWL_MVM_STATUS_HW_RESTART_REQUESTED bit is set then we received - * an assert. Since we failed to bring the interface up, mac80211 - * will not attempt to reconfig the device, - * which handles the dump collection in assert flow, - * so trigger dump collection here. - */ - if (test_and_clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, - &mvm->status)) - iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert, - false, 0); - iwl_fw_cancel_timestamp(&mvm->fwrt); clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); iwl_fwrt_stop_device(&mvm->fwrt); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 13681b03c10e..8da9e5572fcf 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -8,7 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. 
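The new mvm.h fields above (csa_count, csa_misbehave, csa_work) back the CSA watchdog used across this series: the delayed work is initialized when the interface is added, scheduled from the pre-channel-switch path when TX stays blocked for too long, and canceled once the switch completes. A reduced kernel-style sketch of that lifecycle, with invented names; the 1500 ms bound mirrors IWL_MAX_CSA_BLOCK_TX:

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    #define MAX_BLOCK_TX_MS 1500 /* mirrors IWL_MAX_CSA_BLOCK_TX */

    struct my_vif {
            struct delayed_work csa_work;
    };

    static void csa_disconnect_wk(struct work_struct *wk)
    {
            struct my_vif *vif = container_of(wk, struct my_vif, csa_work.work);

            /* the AP never finished the switch: disconnect gracefully */
            (void)vif;
    }

    static void vif_add(struct my_vif *vif)
    {
            INIT_DELAYED_WORK(&vif->csa_work, csa_disconnect_wk);
    }

    static void csa_blocked_tx_started(struct my_vif *vif)
    {
            /* arm the watchdog in case the quiet period drags on */
            schedule_delayed_work(&vif->csa_work,
                                  msecs_to_jiffies(MAX_BLOCK_TX_MS));
    }

    static void csa_finished(struct my_vif *vif)
    {
            /* normal completion: make sure the watchdog cannot fire */
            cancel_delayed_work_sync(&vif->csa_work);
    }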
* * Redistribution and use in source and binary forms, with or without @@ -862,9 +862,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, min_backoff = iwl_mvm_min_backoff(mvm); iwl_mvm_thermal_initialize(mvm, min_backoff); - err = iwl_mvm_dbgfs_register(mvm, dbgfs_dir); - if (err) - goto out_unregister; + iwl_mvm_dbgfs_register(mvm, dbgfs_dir); if (!iwl_mvm_has_new_rx_stats_api(mvm)) memset(&mvm->rx_stats_v3, 0, @@ -881,14 +879,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, return op_mode; - out_unregister: - if (iwlmvm_mod_params.init_dbg) - return op_mode; - - ieee80211_unregister_hw(mvm->hw); - mvm->hw_registered = false; - iwl_mvm_leds_exit(mvm); - iwl_mvm_thermal_exit(mvm); out_free: iwl_fw_flush_dump(&mvm->fwrt); iwl_fw_runtime_free(&mvm->fwrt); @@ -1105,7 +1095,7 @@ static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode, else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE)) iwl_mvm_rx_frame_release(mvm, napi, rxb, 0); else if (cmd == WIDE_ID(DATA_PATH_GROUP, RX_NO_DATA_NOTIF)) - iwl_mvm_rx_monitor_ndp(mvm, napi, rxb, 0); + iwl_mvm_rx_monitor_no_data(mvm, napi, rxb, 0); else iwl_mvm_rx_common(mvm, rxb, pkt); } @@ -1291,8 +1281,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) * can't recover this since we're already half suspended. */ if (!mvm->fw_restart && fw_error) { - iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert, - false, 0); + iwl_fw_error_collect(&mvm->fwrt); } else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { struct iwl_mvm_reprobe *reprobe; @@ -1340,6 +1329,8 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) } } + iwl_fw_error_collect(&mvm->fwrt); + if (fw_error && mvm->fw_restart > 0) mvm->fw_restart--; set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c index a28283ff7295..79f9eaf8dd1b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -27,7 +27,7 @@ * BSD LICENSE * * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. 
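With iwl_mvm_dbgfs_register() now infallible, the ops.c hunk above can delete the out_unregister unwind label outright. The underlying idiom, sketched in plain C with placeholder setup functions: each fallible step gets a goto target that undoes only what already succeeded, and a step that cannot fail needs no target at all:

    #include <stdbool.h>

    static bool setup_a(void) { return true; } /* fallible */
    static void setup_b(void) { }              /* infallible: no unwind label */
    static bool setup_c(void) { return true; } /* fallible */
    static void undo_a(void) { }

    static int start(void)
    {
            if (!setup_a())
                    goto out;
            setup_b();
            if (!setup_c())
                    goto out_undo_a;
            return 0;

    out_undo_a:
            undo_a();
    out:
            return -1;
    }

    int main(void)
    {
            return start();
    }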
* * Redistribution and use in source and binary forms, with or without @@ -345,6 +345,37 @@ out: rcu_read_unlock(); } +static u16 rs_fw_get_max_amsdu_len(struct ieee80211_sta *sta) +{ + const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; + const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + + if (vht_cap && vht_cap->vht_supported) { + switch (vht_cap->cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK) { + case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454: + return IEEE80211_MAX_MPDU_LEN_VHT_11454; + case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991: + return IEEE80211_MAX_MPDU_LEN_VHT_7991; + default: + return IEEE80211_MAX_MPDU_LEN_VHT_3895; + } + + } else if (ht_cap && ht_cap->ht_supported) { + if (ht_cap->cap & IEEE80211_HT_CAP_MAX_AMSDU) + /* + * aggregation is offloaded, so we need to assume it is + * enabled and that the max MPDU in an A-MPDU is 4095 + * (spec 802.11-2016 9.3.2.1) + */ + return IEEE80211_MAX_MPDU_LEN_HT_BA; + else + return IEEE80211_MAX_MPDU_LEN_HT_3839; + } + + /* in legacy mode no A-MSDU is enabled, so return zero */ + return 0; +} + void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, enum nl80211_band band, bool update) { @@ -353,14 +384,15 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw; u32 cmd_id = iwl_cmd_id(TLC_MNG_CONFIG_CMD, DATA_PATH_GROUP, 0); struct ieee80211_supported_band *sband; + u16 max_amsdu_len = rs_fw_get_max_amsdu_len(sta); struct iwl_tlc_config_cmd cfg_cmd = { .sta_id = mvmsta->sta_id, .max_ch_width = update ? rs_fw_bw_from_sta_bw(sta) : RATE_MCS_CHAN_WIDTH_20, .flags = cpu_to_le16(rs_fw_set_config_flags(mvm, sta)), .chains = rs_fw_set_active_chains(iwl_mvm_get_valid_tx_ant(mvm)), - .max_mpdu_len = cpu_to_le16(sta->max_amsdu_len), .sgi_ch_width_supp = rs_fw_sgi_cw_support(sta), + .max_mpdu_len = cpu_to_le16(max_amsdu_len), .amsdu = iwl_mvm_is_csum_supported(mvm), }; int ret; @@ -373,6 +405,12 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, sband = hw->wiphy->bands[band]; rs_fw_set_supp_rates(sta, sband, &cfg_cmd); + /* + * since TLC offload works with only one mode we can assume + * that only VHT/HT is used, and also set it as the station's + * max A-MSDU length + */ + sta->max_amsdu_len = max_amsdu_len; + ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cfg_cmd), &cfg_cmd); if (ret) IWL_ERR(mvm, "Failed to send rate scale config (%d)\n", ret); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index e231a44d2423..c182821ab22b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -4078,9 +4078,8 @@ static ssize_t iwl_dbgfs_ss_force_write(struct iwl_lq_sta *lq_sta, char *buf, #define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \ _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_lq_sta) #define MVM_DEBUGFS_ADD_FILE_RS(name, parent, mode) do { \ - if (!debugfs_create_file(#name, mode, parent, lq_sta, \ - &iwl_dbgfs_##name##_ops)) \ - goto err; \ + debugfs_create_file(#name, mode, parent, lq_sta, \ + &iwl_dbgfs_##name##_ops); \ } while (0) MVM_DEBUGFS_READ_WRITE_FILE_OPS(ss_force, 32); @@ -4108,9 +4107,6 @@ static void rs_drv_add_sta_debugfs(void *mvm, void *priv_sta, &lq_sta->pers.dbg_fixed_txp_reduction); MVM_DEBUGFS_ADD_FILE_RS(ss_force, dir, 0600); - return; -err: - IWL_ERR((struct iwl_mvm *)mvm, "Can't create debugfs entity\n"); } void rs_remove_sta_debugfs(void *mvm, void *mvm_sta) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c 
b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index b516fd1867ec..1824566d08fc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -8,7 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -1699,8 +1699,8 @@ out: rcu_read_unlock(); } -void iwl_mvm_rx_monitor_ndp(struct iwl_mvm *mvm, struct napi_struct *napi, - struct iwl_rx_cmd_buffer *rxb, int queue) +void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi, + struct iwl_rx_cmd_buffer *rxb, int queue) { struct ieee80211_rx_status *rx_status; struct iwl_rx_packet *pkt = rxb_addr(rxb); @@ -1721,10 +1721,6 @@ void iwl_mvm_rx_monitor_ndp(struct iwl_mvm *mvm, struct napi_struct *napi, if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))) return; - /* Currently only NDP type is supported */ - if (info_type != RX_NO_DATA_INFO_TYPE_NDP) - return; - energy_a = (rssi & RX_NO_DATA_CHAIN_A_MSK) >> RX_NO_DATA_CHAIN_A_POS; energy_b = (rssi & RX_NO_DATA_CHAIN_B_MSK) >> RX_NO_DATA_CHAIN_B_POS; channel = (rssi & RX_NO_DATA_CHANNEL_MSK) >> RX_NO_DATA_CHANNEL_POS; @@ -1746,9 +1742,22 @@ void iwl_mvm_rx_monitor_ndp(struct iwl_mvm *mvm, struct napi_struct *napi, /* 0-length PSDU */ rx_status->flag |= RX_FLAG_NO_PSDU; - /* currently this is the only type for which we get this notif */ - rx_status->zero_length_psdu_type = - IEEE80211_RADIOTAP_ZERO_LEN_PSDU_SOUNDING; + + switch (info_type) { + case RX_NO_DATA_INFO_TYPE_NDP: + rx_status->zero_length_psdu_type = + IEEE80211_RADIOTAP_ZERO_LEN_PSDU_SOUNDING; + break; + case RX_NO_DATA_INFO_TYPE_MU_UNMATCHED: + case RX_NO_DATA_INFO_TYPE_HE_TB_UNMATCHED: + rx_status->zero_length_psdu_type = + IEEE80211_RADIOTAP_ZERO_LEN_PSDU_NOT_CAPTURED; + break; + default: + rx_status->zero_length_psdu_type = + IEEE80211_RADIOTAP_ZERO_LEN_PSDU_VENDOR; + break; + } /* This may be overridden by iwl_mvm_rx_he() to HE_RU */ switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index 78694bc38e76..d9ddf9ff6428 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -1082,21 +1082,23 @@ static void iwl_mvm_fill_scan_dwell(struct iwl_mvm *mvm, dwell->extended = IWL_SCAN_DWELL_EXTENDED; } -static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels) +static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels, + u32 max_channels) { struct ieee80211_supported_band *band; int i, j = 0; band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ]; - for (i = 0; i < band->n_channels; i++, j++) + for (i = 0; i < band->n_channels && j < max_channels; i++, j++) channels[j] = band->channels[i].hw_value; band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ]; - for (i = 
0; i < band->n_channels; i++, j++) + for (i = 0; i < band->n_channels && j < max_channels; i++, j++) channels[j] = band->channels[i].hw_value; } static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config, - u32 flags, u8 channel_flags) + u32 flags, u8 channel_flags, + u32 max_channels) { enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, NULL); struct iwl_scan_config_v1 *cfg = config; @@ -1115,11 +1117,12 @@ static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config, cfg->bcast_sta_id = mvm->aux_sta.sta_id; cfg->channel_flags = channel_flags; - iwl_mvm_fill_channels(mvm, cfg->channel_array); + iwl_mvm_fill_channels(mvm, cfg->channel_array, max_channels); } static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config, - u32 flags, u8 channel_flags) + u32 flags, u8 channel_flags, + u32 max_channels) { struct iwl_scan_config *cfg = config; @@ -1162,7 +1165,7 @@ static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config, cfg->bcast_sta_id = mvm->aux_sta.sta_id; cfg->channel_flags = channel_flags; - iwl_mvm_fill_channels(mvm, cfg->channel_array); + iwl_mvm_fill_channels(mvm, cfg->channel_array, max_channels); } int iwl_mvm_config_scan(struct iwl_mvm *mvm) @@ -1181,7 +1184,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) u8 channel_flags; if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels)) - return -ENOBUFS; + num_channels = mvm->fw->ucode_capa.n_scan_channels; if (iwl_mvm_is_cdb_supported(mvm)) { type = iwl_mvm_get_scan_type_band(mvm, NULL, @@ -1234,9 +1237,11 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) flags |= (iwl_mvm_is_scan_fragmented(hb_type)) ? SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED : SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED; - iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags); + iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags, + num_channels); } else { - iwl_mvm_fill_scan_config_v1(mvm, cfg, flags, channel_flags); + iwl_mvm_fill_scan_config_v1(mvm, cfg, flags, channel_flags, + num_channels); } cmd.data[0] = cfg; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 98d123dd7177..eb452e9dce05 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -2277,7 +2277,8 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00}; const u8 *maddr = _maddr; struct iwl_trans_txq_scd_cfg cfg = { - .fifo = IWL_MVM_TX_FIFO_MCAST, + .fifo = vif->type == NL80211_IFTYPE_AP ? + IWL_MVM_TX_FIFO_MCAST : IWL_MVM_TX_FIFO_BE, .sta_id = msta->sta_id, .tid = 0, .aggregate = false, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c index 859aa5a4e6b5..9df21a8d1fc1 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c @@ -7,7 +7,7 @@ * * Copyright(c) 2014 Intel Mobile Communications GmbH * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(C) 2018 Intel Corporation + * Copyright(C) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -29,7 +29,7 @@ * * Copyright(c) 2014 Intel Mobile Communications GmbH * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(C) 2018 Intel Corporation + * Copyright(C) 2018 - 2019 Intel Corporation * All rights reserved. 
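The scan.c changes above thread a max_channels bound through the channel fill, so a firmware that advertises fewer scan channels than the driver knows about can no longer overflow the command buffer; iwl_mvm_config_scan() now clamps num_channels instead of failing with -ENOBUFS. A standalone sketch of the bounded two-band fill, with invented band data:

    #include <stdint.h>
    #include <stdio.h>
    #include <stddef.h>

    struct band {
            const uint8_t *channels;
            size_t n_channels;
    };

    /* copy channels from both bands, never writing past max_channels */
    static size_t fill_channels(const struct band *b1, const struct band *b2,
                                uint8_t *out, size_t max_channels)
    {
            size_t i, j = 0;

            for (i = 0; i < b1->n_channels && j < max_channels; i++, j++)
                    out[j] = b1->channels[i];
            for (i = 0; i < b2->n_channels && j < max_channels; i++, j++)
                    out[j] = b2->channels[i];
            return j;
    }

    int main(void)
    {
            const uint8_t g2[] = { 1, 6, 11 }, g5[] = { 36, 40, 44, 48 };
            struct band b2 = { g2, 3 }, b5 = { g5, 4 };
            uint8_t cmd_buf[5]; /* firmware advertised only 5 slots */

            size_t n = fill_channels(&b2, &b5, cmd_buf, sizeof(cmd_buf));
            printf("filled %zu channels\n", n); /* prints 5, not 7 */
            return 0;
    }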
* * Redistribution and use in source and binary forms, with or without @@ -252,8 +252,7 @@ static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm, /* we only send requests to our switching peer - update sent time */ if (state == IWL_MVM_TDLS_SW_REQ_SENT) - mvm->tdls_cs.peer.sent_timestamp = - iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG); + mvm->tdls_cs.peer.sent_timestamp = iwl_mvm_get_systime(mvm); if (state == IWL_MVM_TDLS_SW_IDLE) mvm->tdls_cs.cur_sta_id = IWL_MVM_INVALID_STA; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index 9693fa4cdc39..50314018d157 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -8,7 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -234,6 +234,7 @@ iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm, break; } iwl_mvm_csa_client_absent(mvm, te_data->vif); + cancel_delayed_work_sync(&mvmvif->csa_work); ieee80211_chswitch_done(te_data->vif, true); break; default: diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index 4649327abb45..b9914efc55c4 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -1418,6 +1418,16 @@ void iwl_mvm_tcm_rm_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif) cancel_delayed_work_sync(&mvmvif->uapsd_nonagg_detected_wk); } +u32 iwl_mvm_get_systime(struct iwl_mvm *mvm) +{ + u32 reg_addr = DEVICE_SYSTEM_TIME_REG; + + if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000 && + mvm->trans->cfg->gp2_reg_addr) + reg_addr = mvm->trans->cfg->gp2_reg_addr; + + return iwl_read_prph(mvm->trans, reg_addr); +} void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime) { @@ -1432,7 +1442,7 @@ void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime) iwl_mvm_power_update_device(mvm); } - *gp2 = iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG); + *gp2 = iwl_mvm_get_systime(mvm); *boottime = ktime_get_boot_ns(); if (!ps_disabled) { diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 9f1af8da9dc1..70d0fa0eae2f 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -963,9 +963,6 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x2723, 0x4080, iwl_ax200_cfg_cc)}, {IWL_PCI_DEVICE(0x2723, 0x4088, iwl_ax200_cfg_cc)}, - {IWL_PCI_DEVICE(0x1a56, 0x1653, killer1650w_2ax_cfg)}, - {IWL_PCI_DEVICE(0x1a56, 0x1654, killer1650x_2ax_cfg)}, - {IWL_PCI_DEVICE(0x2725, 0x0090, iwlax210_2ax_cfg_so_hr_a0)}, {IWL_PCI_DEVICE(0x7A70, 0x0090, iwlax210_2ax_cfg_so_hr_a0)}, {IWL_PCI_DEVICE(0x7A70, 0x0310, 
iwlax210_2ax_cfg_so_hr_a0)}, @@ -1047,9 +1044,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } /* register transport layer debugfs here */ - ret = iwl_trans_pcie_dbgfs_register(iwl_trans); - if (ret) - goto out_free_drv; + iwl_trans_pcie_dbgfs_register(iwl_trans); /* if RTPM is in use, enable it in our device */ if (iwl_trans->runtime_pm_mode != IWL_PLAT_PM_MODE_DISABLED) { @@ -1078,8 +1073,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; -out_free_drv: - iwl_drv_stop(iwl_trans->drv); out_free_trans: iwl_trans_pcie_free(iwl_trans); return ret; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index 59213164f35e..4bf745c7bd6c 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -106,7 +106,6 @@ struct iwl_host_cmd; * @page: driver's pointer to the rxb page * @invalid: rxb is in driver ownership - not owned by HW * @vid: index of this rxb in the global table - * @size: size used from the buffer */ struct iwl_rx_mem_buffer { dma_addr_t page_dma; @@ -114,7 +113,6 @@ struct iwl_rx_mem_buffer { u16 vid; bool invalid; struct list_head list; - u32 size; }; /** @@ -135,46 +133,32 @@ struct isr_statistics { u32 unhandled; }; -#define IWL_RX_TD_TYPE_MSK 0xff000000 -#define IWL_RX_TD_SIZE_MSK 0x00ffffff -#define IWL_RX_TD_SIZE_2K BIT(11) -#define IWL_RX_TD_TYPE 0 - /** * struct iwl_rx_transfer_desc - transfer descriptor - * @type_n_size: buffer type (bit 0: external buff valid, - * bit 1: optional footer valid, bit 2-7: reserved) - * and buffer size * @addr: ptr to free buffer start address * @rbid: unique tag of the buffer * @reserved: reserved */ struct iwl_rx_transfer_desc { - __le32 type_n_size; - __le64 addr; __le16 rbid; - __le16 reserved; + __le16 reserved[3]; + __le64 addr; } __packed; -#define IWL_RX_CD_SIZE 0xffffff00 +#define IWL_RX_CD_FLAGS_FRAGMENTED BIT(0) /** * struct iwl_rx_completion_desc - completion descriptor - * @type: buffer type (bit 0: external buff valid, - * bit 1: optional footer valid, bit 2-7: reserved) - * @status: status of the completion * @reserved1: reserved * @rbid: unique tag of the received buffer - * @size: buffer size, masked by IWL_RX_CD_SIZE + * @flags: flags (0: fragmented, all others: reserved) * @reserved2: reserved */ struct iwl_rx_completion_desc { - u8 type; - u8 status; - __le16 reserved1; + __le32 reserved1; __le16 rbid; - __le32 size; - u8 reserved2[22]; + u8 flags; + u8 reserved2[25]; } __packed; /** @@ -1046,12 +1030,9 @@ void iwl_trans_pcie_dump_regs(struct iwl_trans *trans); void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans); #ifdef CONFIG_IWLWIFI_DEBUGFS -int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans); +void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans); #else -static inline int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) -{ - return 0; -} +static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { } #endif int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index 8d4f0628622b..69fcfa930791 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -282,9 +282,8 @@ static void iwl_pcie_restock_bd(struct iwl_trans *trans, if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) { struct iwl_rx_transfer_desc *bd = rxq->bd; - 
bd[rxq->write].type_n_size = - cpu_to_le32((IWL_RX_TD_TYPE & IWL_RX_TD_TYPE_MSK) | - ((IWL_RX_TD_SIZE_2K >> 8) & IWL_RX_TD_SIZE_MSK)); + BUILD_BUG_ON(sizeof(*bd) != 2 * sizeof(u64)); + bd[rxq->write].addr = cpu_to_le64(rxb->page_dma); bd[rxq->write].rbid = cpu_to_le16(rxb->vid); } else { @@ -1265,9 +1264,6 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, .truesize = max_len, }; - if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) - rxcb.status = rxq->cd[i].status; - pkt = rxb_addr(&rxcb); if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) { @@ -1394,6 +1390,8 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans, struct iwl_rx_mem_buffer *rxb; u16 vid; + BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32); + if (!trans->cfg->mq_rx_supported) { rxb = rxq->queue[i]; rxq->queue[i] = NULL; @@ -1415,9 +1413,6 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans, IWL_DEBUG_RX(trans, "Got virtual RB ID %u\n", (u32)rxb->vid); - if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) - rxb->size = le32_to_cpu(rxq->cd[i].size) & IWL_RX_CD_SIZE; - rxb->invalid = true; return rxb; @@ -2212,6 +2207,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) "Hardware error detected. Restarting.\n"); isr_stats->hw++; + trans->hw_error = true; iwl_pcie_irq_handle_error(trans); } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index c4375b868901..cccb8bbd7ea7 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -2442,9 +2442,8 @@ void iwl_pcie_dump_csr(struct iwl_trans *trans) #ifdef CONFIG_IWLWIFI_DEBUGFS /* create and remove of files */ #define DEBUGFS_ADD_FILE(name, parent, mode) do { \ - if (!debugfs_create_file(#name, mode, parent, trans, \ - &iwl_dbgfs_##name##_ops)) \ - goto err; \ + debugfs_create_file(#name, mode, parent, trans, \ + &iwl_dbgfs_##name##_ops); \ } while (0) /* file operation */ @@ -2847,7 +2846,7 @@ static const struct file_operations iwl_dbgfs_monitor_data_ops = { }; /* Create the debugfs files and directories */ -int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) +void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { struct dentry *dir = trans->dbgfs_dir; @@ -2858,11 +2857,6 @@ int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) DEBUGFS_ADD_FILE(fh_reg, dir, 0400); DEBUGFS_ADD_FILE(rfkill, dir, 0600); DEBUGFS_ADD_FILE(monitor_data, dir, 0400); - return 0; - -err: - IWL_ERR(trans, "failed to create the trans debugfs entry\n"); - return -ENOMEM; } static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans) @@ -3012,10 +3006,14 @@ static void iwl_trans_pcie_dump_pointers(struct iwl_trans *trans, struct iwl_fw_error_dump_fw_mon *fw_mon_data) { - u32 base, write_ptr, wrap_cnt; + u32 base, base_high, write_ptr, write_ptr_val, wrap_cnt; - /* If there was a dest TLV - use the values from there */ - if (trans->ini_valid) { + if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { + base = DBGC_CUR_DBGBUF_BASE_ADDR_LSB; + base_high = DBGC_CUR_DBGBUF_BASE_ADDR_MSB; + write_ptr = DBGC_CUR_DBGBUF_STATUS; + wrap_cnt = DBGC_DBGBUF_WRAP_AROUND; + } else if (trans->ini_valid) { base = iwl_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2); write_ptr = iwl_umac_prph(trans, MON_BUFF_WRPTR_VER2); wrap_cnt = iwl_umac_prph(trans, MON_BUFF_CYCLE_CNT_VER2); @@ -3028,12 +3026,18 @@ iwl_trans_pcie_dump_pointers(struct iwl_trans *trans, write_ptr = MON_BUFF_WRPTR; 
wrap_cnt = MON_BUFF_CYCLE_CNT; } - fw_mon_data->fw_mon_wr_ptr = - cpu_to_le32(iwl_read_prph(trans, write_ptr)); + + write_ptr_val = iwl_read_prph(trans, write_ptr); fw_mon_data->fw_mon_cycle_cnt = cpu_to_le32(iwl_read_prph(trans, wrap_cnt)); fw_mon_data->fw_mon_base_ptr = cpu_to_le32(iwl_read_prph(trans, base)); + if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { + fw_mon_data->fw_mon_base_high_ptr = + cpu_to_le32(iwl_read_prph(trans, base_high)); + write_ptr_val &= DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK; + } + fw_mon_data->fw_mon_wr_ptr = cpu_to_le32(write_ptr_val); } static u32 @@ -3044,9 +3048,10 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans, u32 len = 0; if ((trans->num_blocks && - trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) || - (trans->dbg_dest_tlv && !trans->ini_valid) || - (trans->ini_valid && trans->num_blocks)) { + (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 || + trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210 || + trans->ini_valid)) || + (trans->dbg_dest_tlv && !trans->ini_valid)) { struct iwl_fw_error_dump_fw_mon *fw_mon_data; (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR); @@ -3165,8 +3170,10 @@ static struct iwl_trans_dump_data len = sizeof(*dump_data); /* host commands */ - len += sizeof(*data) + - cmdq->n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE); + if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD)) + len += sizeof(*data) + + cmdq->n_window * (sizeof(*txcmd) + + TFD_MAX_PAYLOAD_SIZE); /* FW monitor */ if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)) @@ -3540,6 +3547,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF)) { trans->cfg = &iwlax210_2ax_cfg_so_gf_a0; + } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == + CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF4)) { + trans->cfg = &iwlax210_2ax_cfg_so_gf4_a0; } } else if (cfg == &iwl_ax101_cfg_qu_hr) { if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 7be73e2c4681..4a9522fb682f 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -999,7 +999,8 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans) slots_num = max_t(u32, TFD_CMD_SLOTS, trans->cfg->min_txq_size); else - slots_num = TFD_TX_CMD_SLOTS; + slots_num = max_t(u32, TFD_TX_CMD_SLOTS, + trans->cfg->min_256_ba_txq_size); trans_pcie->txq[txq_id] = &trans_pcie->txq_memory[txq_id]; ret = iwl_pcie_txq_alloc(trans, trans_pcie->txq[txq_id], slots_num, cmd_queue); @@ -1052,7 +1053,8 @@ int iwl_pcie_tx_init(struct iwl_trans *trans) slots_num = max_t(u32, TFD_CMD_SLOTS, trans->cfg->min_txq_size); else - slots_num = TFD_TX_CMD_SLOTS; + slots_num = max_t(u32, TFD_TX_CMD_SLOTS, + trans->cfg->min_256_ba_txq_size); ret = iwl_pcie_txq_init(trans, trans_pcie->txq[txq_id], slots_num, cmd_queue); if (ret) { diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 524eb5805995..0dcb511f44e2 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -409,8 +409,8 @@ static int mac80211_hwsim_vendor_cmd_test(struct wiphy *wiphy, int err; u32 val; - err = nla_parse(tb, QCA_WLAN_VENDOR_ATTR_MAX, data, data_len, - hwsim_vendor_test_policy, NULL); + err = nla_parse_deprecated(tb, QCA_WLAN_VENDOR_ATTR_MAX, data, + data_len, hwsim_vendor_test_policy, NULL); if (err) return err; 
if (!tb[QCA_WLAN_VENDOR_ATTR_TEST]) @@ -1936,8 +1936,8 @@ static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw, struct sk_buff *skb; int err, ps; - err = nla_parse(tb, HWSIM_TM_ATTR_MAX, data, len, - hwsim_testmode_policy, NULL); + err = nla_parse_deprecated(tb, HWSIM_TM_ATTR_MAX, data, len, + hwsim_testmode_policy, NULL); if (err) return err; @@ -2810,6 +2810,12 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, ieee80211_hw_set(hw, SIGNAL_DBM); ieee80211_hw_set(hw, SUPPORTS_PS); ieee80211_hw_set(hw, TDLS_WIDER_BW); + + /* We only have SW crypto and only implement the A-MPDU API + * (but don't really build A-MPDUs) so can have extended key + * support + */ + ieee80211_hw_set(hw, EXT_KEY_ID_NATIVE); if (rctbl) ieee80211_hw_set(hw, SUPPORTS_RC_TABLE); ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID); @@ -3631,35 +3637,35 @@ done: static const struct genl_ops hwsim_ops[] = { { .cmd = HWSIM_CMD_REGISTER, - .policy = hwsim_genl_policy, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = hwsim_register_received_nl, .flags = GENL_UNS_ADMIN_PERM, }, { .cmd = HWSIM_CMD_FRAME, - .policy = hwsim_genl_policy, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = hwsim_cloned_frame_received_nl, }, { .cmd = HWSIM_CMD_TX_INFO_FRAME, - .policy = hwsim_genl_policy, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = hwsim_tx_info_frame_received_nl, }, { .cmd = HWSIM_CMD_NEW_RADIO, - .policy = hwsim_genl_policy, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = hwsim_new_radio_nl, .flags = GENL_UNS_ADMIN_PERM, }, { .cmd = HWSIM_CMD_DEL_RADIO, - .policy = hwsim_genl_policy, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = hwsim_del_radio_nl, .flags = GENL_UNS_ADMIN_PERM, }, { .cmd = HWSIM_CMD_GET_RADIO, - .policy = hwsim_genl_policy, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = hwsim_get_radio_nl, .dumpit = hwsim_dump_radio_nl, }, @@ -3669,6 +3675,7 @@ static struct genl_family hwsim_genl_family __ro_after_init = { .name = "MAC80211_HWSIM", .version = 1, .maxattr = HWSIM_ATTR_MAX, + .policy = hwsim_genl_policy, .netnsok = true, .module = THIS_MODULE, .ops = hwsim_ops, @@ -3905,6 +3912,8 @@ static int __init init_mac80211_hwsim(void) param.p2p_device = support_p2p_device; param.use_chanctx = channels > 1; param.iftypes = HWSIM_IFTYPE_SUPPORT_MASK; + if (param.p2p_device) + param.iftypes |= BIT(NL80211_IFTYPE_P2P_DEVICE); err = mac80211_hwsim_new_radio(NULL, ¶m); if (err < 0) diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index c46f0a54a0c7..e11a4bb67172 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -4059,8 +4059,8 @@ static int mwifiex_tm_cmd(struct wiphy *wiphy, struct wireless_dev *wdev, if (!priv) return -EINVAL; - err = nla_parse(tb, MWIFIEX_TM_ATTR_MAX, data, len, mwifiex_tm_policy, - NULL); + err = nla_parse_deprecated(tb, MWIFIEX_TM_ATTR_MAX, data, len, + mwifiex_tm_policy, NULL); if (err) return err; @@ -4082,16 +4082,20 @@ static int mwifiex_tm_cmd(struct wiphy *wiphy, struct wireless_dev *wdev, if (mwifiex_send_cmd(priv, 0, 0, 0, hostcmd, true)) { dev_err(priv->adapter->dev, "Failed to process hostcmd\n"); + kfree(hostcmd); return -EFAULT; } /* process hostcmd response*/ skb = cfg80211_testmode_alloc_reply_skb(wiphy, hostcmd->len); - if (!skb) + if (!skb) { + kfree(hostcmd); return -ENOMEM; + } err = 
nla_put(skb, MWIFIEX_TM_ATTR_DATA, hostcmd->len, hostcmd->cmd); if (err) { + kfree(hostcmd); kfree_skb(skb); return -EMSGSIZE; } diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c index 60db2b969e20..8c35441fd9b7 100644 --- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c +++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c @@ -341,6 +341,12 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter) sleep_cfm_tmp = dev_alloc_skb(sizeof(struct mwifiex_opt_sleep_confirm) + MWIFIEX_TYPE_LEN); + if (!sleep_cfm_tmp) { + mwifiex_dbg(adapter, ERROR, + "SLEEP_CFM: dev_alloc_skb failed\n"); + return -ENOMEM; + } + skb_put(sleep_cfm_tmp, sizeof(struct mwifiex_opt_sleep_confirm) + MWIFIEX_TYPE_LEN); put_unaligned_le32(MWIFIEX_USB_TYPE_CMD, sleep_cfm_tmp->data); diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index 20cee5c397fb..f6da8edab7f1 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -1282,8 +1282,7 @@ static struct net_device_stats *mwifiex_get_stats(struct net_device *dev) static u16 mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb, - struct net_device *sb_dev, - select_queue_fallback_t fallback) + struct net_device *sb_dev) { skb->priority = cfg80211_classify8021d(skb, NULL); return mwifiex_1d_to_wmm_queue[skb->priority]; diff --git a/drivers/net/wireless/marvell/mwifiex/sta_rx.c b/drivers/net/wireless/marvell/mwifiex/sta_rx.c index fb28a5c7f441..52a2ce2e78b0 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_rx.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_rx.c @@ -250,7 +250,8 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv, local_rx_pd->nf); } } else { - if (rx_pkt_type != PKT_TYPE_BAR) + if (rx_pkt_type != PKT_TYPE_BAR && + local_rx_pd->priority < MAX_NUM_TID) priv->rx_seq[local_rx_pd->priority] = seq_num; memcpy(ta, priv->curr_bss_params.bss_descriptor.mac_address, ETH_ALEN); diff --git a/drivers/net/wireless/quantenna/qtnfmac/bus.h b/drivers/net/wireless/quantenna/qtnfmac/bus.h index 14b569b6d1b5..7cea08f71838 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/bus.h +++ b/drivers/net/wireless/quantenna/qtnfmac/bus.h @@ -13,12 +13,11 @@ #define QTNF_MAX_MAC 3 enum qtnf_fw_state { - QTNF_FW_STATE_RESET, - QTNF_FW_STATE_FW_DNLD_DONE, + QTNF_FW_STATE_DETACHED, QTNF_FW_STATE_BOOT_DONE, QTNF_FW_STATE_ACTIVE, - QTNF_FW_STATE_DETACHED, - QTNF_FW_STATE_EP_DEAD, + QTNF_FW_STATE_RUNNING, + QTNF_FW_STATE_DEAD, }; struct qtnf_bus; @@ -50,6 +49,7 @@ struct qtnf_bus { struct napi_struct mux_napi; struct net_device mux_dev; struct workqueue_struct *workqueue; + struct workqueue_struct *hprio_workqueue; struct work_struct fw_work; struct work_struct event_work; struct mutex bus_lock; /* lock during command/event processing */ @@ -58,6 +58,23 @@ struct qtnf_bus { char bus_priv[0] __aligned(sizeof(void *)); }; +static inline bool qtnf_fw_is_up(struct qtnf_bus *bus) +{ + enum qtnf_fw_state state = bus->fw_state; + + return ((state == QTNF_FW_STATE_ACTIVE) || + (state == QTNF_FW_STATE_RUNNING)); +} + +static inline bool qtnf_fw_is_attached(struct qtnf_bus *bus) +{ + enum qtnf_fw_state state = bus->fw_state; + + return ((state == QTNF_FW_STATE_ACTIVE) || + (state == QTNF_FW_STATE_RUNNING) || + (state == QTNF_FW_STATE_DEAD)); +} + static inline void *get_bus_priv(struct qtnf_bus *bus) { if (WARN(!bus, "qtnfmac: invalid bus pointer")) diff --git 
a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c index dcb0991432f4..c78500bcaa2d 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c +++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c @@ -144,6 +144,7 @@ int qtnf_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev) { struct net_device *netdev = wdev->netdev; struct qtnf_vif *vif; + struct sk_buff *skb; if (WARN_ON(!netdev)) return -EFAULT; @@ -157,6 +158,11 @@ int qtnf_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev) if (netif_carrier_ok(netdev)) netif_carrier_off(netdev); + while ((skb = skb_dequeue(&vif->high_pri_tx_queue))) + dev_kfree_skb_any(skb); + + cancel_work_sync(&vif->high_pri_tx_work); + if (netdev->reg_state == NETREG_REGISTERED) unregister_netdevice(netdev); @@ -424,13 +430,13 @@ qtnf_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, *cookie = short_cookie; if (params->offchan) - flags |= QLINK_MGMT_FRAME_TX_FLAG_OFFCHAN; + flags |= QLINK_FRAME_TX_FLAG_OFFCHAN; if (params->no_cck) - flags |= QLINK_MGMT_FRAME_TX_FLAG_NO_CCK; + flags |= QLINK_FRAME_TX_FLAG_NO_CCK; if (params->dont_wait_for_ack) - flags |= QLINK_MGMT_FRAME_TX_FLAG_ACK_NOWAIT; + flags |= QLINK_FRAME_TX_FLAG_ACK_NOWAIT; /* If channel is not specified, pass "freq = 0" to tell device * firmware to use current channel. @@ -445,9 +451,8 @@ qtnf_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, le16_to_cpu(mgmt_frame->frame_control), mgmt_frame->da, params->len, short_cookie, flags); - return qtnf_cmd_send_mgmt_frame(vif, short_cookie, flags, - freq, - params->buf, params->len); + return qtnf_cmd_send_frame(vif, short_cookie, flags, + freq, params->buf, params->len); } static int @@ -993,53 +998,31 @@ static struct cfg80211_ops qtn_cfg80211_ops = { #endif }; -static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy_in, +static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy, struct regulatory_request *req) { - struct qtnf_wmac *mac = wiphy_priv(wiphy_in); - struct qtnf_bus *bus = mac->bus; - struct wiphy *wiphy; - unsigned int mac_idx; + struct qtnf_wmac *mac = wiphy_priv(wiphy); enum nl80211_band band; int ret; pr_debug("MAC%u: initiator=%d alpha=%c%c\n", mac->macid, req->initiator, req->alpha2[0], req->alpha2[1]); - ret = qtnf_cmd_reg_notify(bus, req); + ret = qtnf_cmd_reg_notify(mac, req); if (ret) { - if (ret == -EOPNOTSUPP) { - pr_warn("reg update not supported\n"); - } else if (ret == -EALREADY) { - pr_info("regulatory domain is already set to %c%c", - req->alpha2[0], req->alpha2[1]); - } else { - pr_err("failed to update reg domain to %c%c\n", - req->alpha2[0], req->alpha2[1]); - } - + pr_err("MAC%u: failed to update region to %c%c: %d\n", + mac->macid, req->alpha2[0], req->alpha2[1], ret); return; } - for (mac_idx = 0; mac_idx < QTNF_MAX_MAC; ++mac_idx) { - if (!(bus->hw_info.mac_bitmap & (1 << mac_idx))) + for (band = 0; band < NUM_NL80211_BANDS; ++band) { + if (!wiphy->bands[band]) continue; - mac = bus->mac[mac_idx]; - if (!mac) - continue; - - wiphy = priv_to_wiphy(mac); - - for (band = 0; band < NUM_NL80211_BANDS; ++band) { - if (!wiphy->bands[band]) - continue; - - ret = qtnf_cmd_band_info_get(mac, wiphy->bands[band]); - if (ret) - pr_err("failed to get chan info for mac %u band %u\n", - mac_idx, band); - } + ret = qtnf_cmd_band_info_get(mac, wiphy->bands[band]); + if (ret) + pr_err("MAC%u: failed to update band %u\n", + mac->macid, band); } } @@ -1095,6 +1078,7 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac 
*mac) struct wiphy *wiphy = priv_to_wiphy(mac); struct qtnf_mac_info *macinfo = &mac->macinfo; int ret; + bool regdomain_is_known; if (!wiphy) { pr_err("invalid wiphy pointer\n"); @@ -1127,7 +1111,8 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac) WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD | WIPHY_FLAG_AP_UAPSD | WIPHY_FLAG_HAS_CHANNEL_SWITCH | - WIPHY_FLAG_4ADDR_STATION; + WIPHY_FLAG_4ADDR_STATION | + WIPHY_FLAG_NETNS_OK; wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; if (hw_info->hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD) @@ -1166,11 +1151,19 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac) wiphy->wowlan = macinfo->wowlan; #endif + regdomain_is_known = isalpha(mac->rd->alpha2[0]) && + isalpha(mac->rd->alpha2[1]); + if (hw_info->hw_capab & QLINK_HW_CAPAB_REG_UPDATE) { - wiphy->regulatory_flags |= REGULATORY_STRICT_REG | - REGULATORY_CUSTOM_REG; wiphy->reg_notifier = qtnf_cfg80211_reg_notifier; - wiphy_apply_custom_regulatory(wiphy, hw_info->rd); + + if (mac->rd->alpha2[0] == '9' && mac->rd->alpha2[1] == '9') { + wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG | + REGULATORY_STRICT_REG; + wiphy_apply_custom_regulatory(wiphy, mac->rd); + } else if (regdomain_is_known) { + wiphy->regulatory_flags |= REGULATORY_STRICT_REG; + } } else { wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED; } @@ -1193,10 +1186,9 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac) goto out; if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) - ret = regulatory_set_wiphy_regd(wiphy, hw_info->rd); - else if (isalpha(hw_info->rd->alpha2[0]) && - isalpha(hw_info->rd->alpha2[1])) - ret = regulatory_hint(wiphy, hw_info->rd->alpha2); + ret = regulatory_set_wiphy_regd(wiphy, mac->rd); + else if (regdomain_is_known) + ret = regulatory_hint(wiphy, mac->rd->alpha2); out: return ret; diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c index 85a2a58f4c16..22313a46c3ae 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.c +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c @@ -11,6 +11,13 @@ #include "bus.h" #include "commands.h" +#define QTNF_SCAN_TIME_AUTO 0 + +/* Let the device itself select the best values for current conditions */ +#define QTNF_SCAN_DWELL_ACTIVE_DEFAULT QTNF_SCAN_TIME_AUTO +#define QTNF_SCAN_DWELL_PASSIVE_DEFAULT QTNF_SCAN_TIME_AUTO +#define QTNF_SCAN_SAMPLE_DURATION_DEFAULT QTNF_SCAN_TIME_AUTO + static int qtnf_cmd_check_reply_header(const struct qlink_resp *resp, u16 cmd_id, u8 mac_id, u8 vif_id, size_t resp_size) @@ -89,8 +96,7 @@ static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus, pr_debug("VIF%u.%u cmd=0x%.4X\n", mac_id, vif_id, cmd_id); - if (bus->fw_state != QTNF_FW_STATE_ACTIVE && - cmd_id != QLINK_CMD_FW_INIT) { + if (!qtnf_fw_is_up(bus) && cmd_id != QLINK_CMD_FW_INIT) { pr_warn("VIF%u.%u: drop cmd 0x%.4X in fw state %d\n", mac_id, vif_id, cmd_id, bus->fw_state); dev_kfree_skb(cmd_skb); @@ -177,14 +183,6 @@ static void qtnf_cmd_tlv_ie_set_add(struct sk_buff *cmd_skb, u8 frame_type, memcpy(tlv->ie_data, buf, len); } -static inline size_t qtnf_cmd_acl_data_size(const struct cfg80211_acl_data *acl) -{ - size_t size = sizeof(struct qlink_acl_data) + - acl->n_acl_entries * sizeof(struct qlink_mac_address); - - return size; -} - static bool qtnf_cmd_start_ap_can_fit(const struct qtnf_vif *vif, const struct cfg80211_ap_settings *s) { @@ -203,7 +201,7 @@ static bool qtnf_cmd_start_ap_can_fit(const struct qtnf_vif *vif, if (s->acl) len += 
sizeof(struct qlink_tlv_hdr) + - qtnf_cmd_acl_data_size(s->acl); + struct_size(s->acl, mac_addrs, s->acl->n_acl_entries); if (len > (sizeof(struct qlink_cmd) + QTNF_MAX_CMD_BUF_SIZE)) { pr_err("VIF%u.%u: can not fit AP settings: %u\n", @@ -310,7 +308,8 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif, } if (s->acl) { - size_t acl_size = qtnf_cmd_acl_data_size(s->acl); + size_t acl_size = struct_size(s->acl, mac_addrs, + s->acl->n_acl_entries); struct qlink_tlv_hdr *tlv = skb_put(cmd_skb, sizeof(*tlv) + acl_size); @@ -382,11 +381,11 @@ out: return ret; } -int qtnf_cmd_send_mgmt_frame(struct qtnf_vif *vif, u32 cookie, u16 flags, - u16 freq, const u8 *buf, size_t len) +int qtnf_cmd_send_frame(struct qtnf_vif *vif, u32 cookie, u16 flags, + u16 freq, const u8 *buf, size_t len) { struct sk_buff *cmd_skb; - struct qlink_cmd_mgmt_frame_tx *cmd; + struct qlink_cmd_frame_tx *cmd; int ret; if (sizeof(*cmd) + len > QTNF_MAX_CMD_BUF_SIZE) { @@ -396,14 +395,14 @@ int qtnf_cmd_send_mgmt_frame(struct qtnf_vif *vif, u32 cookie, u16 flags, } cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, - QLINK_CMD_SEND_MGMT_FRAME, + QLINK_CMD_SEND_FRAME, sizeof(*cmd)); if (!cmd_skb) return -ENOMEM; qtnf_bus_lock(vif->mac->bus); - cmd = (struct qlink_cmd_mgmt_frame_tx *)cmd_skb->data; + cmd = (struct qlink_cmd_frame_tx *)cmd_skb->data; cmd->cookie = cpu_to_le32(cookie); cmd->freq = cpu_to_le16(freq); cmd->flags = cpu_to_le16(flags); @@ -786,8 +785,25 @@ int qtnf_cmd_send_change_intf_type(struct qtnf_vif *vif, int use4addr, u8 *mac_addr) { - return qtnf_cmd_send_add_change_intf(vif, iftype, use4addr, mac_addr, - QLINK_CMD_CHANGE_INTF); + int ret; + + ret = qtnf_cmd_send_add_change_intf(vif, iftype, use4addr, mac_addr, + QLINK_CMD_CHANGE_INTF); + + /* Regulatory settings may be different for different interface types */ + if (ret == 0 && vif->wdev.iftype != iftype) { + enum nl80211_band band; + struct wiphy *wiphy = priv_to_wiphy(vif->mac); + + for (band = 0; band < NUM_NL80211_BANDS; ++band) { + if (!wiphy->bands[band]) + continue; + + qtnf_cmd_band_info_get(vif->mac, wiphy->bands[band]); + } + } + + return ret; } int qtnf_cmd_send_del_intf(struct qtnf_vif *vif) @@ -831,55 +847,6 @@ out: return ret; } -static u32 qtnf_cmd_resp_reg_rule_flags_parse(u32 qflags) -{ - u32 flags = 0; - - if (qflags & QLINK_RRF_NO_OFDM) - flags |= NL80211_RRF_NO_OFDM; - - if (qflags & QLINK_RRF_NO_CCK) - flags |= NL80211_RRF_NO_CCK; - - if (qflags & QLINK_RRF_NO_INDOOR) - flags |= NL80211_RRF_NO_INDOOR; - - if (qflags & QLINK_RRF_NO_OUTDOOR) - flags |= NL80211_RRF_NO_OUTDOOR; - - if (qflags & QLINK_RRF_DFS) - flags |= NL80211_RRF_DFS; - - if (qflags & QLINK_RRF_PTP_ONLY) - flags |= NL80211_RRF_PTP_ONLY; - - if (qflags & QLINK_RRF_PTMP_ONLY) - flags |= NL80211_RRF_PTMP_ONLY; - - if (qflags & QLINK_RRF_NO_IR) - flags |= NL80211_RRF_NO_IR; - - if (qflags & QLINK_RRF_AUTO_BW) - flags |= NL80211_RRF_AUTO_BW; - - if (qflags & QLINK_RRF_IR_CONCURRENT) - flags |= NL80211_RRF_IR_CONCURRENT; - - if (qflags & QLINK_RRF_NO_HT40MINUS) - flags |= NL80211_RRF_NO_HT40MINUS; - - if (qflags & QLINK_RRF_NO_HT40PLUS) - flags |= NL80211_RRF_NO_HT40PLUS; - - if (qflags & QLINK_RRF_NO_80MHZ) - flags |= NL80211_RRF_NO_80MHZ; - - if (qflags & QLINK_RRF_NO_160MHZ) - flags |= NL80211_RRF_NO_160MHZ; - - return flags; -} - static int qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus, const struct qlink_resp_get_hw_info *resp, @@ -887,7 +854,6 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus, { struct qtnf_hw_info *hwinfo = &bus->hw_info; const struct 
qlink_tlv_hdr *tlv; - const struct qlink_tlv_reg_rule *tlv_rule; const char *bld_name = NULL; const char *bld_rev = NULL; const char *bld_type = NULL; @@ -898,19 +864,8 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus, const char *calibration_ver = NULL; const char *uboot_ver = NULL; u32 hw_ver = 0; - struct ieee80211_reg_rule *rule; u16 tlv_type; u16 tlv_value_len; - unsigned int rule_idx = 0; - - if (WARN_ON(resp->n_reg_rules > NL80211_MAX_SUPP_REG_RULES)) - return -E2BIG; - - hwinfo->rd = kzalloc(struct_size(hwinfo->rd, reg_rules, - resp->n_reg_rules), GFP_KERNEL); - - if (!hwinfo->rd) - return -ENOMEM; hwinfo->num_mac = resp->num_mac; hwinfo->mac_bitmap = resp->mac_bitmap; @@ -919,30 +874,11 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus, hwinfo->total_tx_chain = resp->total_tx_chain; hwinfo->total_rx_chain = resp->total_rx_chain; hwinfo->hw_capab = le32_to_cpu(resp->hw_capab); - hwinfo->rd->n_reg_rules = resp->n_reg_rules; - hwinfo->rd->alpha2[0] = resp->alpha2[0]; - hwinfo->rd->alpha2[1] = resp->alpha2[1]; bld_tmstamp = le32_to_cpu(resp->bld_tmstamp); plat_id = le32_to_cpu(resp->plat_id); hw_ver = le32_to_cpu(resp->hw_ver); - switch (resp->dfs_region) { - case QLINK_DFS_FCC: - hwinfo->rd->dfs_region = NL80211_DFS_FCC; - break; - case QLINK_DFS_ETSI: - hwinfo->rd->dfs_region = NL80211_DFS_ETSI; - break; - case QLINK_DFS_JP: - hwinfo->rd->dfs_region = NL80211_DFS_JP; - break; - case QLINK_DFS_UNSET: - default: - hwinfo->rd->dfs_region = NL80211_DFS_UNSET; - break; - } - tlv = (const struct qlink_tlv_hdr *)resp->info; while (info_len >= sizeof(*tlv)) { @@ -956,37 +892,6 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus, } switch (tlv_type) { - case QTN_TLV_ID_REG_RULE: - if (rule_idx >= resp->n_reg_rules) { - pr_warn("unexpected number of rules: %u\n", - resp->n_reg_rules); - return -EINVAL; - } - - if (tlv_value_len != sizeof(*tlv_rule) - sizeof(*tlv)) { - pr_warn("malformed TLV 0x%.2X; LEN: %u\n", - tlv_type, tlv_value_len); - return -EINVAL; - } - - tlv_rule = (const struct qlink_tlv_reg_rule *)tlv; - rule = &hwinfo->rd->reg_rules[rule_idx++]; - - rule->freq_range.start_freq_khz = - le32_to_cpu(tlv_rule->start_freq_khz); - rule->freq_range.end_freq_khz = - le32_to_cpu(tlv_rule->end_freq_khz); - rule->freq_range.max_bandwidth_khz = - le32_to_cpu(tlv_rule->max_bandwidth_khz); - rule->power_rule.max_antenna_gain = - le32_to_cpu(tlv_rule->max_antenna_gain); - rule->power_rule.max_eirp = - le32_to_cpu(tlv_rule->max_eirp); - rule->dfs_cac_ms = - le32_to_cpu(tlv_rule->dfs_cac_ms); - rule->flags = qtnf_cmd_resp_reg_rule_flags_parse( - le32_to_cpu(tlv_rule->flags)); - break; case QTN_TLV_ID_BUILD_NAME: bld_name = (const void *)tlv->val; break; @@ -1019,17 +924,8 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus, tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len); } - if (rule_idx != resp->n_reg_rules) { - pr_warn("unexpected number of rules: expected %u got %u\n", - resp->n_reg_rules, rule_idx); - kfree(hwinfo->rd); - hwinfo->rd = NULL; - return -EINVAL; - } - - pr_info("fw_version=%d, MACs map %#x, alpha2=\"%c%c\", chains Tx=%u Rx=%u, capab=0x%x\n", + pr_info("fw_version=%d, MACs map %#x, chains Tx=%u Rx=%u, capab=0x%x\n", hwinfo->fw_ver, hwinfo->mac_bitmap, - hwinfo->rd->alpha2[0], hwinfo->rd->alpha2[1], hwinfo->total_tx_chain, hwinfo->total_rx_chain, hwinfo->hw_capab); @@ -1042,7 +938,7 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus, "\nHardware ID: %s" \ "\nCalibration version: %s" \ "\nU-Boot version: %s" \ - "\nHardware version: 0x%08x", + "\nHardware version: 
0x%08x\n", bld_name, bld_rev, bld_type, bld_label, (unsigned long)bld_tmstamp, (unsigned long)plat_id, @@ -1085,9 +981,12 @@ qtnf_parse_wowlan_info(struct qtnf_wmac *mac, } } -static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac, - const u8 *tlv_buf, size_t tlv_buf_size) +static int +qtnf_parse_variable_mac_info(struct qtnf_wmac *mac, + const struct qlink_resp_get_mac_info *resp, + size_t tlv_buf_size) { + const u8 *tlv_buf = resp->var_info; struct ieee80211_iface_combination *comb = NULL; size_t n_comb = 0; struct ieee80211_iface_limit *limits; @@ -1105,6 +1004,38 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac, u8 ext_capa_len = 0; u8 ext_capa_mask_len = 0; int i = 0; + struct ieee80211_reg_rule *rule; + unsigned int rule_idx = 0; + const struct qlink_tlv_reg_rule *tlv_rule; + + if (WARN_ON(resp->n_reg_rules > NL80211_MAX_SUPP_REG_RULES)) + return -E2BIG; + + mac->rd = kzalloc(sizeof(*mac->rd) + + sizeof(struct ieee80211_reg_rule) * + resp->n_reg_rules, GFP_KERNEL); + if (!mac->rd) + return -ENOMEM; + + mac->rd->n_reg_rules = resp->n_reg_rules; + mac->rd->alpha2[0] = resp->alpha2[0]; + mac->rd->alpha2[1] = resp->alpha2[1]; + + switch (resp->dfs_region) { + case QLINK_DFS_FCC: + mac->rd->dfs_region = NL80211_DFS_FCC; + break; + case QLINK_DFS_ETSI: + mac->rd->dfs_region = NL80211_DFS_ETSI; + break; + case QLINK_DFS_JP: + mac->rd->dfs_region = NL80211_DFS_JP; + break; + case QLINK_DFS_UNSET: + default: + mac->rd->dfs_region = NL80211_DFS_UNSET; + break; + } tlv = (const struct qlink_tlv_hdr *)tlv_buf; while (tlv_buf_size >= sizeof(struct qlink_tlv_hdr)) { @@ -1225,6 +1156,23 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac, mac->macinfo.wowlan = NULL; qtnf_parse_wowlan_info(mac, wowlan); break; + case QTN_TLV_ID_REG_RULE: + if (rule_idx >= resp->n_reg_rules) { + pr_warn("unexpected number of rules: %u\n", + resp->n_reg_rules); + return -EINVAL; + } + + if (tlv_value_len != sizeof(*tlv_rule) - sizeof(*tlv)) { + pr_warn("malformed TLV 0x%.2X; LEN: %u\n", + tlv_type, tlv_value_len); + return -EINVAL; + } + + tlv_rule = (const struct qlink_tlv_reg_rule *)tlv; + rule = &mac->rd->reg_rules[rule_idx++]; + qlink_utils_regrule_q2nl(rule, tlv_rule); + break; default: pr_warn("MAC%u: unknown TLV type %u\n", mac->macid, tlv_type); @@ -1253,6 +1201,12 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac, return -EINVAL; } + if (rule_idx != resp->n_reg_rules) { + pr_warn("unexpected number of rules: expected %u got %u\n", + resp->n_reg_rules, rule_idx); + return -EINVAL; + } + if (ext_capa_len > 0) { ext_capa = kmemdup(ext_capa, ext_capa_len, GFP_KERNEL); if (!ext_capa) @@ -1663,7 +1617,7 @@ int qtnf_cmd_get_mac_info(struct qtnf_wmac *mac) resp = (const struct qlink_resp_get_mac_info *)resp_skb->data; qtnf_cmd_resp_proc_mac_info(mac, resp); - ret = qtnf_parse_variable_mac_info(mac, resp->var_info, var_data_len); + ret = qtnf_parse_variable_mac_info(mac, resp, var_data_len); out: qtnf_bus_unlock(mac->bus); @@ -1709,21 +1663,7 @@ int qtnf_cmd_band_info_get(struct qtnf_wmac *mac, struct qlink_resp_band_info_get *resp; size_t info_len = 0; int ret = 0; - u8 qband; - - switch (band->band) { - case NL80211_BAND_2GHZ: - qband = QLINK_BAND_2GHZ; - break; - case NL80211_BAND_5GHZ: - qband = QLINK_BAND_5GHZ; - break; - case NL80211_BAND_60GHZ: - qband = QLINK_BAND_60GHZ; - break; - default: - return -EINVAL; - } + u8 qband = qlink_utils_band_cfg2q(band->band); cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, 0, QLINK_CMD_BAND_INFO_GET, @@ -2107,22 +2047,23 @@ 
out: static void qtnf_cmd_channel_tlv_add(struct sk_buff *cmd_skb, const struct ieee80211_channel *sc) { - struct qlink_tlv_channel *qchan; - u32 flags = 0; - - qchan = skb_put_zero(cmd_skb, sizeof(*qchan)); - qchan->hdr.type = cpu_to_le16(QTN_TLV_ID_CHANNEL); - qchan->hdr.len = cpu_to_le16(sizeof(*qchan) - sizeof(qchan->hdr)); - qchan->chan.center_freq = cpu_to_le16(sc->center_freq); - qchan->chan.hw_value = cpu_to_le16(sc->hw_value); - - if (sc->flags & IEEE80211_CHAN_NO_IR) - flags |= QLINK_CHAN_NO_IR; - - if (sc->flags & IEEE80211_CHAN_RADAR) - flags |= QLINK_CHAN_RADAR; - - qchan->chan.flags = cpu_to_le32(flags); + struct qlink_tlv_channel *tlv; + struct qlink_channel *qch; + + tlv = skb_put_zero(cmd_skb, sizeof(*tlv)); + qch = &tlv->chan; + tlv->hdr.type = cpu_to_le16(QTN_TLV_ID_CHANNEL); + tlv->hdr.len = cpu_to_le16(sizeof(*qch)); + + qch->center_freq = cpu_to_le16(sc->center_freq); + qch->hw_value = cpu_to_le16(sc->hw_value); + qch->band = qlink_utils_band_cfg2q(sc->band); + qch->max_power = sc->max_power; + qch->max_reg_power = sc->max_reg_power; + qch->max_antenna_gain = sc->max_antenna_gain; + qch->beacon_found = sc->beacon_found; + qch->dfs_state = qlink_utils_dfs_state_cfg2q(sc->dfs_state); + qch->flags = cpu_to_le32(qlink_utils_chflags_cfg2q(sc->flags)); } static void qtnf_cmd_randmac_tlv_add(struct sk_buff *cmd_skb, @@ -2141,6 +2082,35 @@ static void qtnf_cmd_randmac_tlv_add(struct sk_buff *cmd_skb, memcpy(randmac->mac_addr_mask, mac_addr_mask, ETH_ALEN); } +static void qtnf_cmd_scan_set_dwell(struct qtnf_wmac *mac, + struct sk_buff *cmd_skb) +{ + struct cfg80211_scan_request *scan_req = mac->scan_req; + u16 dwell_active = QTNF_SCAN_DWELL_ACTIVE_DEFAULT; + u16 dwell_passive = QTNF_SCAN_DWELL_PASSIVE_DEFAULT; + u16 duration = QTNF_SCAN_SAMPLE_DURATION_DEFAULT; + + if (scan_req->duration) { + dwell_active = scan_req->duration; + dwell_passive = scan_req->duration; + } + + pr_debug("MAC%u: %s scan dwell active=%u, passive=%u, duration=%u\n", + mac->macid, + scan_req->duration_mandatory ? "mandatory" : "max", + dwell_active, dwell_passive, duration); + + qtnf_cmd_skb_put_tlv_u16(cmd_skb, + QTN_TLV_ID_SCAN_DWELL_ACTIVE, + dwell_active); + qtnf_cmd_skb_put_tlv_u16(cmd_skb, + QTN_TLV_ID_SCAN_DWELL_PASSIVE, + dwell_passive); + qtnf_cmd_skb_put_tlv_u16(cmd_skb, + QTN_TLV_ID_SCAN_SAMPLE_DURATION, + duration); +} + int qtnf_cmd_send_scan(struct qtnf_wmac *mac) { struct sk_buff *cmd_skb; @@ -2192,6 +2162,8 @@ int qtnf_cmd_send_scan(struct qtnf_wmac *mac) } } + qtnf_cmd_scan_set_dwell(mac, cmd_skb); + if (scan_req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) { pr_debug("MAC%u: scan with random addr=%pM, mask=%pM\n", mac->macid, @@ -2207,15 +2179,6 @@ int qtnf_cmd_send_scan(struct qtnf_wmac *mac) qtnf_cmd_skb_put_tlv_tag(cmd_skb, QTN_TLV_ID_SCAN_FLUSH); } - if (scan_req->duration) { - pr_debug("MAC%u: %s scan duration %u\n", mac->macid, - scan_req->duration_mandatory ? 
"mandatory" : "max", - scan_req->duration); - - qtnf_cmd_skb_put_tlv_u16(cmd_skb, QTN_TLV_ID_SCAN_DWELL, - scan_req->duration); - } - ret = qtnf_cmd_send(mac->bus, cmd_skb); if (ret) goto out; @@ -2404,13 +2367,17 @@ out: return ret; } -int qtnf_cmd_reg_notify(struct qtnf_bus *bus, struct regulatory_request *req) +int qtnf_cmd_reg_notify(struct qtnf_wmac *mac, struct regulatory_request *req) { + struct wiphy *wiphy = priv_to_wiphy(mac); + struct qtnf_bus *bus = mac->bus; struct sk_buff *cmd_skb; int ret; struct qlink_cmd_reg_notify *cmd; + enum nl80211_band band; + const struct ieee80211_supported_band *cfg_band; - cmd_skb = qtnf_cmd_alloc_new_cmdskb(QLINK_MACID_RSVD, QLINK_VIFID_RSVD, + cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD, QLINK_CMD_REG_NOTIFY, sizeof(*cmd)); if (!cmd_skb) @@ -2447,12 +2414,40 @@ int qtnf_cmd_reg_notify(struct qtnf_bus *bus, struct regulatory_request *req) break; } + switch (req->dfs_region) { + case NL80211_DFS_FCC: + cmd->dfs_region = QLINK_DFS_FCC; + break; + case NL80211_DFS_ETSI: + cmd->dfs_region = QLINK_DFS_ETSI; + break; + case NL80211_DFS_JP: + cmd->dfs_region = QLINK_DFS_JP; + break; + default: + cmd->dfs_region = QLINK_DFS_UNSET; + break; + } + + cmd->num_channels = 0; + + for (band = 0; band < NUM_NL80211_BANDS; band++) { + unsigned int i; + + cfg_band = wiphy->bands[band]; + if (!cfg_band) + continue; + + cmd->num_channels += cfg_band->n_channels; + + for (i = 0; i < cfg_band->n_channels; ++i) { + qtnf_cmd_channel_tlv_add(cmd_skb, + &cfg_band->channels[i]); + } + } + qtnf_bus_lock(bus); ret = qtnf_cmd_send(bus, cmd_skb); - if (ret) - goto out; - -out: qtnf_bus_unlock(bus); return ret; @@ -2592,7 +2587,7 @@ int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif, struct qtnf_bus *bus = vif->mac->bus; struct sk_buff *cmd_skb; struct qlink_tlv_hdr *tlv; - size_t acl_size = qtnf_cmd_acl_data_size(params); + size_t acl_size = struct_size(params, mac_addrs, params->n_acl_entries); int ret; cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.h b/drivers/net/wireless/quantenna/qtnfmac/commands.h index 64f0b9dc8a14..6406365287fc 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.h +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.h @@ -27,8 +27,8 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif, const struct cfg80211_ap_settings *s); int qtnf_cmd_send_stop_ap(struct qtnf_vif *vif); int qtnf_cmd_send_register_mgmt(struct qtnf_vif *vif, u16 frame_type, bool reg); -int qtnf_cmd_send_mgmt_frame(struct qtnf_vif *vif, u32 cookie, u16 flags, - u16 freq, const u8 *buf, size_t len); +int qtnf_cmd_send_frame(struct qtnf_vif *vif, u32 cookie, u16 flags, + u16 freq, const u8 *buf, size_t len); int qtnf_cmd_send_mgmt_set_appie(struct qtnf_vif *vif, u8 frame_type, const u8 *buf, size_t len); int qtnf_cmd_get_sta_info(struct qtnf_vif *vif, const u8 *sta_mac, @@ -57,7 +57,7 @@ int qtnf_cmd_send_disconnect(struct qtnf_vif *vif, u16 reason_code); int qtnf_cmd_send_updown_intf(struct qtnf_vif *vif, bool up); -int qtnf_cmd_reg_notify(struct qtnf_bus *bus, struct regulatory_request *req); +int qtnf_cmd_reg_notify(struct qtnf_wmac *mac, struct regulatory_request *req); int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel, struct qtnf_chan_stats *stats); int qtnf_cmd_send_chan_switch(struct qtnf_vif *vif, diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c index ee1b75fda1dd..54ea86ae4959 100644 --- 
a/drivers/net/wireless/quantenna/qtnfmac/core.c +++ b/drivers/net/wireless/quantenna/qtnfmac/core.c @@ -368,6 +368,23 @@ static void qtnf_mac_scan_timeout(struct work_struct *work) qtnf_mac_scan_finish(mac, true); } +static void qtnf_vif_send_data_high_pri(struct work_struct *work) +{ + struct qtnf_vif *vif = + container_of(work, struct qtnf_vif, high_pri_tx_work); + struct sk_buff *skb; + + if (!vif->netdev || + vif->wdev.iftype == NL80211_IFTYPE_UNSPECIFIED) + return; + + while ((skb = skb_dequeue(&vif->high_pri_tx_queue))) { + qtnf_cmd_send_frame(vif, 0, QLINK_FRAME_TX_FLAG_8023, + 0, skb->data, skb->len); + dev_kfree_skb_any(skb); + } +} + static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus, unsigned int macid) { @@ -395,7 +412,8 @@ static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus, vif->mac = mac; vif->vifid = i; qtnf_sta_list_init(&vif->sta_list); - + INIT_WORK(&vif->high_pri_tx_work, qtnf_vif_send_data_high_pri); + skb_queue_head_init(&vif->high_pri_tx_queue); vif->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); if (!vif->stats64) pr_warn("VIF%u.%u: per cpu stats allocation failed\n", @@ -499,6 +517,8 @@ static void qtnf_core_mac_detach(struct qtnf_bus *bus, unsigned int macid) qtnf_mac_iface_comb_free(mac); qtnf_mac_ext_caps_free(mac); kfree(mac->macinfo.wowlan); + kfree(mac->rd); + mac->rd = NULL; wiphy_free(wiphy); bus->mac[macid] = NULL; } @@ -587,8 +607,6 @@ int qtnf_core_attach(struct qtnf_bus *bus) int ret; qtnf_trans_init(bus); - - bus->fw_state = QTNF_FW_STATE_BOOT_DONE; qtnf_bus_data_rx_start(bus); bus->workqueue = alloc_ordered_workqueue("QTNF_BUS", 0); @@ -598,6 +616,13 @@ int qtnf_core_attach(struct qtnf_bus *bus) goto error; } + bus->hprio_workqueue = alloc_workqueue("QTNF_HPRI", WQ_HIGHPRI, 0); + if (!bus->hprio_workqueue) { + pr_err("failed to alloc high prio workqueue\n"); + ret = -ENOMEM; + goto error; + } + INIT_WORK(&bus->event_work, qtnf_event_work_handler); ret = qtnf_cmd_send_init_fw(bus); @@ -607,7 +632,6 @@ int qtnf_core_attach(struct qtnf_bus *bus) } bus->fw_state = QTNF_FW_STATE_ACTIVE; - ret = qtnf_cmd_get_hw_info(bus); if (ret) { pr_err("failed to get HW info: %d\n", ret); @@ -637,11 +661,11 @@ int qtnf_core_attach(struct qtnf_bus *bus) } } + bus->fw_state = QTNF_FW_STATE_RUNNING; return 0; error: qtnf_core_detach(bus); - return ret; } EXPORT_SYMBOL_GPL(qtnf_core_attach); @@ -655,7 +679,7 @@ void qtnf_core_detach(struct qtnf_bus *bus) for (macid = 0; macid < QTNF_MAX_MAC; macid++) qtnf_core_mac_detach(bus, macid); - if (bus->fw_state == QTNF_FW_STATE_ACTIVE) + if (qtnf_fw_is_up(bus)) qtnf_cmd_send_deinit_fw(bus); bus->fw_state = QTNF_FW_STATE_DETACHED; @@ -663,10 +687,14 @@ void qtnf_core_detach(struct qtnf_bus *bus) if (bus->workqueue) { flush_workqueue(bus->workqueue); destroy_workqueue(bus->workqueue); + bus->workqueue = NULL; } - kfree(bus->hw_info.rd); - bus->hw_info.rd = NULL; + if (bus->hprio_workqueue) { + flush_workqueue(bus->hprio_workqueue); + destroy_workqueue(bus->hprio_workqueue); + bus->hprio_workqueue = NULL; + } qtnf_trans_free(bus); } @@ -684,6 +712,9 @@ struct net_device *qtnf_classify_skb(struct qtnf_bus *bus, struct sk_buff *skb) struct qtnf_wmac *mac; struct qtnf_vif *vif; + if (unlikely(bus->fw_state != QTNF_FW_STATE_RUNNING)) + return NULL; + meta = (struct qtnf_frame_meta_info *) (skb_tail_pointer(skb) - sizeof(*meta)); @@ -799,6 +830,15 @@ void qtnf_update_tx_stats(struct net_device *ndev, const struct sk_buff *skb) } EXPORT_SYMBOL_GPL(qtnf_update_tx_stats); +void 
qtnf_packet_send_hi_pri(struct sk_buff *skb) +{ + struct qtnf_vif *vif = qtnf_netdev_get_priv(skb->dev); + + skb_queue_tail(&vif->high_pri_tx_queue, skb); + queue_work(vif->mac->bus->hprio_workqueue, &vif->high_pri_tx_work); +} +EXPORT_SYMBOL_GPL(qtnf_packet_send_hi_pri); + MODULE_AUTHOR("Quantenna Communications"); MODULE_DESCRIPTION("Quantenna 802.11 wireless LAN FullMAC driver."); MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h index a31cff46e964..af8372dfb927 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/core.h +++ b/drivers/net/wireless/quantenna/qtnfmac/core.h @@ -63,6 +63,8 @@ struct qtnf_vif { struct qtnf_wmac *mac; struct work_struct reset_work; + struct work_struct high_pri_tx_work; + struct sk_buff_head high_pri_tx_queue; struct qtnf_sta_list sta_list; unsigned long cons_tx_timeout_cnt; int generation; @@ -112,6 +114,7 @@ struct qtnf_wmac { struct cfg80211_scan_request *scan_req; struct mutex mac_lock; /* lock during wmac specific ops */ struct delayed_work scan_timeout; + struct ieee80211_regdomain *rd; }; struct qtnf_hw_info { @@ -120,7 +123,6 @@ struct qtnf_hw_info { u8 mac_bitmap; u32 fw_ver; u32 hw_capab; - struct ieee80211_regdomain *rd; u8 total_tx_chain; u8 total_rx_chain; char fw_version[ETHTOOL_FWVERS_LEN]; @@ -149,6 +151,7 @@ void qtnf_virtual_intf_cleanup(struct net_device *ndev); void qtnf_netdev_updown(struct net_device *ndev, bool up); void qtnf_scan_done(struct qtnf_wmac *mac, bool aborted); +void qtnf_packet_send_hi_pri(struct sk_buff *skb); static inline struct qtnf_vif *qtnf_netdev_get_priv(struct net_device *dev) { diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c index c3a32effa6f0..e4e9344b6982 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c +++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c @@ -56,7 +56,7 @@ int qtnf_pcie_control_tx(struct qtnf_bus *bus, struct sk_buff *skb) if (ret == -ETIMEDOUT) { pr_err("EP firmware is dead\n"); - bus->fw_state = QTNF_FW_STATE_EP_DEAD; + bus->fw_state = QTNF_FW_STATE_DEAD; } return ret; @@ -128,32 +128,22 @@ static int qtnf_dbg_shm_stats(struct seq_file *s, void *data) return 0; } -void qtnf_pcie_fw_boot_done(struct qtnf_bus *bus, bool boot_success) +int qtnf_pcie_fw_boot_done(struct qtnf_bus *bus) { - struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus); - struct pci_dev *pdev = priv->pdev; int ret; - if (boot_success) { - bus->fw_state = QTNF_FW_STATE_FW_DNLD_DONE; - - ret = qtnf_core_attach(bus); - if (ret) { - pr_err("failed to attach core\n"); - boot_success = false; - } - } - - if (boot_success) { + bus->fw_state = QTNF_FW_STATE_BOOT_DONE; + ret = qtnf_core_attach(bus); + if (ret) { + pr_err("failed to attach core\n"); + } else { qtnf_debugfs_init(bus, DRV_NAME); qtnf_debugfs_add_entry(bus, "mps", qtnf_dbg_mps_show); qtnf_debugfs_add_entry(bus, "msi_enabled", qtnf_dbg_msi_show); qtnf_debugfs_add_entry(bus, "shm_stats", qtnf_dbg_shm_stats); - } else { - bus->fw_state = QTNF_FW_STATE_DETACHED; } - put_device(&pdev->dev); + return ret; } static void qtnf_tune_pcie_mps(struct pci_dev *pdev) @@ -344,7 +334,7 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) pcie_priv = get_bus_priv(bus); pci_set_drvdata(pdev, bus); bus->dev = &pdev->dev; - bus->fw_state = QTNF_FW_STATE_RESET; + bus->fw_state = QTNF_FW_STATE_DETACHED; pcie_priv->pdev = pdev; pcie_priv->tx_stopped = 0; pcie_priv->rx_bd_num = rx_bd_size_param; @@ 
-364,6 +354,7 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) pcie_priv->pcie_irq_count = 0; pcie_priv->tx_reclaim_done = 0; pcie_priv->tx_reclaim_req = 0; + pcie_priv->tx_eapol = 0; pcie_priv->workqueue = create_singlethread_workqueue("QTNF_PCIE"); if (!pcie_priv->workqueue) { @@ -419,8 +410,7 @@ static void qtnf_pcie_remove(struct pci_dev *dev) cancel_work_sync(&bus->fw_work); - if (bus->fw_state == QTNF_FW_STATE_ACTIVE || - bus->fw_state == QTNF_FW_STATE_EP_DEAD) + if (qtnf_fw_is_attached(bus)) qtnf_core_detach(bus); netif_napi_del(&bus->mux_napi); diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h index bbc074e1f34d..5e8b9cb68419 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h +++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h @@ -62,6 +62,7 @@ struct qtnf_pcie_bus_priv { u32 tx_done_count; u32 tx_reclaim_done; u32 tx_reclaim_req; + u32 tx_eapol; u8 msi_enabled; u8 tx_stopped; @@ -70,7 +71,7 @@ struct qtnf_pcie_bus_priv { int qtnf_pcie_control_tx(struct qtnf_bus *bus, struct sk_buff *skb); int qtnf_pcie_alloc_skb_array(struct qtnf_pcie_bus_priv *priv); -void qtnf_pcie_fw_boot_done(struct qtnf_bus *bus, bool boot_success); +int qtnf_pcie_fw_boot_done(struct qtnf_bus *bus); void qtnf_pcie_init_shm_ipc(struct qtnf_pcie_bus_priv *priv, struct qtnf_shm_ipc_region __iomem *ipc_tx_reg, struct qtnf_shm_ipc_region __iomem *ipc_rx_reg, diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c index 1f5facbb8905..3aa3714d4dfd 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c +++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c @@ -980,12 +980,11 @@ static void qtnf_pearl_fw_work_handler(struct work_struct *work) { struct qtnf_bus *bus = container_of(work, struct qtnf_bus, fw_work); struct qtnf_pcie_pearl_state *ps = (void *)get_bus_priv(bus); + u32 state = QTN_RC_FW_LOADRDY | QTN_RC_FW_QLINK; + const char *fwname = QTN_PCI_PEARL_FW_NAME; struct pci_dev *pdev = ps->base.pdev; const struct firmware *fw; int ret; - u32 state = QTN_RC_FW_LOADRDY | QTN_RC_FW_QLINK; - const char *fwname = QTN_PCI_PEARL_FW_NAME; - bool fw_boot_success = false; if (ps->base.flashboot) { state |= QTN_RC_FW_FLASHBOOT; @@ -1031,23 +1030,23 @@ static void qtnf_pearl_fw_work_handler(struct work_struct *work) goto fw_load_exit; } - pr_info("firmware is up and running\n"); - if (qtnf_poll_state(&ps->bda->bda_ep_state, QTN_EP_FW_QLINK_DONE, QTN_FW_QLINK_TIMEOUT_MS)) { pr_err("firmware runtime failure\n"); goto fw_load_exit; } - fw_boot_success = true; + pr_info("firmware is up and running\n"); -fw_load_exit: - qtnf_pcie_fw_boot_done(bus, fw_boot_success); + ret = qtnf_pcie_fw_boot_done(bus); + if (ret) + goto fw_load_exit; - if (fw_boot_success) { - qtnf_debugfs_add_entry(bus, "hdp_stats", qtnf_dbg_hdp_stats); - qtnf_debugfs_add_entry(bus, "irq_stats", qtnf_dbg_irq_stats); - } + qtnf_debugfs_add_entry(bus, "hdp_stats", qtnf_dbg_hdp_stats); + qtnf_debugfs_add_entry(bus, "irq_stats", qtnf_dbg_irq_stats); + +fw_load_exit: + put_device(&pdev->dev); } static void qtnf_pearl_reclaim_tasklet_fn(unsigned long data) diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c index cbcda57105f3..9a4380ed7f1b 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c +++ 
b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c @@ -498,6 +498,13 @@ static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb) int len; int i; + if (unlikely(skb->protocol == htons(ETH_P_PAE))) { + qtnf_packet_send_hi_pri(skb); + qtnf_update_tx_stats(skb->dev, skb); + priv->tx_eapol++; + return NETDEV_TX_OK; + } + spin_lock_irqsave(&priv->tx_lock, flags); if (!qtnf_tx_queue_ready(ts)) { @@ -761,6 +768,7 @@ static int qtnf_dbg_pkt_stats(struct seq_file *s, void *data) seq_printf(s, "tx_done_count(%u)\n", priv->tx_done_count); seq_printf(s, "tx_reclaim_done(%u)\n", priv->tx_reclaim_done); seq_printf(s, "tx_reclaim_req(%u)\n", priv->tx_reclaim_req); + seq_printf(s, "tx_eapol(%u)\n", priv->tx_eapol); seq_printf(s, "tx_bd_r_index(%u)\n", priv->tx_bd_r_index); seq_printf(s, "tx_done_index(%u)\n", tx_done_index); @@ -1023,8 +1031,9 @@ static void qtnf_topaz_fw_work_handler(struct work_struct *work) { struct qtnf_bus *bus = container_of(work, struct qtnf_bus, fw_work); struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus); - int ret; int bootloader_needed = readl(&ts->bda->bda_flags) & QTN_BDA_XMIT_UBOOT; + struct pci_dev *pdev = ts->base.pdev; + int ret; qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_TARGET_BOOT); @@ -1073,19 +1082,23 @@ static void qtnf_topaz_fw_work_handler(struct work_struct *work) } } + ret = qtnf_post_init_ep(ts); + if (ret) { + pr_err("FW runtime failure\n"); + goto fw_load_exit; + } + pr_info("firmware is up and running\n"); - ret = qtnf_post_init_ep(ts); + ret = qtnf_pcie_fw_boot_done(bus); if (ret) - pr_err("FW runtime failure\n"); + goto fw_load_exit; -fw_load_exit: - qtnf_pcie_fw_boot_done(bus, ret ? false : true); + qtnf_debugfs_add_entry(bus, "pkt_stats", qtnf_dbg_pkt_stats); + qtnf_debugfs_add_entry(bus, "irq_stats", qtnf_dbg_irq_stats); - if (ret == 0) { - qtnf_debugfs_add_entry(bus, "pkt_stats", qtnf_dbg_pkt_stats); - qtnf_debugfs_add_entry(bus, "irq_stats", qtnf_dbg_irq_stats); - } +fw_load_exit: + put_device(&pdev->dev); } static void qtnf_reclaim_tasklet_fn(unsigned long data) diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h index 7798edcf7980..158c9eba20ef 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h +++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h @@ -6,7 +6,7 @@ #include <linux/ieee80211.h> -#define QLINK_PROTO_VER 13 +#define QLINK_PROTO_VER 15 #define QLINK_MACID_RSVD 0xFF #define QLINK_VIFID_RSVD 0xFF @@ -206,6 +206,8 @@ struct qlink_sta_info_state { * execution status (one of &enum qlink_cmd_result). Reply message * may also contain data payload specific to the command type. * + * @QLINK_CMD_SEND_FRAME: send specified frame over the air; firmware will + * encapsulate 802.3 packet into 802.11 frame automatically. * @QLINK_CMD_BAND_INFO_GET: for the specified MAC and specified band, get * the band's description including number of operational channels and * info on each channel, HT/VHT capabilities, supported rates etc. 
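/*
 * [Editor's note -- a condensed sketch, not patch content.  The qtnfmac
 * hunks above form one path for 802.1X frames: the PCIe datapath diverts
 * ETH_P_PAE skbs to qtnf_packet_send_hi_pri(), which queues them on
 * vif->high_pri_tx_queue; qtnf_vif_send_data_high_pri() then drains that
 * queue from the WQ_HIGHPRI workqueue and sends each frame through
 * QLINK_CMD_SEND_FRAME with QLINK_FRAME_TX_FLAG_8023, so the firmware
 * adds the 802.11 encapsulation.  example_hard_start_xmit() and
 * example_dma_tx() are hypothetical names.]
 */
static netdev_tx_t example_dma_tx(struct sk_buff *skb)
{
	/* hypothetical bulk path: hand the skb to the DMA descriptor ring */
	return NETDEV_TX_OK;
}

static netdev_tx_t example_hard_start_xmit(struct sk_buff *skb,
					   struct net_device *ndev)
{
	if (unlikely(skb->protocol == htons(ETH_P_PAE))) {
		/* EAPOL rides the reliable control channel, ahead of
		 * whatever is already queued in the data rings
		 */
		qtnf_packet_send_hi_pri(skb);
		qtnf_update_tx_stats(ndev, skb);
		return NETDEV_TX_OK;
	}

	return example_dma_tx(skb);
}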
@@ -220,7 +222,7 @@ enum qlink_cmd_type { QLINK_CMD_FW_INIT = 0x0001, QLINK_CMD_FW_DEINIT = 0x0002, QLINK_CMD_REGISTER_MGMT = 0x0003, - QLINK_CMD_SEND_MGMT_FRAME = 0x0004, + QLINK_CMD_SEND_FRAME = 0x0004, QLINK_CMD_MGMT_SET_APPIE = 0x0005, QLINK_CMD_PHY_PARAMS_GET = 0x0011, QLINK_CMD_PHY_PARAMS_SET = 0x0012, @@ -321,22 +323,26 @@ struct qlink_cmd_mgmt_frame_register { u8 do_register; } __packed; -enum qlink_mgmt_frame_tx_flags { - QLINK_MGMT_FRAME_TX_FLAG_NONE = 0, - QLINK_MGMT_FRAME_TX_FLAG_OFFCHAN = BIT(0), - QLINK_MGMT_FRAME_TX_FLAG_NO_CCK = BIT(1), - QLINK_MGMT_FRAME_TX_FLAG_ACK_NOWAIT = BIT(2), +/** + * @QLINK_FRAME_TX_FLAG_8023: frame has an 802.3 header; if not set, the frame + * is 802.11 encapsulated. + */ +enum qlink_frame_tx_flags { + QLINK_FRAME_TX_FLAG_OFFCHAN = BIT(0), + QLINK_FRAME_TX_FLAG_NO_CCK = BIT(1), + QLINK_FRAME_TX_FLAG_ACK_NOWAIT = BIT(2), + QLINK_FRAME_TX_FLAG_8023 = BIT(3), }; /** - * struct qlink_cmd_mgmt_frame_tx - data for QLINK_CMD_SEND_MGMT_FRAME command + * struct qlink_cmd_frame_tx - data for QLINK_CMD_SEND_FRAME command * * @cookie: opaque request identifier. * @freq: Frequency to use for frame transmission. - * @flags: Transmission flags, one of &enum qlink_mgmt_frame_tx_flags. + * @flags: Transmission flags, one of &enum qlink_frame_tx_flags. * @frame_data: frame to transmit. */ -struct qlink_cmd_mgmt_frame_tx { +struct qlink_cmd_frame_tx { struct qlink_cmd chdr; __le32 cookie; __le16 freq; @@ -580,12 +586,20 @@ enum qlink_user_reg_hint_type { * @initiator: which entity sent the request, one of &enum qlink_reg_initiator. * @user_reg_hint_type: type of hint for QLINK_REGDOM_SET_BY_USER request, one * of &enum qlink_user_reg_hint_type. + * @num_channels: number of &struct qlink_tlv_channel in the variable portion of the + * payload. + * @dfs_region: one of &enum qlink_dfs_regions. + * @info: variable portion of regulatory notifier callback. */ struct qlink_cmd_reg_notify { struct qlink_cmd chdr; u8 alpha2[2]; u8 initiator; u8 user_reg_hint_type; + u8 num_channels; + u8 dfs_region; + u8 rsvd[2]; + u8 info[0]; } __packed; /** @@ -765,6 +779,18 @@ struct qlink_resp { } __packed; /** + * enum qlink_dfs_regions - regulatory DFS regions + * + * Corresponds to &enum nl80211_dfs_regions. + */ +enum qlink_dfs_regions { + QLINK_DFS_UNSET = 0, + QLINK_DFS_FCC = 1, + QLINK_DFS_ETSI = 2, + QLINK_DFS_JP = 3, +}; + +/** * struct qlink_resp_get_mac_info - response for QLINK_CMD_MAC_INFO command * * Data describing specific physical device providing wireless MAC @@ -779,6 +805,10 @@ struct qlink_resp { * @bands_cap: wireless bands WMAC can operate in, bitmap of &enum qlink_band. * @max_ap_assoc_sta: Maximum number of associations supported by WMAC. * @radar_detect_widths: bitmask of channels BW for which WMAC can detect radar. + * @alpha2: country code ID firmware is configured to. + * @n_reg_rules: number of regulatory rules TLVs in variable portion of the + * message. + * @dfs_region: regulatory DFS region, one of &enum qlink_dfs_regions. * @var_info: variable-length WMAC info data. */ struct qlink_resp_get_mac_info { @@ -792,23 +822,14 @@ struct qlink_resp_get_mac_info { __le16 radar_detect_widths; __le32 max_acl_mac_addrs; u8 bands_cap; + u8 alpha2[2]; + u8 n_reg_rules; + u8 dfs_region; u8 rsvd[1]; u8 var_info[0]; } __packed; /** - * enum qlink_dfs_regions - regulatory DFS regions - * - * Corresponds to &enum nl80211_dfs_regions. 
- */ -enum qlink_dfs_regions { - QLINK_DFS_UNSET = 0, - QLINK_DFS_FCC = 1, - QLINK_DFS_ETSI = 2, - QLINK_DFS_JP = 3, -}; - -/** * struct qlink_resp_get_hw_info - response for QLINK_CMD_GET_HW_INFO command * * Description of wireless hardware capabilities and features. @@ -820,11 +841,7 @@ enum qlink_dfs_regions { * @mac_bitmap: Bitmap of MAC IDs that are active and can be used in firmware. * @total_tx_chains: total number of transmit chains used by device. * @total_rx_chains: total number of receive chains. - * @alpha2: country code ID firmware is configured to. - * @n_reg_rules: number of regulatory rules TLVs in variable portion of the - * message. - * @dfs_region: regulatory DFS region, one of @enum qlink_dfs_region. - * @info: variable-length HW info, can contain QTN_TLV_ID_REG_RULE. + * @info: variable-length HW info. */ struct qlink_resp_get_hw_info { struct qlink_resp rhdr; @@ -838,9 +855,6 @@ struct qlink_resp_get_hw_info { u8 mac_bitmap; u8 total_tx_chain; u8 total_rx_chain; - u8 alpha2[2]; - u8 n_reg_rules; - u8 dfs_region; u8 info[0]; } __packed; @@ -1148,6 +1162,13 @@ struct qlink_event_external_auth { * carried by QTN_TLV_ID_STA_STATS_MAP. * @QTN_TLV_ID_MAX_SCAN_SSIDS: maximum number of SSIDs the device can scan * for in any given scan. + * @QTN_TLV_ID_SCAN_DWELL_ACTIVE: time spent on a single channel for an active + * scan. + * @QTN_TLV_ID_SCAN_DWELL_PASSIVE: time spent on a single channel for a passive + * scan. + * @QTN_TLV_ID_SCAN_SAMPLE_DURATION: total duration of sampling a single channel + * during a scan including off-channel dwell time and operating channel + * time. */ enum qlink_tlv_id { QTN_TLV_ID_FRAG_THRESH = 0x0201, @@ -1180,7 +1201,9 @@ enum qlink_tlv_id { QTN_TLV_ID_WOWLAN_CAPAB = 0x0410, QTN_TLV_ID_WOWLAN_PATTERN = 0x0411, QTN_TLV_ID_SCAN_FLUSH = 0x0412, - QTN_TLV_ID_SCAN_DWELL = 0x0413, + QTN_TLV_ID_SCAN_DWELL_ACTIVE = 0x0413, + QTN_TLV_ID_SCAN_DWELL_PASSIVE = 0x0416, + QTN_TLV_ID_SCAN_SAMPLE_DURATION = 0x0417, }; struct qlink_tlv_hdr { diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c index 72bfd17cb687..1a972bce7b8b 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c +++ b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c @@ -182,3 +182,120 @@ void qlink_acl_data_cfg2q(const struct cfg80211_acl_data *acl, memcpy(qacl->mac_addrs, acl->mac_addrs, acl->n_acl_entries * sizeof(*qacl->mac_addrs)); } + +enum qlink_band qlink_utils_band_cfg2q(enum nl80211_band band) +{ + switch (band) { + case NL80211_BAND_2GHZ: + return QLINK_BAND_2GHZ; + case NL80211_BAND_5GHZ: + return QLINK_BAND_5GHZ; + case NL80211_BAND_60GHZ: + return QLINK_BAND_60GHZ; + default: + return -EINVAL; + } +} + +enum qlink_dfs_state qlink_utils_dfs_state_cfg2q(enum nl80211_dfs_state state) +{ + switch (state) { + case NL80211_DFS_USABLE: + return QLINK_DFS_USABLE; + case NL80211_DFS_AVAILABLE: + return QLINK_DFS_AVAILABLE; + case NL80211_DFS_UNAVAILABLE: + default: + return QLINK_DFS_UNAVAILABLE; + } +} + +u32 qlink_utils_chflags_cfg2q(u32 cfgflags) +{ + u32 flags = 0; + + if (cfgflags & IEEE80211_CHAN_DISABLED) + flags |= QLINK_CHAN_DISABLED; + + if (cfgflags & IEEE80211_CHAN_NO_IR) + flags |= QLINK_CHAN_NO_IR; + + if (cfgflags & IEEE80211_CHAN_RADAR) + flags |= QLINK_CHAN_RADAR; + + if (cfgflags & IEEE80211_CHAN_NO_HT40PLUS) + flags |= QLINK_CHAN_NO_HT40PLUS; + + if (cfgflags & IEEE80211_CHAN_NO_HT40MINUS) + flags |= QLINK_CHAN_NO_HT40MINUS; + + if (cfgflags & IEEE80211_CHAN_NO_80MHZ) + flags |= 
QLINK_CHAN_NO_80MHZ; + + if (cfgflags & IEEE80211_CHAN_NO_160MHZ) + flags |= QLINK_CHAN_NO_160MHZ; + + return flags; +} + +static u32 qtnf_reg_rule_flags_parse(u32 qflags) +{ + u32 flags = 0; + + if (qflags & QLINK_RRF_NO_OFDM) + flags |= NL80211_RRF_NO_OFDM; + + if (qflags & QLINK_RRF_NO_CCK) + flags |= NL80211_RRF_NO_CCK; + + if (qflags & QLINK_RRF_NO_INDOOR) + flags |= NL80211_RRF_NO_INDOOR; + + if (qflags & QLINK_RRF_NO_OUTDOOR) + flags |= NL80211_RRF_NO_OUTDOOR; + + if (qflags & QLINK_RRF_DFS) + flags |= NL80211_RRF_DFS; + + if (qflags & QLINK_RRF_PTP_ONLY) + flags |= NL80211_RRF_PTP_ONLY; + + if (qflags & QLINK_RRF_PTMP_ONLY) + flags |= NL80211_RRF_PTMP_ONLY; + + if (qflags & QLINK_RRF_NO_IR) + flags |= NL80211_RRF_NO_IR; + + if (qflags & QLINK_RRF_AUTO_BW) + flags |= NL80211_RRF_AUTO_BW; + + if (qflags & QLINK_RRF_IR_CONCURRENT) + flags |= NL80211_RRF_IR_CONCURRENT; + + if (qflags & QLINK_RRF_NO_HT40MINUS) + flags |= NL80211_RRF_NO_HT40MINUS; + + if (qflags & QLINK_RRF_NO_HT40PLUS) + flags |= NL80211_RRF_NO_HT40PLUS; + + if (qflags & QLINK_RRF_NO_80MHZ) + flags |= NL80211_RRF_NO_80MHZ; + + if (qflags & QLINK_RRF_NO_160MHZ) + flags |= NL80211_RRF_NO_160MHZ; + + return flags; +} + +void qlink_utils_regrule_q2nl(struct ieee80211_reg_rule *rule, + const struct qlink_tlv_reg_rule *tlv) +{ + rule->freq_range.start_freq_khz = le32_to_cpu(tlv->start_freq_khz); + rule->freq_range.end_freq_khz = le32_to_cpu(tlv->end_freq_khz); + rule->freq_range.max_bandwidth_khz = + le32_to_cpu(tlv->max_bandwidth_khz); + rule->power_rule.max_antenna_gain = le32_to_cpu(tlv->max_antenna_gain); + rule->power_rule.max_eirp = le32_to_cpu(tlv->max_eirp); + rule->dfs_cac_ms = le32_to_cpu(tlv->dfs_cac_ms); + rule->flags = qtnf_reg_rule_flags_parse(le32_to_cpu(tlv->flags)); +} diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h index 781ea7fe79f2..f873beed2ae7 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h +++ b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h @@ -79,5 +79,10 @@ bool qtnf_utils_is_bit_set(const u8 *arr, unsigned int bit, unsigned int arr_max_len); void qlink_acl_data_cfg2q(const struct cfg80211_acl_data *acl, struct qlink_acl_data *qacl); +enum qlink_band qlink_utils_band_cfg2q(enum nl80211_band band); +enum qlink_dfs_state qlink_utils_dfs_state_cfg2q(enum nl80211_dfs_state state); +u32 qlink_utils_chflags_cfg2q(u32 cfgflags); +void qlink_utils_regrule_q2nl(struct ieee80211_reg_rule *rule, + const struct qlink_tlv_reg_rule *tlv_rule); #endif /* _QTN_FMAC_QLINK_UTIL_H_ */ diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index 217d2a7a43c7..ac746c322554 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -448,6 +448,11 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw) /* <2> work queue */ rtlpriv->works.hw = hw; rtlpriv->works.rtl_wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name); + if (unlikely(!rtlpriv->works.rtl_wq)) { + pr_err("Failed to allocate work queue\n"); + return; + } + INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq, (void *)rtl_watchdog_wq_callback); INIT_DELAYED_WORK(&rtlpriv->works.ips_nic_off_wq, diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c index 48ca52102cef..4055e0ab75ba 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pci.c +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c @@ -499,16 +499,16 @@ static void 
_rtl_pci_tx_chk_waitq(struct ieee80211_hw *hw) memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc)); - spin_lock_bh(&rtlpriv->locks.waitq_lock); + spin_lock(&rtlpriv->locks.waitq_lock); if (!skb_queue_empty(&mac->skb_waitq[tid]) && (ring->entries - skb_queue_len(&ring->queue) > rtlhal->max_earlymode_num)) { skb = skb_dequeue(&mac->skb_waitq[tid]); } else { - spin_unlock_bh(&rtlpriv->locks.waitq_lock); + spin_unlock(&rtlpriv->locks.waitq_lock); break; } - spin_unlock_bh(&rtlpriv->locks.waitq_lock); + spin_unlock(&rtlpriv->locks.waitq_lock); /* Some macaddr can't do early mode. like * multicast/broadcast/no_qos data diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c index 203e7b574e84..e2e0bfbc24fe 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c @@ -600,6 +600,8 @@ void rtl88e_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished) u1rsvdpageloc, 3); skb = dev_alloc_skb(totalpacketlen); + if (!skb) + return; skb_put_data(skb, &reserved_page_packet, totalpacketlen); rtstatus = rtl_cmd_send_packet(hw, skb); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c index 106011a24827..483dc8bdc555 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c @@ -372,8 +372,9 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv = rtl_priv(hw); struct rx_fwinfo_88e *p_drvinfo; struct ieee80211_hdr *hdr; - + u8 wake_match; u32 phystatus = GET_RX_DESC_PHYST(pdesc); + status->packet_report_type = (u8)GET_RX_STATUS_DESC_RPT_SEL(pdesc); if (status->packet_report_type == TX_REPORT2) status->length = (u16)GET_RX_RPT2_DESC_PKT_LEN(pdesc); @@ -399,18 +400,18 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw, status->is_cck = RTL8188_RX_HAL_IS_CCK_RATE(status->rate); status->macid = GET_RX_DESC_MACID(pdesc); - if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc)) - status->wake_match = BIT(2); + if (GET_RX_STATUS_DESC_PATTERN_MATCH(pdesc)) + wake_match = BIT(2); else if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc)) - status->wake_match = BIT(1); + wake_match = BIT(1); else if (GET_RX_STATUS_DESC_UNICAST_MATCH(pdesc)) - status->wake_match = BIT(0); + wake_match = BIT(0); else - status->wake_match = 0; - if (status->wake_match) + wake_match = 0; + if (wake_match) RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD, "GGGGGGGGGGGGGet Wakeup Packet!! 
WakeMatch=%d\n", - status->wake_match); + wake_match); rx_status->freq = hw->conf.chandef.chan->center_freq; rx_status->band = hw->conf.chandef.chan->band; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c index 18c76990a089..86b1b88cc4ed 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c @@ -623,6 +623,8 @@ void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, u1rsvdpageloc, 3); skb = dev_alloc_skb(totalpacketlen); + if (!skb) + return; skb_put_data(skb, &reserved_page_packet, totalpacketlen); if (cmd_send_packet) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c index 7c5b54b71a92..67305ce915ec 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c @@ -744,6 +744,8 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished) u1rsvdpageloc, 3); skb = dev_alloc_skb(totalpacketlen); + if (!skb) + return; skb_put_data(skb, &reserved_page_packet, totalpacketlen); rtstatus = rtl_cmd_send_packet(hw, skb); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c index 09cf8180e4ff..d297cfc0fd2b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c @@ -331,6 +331,7 @@ bool rtl92ee_rx_query_desc(struct ieee80211_hw *hw, struct rx_fwinfo *p_drvinfo; struct ieee80211_hdr *hdr; u32 phystatus = GET_RX_DESC_PHYST(pdesc); + u8 wake_match; if (GET_RX_STATUS_DESC_RPT_SEL(pdesc) == 0) status->packet_report_type = NORMAL_RX; @@ -350,18 +351,18 @@ bool rtl92ee_rx_query_desc(struct ieee80211_hw *hw, status->is_cck = RTL92EE_RX_HAL_IS_CCK_RATE(status->rate); status->macid = GET_RX_DESC_MACID(pdesc); - if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc)) - status->wake_match = BIT(2); + if (GET_RX_STATUS_DESC_PATTERN_MATCH(pdesc)) + wake_match = BIT(2); else if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc)) - status->wake_match = BIT(1); + wake_match = BIT(1); else if (GET_RX_STATUS_DESC_UNICAST_MATCH(pdesc)) - status->wake_match = BIT(0); + wake_match = BIT(0); else - status->wake_match = 0; - if (status->wake_match) + wake_match = 0; + if (wake_match) RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD, "GGGGGGGGGGGGGet Wakeup Packet!! 
WakeMatch=%d\n", - status->wake_match); + wake_match); rx_status->freq = hw->conf.chandef.chan->center_freq; rx_status->band = hw->conf.chandef.chan->band; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c index 514891ea2c64..d8260c7afe09 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c @@ -663,7 +663,7 @@ void rtl8723e_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw) } -void rtl8723e_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw) +static void rtl8723e_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c index be451a6f7dbe..33481232fad0 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c @@ -448,6 +448,8 @@ void rtl8723e_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished) u1rsvdpageloc, 3); skb = dev_alloc_skb(totalpacketlen); + if (!skb) + return; skb_put_data(skb, &reserved_page_packet, totalpacketlen); rtstatus = rtl_cmd_send_packet(hw, skb); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c index 4d7fa27f55ca..aa56058af56e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c @@ -562,6 +562,8 @@ void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, u1rsvdpageloc, sizeof(u1rsvdpageloc)); skb = dev_alloc_skb(totalpacketlen); + if (!skb) + return; skb_put_data(skb, &reserved_page_packet, totalpacketlen); rtstatus = rtl_cmd_send_packet(hw, skb); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c index 9ada9a06c6ea..d87ba03fe78f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c @@ -300,7 +300,7 @@ bool rtl8723be_rx_query_desc(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv = rtl_priv(hw); struct rx_fwinfo_8723be *p_drvinfo; struct ieee80211_hdr *hdr; - + u8 wake_match; u32 phystatus = GET_RX_DESC_PHYST(pdesc); status->length = (u16)GET_RX_DESC_PKT_LEN(pdesc); @@ -329,18 +329,18 @@ bool rtl8723be_rx_query_desc(struct ieee80211_hw *hw, status->packet_report_type = NORMAL_RX; - if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc)) - status->wake_match = BIT(2); + if (GET_RX_STATUS_DESC_PATTERN_MATCH(pdesc)) + wake_match = BIT(2); else if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc)) - status->wake_match = BIT(1); + wake_match = BIT(1); else if (GET_RX_STATUS_DESC_UNICAST_MATCH(pdesc)) - status->wake_match = BIT(0); + wake_match = BIT(0); else - status->wake_match = 0; - if (status->wake_match) + wake_match = 0; + if (wake_match) RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD, "GGGGGGGGGGGGGet Wakeup Packet!! 
WakeMatch=%d\n", - status->wake_match); + wake_match); rx_status->freq = hw->conf.chandef.chan->center_freq; rx_status->band = hw->conf.chandef.chan->band; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c index dc0eb692088f..fe32d397d287 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c @@ -1623,6 +1623,8 @@ out: &reserved_page_packet_8812[0], totalpacketlen); skb = dev_alloc_skb(totalpacketlen); + if (!skb) + return; skb_put_data(skb, &reserved_page_packet_8812, totalpacketlen); rtstatus = rtl_cmd_send_packet(hw, skb); @@ -1759,6 +1761,8 @@ out: &reserved_page_packet_8821[0], totalpacketlen); skb = dev_alloc_skb(totalpacketlen); + if (!skb) + return; skb_put_data(skb, &reserved_page_packet_8821, totalpacketlen); rtstatus = rtl_cmd_send_packet(hw, skb); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c index db5e628b17ed..7b6faf38e09c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c @@ -436,7 +436,7 @@ bool rtl8821ae_rx_query_desc(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv = rtl_priv(hw); struct rx_fwinfo_8821ae *p_drvinfo; struct ieee80211_hdr *hdr; - + u8 wake_match; u32 phystatus = GET_RX_DESC_PHYST(pdesc); status->length = (u16)GET_RX_DESC_PKT_LEN(pdesc); @@ -473,18 +473,18 @@ bool rtl8821ae_rx_query_desc(struct ieee80211_hw *hw, status->packet_report_type = NORMAL_RX; if (GET_RX_STATUS_DESC_PATTERN_MATCH(pdesc)) - status->wake_match = BIT(2); + wake_match = BIT(2); else if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc)) - status->wake_match = BIT(1); + wake_match = BIT(1); else if (GET_RX_STATUS_DESC_UNICAST_MATCH(pdesc)) - status->wake_match = BIT(0); + wake_match = BIT(0); else - status->wake_match = 0; + wake_match = 0; - if (status->wake_match) + if (wake_match) RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD, "GGGGGGGGGGGGGet Wakeup Packet!! 
WakeMatch=%d\n", - status->wake_match); + wake_match); rx_status->freq = hw->conf.chandef.chan->center_freq; rx_status->band = hw->conf.chandef.chan->band; diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index e32e9ffa3192..518aaa875361 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -2138,7 +2138,6 @@ struct rtl_stats { u8 packet_report_type; u32 macid; - u8 wake_match; u32 bt_rx_rssi_percentage; u32 macid_valid_entry[2]; }; diff --git a/drivers/net/wireless/ti/wlcore/testmode.c b/drivers/net/wireless/ti/wlcore/testmode.c index dcb2c8b0feb6..affefaaea1ea 100644 --- a/drivers/net/wireless/ti/wlcore/testmode.c +++ b/drivers/net/wireless/ti/wlcore/testmode.c @@ -372,8 +372,8 @@ int wl1271_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 nla_cmd; int err; - err = nla_parse(tb, WL1271_TM_ATTR_MAX, data, len, wl1271_tm_policy, - NULL); + err = nla_parse_deprecated(tb, WL1271_TM_ATTR_MAX, data, len, + wl1271_tm_policy, NULL); if (err) return err; diff --git a/drivers/net/wireless/ti/wlcore/vendor_cmd.c b/drivers/net/wireless/ti/wlcore/vendor_cmd.c index 7f34ec077ee5..75756fb8e7b0 100644 --- a/drivers/net/wireless/ti/wlcore/vendor_cmd.c +++ b/drivers/net/wireless/ti/wlcore/vendor_cmd.c @@ -41,8 +41,8 @@ wlcore_vendor_cmd_smart_config_start(struct wiphy *wiphy, if (!data) return -EINVAL; - ret = nla_parse(tb, MAX_WLCORE_VENDOR_ATTR, data, data_len, - wlcore_vendor_attr_policy, NULL); + ret = nla_parse_deprecated(tb, MAX_WLCORE_VENDOR_ATTR, data, data_len, + wlcore_vendor_attr_policy, NULL); if (ret) return ret; @@ -122,8 +122,8 @@ wlcore_vendor_cmd_smart_config_set_group_key(struct wiphy *wiphy, if (!data) return -EINVAL; - ret = nla_parse(tb, MAX_WLCORE_VENDOR_ATTR, data, data_len, - wlcore_vendor_attr_policy, NULL); + ret = nla_parse_deprecated(tb, MAX_WLCORE_VENDOR_ATTR, data, data_len, + wlcore_vendor_attr_policy, NULL); if (ret) return ret; diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index 936c0b3e0ba2..05847eb91a1b 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h @@ -248,6 +248,22 @@ struct xenvif_hash { struct xenvif_hash_cache cache; }; +struct backend_info { + struct xenbus_device *dev; + struct xenvif *vif; + + /* This is the state that will be reflected in xenstore when any + * active hotplug script completes. + */ + enum xenbus_state state; + + enum xenbus_state frontend_state; + struct xenbus_watch hotplug_status_watch; + u8 have_hotplug_status_watch:1; + + const char *hotplug_script; +}; + struct xenvif { /* Unique identifier for this interface. 
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 936c0b3e0ba2..05847eb91a1b 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -248,6 +248,22 @@ struct xenvif_hash {
 	struct xenvif_hash_cache cache;
 };
+struct backend_info {
+	struct xenbus_device *dev;
+	struct xenvif *vif;
+
+	/* This is the state that will be reflected in xenstore when any
+	 * active hotplug script completes.
+	 */
+	enum xenbus_state state;
+
+	enum xenbus_state frontend_state;
+	struct xenbus_watch hotplug_status_watch;
+	u8 have_hotplug_status_watch:1;
+
+	const char *hotplug_script;
+};
+
 struct xenvif {
 	/* Unique identifier for this interface. */
 	domid_t domid;
@@ -283,6 +299,8 @@ struct xenvif {
 	struct xenbus_watch credit_watch;
 	struct xenbus_watch mcast_ctrl_watch;
+	struct backend_info *be;
+
 	spinlock_t lock;
 #ifdef CONFIG_DEBUG_FS
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 6da12518e693..783198844dd7 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -148,8 +148,7 @@ void xenvif_wake_queue(struct xenvif_queue *queue)
 }
 static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
-			       struct net_device *sb_dev,
-			       select_queue_fallback_t fallback)
+			       struct net_device *sb_dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
 	unsigned int size = vif->hash.size;
@@ -162,7 +161,8 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
 		return 0;
 	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
-		return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
+		return netdev_pick_tx(dev, skb, NULL) %
+		       dev->real_num_tx_queues;
 	xenvif_set_skb_hash(vif, skb);
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 330ddb64930f..41c9e8f2e520 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -22,22 +22,6 @@
 #include <linux/vmalloc.h>
 #include <linux/rtnetlink.h>
-struct backend_info {
-	struct xenbus_device *dev;
-	struct xenvif *vif;
-
-	/* This is the state that will be reflected in xenstore when any
-	 * active hotplug script completes.
-	 */
-	enum xenbus_state state;
-
-	enum xenbus_state frontend_state;
-	struct xenbus_watch hotplug_status_watch;
-	u8 have_hotplug_status_watch:1;
-
-	const char *hotplug_script;
-};
-
 static int connect_data_rings(struct backend_info *be,
 			      struct xenvif_queue *queue);
 static void connect(struct backend_info *be);
@@ -472,6 +456,7 @@ static int backend_create_xenvif(struct backend_info *be)
 		return err;
 	}
 	be->vif = vif;
+	vif->be = be;
 	kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
 	return 0;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index c914c24f880b..8d33970a2950 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -543,8 +543,7 @@ static int xennet_count_skb_slots(struct sk_buff *skb)
 }
 static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
-			       struct net_device *sb_dev,
-			       select_queue_fallback_t fallback)
+			       struct net_device *sb_dev)
 {
 	unsigned int num_queues = dev->real_num_tx_queues;
 	u32 hash;
@@ -2038,7 +2037,7 @@ static void netback_changed(struct xenbus_device *dev,
 	case XenbusStateClosed:
 		if (dev->state == XenbusStateClosed)
 			break;
-		/* Missed the backend's CLOSING state -- fallthrough */
+		/* Fall through - Missed the backend's CLOSING state.
*/ case XenbusStateClosing: xenbus_frontend_closed(dev); break; diff --git a/drivers/nfc/st95hf/core.c b/drivers/nfc/st95hf/core.c index 01acb6e53365..99727a2edda0 100644 --- a/drivers/nfc/st95hf/core.c +++ b/drivers/nfc/st95hf/core.c @@ -781,9 +781,7 @@ static irqreturn_t st95hf_irq_thread_handler(int irq, void *st95hfcontext) int result = 0; int res_len; static bool wtx; - struct device *dev; struct device *spidevice; - struct nfc_digital_dev *nfcddev; struct sk_buff *skb_resp; struct st95hf_context *stcontext = (struct st95hf_context *)st95hfcontext; @@ -828,8 +826,6 @@ static irqreturn_t st95hf_irq_thread_handler(int irq, void *st95hfcontext) goto end; } - dev = &stcontext->nfcdev->dev; - nfcddev = stcontext->ddev; if (skb_resp->data[2] == WTX_REQ_FROM_TAG) { /* Request for new FWT from tag */ result = st95hf_handle_wtx(stcontext, true, skb_resp->data[3]); diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index c851cf6e01c4..784a2e76a1b0 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -10,6 +10,7 @@ #ifndef __QETH_CORE_H__ #define __QETH_CORE_H__ +#include <linux/completion.h> #include <linux/if.h> #include <linux/if_arp.h> #include <linux/etherdevice.h> @@ -21,6 +22,7 @@ #include <linux/hashtable.h> #include <linux/ip.h> #include <linux/refcount.h> +#include <linux/wait.h> #include <linux/workqueue.h> #include <net/ipv6.h> @@ -163,6 +165,12 @@ struct qeth_vnicc_info { bool rx_bcast_enabled; }; +static inline int qeth_is_adp_supported(struct qeth_ipa_info *ipa, + enum qeth_ipa_setadp_cmd func) +{ + return (ipa->supported_funcs & func); +} + static inline int qeth_is_ipa_supported(struct qeth_ipa_info *ipa, enum qeth_ipa_funcs func) { @@ -176,9 +184,7 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa, } #define qeth_adp_supported(c, f) \ - qeth_is_ipa_supported(&c->options.adp, f) -#define qeth_adp_enabled(c, f) \ - qeth_is_ipa_enabled(&c->options.adp, f) + qeth_is_adp_supported(&c->options.adp, f) #define qeth_is_supported(c, f) \ qeth_is_ipa_supported(&c->options.ipa4, f) #define qeth_is_enabled(c, f) \ @@ -217,6 +223,9 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa, /* QDIO queue and buffer handling */ /*****************************************************************************/ #define QETH_MAX_QUEUES 4 +#define QETH_IQD_MIN_TXQ 2 /* One for ucast, one for mcast. 
*/ +#define QETH_IQD_MCAST_TXQ 0 +#define QETH_IQD_MIN_UCAST_TXQ 1 #define QETH_IN_BUF_SIZE_DEFAULT 65536 #define QETH_IN_BUF_COUNT_DEFAULT 64 #define QETH_IN_BUF_COUNT_HSDEFAULT 128 @@ -365,34 +374,6 @@ enum qeth_header_ids { #define QETH_HDR_EXT_CSUM_TRANSP_REQ 0x20 #define QETH_HDR_EXT_UDP 0x40 /*bit off for TCP*/ -enum qeth_qdio_buffer_states { - /* - * inbound: read out by driver; owned by hardware in order to be filled - * outbound: owned by driver in order to be filled - */ - QETH_QDIO_BUF_EMPTY, - /* - * inbound: filled by hardware; owned by driver in order to be read out - * outbound: filled by driver; owned by hardware in order to be sent - */ - QETH_QDIO_BUF_PRIMED, - /* - * inbound: not applicable - * outbound: identified to be pending in TPQ - */ - QETH_QDIO_BUF_PENDING, - /* - * inbound: not applicable - * outbound: found in completion queue - */ - QETH_QDIO_BUF_IN_CQ, - /* - * inbound: not applicable - * outbound: handled via transfer pending / completion queue - */ - QETH_QDIO_BUF_HANDLED_DELAYED, -}; - enum qeth_qdio_info_states { QETH_QDIO_UNINITIALIZED, QETH_QDIO_ALLOCATED, @@ -424,6 +405,19 @@ struct qeth_qdio_q { int next_buf_to_init; }; +enum qeth_qdio_out_buffer_state { + /* Owned by driver, in order to be filled. */ + QETH_QDIO_BUF_EMPTY, + /* Filled by driver; owned by hardware in order to be sent. */ + QETH_QDIO_BUF_PRIMED, + /* Identified to be pending in TPQ. */ + QETH_QDIO_BUF_PENDING, + /* Found in completion queue. */ + QETH_QDIO_BUF_IN_CQ, + /* Handled via transfer pending / completion queue. */ + QETH_QDIO_BUF_HANDLED_DELAYED, +}; + struct qeth_qdio_out_buffer { struct qdio_buffer *buffer; atomic_t state; @@ -462,7 +456,6 @@ struct qeth_card_stats { u64 rx_errors; u64 rx_dropped; u64 rx_multicast; - u64 tx_errors; }; struct qeth_out_q_stats { @@ -477,6 +470,7 @@ struct qeth_out_q_stats { u64 skbs_linearized_fail; u64 tso_bytes; u64 packing_mode_switch; + u64 stopped; /* rtnl_link_stats64 */ u64 tx_packets; @@ -490,14 +484,12 @@ struct qeth_qdio_out_q { struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q]; struct qdio_outbuf_state *bufstates; /* convenience pointer */ struct qeth_out_q_stats stats; - int queue_no; + u8 next_buf_to_fill; + u8 max_elements; + u8 queue_no; + u8 do_pack; struct qeth_card *card; atomic_t state; - int do_pack; - /* - * index of buffer to be filled by driver; state EMPTY or PACKING - */ - int next_buf_to_fill; /* * number of buffers that are currently filled (PRIMED) * -> these buffers are hardware-owned @@ -507,6 +499,11 @@ struct qeth_qdio_out_q { atomic_t set_pci_flags_count; }; +static inline bool qeth_out_queue_is_full(struct qeth_qdio_out_q *queue) +{ + return atomic_read(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q; +} + struct qeth_qdio_info { atomic_t state; /* input */ @@ -538,7 +535,6 @@ struct qeth_qdio_info { enum qeth_channel_states { CH_STATE_UP, CH_STATE_DOWN, - CH_STATE_ACTIVATING, CH_STATE_HALTED, CH_STATE_STOPPED, CH_STATE_RCD, @@ -585,7 +581,10 @@ struct qeth_cmd_buffer { enum qeth_cmd_buffer_state state; struct qeth_channel *channel; struct qeth_reply *reply; + long timeout; unsigned char *data; + void (*finalize)(struct qeth_card *card, struct qeth_cmd_buffer *iob, + unsigned int length); void (*callback)(struct qeth_card *card, struct qeth_channel *channel, struct qeth_cmd_buffer *iob); }; @@ -610,6 +609,11 @@ struct qeth_channel { int io_buf_no; }; +static inline bool qeth_trylock_channel(struct qeth_channel *channel) +{ + return atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0; +} + /** * OSA card 
related definitions */ @@ -631,17 +635,15 @@ struct qeth_seqno { __u32 pdu_hdr; __u32 pdu_hdr_ack; __u16 ipa; - __u32 pkt_seqno; }; struct qeth_reply { struct list_head list; - wait_queue_head_t wait_q; + struct completion received; int (*callback)(struct qeth_card *, struct qeth_reply *, unsigned long); u32 seqno; unsigned long offset; - atomic_t received; int rc; void *param; refcount_t refcnt; @@ -663,7 +665,7 @@ struct qeth_card_info { __u16 func_level; char mcl_level[QETH_MCL_LENGTH + 1]; u8 open_when_online:1; - int guestlan; + u8 is_vm_nic:1; int mac_bits; enum qeth_card_types type; enum qeth_link_types link_type; @@ -774,18 +776,19 @@ struct qeth_card { struct qeth_card_options options; struct workqueue_struct *event_wq; + struct workqueue_struct *cmd_wq; wait_queue_head_t wait_q; - spinlock_t mclock; unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; DECLARE_HASHTABLE(mac_htable, 4); DECLARE_HASHTABLE(ip_htable, 4); + struct mutex ip_lock; DECLARE_HASHTABLE(ip_mc_htable, 4); + struct work_struct rx_mode_work; struct work_struct kernel_thread_starter; spinlock_t thread_mask_lock; unsigned long thread_start_mask; unsigned long thread_allowed_mask; unsigned long thread_running_mask; - spinlock_t ip_lock; struct qeth_ipato ipato; struct list_head cmd_waiter_list; /* QDIO buffer handling */ @@ -827,6 +830,15 @@ static inline bool qeth_netdev_is_registered(struct net_device *dev) return dev->netdev_ops != NULL; } +static inline u16 qeth_iqd_translate_txq(struct net_device *dev, u16 txq) +{ + if (txq == QETH_IQD_MCAST_TXQ) + return dev->num_tx_queues - 1; + if (txq == dev->num_tx_queues - 1) + return QETH_IQD_MCAST_TXQ; + return txq; +} + static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf, unsigned int elements) { @@ -869,6 +881,16 @@ static inline int qeth_get_ip_version(struct sk_buff *skb) } } +static inline int qeth_get_ether_cast_type(struct sk_buff *skb) +{ + u8 *addr = eth_hdr(skb)->h_dest; + + if (is_multicast_ether_addr(addr)) + return is_broadcast_ether_addr(addr) ? 
RTN_BROADCAST : + RTN_MULTICAST; + return RTN_UNICAST; +} + static inline void qeth_rx_csum(struct qeth_card *card, struct sk_buff *skb, u8 flags) { @@ -922,18 +944,7 @@ static inline int qeth_send_simple_setassparms_v6(struct qeth_card *card, data, QETH_PROT_IPV6); } -int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, - int ipv); -static inline struct qeth_qdio_out_q *qeth_get_tx_queue(struct qeth_card *card, - struct sk_buff *skb, - int ipv, int cast_type) -{ - if (IS_IQD(card) && cast_type != RTN_UNICAST) - return card->qdio.out_qs[card->qdio.no_out_queues - 1]; - if (!card->qdio.do_prio_queueing) - return card->qdio.out_qs[card->qdio.default_out_queue]; - return card->qdio.out_qs[qeth_get_priority_queue(card, skb, ipv)]; -} +int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb); extern struct qeth_discipline qeth_l2_discipline; extern struct qeth_discipline qeth_l3_discipline; @@ -979,12 +990,10 @@ void qeth_clear_ipacmd_list(struct qeth_card *); int qeth_qdio_clear_card(struct qeth_card *, int); void qeth_clear_working_pool_list(struct qeth_card *); void qeth_clear_cmd_buffers(struct qeth_channel *); -void qeth_clear_qdio_buffers(struct qeth_card *); +void qeth_drain_output_queues(struct qeth_card *card); void qeth_setadp_promisc_mode(struct qeth_card *); int qeth_setadpparms_change_macaddr(struct qeth_card *); void qeth_tx_timeout(struct net_device *); -void qeth_prepare_control_data(struct qeth_card *, int, - struct qeth_cmd_buffer *); void qeth_release_buffer(struct qeth_channel *, struct qeth_cmd_buffer *); void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, u16 cmd_length); @@ -1016,6 +1025,8 @@ netdev_features_t qeth_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features); void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats); +u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb, + u8 cast_type, struct net_device *sb_dev); int qeth_open(struct net_device *dev); int qeth_stop(struct net_device *dev); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 44bd6f04c145..009f2c0ec504 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -61,13 +61,13 @@ static struct kmem_cache *qeth_qdio_outbuf_cache; static struct device *qeth_core_root_dev; static struct lock_class_key qdio_out_skb_queue_key; -static void qeth_send_control_data_cb(struct qeth_card *card, - struct qeth_channel *channel, - struct qeth_cmd_buffer *iob); +static void qeth_issue_next_read_cb(struct qeth_card *card, + struct qeth_channel *channel, + struct qeth_cmd_buffer *iob); static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *); static void qeth_free_buffer_pool(struct qeth_card *); static int qeth_qdio_establish(struct qeth_card *); -static void qeth_free_qdio_buffers(struct qeth_card *); +static void qeth_free_qdio_queues(struct qeth_card *card); static void qeth_notify_skbs(struct qeth_qdio_out_q *queue, struct qeth_qdio_out_buffer *buf, enum iucv_tx_notify notification); @@ -85,7 +85,7 @@ static void qeth_close_dev_handler(struct work_struct *work) static const char *qeth_get_cardname(struct qeth_card *card) { - if (card->info.guestlan) { + if (IS_VM_NIC(card)) { switch (card->info.type) { case QETH_CARD_TYPE_OSD: return " Virtual NIC QDIO"; @@ -120,7 +120,7 @@ static const char *qeth_get_cardname(struct qeth_card *card) /* max length to be returned: 14 */ const char 
*qeth_get_cardname_short(struct qeth_card *card)
 {
-	if (card->info.guestlan) {
+	if (IS_VM_NIC(card)) {
 		switch (card->info.type) {
 		case QETH_CARD_TYPE_OSD:
 			return "Virt.NIC QDIO";
@@ -511,7 +511,9 @@ static int __qeth_issue_next_read(struct qeth_card *card)
 			 CARD_DEVID(card));
 		return -ENOMEM;
 	}
+
 	qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data);
+	iob->callback = qeth_issue_next_read_cb;
 	QETH_CARD_TEXT(card, 6, "noirqpnd");
 	rc = ccw_device_start(channel->ccwdev, channel->ccw,
 			      (addr_t) iob, 0, 0);
@@ -542,11 +544,10 @@ static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
 {
 	struct qeth_reply *reply;
-	reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC);
+	reply = kzalloc(sizeof(*reply), GFP_KERNEL);
 	if (reply) {
 		refcount_set(&reply->refcnt, 1);
-		atomic_set(&reply->received, 0);
-		init_waitqueue_head(&reply->wait_q);
+		init_completion(&reply->received);
 	}
 	return reply;
 }
@@ -576,10 +577,10 @@ static void qeth_dequeue_reply(struct qeth_card *card, struct qeth_reply *reply)
 	spin_unlock_irq(&card->lock);
 }
-static void qeth_notify_reply(struct qeth_reply *reply)
+static void qeth_notify_reply(struct qeth_reply *reply, int reason)
 {
-	atomic_inc(&reply->received);
-	wake_up(&reply->wait_q);
+	reply->rc = reason;
+	complete(&reply->received);
 }
 static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
@@ -664,10 +665,8 @@ void qeth_clear_ipacmd_list(struct qeth_card *card)
 	QETH_CARD_TEXT(card, 4, "clipalst");
 	spin_lock_irqsave(&card->lock, flags);
-	list_for_each_entry(reply, &card->cmd_waiter_list, list) {
-		reply->rc = -EIO;
-		qeth_notify_reply(reply);
-	}
+	list_for_each_entry(reply, &card->cmd_waiter_list, list)
+		qeth_notify_reply(reply, -EIO);
 	spin_unlock_irqrestore(&card->lock, flags);
 }
 EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);
@@ -675,9 +674,6 @@
 static int qeth_check_idx_response(struct qeth_card *card,
 		unsigned char *buffer)
 {
-	if (!buffer)
-		return 0;
-
 	QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
 	if ((buffer[2] & 0xc0) == 0xc0) {
 		QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
@@ -704,6 +700,7 @@ static struct qeth_cmd_buffer *__qeth_get_buffer(struct qeth_channel *channel)
 	do {
 		if (channel->iob[index].state == BUF_STATE_FREE) {
 			channel->iob[index].state = BUF_STATE_LOCKED;
+			channel->iob[index].timeout = QETH_TIMEOUT;
 			channel->io_buf_no = (channel->io_buf_no + 1) %
 					     QETH_CMD_BUFFER_NO;
 			memset(channel->iob[index].data, 0, QETH_BUFSIZE);
@@ -722,7 +719,7 @@ void qeth_release_buffer(struct qeth_channel *channel,
 	spin_lock_irqsave(&channel->iob_lock, flags);
 	iob->state = BUF_STATE_FREE;
-	iob->callback = qeth_send_control_data_cb;
+	iob->callback = NULL;
 	if (iob->reply) {
 		qeth_put_reply(iob->reply);
 		iob->reply = NULL;
@@ -743,10 +740,8 @@ static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
 {
 	struct qeth_reply *reply = iob->reply;
-	if (reply) {
-		reply->rc = rc;
-		qeth_notify_reply(reply);
-	}
+	if (reply)
+		qeth_notify_reply(reply, rc);
 	qeth_release_buffer(iob->channel, iob);
 }
@@ -780,9 +775,9 @@ void qeth_clear_cmd_buffers(struct qeth_channel *channel)
 }
 EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers);
-static void qeth_send_control_data_cb(struct qeth_card *card,
-				      struct qeth_channel *channel,
-				      struct qeth_cmd_buffer *iob)
+static void qeth_issue_next_read_cb(struct qeth_card *card,
+				    struct qeth_channel *channel,
+				    struct qeth_cmd_buffer *iob)
 {
 	struct qeth_ipa_cmd *cmd = NULL;
 	struct qeth_reply *reply = NULL;
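/*
 * Editor's illustration - not part of the patch. The qeth hunks above
 * convert the reply tracking from an open-coded wait queue plus atomic
 * flag to a struct completion. A minimal sketch of that pattern under
 * the same assumptions; struct foo_reply and the foo_* helpers are
 * hypothetical, the completion calls are the real kernel API.
 */
#include <linux/completion.h>
#include <linux/slab.h>

struct foo_reply {
	struct completion received;
	int rc;
};

static struct foo_reply *foo_alloc_reply(void)
{
	struct foo_reply *reply = kzalloc(sizeof(*reply), GFP_KERNEL);

	if (reply)
		init_completion(&reply->received);
	return reply;
}

/* Producer side: post the result, then wake the waiter exactly once. */
static void foo_notify_reply(struct foo_reply *reply, int reason)
{
	reply->rc = reason;
	complete(&reply->received);
}

/* Consumer side: sleep until notified, a timeout, or a signal. */
static int foo_wait_reply(struct foo_reply *reply, long timeout)
{
	timeout = wait_for_completion_interruptible_timeout(&reply->received,
							    timeout);
	if (timeout <= 0)
		return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
	return reply->rc;
}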
@@ -846,11 +841,8 @@ static void qeth_send_control_data_cb(struct qeth_card *card,
 		}
 	}
-	if (rc <= 0) {
-		reply->rc = rc;
-		qeth_notify_reply(reply);
-	}
-
+	if (rc <= 0)
+		qeth_notify_reply(reply, rc);
 	qeth_put_reply(reply);
 out:
@@ -1173,20 +1165,19 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
 	qeth_release_skbs(buf);
-	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) {
+	for (i = 0; i < queue->max_elements; ++i) {
 		if (buf->buffer->element[i].addr && buf->is_header[i])
 			kmem_cache_free(qeth_core_header_cache,
 					buf->buffer->element[i].addr);
 		buf->is_header[i] = 0;
 	}
-	qeth_scrub_qdio_buffer(buf->buffer,
-			       QETH_MAX_BUFFER_ELEMENTS(queue->card));
+	qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
 	buf->next_element_to_fill = 0;
 	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
 }
-static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free)
+static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
 {
 	int j;
@@ -1202,19 +1193,18 @@ static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free)
 	}
 }
-void qeth_clear_qdio_buffers(struct qeth_card *card)
+void qeth_drain_output_queues(struct qeth_card *card)
 {
 	int i;
 	QETH_CARD_TEXT(card, 2, "clearqdbf");
 	/* clear outbound buffers to free skbs */
 	for (i = 0; i < card->qdio.no_out_queues; ++i) {
-		if (card->qdio.out_qs[i]) {
-			qeth_clear_outq_buffers(card->qdio.out_qs[i], 0);
-		}
+		if (card->qdio.out_qs[i])
+			qeth_drain_output_queue(card->qdio.out_qs[i], false);
 	}
 }
-EXPORT_SYMBOL_GPL(qeth_clear_qdio_buffers);
+EXPORT_SYMBOL_GPL(qeth_drain_output_queues);
 static void qeth_free_buffer_pool(struct qeth_card *card)
 {
@@ -1273,7 +1263,6 @@ static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers)
 			break;
 		channel->iob[cnt].state = BUF_STATE_FREE;
 		channel->iob[cnt].channel = channel;
-		channel->iob[cnt].callback = qeth_send_control_data_cb;
 	}
 	if (cnt < QETH_CMD_BUFFER_NO) {
 		qeth_clean_channel(channel);
@@ -1285,30 +1274,28 @@ static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers)
 	return 0;
 }
-static void qeth_set_single_write_queues(struct qeth_card *card)
+static void qeth_osa_set_output_queues(struct qeth_card *card, bool single)
 {
-	if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) &&
-	    (card->qdio.no_out_queues == 4))
-		qeth_free_qdio_buffers(card);
+	unsigned int count = single ? 1 : card->dev->num_tx_queues;
-	card->qdio.no_out_queues = 1;
-	if (card->qdio.default_out_queue != 0)
-		dev_info(&card->gdev->dev, "Priority Queueing not supported\n");
+	rtnl_lock();
+	netif_set_real_num_tx_queues(card->dev, count);
+	rtnl_unlock();
-	card->qdio.default_out_queue = 0;
-}
+	if (card->qdio.no_out_queues == count)
+		return;
-static void qeth_set_multiple_write_queues(struct qeth_card *card)
-{
-	if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) &&
-	    (card->qdio.no_out_queues == 1)) {
-		qeth_free_qdio_buffers(card);
-		card->qdio.default_out_queue = 2;
-	}
-	card->qdio.no_out_queues = 4;
+	if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
+		qeth_free_qdio_queues(card);
+
+	if (count == 1)
+		dev_info(&card->gdev->dev, "Priority Queueing not supported\n");
+
+	card->qdio.default_out_queue = single ?
0 : QETH_DEFAULT_QUEUE; + card->qdio.no_out_queues = count; } -static void qeth_update_from_chp_desc(struct qeth_card *card) +static int qeth_update_from_chp_desc(struct qeth_card *card) { struct ccw_device *ccwdev; struct channel_path_desc_fmt0 *chp_dsc; @@ -1318,21 +1305,18 @@ static void qeth_update_from_chp_desc(struct qeth_card *card) ccwdev = card->data.ccwdev; chp_dsc = ccw_device_get_chp_desc(ccwdev, 0); if (!chp_dsc) - goto out; + return -ENOMEM; card->info.func_level = 0x4100 + chp_dsc->desc; - if (card->info.type == QETH_CARD_TYPE_IQD) - goto out; - /* CHPP field bit 6 == 1 -> single queue */ - if ((chp_dsc->chpp & 0x02) == 0x02) - qeth_set_single_write_queues(card); - else - qeth_set_multiple_write_queues(card); -out: + if (IS_OSD(card) || IS_OSX(card)) + /* CHPP field bit 6 == 1 -> single queue */ + qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02); + kfree(chp_dsc); QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues); QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level); + return 0; } static void qeth_init_qdio_info(struct qeth_card *card) @@ -1341,12 +1325,11 @@ static void qeth_init_qdio_info(struct qeth_card *card) atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED); card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT; card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; - card->qdio.no_out_queues = QETH_MAX_QUEUES; /* inbound */ card->qdio.no_in_queues = 1; card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT; - if (card->info.type == QETH_CARD_TYPE_IQD) + if (IS_IQD(card)) card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT; else card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT; @@ -1409,9 +1392,7 @@ static void qeth_setup_card(struct qeth_card *card) card->info.type = CARD_RDEV(card)->id.driver_info; card->state = CARD_STATE_DOWN; - spin_lock_init(&card->mclock); spin_lock_init(&card->lock); - spin_lock_init(&card->ip_lock); spin_lock_init(&card->thread_mask_lock); mutex_init(&card->conf_mutex); mutex_init(&card->discipline_mutex); @@ -1451,7 +1432,8 @@ static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev) CARD_WDEV(card) = gdev->cdev[1]; CARD_DDEV(card) = gdev->cdev[2]; - card->event_wq = alloc_ordered_workqueue("%s", 0, dev_name(&gdev->dev)); + card->event_wq = alloc_ordered_workqueue("%s_event", 0, + dev_name(&gdev->dev)); if (!card->event_wq) goto out_wq; if (qeth_setup_channel(&card->read, true)) @@ -1571,7 +1553,7 @@ int qeth_qdio_clear_card(struct qeth_card *card, int use_halt) switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED, QETH_QDIO_CLEANING)) { case QETH_QDIO_ESTABLISHED: - if (card->info.type == QETH_CARD_TYPE_IQD) + if (IS_IQD(card)) rc = qdio_shutdown(CARD_DDEV(card), QDIO_FLAG_CLEANUP_USING_HALT); else @@ -1644,8 +1626,8 @@ static void qeth_configure_unitaddr(struct qeth_card *card, char *prcd) card->info.chpid = prcd[30]; card->info.unit_addr2 = prcd[31]; card->info.cula = prcd[63]; - card->info.guestlan = ((prcd[0x10] == _ascebc['V']) && - (prcd[0x11] == _ascebc['M'])); + card->info.is_vm_nic = ((prcd[0x10] == _ascebc['V']) && + (prcd[0x11] == _ascebc['M'])); } static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card) @@ -1709,13 +1691,11 @@ static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card) { enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED; - if (card->info.type == QETH_CARD_TYPE_OSM || - card->info.type == QETH_CARD_TYPE_OSN) + if (IS_OSM(card) || IS_OSN(card)) disc = QETH_DISCIPLINE_LAYER2; - else if (card->info.guestlan) - 
disc = (card->info.type == QETH_CARD_TYPE_IQD) ? - QETH_DISCIPLINE_LAYER3 : - qeth_vm_detect_layer(card); + else if (IS_VM_NIC(card)) + disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 : + qeth_vm_detect_layer(card); switch (disc) { case QETH_DISCIPLINE_LAYER2: @@ -1771,121 +1751,16 @@ static void qeth_init_func_level(struct qeth_card *card) } } -static int qeth_idx_activate_get_answer(struct qeth_card *card, - struct qeth_channel *channel, - void (*reply_cb)(struct qeth_card *, - struct qeth_channel *, - struct qeth_cmd_buffer *)) -{ - struct qeth_cmd_buffer *iob; - int rc; - - QETH_DBF_TEXT(SETUP, 2, "idxanswr"); - iob = qeth_get_buffer(channel); - if (!iob) - return -ENOMEM; - iob->callback = reply_cb; - qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data); - - wait_event(card->wait_q, - atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0); - QETH_DBF_TEXT(SETUP, 6, "noirqpnd"); - spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); - rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw, - (addr_t) iob, 0, 0, QETH_TIMEOUT); - spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); - - if (rc) { - QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc); - QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); - atomic_set(&channel->irq_pending, 0); - qeth_release_buffer(channel, iob); - wake_up(&card->wait_q); - return rc; - } - rc = wait_event_interruptible_timeout(card->wait_q, - channel->state == CH_STATE_UP, QETH_TIMEOUT); - if (rc == -ERESTARTSYS) - return rc; - if (channel->state != CH_STATE_UP) { - rc = -ETIME; - QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); - } else - rc = 0; - return rc; -} - -static int qeth_idx_activate_channel(struct qeth_card *card, - struct qeth_channel *channel, - void (*reply_cb)(struct qeth_card *, - struct qeth_channel *, - struct qeth_cmd_buffer *)) +static void qeth_idx_finalize_cmd(struct qeth_card *card, + struct qeth_cmd_buffer *iob, + unsigned int length) { - struct qeth_cmd_buffer *iob; - __u16 temp; - __u8 tmp; - int rc; - struct ccw_dev_id temp_devid; - - QETH_DBF_TEXT(SETUP, 2, "idxactch"); + qeth_setup_ccw(iob->channel->ccw, CCW_CMD_WRITE, length, iob->data); - iob = qeth_get_buffer(channel); - if (!iob) - return -ENOMEM; - iob->callback = reply_cb; - qeth_setup_ccw(channel->ccw, CCW_CMD_WRITE, IDX_ACTIVATE_SIZE, - iob->data); - if (channel == &card->write) { - memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE); - memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), - &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH); + memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr, + QETH_SEQ_NO_LENGTH); + if (iob->channel == &card->write) card->seqno.trans_hdr++; - } else { - memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE); - memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), - &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH); - } - tmp = ((u8)card->dev->dev_port) | 0x80; - memcpy(QETH_IDX_ACT_PNO(iob->data), &tmp, 1); - memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data), - &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH); - memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data), - &card->info.func_level, sizeof(__u16)); - ccw_device_get_id(CARD_DDEV(card), &temp_devid); - memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &temp_devid.devno, 2); - temp = (card->info.cula << 8) + card->info.unit_addr2; - memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2); - - wait_event(card->wait_q, - atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0); - QETH_DBF_TEXT(SETUP, 6, "noirqpnd"); - spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); - rc = 
ccw_device_start_timeout(channel->ccwdev, channel->ccw, - (addr_t) iob, 0, 0, QETH_TIMEOUT); - spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); - - if (rc) { - QETH_DBF_MESSAGE(2, "Error1 in activating channel. rc=%d\n", - rc); - QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); - atomic_set(&channel->irq_pending, 0); - qeth_release_buffer(channel, iob); - wake_up(&card->wait_q); - return rc; - } - rc = wait_event_interruptible_timeout(card->wait_q, - channel->state == CH_STATE_ACTIVATING, QETH_TIMEOUT); - if (rc == -ERESTARTSYS) - return rc; - if (channel->state != CH_STATE_ACTIVATING) { - dev_warn(&channel->ccwdev->dev, "The qeth device driver" - " failed to recover an error on the device\n"); - QETH_DBF_MESSAGE(2, "IDX activate timed out on channel %x\n", - CCW_DEVID(channel->ccwdev)); - QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME); - return -ETIME; - } - return qeth_idx_activate_get_answer(card, channel, reply_cb); } static int qeth_peer_func_level(int level) @@ -1897,112 +1772,21 @@ static int qeth_peer_func_level(int level) return level; } -static void qeth_idx_write_cb(struct qeth_card *card, - struct qeth_channel *channel, - struct qeth_cmd_buffer *iob) -{ - __u16 temp; - - QETH_DBF_TEXT(SETUP , 2, "idxwrcb"); - - if (channel->state == CH_STATE_DOWN) { - channel->state = CH_STATE_ACTIVATING; - goto out; - } - - if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) { - if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == QETH_IDX_ACT_ERR_EXCL) - dev_err(&channel->ccwdev->dev, - "The adapter is used exclusively by another " - "host\n"); - else - QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n", - CCW_DEVID(channel->ccwdev)); - goto out; - } - memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); - if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) { - QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n", - CCW_DEVID(channel->ccwdev), - card->info.func_level, temp); - goto out; - } - channel->state = CH_STATE_UP; -out: - qeth_release_buffer(channel, iob); -} - -static void qeth_idx_read_cb(struct qeth_card *card, - struct qeth_channel *channel, - struct qeth_cmd_buffer *iob) -{ - __u16 temp; - - QETH_DBF_TEXT(SETUP , 2, "idxrdcb"); - if (channel->state == CH_STATE_DOWN) { - channel->state = CH_STATE_ACTIVATING; - goto out; - } - - if (qeth_check_idx_response(card, iob->data)) - goto out; - - if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) { - switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) { - case QETH_IDX_ACT_ERR_EXCL: - dev_err(&channel->ccwdev->dev, - "The adapter is used exclusively by another " - "host\n"); - break; - case QETH_IDX_ACT_ERR_AUTH: - case QETH_IDX_ACT_ERR_AUTH_USER: - dev_err(&channel->ccwdev->dev, - "Setting the device online failed because of " - "insufficient authorization\n"); - break; - default: - QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n", - CCW_DEVID(channel->ccwdev)); - } - QETH_CARD_TEXT_(card, 2, "idxread%c", - QETH_IDX_ACT_CAUSE_CODE(iob->data)); - goto out; - } - - memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); - if (temp != qeth_peer_func_level(card->info.func_level)) { - QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n", - CCW_DEVID(channel->ccwdev), - card->info.func_level, temp); - goto out; - } - memcpy(&card->token.issuer_rm_r, - QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data), - QETH_MPC_TOKEN_LENGTH); - memcpy(&card->info.mcl_level[0], - QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH); - channel->state = CH_STATE_UP; 
-out: - qeth_release_buffer(channel, iob); -} - -void qeth_prepare_control_data(struct qeth_card *card, int len, - struct qeth_cmd_buffer *iob) +static void qeth_mpc_finalize_cmd(struct qeth_card *card, + struct qeth_cmd_buffer *iob, + unsigned int length) { - qeth_setup_ccw(iob->channel->ccw, CCW_CMD_WRITE, len, iob->data); - iob->callback = qeth_release_buffer_cb; + qeth_idx_finalize_cmd(card, iob, length); - memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), - &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH); - card->seqno.trans_hdr++; memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data), &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH); card->seqno.pdu_hdr++; memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data), &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH); - QETH_DBF_HEX(CTRL, 2, iob->data, min(len, QETH_DBF_CTRL_LEN)); + + iob->reply->seqno = QETH_IDX_COMMAND_SEQNO; + iob->callback = qeth_release_buffer_cb; } -EXPORT_SYMBOL_GPL(qeth_prepare_control_data); /** * qeth_send_control_data() - send control command to the card @@ -2035,17 +1819,12 @@ static int qeth_send_control_data(struct qeth_card *card, int len, void *reply_param) { struct qeth_channel *channel = iob->channel; + long timeout = iob->timeout; int rc; struct qeth_reply *reply = NULL; - unsigned long timeout, event_timeout; - struct qeth_ipa_cmd *cmd = NULL; QETH_CARD_TEXT(card, 2, "sendctl"); - if (card->read_or_write_problem) { - qeth_release_buffer(channel, iob); - return -EIO; - } reply = qeth_alloc_reply(card); if (!reply) { qeth_release_buffer(channel, iob); @@ -2058,27 +1837,24 @@ static int qeth_send_control_data(struct qeth_card *card, int len, qeth_get_reply(reply); iob->reply = reply; - while (atomic_cmpxchg(&channel->irq_pending, 0, 1)) ; - - if (IS_IPA(iob->data)) { - cmd = __ipa_cmd(iob); - cmd->hdr.seqno = card->seqno.ipa++; - reply->seqno = cmd->hdr.seqno; - event_timeout = QETH_IPA_TIMEOUT; - } else { - reply->seqno = QETH_IDX_COMMAND_SEQNO; - event_timeout = QETH_TIMEOUT; + timeout = wait_event_interruptible_timeout(card->wait_q, + qeth_trylock_channel(channel), + timeout); + if (timeout <= 0) { + qeth_put_reply(reply); + qeth_release_buffer(channel, iob); + return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME; } - qeth_prepare_control_data(card, len, iob); - qeth_enqueue_reply(card, reply); + iob->finalize(card, iob, len); + QETH_DBF_HEX(CTRL, 2, iob->data, min(len, QETH_DBF_CTRL_LEN)); - timeout = jiffies + event_timeout; + qeth_enqueue_reply(card, reply); QETH_CARD_TEXT(card, 6, "noirqpnd"); spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw, - (addr_t) iob, 0, 0, event_timeout); + (addr_t) iob, 0, 0, timeout); spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); if (rc) { QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n", @@ -2092,30 +1868,211 @@ static int qeth_send_control_data(struct qeth_card *card, int len, return rc; } - /* we have only one long running ipassist, since we can ensure - process context of this command we can sleep */ - if (cmd && cmd->hdr.command == IPA_CMD_SETIP && - cmd->hdr.prot_version == QETH_PROT_IPV4) { - if (!wait_event_timeout(reply->wait_q, - atomic_read(&reply->received), event_timeout)) - goto time_err; - } else { - while (!atomic_read(&reply->received)) { - if (time_after(jiffies, timeout)) - goto time_err; - cpu_relax(); - } - } + timeout = wait_for_completion_interruptible_timeout(&reply->received, + timeout); + if (timeout <= 0) + rc = (timeout == -ERESTARTSYS) ? 
-EINTR : -ETIME; qeth_dequeue_reply(card, reply); - rc = reply->rc; + if (!rc) + rc = reply->rc; qeth_put_reply(reply); return rc; +} -time_err: - qeth_dequeue_reply(card, reply); - qeth_put_reply(reply); - return -ETIME; +static int qeth_idx_check_activate_response(struct qeth_card *card, + struct qeth_channel *channel, + struct qeth_cmd_buffer *iob) +{ + int rc; + + rc = qeth_check_idx_response(card, iob->data); + if (rc) + return rc; + + if (QETH_IS_IDX_ACT_POS_REPLY(iob->data)) + return 0; + + /* negative reply: */ + QETH_DBF_TEXT_(SETUP, 2, "idxneg%c", + QETH_IDX_ACT_CAUSE_CODE(iob->data)); + + switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) { + case QETH_IDX_ACT_ERR_EXCL: + dev_err(&channel->ccwdev->dev, + "The adapter is used exclusively by another host\n"); + return -EBUSY; + case QETH_IDX_ACT_ERR_AUTH: + case QETH_IDX_ACT_ERR_AUTH_USER: + dev_err(&channel->ccwdev->dev, + "Setting the device online failed because of insufficient authorization\n"); + return -EPERM; + default: + QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n", + CCW_DEVID(channel->ccwdev)); + return -EIO; + } +} + +static void qeth_idx_query_read_cb(struct qeth_card *card, + struct qeth_channel *channel, + struct qeth_cmd_buffer *iob) +{ + u16 peer_level; + int rc; + + QETH_DBF_TEXT(SETUP, 2, "idxrdcb"); + + rc = qeth_idx_check_activate_response(card, channel, iob); + if (rc) + goto out; + + memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); + if (peer_level != qeth_peer_func_level(card->info.func_level)) { + QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n", + CCW_DEVID(channel->ccwdev), + card->info.func_level, peer_level); + rc = -EINVAL; + goto out; + } + + memcpy(&card->token.issuer_rm_r, + QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data), + QETH_MPC_TOKEN_LENGTH); + memcpy(&card->info.mcl_level[0], + QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH); + +out: + qeth_notify_reply(iob->reply, rc); + qeth_release_buffer(channel, iob); +} + +static void qeth_idx_query_write_cb(struct qeth_card *card, + struct qeth_channel *channel, + struct qeth_cmd_buffer *iob) +{ + u16 peer_level; + int rc; + + QETH_DBF_TEXT(SETUP, 2, "idxwrcb"); + + rc = qeth_idx_check_activate_response(card, channel, iob); + if (rc) + goto out; + + memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); + if ((peer_level & ~0x0100) != + qeth_peer_func_level(card->info.func_level)) { + QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n", + CCW_DEVID(channel->ccwdev), + card->info.func_level, peer_level); + rc = -EINVAL; + } + +out: + qeth_notify_reply(iob->reply, rc); + qeth_release_buffer(channel, iob); +} + +static void qeth_idx_finalize_query_cmd(struct qeth_card *card, + struct qeth_cmd_buffer *iob, + unsigned int length) +{ + qeth_setup_ccw(iob->channel->ccw, CCW_CMD_READ, length, iob->data); +} + +static void qeth_idx_activate_cb(struct qeth_card *card, + struct qeth_channel *channel, + struct qeth_cmd_buffer *iob) +{ + qeth_notify_reply(iob->reply, 0); + qeth_release_buffer(channel, iob); +} + +static void qeth_idx_setup_activate_cmd(struct qeth_card *card, + struct qeth_cmd_buffer *iob) +{ + u16 addr = (card->info.cula << 8) + card->info.unit_addr2; + u8 port = ((u8)card->dev->dev_port) | 0x80; + struct ccw_dev_id dev_id; + + ccw_device_get_id(CARD_DDEV(card), &dev_id); + iob->finalize = qeth_idx_finalize_cmd; + iob->callback = qeth_idx_activate_cb; + + memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1); + 
memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data), + &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH); + memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data), + &card->info.func_level, 2); + memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &dev_id.devno, 2); + memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2); +} + +static int qeth_idx_activate_read_channel(struct qeth_card *card) +{ + struct qeth_channel *channel = &card->read; + struct qeth_cmd_buffer *iob; + int rc; + + QETH_DBF_TEXT(SETUP, 2, "idxread"); + + iob = qeth_get_buffer(channel); + if (!iob) + return -ENOMEM; + + memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE); + qeth_idx_setup_activate_cmd(card, iob); + + rc = qeth_send_control_data(card, IDX_ACTIVATE_SIZE, iob, NULL, NULL); + if (rc) + return rc; + + iob = qeth_get_buffer(channel); + if (!iob) + return -ENOMEM; + + iob->finalize = qeth_idx_finalize_query_cmd; + iob->callback = qeth_idx_query_read_cb; + rc = qeth_send_control_data(card, QETH_BUFSIZE, iob, NULL, NULL); + if (rc) + return rc; + + channel->state = CH_STATE_UP; + return 0; +} + +static int qeth_idx_activate_write_channel(struct qeth_card *card) +{ + struct qeth_channel *channel = &card->write; + struct qeth_cmd_buffer *iob; + int rc; + + QETH_DBF_TEXT(SETUP, 2, "idxwrite"); + + iob = qeth_get_buffer(channel); + if (!iob) + return -ENOMEM; + + memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE); + qeth_idx_setup_activate_cmd(card, iob); + + rc = qeth_send_control_data(card, IDX_ACTIVATE_SIZE, iob, NULL, NULL); + if (rc) + return rc; + + iob = qeth_get_buffer(channel); + if (!iob) + return -ENOMEM; + + iob->finalize = qeth_idx_finalize_query_cmd; + iob->callback = qeth_idx_query_write_cb; + rc = qeth_send_control_data(card, QETH_BUFSIZE, iob, NULL, NULL); + if (rc) + return rc; + + channel->state = CH_STATE_UP; + return 0; } static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply, @@ -2140,7 +2097,9 @@ static int qeth_cm_enable(struct qeth_card *card) QETH_DBF_TEXT(SETUP, 2, "cmenable"); iob = qeth_wait_for_buffer(&card->write); + iob->finalize = qeth_mpc_finalize_cmd; memcpy(iob->data, CM_ENABLE, CM_ENABLE_SIZE); + memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data), &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH); memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data), @@ -2173,7 +2132,9 @@ static int qeth_cm_setup(struct qeth_card *card) QETH_DBF_TEXT(SETUP, 2, "cmsetup"); iob = qeth_wait_for_buffer(&card->write); + iob->finalize = qeth_mpc_finalize_cmd; memcpy(iob->data, CM_SETUP, CM_SETUP_SIZE); + memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data), &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH); memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data), @@ -2206,7 +2167,7 @@ static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu) /* adjust RX buffer size to new max MTU: */ card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE; if (dev->max_mtu && dev->max_mtu != max_mtu) - qeth_free_qdio_buffers(card); + qeth_free_qdio_queues(card); } else { if (dev->mtu) new_mtu = dev->mtu; @@ -2253,7 +2214,7 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply, memcpy(&card->token.ulp_filter_r, QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data), QETH_MPC_TOKEN_LENGTH); - if (card->info.type == QETH_CARD_TYPE_IQD) { + if (IS_IQD(card)) { memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2); mtu = qeth_get_mtu_outof_framesize(framesize); } else { @@ -2290,6 +2251,7 @@ static int qeth_ulp_enable(struct qeth_card *card) QETH_DBF_TEXT(SETUP, 2, "ulpenabl"); iob = 
qeth_wait_for_buffer(&card->write); + iob->finalize = qeth_mpc_finalize_cmd; memcpy(iob->data, ULP_ENABLE, ULP_ENABLE_SIZE); *(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port; @@ -2336,6 +2298,7 @@ static int qeth_ulp_setup(struct qeth_card *card) QETH_DBF_TEXT(SETUP, 2, "ulpsetup"); iob = qeth_wait_for_buffer(&card->write); + iob->finalize = qeth_mpc_finalize_cmd; memcpy(iob->data, ULP_SETUP, ULP_SETUP_SIZE); memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data), @@ -2377,12 +2340,12 @@ static void qeth_free_output_queue(struct qeth_qdio_out_q *q) if (!q) return; - qeth_clear_outq_buffers(q, 1); + qeth_drain_output_queue(q, true); qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); kfree(q); } -static struct qeth_qdio_out_q *qeth_alloc_qdio_out_buf(void) +static struct qeth_qdio_out_q *qeth_alloc_output_queue(void) { struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL); @@ -2396,7 +2359,7 @@ static struct qeth_qdio_out_q *qeth_alloc_qdio_out_buf(void) return q; } -static int qeth_alloc_qdio_buffers(struct qeth_card *card) +static int qeth_alloc_qdio_queues(struct qeth_card *card) { int i, j; @@ -2417,11 +2380,12 @@ static int qeth_alloc_qdio_buffers(struct qeth_card *card) /* outbound */ for (i = 0; i < card->qdio.no_out_queues; ++i) { - card->qdio.out_qs[i] = qeth_alloc_qdio_out_buf(); + card->qdio.out_qs[i] = qeth_alloc_output_queue(); if (!card->qdio.out_qs[i]) goto out_freeoutq; QETH_DBF_TEXT_(SETUP, 2, "outq %i", i); QETH_DBF_HEX(SETUP, 2, &card->qdio.out_qs[i], sizeof(void *)); + card->qdio.out_qs[i]->card = card; card->qdio.out_qs[i]->queue_no = i; /* give outbound qeth_qdio_buffers their qdio_buffers */ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { @@ -2458,7 +2422,7 @@ out_nomem: return -ENOMEM; } -static void qeth_free_qdio_buffers(struct qeth_card *card) +static void qeth_free_qdio_queues(struct qeth_card *card) { int i, j; @@ -2523,6 +2487,7 @@ static int qeth_dm_act(struct qeth_card *card) QETH_DBF_TEXT(SETUP, 2, "dmact"); iob = qeth_wait_for_buffer(&card->write); + iob->finalize = qeth_mpc_finalize_cmd; memcpy(iob->data, DM_ACT, DM_ACT_SIZE); memcpy(QETH_DM_ACT_DEST_ADDR(iob->data), @@ -2564,7 +2529,7 @@ static int qeth_mpc_initialize(struct qeth_card *card) QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); goto out_qdio; } - rc = qeth_alloc_qdio_buffers(card); + rc = qeth_alloc_qdio_queues(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); goto out_qdio; @@ -2572,7 +2537,7 @@ static int qeth_mpc_initialize(struct qeth_card *card) rc = qeth_qdio_establish(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); - qeth_free_qdio_buffers(card); + qeth_free_qdio_queues(card); goto out_qdio; } rc = qeth_qdio_activate(card); @@ -2588,7 +2553,7 @@ static int qeth_mpc_initialize(struct qeth_card *card) return 0; out_qdio: - qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD); + qeth_qdio_clear_card(card, !IS_IQD(card)); qdio_free(CARD_DDEV(card)); return rc; } @@ -2611,8 +2576,7 @@ void qeth_print_status_message(struct qeth_card *card) } /* fallthrough */ case QETH_CARD_TYPE_IQD: - if ((card->info.guestlan) || - (card->info.mcl_level[0] & 0x80)) { + if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) { card->info.mcl_level[0] = (char) _ebcasc[(__u8) card->info.mcl_level[0]]; card->info.mcl_level[1] = (char) _ebcasc[(__u8) @@ -2733,7 +2697,7 @@ static int qeth_init_input_buffer(struct qeth_card *card, int qeth_init_qdio_queues(struct qeth_card *card) { - int i, j; + unsigned int i; int rc; QETH_DBF_TEXT(SETUP, 2, "initqdqs"); @@ -2762,19 +2726,15 @@ 
int qeth_init_qdio_queues(struct qeth_card *card) /* outbound queue */ for (i = 0; i < card->qdio.no_out_queues; ++i) { - qdio_reset_buffers(card->qdio.out_qs[i]->qdio_bufs, - QDIO_MAX_BUFFERS_PER_Q); - for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { - qeth_clear_output_buffer(card->qdio.out_qs[i], - card->qdio.out_qs[i]->bufs[j]); - } - card->qdio.out_qs[i]->card = card; - card->qdio.out_qs[i]->next_buf_to_fill = 0; - card->qdio.out_qs[i]->do_pack = 0; - atomic_set(&card->qdio.out_qs[i]->used_buffers, 0); - atomic_set(&card->qdio.out_qs[i]->set_pci_flags_count, 0); - atomic_set(&card->qdio.out_qs[i]->state, - QETH_OUT_Q_UNLOCKED); + struct qeth_qdio_out_q *queue = card->qdio.out_qs[i]; + + qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); + queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card); + queue->next_buf_to_fill = 0; + queue->do_pack = 0; + atomic_set(&queue->used_buffers, 0); + atomic_set(&queue->set_pci_flags_count, 0); + atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); } return 0; } @@ -2805,12 +2765,26 @@ static void qeth_fill_ipacmd_header(struct qeth_card *card, cmd->hdr.prot_version = prot; } +static void qeth_ipa_finalize_cmd(struct qeth_card *card, + struct qeth_cmd_buffer *iob, + unsigned int length) +{ + qeth_mpc_finalize_cmd(card, iob, length); + + /* override with IPA-specific values: */ + __ipa_cmd(iob)->hdr.seqno = card->seqno.ipa; + iob->reply->seqno = card->seqno.ipa++; +} + void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, u16 cmd_length) { u16 total_length = IPA_PDU_HEADER_SIZE + cmd_length; u8 prot_type = qeth_mpc_select_prot_type(card); + iob->finalize = qeth_ipa_finalize_cmd; + iob->timeout = QETH_IPA_TIMEOUT; + memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE); memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2); memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1); @@ -2866,6 +2840,11 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, QETH_CARD_TEXT(card, 4, "sendipa"); + if (card->read_or_write_problem) { + qeth_release_buffer(iob->channel, iob); + return -EIO; + } + if (reply_cb == NULL) reply_cb = qeth_send_ipa_cmd_cb; memcpy(&length, QETH_IPA_PDU_LEN_TOTAL(iob->data), 2); @@ -3251,7 +3230,7 @@ static void qeth_handle_send_error(struct qeth_card *card, int sbalf15 = buffer->buffer->element[15].sflags; QETH_CARD_TEXT(card, 6, "hdsnderr"); - if (card->info.type == QETH_CARD_TYPE_IQD) { + if (IS_IQD(card)) { if (sbalf15 == 0) { qdio_err = 0; } else { @@ -3348,7 +3327,7 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, if (queue->bufstates) queue->bufstates[bidx].user = buf; - if (queue->card->info.type == QETH_CARD_TYPE_IQD) + if (IS_IQD(queue->card)) continue; if (!queue->do_pack) { @@ -3378,11 +3357,9 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, } QETH_TXQ_STAT_ADD(queue, bufs, count); - netif_trans_update(queue->card->dev); qdio_flags = QDIO_FLAG_SYNC_OUTPUT; if (atomic_read(&queue->set_pci_flags_count)) qdio_flags |= QDIO_FLAG_PCI_OUT; - atomic_add(count, &queue->used_buffers); rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags, queue->queue_no, index, count); if (rc) { @@ -3422,7 +3399,6 @@ static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue) * do_send_packet. So, we check if there is a * packing buffer to be flushed here. 
*/ - netif_stop_queue(queue->card->dev); index = queue->next_buf_to_fill; q_was_packing = queue->do_pack; /* queue->do_pack may change */ @@ -3467,7 +3443,7 @@ int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq) goto out; } - qeth_free_qdio_buffers(card); + qeth_free_qdio_queues(card); card->options.cq = cq; rc = 0; } @@ -3493,7 +3469,7 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err, QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err); if (qdio_err) { - netif_stop_queue(card->dev); + netif_tx_stop_all_queues(card->dev); qeth_schedule_recovery(card); return; } @@ -3549,12 +3525,14 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev, struct qeth_card *card = (struct qeth_card *) card_ptr; struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue]; struct qeth_qdio_out_buffer *buffer; + struct net_device *dev = card->dev; + struct netdev_queue *txq; int i; QETH_CARD_TEXT(card, 6, "qdouhdl"); if (qdio_error & QDIO_ERROR_FATAL) { QETH_CARD_TEXT(card, 2, "achkcond"); - netif_stop_queue(card->dev); + netif_tx_stop_all_queues(dev); qeth_schedule_recovery(card); return; } @@ -3580,7 +3558,7 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev, /* prepare the queue slot for re-use: */ qeth_scrub_qdio_buffer(buffer->buffer, - QETH_MAX_BUFFER_ELEMENTS(card)); + queue->max_elements); if (qeth_init_qdio_out_buf(queue, bidx)) { QETH_CARD_TEXT(card, 2, "outofbuf"); qeth_schedule_recovery(card); @@ -3600,33 +3578,32 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev, } atomic_sub(count, &queue->used_buffers); /* check if we need to do something on this outbound queue */ - if (card->info.type != QETH_CARD_TYPE_IQD) + if (!IS_IQD(card)) qeth_check_outbound_queue(queue); - netif_wake_queue(queue->card->dev); -} - -/* We cannot use outbound queue 3 for unicast packets on HiperSockets */ -static inline int qeth_cut_iqd_prio(struct qeth_card *card, int queue_num) -{ - if ((card->info.type == QETH_CARD_TYPE_IQD) && (queue_num == 3)) - return 2; - return queue_num; + if (IS_IQD(card)) + __queue = qeth_iqd_translate_txq(dev, __queue); + txq = netdev_get_tx_queue(dev, __queue); + /* xmit may have observed the full-condition, but not yet stopped the + * txq. In that case the code below won't trigger, so before returning, + * xmit re-checks the txq's fill level and wakes it up if needed. + */ + if (netif_tx_queue_stopped(txq) && !qeth_out_queue_is_full(queue)) + netif_tx_wake_queue(txq); } /** * Note: Function assumes that we have 4 outbound queues. */
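The reworked output handler wakes only the one netdev_queue that xmit stopped, and only after the fill level has dropped, replacing the old device-wide netif_wake_queue(). The completion half of that scheme can be modelled in plain C11 atomics roughly like this (tx_queue and friends are made-up stand-ins; the xmit half appears after a later hunk):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define QUEUE_MAX 128	/* stands in for QDIO_MAX_BUFFERS_PER_Q */

struct tx_queue {
	atomic_int used;	/* buffers currently owned by the device */
	atomic_bool stopped;	/* models netif_tx_queue_stopped() */
};

static bool queue_is_full(struct tx_queue *q)
{
	return atomic_load(&q->used) >= QUEUE_MAX;
}

/* completion side: return buffers, then wake only this one queue */
static void tx_complete(struct tx_queue *q, int count)
{
	atomic_fetch_sub(&q->used, count);

	/* xmit may stop the queue right after this check; it re-checks
	 * the fill level itself before returning, so nothing is lost */
	if (atomic_load(&q->stopped) && !queue_is_full(q))
		atomic_store(&q->stopped, false);	/* netif_tx_wake_queue() */
}

int main(void)
{
	struct tx_queue q = { .used = QUEUE_MAX, .stopped = true };

	tx_complete(&q, 1);
	printf("stopped=%d used=%d\n", atomic_load(&q.stopped),
	       atomic_load(&q.used));
	return 0;
}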
-int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, - int ipv) +int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb) { - __be16 *tci; + struct vlan_ethhdr *veth = vlan_eth_hdr(skb); u8 tos; switch (card->qdio.do_prio_queueing) { case QETH_PRIO_Q_ING_TOS: case QETH_PRIO_Q_ING_PREC: - switch (ipv) { + switch (qeth_get_ip_version(skb)) { case 4: tos = ipv4_get_dsfield(ip_hdr(skb)); break; @@ -3637,9 +3614,9 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, return card->qdio.default_out_queue; } if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC) - return qeth_cut_iqd_prio(card, ~tos >> 6 & 3); + return ~tos >> 6 & 3; if (tos & IPTOS_MINCOST) - return qeth_cut_iqd_prio(card, 3); + return 3; if (tos & IPTOS_RELIABILITY) return 2; if (tos & IPTOS_THROUGHPUT) @@ -3650,12 +3627,11 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, case QETH_PRIO_Q_ING_SKB: if (skb->priority > 5) return 0; - return qeth_cut_iqd_prio(card, ~skb->priority >> 1 & 3); + return ~skb->priority >> 1 & 3; case QETH_PRIO_Q_ING_VLAN: - tci = &((struct ethhdr *)skb->data)->h_proto; - if (be16_to_cpu(*tci) == ETH_P_8021Q) - return qeth_cut_iqd_prio(card, - ~be16_to_cpu(*(tci + 1)) >> (VLAN_PRIO_SHIFT + 1) & 3); + if (veth->h_vlan_proto == htons(ETH_P_8021Q)) + return ~ntohs(veth->h_vlan_TCI) >> + (VLAN_PRIO_SHIFT + 1) & 3; break; default: break; @@ -3729,8 +3705,8 @@ static int qeth_add_hw_header(struct qeth_qdio_out_q *queue, unsigned int hdr_len, unsigned int proto_len, unsigned int *elements) { - const unsigned int max_elements = QETH_MAX_BUFFER_ELEMENTS(queue->card); const unsigned int contiguous = proto_len ? proto_len : 1; + const unsigned int max_elements = queue->max_elements; unsigned int __elements; addr_t start, end; bool push_ok; @@ -3867,11 +3843,13 @@ static void __qeth_fill_buffer(struct sk_buff *skb, * from qeth_core_header_cache. * @offset: when mapping the skb, start at skb->data + offset * @hd_len: if > 0, build a dedicated header element of this size + * @flush: Prepare the buffer to be flushed, regardless of its fill level. */ static int qeth_fill_buffer(struct qeth_qdio_out_q *queue, struct qeth_qdio_out_buffer *buf, struct sk_buff *skb, struct qeth_hdr *hdr, - unsigned int offset, unsigned int hd_len) + unsigned int offset, unsigned int hd_len, + bool flush) { struct qdio_buffer *buffer = buf->buffer; bool is_first_elem = true; @@ -3900,8 +3878,8 @@ static int qeth_fill_buffer(struct qeth_qdio_out_q *queue, QETH_TXQ_STAT_INC(queue, skbs_pack); /* If the buffer still has free elements, keep using it. */ - if (buf->next_element_to_fill < - QETH_MAX_BUFFER_ELEMENTS(queue->card)) + if (!flush && + buf->next_element_to_fill < queue->max_elements) return 0; } @@ -3918,15 +3896,31 @@ static int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue, { int index = queue->next_buf_to_fill; struct qeth_qdio_out_buffer *buffer = queue->bufs[index]; + struct netdev_queue *txq; + bool stopped = false; - /* - * check if buffer is empty to make sure that we do not 'overtake' - * ourselves and try to fill a buffer that is already primed + /* Just a sanity check: the wake/stop logic should ensure that we always + * get a free buffer. */
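The rewritten qeth_get_priority_queue() keeps the same bit trick as before: the two top bits of the ToS byte (IP precedence) or of the VLAN PCP are inverted, so the highest precedence lands on queue 0 and the lowest on queue 3. A small standalone demo of that mapping (the unsigned cast keeps the shift well-defined in userspace; the kernel writes it as ~tos >> 6 & 3 and relies on the & 3 mask):

#include <stdio.h>

#define VLAN_PRIO_SHIFT 13	/* PCP lives in the top 3 bits of the TCI */

/* ToS precedence (top 2 bits) -> queue: higher precedence, lower queue */
static unsigned int prec_to_queue(unsigned char tos)
{
	return (unsigned char)~tos >> 6 & 3;
}

/* VLAN PCP (its top 2 bits) -> queue, same inversion trick */
static unsigned int pcp_to_queue(unsigned short tci)
{
	return (unsigned short)~tci >> (VLAN_PRIO_SHIFT + 1) & 3;
}

int main(void)
{
	static const unsigned char tos[] = { 0x00, 0x40, 0x80, 0xC0 };
	unsigned int i;

	/* precedence 0, 2, 4, 6 map to queues 3, 2, 1, 0 */
	for (i = 0; i < sizeof(tos); i++)
		printf("ToS 0x%02x -> queue %u\n", tos[i],
		       prec_to_queue(tos[i]));
	printf("VLAN PCP 7 -> queue %u\n",
	       pcp_to_queue(7 << VLAN_PRIO_SHIFT));
	return 0;
}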
if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) return -EBUSY; - qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len); + + txq = netdev_get_tx_queue(queue->card->dev, skb_get_queue_mapping(skb)); + + if (atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) { + /* If a TX completion happens right _here_ and fails to wake + * the txq, then our re-check below will catch the race. + */ + QETH_TXQ_STAT_INC(queue, stopped); + netif_tx_stop_queue(txq); + stopped = true; + } + + qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len, stopped); qeth_flush_buffers(queue, index, 1); + + if (stopped && !qeth_out_queue_is_full(queue)) + netif_tx_start_queue(txq); return 0; } @@ -3936,6 +3930,8 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, int elements_needed) { struct qeth_qdio_out_buffer *buffer; + struct netdev_queue *txq; + bool stopped = false; int start_index; int flush_count = 0; int do_pack = 0; @@ -3947,21 +3943,24 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED); start_index = queue->next_buf_to_fill; buffer = queue->bufs[queue->next_buf_to_fill]; - /* - * check if buffer is empty to make sure that we do not 'overtake' - * ourselves and try to fill a buffer that is already primed + + /* Just a sanity check: the wake/stop logic should ensure that we always + * get a free buffer. */ if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) { atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); return -EBUSY; } + + txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb)); + /* check if we need to switch packing state of this queue */ qeth_switch_to_packing_if_needed(queue); if (queue->do_pack) { do_pack = 1; /* does packet fit in current buffer? */ - if ((QETH_MAX_BUFFER_ELEMENTS(card) - - buffer->next_element_to_fill) < elements_needed) { + if (buffer->next_element_to_fill + elements_needed > + queue->max_elements) { /* ... no -> set state PRIMED */ atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); flush_count++; @@ -3969,8 +3968,8 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, (queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q; buffer = queue->bufs[queue->next_buf_to_fill]; - /* we did a step forward, so check buffer state - * again */ + + /* We stepped forward, so sanity-check again: */ if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) { qeth_flush_buffers(queue, start_index, @@ -3983,8 +3982,18 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, } } - flush_count += qeth_fill_buffer(queue, buffer, skb, hdr, offset, - hd_len); + if (buffer->next_element_to_fill == 0 && + atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) { + /* If a TX completion happens right _here_ and fails to wake + * the txq, then our re-check below will catch the race.
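qeth_do_send_packet_fast() above and qeth_do_send_packet() below follow the same sequence: claim a buffer, stop the txq when claiming the last one, and re-check the fill level after flushing so a completion that raced with the stop cannot leave the queue stopped forever. The xmit half, in the same made-up userspace model as the completion sketch earlier (not the driver's actual code):

#include <stdatomic.h>
#include <stdbool.h>

#define QUEUE_MAX 128

struct tx_queue {
	atomic_int used;
	atomic_bool stopped;
};

static bool queue_is_full(struct tx_queue *q)
{
	return atomic_load(&q->used) >= QUEUE_MAX;
}

/* xmit side: claim a buffer, stop when taking the last one, then
 * re-check so a completion racing with the stop cannot strand us */
static int xmit(struct tx_queue *q)
{
	bool stopped = false;

	if (atomic_fetch_add(&q->used, 1) + 1 >= QUEUE_MAX) {
		atomic_store(&q->stopped, true);	/* netif_tx_stop_queue() */
		stopped = true;
	}

	/* ... fill and flush the claimed buffer here ... */

	if (stopped && !queue_is_full(q))
		atomic_store(&q->stopped, false);	/* wake again */
	return 0;
}

int main(void)
{
	struct tx_queue q = { .used = QUEUE_MAX - 1 };

	return xmit(&q);	/* stops, fills, then re-checks */
}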
+ */ + QETH_TXQ_STAT_INC(queue, stopped); + netif_tx_stop_queue(txq); + stopped = true; + } + + flush_count += qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len, + stopped); if (flush_count) qeth_flush_buffers(queue, start_index, flush_count); else if (!atomic_read(&queue->set_pci_flags_count)) @@ -4015,6 +4024,8 @@ out: if (do_pack) QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count); + if (stopped && !qeth_out_queue_is_full(queue)) + netif_tx_start_queue(txq); return rc; } EXPORT_SYMBOL_GPL(qeth_do_send_packet); @@ -4101,9 +4112,6 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb, } else { if (!push_len) kmem_cache_free(qeth_core_header_cache, hdr); - if (rc == -EBUSY) - /* roll back to ETH header */ - skb_pull(skb, push_len); } return rc; } @@ -4321,9 +4329,8 @@ int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback) QETH_CARD_TEXT(card, 4, "setactlo"); - if ((card->info.type == QETH_CARD_TYPE_OSD || - card->info.type == QETH_CARD_TYPE_OSX) && - qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) { + if ((IS_OSD(card) || IS_OSX(card)) && + qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) { rc = qeth_setadpparms_set_access_ctrl(card, card->options.isolation, fallback); if (rc) { @@ -4348,7 +4355,6 @@ void qeth_tx_timeout(struct net_device *dev) card = dev->ml_priv; QETH_CARD_TEXT(card, 4, "txtimeo"); - QETH_CARD_STAT_INC(card, tx_errors); qeth_schedule_recovery(card); } EXPORT_SYMBOL_GPL(qeth_tx_timeout); @@ -4489,7 +4495,7 @@ static int qeth_snmp_command(struct qeth_card *card, char __user *udata) QETH_CARD_TEXT(card, 3, "snmpcmd"); - if (card->info.guestlan) + if (IS_VM_NIC(card)) return -EOPNOTSUPP; if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) && @@ -4732,14 +4738,6 @@ out: } EXPORT_SYMBOL_GPL(qeth_vm_request_mac); -static int qeth_get_qdio_q_format(struct qeth_card *card) -{ - if (card->info.type == QETH_CARD_TYPE_IQD) - return QDIO_IQDIO_QFMT; - else - return QDIO_QETH_QFMT; -} - static void qeth_determine_capabilities(struct qeth_card *card) { int rc; @@ -4878,7 +4876,8 @@ static int qeth_qdio_establish(struct qeth_card *card) memset(&init_data, 0, sizeof(struct qdio_initialize)); init_data.cdev = CARD_DDEV(card); - init_data.q_format = qeth_get_qdio_q_format(card); + init_data.q_format = IS_IQD(card) ? QDIO_IQDIO_QFMT : + QDIO_QETH_QFMT; init_data.qib_param_field_format = 0; init_data.qib_param_field = qib_param_field; init_data.no_input_qs = card->qdio.no_in_queues; @@ -4890,8 +4889,7 @@ static int qeth_qdio_establish(struct qeth_card *card) init_data.input_sbal_addr_array = in_sbal_ptrs; init_data.output_sbal_addr_array = out_sbal_ptrs; init_data.output_sbal_state_array = card->qdio.out_bufstates; - init_data.scan_threshold = - (card->info.type == QETH_CARD_TYPE_IQD) ? 1 : 32; + init_data.scan_threshold = IS_IQD(card) ? 
1 : 32; if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED, QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) { @@ -4937,7 +4935,7 @@ static void qeth_core_free_card(struct qeth_card *card) qeth_clean_channel(&card->write); qeth_clean_channel(&card->data); destroy_workqueue(card->event_wq); - qeth_free_qdio_buffers(card); + qeth_free_qdio_queues(card); unregister_service_level(&card->qeth_service_level); dev_set_drvdata(&card->gdev->dev, NULL); kfree(card); @@ -4986,12 +4984,14 @@ int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok) QETH_DBF_TEXT(SETUP, 2, "hrdsetup"); atomic_set(&card->force_alloc_skb, 0); - qeth_update_from_chp_desc(card); + rc = qeth_update_from_chp_desc(card); + if (rc) + return rc; retry: if (retries < 3) QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n", CARD_DEVID(card)); - rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD); + rc = qeth_qdio_clear_card(card, !IS_IQD(card)); ccw_device_set_offline(CARD_DDEV(card)); ccw_device_set_offline(CARD_WDEV(card)); ccw_device_set_offline(CARD_RDEV(card)); @@ -5019,8 +5019,9 @@ retriable: qeth_determine_capabilities(card); qeth_init_tokens(card); qeth_init_func_level(card); - rc = qeth_idx_activate_channel(card, &card->read, qeth_idx_read_cb); - if (rc == -ERESTARTSYS) { + + rc = qeth_idx_activate_read_channel(card); + if (rc == -EINTR) { QETH_DBF_TEXT(SETUP, 2, "break2"); return rc; } else if (rc) { @@ -5030,8 +5031,9 @@ retriable: else goto retry; } - rc = qeth_idx_activate_channel(card, &card->write, qeth_idx_write_cb); - if (rc == -ERESTARTSYS) { + + rc = qeth_idx_activate_write_channel(card); + if (rc == -EINTR) { QETH_DBF_TEXT(SETUP, 2, "break3"); return rc; } else if (rc) { @@ -5171,7 +5173,7 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card, return NULL; if (((skb_len >= card->options.rx_sg_cb) && - (!(card->info.type == QETH_CARD_TYPE_OSN)) && + !IS_OSN(card) && (!atomic_read(&card->force_alloc_skb))) || (card->options.cq == QETH_CQ_ENABLED)) use_rx_sg = 1; @@ -5562,13 +5564,17 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card) switch (card->info.type) { case QETH_CARD_TYPE_IQD: - dev = alloc_netdev(0, "hsi%d", NET_NAME_UNKNOWN, ether_setup); + dev = alloc_netdev_mqs(0, "hsi%d", NET_NAME_UNKNOWN, + ether_setup, QETH_MAX_QUEUES, 1); + break; + case QETH_CARD_TYPE_OSM: + dev = alloc_etherdev(0); break; case QETH_CARD_TYPE_OSN: dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN, ether_setup); break; default: - dev = alloc_etherdev(0); + dev = alloc_etherdev_mqs(0, QETH_MAX_QUEUES, 1); } if (!dev) @@ -5590,8 +5596,10 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card) dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->hw_features |= NETIF_F_SG; dev->vlan_features |= NETIF_F_SG; - if (IS_IQD(card)) + if (IS_IQD(card)) { + netif_set_real_num_tx_queues(dev, QETH_IQD_MIN_TXQ); dev->features |= NETIF_F_SG; + } } return dev; @@ -5641,14 +5649,16 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) } qeth_setup_card(card); - qeth_update_from_chp_desc(card); - card->dev = qeth_alloc_netdev(card); if (!card->dev) { rc = -ENOMEM; goto err_card; } + card->qdio.no_out_queues = card->dev->num_tx_queues; + rc = qeth_update_from_chp_desc(card); + if (rc) + goto err_chp_desc; qeth_determine_capabilities(card); enforced_disc = qeth_enforce_discipline(card); switch (enforced_disc) { @@ -5661,9 +5671,8 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) if (rc) goto err_load; - gdev->dev.type = (card->info.type 
!= QETH_CARD_TYPE_OSN) - ? card->discipline->devtype - : &qeth_osn_devtype; + gdev->dev.type = IS_OSN(card) ? &qeth_osn_devtype : + card->discipline->devtype; rc = card->discipline->setup(card->gdev); if (rc) goto err_disc; @@ -5675,6 +5684,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) err_disc: qeth_core_free_discipline(card); err_load: +err_chp_desc: free_netdev(card->dev); err_card: qeth_core_free_card(card); @@ -5706,10 +5716,8 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev) enum qeth_discipline_id def_discipline; if (!card->discipline) { - if (card->info.type == QETH_CARD_TYPE_IQD) - def_discipline = QETH_DISCIPLINE_LAYER3; - else - def_discipline = QETH_DISCIPLINE_LAYER2; + def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 : + QETH_DISCIPLINE_LAYER2; rc = qeth_core_load_discipline(card, def_discipline); if (rc) goto err; @@ -5737,7 +5745,7 @@ static void qeth_core_shutdown(struct ccwgroup_device *gdev) if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap) qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); qeth_qdio_clear_card(card, 0); - qeth_clear_qdio_buffers(card); + qeth_drain_output_queues(card); qdio_free(CARD_DDEV(card)); } @@ -5837,13 +5845,10 @@ int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data); break; case SIOC_QETH_GET_CARD_TYPE: - if ((card->info.type == QETH_CARD_TYPE_OSD || - card->info.type == QETH_CARD_TYPE_OSM || - card->info.type == QETH_CARD_TYPE_OSX) && - !card->info.guestlan) + if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) && + !IS_VM_NIC(card)) return 1; - else - return 0; + return 0; case SIOCGMIIPHY: mii_data = if_mii(rq); mii_data->phy_id = 0; @@ -6193,7 +6198,6 @@ void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->rx_errors = card->stats.rx_errors; stats->rx_dropped = card->stats.rx_dropped; stats->multicast = card->stats.rx_multicast; - stats->tx_errors = card->stats.tx_errors; for (i = 0; i < card->qdio.no_out_queues; i++) { queue = card->qdio.out_qs[i]; @@ -6206,6 +6210,15 @@ void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) } EXPORT_SYMBOL_GPL(qeth_get_stats64); +u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb, + u8 cast_type, struct net_device *sb_dev) +{ + if (cast_type != RTN_UNICAST) + return QETH_IQD_MCAST_TXQ; + return QETH_IQD_MIN_UCAST_TXQ; +} +EXPORT_SYMBOL_GPL(qeth_iqd_select_queue); + int qeth_open(struct net_device *dev) { struct qeth_card *card = dev->ml_priv; @@ -6216,7 +6229,7 @@ int qeth_open(struct net_device *dev) return -EIO; card->data.state = CH_STATE_UP; - netif_start_queue(dev); + netif_tx_start_all_queues(dev); napi_enable(&card->napi); local_bh_disable(); diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h index f8c5d4a9be13..f5237b7c14c4 100644 --- a/drivers/s390/net/qeth_core_mpc.h +++ b/drivers/s390/net/qeth_core_mpc.h @@ -82,7 +82,7 @@ enum qeth_card_types { #define IS_OSM(card) ((card)->info.type == QETH_CARD_TYPE_OSM) #define IS_OSN(card) ((card)->info.type == QETH_CARD_TYPE_OSN) #define IS_OSX(card) ((card)->info.type == QETH_CARD_TYPE_OSX) -#define IS_VM_NIC(card) ((card)->info.guestlan) +#define IS_VM_NIC(card) ((card)->info.is_vm_nic) #define QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE 0x18 /* only the first two bytes are looked at in qeth_get_cardname_short */ diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c index 56deeb6f7bc0..9f392497d570 100644 --- 
a/drivers/s390/net/qeth_core_sys.c +++ b/drivers/s390/net/qeth_core_sys.c @@ -198,6 +198,9 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev, if (!card) return -EINVAL; + if (IS_IQD(card)) + return -EOPNOTSUPP; + mutex_lock(&card->conf_mutex); if (card->state != CARD_STATE_DOWN) { rc = -EPERM; @@ -239,10 +242,6 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev, card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING; card->qdio.default_out_queue = 2; } else if (sysfs_streq(buf, "no_prio_queueing:3")) { - if (card->info.type == QETH_CARD_TYPE_IQD) { - rc = -EPERM; - goto out; - } card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING; card->qdio.default_out_queue = 3; } else if (sysfs_streq(buf, "no_prio_queueing")) { @@ -480,8 +479,7 @@ static ssize_t qeth_dev_isolation_store(struct device *dev, return -EINVAL; mutex_lock(&card->conf_mutex); - if (card->info.type != QETH_CARD_TYPE_OSD && - card->info.type != QETH_CARD_TYPE_OSX) { + if (!IS_OSD(card) && !IS_OSX(card)) { rc = -EOPNOTSUPP; dev_err(&card->gdev->dev, "Adapter does not " "support QDIO data connection isolation\n"); diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c index 93a53fed4cf8..4166eb29f0bd 100644 --- a/drivers/s390/net/qeth_ethtool.c +++ b/drivers/s390/net/qeth_ethtool.c @@ -38,6 +38,7 @@ static const struct qeth_stats txq_stats[] = { QETH_TXQ_STAT("linearized+error skbs", skbs_linearized_fail), QETH_TXQ_STAT("TSO bytes", tso_bytes), QETH_TXQ_STAT("Packing mode switches", packing_mode_switch), + QETH_TXQ_STAT("Queue stopped", stopped), }; static const struct qeth_stats card_stats[] = { @@ -154,6 +155,21 @@ static void qeth_get_drvinfo(struct net_device *dev, CARD_RDEV_ID(card), CARD_WDEV_ID(card), CARD_DDEV_ID(card)); } +static void qeth_get_channels(struct net_device *dev, + struct ethtool_channels *channels) +{ + struct qeth_card *card = dev->ml_priv; + + channels->max_rx = dev->num_rx_queues; + channels->max_tx = card->qdio.no_out_queues; + channels->max_other = 0; + channels->max_combined = 0; + channels->rx_count = dev->real_num_rx_queues; + channels->tx_count = dev->real_num_tx_queues; + channels->other_count = 0; + channels->combined_count = 0; +} + /* Helper function to fill 'advertising' and 'supported' which are the same. */ /* Autoneg and full-duplex are supported and advertised unconditionally. 
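The new qeth_get_channels() simply reports the device's queue limits and current counts, which userspace reads with `ethtool -l <interface>`. For reference, a generic driver would wire up the same hook roughly like this (my_priv and my_get_channels are hypothetical names, not part of this patch):

#include <linux/ethtool.h>
#include <linux/netdevice.h>

struct my_priv {
	unsigned int num_tx_queues;	/* hypothetical driver bookkeeping */
};

static void my_get_channels(struct net_device *dev,
			    struct ethtool_channels *ch)
{
	struct my_priv *priv = netdev_priv(dev);

	/* the hardware maximum ... */
	ch->max_rx = dev->num_rx_queues;
	ch->max_tx = priv->num_tx_queues;
	/* ... and what is currently enabled */
	ch->rx_count = dev->real_num_rx_queues;
	ch->tx_count = dev->real_num_tx_queues;
}

static const struct ethtool_ops my_ethtool_ops = {
	.get_channels = my_get_channels,
};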
*/ /* Always advertise and support all speeds up to specified, and only one */ @@ -359,6 +375,7 @@ const struct ethtool_ops qeth_ethtool_ops = { .get_ethtool_stats = qeth_get_ethtool_stats, .get_sset_count = qeth_get_sset_count, .get_drvinfo = qeth_get_drvinfo, + .get_channels = qeth_get_channels, .get_link_ksettings = qeth_get_link_ksettings, }; diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index c3067fd3bd9e..218801232ca2 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -149,29 +149,16 @@ static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac) return rc; } -static void qeth_l2_del_all_macs(struct qeth_card *card) +static void qeth_l2_drain_rx_mode_cache(struct qeth_card *card) { struct qeth_mac *mac; struct hlist_node *tmp; int i; - spin_lock_bh(&card->mclock); hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) { hash_del(&mac->hnode); kfree(mac); } - spin_unlock_bh(&card->mclock); -} - -static int qeth_l2_get_cast_type(struct qeth_card *card, struct sk_buff *skb) -{ - if (card->info.type == QETH_CARD_TYPE_OSN) - return RTN_UNICAST; - if (is_broadcast_ether_addr(skb->data)) - return RTN_BROADCAST; - if (is_multicast_ether_addr(skb->data)) - return RTN_MULTICAST; - return RTN_UNICAST; } static void qeth_l2_fill_header(struct qeth_qdio_out_q *queue, @@ -292,14 +279,16 @@ static void qeth_l2_stop_card(struct qeth_card *card) qeth_set_allowed_threads(card, 0, 1); + cancel_work_sync(&card->rx_mode_work); + qeth_l2_drain_rx_mode_cache(card); + if (card->state == CARD_STATE_SOFTSETUP) { - qeth_l2_del_all_macs(card); qeth_clear_ipacmd_list(card); card->state = CARD_STATE_HARDSETUP; } if (card->state == CARD_STATE_HARDSETUP) { qeth_qdio_clear_card(card, 0); - qeth_clear_qdio_buffers(card); + qeth_drain_output_queues(card); qeth_clear_working_pool_list(card); card->state = CARD_STATE_DOWN; } @@ -334,13 +323,11 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card, case QETH_HEADER_TYPE_LAYER2: skb->protocol = eth_type_trans(skb, skb->dev); qeth_rx_csum(card, skb, hdr->hdr.l2.flags[1]); - if (skb->protocol == htons(ETH_P_802_2)) - *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno; len = skb->len; napi_gro_receive(&card->napi, skb); break; case QETH_HEADER_TYPE_OSN: - if (card->info.type == QETH_CARD_TYPE_OSN) { + if (IS_OSN(card)) { skb_push(skb, sizeof(struct qeth_hdr)); skb_copy_to_linear_data(skb, hdr, sizeof(struct qeth_hdr)); @@ -391,8 +378,7 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card) } /* some devices don't support a custom MAC address: */ - if (card->info.type == QETH_CARD_TYPE_OSM || - card->info.type == QETH_CARD_TYPE_OSX) + if (IS_OSM(card) || IS_OSX(card)) return (rc) ? 
rc : -EADDRNOTAVAIL; eth_hw_addr_random(card->dev); @@ -515,9 +501,11 @@ static void qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha) hash_add(card->mac_htable, &mac->hnode, mac_hash); } -static void qeth_l2_set_rx_mode(struct net_device *dev) +static void qeth_l2_rx_mode_work(struct work_struct *work) { - struct qeth_card *card = dev->ml_priv; + struct qeth_card *card = container_of(work, struct qeth_card, + rx_mode_work); + struct net_device *dev = card->dev; struct netdev_hw_addr *ha; struct qeth_mac *mac; struct hlist_node *tmp; @@ -526,12 +514,12 @@ static void qeth_l2_set_rx_mode(struct net_device *dev) QETH_CARD_TEXT(card, 3, "setmulti"); - spin_lock_bh(&card->mclock); - + netif_addr_lock_bh(dev); netdev_for_each_mc_addr(ha, dev) qeth_l2_add_mac(card, ha); netdev_for_each_uc_addr(ha, dev) qeth_l2_add_mac(card, ha); + netif_addr_unlock_bh(dev); hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) { switch (mac->disp_flag) { @@ -554,8 +542,6 @@ static void qeth_l2_set_rx_mode(struct net_device *dev) } } - spin_unlock_bh(&card->mclock); - if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) qeth_setadp_promisc_mode(card); else @@ -586,7 +572,7 @@ static int qeth_l2_xmit_osn(struct qeth_card *card, struct sk_buff *skb, } elements += qeth_count_elements(skb, hd_len); - if (elements > QETH_MAX_BUFFER_ELEMENTS(card)) { + if (elements > queue->max_elements) { rc = -E2BIG; goto out; } @@ -603,37 +589,45 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct qeth_card *card = dev->ml_priv; - int cast_type = qeth_l2_get_cast_type(card, skb); - int ipv = qeth_get_ip_version(skb); + u16 txq = skb_get_queue_mapping(skb); struct qeth_qdio_out_q *queue; int tx_bytes = skb->len; int rc; - queue = qeth_get_tx_queue(card, skb, ipv, cast_type); - - netif_stop_queue(dev); + if (IS_IQD(card)) + txq = qeth_iqd_translate_txq(dev, txq); + queue = card->qdio.out_qs[txq]; if (IS_OSN(card)) rc = qeth_l2_xmit_osn(card, skb, queue); else - rc = qeth_xmit(card, skb, queue, ipv, cast_type, + rc = qeth_xmit(card, skb, queue, qeth_get_ip_version(skb), + qeth_get_ether_cast_type(skb), qeth_l2_fill_header); if (!rc) { QETH_TXQ_STAT_INC(queue, tx_packets); QETH_TXQ_STAT_ADD(queue, tx_bytes, tx_bytes); - netif_wake_queue(dev); return NETDEV_TX_OK; - } else if (rc == -EBUSY) { - return NETDEV_TX_BUSY; - } /* else fall through */ + } QETH_TXQ_STAT_INC(queue, tx_dropped); kfree_skb(skb); - netif_wake_queue(dev); return NETDEV_TX_OK; } +static u16 qeth_l2_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev) +{ + struct qeth_card *card = dev->ml_priv; + + if (IS_IQD(card)) + return qeth_iqd_select_queue(dev, skb, + qeth_get_ether_cast_type(skb), + sb_dev); + return qeth_get_priority_queue(card, skb); +} + static const struct device_type qeth_l2_devtype = { .name = "qeth_layer2", .groups = qeth_l2_attr_groups, @@ -653,6 +647,7 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev) } hash_init(card->mac_htable); + INIT_WORK(&card->rx_mode_work, qeth_l2_rx_mode_work); return 0; } @@ -673,12 +668,20 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev) unregister_netdev(card->dev); } +static void qeth_l2_set_rx_mode(struct net_device *dev) +{ + struct qeth_card *card = dev->ml_priv; + + schedule_work(&card->rx_mode_work); +} + static const struct net_device_ops qeth_l2_netdev_ops = { .ndo_open = qeth_open, .ndo_stop = qeth_stop, .ndo_get_stats64 = qeth_get_stats64, .ndo_start_xmit = 
qeth_l2_hard_start_xmit, .ndo_features_check = qeth_features_check, + .ndo_select_queue = qeth_l2_select_queue, .ndo_validate_addr = qeth_l2_validate_addr, .ndo_set_rx_mode = qeth_l2_set_rx_mode, .ndo_do_ioctl = qeth_do_ioctl, @@ -721,7 +724,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card, bool carrier_ok) card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; } - if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) { + if (IS_OSD(card) && !IS_VM_NIC(card)) { card->dev->features |= NETIF_F_SG; /* OSA 3S and earlier has no RX/TX support */ if (qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) { @@ -831,8 +834,7 @@ static int qeth_l2_set_online(struct ccwgroup_device *gdev) /* softsetup */ QETH_DBF_TEXT(SETUP, 2, "softsetp"); - if ((card->info.type == QETH_CARD_TYPE_OSD) || - (card->info.type == QETH_CARD_TYPE_OSX)) { + if (IS_OSD(card) || IS_OSX(card)) { rc = qeth_l2_start_ipassists(card); if (rc) goto out_remove; @@ -1042,13 +1044,13 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len, QETH_CARD_TEXT(card, 5, "osndctrd"); - wait_event(card->wait_q, - atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0); - qeth_prepare_control_data(card, len, iob); + wait_event(card->wait_q, qeth_trylock_channel(channel)); + iob->finalize(card, iob, len); + QETH_DBF_HEX(CTRL, 2, iob->data, min(len, QETH_DBF_CTRL_LEN)); QETH_CARD_TEXT(card, 6, "osnoirqp"); spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw, - (addr_t) iob, 0, 0, QETH_IPA_TIMEOUT); + (addr_t) iob, 0, 0, iob->timeout); spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); if (rc) { QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: " @@ -1456,9 +1458,8 @@ static struct qeth_cmd_buffer *qeth_sbp_build_cmd(struct qeth_card *card, enum qeth_ipa_sbp_cmd sbp_cmd, unsigned int cmd_length) { - enum qeth_ipa_cmds ipa_cmd = (card->info.type == QETH_CARD_TYPE_IQD) ? - IPA_CMD_SETBRIDGEPORT_IQD : - IPA_CMD_SETBRIDGEPORT_OSA; + enum qeth_ipa_cmds ipa_cmd = IS_IQD(card) ? IPA_CMD_SETBRIDGEPORT_IQD : + IPA_CMD_SETBRIDGEPORT_OSA; struct qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd; diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 53712cf26406..0271833da6a2 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -246,9 +246,9 @@ static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) */ if (addr->proto == QETH_PROT_IPV4) { addr->in_progress = 1; - spin_unlock_bh(&card->ip_lock); + mutex_unlock(&card->ip_lock); rc = qeth_l3_register_addr_entry(card, addr); - spin_lock_bh(&card->ip_lock); + mutex_lock(&card->ip_lock); addr->in_progress = 0; } else rc = qeth_l3_register_addr_entry(card, addr); @@ -268,6 +268,30 @@ static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) return rc; } +static int qeth_l3_modify_ip(struct qeth_card *card, struct qeth_ipaddr *addr, + bool add) +{ + int rc; + + mutex_lock(&card->ip_lock); + rc = add ? 
qeth_l3_add_ip(card, addr) : qeth_l3_delete_ip(card, addr); + mutex_unlock(&card->ip_lock); + + return rc; +} + +static void qeth_l3_drain_rx_mode_cache(struct qeth_card *card) +{ + struct qeth_ipaddr *addr; + struct hlist_node *tmp; + int i; + + hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) { + hash_del(&addr->hnode); + kfree(addr); + } +} + static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover) { struct qeth_ipaddr *addr; @@ -276,7 +300,7 @@ static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover) QETH_CARD_TEXT(card, 4, "clearip"); - spin_lock_bh(&card->ip_lock); + mutex_lock(&card->ip_lock); hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) { if (!recover) { @@ -287,19 +311,9 @@ static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover) addr->disp_flag = QETH_DISP_ADDR_ADD; } - spin_unlock_bh(&card->ip_lock); - - spin_lock_bh(&card->mclock); - - hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) { - hash_del(&addr->hnode); - kfree(addr); - } - - spin_unlock_bh(&card->mclock); - - + mutex_unlock(&card->ip_lock); } + static void qeth_l3_recover_ip(struct qeth_card *card) { struct qeth_ipaddr *addr; @@ -309,15 +323,15 @@ static void qeth_l3_recover_ip(struct qeth_card *card) QETH_CARD_TEXT(card, 4, "recovrip"); - spin_lock_bh(&card->ip_lock); + mutex_lock(&card->ip_lock); hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) { if (addr->disp_flag == QETH_DISP_ADDR_ADD) { if (addr->proto == QETH_PROT_IPV4) { addr->in_progress = 1; - spin_unlock_bh(&card->ip_lock); + mutex_unlock(&card->ip_lock); rc = qeth_l3_register_addr_entry(card, addr); - spin_lock_bh(&card->ip_lock); + mutex_lock(&card->ip_lock); addr->in_progress = 0; } else rc = qeth_l3_register_addr_entry(card, addr); @@ -333,8 +347,7 @@ static void qeth_l3_recover_ip(struct qeth_card *card) } } - spin_unlock_bh(&card->ip_lock); - + mutex_unlock(&card->ip_lock); } static int qeth_l3_setdelip_cb(struct qeth_card *card, struct qeth_reply *reply, @@ -461,7 +474,7 @@ static int qeth_l3_send_setrouting(struct qeth_card *card, static int qeth_l3_correct_routing_type(struct qeth_card *card, enum qeth_routing_types *type, enum qeth_prot_versions prot) { - if (card->info.type == QETH_CARD_TYPE_IQD) { + if (IS_IQD(card)) { switch (*type) { case NO_ROUTER: case PRIMARY_CONNECTOR: @@ -559,7 +572,7 @@ static void qeth_l3_clear_ipato_list(struct qeth_card *card) { struct qeth_ipato_entry *ipatoe, *tmp; - spin_lock_bh(&card->ip_lock); + mutex_lock(&card->ip_lock); list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) { list_del(&ipatoe->entry); @@ -567,7 +580,7 @@ static void qeth_l3_clear_ipato_list(struct qeth_card *card) } qeth_l3_update_ipato(card); - spin_unlock_bh(&card->ip_lock); + mutex_unlock(&card->ip_lock); } int qeth_l3_add_ipato_entry(struct qeth_card *card, @@ -578,7 +591,7 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card, QETH_CARD_TEXT(card, 2, "addipato"); - spin_lock_bh(&card->ip_lock); + mutex_lock(&card->ip_lock); list_for_each_entry(ipatoe, &card->ipato.entries, entry) { if (ipatoe->proto != new->proto) @@ -596,7 +609,7 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card, qeth_l3_update_ipato(card); } - spin_unlock_bh(&card->ip_lock); + mutex_unlock(&card->ip_lock); return rc; } @@ -610,7 +623,7 @@ int qeth_l3_del_ipato_entry(struct qeth_card *card, QETH_CARD_TEXT(card, 2, "delipato"); - spin_lock_bh(&card->ip_lock); + mutex_lock(&card->ip_lock); list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) { if 
(ipatoe->proto != proto) @@ -625,7 +638,7 @@ int qeth_l3_del_ipato_entry(struct qeth_card *card, } } - spin_unlock_bh(&card->ip_lock); + mutex_unlock(&card->ip_lock); return rc; } @@ -634,7 +647,6 @@ int qeth_l3_modify_rxip_vipa(struct qeth_card *card, bool add, const u8 *ip, enum qeth_prot_versions proto) { struct qeth_ipaddr addr; - int rc; qeth_l3_init_ipaddr(&addr, type, proto); if (proto == QETH_PROT_IPV4) @@ -642,16 +654,13 @@ int qeth_l3_modify_rxip_vipa(struct qeth_card *card, bool add, const u8 *ip, else memcpy(&addr.u.a6.addr, ip, 16); - spin_lock_bh(&card->ip_lock); - rc = add ? qeth_l3_add_ip(card, &addr) : qeth_l3_delete_ip(card, &addr); - spin_unlock_bh(&card->ip_lock); - return rc; + return qeth_l3_modify_ip(card, &addr, add); } int qeth_l3_modify_hsuid(struct qeth_card *card, bool add) { struct qeth_ipaddr addr; - int rc, i; + unsigned int i; qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV6); addr.u.a6.addr.s6_addr[0] = 0xfe; @@ -659,10 +668,7 @@ int qeth_l3_modify_hsuid(struct qeth_card *card, bool add) for (i = 0; i < 8; i++) addr.u.a6.addr.s6_addr[8+i] = card->options.hsuid[i]; - spin_lock_bh(&card->ip_lock); - rc = add ? qeth_l3_add_ip(card, &addr) : qeth_l3_delete_ip(card, &addr); - spin_unlock_bh(&card->ip_lock); - return rc; + return qeth_l3_modify_ip(card, &addr, add); } static int qeth_l3_register_addr_entry(struct qeth_card *card, @@ -848,7 +854,7 @@ static int qeth_l3_softsetup_ipv6(struct qeth_card *card) QETH_CARD_TEXT(card, 3, "softipv6"); - if (card->info.type == QETH_CARD_TYPE_IQD) + if (IS_IQD(card)) goto out; rc = qeth_send_simple_setassparms(card, IPA_IPV6, @@ -1374,8 +1380,7 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card, switch (hdr->hdr.l3.id) { case QETH_HEADER_TYPE_LAYER3: magic = *(__u16 *)skb->data; - if ((card->info.type == QETH_CARD_TYPE_IQD) && - (magic == ETH_P_AF_IUCV)) { + if (IS_IQD(card) && magic == ETH_P_AF_IUCV) { len = skb->len; dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr, "FAKELL", len); @@ -1413,6 +1418,9 @@ static void qeth_l3_stop_card(struct qeth_card *card) qeth_set_allowed_threads(card, 0, 1); + cancel_work_sync(&card->rx_mode_work); + qeth_l3_drain_rx_mode_cache(card); + if (card->options.sniffer && (card->info.promisc_mode == SET_PROMISC_MODE_ON)) qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE); @@ -1424,7 +1432,7 @@ static void qeth_l3_stop_card(struct qeth_card *card) } if (card->state == CARD_STATE_HARDSETUP) { qeth_qdio_clear_card(card, 0); - qeth_clear_qdio_buffers(card); + qeth_drain_output_queues(card); qeth_clear_working_pool_list(card); card->state = CARD_STATE_DOWN; } @@ -1451,7 +1459,7 @@ qeth_l3_handle_promisc_mode(struct qeth_card *card) (card->info.promisc_mode == SET_PROMISC_MODE_OFF))) return; - if (card->info.guestlan) { /* Guestlan trace */ + if (IS_VM_NIC(card)) { /* Guestlan trace */ if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) qeth_setadp_promisc_mode(card); } else if (card->options.sniffer && /* HiperSockets trace */ @@ -1466,9 +1474,10 @@ qeth_l3_handle_promisc_mode(struct qeth_card *card) } } -static void qeth_l3_set_rx_mode(struct net_device *dev) +static void qeth_l3_rx_mode_work(struct work_struct *work) { - struct qeth_card *card = dev->ml_priv; + struct qeth_card *card = container_of(work, struct qeth_card, + rx_mode_work); struct qeth_ipaddr *addr; struct hlist_node *tmp; int i, rc; @@ -1476,8 +1485,6 @@ static void qeth_l3_set_rx_mode(struct net_device *dev) QETH_CARD_TEXT(card, 3, "setmulti"); if (!card->options.sniffer) { - 
spin_lock_bh(&card->mclock); - qeth_l3_add_multicast_ipv4(card); qeth_l3_add_multicast_ipv6(card); @@ -1505,8 +1512,6 @@ static void qeth_l3_set_rx_mode(struct net_device *dev) } } - spin_unlock_bh(&card->mclock); - if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) return; } @@ -1551,7 +1556,7 @@ static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries) * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_SET_NO_ENTRIES; * thus we say EOPNOTSUPP for this ARP function */ - if (card->info.guestlan) + if (IS_VM_NIC(card)) return -EOPNOTSUPP; if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { return -EOPNOTSUPP; @@ -1783,7 +1788,7 @@ static int qeth_l3_arp_modify_entry(struct qeth_card *card, * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_ADD_ENTRY; * thus we say EOPNOTSUPP for this ARP function */ - if (card->info.guestlan) + if (IS_VM_NIC(card)) return -EOPNOTSUPP; if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { return -EOPNOTSUPP; @@ -1816,7 +1821,7 @@ static int qeth_l3_arp_flush_cache(struct qeth_card *card) * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_FLUSH_CACHE; * thus we say EOPNOTSUPP for this ARP function */ - if (card->info.guestlan || (card->info.type == QETH_CARD_TYPE_IQD)) + if (IS_VM_NIC(card) || IS_IQD(card)) return -EOPNOTSUPP; if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { return -EOPNOTSUPP; @@ -1913,13 +1918,7 @@ static int qeth_l3_get_cast_type(struct sk_buff *skb) RTN_MULTICAST : RTN_UNICAST; default: /* ... and MAC address */ - if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, - skb->dev->broadcast)) - return RTN_BROADCAST; - if (is_multicast_ether_addr(eth_hdr(skb)->h_dest)) - return RTN_MULTICAST; - /* default to unicast */ - return RTN_UNICAST; + return qeth_get_ether_cast_type(skb); } } @@ -1977,19 +1976,14 @@ static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue, hdr->hdr.l3.vlan_id = ntohs(veth->h_vlan_TCI); } + l3_hdr->flags = qeth_l3_cast_type_to_flag(cast_type); + /* OSA only: */ if (!ipv) { - hdr->hdr.l3.flags = QETH_HDR_PASSTHRU; - if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, - skb->dev->broadcast)) - hdr->hdr.l3.flags |= QETH_CAST_BROADCAST; - else - hdr->hdr.l3.flags |= (cast_type == RTN_MULTICAST) ? 
- QETH_CAST_MULTICAST : QETH_CAST_UNICAST; + l3_hdr->flags |= QETH_HDR_PASSTHRU; return; } - hdr->hdr.l3.flags = qeth_l3_cast_type_to_flag(cast_type); rcu_read_lock(); if (ipv == 4) { struct rtable *rt = skb_rtable(skb); @@ -2007,7 +2001,7 @@ static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue, l3_hdr->next_hop.ipv6_addr = ipv6_hdr(skb)->daddr; hdr->hdr.l3.flags |= QETH_HDR_IPV6; - if (card->info.type != QETH_CARD_TYPE_IQD) + if (!IS_IQD(card)) hdr->hdr.l3.flags |= QETH_HDR_PASSTHRU; } rcu_read_unlock(); @@ -2030,7 +2024,6 @@ static void qeth_l3_fixup_headers(struct sk_buff *skb) static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb, struct qeth_qdio_out_q *queue, int ipv, int cast_type) { - unsigned char eth_hdr[ETH_HLEN]; unsigned int hw_hdr_len; int rc; @@ -2040,45 +2033,44 @@ static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb, rc = skb_cow_head(skb, hw_hdr_len - ETH_HLEN); if (rc) return rc; - skb_copy_from_linear_data(skb, eth_hdr, ETH_HLEN); skb_pull(skb, ETH_HLEN); qeth_l3_fixup_headers(skb); - rc = qeth_xmit(card, skb, queue, ipv, cast_type, qeth_l3_fill_header); - if (rc == -EBUSY) { - /* roll back to ETH header */ - skb_push(skb, ETH_HLEN); - skb_copy_to_linear_data(skb, eth_hdr, ETH_HLEN); - } - return rc; + return qeth_xmit(card, skb, queue, ipv, cast_type, qeth_l3_fill_header); } static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { - int cast_type = qeth_l3_get_cast_type(skb); struct qeth_card *card = dev->ml_priv; + u16 txq = skb_get_queue_mapping(skb); int ipv = qeth_get_ip_version(skb); struct qeth_qdio_out_q *queue; int tx_bytes = skb->len; - int rc; - - queue = qeth_get_tx_queue(card, skb, ipv, cast_type); + int cast_type, rc; if (IS_IQD(card)) { + queue = card->qdio.out_qs[qeth_iqd_translate_txq(dev, txq)]; + if (card->options.sniffer) goto tx_drop; if ((card->options.cq != QETH_CQ_ENABLED && !ipv) || (card->options.cq == QETH_CQ_ENABLED && skb->protocol != htons(ETH_P_AF_IUCV))) goto tx_drop; + + if (txq == QETH_IQD_MCAST_TXQ) + cast_type = qeth_l3_get_cast_type(skb); + else + cast_type = RTN_UNICAST; + } else { + queue = card->qdio.out_qs[txq]; + cast_type = qeth_l3_get_cast_type(skb); } if (cast_type == RTN_BROADCAST && !card->info.broadcast_capable) goto tx_drop; - netif_stop_queue(dev); - if (ipv == 4 || IS_IQD(card)) rc = qeth_l3_xmit(card, skb, queue, ipv, cast_type); else @@ -2088,19 +2080,22 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, if (!rc) { QETH_TXQ_STAT_INC(queue, tx_packets); QETH_TXQ_STAT_ADD(queue, tx_bytes, tx_bytes); - netif_wake_queue(dev); return NETDEV_TX_OK; - } else if (rc == -EBUSY) { - return NETDEV_TX_BUSY; - } /* else fall through */ + } tx_drop: QETH_TXQ_STAT_INC(queue, tx_dropped); kfree_skb(skb); - netif_wake_queue(dev); return NETDEV_TX_OK; } +static void qeth_l3_set_rx_mode(struct net_device *dev) +{ + struct qeth_card *card = dev->ml_priv; + + schedule_work(&card->rx_mode_work); +} + /* * we need NOARP for IPv4 but we want neighbor solicitation for IPv6. 
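Both disciplines now defer ndo_set_rx_mode to a work item: the hook itself runs in atomic context, so the driver merely schedules work, and the worker later snapshots the address lists under netif_addr_lock_bh() before programming the hardware. The shape of the pattern, reduced to a sketch with hypothetical names (my_card, my_rx_mode_work):

#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct my_card {
	struct net_device *dev;
	struct work_struct rx_mode_work;
};

/* runs in process context and may sleep while talking to the HW */
static void my_rx_mode_work(struct work_struct *work)
{
	struct my_card *card = container_of(work, struct my_card,
					    rx_mode_work);
	struct net_device *dev = card->dev;
	struct netdev_hw_addr *ha;

	/* snapshot the address lists under the addr lock ... */
	netif_addr_lock_bh(dev);
	netdev_for_each_mc_addr(ha, dev) {
		/* copy ha->addr into a private cache here */
	}
	netif_addr_unlock_bh(dev);

	/* ... then program the hardware outside of it */
}

/* ndo_set_rx_mode: atomic context, so defer the real work */
static void my_set_rx_mode(struct net_device *dev)
{
	struct my_card *card = netdev_priv(dev);

	schedule_work(&card->rx_mode_work);
}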
Setting * NOARP on the netdevice is no option because it also turns off neighbor @@ -2134,11 +2129,27 @@ static netdev_features_t qeth_l3_osa_features_check(struct sk_buff *skb, return qeth_features_check(skb, dev, features); } +static u16 qeth_l3_iqd_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev) +{ + return qeth_iqd_select_queue(dev, skb, qeth_l3_get_cast_type(skb), + sb_dev); +} + +static u16 qeth_l3_osa_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev) +{ + struct qeth_card *card = dev->ml_priv; + + return qeth_get_priority_queue(card, skb); +} + static const struct net_device_ops qeth_l3_netdev_ops = { .ndo_open = qeth_open, .ndo_stop = qeth_stop, .ndo_get_stats64 = qeth_get_stats64, .ndo_start_xmit = qeth_l3_hard_start_xmit, + .ndo_select_queue = qeth_l3_iqd_select_queue, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = qeth_l3_set_rx_mode, .ndo_do_ioctl = qeth_do_ioctl, @@ -2155,6 +2166,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = { .ndo_get_stats64 = qeth_get_stats64, .ndo_start_xmit = qeth_l3_hard_start_xmit, .ndo_features_check = qeth_l3_osa_features_check, + .ndo_select_queue = qeth_l3_osa_select_queue, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = qeth_l3_set_rx_mode, .ndo_do_ioctl = qeth_do_ioctl, @@ -2171,8 +2183,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok) unsigned int headroom; int rc; - if (card->info.type == QETH_CARD_TYPE_OSD || - card->info.type == QETH_CARD_TYPE_OSX) { + if (IS_OSD(card) || IS_OSX(card)) { if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) || (card->info.link_type == QETH_LINK_TYPE_HSTR)) { pr_info("qeth_l3: ignoring TR device\n"); @@ -2186,7 +2197,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok) if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD)) card->dev->dev_id = card->info.unique_id & 0xffff; - if (!card->info.guestlan) { + if (!IS_VM_NIC(card)) { card->dev->features |= NETIF_F_SG; card->dev->hw_features |= NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_IP_CSUM; @@ -2210,7 +2221,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok) headroom = sizeof(struct qeth_hdr_tso); else headroom = sizeof(struct qeth_hdr) + VLAN_HLEN; - } else if (card->info.type == QETH_CARD_TYPE_IQD) { + } else if (IS_IQD(card)) { card->dev->flags |= IFF_NOARP; card->dev->netdev_ops = &qeth_l3_netdev_ops; headroom = sizeof(struct qeth_hdr) - ETH_HLEN; @@ -2253,14 +2264,22 @@ static int qeth_l3_probe_device(struct ccwgroup_device *gdev) int rc; hash_init(card->ip_htable); + mutex_init(&card->ip_lock); + card->cmd_wq = alloc_ordered_workqueue("%s_cmd", 0, + dev_name(&gdev->dev)); + if (!card->cmd_wq) + return -ENOMEM; if (gdev->dev.type == &qeth_generic_devtype) { rc = qeth_l3_create_device_attributes(&gdev->dev); - if (rc) + if (rc) { + destroy_workqueue(card->cmd_wq); return rc; + } } hash_init(card->ip_mc_htable); + INIT_WORK(&card->rx_mode_work, qeth_l3_rx_mode_work); return 0; } @@ -2280,6 +2299,9 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev) cancel_work_sync(&card->close_dev_work); if (qeth_netdev_is_registered(card->dev)) unregister_netdev(card->dev); + + flush_workqueue(card->cmd_wq); + destroy_workqueue(card->cmd_wq); qeth_l3_clear_ip_htable(card, 0); qeth_l3_clear_ipato_list(card); } @@ -2517,20 +2539,40 @@ static int qeth_l3_handle_ip_event(struct qeth_card *card, { switch (event) { case NETDEV_UP: - spin_lock_bh(&card->ip_lock); - 
qeth_l3_add_ip(card, addr); - spin_unlock_bh(&card->ip_lock); + qeth_l3_modify_ip(card, addr, true); return NOTIFY_OK; case NETDEV_DOWN: - spin_lock_bh(&card->ip_lock); - qeth_l3_delete_ip(card, addr); - spin_unlock_bh(&card->ip_lock); + qeth_l3_modify_ip(card, addr, false); return NOTIFY_OK; default: return NOTIFY_DONE; } } +struct qeth_l3_ip_event_work { + struct work_struct work; + struct qeth_card *card; + struct qeth_ipaddr addr; +}; + +#define to_ip_work(w) container_of((w), struct qeth_l3_ip_event_work, work) + +static void qeth_l3_add_ip_worker(struct work_struct *work) +{ + struct qeth_l3_ip_event_work *ip_work = to_ip_work(work); + + qeth_l3_modify_ip(ip_work->card, &ip_work->addr, true); + kfree(work); +} + +static void qeth_l3_delete_ip_worker(struct work_struct *work) +{ + struct qeth_l3_ip_event_work *ip_work = to_ip_work(work); + + qeth_l3_modify_ip(ip_work->card, &ip_work->addr, false); + kfree(work); +} + static struct qeth_card *qeth_l3_get_card_from_dev(struct net_device *dev) { if (is_vlan_dev(dev)) @@ -2575,9 +2617,12 @@ static int qeth_l3_ip6_event(struct notifier_block *this, { struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr; struct net_device *dev = ifa->idev->dev; - struct qeth_ipaddr addr; + struct qeth_l3_ip_event_work *ip_work; struct qeth_card *card; + if (event != NETDEV_UP && event != NETDEV_DOWN) + return NOTIFY_DONE; + card = qeth_l3_get_card_from_dev(dev); if (!card) return NOTIFY_DONE; @@ -2585,11 +2630,23 @@ static int qeth_l3_ip6_event(struct notifier_block *this, if (!qeth_is_supported(card, IPA_IPV6)) return NOTIFY_DONE; - qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV6); - addr.u.a6.addr = ifa->addr; - addr.u.a6.pfxlen = ifa->prefix_len; + ip_work = kmalloc(sizeof(*ip_work), GFP_ATOMIC); + if (!ip_work) + return NOTIFY_DONE; - return qeth_l3_handle_ip_event(card, &addr, event); + if (event == NETDEV_UP) + INIT_WORK(&ip_work->work, qeth_l3_add_ip_worker); + else + INIT_WORK(&ip_work->work, qeth_l3_delete_ip_worker); + + ip_work->card = card; + qeth_l3_init_ipaddr(&ip_work->addr, QETH_IP_TYPE_NORMAL, + QETH_PROT_IPV6); + ip_work->addr.u.a6.addr = ifa->addr; + ip_work->addr.u.a6.pfxlen = ifa->prefix_len; + + queue_work(card->cmd_wq, &ip_work->work); + return NOTIFY_OK; } static struct notifier_block qeth_l3_ip6_notifier = { diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c index cff518b0f904..2f73b33c9347 100644 --- a/drivers/s390/net/qeth_l3_sys.c +++ b/drivers/s390/net/qeth_l3_sys.c @@ -206,7 +206,7 @@ static ssize_t qeth_l3_dev_sniffer_store(struct device *dev, if (!card) return -EINVAL; - if (card->info.type != QETH_CARD_TYPE_IQD) + if (!IS_IQD(card)) return -EPERM; if (card->options.cq == QETH_CQ_ENABLED) return -EPERM; @@ -258,7 +258,7 @@ static ssize_t qeth_l3_dev_hsuid_show(struct device *dev, if (!card) return -EINVAL; - if (card->info.type != QETH_CARD_TYPE_IQD) + if (!IS_IQD(card)) return -EPERM; memcpy(tmp_hsuid, card->options.hsuid, sizeof(tmp_hsuid)); @@ -276,7 +276,7 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev, if (!card) return -EINVAL; - if (card->info.type != QETH_CARD_TYPE_IQD) + if (!IS_IQD(card)) return -EPERM; if (card->state != CARD_STATE_DOWN) return -EPERM; @@ -367,9 +367,9 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev, if (card->ipato.enabled != enable) { card->ipato.enabled = enable; - spin_lock_bh(&card->ip_lock); + mutex_lock(&card->ip_lock); qeth_l3_update_ipato(card); - spin_unlock_bh(&card->ip_lock); + mutex_unlock(&card->ip_lock); } 
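The IPv6 notifier above cannot block, so each event is turned into its own GFP_ATOMIC work item that the worker frees when done; qeth queues these to an ordered workqueue so adds and deletes for the same address stay in order. Stripped to the bare pattern (the my_* names are invented for illustration):

#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* one heap-allocated work item per event */
struct my_event_work {
	struct work_struct work;
	unsigned long event;	/* copy of the notifier payload */
};

static void my_event_worker(struct work_struct *work)
{
	struct my_event_work *ew = container_of(work, struct my_event_work,
						work);

	/* sleepable context: take mutexes, send commands, etc. */
	kfree(ew);	/* the worker owns and frees the item */
}

static int my_notifier_cb(struct notifier_block *nb, unsigned long event,
			  void *ptr)
{
	struct my_event_work *ew;

	if (event != NETDEV_UP && event != NETDEV_DOWN)
		return NOTIFY_DONE;

	ew = kmalloc(sizeof(*ew), GFP_ATOMIC);	/* notifier runs atomically */
	if (!ew)
		return NOTIFY_DONE;	/* best effort only */

	INIT_WORK(&ew->work, my_event_worker);
	ew->event = event;
	schedule_work(&ew->work);	/* qeth uses an ordered workqueue
					 * so events stay in order */
	return NOTIFY_OK;
}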
out: mutex_unlock(&card->conf_mutex); @@ -412,9 +412,9 @@ static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev, if (card->ipato.invert4 != invert) { card->ipato.invert4 = invert; - spin_lock_bh(&card->ip_lock); + mutex_lock(&card->ip_lock); qeth_l3_update_ipato(card); - spin_unlock_bh(&card->ip_lock); + mutex_unlock(&card->ip_lock); } out: mutex_unlock(&card->conf_mutex); @@ -436,7 +436,7 @@ static ssize_t qeth_l3_dev_ipato_add_show(char *buf, struct qeth_card *card, entry_len = (proto == QETH_PROT_IPV4)? 12 : 40; /* add strlen for "/<mask>\n" */ entry_len += (proto == QETH_PROT_IPV4)? 5 : 6; - spin_lock_bh(&card->ip_lock); + mutex_lock(&card->ip_lock); list_for_each_entry(ipatoe, &card->ipato.entries, entry) { if (ipatoe->proto != proto) continue; @@ -449,7 +449,7 @@ static ssize_t qeth_l3_dev_ipato_add_show(char *buf, struct qeth_card *card, i += snprintf(buf + i, PAGE_SIZE - i, "%s/%i\n", addr_str, ipatoe->mask_bits); } - spin_unlock_bh(&card->ip_lock); + mutex_unlock(&card->ip_lock); i += snprintf(buf + i, PAGE_SIZE - i, "\n"); return i; @@ -598,9 +598,9 @@ static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev, if (card->ipato.invert6 != invert) { card->ipato.invert6 = invert; - spin_lock_bh(&card->ip_lock); + mutex_lock(&card->ip_lock); qeth_l3_update_ipato(card); - spin_unlock_bh(&card->ip_lock); + mutex_unlock(&card->ip_lock); } out: mutex_unlock(&card->conf_mutex); @@ -684,7 +684,7 @@ static ssize_t qeth_l3_dev_ip_add_show(struct device *dev, char *buf, entry_len = (proto == QETH_PROT_IPV4)? 12 : 40; entry_len += 2; /* \n + terminator */ - spin_lock_bh(&card->ip_lock); + mutex_lock(&card->ip_lock); hash_for_each(card->ip_htable, i, ipaddr, hnode) { if (ipaddr->proto != proto || ipaddr->type != type) continue; @@ -698,7 +698,7 @@ static ssize_t qeth_l3_dev_ip_add_show(struct device *dev, char *buf, str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "%s\n", addr_str); } - spin_unlock_bh(&card->ip_lock); + mutex_unlock(&card->ip_lock); str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "\n"); return str_len; diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c index 8dde5a40e253..2c088af44c8b 100644 --- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c +++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c @@ -245,8 +245,7 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb) } static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb, - struct net_device *sb_dev, - select_queue_fallback_t fallback) + struct net_device *sb_dev) { struct adapter *padapter = rtw_netdev_priv(dev); struct mlme_priv *pmlmepriv = &padapter->mlmepriv; diff --git a/drivers/staging/rtl8723bs/os_dep/os_intfs.c b/drivers/staging/rtl8723bs/os_dep/os_intfs.c index 143e3f9b31aa..0a20a4e9e19a 100644 --- a/drivers/staging/rtl8723bs/os_dep/os_intfs.c +++ b/drivers/staging/rtl8723bs/os_dep/os_intfs.c @@ -404,8 +404,7 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb) static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb, - struct net_device *sb_dev, - select_queue_fallback_t fallback) + struct net_device *sb_dev) { struct adapter *padapter = rtw_netdev_priv(dev); struct mlme_priv *pmlmepriv = &padapter->mlmepriv; diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 9704b135a7bc..40b29ca5a98d 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -441,26 +441,26 @@ static int tcmu_genl_set_features(struct sk_buff *skb, struct 
genl_info *info) static const struct genl_ops tcmu_genl_ops[] = { { .cmd = TCMU_CMD_SET_FEATURES, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .flags = GENL_ADMIN_PERM, - .policy = tcmu_attr_policy, .doit = tcmu_genl_set_features, }, { .cmd = TCMU_CMD_ADDED_DEVICE_DONE, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .flags = GENL_ADMIN_PERM, - .policy = tcmu_attr_policy, .doit = tcmu_genl_add_dev_done, }, { .cmd = TCMU_CMD_REMOVED_DEVICE_DONE, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .flags = GENL_ADMIN_PERM, - .policy = tcmu_attr_policy, .doit = tcmu_genl_rm_dev_done, }, { .cmd = TCMU_CMD_RECONFIG_DEVICE_DONE, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .flags = GENL_ADMIN_PERM, - .policy = tcmu_attr_policy, .doit = tcmu_genl_reconfig_dev_done, }, }; @@ -472,6 +472,7 @@ static struct genl_family tcmu_genl_family __ro_after_init = { .name = "TCM-USER", .version = 2, .maxattr = TCMU_ATTR_MAX, + .policy = tcmu_attr_policy, .mcgrps = tcmu_mcgrps, .n_mcgrps = ARRAY_SIZE(tcmu_mcgrps), .netnsok = true,
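The tcmu hunk is part of the tree-wide genetlink conversion seen throughout this diff: the per-op .policy pointers move into the family so attribute validation is applied uniformly, and pre-existing commands opt out of the new strict checking via .validate flags. A family in the new layout looks roughly like this (the MY_* names are hypothetical):

#include <net/genetlink.h>

enum { MY_ATTR_UNSPEC, MY_ATTR_ID, __MY_ATTR_MAX };
#define MY_ATTR_MAX (__MY_ATTR_MAX - 1)

enum { MY_CMD_UNSPEC, MY_CMD_PING };

static const struct nla_policy my_policy[MY_ATTR_MAX + 1] = {
	[MY_ATTR_ID] = { .type = NLA_U32 },
};

static int my_doit(struct sk_buff *skb, struct genl_info *info)
{
	return 0;
}

static const struct genl_ops my_ops[] = {
	{
		.cmd = MY_CMD_PING,
		/* legacy command: keep pre-strict validation semantics */
		.validate = GENL_DONT_VALIDATE_STRICT |
			    GENL_DONT_VALIDATE_DUMP,
		.doit = my_doit,
	},
};

static struct genl_family my_family __ro_after_init = {
	.name = "MY-FAMILY",
	.version = 1,
	.maxattr = MY_ATTR_MAX,
	.policy = my_policy,	/* family-wide, replaces per-op .policy */
	.ops = my_ops,
	.n_ops = ARRAY_SIZE(my_ops),
};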