author     Linus Torvalds <torvalds@linux-foundation.org>  2022-03-29 03:02:04 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-03-29 03:02:04 +0300
commit     d717e4cae0fe77e10a27e8545a967b8c379873ac (patch)
tree       b4ff5fdf82bf40c68b84aaa6f666b5036b2e7383 /drivers/net
parent     cffb2b72d3ed47f5093d128bd44d9ce136b6b5af (diff)
parent     20695e9a9fd39103d1b0669470ae74030b7aa196 (diff)
download   linux-d717e4cae0fe77e10a27e8545a967b8c379873ac.tar.xz
Merge tag 'net-5.18-rc0' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Pull networking fixes from Jakub Kicinski:
"Including fixes from netfilter.
Current release - regressions:

 - llc: only change llc->dev when bind() succeeds, fix null-deref

Current release - new code bugs:

 - smc: fix a memory leak in smc_sysctl_net_exit()

 - dsa: realtek: make interface drivers depend on OF

Previous releases - regressions:

 - sched: act_ct: fix ref leak when switching zones

Previous releases - always broken:

 - netfilter: egress: report interface as outgoing

 - vsock/virtio: enable VQs early on probe and finish the setup before
   using them

Misc:

 - memcg: enable accounting for nft objects"
* tag 'net-5.18-rc0' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (39 commits)
Revert "selftests: net: Add tls config dependency for tls selftests"
net/smc: Send out the remaining data in sndbuf before close
net: move net_unlink_todo() out of the header
net: dsa: bcm_sf2_cfp: fix an incorrect NULL check on list iterator
net: bnxt_ptp: fix compilation error
selftests: net: Add tls config dependency for tls selftests
memcg: enable accounting for nft objects
net/sched: act_ct: fix ref leak when switching zones
net/smc: fix a memory leak in smc_sysctl_net_exit()
selftests: tls: skip cmsg_to_pipe tests with TLS=n
octeontx2-af: initialize action variable
net: sparx5: switchdev: fix possible NULL pointer dereference
net/x25: Fix null-ptr-deref caused by x25_disconnect
qlcnic: dcb: default to returning -EOPNOTSUPP
net: sparx5: depends on PTP_1588_CLOCK_OPTIONAL
net: hns3: fix phy can not link up when autoneg off and reset
net: hns3: add NULL pointer check for hns3_set/get_ringparam()
net: hns3: add netdev reset check for hns3_set_tunable()
net: hns3: clean residual vf config after disable sriov
net: hns3: add max order judgement for tx spare buffer
...
Diffstat (limited to 'drivers/net')
24 files changed, 341 insertions, 114 deletions
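
Editor's note on one of the fixes in the diff below: the bcm_sf2_cfp hunk is an instance of a common list-iterator pitfall. After a list_for_each_entry() loop runs to completion without hitting a break, the iterator no longer points at a real element (it holds a bogus pointer derived from the list head), so it is never NULL and a post-loop NULL check cannot signal "not found". The standalone sketch below hand-rolls the list walk instead of pulling in <linux/list.h> so it compiles in userspace; the helper names and the main() harness are illustrative stand-ins, not code from the patch.

/* Userspace illustration of the iterator pitfall fixed in
 * bcm_sf2_cfp_rule_find(); the loops mirror list_for_each_entry() by hand. */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct cfp_rule {
	int port;
	unsigned int location;
	struct list_head node;
};

/* Buggy shape: the iterator is used after the loop. */
static struct cfp_rule *find_buggy(struct list_head *head, int port,
				   unsigned int loc)
{
	struct cfp_rule *rule = NULL;	/* overwritten by the loop initialiser */

	for (rule = container_of(head->next, struct cfp_rule, node);
	     &rule->node != head;
	     rule = container_of(rule->node.next, struct cfp_rule, node)) {
		if (rule->port == port && rule->location == loc)
			break;
	}
	/* On a miss, rule points at a fake entry computed from the list head:
	 * it is non-NULL, and dereferencing it is undefined behaviour. */
	return rule;
}

/* Fixed shape, as in the patch: return from inside the loop, NULL after it. */
static struct cfp_rule *find_fixed(struct list_head *head, int port,
				   unsigned int loc)
{
	struct cfp_rule *rule;

	for (rule = container_of(head->next, struct cfp_rule, node);
	     &rule->node != head;
	     rule = container_of(rule->node.next, struct cfp_rule, node)) {
		if (rule->port == port && rule->location == loc)
			return rule;	/* only a real element is returned */
	}
	return NULL;			/* explicit "not found" */
}

int main(void)
{
	struct list_head head = { &head, &head };
	struct cfp_rule a = { .port = 1, .location = 10 };

	/* Build a one-element list. */
	a.node.next = &head;
	a.node.prev = &head;
	head.next = &a.node;
	head.prev = &a.node;

	/* Look up a rule that is not on the list. */
	printf("buggy lookup: %s\n",
	       find_buggy(&head, 2, 99) ? "non-NULL (misleading)" : "NULL");
	printf("fixed lookup: %s\n",
	       find_fixed(&head, 2, 99) ? "non-NULL" : "NULL (correct)");
	return 0;
}

Returning from inside the loop also makes the needless NULL initialisation of the iterator go away, which is exactly the shape the patch adopts.
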
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c index a7e2fcf2df2c..edbe5e7f1cb6 100644 --- a/drivers/net/dsa/bcm_sf2_cfp.c +++ b/drivers/net/dsa/bcm_sf2_cfp.c @@ -567,14 +567,14 @@ static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv, static struct cfp_rule *bcm_sf2_cfp_rule_find(struct bcm_sf2_priv *priv, int port, u32 location) { - struct cfp_rule *rule = NULL; + struct cfp_rule *rule; list_for_each_entry(rule, &priv->cfp.rules_list, next) { if (rule->port == port && rule->fs.location == location) - break; + return rule; } - return rule; + return NULL; } static int bcm_sf2_cfp_rule_cmp(struct bcm_sf2_priv *priv, int port, diff --git a/drivers/net/dsa/realtek/Kconfig b/drivers/net/dsa/realtek/Kconfig index b7427a8292b2..1aa79735355f 100644 --- a/drivers/net/dsa/realtek/Kconfig +++ b/drivers/net/dsa/realtek/Kconfig @@ -12,6 +12,7 @@ menuconfig NET_DSA_REALTEK config NET_DSA_REALTEK_MDIO tristate "Realtek MDIO connected switch driver" depends on NET_DSA_REALTEK + depends on OF help Select to enable support for registering switches configured through MDIO. @@ -19,6 +20,7 @@ config NET_DSA_REALTEK_MDIO config NET_DSA_REALTEK_SMI tristate "Realtek SMI connected switch driver" depends on NET_DSA_REALTEK + depends on OF help Select to enable support for registering switches connected through SMI. diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c index a0b321a19361..9c2ad5e67a5d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c @@ -382,7 +382,7 @@ static int bnxt_ptp_enable(struct ptp_clock_info *ptp_info, struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg, ptp_info); struct bnxt *bp = ptp->bp; - u8 pin_id; + int pin_id; int rc; switch (rq->type) { @@ -390,6 +390,8 @@ static int bnxt_ptp_enable(struct ptp_clock_info *ptp_info, /* Configure an External PPS IN */ pin_id = ptp_find_pin(ptp->ptp_clock, PTP_PF_EXTTS, rq->extts.index); + if (!TSIO_PIN_VALID(pin_id)) + return -EOPNOTSUPP; if (!on) break; rc = bnxt_ptp_cfg_pin(bp, pin_id, BNXT_PPS_PIN_PPS_IN); @@ -403,6 +405,8 @@ static int bnxt_ptp_enable(struct ptp_clock_info *ptp_info, /* Configure a Periodic PPS OUT */ pin_id = ptp_find_pin(ptp->ptp_clock, PTP_PF_PEROUT, rq->perout.index); + if (!TSIO_PIN_VALID(pin_id)) + return -EOPNOTSUPP; if (!on) break; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h index 373baf45884b..530b9922608c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h @@ -31,7 +31,7 @@ struct pps_pin { u8 state; }; -#define TSIO_PIN_VALID(pin) ((pin) < (BNXT_MAX_TSIO_PINS)) +#define TSIO_PIN_VALID(pin) ((pin) >= 0 && (pin) < (BNXT_MAX_TSIO_PINS)) #define EVENT_DATA2_PPS_EVENT_TYPE(data2) \ ((data2) & ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE) diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c index fa5b4f885b17..60ec64bfb3f0 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c @@ -674,7 +674,10 @@ static int enetc_get_ts_info(struct net_device *ndev, #ifdef CONFIG_FSL_ENETC_PTP_CLOCK info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | - SOF_TIMESTAMPING_RAW_HARDWARE; + SOF_TIMESTAMPING_RAW_HARDWARE | + SOF_TIMESTAMPING_TX_SOFTWARE | + 
SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON) | diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 6f18c9a03231..d44dd7091fa1 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -537,6 +537,8 @@ struct hnae3_ae_dev { * Get 1588 rx hwstamp * get_ts_info * Get phc info + * clean_vf_config + * Clean residual vf info after disable sriov */ struct hnae3_ae_ops { int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev); @@ -730,6 +732,7 @@ struct hnae3_ae_ops { struct ethtool_ts_info *info); int (*get_link_diagnosis_info)(struct hnae3_handle *handle, u32 *status_code); + void (*clean_vf_config)(struct hnae3_ae_dev *ae_dev, int num_vfs); }; struct hnae3_dcb_ops { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 0b8a73c40b12..14dc12c2155d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -1028,46 +1028,56 @@ static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring, static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring) { + u32 alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size; struct hns3_tx_spare *tx_spare; struct page *page; - u32 alloc_size; dma_addr_t dma; int order; - alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size; if (!alloc_size) return; order = get_order(alloc_size); + if (order >= MAX_ORDER) { + if (net_ratelimit()) + dev_warn(ring_to_dev(ring), "failed to allocate tx spare buffer, exceed to max order\n"); + return; + } + tx_spare = devm_kzalloc(ring_to_dev(ring), sizeof(*tx_spare), GFP_KERNEL); if (!tx_spare) { /* The driver still work without the tx spare buffer */ dev_warn(ring_to_dev(ring), "failed to allocate hns3_tx_spare\n"); - return; + goto devm_kzalloc_error; } page = alloc_pages_node(dev_to_node(ring_to_dev(ring)), GFP_KERNEL, order); if (!page) { dev_warn(ring_to_dev(ring), "failed to allocate tx spare pages\n"); - devm_kfree(ring_to_dev(ring), tx_spare); - return; + goto alloc_pages_error; } dma = dma_map_page(ring_to_dev(ring), page, 0, PAGE_SIZE << order, DMA_TO_DEVICE); if (dma_mapping_error(ring_to_dev(ring), dma)) { dev_warn(ring_to_dev(ring), "failed to map pages for tx spare\n"); - put_page(page); - devm_kfree(ring_to_dev(ring), tx_spare); - return; + goto dma_mapping_error; } tx_spare->dma = dma; tx_spare->buf = page_address(page); tx_spare->len = PAGE_SIZE << order; ring->tx_spare = tx_spare; + return; + +dma_mapping_error: + put_page(page); +alloc_pages_error: + devm_kfree(ring_to_dev(ring), tx_spare); +devm_kzalloc_error: + ring->tqp->handle->kinfo.tx_spare_buf_size = 0; } /* Use hns3_tx_spare_space() to make sure there is enough buffer @@ -3050,6 +3060,21 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return ret; } +/** + * hns3_clean_vf_config + * @pdev: pointer to a pci_dev structure + * @num_vfs: number of VFs allocated + * + * Clean residual vf config after disable sriov + **/ +static void hns3_clean_vf_config(struct pci_dev *pdev, int num_vfs) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + + if (ae_dev->ops->clean_vf_config) + ae_dev->ops->clean_vf_config(ae_dev, num_vfs); +} + /* hns3_remove - Device removal routine * @pdev: PCI device information struct */ @@ -3088,7 +3113,10 @@ static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) else return 
num_vfs; } else if (!pci_vfs_assigned(pdev)) { + int num_vfs_pre = pci_num_vf(pdev); + pci_disable_sriov(pdev); + hns3_clean_vf_config(pdev, num_vfs_pre); } else { dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n"); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index 6469238ae090..f4da77452126 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -653,8 +653,8 @@ static void hns3_get_ringparam(struct net_device *netdev, struct hnae3_handle *h = priv->ae_handle; int rx_queue_index = h->kinfo.num_tqps; - if (hns3_nic_resetting(netdev)) { - netdev_err(netdev, "dev resetting!"); + if (hns3_nic_resetting(netdev) || !priv->ring) { + netdev_err(netdev, "failed to get ringparam value, due to dev resetting or uninited\n"); return; } @@ -1074,8 +1074,14 @@ static int hns3_check_ringparam(struct net_device *ndev, { #define RX_BUF_LEN_2K 2048 #define RX_BUF_LEN_4K 4096 - if (hns3_nic_resetting(ndev)) + + struct hns3_nic_priv *priv = netdev_priv(ndev); + + if (hns3_nic_resetting(ndev) || !priv->ring) { + netdev_err(ndev, "failed to set ringparam value, due to dev resetting or uninited\n"); return -EBUSY; + } + if (param->rx_mini_pending || param->rx_jumbo_pending) return -EINVAL; @@ -1766,9 +1772,6 @@ static int hns3_set_tx_spare_buf_size(struct net_device *netdev, struct hnae3_handle *h = priv->ae_handle; int ret; - if (hns3_nic_resetting(netdev)) - return -EBUSY; - h->kinfo.tx_spare_buf_size = data; ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT); @@ -1799,6 +1802,11 @@ static int hns3_set_tunable(struct net_device *netdev, struct hnae3_handle *h = priv->ae_handle; int i, ret = 0; + if (hns3_nic_resetting(netdev) || !priv->ring) { + netdev_err(netdev, "failed to set tunable value, dev resetting!"); + return -EBUSY; + } + switch (tuna->id) { case ETHTOOL_TX_COPYBREAK: priv->tx_copybreak = *(u32 *)data; @@ -1818,7 +1826,8 @@ static int hns3_set_tunable(struct net_device *netdev, old_tx_spare_buf_size = h->kinfo.tx_spare_buf_size; new_tx_spare_buf_size = *(u32 *)data; ret = hns3_set_tx_spare_buf_size(netdev, new_tx_spare_buf_size); - if (ret) { + if (ret || + (!priv->ring->tx_spare && new_tx_spare_buf_size != 0)) { int ret1; netdev_warn(netdev, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 78d0498bdabc..2a5e6a241d52 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -1872,6 +1872,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev) vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO; vport->mps = HCLGE_MAC_DEFAULT_FRAME; vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE; + vport->port_base_vlan_cfg.tbl_sta = true; vport->rxvlan_cfg.rx_vlan_offload_en = true; vport->req_vlan_fltr_en = true; INIT_LIST_HEAD(&vport->vlan_list); @@ -8438,12 +8439,11 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport, hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); hclge_prepare_mac_addr(&req, addr, false); ret = hclge_remove_mac_vlan_tbl(vport, &req); - if (!ret) { + if (!ret || ret == -ENOENT) { mutex_lock(&hdev->vport_lock); hclge_update_umv_space(vport, true); mutex_unlock(&hdev->vport_lock); - } else if (ret == -ENOENT) { - ret = 0; + return 0; } return ret; @@ -8993,11 +8993,16 @@ static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf, 
ether_addr_copy(vport->vf_info.mac, mac_addr); + /* there is a timewindow for PF to know VF unalive, it may + * cause send mailbox fail, but it doesn't matter, VF will + * query it when reinit. + */ if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) { dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %s, and it will be reinitialized!\n", vf, format_mac_addr); - return hclge_inform_reset_assert_to_vf(vport); + (void)hclge_inform_reset_assert_to_vf(vport); + return 0; } dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %s\n", @@ -9818,19 +9823,28 @@ static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, bool writen_to_tbl) { struct hclge_vport_vlan_cfg *vlan, *tmp; + struct hclge_dev *hdev = vport->back; - list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) - if (vlan->vlan_id == vlan_id) + mutex_lock(&hdev->vport_lock); + + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { + if (vlan->vlan_id == vlan_id) { + mutex_unlock(&hdev->vport_lock); return; + } + } vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); - if (!vlan) + if (!vlan) { + mutex_unlock(&hdev->vport_lock); return; + } vlan->hd_tbl_status = writen_to_tbl; vlan->vlan_id = vlan_id; list_add_tail(&vlan->node, &vport->vlan_list); + mutex_unlock(&hdev->vport_lock); } static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport) @@ -9839,6 +9853,8 @@ static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport) struct hclge_dev *hdev = vport->back; int ret; + mutex_lock(&hdev->vport_lock); + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { if (!vlan->hd_tbl_status) { ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), @@ -9848,12 +9864,16 @@ static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport) dev_err(&hdev->pdev->dev, "restore vport vlan list failed, ret=%d\n", ret); + + mutex_unlock(&hdev->vport_lock); return ret; } } vlan->hd_tbl_status = true; } + mutex_unlock(&hdev->vport_lock); + return 0; } @@ -9863,6 +9883,8 @@ static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, struct hclge_vport_vlan_cfg *vlan, *tmp; struct hclge_dev *hdev = vport->back; + mutex_lock(&hdev->vport_lock); + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { if (vlan->vlan_id == vlan_id) { if (is_write_tbl && vlan->hd_tbl_status) @@ -9877,6 +9899,8 @@ static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, break; } } + + mutex_unlock(&hdev->vport_lock); } void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list) @@ -9884,6 +9908,8 @@ void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list) struct hclge_vport_vlan_cfg *vlan, *tmp; struct hclge_dev *hdev = vport->back; + mutex_lock(&hdev->vport_lock); + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { if (vlan->hd_tbl_status) hclge_set_vlan_filter_hw(hdev, @@ -9899,6 +9925,7 @@ void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list) } } clear_bit(vport->vport_id, hdev->vf_vlan_full); + mutex_unlock(&hdev->vport_lock); } void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev) @@ -9907,6 +9934,8 @@ void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev) struct hclge_vport *vport; int i; + mutex_lock(&hdev->vport_lock); + for (i = 0; i < hdev->num_alloc_vport; i++) { vport = &hdev->vport[i]; list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { @@ -9914,37 +9943,61 @@ void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev) kfree(vlan); } } + + 
mutex_unlock(&hdev->vport_lock); } -void hclge_restore_vport_vlan_table(struct hclge_vport *vport) +void hclge_restore_vport_port_base_vlan_config(struct hclge_dev *hdev) { - struct hclge_vport_vlan_cfg *vlan, *tmp; - struct hclge_dev *hdev = vport->back; + struct hclge_vlan_info *vlan_info; + struct hclge_vport *vport; u16 vlan_proto; u16 vlan_id; u16 state; + int vf_id; int ret; - vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto; - vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag; - state = vport->port_base_vlan_cfg.state; + /* PF should restore all vfs port base vlan */ + for (vf_id = 0; vf_id < hdev->num_alloc_vfs; vf_id++) { + vport = &hdev->vport[vf_id + HCLGE_VF_VPORT_START_NUM]; + vlan_info = vport->port_base_vlan_cfg.tbl_sta ? + &vport->port_base_vlan_cfg.vlan_info : + &vport->port_base_vlan_cfg.old_vlan_info; - if (state != HNAE3_PORT_BASE_VLAN_DISABLE) { - clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]); - hclge_set_vlan_filter_hw(hdev, htons(vlan_proto), - vport->vport_id, vlan_id, - false); - return; + vlan_id = vlan_info->vlan_tag; + vlan_proto = vlan_info->vlan_proto; + state = vport->port_base_vlan_cfg.state; + + if (state != HNAE3_PORT_BASE_VLAN_DISABLE) { + clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]); + ret = hclge_set_vlan_filter_hw(hdev, htons(vlan_proto), + vport->vport_id, + vlan_id, false); + vport->port_base_vlan_cfg.tbl_sta = ret == 0; + } } +} - list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { - ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), - vport->vport_id, - vlan->vlan_id, false); - if (ret) - break; - vlan->hd_tbl_status = true; +void hclge_restore_vport_vlan_table(struct hclge_vport *vport) +{ + struct hclge_vport_vlan_cfg *vlan, *tmp; + struct hclge_dev *hdev = vport->back; + int ret; + + mutex_lock(&hdev->vport_lock); + + if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) { + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { + ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), + vport->vport_id, + vlan->vlan_id, false); + if (ret) + break; + vlan->hd_tbl_status = true; + } } + + mutex_unlock(&hdev->vport_lock); } /* For global reset and imp reset, hardware will clear the mac table, @@ -9984,6 +10037,7 @@ static void hclge_restore_hw_table(struct hclge_dev *hdev) struct hnae3_handle *handle = &vport->nic; hclge_restore_mac_table_common(vport); + hclge_restore_vport_port_base_vlan_config(hdev); hclge_restore_vport_vlan_table(vport); set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); hclge_restore_fd_entries(handle); @@ -10040,6 +10094,8 @@ static int hclge_update_vlan_filter_entries(struct hclge_vport *vport, false); } + vport->port_base_vlan_cfg.tbl_sta = false; + /* force add VLAN 0 */ ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0); if (ret) @@ -10129,7 +10185,9 @@ out: else nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE; + vport->port_base_vlan_cfg.old_vlan_info = *old_vlan_info; vport->port_base_vlan_cfg.vlan_info = *vlan_info; + vport->port_base_vlan_cfg.tbl_sta = true; hclge_set_vport_vlan_fltr_change(vport); return 0; @@ -10197,14 +10255,17 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, return ret; } - /* for DEVICE_VERSION_V3, vf doesn't need to know about the port based + /* there is a timewindow for PF to know VF unalive, it may + * cause send mailbox fail, but it doesn't matter, VF will + * query it when reinit. 
+ * for DEVICE_VERSION_V3, vf doesn't need to know about the port based * VLAN state. */ if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 && test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) - hclge_push_vf_port_base_vlan_info(&hdev->vport[0], - vport->vport_id, state, - &vlan_info); + (void)hclge_push_vf_port_base_vlan_info(&hdev->vport[0], + vport->vport_id, + state, &vlan_info); return 0; } @@ -11838,8 +11899,8 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) hclge_misc_irq_uninit(hdev); hclge_devlink_uninit(hdev); hclge_pci_uninit(hdev); - mutex_destroy(&hdev->vport_lock); hclge_uninit_vport_vlan_table(hdev); + mutex_destroy(&hdev->vport_lock); ae_dev->priv = NULL; } @@ -12663,6 +12724,55 @@ static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle, return 0; } +/* After disable sriov, VF still has some config and info need clean, + * which configed by PF. + */ +static void hclge_clear_vport_vf_info(struct hclge_vport *vport, int vfid) +{ + struct hclge_dev *hdev = vport->back; + struct hclge_vlan_info vlan_info; + int ret; + + /* after disable sriov, clean VF rate configured by PF */ + ret = hclge_tm_qs_shaper_cfg(vport, 0); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to clean vf%d rate config, ret = %d\n", + vfid, ret); + + vlan_info.vlan_tag = 0; + vlan_info.qos = 0; + vlan_info.vlan_proto = ETH_P_8021Q; + ret = hclge_update_port_base_vlan_cfg(vport, + HNAE3_PORT_BASE_VLAN_DISABLE, + &vlan_info); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to clean vf%d port base vlan, ret = %d\n", + vfid, ret); + + ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, false); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to clean vf%d spoof config, ret = %d\n", + vfid, ret); + + memset(&vport->vf_info, 0, sizeof(vport->vf_info)); +} + +static void hclge_clean_vport_config(struct hnae3_ae_dev *ae_dev, int num_vfs) +{ + struct hclge_dev *hdev = ae_dev->priv; + struct hclge_vport *vport; + int i; + + for (i = 0; i < num_vfs; i++) { + vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM]; + + hclge_clear_vport_vf_info(vport, i); + } +} + static const struct hnae3_ae_ops hclge_ops = { .init_ae_dev = hclge_init_ae_dev, .uninit_ae_dev = hclge_uninit_ae_dev, @@ -12764,6 +12874,7 @@ static const struct hnae3_ae_ops hclge_ops = { .get_rx_hwts = hclge_ptp_get_rx_hwts, .get_ts_info = hclge_ptp_get_ts_info, .get_link_diagnosis_info = hclge_get_link_diagnosis_info, + .clean_vf_config = hclge_clean_vport_config, }; static struct hnae3_ae_algo ae_algo = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index fc92ae385e30..c70239758bb2 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -985,7 +985,9 @@ struct hclge_vlan_info { struct hclge_port_base_vlan_config { u16 state; + bool tbl_sta; struct hclge_vlan_info vlan_info; + struct hclge_vlan_info old_vlan_info; }; struct hclge_vf_info { @@ -1031,6 +1033,7 @@ struct hclge_vport { spinlock_t mac_list_lock; /* protect mac address need to add/detele */ struct list_head uc_mac_list; /* Store VF unicast table */ struct list_head mc_mac_list; /* Store VF multicast table */ + struct list_head vlan_list; /* Store VF vlan table */ }; @@ -1100,6 +1103,7 @@ void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list, void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list); void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev); void 
hclge_restore_mac_table_common(struct hclge_vport *vport); +void hclge_restore_vport_port_base_vlan_config(struct hclge_dev *hdev); void hclge_restore_vport_vlan_table(struct hclge_vport *vport); int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state, struct hclge_vlan_info *vlan_info); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c index 63d2be4349e3..03d63b6a9b2b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c @@ -48,7 +48,7 @@ static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum, int ret; if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state)) - return 0; + return -EBUSY; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, false); @@ -86,7 +86,7 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum) int ret; if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state)) - return 0; + return -EBUSY; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, true); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 93389bec8d89..342d7cdf6285 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -2862,6 +2862,11 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) return ret; } + /* get current port based vlan state from PF */ + ret = hclgevf_get_port_base_vlan_filter_state(hdev); + if (ret) + return ret; + set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); hclgevf_init_rxd_adv_layout(hdev); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index 91f86d77cd41..3a31fb8cc155 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c @@ -605,7 +605,7 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc, struct npc_install_flow_req req = { 0 }; struct npc_install_flow_rsp rsp = { 0 }; struct npc_mcam *mcam = &rvu->hw->mcam; - struct nix_rx_action action; + struct nix_rx_action action = { 0 }; int blkaddr, index; /* AF's and SDP VFs work in promiscuous mode */ @@ -626,7 +626,6 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc, *(u64 *)&action = npc_get_mcam_action(rvu, mcam, blkaddr, index); } else { - *(u64 *)&action = 0x00; action.op = NIX_RX_ACTIONOP_UCAST; action.pf_func = pcifunc; } @@ -657,7 +656,7 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc, struct npc_mcam *mcam = &rvu->hw->mcam; struct rvu_hwinfo *hw = rvu->hw; int blkaddr, ucast_idx, index; - struct nix_rx_action action; + struct nix_rx_action action = { 0 }; u64 relaxed_mask; if (!hw->cap.nix_rx_multicast && is_cgx_vf(rvu, pcifunc)) @@ -685,14 +684,14 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc, blkaddr, ucast_idx); if (action.op != NIX_RX_ACTIONOP_RSS) { - *(u64 *)&action = 0x00; + *(u64 *)&action = 0; action.op = NIX_RX_ACTIONOP_UCAST; } /* RX_ACTION set to MCAST for CGX PF's */ if (hw->cap.nix_rx_multicast && pfvf->use_mce_list && is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) { - *(u64 *)&action = 0x00; + *(u64 *)&action = 0; action.op = NIX_RX_ACTIONOP_MCAST; pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); action.index = pfvf->promisc_mce_idx; @@ -832,7 +831,7 @@ void rvu_npc_install_allmulti_entry(struct rvu 
*rvu, u16 pcifunc, int nixlf, struct rvu_hwinfo *hw = rvu->hw; int blkaddr, ucast_idx, index; u8 mac_addr[ETH_ALEN] = { 0 }; - struct nix_rx_action action; + struct nix_rx_action action = { 0 }; struct rvu_pfvf *pfvf; u16 vf_func; @@ -861,14 +860,14 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf, blkaddr, ucast_idx); if (action.op != NIX_RX_ACTIONOP_RSS) { - *(u64 *)&action = 0x00; + *(u64 *)&action = 0; action.op = NIX_RX_ACTIONOP_UCAST; action.pf_func = pcifunc; } /* RX_ACTION set to MCAST for CGX PF's */ if (hw->cap.nix_rx_multicast && pfvf->use_mce_list) { - *(u64 *)&action = 0x00; + *(u64 *)&action = 0; action.op = NIX_RX_ACTIONOP_MCAST; action.index = pfvf->mcast_mce_idx; } diff --git a/drivers/net/ethernet/microchip/sparx5/Kconfig b/drivers/net/ethernet/microchip/sparx5/Kconfig index 7bdbb2d09a14..85b24edb65d5 100644 --- a/drivers/net/ethernet/microchip/sparx5/Kconfig +++ b/drivers/net/ethernet/microchip/sparx5/Kconfig @@ -4,6 +4,7 @@ config SPARX5_SWITCH depends on HAS_IOMEM depends on OF depends on ARCH_SPARX5 || COMPILE_TEST + depends on PTP_1588_CLOCK_OPTIONAL select PHYLINK select PHY_SPARX5_SERDES select RESET_CONTROLLER diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c index 2dc87584023a..1e9ff365459e 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c @@ -422,6 +422,8 @@ static int sparx5_fdma_tx_alloc(struct sparx5 *sparx5) db_hw->dataptr = phys; db_hw->status = 0; db = devm_kzalloc(sparx5->dev, sizeof(*db), GFP_KERNEL); + if (!db) + return -ENOMEM; db->cpu_addr = cpu_addr; list_add_tail(&db->list, &tx->db_list); } diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c index 35abb3d0ce19..a5837dbe0c7e 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c @@ -212,19 +212,7 @@ bool sparx5_mact_find(struct sparx5 *sparx5, mutex_unlock(&sparx5->lock); - return ret == 0; -} - -static int sparx5_mact_lookup(struct sparx5 *sparx5, - const unsigned char mac[ETH_ALEN], - u16 vid) -{ - u32 pcfg2; - - if (sparx5_mact_find(sparx5, mac, vid, &pcfg2)) - return 1; - - return 0; + return ret; } int sparx5_mact_forget(struct sparx5 *sparx5, @@ -305,9 +293,10 @@ int sparx5_add_mact_entry(struct sparx5 *sparx5, { struct sparx5_mact_entry *mact_entry; int ret; + u32 cfg2; - ret = sparx5_mact_lookup(sparx5, addr, vid); - if (ret) + ret = sparx5_mact_find(sparx5, addr, vid, &cfg2); + if (!ret) return 0; /* In case the entry already exists, don't add it again to SW, diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h index 7a04b8f2a546..b197129044b5 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h @@ -65,13 +65,10 @@ enum sparx5_vlan_port_type { #define PGID_IPV6_MC_CTRL (PGID_BASE + 5) #define PGID_BCAST (PGID_BASE + 6) #define PGID_CPU (PGID_BASE + 7) +#define PGID_MCAST_START (PGID_BASE + 8) #define PGID_TABLE_SIZE 3290 -#define PGID_MCAST_START 65 -#define PGID_GLAG_START 833 -#define PGID_GLAG_END 1088 - #define IFH_LEN 9 /* 36 bytes */ #define NULL_VID 0 #define SPX5_MACT_PULL_DELAY (2 * HZ) @@ -328,6 +325,7 @@ void sparx5_mact_init(struct sparx5 *sparx5); /* sparx5_vlan.c */ void sparx5_pgid_update_mask(struct sparx5_port *port, int 
pgid, bool enable); +void sparx5_pgid_read_mask(struct sparx5 *sparx5, int pgid, u32 portmask[3]); void sparx5_update_fwd(struct sparx5 *sparx5); void sparx5_vlan_init(struct sparx5 *sparx5); void sparx5_vlan_port_setup(struct sparx5 *sparx5, int portno); @@ -374,7 +372,6 @@ enum sparx5_pgid_type { SPX5_PGID_FREE, SPX5_PGID_RESERVED, SPX5_PGID_MULTICAST, - SPX5_PGID_GLAG }; void sparx5_pgid_init(struct sparx5 *spx5); diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c b/drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c index 90366fcb9958..af8b435009f4 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c @@ -15,28 +15,14 @@ void sparx5_pgid_init(struct sparx5 *spx5) spx5->pgid_map[i] = SPX5_PGID_RESERVED; } -int sparx5_pgid_alloc_glag(struct sparx5 *spx5, u16 *idx) -{ - int i; - - for (i = PGID_GLAG_START; i <= PGID_GLAG_END; i++) - if (spx5->pgid_map[i] == SPX5_PGID_FREE) { - spx5->pgid_map[i] = SPX5_PGID_GLAG; - *idx = i; - return 0; - } - - return -EBUSY; -} - int sparx5_pgid_alloc_mcast(struct sparx5 *spx5, u16 *idx) { int i; + /* The multicast area starts at index 65, but the first 7 + * are reserved for flood masks and CPU. Start alloc after that. + */ for (i = PGID_MCAST_START; i < PGID_TABLE_SIZE; i++) { - if (i == PGID_GLAG_START) - i = PGID_GLAG_END + 1; - if (spx5->pgid_map[i] == SPX5_PGID_FREE) { spx5->pgid_map[i] = SPX5_PGID_MULTICAST; *idx = i; diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c index 2d8e0b81c839..5389fffc694a 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c @@ -406,11 +406,11 @@ static int sparx5_handle_port_mdb_add(struct net_device *dev, res = sparx5_mact_find(spx5, v->addr, vid, &mact_entry); - if (res) { + if (res == 0) { pgid_idx = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(mact_entry); - /* MC_IDX has an offset of 65 in the PGID table. */ - pgid_idx += PGID_MCAST_START; + /* MC_IDX starts after the port masks in the PGID table */ + pgid_idx += SPX5_PORTS; sparx5_pgid_update_mask(port, pgid_idx, true); } else { err = sparx5_pgid_alloc_mcast(spx5, &pgid_idx); @@ -468,17 +468,15 @@ static int sparx5_handle_port_mdb_del(struct net_device *dev, res = sparx5_mact_find(spx5, v->addr, vid, &mact_entry); - if (res) { + if (res == 0) { pgid_idx = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(mact_entry); - /* MC_IDX has an offset of 65 in the PGID table. */ - pgid_idx += PGID_MCAST_START; + /* MC_IDX starts after the port masks in the PGID table */ + pgid_idx += SPX5_PORTS; sparx5_pgid_update_mask(port, pgid_idx, false); - pgid_entry[0] = spx5_rd(spx5, ANA_AC_PGID_CFG(pgid_idx)); - pgid_entry[1] = spx5_rd(spx5, ANA_AC_PGID_CFG1(pgid_idx)); - pgid_entry[2] = spx5_rd(spx5, ANA_AC_PGID_CFG2(pgid_idx)); - if (pgid_entry[0] == 0 && pgid_entry[1] == 0 && pgid_entry[2] == 0) { + sparx5_pgid_read_mask(spx5, pgid_idx, pgid_entry); + if (bitmap_empty((unsigned long *)pgid_entry, SPX5_PORTS)) { /* No ports are in MC group. 
Remove entry */ err = sparx5_mdb_del_entry(dev, spx5, v->addr, vid, pgid_idx); if (err) diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c index 8e56ffa1c4f7..37e4ac965849 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c @@ -138,6 +138,13 @@ void sparx5_pgid_update_mask(struct sparx5_port *port, int pgid, bool enable) } } +void sparx5_pgid_read_mask(struct sparx5 *spx5, int pgid, u32 portmask[3]) +{ + portmask[0] = spx5_rd(spx5, ANA_AC_PGID_CFG(pgid)); + portmask[1] = spx5_rd(spx5, ANA_AC_PGID_CFG1(pgid)); + portmask[2] = spx5_rd(spx5, ANA_AC_PGID_CFG2(pgid)); +} + void sparx5_update_fwd(struct sparx5 *sparx5) { DECLARE_BITMAP(workmask, SPX5_PORTS); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h index 5d79ee4370bc..7519773eaca6 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h @@ -51,7 +51,7 @@ static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_dcb *dcb) if (dcb && dcb->ops->get_hw_capability) return dcb->ops->get_hw_capability(dcb); - return 0; + return -EOPNOTSUPP; } static inline void qlcnic_dcb_free(struct qlcnic_dcb *dcb) @@ -65,7 +65,7 @@ static inline int qlcnic_dcb_attach(struct qlcnic_dcb *dcb) if (dcb && dcb->ops->attach) return dcb->ops->attach(dcb); - return 0; + return -EOPNOTSUPP; } static inline int @@ -74,7 +74,7 @@ qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *dcb, char *buf) if (dcb && dcb->ops->query_hw_capability) return dcb->ops->query_hw_capability(dcb, buf); - return 0; + return -EOPNOTSUPP; } static inline void qlcnic_dcb_get_info(struct qlcnic_dcb *dcb) @@ -89,7 +89,7 @@ qlcnic_dcb_query_cee_param(struct qlcnic_dcb *dcb, char *buf, u8 type) if (dcb && dcb->ops->query_cee_param) return dcb->ops->query_cee_param(dcb, buf, type); - return 0; + return -EOPNOTSUPP; } static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_dcb *dcb) @@ -97,7 +97,7 @@ static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_dcb *dcb) if (dcb && dcb->ops->get_cee_cfg) return dcb->ops->get_cee_cfg(dcb); - return 0; + return -EOPNOTSUPP; } static inline void qlcnic_dcb_aen_handler(struct qlcnic_dcb *dcb, void *msg) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c index 0cc28c79cc61..835caa15d55f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c @@ -487,6 +487,13 @@ static int ethqos_clks_config(void *priv, bool enabled) dev_err(ðqos->pdev->dev, "rgmii_clk enable failed\n"); return ret; } + + /* Enable functional clock to prevent DMA reset to timeout due + * to lacking PHY clock after the hardware block has been power + * cycled. The actual configuration will be adjusted once + * ethqos_fix_mac_speed() is invoked. 
+ */ + ethqos_set_func_clk_en(ethqos); } else { clk_disable_unprepare(ethqos->rgmii_clk); } diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index 3c683e0e40e9..e36809aa6d30 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -11,6 +11,7 @@ */ #include "bcm-phy-lib.h" +#include <linux/delay.h> #include <linux/module.h> #include <linux/phy.h> #include <linux/brcmphy.h> @@ -602,6 +603,26 @@ static int brcm_fet_config_init(struct phy_device *phydev) if (err < 0) return err; + /* The datasheet indicates the PHY needs up to 1us to complete a reset, + * build some slack here. + */ + usleep_range(1000, 2000); + + /* The PHY requires 65 MDC clock cycles to complete a write operation + * and turnaround the line properly. + * + * We ignore -EIO here as the MDIO controller (e.g.: mdio-bcm-unimac) + * may flag the lack of turn-around as a read failure. This is + * particularly true with this combination since the MDIO controller + * only used 64 MDC cycles. This is not a critical failure in this + * specific case and it has no functional impact otherwise, so we let + * that one go through. If there is a genuine bus error, the next read + * of MII_BRCM_FET_INTREG will error out. + */ + err = phy_read(phydev, MII_BMCR); + if (err < 0 && err != -EIO) + return err; + reg = phy_read(phydev, MII_BRCM_FET_INTREG); if (reg < 0) return reg; diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index a31098981a65..e2fa56b92685 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -1872,6 +1872,45 @@ static const struct driver_info mct_info = { .tx_fixup = ax88179_tx_fixup, }; +static const struct driver_info at_umc2000_info = { + .description = "AT-UMC2000 USB 3.0/USB 3.1 Gen 1 to Gigabit Ethernet Adapter", + .bind = ax88179_bind, + .unbind = ax88179_unbind, + .status = ax88179_status, + .link_reset = ax88179_link_reset, + .reset = ax88179_reset, + .stop = ax88179_stop, + .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .rx_fixup = ax88179_rx_fixup, + .tx_fixup = ax88179_tx_fixup, +}; + +static const struct driver_info at_umc200_info = { + .description = "AT-UMC200 USB 3.0/USB 3.1 Gen 1 to Fast Ethernet Adapter", + .bind = ax88179_bind, + .unbind = ax88179_unbind, + .status = ax88179_status, + .link_reset = ax88179_link_reset, + .reset = ax88179_reset, + .stop = ax88179_stop, + .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .rx_fixup = ax88179_rx_fixup, + .tx_fixup = ax88179_tx_fixup, +}; + +static const struct driver_info at_umc2000sp_info = { + .description = "AT-UMC2000/SP USB 3.0/USB 3.1 Gen 1 to Gigabit Ethernet Adapter", + .bind = ax88179_bind, + .unbind = ax88179_unbind, + .status = ax88179_status, + .link_reset = ax88179_link_reset, + .reset = ax88179_reset, + .stop = ax88179_stop, + .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .rx_fixup = ax88179_rx_fixup, + .tx_fixup = ax88179_tx_fixup, +}; + static const struct usb_device_id products[] = { { /* ASIX AX88179 10/100/1000 */ @@ -1913,6 +1952,18 @@ static const struct usb_device_id products[] = { /* Magic Control Technology U3-A9003 USB 3.0 Gigabit Ethernet Adapter */ USB_DEVICE(0x0711, 0x0179), .driver_info = (unsigned long)&mct_info, +}, { + /* Allied Telesis AT-UMC2000 USB 3.0/USB 3.1 Gen 1 to Gigabit Ethernet Adapter */ + USB_DEVICE(0x07c9, 0x000e), + .driver_info = (unsigned long)&at_umc2000_info, +}, { + /* Allied Telesis AT-UMC200 USB 3.0/USB 3.1 Gen 1 to Fast Ethernet Adapter */ + USB_DEVICE(0x07c9, 0x000f), + .driver_info = (unsigned long)&at_umc200_info, +}, { + /* 
Allied Telesis AT-UMC2000/SP USB 3.0/USB 3.1 Gen 1 to Gigabit Ethernet Adapter */ + USB_DEVICE(0x07c9, 0x0010), + .driver_info = (unsigned long)&at_umc2000sp_info, }, { }, }; |
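
A footnote on the bnxt_ptp hunks above: pin_id changes from u8 to int and TSIO_PIN_VALID() gains a ">= 0" clause because ptp_find_pin() reports failure with a negative value, and storing that in an unsigned variable silently turns it into a large positive index. A minimal userspace sketch of the trap follows; find_pin(), MAX_PINS and the two macros are stand-ins for illustration, not the driver's actual definitions.

/* Illustration of why a possibly-negative lookup result must be kept signed
 * and range-checked on both ends before being used as an index. */
#include <stdio.h>

#define MAX_PINS 4
#define PIN_VALID_OLD(pin) ((pin) < MAX_PINS)               /* misses negatives */
#define PIN_VALID_NEW(pin) ((pin) >= 0 && (pin) < MAX_PINS) /* as in the patch */

/* Stand-in for ptp_find_pin(): index on success, -1 when nothing matches. */
static int find_pin(int want)
{
	return (want >= 0 && want < MAX_PINS) ? want : -1;
}

int main(void)
{
	int pin = find_pin(7);				/* lookup fails, pin == -1 */
	unsigned char upin = (unsigned char)pin;	/* u8 storage: becomes 255 */

	printf("old check on int: %s\n", PIN_VALID_OLD(pin) ? "accepted (bug)" : "rejected");
	printf("old check on u8 : %s\n", PIN_VALID_OLD(upin) ? "accepted" : "rejected");
	printf("new check on int: %s\n", PIN_VALID_NEW(pin) ? "accepted" : "rejected");
	return 0;
}

In the sketch either change alone would catch the bad value (255 also fails the "< MAX_PINS" test); keeping the value signed and applying the widened macro lets the caller reject an invalid pin and return -EOPNOTSUPP, as the new hunks in bnxt_ptp_enable() show.
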