author      Linus Torvalds <torvalds@linux-foundation.org>   2014-12-12 01:27:06 +0300
committer   Linus Torvalds <torvalds@linux-foundation.org>   2014-12-12 01:27:06 +0300
commit      70e71ca0af244f48a5dcf56dc435243792e3a495 (patch)
tree        f7d9c4c4d9a857a00043e9bf6aa2d6f533a34778 /drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
parent      bae41e45b7400496b9bf0c70c6004419d9987819 (diff)
parent      00c83b01d58068dfeb2e1351cca6fccf2a83fa8f (diff)
download    linux-70e71ca0af244f48a5dcf56dc435243792e3a495.tar.xz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
1) New offloading infrastructure and example 'rocker' driver for
offloading of switching and routing to hardware.
This work was done by a large group of dedicated individuals, not
limited to: Scott Feldman, Jiri Pirko, Thomas Graf, John Fastabend,
Jamal Hadi Salim, Andy Gospodarek, Florian Fainelli, Roopa Prabhu
2) Start making the networking operate on IOV iterators instead of
modifying iov objects in-situ during transfers. Thanks to Al Viro
and Herbert Xu.
3) A set of new netlink interfaces for the TIPC stack, from Richard
Alpe.
4) Remove unnecessary looping during ipv6 routing lookups, from Martin
KaFai Lau.
5) Add PAUSE frame generation support to gianfar driver, from Matei
Pavaluca.
6) Allow for larger reordering levels in TCP, which are easily
achievable in the real world right now, from Eric Dumazet.
7) Add a variant of napi_schedule that doesn't need to disable cpu
interrupts, from Eric Dumazet (see the usage sketch after this list).
8) Use a doubly linked list to optimize neigh_parms_release(), from
Nicolas Dichtel.
9) Various enhancements to the kernel BPF verifier, and allow eBPF
programs to actually be attached to sockets (see the attach sketch
after this list). From Alexei Starovoitov.
10) Support TSO/LSO in sunvnet driver, from David L Stevens.
11) Allow controlling ECN usage via routing metrics, from Florian
Westphal.
12) Remote checksum offload, from Tom Herbert.
13) Add split-header receive, BQL, and xmit_more support to amd-xgbe
driver, from Thomas Lendacky.
14) Add MPLS support to openvswitch, from Simon Horman.
15) Support wildcard tunnel endpoints in ipv6 tunnels, from Steffen
Klassert.
16) Do gro flushes on a per-device basis using a timer, from Eric
Dumazet. This tries to resolve the conflict between the desired
handling of bulk vs. RPC-like traffic.
17) Allow userspace to ask for the CPU on which a packet was
received/steered, via SO_INCOMING_CPU (see the sketch after this
list). From Eric Dumazet.
18) Limit GSO packets to half the current congestion window, from Eric
Dumazet.
19) Add a generic helper so that all drivers set their RSS keys in a
consistent way, from Eric Dumazet (see the sketch after this list).
20) Add xmit_more support to enic driver, from Govindarajulu
Varadarajan.
21) Add VLAN packet scheduler action, from Jiri Pirko.
22) Support configurable RSS hash functions via ethtool, from Eyal
Perry.
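
As a quick illustration of item 7, the sketch below is hypothetical driver code (the my_q_vector structure and its interrupt-mask register are made up) using the new napi_schedule_irqoff() helper: a hardirq handler already runs with local interrupts disabled, so the helper can skip the local_irq_save()/local_irq_restore() pair that plain napi_schedule() performs.

	#include <linux/interrupt.h>
	#include <linux/netdevice.h>
	#include <linux/io.h>

	/* Illustrative per-queue context; real drivers define their own. */
	struct my_q_vector {
		struct napi_struct napi;
		void __iomem *irq_mask_reg;
	};

	static irqreturn_t my_msix_ring_handler(int irq, void *data)
	{
		struct my_q_vector *q = data;

		writel(0, q->irq_mask_reg);	/* mask this vector (device specific) */

		/* Hardirq context: local interrupts are already off, so avoid
		 * the redundant save/restore done by napi_schedule().
		 */
		napi_schedule_irqoff(&q->napi);

		return IRQ_HANDLED;
	}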
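
For item 9, a minimal userspace sketch of the new SO_ATTACH_BPF socket option. It assumes prog_fd is a descriptor previously returned by the bpf(2) syscall's BPF_PROG_LOAD command for a BPF_PROG_TYPE_SOCKET_FILTER program; the fallback #define only covers C libraries whose headers do not yet carry the constant (the value shown is the asm-generic one).

	#include <stdio.h>
	#include <sys/socket.h>

	#ifndef SO_ATTACH_BPF
	#define SO_ATTACH_BPF 50	/* asm-generic value; some archs differ */
	#endif

	/* Attach an already-loaded eBPF socket filter to sock_fd. */
	static int attach_bpf_filter(int sock_fd, int prog_fd)
	{
		if (setsockopt(sock_fd, SOL_SOCKET, SO_ATTACH_BPF,
			       &prog_fd, sizeof(prog_fd)) < 0) {
			perror("setsockopt(SO_ATTACH_BPF)");
			return -1;
		}
		return 0;
	}

Once attached, the program runs on every packet queued to the socket and its return value gives the number of bytes to keep (0 drops the packet), mirroring classic socket-filter semantics.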
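
Item 17 in practice: a small sketch that reads SO_INCOMING_CPU to learn which CPU last handled receive processing for a connected socket, e.g. so a worker thread can be pinned near it. Again, the #define fallback is only for older userspace headers.

	#include <stdio.h>
	#include <sys/socket.h>

	#ifndef SO_INCOMING_CPU
	#define SO_INCOMING_CPU 49	/* asm-generic value; some archs differ */
	#endif

	/* Return the CPU that last processed RX for this socket, or -1. */
	static int socket_incoming_cpu(int sock_fd)
	{
		int cpu = -1;
		socklen_t len = sizeof(cpu);

		if (getsockopt(sock_fd, SOL_SOCKET, SO_INCOMING_CPU,
			       &cpu, &len) < 0) {
			perror("getsockopt(SO_INCOMING_CPU)");
			return -1;
		}
		return cpu;
	}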
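
And for item 19, a kernel-side sketch of the new netdev_rss_key_fill() helper, which copies from a single per-host randomized key so that every driver programs a consistent RSS hash key; the key size below is illustrative and hardware dependent.

	#include <linux/netdevice.h>

	#define MY_RSS_KEY_SIZE 40	/* illustrative; depends on the NIC */

	/* Fill the driver's RSS hash key from the shared per-host key instead
	 * of each driver calling get_random_bytes() on its own.
	 */
	static void my_driver_init_rss_key(u8 rss_key[MY_RSS_KEY_SIZE])
	{
		netdev_rss_key_fill(rss_key, MY_RSS_KEY_SIZE);
	}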
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1820 commits)
Fix race condition between vxlan_sock_add and vxlan_sock_release
net/macb: fix compilation warning for print_hex_dump() called with skb->mac_header
net/mlx4: Add support for A0 steering
net/mlx4: Refactor QUERY_PORT
net/mlx4_core: Add explicit error message when rule doesn't meet configuration
net/mlx4: Add A0 hybrid steering
net/mlx4: Add mlx4_bitmap zone allocator
net/mlx4: Add a check if there are too many reserved QPs
net/mlx4: Change QP allocation scheme
net/mlx4_core: Use tasklet for user-space CQ completion events
net/mlx4_core: Mask out host side virtualization features for guests
net/mlx4_en: Set csum level for encapsulated packets
be2net: Export tunnel offloads only when a VxLAN tunnel is created
gianfar: Fix dma check map error when DMA_API_DEBUG is enabled
cxgb4/csiostor: Don't use MASTER_MUST for fw_hello call
net: fec: only enable mdio interrupt before phy device link up
net: fec: clear all interrupt events to support i.MX6SX
net: fec: reset fep link status in suspend function
net: sock: fix access via invalid file descriptor
net: introduce helper macro for_each_cmsghdr
...
Diffstat (limited to 'drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c')
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c  100
1 file changed, 48 insertions, 52 deletions
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index 66d12f5b4ca8..5fde5a7f4591 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -89,27 +89,37 @@ int i40evf_verify_api_ver(struct i40evf_adapter *adapter)
 	struct i40e_virtchnl_version_info *pf_vvi;
 	struct i40e_hw *hw = &adapter->hw;
 	struct i40e_arq_event_info event;
+	enum i40e_virtchnl_ops op;
 	i40e_status err;
 
-	event.msg_size = I40EVF_MAX_AQ_BUF_SIZE;
-	event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
+	event.buf_len = I40EVF_MAX_AQ_BUF_SIZE;
+	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
 	if (!event.msg_buf) {
 		err = -ENOMEM;
 		goto out;
 	}
 
-	err = i40evf_clean_arq_element(hw, &event, NULL);
-	if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
-		goto out_alloc;
+	while (1) {
+		err = i40evf_clean_arq_element(hw, &event, NULL);
+		/* When the AQ is empty, i40evf_clean_arq_element will return
+		 * nonzero and this loop will terminate.
+		 */
+		if (err)
+			goto out_alloc;
+		op =
+		    (enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
+		if (op == I40E_VIRTCHNL_OP_VERSION)
+			break;
+	}
+
 
 	err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
 	if (err)
 		goto out_alloc;
 
-	if ((enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high) !=
-	    I40E_VIRTCHNL_OP_VERSION) {
+	if (op != I40E_VIRTCHNL_OP_VERSION) {
 		dev_info(&adapter->pdev->dev, "Invalid reply type %d from PF\n",
-			 le32_to_cpu(event.desc.cookie_high));
+			op);
 		err = -EIO;
 		goto out_alloc;
 	}
@@ -153,42 +163,34 @@ int i40evf_get_vf_config(struct i40evf_adapter *adapter)
 {
 	struct i40e_hw *hw = &adapter->hw;
 	struct i40e_arq_event_info event;
-	u16 len;
+	enum i40e_virtchnl_ops op;
 	i40e_status err;
+	u16 len;
 
 	len = sizeof(struct i40e_virtchnl_vf_resource) +
 		I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource);
-	event.msg_size = len;
-	event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
+	event.buf_len = len;
+	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
 	if (!event.msg_buf) {
 		err = -ENOMEM;
 		goto out;
 	}
 
-	err = i40evf_clean_arq_element(hw, &event, NULL);
-	if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
-		goto out_alloc;
-
-	err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
-	if (err) {
-		dev_err(&adapter->pdev->dev,
-			"%s: Error returned from PF, %d, %d\n", __func__,
-			le32_to_cpu(event.desc.cookie_high),
-			le32_to_cpu(event.desc.cookie_low));
-		err = -EIO;
-		goto out_alloc;
+	while (1) {
+		/* When the AQ is empty, i40evf_clean_arq_element will return
+		 * nonzero and this loop will terminate.
+		 */
+		err = i40evf_clean_arq_element(hw, &event, NULL);
+		if (err)
+			goto out_alloc;
+		op =
+		    (enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
+		if (op == I40E_VIRTCHNL_OP_GET_VF_RESOURCES)
+			break;
 	}
 
-	if ((enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high) !=
-	    I40E_VIRTCHNL_OP_GET_VF_RESOURCES) {
-		dev_err(&adapter->pdev->dev,
-			"%s: Invalid response from PF, %d, %d\n", __func__,
-			le32_to_cpu(event.desc.cookie_high),
-			le32_to_cpu(event.desc.cookie_low));
-		err = -EIO;
-		goto out_alloc;
-	}
-	memcpy(adapter->vf_res, event.msg_buf, min(event.msg_size, len));
+	err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
+	memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len));
 
 	i40e_vf_parse_hw_config(hw, adapter->vf_res);
 out_alloc:
@@ -207,7 +209,7 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
 {
 	struct i40e_virtchnl_vsi_queue_config_info *vqci;
 	struct i40e_virtchnl_queue_pair_info *vqpi;
-	int pairs = adapter->vsi_res->num_queue_pairs;
+	int pairs = adapter->num_active_queues;
 	int i, len;
 
 	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
@@ -273,7 +275,7 @@ void i40evf_enable_queues(struct i40evf_adapter *adapter)
 	}
 	adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
 	vqs.vsi_id = adapter->vsi_res->vsi_id;
-	vqs.tx_queues = (1 << adapter->vsi_res->num_queue_pairs) - 1;
+	vqs.tx_queues = (1 << adapter->num_active_queues) - 1;
 	vqs.rx_queues = vqs.tx_queues;
 	adapter->aq_pending |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
 	adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES;
@@ -299,7 +301,7 @@ void i40evf_disable_queues(struct i40evf_adapter *adapter)
 	}
 	adapter->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
 	vqs.vsi_id = adapter->vsi_res->vsi_id;
-	vqs.tx_queues = (1 << adapter->vsi_res->num_queue_pairs) - 1;
+	vqs.tx_queues = (1 << adapter->num_active_queues) - 1;
 	vqs.rx_queues = vqs.tx_queues;
 	adapter->aq_pending |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
 	adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES;
@@ -393,7 +395,7 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
 	      (count * sizeof(struct i40e_virtchnl_ether_addr));
 	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
 		dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n",
-			__func__);
+			 __func__);
 		count = (I40EVF_MAX_AQ_BUF_SIZE -
 			 sizeof(struct i40e_virtchnl_ether_addr_list)) /
 			sizeof(struct i40e_virtchnl_ether_addr);
@@ -454,7 +456,7 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
 	      (count * sizeof(struct i40e_virtchnl_ether_addr));
 	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
 		dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n",
-			__func__);
+			 __func__);
 		count = (I40EVF_MAX_AQ_BUF_SIZE -
 			 sizeof(struct i40e_virtchnl_ether_addr_list)) /
 			sizeof(struct i40e_virtchnl_ether_addr);
@@ -516,7 +518,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
 	      (count * sizeof(u16));
 	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
 		dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n",
-			__func__);
+			 __func__);
 		count = (I40EVF_MAX_AQ_BUF_SIZE -
 			 sizeof(struct i40e_virtchnl_vlan_filter_list)) /
 			sizeof(u16);
@@ -576,7 +578,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
 	      (count * sizeof(u16));
 	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
 		dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n",
-			__func__);
+			 __func__);
 		count = (I40EVF_MAX_AQ_BUF_SIZE -
 			 sizeof(struct i40e_virtchnl_vlan_filter_list)) /
 			sizeof(u16);
@@ -635,6 +637,7 @@ void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
 void i40evf_request_stats(struct i40evf_adapter *adapter)
 {
 	struct i40e_virtchnl_queue_select vqs;
+
 	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
 		/* no error message, this isn't crucial */
 		return;
@@ -709,19 +712,12 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
 				"%s: Unknown event %d from pf\n",
 				__func__, vpe->event);
 			break;
-
 		}
 		return;
 	}
-	if (v_opcode != adapter->current_op) {
-		dev_err(&adapter->pdev->dev, "%s: Pending op is %d, received %d\n",
-			__func__, adapter->current_op, v_opcode);
-		/* We're probably completely screwed at this point, but clear
-		 * the current op and try to carry on....
-		 */
-		adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
-		return;
-	}
+	if (v_opcode != adapter->current_op)
+		dev_info(&adapter->pdev->dev, "Pending op is %d, received %d\n",
+			 adapter->current_op, v_opcode);
 	if (v_retval) {
 		dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d\n",
			__func__, v_retval, v_opcode);
@@ -773,8 +769,8 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
 		adapter->aq_pending &= ~(I40EVF_FLAG_AQ_MAP_VECTORS);
 		break;
 	default:
-		dev_warn(&adapter->pdev->dev, "%s: Received unexpected message %d from PF\n",
-			 __func__, v_opcode);
+		dev_info(&adapter->pdev->dev, "Received unexpected message %d from PF\n",
+			 v_opcode);
 		break;
 	} /* switch v_opcode */
 	adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;