Diffstat (limited to 'drivers/net/hyperv')
-rw-r--r-- | drivers/net/hyperv/hyperv_net.h   |  60
-rw-r--r-- | drivers/net/hyperv/netvsc.c       |  73
-rw-r--r-- | drivers/net/hyperv/netvsc_drv.c   | 111
-rw-r--r-- | drivers/net/hyperv/rndis_filter.c |  44
4 files changed, 119 insertions, 169 deletions
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 88ddfb92122b..0db3bd1ea06f 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -146,7 +146,6 @@ struct hv_netvsc_packet {
 
 struct netvsc_device_info {
         unsigned char mac_adr[ETH_ALEN];
-        int  ring_size;
         u32  num_chn;
         u32  send_sections;
         u32  recv_sections;
@@ -188,18 +187,22 @@ struct rndis_message;
 struct netvsc_device;
 struct net_device_context;
 
+extern u32 netvsc_ring_bytes;
+extern struct reciprocal_value netvsc_ring_reciprocal;
+
 struct netvsc_device *netvsc_device_add(struct hv_device *device,
                                         const struct netvsc_device_info *info);
 int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx);
 void netvsc_device_remove(struct hv_device *device);
-int netvsc_send(struct net_device_context *ndc,
+int netvsc_send(struct net_device *net,
                 struct hv_netvsc_packet *packet,
                 struct rndis_message *rndis_msg,
                 struct hv_page_buffer *page_buffer,
                 struct sk_buff *skb);
-void netvsc_linkstatus_callback(struct hv_device *device_obj,
+void netvsc_linkstatus_callback(struct net_device *net,
                                 struct rndis_message *resp);
 int netvsc_recv_callback(struct net_device *net,
+                         struct netvsc_device *nvdev,
                          struct vmbus_channel *channel,
                          void *data, u32 len,
                          const struct ndis_tcp_ip_checksum_info *csum_info,
@@ -220,7 +223,6 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev,
                                const u8 *key);
 int rndis_filter_receive(struct net_device *ndev,
                          struct netvsc_device *net_dev,
-                         struct hv_device *dev,
                          struct vmbus_channel *channel,
                          void *data, u32 buflen);
 
@@ -635,14 +637,27 @@ struct nvsp_message {
 #define NETVSC_MTU 65535
 #define NETVSC_MTU_MIN ETH_MIN_MTU
 
-#define NETVSC_RECEIVE_BUFFER_SIZE              (1024*1024*16)  /* 16MB */
-#define NETVSC_RECEIVE_BUFFER_SIZE_LEGACY       (1024*1024*15)  /* 15MB */
-#define NETVSC_SEND_BUFFER_SIZE                 (1024 * 1024 * 15)   /* 15MB */
+/* Max buffer sizes allowed by a host */
+#define NETVSC_RECEIVE_BUFFER_SIZE              (1024 * 1024 * 31) /* 31MB */
+#define NETVSC_RECEIVE_BUFFER_SIZE_LEGACY       (1024 * 1024 * 15) /* 15MB */
+#define NETVSC_RECEIVE_BUFFER_DEFAULT           (1024 * 1024 * 16)
+
+#define NETVSC_SEND_BUFFER_SIZE                 (1024 * 1024 * 15)  /* 15MB */
+#define NETVSC_SEND_BUFFER_DEFAULT              (1024 * 1024)
+
 #define NETVSC_INVALID_INDEX                    -1
 
 #define NETVSC_SEND_SECTION_SIZE                6144
 #define NETVSC_RECV_SECTION_SIZE                1728
 
+/* Default size of TX buf: 1MB, RX buf: 16MB */
+#define NETVSC_MIN_TX_SECTIONS  10
+#define NETVSC_DEFAULT_TX       (NETVSC_SEND_BUFFER_DEFAULT \
+                                 / NETVSC_SEND_SECTION_SIZE)
+#define NETVSC_MIN_RX_SECTIONS  10
+#define NETVSC_DEFAULT_RX       (NETVSC_RECEIVE_BUFFER_DEFAULT \
+                                 / NETVSC_RECV_SECTION_SIZE)
+
 #define NETVSC_RECEIVE_BUFFER_ID                0xcafe
 #define NETVSC_SEND_BUFFER_ID                   0
 
@@ -690,6 +705,7 @@ struct netvsc_ethtool_stats {
         unsigned long tx_busy;
         unsigned long tx_send_full;
         unsigned long rx_comp_busy;
+        unsigned long rx_no_memory;
         unsigned long stop_queue;
         unsigned long wake_queue;
 };
@@ -804,13 +820,9 @@ struct netvsc_device {
 
         struct rndis_device *extension;
 
-        int ring_size;
-
         u32 max_pkt; /* max number of pkt in one send, e.g. 8 */
         u32 pkt_align; /* alignment bytes, e.g. 8 */
 
-        atomic_t open_cnt;
-
         struct netvsc_channel chan_table[VRSS_CHANNEL_MAX];
 
         struct rcu_head rcu;
@@ -1425,32 +1437,6 @@ struct rndis_message {
         (sizeof(msg) + (sizeof(struct rndis_message) - \
                         sizeof(union rndis_message_container)))
 
-/* get pointer to info buffer with message pointer */
-#define MESSAGE_TO_INFO_BUFFER(msg)                             \
-        (((unsigned char *)(msg)) + msg->info_buf_offset)
-
-/* get pointer to status buffer with message pointer */
-#define MESSAGE_TO_STATUS_BUFFER(msg)                           \
-        (((unsigned char *)(msg)) + msg->status_buf_offset)
-
-/* get pointer to OOBD buffer with message pointer */
-#define MESSAGE_TO_OOBD_BUFFER(msg)                             \
-        (((unsigned char *)(msg)) + msg->oob_data_offset)
-
-/* get pointer to data buffer with message pointer */
-#define MESSAGE_TO_DATA_BUFFER(msg)                             \
-        (((unsigned char *)(msg)) + msg->per_pkt_info_offset)
-
-/* get pointer to contained message from NDIS_MESSAGE pointer */
-#define RNDIS_MESSAGE_PTR_TO_MESSAGE_PTR(rndis_msg)             \
-        ((void *) &rndis_msg->msg)
-
-/* get pointer to contained message from NDIS_MESSAGE pointer */
-#define RNDIS_MESSAGE_RAW_PTR_TO_MESSAGE_PTR(rndis_msg)         \
-        ((void *) rndis_msg)
-
-
 #define RNDIS_HEADER_SIZE       (sizeof(struct rndis_message) - \
                                  sizeof(union rndis_message_container))
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index bfc79698b8f4..17e529af79dc 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -31,6 +31,7 @@
 #include <linux/vmalloc.h>
 #include <linux/rtnetlink.h>
 #include <linux/prefetch.h>
+#include <linux/reciprocal_div.h>
 
 #include <asm/sync_bitops.h>
 
@@ -72,7 +73,7 @@ static struct netvsc_device *alloc_net_device(void)
 
         init_waitqueue_head(&net_device->wait_drain);
         net_device->destroy = false;
-        atomic_set(&net_device->open_cnt, 0);
+
         net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
         net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
 
@@ -267,6 +268,11 @@ static int netvsc_init_buf(struct hv_device *device,
         buf_size = device_info->recv_sections * device_info->recv_section_size;
         buf_size = roundup(buf_size, PAGE_SIZE);
 
+        /* Legacy hosts only allow smaller receive buffer */
+        if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
+                buf_size = min_t(unsigned int, buf_size,
+                                 NETVSC_RECEIVE_BUFFER_SIZE_LEGACY);
+
         net_device->recv_buf = vzalloc(buf_size);
         if (!net_device->recv_buf) {
                 netdev_err(ndev,
@@ -588,14 +594,11 @@ void netvsc_device_remove(struct hv_device *device)
  * Get the percentage of available bytes to write in the ring.
  * The return value is in range from 0 to 100.
  */
-static inline u32 hv_ringbuf_avail_percent(
-                struct hv_ring_buffer_info *ring_info)
+static u32 hv_ringbuf_avail_percent(const struct hv_ring_buffer_info *ring_info)
 {
-        u32 avail_read, avail_write;
-
-        hv_get_ringbuffer_availbytes(ring_info, &avail_read, &avail_write);
+        u32 avail_write = hv_get_bytes_to_write(ring_info);
 
-        return avail_write * 100 / ring_info->ring_datasize;
+        return reciprocal_divide(avail_write * 100, netvsc_ring_reciprocal);
 }
 
 static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
@@ -698,26 +701,26 @@ static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
         return NETVSC_INVALID_INDEX;
 }
 
-static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
-                                   unsigned int section_index,
-                                   u32 pend_size,
-                                   struct hv_netvsc_packet *packet,
-                                   struct rndis_message *rndis_msg,
-                                   struct hv_page_buffer *pb,
-                                   struct sk_buff *skb)
+static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
+                                    unsigned int section_index,
+                                    u32 pend_size,
+                                    struct hv_netvsc_packet *packet,
+                                    struct rndis_message *rndis_msg,
+                                    struct hv_page_buffer *pb,
+                                    bool xmit_more)
 {
         char *start = net_device->send_buf;
         char *dest = start + (section_index * net_device->send_section_size)
                      + pend_size;
         int i;
-        u32 msg_size = 0;
         u32 padding = 0;
-        u32 remain = packet->total_data_buflen % net_device->pkt_align;
         u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
                 packet->page_buf_cnt;
+        u32 remain;
 
         /* Add padding */
-        if (skb->xmit_more && remain && !packet->cp_partial) {
+        remain = packet->total_data_buflen & (net_device->pkt_align - 1);
+        if (xmit_more && remain) {
                 padding = net_device->pkt_align - remain;
                 rndis_msg->msg_len += padding;
                 packet->total_data_buflen += padding;
@@ -729,16 +732,11 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
                 u32 len = pb[i].len;
 
                 memcpy(dest, (src + offset), len);
-                msg_size += len;
                 dest += len;
         }
 
-        if (padding) {
+        if (padding)
                 memset(dest, 0, padding);
-                msg_size += padding;
-        }
-
-        return msg_size;
 }
 
 static inline int netvsc_send_pkt(
@@ -831,12 +829,13 @@ static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
 }
 
 /* RCU already held by caller */
-int netvsc_send(struct net_device_context *ndev_ctx,
+int netvsc_send(struct net_device *ndev,
                 struct hv_netvsc_packet *packet,
                 struct rndis_message *rndis_msg,
                 struct hv_page_buffer *pb,
                 struct sk_buff *skb)
 {
+        struct net_device_context *ndev_ctx = netdev_priv(ndev);
         struct netvsc_device *net_device
                 = rcu_dereference_bh(ndev_ctx->nvdev);
         struct hv_device *device = ndev_ctx->device_ctx;
@@ -847,8 +846,7 @@ int netvsc_send(struct net_device_context *ndev_ctx,
         struct multi_send_data *msdp;
         struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
         struct sk_buff *msd_skb = NULL;
-        bool try_batch;
-        bool xmit_more = (skb != NULL) ? skb->xmit_more : false;
+        bool try_batch, xmit_more;
 
         /* If device is rescinded, return error and packet will get dropped. */
         if (unlikely(!net_device || net_device->destroy))
@@ -899,10 +897,17 @@ int netvsc_send(struct net_device_context *ndev_ctx,
                 }
         }
 
+        /* Keep aggregating only if stack says more data is coming
+         * and not doing mixed modes send and not flow blocked
+         */
+        xmit_more = skb->xmit_more &&
+                !packet->cp_partial &&
+                !netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx));
+
         if (section_index != NETVSC_INVALID_INDEX) {
                 netvsc_copy_to_send_buf(net_device,
                                         section_index, msd_len,
-                                        packet, rndis_msg, pb, skb);
+                                        packet, rndis_msg, pb, xmit_more);
 
                 packet->send_buf_index = section_index;
 
@@ -922,7 +927,7 @@ int netvsc_send(struct net_device_context *ndev_ctx,
                 if (msdp->skb)
                         dev_consume_skb_any(msdp->skb);
 
-                if (xmit_more && !packet->cp_partial) {
+                if (xmit_more) {
                         msdp->skb = skb;
                         msdp->pkt = packet;
                         msdp->count++;
@@ -1085,7 +1090,7 @@ static int netvsc_receive(struct net_device *ndev,
                 u32 buflen = vmxferpage_packet->ranges[i].byte_count;
 
                 /* Pass it to the upper layer */
-                status = rndis_filter_receive(ndev, net_device, device,
+                status = rndis_filter_receive(ndev, net_device,
                                               channel, data, buflen);
         }
 
@@ -1249,7 +1254,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
                                 const struct netvsc_device_info *device_info)
 {
         int i, ret = 0;
-        int ring_size = device_info->ring_size;
         struct netvsc_device *net_device;
         struct net_device *ndev = hv_get_drvdata(device);
         struct net_device_context *net_device_ctx = netdev_priv(ndev);
@@ -1261,8 +1265,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
         for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
                 net_device_ctx->tx_table[i] = 0;
 
-        net_device->ring_size = ring_size;
-
         /* Because the device uses NAPI, all the interrupt batching and
          * control is done via Net softirq, not the channel handling
          */
@@ -1289,10 +1291,9 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
                        netvsc_poll, NAPI_POLL_WEIGHT);
 
         /* Open the channel */
-        ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
-                         ring_size * PAGE_SIZE, NULL, 0,
-                         netvsc_channel_cb,
-                         net_device->chan_table);
+        ret = vmbus_open(device->channel, netvsc_ring_bytes,
+                         netvsc_ring_bytes, NULL, 0,
+                         netvsc_channel_cb, net_device->chan_table);
 
         if (ret != 0) {
                 netif_napi_del(&net_device->chan_table[0].napi);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 5129647d420c..c5584c2d440e 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -35,6 +35,7 @@
 #include <linux/slab.h>
 #include <linux/rtnetlink.h>
 #include <linux/netpoll.h>
+#include <linux/reciprocal_div.h>
 
 #include <net/arp.h>
 #include <net/route.h>
@@ -46,17 +47,15 @@
 #include "hyperv_net.h"
 
 #define RING_SIZE_MIN           64
-#define NETVSC_MIN_TX_SECTIONS  10
-#define NETVSC_DEFAULT_TX       192     /* ~1M */
-#define NETVSC_MIN_RX_SECTIONS  10      /* ~64K */
-#define NETVSC_DEFAULT_RX       10485   /* Max ~16M */
 
 #define LINKCHANGE_INT (2 * HZ)
 #define VF_TAKEOVER_INT (HZ / 10)
 
-static int ring_size = 128;
-module_param(ring_size, int, S_IRUGO);
+static unsigned int ring_size __ro_after_init = 128;
+module_param(ring_size, uint, S_IRUGO);
 MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
+unsigned int netvsc_ring_bytes __ro_after_init;
+struct reciprocal_value netvsc_ring_reciprocal __ro_after_init;
 
 static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
                                 NETIF_MSG_LINK | NETIF_MSG_IFUP |
@@ -174,17 +173,15 @@ out:
         return ret;
 }
 
-static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
-                           int pkt_type)
+static inline void *init_ppi_data(struct rndis_message *msg,
+                                  u32 ppi_size, u32 pkt_type)
 {
-        struct rndis_packet *rndis_pkt;
+        struct rndis_packet *rndis_pkt = &msg->msg.pkt;
         struct rndis_per_packet_info *ppi;
 
-        rndis_pkt = &msg->msg.pkt;
         rndis_pkt->data_offset += ppi_size;
-
-        ppi = (struct rndis_per_packet_info *)((void *)rndis_pkt +
-                rndis_pkt->per_pkt_info_offset + rndis_pkt->per_pkt_info_len);
+        ppi = (void *)rndis_pkt + rndis_pkt->per_pkt_info_offset
+                + rndis_pkt->per_pkt_info_len;
 
         ppi->size = ppi_size;
         ppi->type = pkt_type;
@@ -192,7 +189,7 @@ static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
 
         rndis_pkt->per_pkt_info_len += ppi_size;
 
-        return ppi;
+        return ppi + 1;
 }
 
 /* Azure hosts don't support non-TCP port numbers in hashing for fragmented
@@ -469,10 +466,8 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
         int ret;
         unsigned int num_data_pgs;
         struct rndis_message *rndis_msg;
-        struct rndis_packet *rndis_pkt;
         struct net_device *vf_netdev;
         u32 rndis_msg_size;
-        struct rndis_per_packet_info *ppi;
         u32 hash;
         struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];
 
@@ -527,34 +522,36 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 
         rndis_msg = (struct rndis_message *)skb->head;
 
-        memset(rndis_msg, 0, RNDIS_AND_PPI_SIZE);
-
         /* Add the rndis header */
         rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
         rndis_msg->msg_len = packet->total_data_buflen;
-        rndis_pkt = &rndis_msg->msg.pkt;
-        rndis_pkt->data_offset = sizeof(struct rndis_packet);
-        rndis_pkt->data_len = packet->total_data_buflen;
-        rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);
+
+        rndis_msg->msg.pkt = (struct rndis_packet) {
+                .data_offset = sizeof(struct rndis_packet),
+                .data_len = packet->total_data_buflen,
+                .per_pkt_info_offset = sizeof(struct rndis_packet),
+        };
 
         rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);
 
         hash = skb_get_hash_raw(skb);
         if (hash != 0 && net->real_num_tx_queues > 1) {
+                u32 *hash_info;
+
                 rndis_msg_size += NDIS_HASH_PPI_SIZE;
-                ppi = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
-                                    NBL_HASH_VALUE);
-                *(u32 *)((void *)ppi + ppi->ppi_offset) = hash;
+                hash_info = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
+                                          NBL_HASH_VALUE);
+                *hash_info = hash;
         }
 
         if (skb_vlan_tag_present(skb)) {
                 struct ndis_pkt_8021q_info *vlan;
 
                 rndis_msg_size += NDIS_VLAN_PPI_SIZE;
-                ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
-                                    IEEE_8021Q_INFO);
+                vlan = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
+                                     IEEE_8021Q_INFO);
 
-                vlan = (void *)ppi + ppi->ppi_offset;
+                vlan->value = 0;
                 vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK;
                 vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >>
                                 VLAN_PRIO_SHIFT;
@@ -564,11 +561,10 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
                 struct ndis_tcp_lso_info *lso_info;
 
                 rndis_msg_size += NDIS_LSO_PPI_SIZE;
-                ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
-                                    TCP_LARGESEND_PKTINFO);
-
-                lso_info = (void *)ppi + ppi->ppi_offset;
+                lso_info = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
+                                         TCP_LARGESEND_PKTINFO);
 
+                lso_info->value = 0;
                 lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
                 if (skb->protocol == htons(ETH_P_IP)) {
                         lso_info->lso_v2_transmit.ip_version =
@@ -593,12 +589,10 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
                         struct ndis_tcp_ip_checksum_info *csum_info;
 
                         rndis_msg_size += NDIS_CSUM_PPI_SIZE;
-                        ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
-                                            TCPIP_CHKSUM_PKTINFO);
-
-                        csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
-                                        ppi->ppi_offset);
+                        csum_info = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
+                                                  TCPIP_CHKSUM_PKTINFO);
 
+                        csum_info->value = 0;
                         csum_info->transmit.tcp_header_offset = skb_transport_offset(skb);
 
                         if (skb->protocol == htons(ETH_P_IP)) {
@@ -632,7 +626,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
         /* timestamp packet in software */
         skb_tx_timestamp(skb);
 
-        ret = netvsc_send(net_device_ctx, packet, rndis_msg, pb, skb);
+        ret = netvsc_send(net, packet, rndis_msg, pb, skb);
         if (likely(ret == 0))
                 return NETDEV_TX_OK;
 
@@ -658,22 +652,14 @@ no_memory:
 /*
  * netvsc_linkstatus_callback - Link up/down notification
  */
-void netvsc_linkstatus_callback(struct hv_device *device_obj,
+void netvsc_linkstatus_callback(struct net_device *net,
                                 struct rndis_message *resp)
 {
         struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
-        struct net_device *net;
-        struct net_device_context *ndev_ctx;
+        struct net_device_context *ndev_ctx = netdev_priv(net);
         struct netvsc_reconfig *event;
         unsigned long flags;
 
-        net = hv_get_drvdata(device_obj);
-
-        if (!net)
-                return;
-
-        ndev_ctx = netdev_priv(net);
-
         /* Update the physical link speed when changing to another vSwitch */
         if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
                 u32 speed;
@@ -753,34 +739,26 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
  * "wire" on the specified device.
  */
 int netvsc_recv_callback(struct net_device *net,
+                         struct netvsc_device *net_device,
                          struct vmbus_channel *channel,
                          void *data, u32 len,
                          const struct ndis_tcp_ip_checksum_info *csum_info,
                          const struct ndis_pkt_8021q_info *vlan)
 {
         struct net_device_context *net_device_ctx = netdev_priv(net);
-        struct netvsc_device *net_device;
         u16 q_idx = channel->offermsg.offer.sub_channel_index;
-        struct netvsc_channel *nvchan;
+        struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
         struct sk_buff *skb;
         struct netvsc_stats *rx_stats;
 
         if (net->reg_state != NETREG_REGISTERED)
                 return NVSP_STAT_FAIL;
 
-        rcu_read_lock();
-        net_device = rcu_dereference(net_device_ctx->nvdev);
-        if (unlikely(!net_device))
-                goto drop;
-
-        nvchan = &net_device->chan_table[q_idx];
-
         /* Allocate a skb - TODO direct I/O to pages? */
         skb = netvsc_alloc_recv_skb(net, &nvchan->napi,
                                     csum_info, vlan, data, len);
         if (unlikely(!skb)) {
-drop:
-                ++net->stats.rx_dropped;
+                ++net_device_ctx->eth_stats.rx_no_memory;
                 rcu_read_unlock();
                 return NVSP_STAT_FAIL;
         }
@@ -804,8 +782,6 @@ drop:
         u64_stats_update_end(&rx_stats->syncp);
 
         napi_gro_receive(&nvchan->napi, skb);
-        rcu_read_unlock();
-
         return 0;
 }
 
@@ -860,7 +836,6 @@ static int netvsc_set_channels(struct net_device *net,
 
         memset(&device_info, 0, sizeof(device_info));
         device_info.num_chn = count;
-        device_info.ring_size = ring_size;
         device_info.send_sections = nvdev->send_section_cnt;
         device_info.send_section_size = nvdev->send_section_size;
         device_info.recv_sections = nvdev->recv_section_cnt;
@@ -975,7 +950,6 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
                 rndis_filter_close(nvdev);
 
         memset(&device_info, 0, sizeof(device_info));
-        device_info.ring_size = ring_size;
         device_info.num_chn = nvdev->num_chn;
         device_info.send_sections = nvdev->send_section_cnt;
         device_info.send_section_size = nvdev->send_section_size;
@@ -1133,12 +1107,13 @@ static const struct {
         u16 offset;
 } netvsc_stats[] = {
         { "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) },
-        { "tx_no_memory",  offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
+        { "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
         { "tx_no_space",  offsetof(struct netvsc_ethtool_stats, tx_no_space) },
         { "tx_too_big",   offsetof(struct netvsc_ethtool_stats, tx_too_big) },
         { "tx_busy",      offsetof(struct netvsc_ethtool_stats, tx_busy) },
         { "tx_send_full", offsetof(struct netvsc_ethtool_stats, tx_send_full) },
         { "rx_comp_busy", offsetof(struct netvsc_ethtool_stats, rx_comp_busy) },
+        { "rx_no_memory", offsetof(struct netvsc_ethtool_stats, rx_no_memory) },
         { "stop_queue", offsetof(struct netvsc_ethtool_stats, stop_queue) },
         { "wake_queue", offsetof(struct netvsc_ethtool_stats, wake_queue) },
 }, vf_stats[] = {
@@ -1539,7 +1514,6 @@ static int netvsc_set_ringparam(struct net_device *ndev,
 
         memset(&device_info, 0, sizeof(device_info));
         device_info.num_chn = nvdev->num_chn;
-        device_info.ring_size = ring_size;
         device_info.send_sections = new_tx;
         device_info.send_section_size = nvdev->send_section_size;
         device_info.recv_sections = new_rx;
@@ -1995,7 +1969,6 @@ static int netvsc_probe(struct hv_device *dev,
 
         /* Notify the netvsc driver of the new device */
         memset(&device_info, 0, sizeof(device_info));
-        device_info.ring_size = ring_size;
         device_info.num_chn = VRSS_CHANNEL_DEFAULT;
         device_info.send_sections = NETVSC_DEFAULT_TX;
         device_info.send_section_size = NETVSC_SEND_SECTION_SIZE;
@@ -2158,11 +2131,13 @@ static int __init netvsc_drv_init(void)
 
         if (ring_size < RING_SIZE_MIN) {
                 ring_size = RING_SIZE_MIN;
-                pr_info("Increased ring_size to %d (min allowed)\n",
+                pr_info("Increased ring_size to %u (min allowed)\n",
                         ring_size);
         }
-        ret = vmbus_driver_register(&netvsc_drv);
+        netvsc_ring_bytes = ring_size * PAGE_SIZE;
+        netvsc_ring_reciprocal = reciprocal_value(netvsc_ring_bytes);
 
+        ret = vmbus_driver_register(&netvsc_drv);
         if (ret)
                 return ret;
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 7b637c7dd1e5..c3ca191fea7f 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -134,11 +134,9 @@ static void put_rndis_request(struct rndis_device *dev,
         kfree(req);
 }
 
-static void dump_rndis_message(struct hv_device *hv_dev,
+static void dump_rndis_message(struct net_device *netdev,
                                const struct rndis_message *rndis_msg)
 {
-        struct net_device *netdev = hv_get_drvdata(hv_dev);
-
         switch (rndis_msg->ndis_msg_type) {
         case RNDIS_MSG_PACKET:
                 netdev_dbg(netdev, "RNDIS_MSG_PACKET (len %u, "
@@ -217,7 +215,6 @@ static int rndis_filter_send_request(struct rndis_device *dev,
         struct hv_netvsc_packet *packet;
         struct hv_page_buffer page_buf[2];
         struct hv_page_buffer *pb = page_buf;
-        struct net_device_context *net_device_ctx = netdev_priv(dev->ndev);
         int ret;
 
         /* Setup the packet to send it */
@@ -245,7 +242,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
         }
 
         rcu_read_lock_bh();
-        ret = netvsc_send(net_device_ctx, packet, NULL, pb, NULL);
+        ret = netvsc_send(dev->ndev, packet, NULL, pb, NULL);
         rcu_read_unlock_bh();
 
         return ret;
@@ -354,6 +351,7 @@ static inline void *rndis_get_ppi(struct rndis_packet *rpkt, u32 type)
 }
 
 static int rndis_filter_receive_data(struct net_device *ndev,
+                                     struct netvsc_device *nvdev,
                                      struct rndis_device *dev,
                                      struct rndis_message *msg,
                                      struct vmbus_channel *channel,
@@ -390,14 +388,14 @@ static int rndis_filter_receive_data(struct net_device *ndev,
          */
         data = (void *)((unsigned long)data + data_offset);
         csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO);
-        return netvsc_recv_callback(ndev, channel,
+
+        return netvsc_recv_callback(ndev, nvdev, channel,
                                     data, rndis_pkt->data_len,
                                     csum_info, vlan);
 }
 
 int rndis_filter_receive(struct net_device *ndev,
                          struct netvsc_device *net_dev,
-                         struct hv_device *dev,
                          struct vmbus_channel *channel,
                          void *data, u32 buflen)
 {
@@ -419,11 +417,12 @@ int rndis_filter_receive(struct net_device *ndev,
         }
 
         if (netif_msg_rx_status(net_device_ctx))
-                dump_rndis_message(dev, rndis_msg);
+                dump_rndis_message(ndev, rndis_msg);
 
         switch (rndis_msg->ndis_msg_type) {
         case RNDIS_MSG_PACKET:
-                return rndis_filter_receive_data(ndev, rndis_dev, rndis_msg,
+                return rndis_filter_receive_data(ndev, net_dev,
+                                                 rndis_dev, rndis_msg,
                                                  channel, data, buflen);
         case RNDIS_MSG_INIT_C:
         case RNDIS_MSG_QUERY_C:
@@ -434,7 +433,7 @@ int rndis_filter_receive(struct net_device *ndev,
 
         case RNDIS_MSG_INDICATE:
                 /* notification msgs */
-                netvsc_linkstatus_callback(dev, rndis_msg);
+                netvsc_linkstatus_callback(ndev, rndis_msg);
                 break;
         default:
                 netdev_err(ndev,
@@ -1040,8 +1039,8 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
         /* Set the channel before opening.*/
         nvchan->channel = new_sc;
 
-        ret = vmbus_open(new_sc, nvscdev->ring_size * PAGE_SIZE,
-                         nvscdev->ring_size * PAGE_SIZE, NULL, 0,
+        ret = vmbus_open(new_sc, netvsc_ring_bytes,
+                         netvsc_ring_bytes, NULL, 0,
                          netvsc_channel_cb, nvchan);
         if (ret == 0)
                 napi_enable(&nvchan->napi);
@@ -1222,7 +1221,6 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
         struct ndis_recv_scale_cap rsscap;
         u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
         u32 mtu, size;
-        const struct cpumask *node_cpu_mask;
         u32 num_possible_rss_qs;
         int i, ret;
 
@@ -1291,14 +1289,8 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
         if (ret || rsscap.num_recv_que < 2)
                 goto out;
 
-        /*
-         * We will limit the VRSS channels to the number CPUs in the NUMA node
-         * the primary channel is currently bound to.
-         *
-         * This also guarantees that num_possible_rss_qs <= num_online_cpus
-         */
-        node_cpu_mask = cpumask_of_node(cpu_to_node(dev->channel->target_cpu));
-        num_possible_rss_qs = min_t(u32, cpumask_weight(node_cpu_mask),
+        /* This guarantees that num_possible_rss_qs <= num_online_cpus */
+        num_possible_rss_qs = min_t(u32, num_online_cpus(),
                                     rsscap.num_recv_que);
 
         net_device->max_chn = min_t(u32, VRSS_CHANNEL_MAX, num_possible_rss_qs);
@@ -1362,9 +1354,6 @@ int rndis_filter_open(struct netvsc_device *nvdev)
         if (!nvdev)
                 return -EINVAL;
 
-        if (atomic_inc_return(&nvdev->open_cnt) != 1)
-                return 0;
-
         return rndis_filter_open_device(nvdev->extension);
 }
 
@@ -1373,13 +1362,12 @@ int rndis_filter_close(struct netvsc_device *nvdev)
         if (!nvdev)
                 return -EINVAL;
 
-        if (atomic_dec_return(&nvdev->open_cnt) != 0)
-                return 0;
-
         return rndis_filter_close_device(nvdev->extension);
 }
 
 bool rndis_filter_opened(const struct netvsc_device *nvdev)
 {
-        return atomic_read(&nvdev->open_cnt) > 0;
+        const struct rndis_device *dev = nvdev->extension;
+
+        return dev->state == RNDIS_DEV_DATAINITIALIZED;
 }
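The common thread in the changes above is that the per-device ring_size plumbing is replaced by two module-wide values computed once at init, netvsc_ring_bytes and netvsc_ring_reciprocal, so hv_ringbuf_avail_percent() can call reciprocal_divide() instead of doing an integer division on every send completion. The standalone C sketch below illustrates that technique only; it is a userspace approximation assumed to mirror the kernel's lib/reciprocal_div.c helpers (Granlund-Montgomery), and the ring_bytes and avail_write numbers are made up for illustration, not taken from a running system.

/*
 * Userspace sketch of the reciprocal-divide technique adopted by the patch.
 * Assumption: this mirrors the kernel's reciprocal_value()/reciprocal_divide()
 * helpers; it is not the driver code itself. Valid for divisors >= 2,
 * which holds here since the ring is at least RING_SIZE_MIN pages.
 */
#include <stdint.h>
#include <stdio.h>

struct reciprocal_value {
        uint32_t m;
        uint8_t sh1, sh2;
};

/* Precompute the multiplier and shift amounts for a fixed divisor d. */
static struct reciprocal_value reciprocal_value(uint32_t d)
{
        struct reciprocal_value R;
        int l = 32 - __builtin_clz(d - 1);              /* fls(d - 1) */
        uint64_t m = ((1ULL << 32) * ((1ULL << l) - d)) / d + 1;

        R.m = (uint32_t)m;
        R.sh1 = l > 1 ? 1 : l;
        R.sh2 = l > 1 ? l - 1 : 0;
        return R;
}

/* a / d computed with one 64-bit multiply and two shifts. */
static uint32_t reciprocal_divide(uint32_t a, struct reciprocal_value R)
{
        uint32_t t = (uint32_t)(((uint64_t)a * R.m) >> 32);

        return (t + ((a - t) >> R.sh1)) >> R.sh2;
}

int main(void)
{
        /* 128 pages of 4096 bytes: the driver's default ring size. */
        uint32_t ring_bytes = 128 * 4096;
        struct reciprocal_value r = reciprocal_value(ring_bytes);
        uint32_t avail_write = 200000;                  /* hypothetical free bytes */

        printf("reciprocal: %u%%, plain division: %u%%\n",
               reciprocal_divide(avail_write * 100, r),
               avail_write * 100 / ring_bytes);
        return 0;
}

Because the divisor is fixed when the module loads, precomputing the reciprocal in netvsc_drv_init() lets the transmit-completion hot path avoid the division entirely, which is the motivation for the netvsc_ring_reciprocal global introduced by this change.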