Diffstat (limited to 'drivers/net/hyperv')
-rw-r--r--   drivers/net/hyperv/hyperv_net.h   |  33
-rw-r--r--   drivers/net/hyperv/netvsc.c       | 217
-rw-r--r--   drivers/net/hyperv/netvsc_drv.c   | 116
-rw-r--r--   drivers/net/hyperv/rndis_filter.c |  16
4 files changed, 294 insertions, 88 deletions
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 384ca4f4de4a..a10b31664709 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -128,9 +128,12 @@ struct ndis_tcp_ip_checksum_info;
 struct hv_netvsc_packet {
 	/* Bookkeeping stuff */
 	u32 status;
+	bool part_of_skb;
 
-	struct hv_device *device;
 	bool is_data_pkt;
+	bool xmit_more; /* from skb */
+	bool cp_partial; /* partial copy into send buffer */
+
 	u16 vlan_tci;
 
 	u16 q_idx;
@@ -145,11 +148,14 @@ struct hv_netvsc_packet {
 	/* This points to the memory after page_buf */
 	struct rndis_message *rndis_msg;
 
+	u32 rmsg_size; /* RNDIS header and PPI size */
+	u32 rmsg_pgcnt; /* page count of RNDIS header and PPI */
+
 	u32 total_data_buflen;
 	/* Points to the send/receive buffer where the ethernet frame is */
 	void *data;
 	u32 page_buf_cnt;
-	struct hv_page_buffer page_buf[0];
+	struct hv_page_buffer *page_buf;
 };
 
 struct netvsc_device_info {
@@ -187,6 +193,7 @@ int netvsc_send(struct hv_device *device,
 		struct hv_netvsc_packet *packet);
 void netvsc_linkstatus_callback(struct hv_device *device_obj,
 				struct rndis_message *resp);
+void netvsc_xmit_completion(void *context);
 int netvsc_recv_callback(struct hv_device *device_obj,
 			struct hv_netvsc_packet *packet,
 			struct ndis_tcp_ip_checksum_info *csum_info);
@@ -596,7 +603,16 @@ struct nvsp_message {
 
 #define VRSS_SEND_TAB_SIZE 16
 
-/* Per netvsc channel-specific */
+#define RNDIS_MAX_PKT_DEFAULT 8
+#define RNDIS_PKT_ALIGN_DEFAULT 8
+
+struct multi_send_data {
+	spinlock_t lock; /* protect struct multi_send_data */
+	struct hv_netvsc_packet *pkt; /* netvsc pkt pending */
+	u32 count; /* counter of batched packets */
+};
+
+/* Per netvsc device */
 struct netvsc_device {
 	struct hv_device *dev;
 
@@ -634,6 +650,7 @@ struct netvsc_device {
 
 	struct vmbus_channel *chn_table[NR_CPUS];
 	u32 send_table[VRSS_SEND_TAB_SIZE];
+	u32 max_chn;
 	u32 num_chn;
 	atomic_t queue_sends[NR_CPUS];
 
@@ -646,6 +663,10 @@ struct netvsc_device {
 	unsigned char *cb_buffer;
 	/* The sub channel callback buffer */
 	unsigned char *sub_cb_buf;
+
+	struct multi_send_data msd[NR_CPUS];
+	u32 max_pkt; /* max number of pkt in one send, e.g. 8 */
+	u32 pkt_align; /* alignment bytes, e.g. 8 */
 };
 
 /* NdisInitialize message */
@@ -943,6 +964,10 @@ struct ndis_tcp_lso_info {
 #define NDIS_HASH_PPI_SIZE (sizeof(struct rndis_per_packet_info) + \
 		sizeof(u32))
 
+/* Total size of all PPI data */
+#define NDIS_ALL_PPI_SIZE (NDIS_VLAN_PPI_SIZE + NDIS_CSUM_PPI_SIZE + \
+			   NDIS_LSO_PPI_SIZE + NDIS_HASH_PPI_SIZE)
+
 /* Format of Information buffer passed in a SetRequest for the OID */
 /* OID_GEN_RNDIS_CONFIG_PARAMETER. */
 struct rndis_config_parameter_info {
@@ -1155,6 +1180,8 @@ struct rndis_message {
 #define RNDIS_HEADER_SIZE (sizeof(struct rndis_message) - \
 			   sizeof(union rndis_message_container))
 
+#define RNDIS_AND_PPI_SIZE (sizeof(struct rndis_message) + NDIS_ALL_PPI_SIZE)
+
 #define NDIS_PACKET_TYPE_DIRECTED	0x00000001
 #define NDIS_PACKET_TYPE_MULTICAST	0x00000002
 #define NDIS_PACKET_TYPE_ALL_MULTICAST	0x00000004
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 208eb05446ba..2e8ad0636b46 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -37,6 +37,7 @@ static struct netvsc_device *alloc_net_device(struct hv_device *device)
 {
 	struct netvsc_device *net_device;
 	struct net_device *ndev = hv_get_drvdata(device);
+	int i;
 
 	net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
 	if (!net_device)
@@ -53,6 +54,11 @@ static struct netvsc_device *alloc_net_device(struct hv_device *device)
 	net_device->destroy = false;
 	net_device->dev = device;
 	net_device->ndev = ndev;
+	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
+	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
+
+	for (i = 0; i < num_online_cpus(); i++)
+		spin_lock_init(&net_device->msd[i].lock);
 
 	hv_set_drvdata(device, net_device);
 	return net_device;
@@ -687,14 +693,28 @@ static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
 
 static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
 				   unsigned int section_index,
+				   u32 pend_size,
 				   struct hv_netvsc_packet *packet)
 {
 	char *start = net_device->send_buf;
-	char *dest = (start + (section_index * net_device->send_section_size));
+	char *dest = start + (section_index * net_device->send_section_size)
+		     + pend_size;
 	int i;
 	u32 msg_size = 0;
+	u32 padding = 0;
+	u32 remain = packet->total_data_buflen % net_device->pkt_align;
+	u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
+		packet->page_buf_cnt;
+
+	/* Add padding */
+	if (packet->is_data_pkt && packet->xmit_more && remain &&
+	    !packet->cp_partial) {
+		padding = net_device->pkt_align - remain;
+		packet->rndis_msg->msg_len += padding;
+		packet->total_data_buflen += padding;
+	}
 
-	for (i = 0; i < packet->page_buf_cnt; i++) {
+	for (i = 0; i < page_count; i++) {
 		char *src = phys_to_virt(packet->page_buf[i].pfn << PAGE_SHIFT);
 		u32 offset = packet->page_buf[i].offset;
 		u32 len = packet->page_buf[i].len;
@@ -703,79 +723,64 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
 		msg_size += len;
 		dest += len;
 	}
+
+	if (padding) {
+		memset(dest, 0, padding);
+		msg_size += padding;
+	}
+
 	return msg_size;
 }
 
-int netvsc_send(struct hv_device *device,
-			struct hv_netvsc_packet *packet)
+static inline int netvsc_send_pkt(
+	struct hv_netvsc_packet *packet,
+	struct netvsc_device *net_device)
 {
-	struct netvsc_device *net_device;
-	int ret = 0;
-	struct nvsp_message sendMessage;
-	struct net_device *ndev;
-	struct vmbus_channel *out_channel = NULL;
-	u64 req_id;
-	unsigned int section_index = NETVSC_INVALID_INDEX;
-	u32 msg_size = 0;
-	struct sk_buff *skb = NULL;
+	struct nvsp_message nvmsg;
+	struct vmbus_channel *out_channel = packet->channel;
 	u16 q_idx = packet->q_idx;
+	struct net_device *ndev = net_device->ndev;
+	u64 req_id;
+	int ret;
+	struct hv_page_buffer *pgbuf;
 
-
-	net_device = get_outbound_net_device(device);
-	if (!net_device)
-		return -ENODEV;
-	ndev = net_device->ndev;
-
-	sendMessage.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
+	nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
 	if (packet->is_data_pkt) {
 		/* 0 is RMC_DATA; */
-		sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 0;
+		nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 0;
 	} else {
 		/* 1 is RMC_CONTROL; */
-		sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 1;
-	}
-
-	/* Attempt to send via sendbuf */
-	if (packet->total_data_buflen < net_device->send_section_size) {
-		section_index = netvsc_get_next_send_section(net_device);
-		if (section_index != NETVSC_INVALID_INDEX) {
-			msg_size = netvsc_copy_to_send_buf(net_device,
-							   section_index,
-							   packet);
-			skb = (struct sk_buff *)
-			      (unsigned long)packet->send_completion_tid;
-			packet->page_buf_cnt = 0;
-		}
+		nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 1;
 	}
 
-	packet->send_buf_index = section_index;
-
-	sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
-		section_index;
-	sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = msg_size;
+	nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
		packet->send_buf_index;
+	if (packet->send_buf_index == NETVSC_INVALID_INDEX)
+		nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;
+	else
+		nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size =
+			packet->total_data_buflen;
 
 	if (packet->send_completion)
 		req_id = (ulong)packet;
 	else
 		req_id = 0;
 
-	out_channel = net_device->chn_table[packet->q_idx];
-	if (out_channel == NULL)
-		out_channel = device->channel;
-	packet->channel = out_channel;
-
 	if (out_channel->rescind)
 		return -ENODEV;
 
 	if (packet->page_buf_cnt) {
+		pgbuf = packet->cp_partial ? packet->page_buf +
+			packet->rmsg_pgcnt : packet->page_buf;
 		ret = vmbus_sendpacket_pagebuffer(out_channel,
-						  packet->page_buf,
+						  pgbuf,
 						  packet->page_buf_cnt,
-						  &sendMessage,
+						  &nvmsg,
 						  sizeof(struct nvsp_message),
 						  req_id);
 	} else {
-		ret = vmbus_sendpacket(out_channel, &sendMessage,
+		ret = vmbus_sendpacket(
				out_channel, &nvmsg,
 				sizeof(struct nvsp_message),
 				req_id,
 				VM_PKT_DATA_INBAND,
@@ -809,6 +814,121 @@ int netvsc_send(struct hv_device *device,
 			   packet, ret);
 	}
 
+	return ret;
+}
+
+int netvsc_send(struct hv_device *device,
+		struct hv_netvsc_packet *packet)
+{
+	struct netvsc_device *net_device;
+	int ret = 0, m_ret = 0;
+	struct vmbus_channel *out_channel;
+	u16 q_idx = packet->q_idx;
+	u32 pktlen = packet->total_data_buflen, msd_len = 0;
+	unsigned int section_index = NETVSC_INVALID_INDEX;
+	struct sk_buff *skb = NULL;
+	unsigned long flag;
+	struct multi_send_data *msdp;
+	struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
+	bool try_batch;
+
+	net_device = get_outbound_net_device(device);
+	if (!net_device)
+		return -ENODEV;
+
+	out_channel = net_device->chn_table[q_idx];
+	if (!out_channel) {
+		out_channel = device->channel;
+		q_idx = 0;
+		packet->q_idx = 0;
+	}
+	packet->channel = out_channel;
+	packet->send_buf_index = NETVSC_INVALID_INDEX;
+	packet->cp_partial = false;
+
+	msdp = &net_device->msd[q_idx];
+
+	/* batch packets in send buffer if possible */
+	spin_lock_irqsave(&msdp->lock, flag);
+	if (msdp->pkt)
+		msd_len = msdp->pkt->total_data_buflen;
+
+	try_batch = packet->is_data_pkt && msd_len > 0 && msdp->count <
+		    net_device->max_pkt;
+
+	if (try_batch && msd_len + pktlen + net_device->pkt_align <
+	    net_device->send_section_size) {
+		section_index = msdp->pkt->send_buf_index;
+
+	} else if (try_batch && msd_len + packet->rmsg_size <
+		   net_device->send_section_size) {
+		section_index = msdp->pkt->send_buf_index;
+		packet->cp_partial = true;
+
+	} else if (packet->is_data_pkt && pktlen + net_device->pkt_align <
+		   net_device->send_section_size) {
+		section_index = netvsc_get_next_send_section(net_device);
+		if (section_index != NETVSC_INVALID_INDEX) {
+			msd_send = msdp->pkt;
+			msdp->pkt = NULL;
+			msdp->count = 0;
+			msd_len = 0;
+		}
+	}
+
+	if (section_index != NETVSC_INVALID_INDEX) {
+		netvsc_copy_to_send_buf(net_device,
+					section_index, msd_len,
+					packet);
+
+		packet->send_buf_index = section_index;
+
+		if (packet->cp_partial) {
+			packet->page_buf_cnt -= packet->rmsg_pgcnt;
+			packet->total_data_buflen = msd_len + packet->rmsg_size;
+		} else {
+			packet->page_buf_cnt = 0;
+			packet->total_data_buflen += msd_len;
+			if (!packet->part_of_skb) {
+				skb = (struct sk_buff *)(unsigned long)packet->
+					send_completion_tid;
+				packet->send_completion_tid = 0;
+			}
+		}
+
+		if (msdp->pkt)
+			netvsc_xmit_completion(msdp->pkt);
+
+		if (packet->xmit_more && !packet->cp_partial) {
+			msdp->pkt = packet;
+			msdp->count++;
+		} else {
+			cur_send = packet;
+			msdp->pkt = NULL;
+			msdp->count = 0;
+		}
+	} else {
+		msd_send = msdp->pkt;
+		msdp->pkt = NULL;
+		msdp->count = 0;
+		cur_send = packet;
+	}
+
+	spin_unlock_irqrestore(&msdp->lock, flag);
+
+	if (msd_send) {
+		m_ret = netvsc_send_pkt(msd_send, net_device);
+
+		if (m_ret != 0) {
+			netvsc_free_send_slot(net_device,
+					      msd_send->send_buf_index);
+			netvsc_xmit_completion(msd_send);
+		}
+	}
+
+	if (cur_send)
+		ret = netvsc_send_pkt(cur_send, net_device);
+
 	if (ret != 0) {
 		if (section_index != NETVSC_INVALID_INDEX)
 			netvsc_free_send_slot(net_device, section_index);
@@ -911,7 +1031,6 @@ static void netvsc_receive(struct netvsc_device *net_device,
 	}
 
 	count = vmxferpage_packet->range_cnt;
-	netvsc_packet->device = device;
 	netvsc_packet->channel = channel;
 
 	/* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 15d82eda0baf..a3a9d3898a6e 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -229,16 +229,16 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
 	return q_idx;
 }
 
-static void netvsc_xmit_completion(void *context)
+void netvsc_xmit_completion(void *context)
 {
 	struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context;
 	struct sk_buff *skb = (struct sk_buff *)
 		(unsigned long)packet->send_completion_tid;
-	u32 index = packet->send_buf_index;
 
-	kfree(packet);
+	if (!packet->part_of_skb)
+		kfree(packet);
 
-	if (skb && (index == NETVSC_INVALID_INDEX))
+	if (skb)
 		dev_kfree_skb_any(skb);
 }
 
@@ -277,15 +277,16 @@ static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
 }
 
 static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
-			   struct hv_page_buffer *pb)
+			   struct hv_netvsc_packet *packet)
 {
+	struct hv_page_buffer *pb = packet->page_buf;
 	u32 slots_used = 0;
 	char *data = skb->data;
 	int frags = skb_shinfo(skb)->nr_frags;
 	int i;
 
 	/* The packet is laid out thus:
-	 * 1. hdr
+	 * 1. hdr: RNDIS header and PPI
 	 * 2. skb linear data
 	 * 3. skb fragment data
 	 */
@@ -294,6 +295,9 @@ static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
 				offset_in_page(hdr),
 				len, &pb[slots_used]);
 
+	packet->rmsg_size = len;
+	packet->rmsg_pgcnt = slots_used;
+
 	slots_used += fill_pg_buf(virt_to_page(data),
 				offset_in_page(data),
 				skb_headlen(skb), &pb[slots_used]);
@@ -370,50 +374,73 @@ not_ip:
 static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 {
 	struct net_device_context *net_device_ctx = netdev_priv(net);
-	struct hv_netvsc_packet *packet;
+	struct hv_netvsc_packet *packet = NULL;
 	int ret;
 	unsigned int num_data_pgs;
 	struct rndis_message *rndis_msg;
 	struct rndis_packet *rndis_pkt;
 	u32 rndis_msg_size;
 	bool isvlan;
+	bool linear = false;
 	struct rndis_per_packet_info *ppi;
 	struct ndis_tcp_ip_checksum_info *csum_info;
 	struct ndis_tcp_lso_info *lso_info;
 	int hdr_offset;
 	u32 net_trans_info;
 	u32 hash;
-	u32 skb_length = skb->len;
+	u32 skb_length;
+	u32 head_room;
+	u32 pkt_sz;
+	struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
 
 	/* We will atmost need two pages to describe the rndis
 	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
-	 * of pages in a single packet.
+	 * of pages in a single packet. If skb is scattered around
+	 * more pages we try linearizing it.
 	 */
+
+check_size:
+	skb_length = skb->len;
+	head_room = skb_headroom(skb);
 	num_data_pgs = netvsc_get_slots(skb) + 2;
-	if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
-		netdev_err(net, "Packet too big: %u\n", skb->len);
-		dev_kfree_skb(skb);
-		net->stats.tx_dropped++;
-		return NETDEV_TX_OK;
+	if (num_data_pgs > MAX_PAGE_BUFFER_COUNT && linear) {
+		net_alert_ratelimited("packet too big: %u pages (%u bytes)\n",
+				      num_data_pgs, skb->len);
+		ret = -EFAULT;
+		goto drop;
+	} else if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
+		if (skb_linearize(skb)) {
+			net_alert_ratelimited("failed to linearize skb\n");
+			ret = -ENOMEM;
+			goto drop;
+		}
+		linear = true;
+		goto check_size;
 	}
 
-	/* Allocate a netvsc packet based on # of frags. */
-	packet = kzalloc(sizeof(struct hv_netvsc_packet) +
-			 (num_data_pgs * sizeof(struct hv_page_buffer)) +
-			 sizeof(struct rndis_message) +
-			 NDIS_VLAN_PPI_SIZE + NDIS_CSUM_PPI_SIZE +
-			 NDIS_LSO_PPI_SIZE + NDIS_HASH_PPI_SIZE, GFP_ATOMIC);
-	if (!packet) {
-		/* out of memory, drop packet */
-		netdev_err(net, "unable to allocate hv_netvsc_packet\n");
-
-		dev_kfree_skb(skb);
-		net->stats.tx_dropped++;
-		return NETDEV_TX_OK;
+	pkt_sz = sizeof(struct hv_netvsc_packet) + RNDIS_AND_PPI_SIZE;
+
+	if (head_room < pkt_sz) {
+		packet = kmalloc(pkt_sz, GFP_ATOMIC);
+		if (!packet) {
+			/* out of memory, drop packet */
+			netdev_err(net, "unable to alloc hv_netvsc_packet\n");
+			ret = -ENOMEM;
+			goto drop;
+		}
+		packet->part_of_skb = false;
+	} else {
+		/* Use the headroom for building up the packet */
+		packet = (struct hv_netvsc_packet *)skb->head;
+		packet->part_of_skb = true;
 	}
 
+	packet->status = 0;
+	packet->xmit_more = skb->xmit_more;
+	packet->vlan_tci = skb->vlan_tci;
+	packet->page_buf = page_buf;
+
 	packet->q_idx = skb_get_queue_mapping(skb);
@@ -421,8 +448,9 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 	packet->total_data_buflen = skb->len;
 
 	packet->rndis_msg = (struct rndis_message *)((unsigned long)packet +
-				sizeof(struct hv_netvsc_packet) +
-				(num_data_pgs * sizeof(struct hv_page_buffer)));
+				sizeof(struct hv_netvsc_packet));
+
+	memset(packet->rndis_msg, 0, RNDIS_AND_PPI_SIZE);
 
 	/* Set the completion routine */
 	packet->send_completion = netvsc_xmit_completion;
@@ -554,7 +582,7 @@ do_send:
 	rndis_msg->msg_len += rndis_msg_size;
 	packet->total_data_buflen = rndis_msg->msg_len;
 	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
-					       skb, &packet->page_buf[0]);
+					       skb, packet);
 
 	ret = netvsc_send(net_device_ctx->device_ctx, packet);
 
@@ -563,7 +591,8 @@ drop:
 		net->stats.tx_bytes += skb_length;
 		net->stats.tx_packets++;
 	} else {
-		kfree(packet);
+		if (packet && !packet->part_of_skb)
+			kfree(packet);
 		if (ret != -EAGAIN) {
 			dev_kfree_skb_any(skb);
 			net->stats.tx_dropped++;
@@ -687,6 +716,19 @@ static void netvsc_get_drvinfo(struct net_device *net,
 	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
 }
 
+static void netvsc_get_channels(struct net_device *net,
+				struct ethtool_channels *channel)
+{
+	struct net_device_context *net_device_ctx = netdev_priv(net);
+	struct hv_device *dev = net_device_ctx->device_ctx;
+	struct netvsc_device *nvdev = hv_get_drvdata(dev);
+
+	if (nvdev) {
+		channel->max_combined = nvdev->max_chn;
+		channel->combined_count = nvdev->num_chn;
+	}
+}
+
 static int netvsc_change_mtu(struct net_device *ndev, int mtu)
 {
 	struct net_device_context *ndevctx = netdev_priv(ndev);
@@ -760,6 +802,7 @@ static void netvsc_poll_controller(struct net_device *net)
 static const struct ethtool_ops ethtool_ops = {
 	.get_drvinfo = netvsc_get_drvinfo,
 	.get_link = ethtool_op_get_link,
+	.get_channels = netvsc_get_channels,
 };
 
 static const struct net_device_ops device_ops = {
@@ -831,12 +874,16 @@ static int netvsc_probe(struct hv_device *dev,
 	struct netvsc_device_info device_info;
 	struct netvsc_device *nvdev;
 	int ret;
+	u32 max_needed_headroom;
 
 	net = alloc_etherdev_mq(sizeof(struct net_device_context),
 				num_online_cpus());
 	if (!net)
 		return -ENOMEM;
 
+	max_needed_headroom = sizeof(struct hv_netvsc_packet) +
+			      RNDIS_AND_PPI_SIZE;
+
 	netif_carrier_off(net);
 
 	net_device_ctx = netdev_priv(net);
@@ -855,6 +902,13 @@ static int netvsc_probe(struct hv_device *dev,
 	net->ethtool_ops = &ethtool_ops;
 	SET_NETDEV_DEV(net, &dev->device);
 
+	/*
+	 * Request additional head room in the skb.
+	 * We will use this space to build the rndis
+	 * heaser and other state we need to maintain.
+	 */
+	net->needed_headroom = max_needed_headroom;
+
 	/* Notify the netvsc driver of the new device */
 	device_info.ring_size = ring_size;
 	ret = rndis_filter_device_add(dev, &device_info);
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 7816d98bdddc..0d92efefd796 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -47,8 +47,6 @@ struct rndis_request {
 
 	/* Simplify allocation by having a netvsc packet inline */
 	struct hv_netvsc_packet pkt;
-	/* Set 2 pages for rndis requests crossing page boundary */
-	struct hv_page_buffer buf[2];
 
 	struct rndis_message request_msg;
 	/*
@@ -210,6 +208,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
 {
 	int ret;
 	struct hv_netvsc_packet *packet;
+	struct hv_page_buffer page_buf[2];
 
 	/* Setup the packet to send it */
 	packet = &req->pkt;
@@ -217,6 +216,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
 	packet->is_data_pkt = false;
 	packet->total_data_buflen = req->request_msg.msg_len;
 	packet->page_buf_cnt = 1;
+	packet->page_buf = page_buf;
 
 	packet->page_buf[0].pfn = virt_to_phys(&req->request_msg) >>
 					PAGE_SHIFT;
@@ -237,6 +237,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
 	}
 
 	packet->send_completion = NULL;
+	packet->xmit_more = false;
 
 	ret = netvsc_send(dev->net_dev->dev, packet);
 	return ret;
@@ -855,6 +856,7 @@ static int rndis_filter_init_device(struct rndis_device *dev)
 	u32 status;
 	int ret;
 	unsigned long t;
+	struct netvsc_device *nvdev = dev->net_dev;
 
 	request = get_rndis_request(dev, RNDIS_MSG_INIT,
 			RNDIS_MESSAGE_SIZE(struct rndis_initialize_request));
@@ -889,6 +891,8 @@ static int rndis_filter_init_device(struct rndis_device *dev)
 	status = init_complete->status;
 	if (status == RNDIS_STATUS_SUCCESS) {
 		dev->state = RNDIS_DEV_INITIALIZED;
+		nvdev->max_pkt = init_complete->max_pkt_per_msg;
+		nvdev->pkt_align = 1 << init_complete->pkt_alignment_factor;
 		ret = 0;
 	} else {
 		dev->state = RNDIS_DEV_UNINITIALIZED;
@@ -1027,6 +1031,7 @@ int rndis_filter_device_add(struct hv_device *dev,
 
 	/* Initialize the rndis device */
 	net_device = hv_get_drvdata(dev);
+	net_device->max_chn = 1;
 	net_device->num_chn = 1;
 
 	net_device->extension = rndis_device;
@@ -1094,6 +1099,7 @@ int rndis_filter_device_add(struct hv_device *dev,
 	if (ret || rsscap.num_recv_que < 2)
 		goto out;
 
+	net_device->max_chn = rsscap.num_recv_que;
 	net_device->num_chn = (num_online_cpus() < rsscap.num_recv_que) ?
 			       num_online_cpus() : rsscap.num_recv_que;
 	if (net_device->num_chn == 1)
@@ -1135,13 +1141,13 @@ int rndis_filter_device_add(struct hv_device *dev,
 	net_device->num_chn = 1 +
 		init_packet->msg.v5_msg.subchn_comp.num_subchannels;
 
-	vmbus_are_subchannels_present(dev->channel);
-
 	ret = rndis_filter_set_rss_param(rndis_device, net_device->num_chn);
 
 out:
-	if (ret)
+	if (ret) {
+		net_device->max_chn = 1;
 		net_device->num_chn = 1;
+	}
 	return 0; /* return 0 because primary channel can be used alone */
 
 err_dev_remv:
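The new netvsc_send() above decides, per send queue, whether a data packet can be appended behind the data already staged in a send-buffer section, appended header-only (cp_partial), copied into a fresh section, or submitted without the send buffer at all. The standalone sketch below models only that decision so the three-way test is easier to follow. It is not the driver code: the names and numeric defaults are illustrative, and the is_data_pkt check, locking, RNDIS framing and the flush of the pending batch are deliberately left out.

/*
 * Simplified userspace model of the send-buffer batching decision that
 * netvsc_send() makes in the diff above.  All names and numbers here are
 * illustrative only.
 *
 * Build: gcc -Wall -o batch_model batch_model.c
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum batch_action {
	BATCH_APPEND,		/* copy whole packet behind the pending data */
	BATCH_APPEND_PARTIAL,	/* copy only the RNDIS header (cp_partial) */
	BATCH_NEW_SECTION,	/* start a new send-buffer section */
	BATCH_BYPASS,		/* too big for the send buffer, send as-is */
};

struct queue_state {		/* loosely models struct multi_send_data */
	uint32_t pending_len;	/* bytes already staged in the section */
	uint32_t pending_cnt;	/* packets already staged */
};

/* Mirrors the shape of the three-way test in the new netvsc_send(). */
static enum batch_action decide(const struct queue_state *q,
				uint32_t pkt_len, uint32_t rndis_hdr_len,
				uint32_t section_size, uint32_t align,
				uint32_t max_pkt)
{
	bool try_batch = q->pending_len > 0 && q->pending_cnt < max_pkt;

	if (try_batch && q->pending_len + pkt_len + align < section_size)
		return BATCH_APPEND;
	if (try_batch && q->pending_len + rndis_hdr_len < section_size)
		return BATCH_APPEND_PARTIAL;
	if (pkt_len + align < section_size)
		return BATCH_NEW_SECTION;
	return BATCH_BYPASS;
}

int main(void)
{
	/* illustrative numbers only */
	const uint32_t section_size = 6144, align = 8, max_pkt = 8;
	const uint32_t rndis_hdr = 170;
	struct queue_state busy = { .pending_len = 4000, .pending_cnt = 3 };
	struct queue_state idle = { 0, 0 };
	static const char *names[] = {
		"append", "append-partial", "new-section", "bypass",
	};

	printf("busy queue, 1200B -> %s\n",
	       names[decide(&busy, 1200, rndis_hdr, section_size, align, max_pkt)]);
	printf("busy queue, 2500B -> %s\n",
	       names[decide(&busy, 2500, rndis_hdr, section_size, align, max_pkt)]);
	printf("idle queue, 9000B -> %s\n",
	       names[decide(&idle, 9000, rndis_hdr, section_size, align, max_pkt)]);
	return 0;
}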
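netvsc_copy_to_send_buf() above also pads a batched RNDIS message out to the alignment the device reports (pkt_align, RNDIS_PKT_ALIGN_DEFAULT is 8) whenever more traffic is expected (xmit_more) and the copy is not partial, so the next message in the section starts on an aligned offset. A minimal model of just that padding arithmetic, with made-up lengths and an assumed 8-byte alignment:

/* Standalone sketch of the padding computed in netvsc_copy_to_send_buf();
 * not the driver code.  Build: gcc -Wall -o pad_model pad_model.c
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t pad_to_align(uint32_t len, uint32_t align)
{
	uint32_t remain = len % align;

	return remain ? align - remain : 0;	/* zero bytes appended */
}

int main(void)
{
	uint32_t lens[] = { 60, 1514, 1048 };	/* illustrative lengths */

	for (unsigned int i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("len %4u -> pad %u byte(s)\n",
		       lens[i], pad_to_align(lens[i], 8));
	return 0;
}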
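The new netvsc_get_channels() ethtool hook simply reports max_chn and num_chn as combined channels. Userspace can read those values through the standard ETHTOOL_GCHANNELS ioctl; the small program below (a hypothetical query_channels.c, not part of the patch) is one way to do that, and is equivalent to running "ethtool -l <ifname>".

/* Query combined channel counts via ETHTOOL_GCHANNELS.
 * Build: gcc -Wall -o query_channels query_channels.c
 * Run:   ./query_channels eth0
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>

int main(int argc, char **argv)
{
	struct ethtool_channels ch = { .cmd = ETHTOOL_GCHANNELS };
	struct ifreq ifr;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <ifname>\n", argv[0]);
		return 1;
	}

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, argv[1], IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ch;	/* ethtool commands pass data here */

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("SIOCETHTOOL");
		close(fd);
		return 1;
	}

	printf("combined channels: %u (max %u)\n",
	       ch.combined_count, ch.max_combined);
	close(fd);
	return 0;
}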