Diffstat (limited to 'drivers/hv/channel_mgmt.c')
-rw-r--r-- | drivers/hv/channel_mgmt.c | 157
1 file changed, 106 insertions, 51 deletions
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 26b419203f16..f33465d78a02 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -31,6 +31,7 @@
 #include <linux/completion.h>
 #include <linux/delay.h>
 #include <linux/hyperv.h>
+#include <asm/mshyperv.h>

 #include "hyperv_vmbus.h"

@@ -147,6 +148,29 @@ static const struct {
 	{ HV_RDV_GUID	},
 };

+/*
+ * The rescinded channel may be blocked waiting for a response from the host;
+ * take care of that.
+ */
+static void vmbus_rescind_cleanup(struct vmbus_channel *channel)
+{
+	struct vmbus_channel_msginfo *msginfo;
+	unsigned long flags;
+
+
+	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+
+	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
+				msglistentry) {
+
+		if (msginfo->waiting_channel == channel) {
+			complete(&msginfo->waitevent);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+}
+
 static bool is_unsupported_vmbus_devs(const uuid_le *guid)
 {
 	int i;
@@ -180,33 +204,34 @@ static u16 hv_get_dev_type(const struct vmbus_channel *channel)
  * @buf: Raw buffer channel data
  *
  * @icmsghdrp is of type &struct icmsg_hdr.
- * @negop is of type &struct icmsg_negotiate.
  * Set up and fill in default negotiate response message.
  *
- * The fw_version specifies the framework version that
- * we can support and srv_version specifies the service
- * version we can support.
+ * The fw_version and fw_vercnt specifies the framework version that
+ * we can support.
+ *
+ * The srv_version and srv_vercnt specifies the service
+ * versions we can support.
+ *
+ * Versions are given in decreasing order.
+ *
+ * nego_fw_version and nego_srv_version store the selected protocol versions.
  *
  * Mainly used by Hyper-V drivers.
  */
 bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
-				struct icmsg_negotiate *negop, u8 *buf,
-				int fw_version, int srv_version)
+				u8 *buf, const int *fw_version, int fw_vercnt,
+				const int *srv_version, int srv_vercnt,
+				int *nego_fw_version, int *nego_srv_version)
 {
 	int icframe_major, icframe_minor;
 	int icmsg_major, icmsg_minor;
 	int fw_major, fw_minor;
 	int srv_major, srv_minor;
-	int i;
+	int i, j;
 	bool found_match = false;
+	struct icmsg_negotiate *negop;

 	icmsghdrp->icmsgsize = 0x10;
-	fw_major = (fw_version >> 16);
-	fw_minor = (fw_version & 0xFFFF);
-
-	srv_major = (srv_version >> 16);
-	srv_minor = (srv_version & 0xFFFF);
-
 	negop = (struct icmsg_negotiate *)&buf[
 		sizeof(struct vmbuspipe_hdr) +
 		sizeof(struct icmsg_hdr)];
@@ -222,13 +247,22 @@ bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
 	 * support.
 	 */

-	for (i = 0; i < negop->icframe_vercnt; i++) {
-		if ((negop->icversion_data[i].major == fw_major) &&
-		    (negop->icversion_data[i].minor == fw_minor)) {
-			icframe_major = negop->icversion_data[i].major;
-			icframe_minor = negop->icversion_data[i].minor;
-			found_match = true;
+	for (i = 0; i < fw_vercnt; i++) {
+		fw_major = (fw_version[i] >> 16);
+		fw_minor = (fw_version[i] & 0xFFFF);
+
+		for (j = 0; j < negop->icframe_vercnt; j++) {
+			if ((negop->icversion_data[j].major == fw_major) &&
+			    (negop->icversion_data[j].minor == fw_minor)) {
+				icframe_major = negop->icversion_data[j].major;
+				icframe_minor = negop->icversion_data[j].minor;
+				found_match = true;
+				break;
+			}
 		}
+
+		if (found_match)
+			break;
 	}

 	if (!found_match)
@@ -236,14 +270,26 @@ bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,

 	found_match = false;

-	for (i = negop->icframe_vercnt;
-	     (i < negop->icframe_vercnt + negop->icmsg_vercnt); i++) {
-		if ((negop->icversion_data[i].major == srv_major) &&
-		    (negop->icversion_data[i].minor == srv_minor)) {
-			icmsg_major = negop->icversion_data[i].major;
-			icmsg_minor = negop->icversion_data[i].minor;
-			found_match = true;
+	for (i = 0; i < srv_vercnt; i++) {
+		srv_major = (srv_version[i] >> 16);
+		srv_minor = (srv_version[i] & 0xFFFF);
+
+		for (j = negop->icframe_vercnt;
+			(j < negop->icframe_vercnt + negop->icmsg_vercnt);
+			j++) {
+
+			if ((negop->icversion_data[j].major == srv_major) &&
+			    (negop->icversion_data[j].minor == srv_minor)) {
+
+				icmsg_major = negop->icversion_data[j].major;
+				icmsg_minor = negop->icversion_data[j].minor;
+				found_match = true;
+				break;
+			}
 		}
+
+		if (found_match)
+			break;
 	}

 	/*
@@ -260,6 +306,12 @@ fw_error:
 		negop->icmsg_vercnt = 1;
 	}

+	if (nego_fw_version)
+		*nego_fw_version = (icframe_major << 16) | icframe_minor;
+
+	if (nego_srv_version)
+		*nego_srv_version = (icmsg_major << 16) | icmsg_minor;
+
 	negop->icversion_data[0].major = icframe_major;
 	negop->icversion_data[0].minor = icframe_minor;
 	negop->icversion_data[1].major = icmsg_major;
@@ -280,13 +332,15 @@ static struct vmbus_channel *alloc_channel(void)
 	if (!channel)
 		return NULL;

-	channel->acquire_ring_lock = true;
 	spin_lock_init(&channel->inbound_lock);
 	spin_lock_init(&channel->lock);

 	INIT_LIST_HEAD(&channel->sc_list);
 	INIT_LIST_HEAD(&channel->percpu_list);

+	tasklet_init(&channel->callback_event,
+		     vmbus_on_event, (unsigned long)channel);
+
 	return channel;
 }

@@ -295,15 +349,17 @@ static struct vmbus_channel *alloc_channel(void)
  */
 static void free_channel(struct vmbus_channel *channel)
 {
+	tasklet_kill(&channel->callback_event);
 	kfree(channel);
 }

 static void percpu_channel_enq(void *arg)
 {
 	struct vmbus_channel *channel = arg;
-	int cpu = smp_processor_id();
+	struct hv_per_cpu_context *hv_cpu
+		= this_cpu_ptr(hv_context.cpu_context);

-	list_add_tail(&channel->percpu_list, &hv_context.percpu_list[cpu]);
+	list_add_tail(&channel->percpu_list, &hv_cpu->chan_list);
 }

 static void percpu_channel_deq(void *arg)
@@ -321,24 +377,21 @@ static void vmbus_release_relid(u32 relid)
 	memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
 	msg.child_relid = relid;
 	msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
-	vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released));
+	vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released),
+		       true);
 }

 void hv_event_tasklet_disable(struct vmbus_channel *channel)
 {
-	struct tasklet_struct *tasklet;
-	tasklet = hv_context.event_dpc[channel->target_cpu];
-	tasklet_disable(tasklet);
+	tasklet_disable(&channel->callback_event);
 }

 void hv_event_tasklet_enable(struct vmbus_channel *channel)
 {
-	struct tasklet_struct *tasklet;
-	tasklet = hv_context.event_dpc[channel->target_cpu];
-	tasklet_enable(tasklet);
+	tasklet_enable(&channel->callback_event);

 	/* In case there is any pending event */
-	tasklet_schedule(tasklet);
+	tasklet_schedule(&channel->callback_event);
 }

 void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
@@ -673,9 +726,12 @@ static void vmbus_wait_for_unload(void)
 			break;

 		for_each_online_cpu(cpu) {
-			page_addr = hv_context.synic_message_page[cpu];
-			msg = (struct hv_message *)page_addr +
-				VMBUS_MESSAGE_SINT;
+			struct hv_per_cpu_context *hv_cpu
+				= per_cpu_ptr(hv_context.cpu_context, cpu);
+
+			page_addr = hv_cpu->synic_message_page;
+			msg = (struct hv_message *)page_addr
+				+ VMBUS_MESSAGE_SINT;

 			message_type = READ_ONCE(msg->header.message_type);
 			if (message_type == HVMSG_NONE)
@@ -699,7 +755,10 @@ static void vmbus_wait_for_unload(void)
 	 * messages after we reconnect.
 	 */
 	for_each_online_cpu(cpu) {
-		page_addr = hv_context.synic_message_page[cpu];
+		struct hv_per_cpu_context *hv_cpu
+			= per_cpu_ptr(hv_context.cpu_context, cpu);
+
+		page_addr = hv_cpu->synic_message_page;
 		msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
 		msg->header.message_type = HVMSG_NONE;
 	}
@@ -728,7 +787,8 @@ void vmbus_initiate_unload(bool crash)
 	init_completion(&vmbus_connection.unload_event);
 	memset(&hdr, 0, sizeof(struct vmbus_channel_message_header));
 	hdr.msgtype = CHANNELMSG_UNLOAD;
-	vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header));
+	vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header),
+		       !crash);

 	/*
 	 * vmbus_initiate_unload() is also called on crash and the crash can be
@@ -759,13 +819,6 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
 	}

 	/*
-	 * By default we setup state to enable batched
-	 * reading. A specific service can choose to
-	 * disable this prior to opening the channel.
-	 */
-	newchannel->batched_reading = true;
-
-	/*
 	 * Setup state for signalling the host.
 	 */
 	newchannel->sig_event = (struct hv_input_signal_event *)
@@ -823,6 +876,8 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
 	channel->rescind = true;
 	spin_unlock_irqrestore(&channel->lock, flags);

+	vmbus_rescind_cleanup(channel);
+
 	if (channel->device_obj) {
 		if (channel->chn_rescind_callback) {
 			channel->chn_rescind_callback(channel);
@@ -1116,8 +1171,8 @@ int vmbus_request_offers(void)

 	msg->msgtype = CHANNELMSG_REQUESTOFFERS;

-	ret = vmbus_post_msg(msg,
-			       sizeof(struct vmbus_channel_message_header));
+	ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_message_header),
+			     true);
 	if (ret != 0) {
 		pr_err("Unable to request offers - %d\n", ret);
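
For reference, a minimal sketch of how an integration-component driver might call the reworked vmbus_prep_negotiate_resp() after this change. The fw_versions/srv_versions tables, the version values, and the example_handle_negotiate() wrapper below are illustrative assumptions and are not part of this diff; only the calling convention (version arrays in decreasing order of preference, optional out-parameters for the negotiated versions) follows the updated kernel-doc comment above.

/* Hypothetical caller sketch -- not part of this patch. */
#include <linux/kernel.h>
#include <linux/hyperv.h>

/* Supported versions, listed in decreasing order (placeholder values). */
static const int fw_versions[] = {
	(3 << 16) | 0,		/* framework 3.0 */
	(1 << 16) | 0,		/* framework 1.0 */
};

static const int srv_versions[] = {
	(4 << 16) | 0,		/* service 4.0 */
	(3 << 16) | 0,		/* service 3.0 */
};

static void example_handle_negotiate(u8 *recv_buf)
{
	struct icmsg_hdr *icmsghdrp;
	int nego_fw, nego_srv;

	/* The ICMSG header follows the pipe header in the receive buffer. */
	icmsghdrp = (struct icmsg_hdr *)&recv_buf[sizeof(struct vmbuspipe_hdr)];

	/* Fills in the negotiate response; returns false if nothing matched. */
	if (vmbus_prep_negotiate_resp(icmsghdrp, recv_buf,
				      fw_versions, ARRAY_SIZE(fw_versions),
				      srv_versions, ARRAY_SIZE(srv_versions),
				      &nego_fw, &nego_srv))
		pr_info("negotiated fw %d.%d, srv %d.%d\n",
			nego_fw >> 16, nego_fw & 0xFFFF,
			nego_srv >> 16, nego_srv & 0xFFFF);
}

As the added "if (nego_fw_version)" and "if (nego_srv_version)" checks in the diff show, either out-parameter may be passed as NULL when the caller does not need the negotiated value.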