Diffstat (limited to 'drivers/hv')
-rw-r--r--  drivers/hv/channel.c       | 461
-rw-r--r--  drivers/hv/channel_mgmt.c  |   7
-rw-r--r--  drivers/hv/hv.c            |   4
-rw-r--r--  drivers/hv/hv_kvp.c        |   2
-rw-r--r--  drivers/hv/hv_util.c       |  76
-rw-r--r--  drivers/hv/vmbus_drv.c     |  11
6 files changed, 357 insertions(+), 204 deletions(-)
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 3ebda7707e46..fbdda9938039 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -22,20 +22,97 @@
#include "hyperv_vmbus.h"
-#define NUM_PAGES_SPANNED(addr, len) \
-((PAGE_ALIGN(addr + len) >> PAGE_SHIFT) - (addr >> PAGE_SHIFT))
+/*
+ * hv_gpadl_size - Return the real size of a gpadl, the size that Hyper-V uses
+ *
+ * For BUFFER gpadl, Hyper-V uses the exact same size as the guest does.
+ *
+ * For RING gpadl, in each ring, the guest uses one PAGE_SIZE as the header
+ * (because of the alignment requirement); however, the hypervisor only
+ * uses the first HV_HYP_PAGE_SIZE as the header, therefore leaving a
+ * (PAGE_SIZE - HV_HYP_PAGE_SIZE) gap. And since there are two rings in a
+ * ringbuffer, the total size for a RING gpadl that Hyper-V uses is the
+ * total size that the guest uses minus twice the gap size.
+ */
+static inline u32 hv_gpadl_size(enum hv_gpadl_type type, u32 size)
+{
+ switch (type) {
+ case HV_GPADL_BUFFER:
+ return size;
+ case HV_GPADL_RING:
+ /* The size of a ringbuffer must be page-aligned */
+ BUG_ON(size % PAGE_SIZE);
+ /*
+ * Two things to notice here:
+ * 1) We're processing two ring buffers as a unit
+ * 2) We're skipping any space larger than HV_HYP_PAGE_SIZE in
+ * the first guest-size page of each of the two ring buffers.
+ * So we effectively subtract out two guest-size pages, and add
+ * back two Hyper-V size pages.
+ */
+ return size - 2 * (PAGE_SIZE - HV_HYP_PAGE_SIZE);
+ }
+ BUG();
+ return 0;
+}
-static unsigned long virt_to_hvpfn(void *addr)
+/*
+ * hv_ring_gpadl_send_hvpgoffset - Calculate the send offset (in unit of
+ * HV_HYP_PAGE) in a ring gpadl based on the
+ * offset in the guest
+ *
+ * @offset: the offset (in bytes) where the send ringbuffer starts in the
+ * virtual address space of the guest
+ */
+static inline u32 hv_ring_gpadl_send_hvpgoffset(u32 offset)
{
- phys_addr_t paddr;
- if (is_vmalloc_addr(addr))
- paddr = page_to_phys(vmalloc_to_page(addr)) +
- offset_in_page(addr);
- else
- paddr = __pa(addr);
+ /*
+ * For RING gpadl, in each ring, the guest uses one PAGE_SIZE as the
+ * header (because of the alignment requirement); however, the
+ * hypervisor only uses the first HV_HYP_PAGE_SIZE as the header,
+ * therefore leaving a (PAGE_SIZE - HV_HYP_PAGE_SIZE) gap.
+ *
+ * To calculate the effective send offset in the gpadl, we need to
+ * subtract this gap.
+ */
+ return (offset - (PAGE_SIZE - HV_HYP_PAGE_SIZE)) >> HV_HYP_PAGE_SHIFT;
+}
- return paddr >> PAGE_SHIFT;
+/*
+ * hv_gpadl_hvpfn - Return the Hyper-V PFN of the @i-th Hyper-V page in
+ * the gpadl
+ *
+ * @type: the type of the gpadl
+ * @kbuffer: the pointer to the gpadl in the guest
+ * @size: the total size (in bytes) of the gpadl
+ * @send_offset: the offset (in bytes) where the send ringbuffer starts in the
+ * virtual address space of the guest
+ * @i: the index
+ */
+static inline u64 hv_gpadl_hvpfn(enum hv_gpadl_type type, void *kbuffer,
+ u32 size, u32 send_offset, int i)
+{
+ int send_idx = hv_ring_gpadl_send_hvpgoffset(send_offset);
+ unsigned long delta = 0UL;
+
+ switch (type) {
+ case HV_GPADL_BUFFER:
+ break;
+ case HV_GPADL_RING:
+ if (i == 0)
+ delta = 0;
+ else if (i <= send_idx)
+ delta = PAGE_SIZE - HV_HYP_PAGE_SIZE;
+ else
+ delta = 2 * (PAGE_SIZE - HV_HYP_PAGE_SIZE);
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ return virt_to_hvpfn(kbuffer + delta + (HV_HYP_PAGE_SIZE * i));
}
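A worked example makes the size and index math above concrete. The standalone sketch below (userspace, not kernel code) mirrors the three helpers, assuming a 64 KiB guest PAGE_SIZE (as on some ARM64 configurations) and the fixed 4 KiB Hyper-V page; the layout and constants are illustrative, only the formulas come from this patch.

/* Sketch of the RING-gpadl math above; the 64K/4K sizes are assumptions. */
#include <stdio.h>

#define PAGE_SIZE         (64 * 1024UL)  /* guest page size (assumed) */
#define HV_HYP_PAGE_SIZE  (4 * 1024UL)   /* Hyper-V page size (fixed) */
#define HV_HYP_PAGE_SHIFT 12

int main(void)
{
	/* Two rings, each one header page plus one payload page. */
	unsigned long guest_size  = 4 * PAGE_SIZE;                /* 256 KiB */
	unsigned long gap         = PAGE_SIZE - HV_HYP_PAGE_SIZE; /* 60 KiB  */

	/* hv_gpadl_size(HV_GPADL_RING, size): drop one gap per ring. */
	unsigned long hv_size     = guest_size - 2 * gap;         /* 136 KiB */

	/* hv_ring_gpadl_send_hvpgoffset(): byte offset of the second
	 * ring, minus the first ring's gap, converted to 4K frames. */
	unsigned long send_offset = 2 * PAGE_SIZE;                /* 128 KiB */
	unsigned long send_idx    = (send_offset - gap) >> HV_HYP_PAGE_SHIFT;

	printf("Hyper-V sees %lu bytes (%lu x 4K frames)\n",
	       hv_size, hv_size / HV_HYP_PAGE_SIZE);              /* 34 frames */
	printf("second ring starts at frame %lu\n", send_idx);    /* 17 */
	return 0;
}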
/*
@@ -112,160 +189,6 @@ int vmbus_alloc_ring(struct vmbus_channel *newchannel,
}
EXPORT_SYMBOL_GPL(vmbus_alloc_ring);
-static int __vmbus_open(struct vmbus_channel *newchannel,
- void *userdata, u32 userdatalen,
- void (*onchannelcallback)(void *context), void *context)
-{
- struct vmbus_channel_open_channel *open_msg;
- struct vmbus_channel_msginfo *open_info = NULL;
- struct page *page = newchannel->ringbuffer_page;
- u32 send_pages, recv_pages;
- unsigned long flags;
- int err;
-
- if (userdatalen > MAX_USER_DEFINED_BYTES)
- return -EINVAL;
-
- send_pages = newchannel->ringbuffer_send_offset;
- recv_pages = newchannel->ringbuffer_pagecount - send_pages;
-
- if (newchannel->state != CHANNEL_OPEN_STATE)
- return -EINVAL;
-
- newchannel->state = CHANNEL_OPENING_STATE;
- newchannel->onchannel_callback = onchannelcallback;
- newchannel->channel_callback_context = context;
-
- err = hv_ringbuffer_init(&newchannel->outbound, page, send_pages);
- if (err)
- goto error_clean_ring;
-
- err = hv_ringbuffer_init(&newchannel->inbound,
- &page[send_pages], recv_pages);
- if (err)
- goto error_clean_ring;
-
- /* Establish the gpadl for the ring buffer */
- newchannel->ringbuffer_gpadlhandle = 0;
-
- err = vmbus_establish_gpadl(newchannel,
- page_address(newchannel->ringbuffer_page),
- (send_pages + recv_pages) << PAGE_SHIFT,
- &newchannel->ringbuffer_gpadlhandle);
- if (err)
- goto error_clean_ring;
-
- /* Create and init the channel open message */
- open_info = kmalloc(sizeof(*open_info) +
- sizeof(struct vmbus_channel_open_channel),
- GFP_KERNEL);
- if (!open_info) {
- err = -ENOMEM;
- goto error_free_gpadl;
- }
-
- init_completion(&open_info->waitevent);
- open_info->waiting_channel = newchannel;
-
- open_msg = (struct vmbus_channel_open_channel *)open_info->msg;
- open_msg->header.msgtype = CHANNELMSG_OPENCHANNEL;
- open_msg->openid = newchannel->offermsg.child_relid;
- open_msg->child_relid = newchannel->offermsg.child_relid;
- open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
- open_msg->downstream_ringbuffer_pageoffset = newchannel->ringbuffer_send_offset;
- open_msg->target_vp = hv_cpu_number_to_vp_number(newchannel->target_cpu);
-
- if (userdatalen)
- memcpy(open_msg->userdata, userdata, userdatalen);
-
- spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
- list_add_tail(&open_info->msglistentry,
- &vmbus_connection.chn_msg_list);
- spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
-
- if (newchannel->rescind) {
- err = -ENODEV;
- goto error_free_info;
- }
-
- err = vmbus_post_msg(open_msg,
- sizeof(struct vmbus_channel_open_channel), true);
-
- trace_vmbus_open(open_msg, err);
-
- if (err != 0)
- goto error_clean_msglist;
-
- wait_for_completion(&open_info->waitevent);
-
- spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
- list_del(&open_info->msglistentry);
- spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
-
- if (newchannel->rescind) {
- err = -ENODEV;
- goto error_free_info;
- }
-
- if (open_info->response.open_result.status) {
- err = -EAGAIN;
- goto error_free_info;
- }
-
- newchannel->state = CHANNEL_OPENED_STATE;
- kfree(open_info);
- return 0;
-
-error_clean_msglist:
- spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
- list_del(&open_info->msglistentry);
- spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
-error_free_info:
- kfree(open_info);
-error_free_gpadl:
- vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);
- newchannel->ringbuffer_gpadlhandle = 0;
-error_clean_ring:
- hv_ringbuffer_cleanup(&newchannel->outbound);
- hv_ringbuffer_cleanup(&newchannel->inbound);
- newchannel->state = CHANNEL_OPEN_STATE;
- return err;
-}
-
-/*
- * vmbus_connect_ring - Open the channel but reuse ring buffer
- */
-int vmbus_connect_ring(struct vmbus_channel *newchannel,
- void (*onchannelcallback)(void *context), void *context)
-{
- return __vmbus_open(newchannel, NULL, 0, onchannelcallback, context);
-}
-EXPORT_SYMBOL_GPL(vmbus_connect_ring);
-
-/*
- * vmbus_open - Open the specified channel.
- */
-int vmbus_open(struct vmbus_channel *newchannel,
- u32 send_ringbuffer_size, u32 recv_ringbuffer_size,
- void *userdata, u32 userdatalen,
- void (*onchannelcallback)(void *context), void *context)
-{
- int err;
-
- err = vmbus_alloc_ring(newchannel, send_ringbuffer_size,
- recv_ringbuffer_size);
- if (err)
- return err;
-
- err = __vmbus_open(newchannel, userdata, userdatalen,
- onchannelcallback, context);
- if (err)
- vmbus_free_ring(newchannel);
-
- return err;
-}
-EXPORT_SYMBOL_GPL(vmbus_open);
-
/* Used for Hyper-V Socket: a guest client's connect() to the host */
int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
const guid_t *shv_host_servie_id)
@@ -317,7 +240,8 @@ EXPORT_SYMBOL_GPL(vmbus_send_modifychannel);
/*
* create_gpadl_header - Creates a gpadl for the specified buffer
*/
-static int create_gpadl_header(void *kbuffer, u32 size,
+static int create_gpadl_header(enum hv_gpadl_type type, void *kbuffer,
+ u32 size, u32 send_offset,
struct vmbus_channel_msginfo **msginfo)
{
int i;
@@ -330,7 +254,7 @@ static int create_gpadl_header(void *kbuffer, u32 size,
int pfnsum, pfncount, pfnleft, pfncurr, pfnsize;
- pagecount = size >> PAGE_SHIFT;
+ pagecount = hv_gpadl_size(type, size) >> HV_HYP_PAGE_SHIFT;
/* do we need a gpadl body msg */
pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
@@ -357,10 +281,10 @@ static int create_gpadl_header(void *kbuffer, u32 size,
gpadl_header->range_buflen = sizeof(struct gpa_range) +
pagecount * sizeof(u64);
gpadl_header->range[0].byte_offset = 0;
- gpadl_header->range[0].byte_count = size;
+ gpadl_header->range[0].byte_count = hv_gpadl_size(type, size);
for (i = 0; i < pfncount; i++)
- gpadl_header->range[0].pfn_array[i] = virt_to_hvpfn(
- kbuffer + PAGE_SIZE * i);
+ gpadl_header->range[0].pfn_array[i] = hv_gpadl_hvpfn(
+ type, kbuffer, size, send_offset, i);
*msginfo = msgheader;
pfnsum = pfncount;
@@ -411,8 +335,8 @@ static int create_gpadl_header(void *kbuffer, u32 size,
* so the hypervisor guarantees that this is ok.
*/
for (i = 0; i < pfncurr; i++)
- gpadl_body->pfn[i] = virt_to_hvpfn(
- kbuffer + PAGE_SIZE * (pfnsum + i));
+ gpadl_body->pfn[i] = hv_gpadl_hvpfn(type,
+ kbuffer, size, send_offset, pfnsum + i);
/* add to msg header */
list_add_tail(&msgbody->msglistentry,
@@ -438,10 +362,10 @@ static int create_gpadl_header(void *kbuffer, u32 size,
gpadl_header->range_buflen = sizeof(struct gpa_range) +
pagecount * sizeof(u64);
gpadl_header->range[0].byte_offset = 0;
- gpadl_header->range[0].byte_count = size;
+ gpadl_header->range[0].byte_count = hv_gpadl_size(type, size);
for (i = 0; i < pagecount; i++)
- gpadl_header->range[0].pfn_array[i] = virt_to_hvpfn(
- kbuffer + PAGE_SIZE * i);
+ gpadl_header->range[0].pfn_array[i] = hv_gpadl_hvpfn(
+ type, kbuffer, size, send_offset, i);
*msginfo = msgheader;
}
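For intuition on when the header message alone suffices: a minimal sketch of the header/body split above, using illustrative per-message PFN capacities (the real values derive from MAX_SIZE_CHANNEL_MESSAGE and the message struct sizes, which are not shown in this hunk).

/* Sketch of the gpadl header/body message split; the capacities are
 * placeholders, not the real MAX_SIZE_CHANNEL_MESSAGE-derived values. */
#include <stdio.h>

#define HDR_PFN_CAPACITY  26U  /* assumed: PFNs that fit in the header msg */
#define BODY_PFN_CAPACITY 28U  /* assumed: PFNs that fit in one body msg   */

static unsigned int gpadl_msgs_needed(unsigned int pagecount)
{
	unsigned int pfnleft, bodies;

	if (pagecount <= HDR_PFN_CAPACITY)
		return 1;                       /* header message only */

	pfnleft = pagecount - HDR_PFN_CAPACITY;
	bodies = (pfnleft + BODY_PFN_CAPACITY - 1) / BODY_PFN_CAPACITY;
	return 1 + bodies;                      /* header + body messages */
}

int main(void)
{
	/* e.g. the 34 Hyper-V frames from the ring example above */
	printf("34 frames -> %u messages\n", gpadl_msgs_needed(34));
	return 0;
}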
@@ -454,15 +378,20 @@ nomem:
}
/*
- * vmbus_establish_gpadl - Establish a GPADL for the specified buffer
+ * __vmbus_establish_gpadl - Establish a GPADL for a buffer or ringbuffer
*
* @channel: a channel
+ * @type: the type of the corresponding GPADL, only meaningful for the guest.
* @kbuffer: from kmalloc or vmalloc
* @size: page-size multiple
+ * @send_offset: the offset (in bytes) where the send ring buffer starts,
+ * should be 0 for BUFFER type gpadl
* @gpadl_handle: some funky thing
*/
-int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
- u32 size, u32 *gpadl_handle)
+static int __vmbus_establish_gpadl(struct vmbus_channel *channel,
+ enum hv_gpadl_type type, void *kbuffer,
+ u32 size, u32 send_offset,
+ u32 *gpadl_handle)
{
struct vmbus_channel_gpadl_header *gpadlmsg;
struct vmbus_channel_gpadl_body *gpadl_body;
@@ -476,7 +405,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
next_gpadl_handle =
(atomic_inc_return(&vmbus_connection.next_gpadl_handle) - 1);
- ret = create_gpadl_header(kbuffer, size, &msginfo);
+ ret = create_gpadl_header(type, kbuffer, size, send_offset, &msginfo);
if (ret)
return ret;
@@ -557,8 +486,184 @@ cleanup:
kfree(msginfo);
return ret;
}
+
+/*
+ * vmbus_establish_gpadl - Establish a GPADL for the specified buffer
+ *
+ * @channel: a channel
+ * @kbuffer: from kmalloc or vmalloc
+ * @size: page-size multiple
+ * @gpadl_handle: some funky thing
+ */
+int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
+ u32 size, u32 *gpadl_handle)
+{
+ return __vmbus_establish_gpadl(channel, HV_GPADL_BUFFER, kbuffer, size,
+ 0U, gpadl_handle);
+}
EXPORT_SYMBOL_GPL(vmbus_establish_gpadl);
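A hedged usage sketch for the exported wrapper: a driver establishing a BUFFER gpadl over its own allocation and tearing it down before freeing. The my_* names and the surrounding function are hypothetical; the vmbus_* calls match the signatures in this file.

/* Hypothetical caller of vmbus_establish_gpadl()/vmbus_teardown_gpadl(). */
static int my_share_buffer(struct vmbus_channel *channel)
{
	void *my_buf;
	u32 my_gpadl;
	int ret;

	my_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);  /* page-size multiple */
	if (!my_buf)
		return -ENOMEM;

	ret = vmbus_establish_gpadl(channel, my_buf, PAGE_SIZE, &my_gpadl);
	if (ret) {
		kfree(my_buf);
		return ret;
	}

	/* ... hand my_gpadl to the host in a device-specific message ... */

	vmbus_teardown_gpadl(channel, my_gpadl);  /* before freeing */
	kfree(my_buf);
	return 0;
}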
+static int __vmbus_open(struct vmbus_channel *newchannel,
+ void *userdata, u32 userdatalen,
+ void (*onchannelcallback)(void *context), void *context)
+{
+ struct vmbus_channel_open_channel *open_msg;
+ struct vmbus_channel_msginfo *open_info = NULL;
+ struct page *page = newchannel->ringbuffer_page;
+ u32 send_pages, recv_pages;
+ unsigned long flags;
+ int err;
+
+ if (userdatalen > MAX_USER_DEFINED_BYTES)
+ return -EINVAL;
+
+ send_pages = newchannel->ringbuffer_send_offset;
+ recv_pages = newchannel->ringbuffer_pagecount - send_pages;
+
+ if (newchannel->state != CHANNEL_OPEN_STATE)
+ return -EINVAL;
+
+ newchannel->state = CHANNEL_OPENING_STATE;
+ newchannel->onchannel_callback = onchannelcallback;
+ newchannel->channel_callback_context = context;
+
+ err = hv_ringbuffer_init(&newchannel->outbound, page, send_pages);
+ if (err)
+ goto error_clean_ring;
+
+ err = hv_ringbuffer_init(&newchannel->inbound,
+ &page[send_pages], recv_pages);
+ if (err)
+ goto error_clean_ring;
+
+ /* Establish the gpadl for the ring buffer */
+ newchannel->ringbuffer_gpadlhandle = 0;
+
+ err = __vmbus_establish_gpadl(newchannel, HV_GPADL_RING,
+ page_address(newchannel->ringbuffer_page),
+ (send_pages + recv_pages) << PAGE_SHIFT,
+ newchannel->ringbuffer_send_offset << PAGE_SHIFT,
+ &newchannel->ringbuffer_gpadlhandle);
+ if (err)
+ goto error_clean_ring;
+
+ /* Create and init the channel open message */
+ open_info = kmalloc(sizeof(*open_info) +
+ sizeof(struct vmbus_channel_open_channel),
+ GFP_KERNEL);
+ if (!open_info) {
+ err = -ENOMEM;
+ goto error_free_gpadl;
+ }
+
+ init_completion(&open_info->waitevent);
+ open_info->waiting_channel = newchannel;
+
+ open_msg = (struct vmbus_channel_open_channel *)open_info->msg;
+ open_msg->header.msgtype = CHANNELMSG_OPENCHANNEL;
+ open_msg->openid = newchannel->offermsg.child_relid;
+ open_msg->child_relid = newchannel->offermsg.child_relid;
+ open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
+ /*
+ * The unit of ->downstream_ringbuffer_pageoffset is HV_HYP_PAGE and
+ * the unit of ->ringbuffer_send_offset (i.e. send_pages) is PAGE, so
+ * here we calculate it into HV_HYP_PAGE.
+ */
+ open_msg->downstream_ringbuffer_pageoffset =
+ hv_ring_gpadl_send_hvpgoffset(send_pages << PAGE_SHIFT);
+ open_msg->target_vp = hv_cpu_number_to_vp_number(newchannel->target_cpu);
+
+ if (userdatalen)
+ memcpy(open_msg->userdata, userdata, userdatalen);
+
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_add_tail(&open_info->msglistentry,
+ &vmbus_connection.chn_msg_list);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
+ if (newchannel->rescind) {
+ err = -ENODEV;
+ goto error_free_info;
+ }
+
+ err = vmbus_post_msg(open_msg,
+ sizeof(struct vmbus_channel_open_channel), true);
+
+ trace_vmbus_open(open_msg, err);
+
+ if (err != 0)
+ goto error_clean_msglist;
+
+ wait_for_completion(&open_info->waitevent);
+
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_del(&open_info->msglistentry);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
+ if (newchannel->rescind) {
+ err = -ENODEV;
+ goto error_free_info;
+ }
+
+ if (open_info->response.open_result.status) {
+ err = -EAGAIN;
+ goto error_free_info;
+ }
+
+ newchannel->state = CHANNEL_OPENED_STATE;
+ kfree(open_info);
+ return 0;
+
+error_clean_msglist:
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_del(&open_info->msglistentry);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+error_free_info:
+ kfree(open_info);
+error_free_gpadl:
+ vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);
+ newchannel->ringbuffer_gpadlhandle = 0;
+error_clean_ring:
+ hv_ringbuffer_cleanup(&newchannel->outbound);
+ hv_ringbuffer_cleanup(&newchannel->inbound);
+ newchannel->state = CHANNEL_OPEN_STATE;
+ return err;
+}
+
+/*
+ * vmbus_connect_ring - Open the channel but reuse ring buffer
+ */
+int vmbus_connect_ring(struct vmbus_channel *newchannel,
+ void (*onchannelcallback)(void *context), void *context)
+{
+ return __vmbus_open(newchannel, NULL, 0, onchannelcallback, context);
+}
+EXPORT_SYMBOL_GPL(vmbus_connect_ring);
+
+/*
+ * vmbus_open - Open the specified channel.
+ */
+int vmbus_open(struct vmbus_channel *newchannel,
+ u32 send_ringbuffer_size, u32 recv_ringbuffer_size,
+ void *userdata, u32 userdatalen,
+ void (*onchannelcallback)(void *context), void *context)
+{
+ int err;
+
+ err = vmbus_alloc_ring(newchannel, send_ringbuffer_size,
+ recv_ringbuffer_size);
+ if (err)
+ return err;
+
+ err = __vmbus_open(newchannel, userdata, userdatalen,
+ onchannelcallback, context);
+ if (err)
+ vmbus_free_ring(newchannel);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(vmbus_open);
+
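The split between vmbus_alloc_ring()/vmbus_connect_ring() and the one-shot vmbus_open() is worth a sketch: callers that must keep ring memory alive across a close/re-open (e.g. hibernation paths) allocate once and connect separately. A hypothetical probe using the split form (names other than the vmbus_* APIs are illustrative):

/* Hypothetical: open a channel over pre-allocated rings. */
static void my_callback(void *context);   /* illustrative callback */

static int my_probe(struct vmbus_channel *channel)
{
	int ret;

	/* Allocate send/recv rings once; sizes are illustrative. */
	ret = vmbus_alloc_ring(channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE);
	if (ret)
		return ret;

	/* Open the channel over the existing rings. */
	ret = vmbus_connect_ring(channel, my_callback, channel);
	if (ret)
		vmbus_free_ring(channel);

	return ret;
}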
/*
* vmbus_teardown_gpadl - Teardown the specified GPADL handle
*/
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 591106cf58fc..1d44bb635bb8 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -731,7 +731,7 @@ static void vmbus_wait_for_unload(void)
void *page_addr;
struct hv_message *msg;
struct vmbus_channel_message_header *hdr;
- u32 message_type;
+ u32 message_type, i;
/*
* CHANNELMSG_UNLOAD_RESPONSE is always delivered to the CPU which was
@@ -741,8 +741,11 @@ static void vmbus_wait_for_unload(void)
* functional and vmbus_unload_response() will complete
* vmbus_connection.unload_event. If not, the last thing we can do is
* read message pages for all CPUs directly.
+ *
+ * Wait no more than 10 seconds so that the panic path can't hang
+ * forever in case the response message isn't seen.
*/
- while (1) {
+ for (i = 0; i < 1000; i++) {
if (completion_done(&vmbus_connection.unload_event))
break;
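(The 1000-iteration cap yields the 10-second bound assuming the loop body, not shown in this hunk, delays roughly 10 ms per pass, as the function's existing mdelay-based polling does.)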
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index da69338f92f5..410f1fab519c 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -165,7 +165,7 @@ void hv_synic_enable_regs(unsigned int cpu)
hv_get_simp(simp.as_uint64);
simp.simp_enabled = 1;
simp.base_simp_gpa = virt_to_phys(hv_cpu->synic_message_page)
- >> PAGE_SHIFT;
+ >> HV_HYP_PAGE_SHIFT;
hv_set_simp(simp.as_uint64);
@@ -173,7 +173,7 @@ void hv_synic_enable_regs(unsigned int cpu)
hv_get_siefp(siefp.as_uint64);
siefp.siefp_enabled = 1;
siefp.base_siefp_gpa = virt_to_phys(hv_cpu->synic_event_page)
- >> PAGE_SHIFT;
+ >> HV_HYP_PAGE_SHIFT;
hv_set_siefp(siefp.as_uint64);
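The shift change matters only when the guest page size differs from Hyper-V's: the SIMP/SIEFP base fields are expressed in 4 KiB frames regardless of guest PAGE_SIZE. For example, a synic page at physical address 0x12340000 must be reported as 4K frame 0x12340 (>> 12), not as 64K frame 0x1234 (>> 16), which is what the old PAGE_SHIFT would produce on a 64 KiB-page kernel.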
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index e74b144b8f3d..754d35a25a1c 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -354,7 +354,7 @@ static void process_ib_ipinfo(void *in_msg, void *out_msg, int op)
out->body.kvp_ip_val.dhcp_enabled = in->kvp_ip_val.dhcp_enabled;
- /* fallthrough */
+ fallthrough;
case KVP_OP_GET_IP_INFO:
utf16s_to_utf8s((wchar_t *)in->kvp_ip_val.adapter_id,
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index 92ee0fe4c919..05566ecdbe4b 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -282,26 +282,52 @@ static struct {
spinlock_t lock;
} host_ts;
-static struct timespec64 hv_get_adj_host_time(void)
+static inline u64 reftime_to_ns(u64 reftime)
{
- struct timespec64 ts;
- u64 newtime, reftime;
+ return (reftime - WLTIMEDELTA) * 100;
+}
+
+/*
+ * Hard coded threshold for host timesync delay: 600 seconds
+ */
+static const u64 HOST_TIMESYNC_DELAY_THRESH = 600 * (u64)NSEC_PER_SEC;
+
+static int hv_get_adj_host_time(struct timespec64 *ts)
+{
+ u64 newtime, reftime, timediff_adj;
unsigned long flags;
+ int ret = 0;
spin_lock_irqsave(&host_ts.lock, flags);
reftime = hv_read_reference_counter();
- newtime = host_ts.host_time + (reftime - host_ts.ref_time);
- ts = ns_to_timespec64((newtime - WLTIMEDELTA) * 100);
+
+ /*
+ * We need to let the caller know that last update from host
+ * is older than the max allowable threshold. clock_gettime()
+ * and PTP ioctl do not have a documented error that we could
+ * return for this specific case. Use ESTALE to report this.
+ */
+ timediff_adj = reftime - host_ts.ref_time;
+ if (timediff_adj * 100 > HOST_TIMESYNC_DELAY_THRESH) {
+ pr_warn_once("TIMESYNC IC: Stale time stamp, %llu nsecs old\n",
+ (timediff_adj * 100));
+ ret = -ESTALE;
+ }
+
+ newtime = host_ts.host_time + timediff_adj;
+ *ts = ns_to_timespec64(reftime_to_ns(newtime));
spin_unlock_irqrestore(&host_ts.lock, flags);
- return ts;
+ return ret;
}
static void hv_set_host_time(struct work_struct *work)
{
- struct timespec64 ts = hv_get_adj_host_time();
- do_settimeofday64(&ts);
+ struct timespec64 ts;
+
+ if (!hv_get_adj_host_time(&ts))
+ do_settimeofday64(&ts);
}
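A standalone sketch of the reference-time conversion above: Hyper-V reference time counts 100 ns units since the Windows epoch (1601-01-01), and WLTIMEDELTA (defined elsewhere in this file) is that epoch's offset from the Unix epoch in the same units. The value used below is the conventional 1601-to-1970 offset; treating it as WLTIMEDELTA's definition is an assumption.

/* Sketch of reftime_to_ns() and the staleness check; WLTIMEDELTA's
 * value here is an assumption about the definition in this file. */
#include <stdio.h>
#include <stdint.h>

#define WLTIMEDELTA  116444736000000000ULL  /* 100ns units, 1601->1970 */
#define NSEC_PER_SEC 1000000000ULL

static uint64_t reftime_to_ns(uint64_t reftime)
{
	return (reftime - WLTIMEDELTA) * 100;   /* 100ns ticks -> ns */
}

int main(void)
{
	/* A host sample 700 s old trips the 600 s threshold. */
	uint64_t ref_time = WLTIMEDELTA;                    /* sample's reftime */
	uint64_t now      = ref_time + 700 * NSEC_PER_SEC / 100;
	uint64_t diff_ns  = (now - ref_time) * 100;

	printf("unix ns at sample: %llu\n",
	       (unsigned long long)reftime_to_ns(ref_time)); /* 0 = Unix epoch */
	printf("stale: %s\n",
	       diff_ns > 600 * NSEC_PER_SEC ? "yes (-ESTALE)" : "no");
	return 0;
}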
/*
@@ -361,10 +387,23 @@ static void timesync_onchannelcallback(void *context)
struct ictimesync_ref_data *refdata;
u8 *time_txf_buf = util_timesynch.recv_buffer;
- vmbus_recvpacket(channel, time_txf_buf,
- HV_HYP_PAGE_SIZE, &recvlen, &requestid);
+ /*
+ * Drain the ring buffer and use the last packet to update
+ * host_ts
+ */
+ while (1) {
+ int ret = vmbus_recvpacket(channel, time_txf_buf,
+ HV_HYP_PAGE_SIZE, &recvlen,
+ &requestid);
+ if (ret) {
+ pr_warn_once("TimeSync IC pkt recv failed (Err: %d)\n",
+ ret);
+ break;
+ }
+
+ if (!recvlen)
+ break;
- if (recvlen > 0) {
icmsghdrp = (struct icmsg_hdr *)&time_txf_buf[
sizeof(struct vmbuspipe_hdr)];
@@ -461,6 +500,9 @@ static void heartbeat_onchannelcallback(void *context)
}
}
+#define HV_UTIL_RING_SEND_SIZE VMBUS_RING_SIZE(3 * HV_HYP_PAGE_SIZE)
+#define HV_UTIL_RING_RECV_SIZE VMBUS_RING_SIZE(3 * HV_HYP_PAGE_SIZE)
+
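The VMBUS_RING_SIZE-based sizes keep the x86 footprint unchanged while staying correct on larger page sizes: assuming VMBUS_RING_SIZE(sz) page-aligns the ring header plus sz bytes of payload (its definition is not in this diff), VMBUS_RING_SIZE(3 * HV_HYP_PAGE_SIZE) on a 4 KiB-page kernel is PAGE_ALIGN(header + 12 KiB) = 16 KiB per direction, i.e. the same total as the old fixed 4 * HV_HYP_PAGE_SIZE.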
static int util_probe(struct hv_device *dev,
const struct hv_vmbus_device_id *dev_id)
{
@@ -491,8 +533,8 @@ static int util_probe(struct hv_device *dev,
hv_set_drvdata(dev, srv);
- ret = vmbus_open(dev->channel, 4 * HV_HYP_PAGE_SIZE,
- 4 * HV_HYP_PAGE_SIZE, NULL, 0, srv->util_cb,
+ ret = vmbus_open(dev->channel, HV_UTIL_RING_SEND_SIZE,
+ HV_UTIL_RING_RECV_SIZE, NULL, 0, srv->util_cb,
dev->channel);
if (ret)
goto error;
@@ -551,8 +593,8 @@ static int util_resume(struct hv_device *dev)
return ret;
}
- ret = vmbus_open(dev->channel, 4 * HV_HYP_PAGE_SIZE,
- 4 * HV_HYP_PAGE_SIZE, NULL, 0, srv->util_cb,
+ ret = vmbus_open(dev->channel, HV_UTIL_RING_SEND_SIZE,
+ HV_UTIL_RING_RECV_SIZE, NULL, 0, srv->util_cb,
dev->channel);
return ret;
}
@@ -622,9 +664,7 @@ static int hv_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
static int hv_ptp_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
{
- *ts = hv_get_adj_host_time();
-
- return 0;
+ return hv_get_adj_host_time(ts);
}
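Since hv_ptp_gettime() can now fail, a PHC consumer sees the stale-sample condition as a clock_gettime() error. A hypothetical userspace sketch (the /dev/ptp0 path is an assumption; FD_TO_CLOCKID is the standard dynamic posix-clock encoding):

/* Hypothetical userspace reader of the Hyper-V PHC, handling -ESTALE. */
#include <stdio.h>
#include <errno.h>
#include <fcntl.h>
#include <time.h>
#include <string.h>
#include <unistd.h>

#define FD_TO_CLOCKID(fd) ((clockid_t)((~(clockid_t)(fd) << 3) | 3))

int main(void)
{
	struct timespec ts;
	int fd = open("/dev/ptp0", O_RDONLY);   /* device path is assumed */

	if (fd < 0)
		return 1;
	if (clock_gettime(FD_TO_CLOCKID(fd), &ts)) {
		/* ESTALE now signals a stale host timesync sample */
		fprintf(stderr, "gettime: %s\n", strerror(errno));
		close(fd);
		return 1;
	}
	printf("host time: %lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
	close(fd);
	return 0;
}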
static struct ptp_clock_info ptp_hyperv_info = {
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 910b6e90866c..7b8816c2e5f9 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -83,7 +83,7 @@ static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
void *args)
{
- struct die_args *die = (struct die_args *)args;
+ struct die_args *die = args;
struct pt_regs *regs = die->regs;
/* Don't notify Hyper-V if the die event is other than oops */
@@ -2382,7 +2382,10 @@ static int vmbus_bus_suspend(struct device *dev)
if (atomic_read(&vmbus_connection.nr_chan_close_on_suspend) > 0)
wait_for_completion(&vmbus_connection.ready_for_suspend_event);
- WARN_ON(atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) != 0);
+ if (atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) != 0) {
+ pr_err("Can not suspend due to a previous failed resuming\n");
+ return -EBUSY;
+ }
mutex_lock(&vmbus_connection.channel_mutex);
@@ -2456,7 +2459,9 @@ static int vmbus_bus_resume(struct device *dev)
vmbus_request_offers();
- wait_for_completion(&vmbus_connection.ready_for_resume_event);
+ if (wait_for_completion_timeout(
+ &vmbus_connection.ready_for_resume_event, 10 * HZ) == 0)
+ pr_err("Some vmbus device is missing after suspending?\n");
/* Reset the event for the next suspend. */
reinit_completion(&vmbus_connection.ready_for_suspend_event);