Diffstat (limited to 'drivers/hv')
-rw-r--r--  drivers/hv/channel.c      |  50
-rw-r--r--  drivers/hv/channel_mgmt.c | 121
-rw-r--r--  drivers/hv/connection.c   |  14
-rw-r--r--  drivers/hv/hv.c           |  85
-rw-r--r--  drivers/hv/hyperv_vmbus.h |   4
-rw-r--r--  drivers/hv/ring_buffer.c  |  12
-rw-r--r--  drivers/hv/vmbus_drv.c    |  10
7 files changed, 247 insertions, 49 deletions
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 0b122f8c7005..6de6c98ce6eb 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -116,6 +116,15 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
 	unsigned long flags;
 	int ret, t, err = 0;
 
+	spin_lock_irqsave(&newchannel->sc_lock, flags);
+	if (newchannel->state == CHANNEL_OPEN_STATE) {
+		newchannel->state = CHANNEL_OPENING_STATE;
+	} else {
+		spin_unlock_irqrestore(&newchannel->sc_lock, flags);
+		return -EINVAL;
+	}
+	spin_unlock_irqrestore(&newchannel->sc_lock, flags);
+
 	newchannel->onchannel_callback = onchannelcallback;
 	newchannel->channel_callback_context = context;
 
@@ -216,6 +225,9 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
 	list_del(&open_info->msglistentry);
 	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
 
+	if (err == 0)
+		newchannel->state = CHANNEL_OPENED_STATE;
+
 	kfree(open_info);
 	return err;
 
@@ -500,15 +512,14 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
 }
 EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl);
 
-/*
- * vmbus_close - Close the specified channel
- */
-void vmbus_close(struct vmbus_channel *channel)
+static void vmbus_close_internal(struct vmbus_channel *channel)
 {
 	struct vmbus_channel_close_channel *msg;
 	int ret;
 	unsigned long flags;
 
+	channel->state = CHANNEL_OPEN_STATE;
+	channel->sc_creation_callback = NULL;
 	/* Stop callback and cancel the timer asap */
 	spin_lock_irqsave(&channel->inbound_lock, flags);
 	channel->onchannel_callback = NULL;
@@ -538,6 +549,37 @@ void vmbus_close(struct vmbus_channel *channel)
 
 }
 
+
+/*
+ * vmbus_close - Close the specified channel
+ */
+void vmbus_close(struct vmbus_channel *channel)
+{
+	struct list_head *cur, *tmp;
+	struct vmbus_channel *cur_channel;
+
+	if (channel->primary_channel != NULL) {
+		/*
+		 * We will only close sub-channels when
+		 * the primary is closed.
+		 */
+		return;
+	}
+	/*
+	 * Close all the sub-channels first and then close the
+	 * primary channel.
+	 */
+	list_for_each_safe(cur, tmp, &channel->sc_list) {
+		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
+		if (cur_channel->state != CHANNEL_OPENED_STATE)
+			continue;
+		vmbus_close_internal(cur_channel);
+	}
+	/*
+	 * Now close the primary.
+	 */
+	vmbus_close_internal(channel);
+}
 EXPORT_SYMBOL_GPL(vmbus_close);
 
 /**
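The vmbus_open() hunk above gates the open on a new sc_lock-protected state check: only a channel in CHANNEL_OPEN_STATE may move to CHANNEL_OPENING_STATE (and to CHANNEL_OPENED_STATE once the handshake succeeds); a repeated or concurrent open fails with -EINVAL. A minimal user-space C sketch of that state machine follows; model_open() and the simplified struct are hypothetical stand-ins for the kernel code, not part of the commit.

/* Minimal model of the open-state gating; in the kernel the state
 * test and transition run under the channel's sc_lock. */
#include <stdio.h>

enum channel_state {
	CHANNEL_OPEN_STATE,	/* offered, not yet opened */
	CHANNEL_OPENING_STATE,	/* open in progress */
	CHANNEL_OPENED_STATE,	/* ring buffers established */
};

struct channel {
	enum channel_state state;
};

/* Returns 0 on success, -1 (standing in for -EINVAL) when the
 * channel is not in a state where opening is legal. */
static int model_open(struct channel *ch)
{
	if (ch->state != CHANNEL_OPEN_STATE)
		return -1;
	ch->state = CHANNEL_OPENING_STATE;

	/* ... ring-buffer setup and host handshake would go here ... */

	ch->state = CHANNEL_OPENED_STATE;
	return 0;
}

int main(void)
{
	struct channel ch = { .state = CHANNEL_OPEN_STATE };

	printf("first open:  %d\n", model_open(&ch));	/* 0 */
	printf("second open: %d\n", model_open(&ch));	/* -1: already opened */
	return 0;
}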
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index bad8128b283a..0df75908200e 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -115,6 +115,9 @@ static struct vmbus_channel *alloc_channel(void)
 		return NULL;
 
 	spin_lock_init(&channel->inbound_lock);
+	spin_lock_init(&channel->sc_lock);
+
+	INIT_LIST_HEAD(&channel->sc_list);
 
 	channel->controlwq = create_workqueue("hv_vmbus_ctl");
 	if (!channel->controlwq) {
@@ -166,6 +169,7 @@ static void vmbus_process_rescind_offer(struct work_struct *work)
 						     struct vmbus_channel,
 						     work);
 	unsigned long flags;
+	struct vmbus_channel *primary_channel;
 	struct vmbus_channel_relid_released msg;
 
 	vmbus_device_unregister(channel->device_obj);
@@ -174,9 +178,16 @@ static void vmbus_process_rescind_offer(struct work_struct *work)
 	msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
 	vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released));
 
-	spin_lock_irqsave(&vmbus_connection.channel_lock, flags);
-	list_del(&channel->listentry);
-	spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);
+	if (channel->primary_channel == NULL) {
+		spin_lock_irqsave(&vmbus_connection.channel_lock, flags);
+		list_del(&channel->listentry);
+		spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);
+	} else {
+		primary_channel = channel->primary_channel;
+		spin_lock_irqsave(&primary_channel->sc_lock, flags);
+		list_del(&channel->listentry);
+		spin_unlock_irqrestore(&primary_channel->sc_lock, flags);
+	}
 	free_channel(channel);
 }
 
@@ -228,6 +239,24 @@ static void vmbus_process_offer(struct work_struct *work)
 	spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);
 
 	if (!fnew) {
+		/*
+		 * Check to see if this is a sub-channel.
+		 */
+		if (newchannel->offermsg.offer.sub_channel_index != 0) {
+			/*
+			 * Process the sub-channel.
+			 */
+			newchannel->primary_channel = channel;
+			spin_lock_irqsave(&channel->sc_lock, flags);
+			list_add_tail(&newchannel->sc_list, &channel->sc_list);
+			spin_unlock_irqrestore(&channel->sc_lock, flags);
+			newchannel->state = CHANNEL_OPEN_STATE;
+			if (channel->sc_creation_callback != NULL)
+				channel->sc_creation_callback(newchannel);
+
+			return;
+		}
+
 		free_channel(newchannel);
 		return;
 	}
@@ -329,7 +358,7 @@ static u32 get_vp_index(uuid_le *type_guid)
 		return 0;
 	}
 	cur_cpu = (++next_vp % max_cpus);
-	return cur_cpu;
+	return hv_context.vp_index[cur_cpu];
 }
 
 /*
@@ -685,4 +714,86 @@ cleanup:
 
 	return ret;
 }
-/* eof */
+
+/*
+ * Retrieve the (sub) channel on which to send an outgoing request.
+ * When a primary channel has multiple sub-channels, we choose a
+ * channel whose VCPU binding is closest to the VCPU on which
+ * this call is being made.
+ */
+struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary)
+{
+	struct list_head *cur, *tmp;
+	int cur_cpu = hv_context.vp_index[smp_processor_id()];
+	struct vmbus_channel *cur_channel;
+	struct vmbus_channel *outgoing_channel = primary;
+	int cpu_distance, new_cpu_distance;
+
+	if (list_empty(&primary->sc_list))
+		return outgoing_channel;
+
+	list_for_each_safe(cur, tmp, &primary->sc_list) {
+		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
+		if (cur_channel->state != CHANNEL_OPENED_STATE)
+			continue;
+
+		if (cur_channel->target_vp == cur_cpu)
+			return cur_channel;
+
+		cpu_distance = ((outgoing_channel->target_vp > cur_cpu) ?
+				(outgoing_channel->target_vp - cur_cpu) :
+				(cur_cpu - outgoing_channel->target_vp));
+
+		new_cpu_distance = ((cur_channel->target_vp > cur_cpu) ?
+				    (cur_channel->target_vp - cur_cpu) :
+				    (cur_cpu - cur_channel->target_vp));
+
+		if (cpu_distance < new_cpu_distance)
+			continue;
+
+		outgoing_channel = cur_channel;
+	}
+
+	return outgoing_channel;
+}
+EXPORT_SYMBOL_GPL(vmbus_get_outgoing_channel);
+
+static void invoke_sc_cb(struct vmbus_channel *primary_channel)
+{
+	struct list_head *cur, *tmp;
+	struct vmbus_channel *cur_channel;
+
+	if (primary_channel->sc_creation_callback == NULL)
+		return;
+
+	list_for_each_safe(cur, tmp, &primary_channel->sc_list) {
+		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
+
+		primary_channel->sc_creation_callback(cur_channel);
+	}
+}
+
+void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
+				void (*sc_cr_cb)(struct vmbus_channel *new_sc))
+{
+	primary_channel->sc_creation_callback = sc_cr_cb;
+}
+EXPORT_SYMBOL_GPL(vmbus_set_sc_create_callback);
+
+bool vmbus_are_subchannels_present(struct vmbus_channel *primary)
+{
+	bool ret;
+
+	ret = !list_empty(&primary->sc_list);
+
+	if (ret) {
+		/*
+		 * Invoke the callback on sub-channel creation.
+		 * This will present a uniform interface to the
+		 * clients.
+		 */
+		invoke_sc_cb(primary);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(vmbus_are_subchannels_present);
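vmbus_get_outgoing_channel() above picks, among the opened sub-channels, the one whose target VP is numerically closest to the caller's VP, falling back to the primary channel; an exact match wins immediately, and on a distance tie the later channel in the list is taken (the candidate replaces the current best whenever its distance is not larger). A standalone C sketch of that selection, with a hypothetical array of target_vp values in place of the kernel's sc_list:

/* Nearest-VP selection, mirroring vmbus_get_outgoing_channel().
 * Index 0 plays the role of the primary channel. */
#include <stdio.h>
#include <stdlib.h>

/* Absolute distance between a channel's VP and the caller's VP. */
static int vp_distance(int target_vp, int cur_vp)
{
	return abs(target_vp - cur_vp);
}

static int pick_outgoing(const int *target_vp, int n, int cur_vp)
{
	int best = 0, i;

	for (i = 1; i < n; i++) {
		if (target_vp[i] == cur_vp)
			return i;	/* exact match wins immediately */
		/* <= mirrors the kernel's "continue only if strictly
		 * farther" logic, so ties go to the later channel */
		if (vp_distance(target_vp[i], cur_vp) <=
		    vp_distance(target_vp[best], cur_vp))
			best = i;
	}
	return best;
}

int main(void)
{
	int vps[] = { 0, 2, 5, 7 };	/* primary on VP 0, subs on 2, 5, 7 */

	/* channel 3 (VP 7): distance 1, tie with VP 5 goes to the later one */
	printf("caller on VP 6 -> channel %d\n", pick_outgoing(vps, 4, 6));
	/* channel 1 (VP 2): exact match */
	printf("caller on VP 2 -> channel %d\n", pick_outgoing(vps, 4, 2));
	return 0;
}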
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 253a74ba245c..ec3b8cdf1e04 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -246,12 +246,26 @@ struct vmbus_channel *relid2channel(u32 relid)
 	struct vmbus_channel *channel;
 	struct vmbus_channel *found_channel = NULL;
 	unsigned long flags;
+	struct list_head *cur, *tmp;
+	struct vmbus_channel *cur_sc;
 
 	spin_lock_irqsave(&vmbus_connection.channel_lock, flags);
 	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
 		if (channel->offermsg.child_relid == relid) {
 			found_channel = channel;
 			break;
+		} else if (!list_empty(&channel->sc_list)) {
+			/*
+			 * Deal with sub-channels.
+			 */
+			list_for_each_safe(cur, tmp, &channel->sc_list) {
+				cur_sc = list_entry(cur, struct vmbus_channel,
+							sc_list);
+				if (cur_sc->offermsg.child_relid == relid) {
+					found_channel = cur_sc;
+					break;
+				}
+			}
 		}
 	}
 	spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);
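Because sub-channels now hang off their primary's sc_list instead of the global chn_list, relid2channel() above becomes a two-level search: scan the primaries, and for each non-matching primary also scan its sub-channel list. A toy C model of that lookup, with hypothetical flattened data structures in place of the kernel lists:

/* Two-level relid lookup: primaries first, then each primary's subs. */
#include <stdio.h>
#include <stddef.h>

struct chan {
	unsigned int relid;
	const struct chan *subs;	/* sub-channel array, may be NULL */
	int nsubs;
};

static const struct chan *find_relid(const struct chan *primaries,
				     int nprim, unsigned int relid)
{
	int i, j;

	for (i = 0; i < nprim; i++) {
		if (primaries[i].relid == relid)
			return &primaries[i];
		for (j = 0; j < primaries[i].nsubs; j++)
			if (primaries[i].subs[j].relid == relid)
				return &primaries[i].subs[j];
	}
	return NULL;
}

int main(void)
{
	static const struct chan subs[] = { { 21, NULL, 0 }, { 22, NULL, 0 } };
	static const struct chan prim[] = { { 20, subs, 2 }, { 30, NULL, 0 } };
	const struct chan *c = find_relid(prim, 2, 22);

	printf("relid 22 -> %s\n", c ? "found (sub-channel)" : "not found");
	return 0;
}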
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index ae4923756d98..88f4096fa078 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -265,6 +265,59 @@ u16 hv_signal_event(void *con_id)
 	return status;
 }
 
+
+int hv_synic_alloc(void)
+{
+	size_t size = sizeof(struct tasklet_struct);
+	int cpu;
+
+	for_each_online_cpu(cpu) {
+		hv_context.event_dpc[cpu] = kmalloc(size, GFP_ATOMIC);
+		if (hv_context.event_dpc[cpu] == NULL) {
+			pr_err("Unable to allocate event dpc\n");
+			goto err;
+		}
+		tasklet_init(hv_context.event_dpc[cpu], vmbus_on_event, cpu);
+
+		hv_context.synic_message_page[cpu] =
+			(void *)get_zeroed_page(GFP_ATOMIC);
+
+		if (hv_context.synic_message_page[cpu] == NULL) {
+			pr_err("Unable to allocate SYNIC message page\n");
+			goto err;
+		}
+
+		hv_context.synic_event_page[cpu] =
+			(void *)get_zeroed_page(GFP_ATOMIC);
+
+		if (hv_context.synic_event_page[cpu] == NULL) {
+			pr_err("Unable to allocate SYNIC event page\n");
+			goto err;
+		}
+	}
+
+	return 0;
+err:
+	return -ENOMEM;
+}
+
+void hv_synic_free_cpu(int cpu)
+{
+	kfree(hv_context.event_dpc[cpu]);
+	if (hv_context.synic_message_page[cpu])
+		free_page((unsigned long)hv_context.synic_event_page[cpu]);
+	if (hv_context.synic_message_page[cpu])
+		free_page((unsigned long)hv_context.synic_message_page[cpu]);
+}
+
+void hv_synic_free(void)
+{
+	int cpu;
+
+	for_each_online_cpu(cpu)
+		hv_synic_free_cpu(cpu);
+}
+
 /*
  * hv_synic_init - Initialize the Synthethic Interrupt Controller.
  *
@@ -289,30 +342,6 @@ void hv_synic_init(void *arg)
 	/* Check the version */
 	rdmsrl(HV_X64_MSR_SVERSION, version);
 
-	hv_context.event_dpc[cpu] = kmalloc(sizeof(struct tasklet_struct),
-					    GFP_ATOMIC);
-	if (hv_context.event_dpc[cpu] == NULL) {
-		pr_err("Unable to allocate event dpc\n");
-		goto cleanup;
-	}
-	tasklet_init(hv_context.event_dpc[cpu], vmbus_on_event, cpu);
-
-	hv_context.synic_message_page[cpu] =
-		(void *)get_zeroed_page(GFP_ATOMIC);
-
-	if (hv_context.synic_message_page[cpu] == NULL) {
-		pr_err("Unable to allocate SYNIC message page\n");
-		goto cleanup;
-	}
-
-	hv_context.synic_event_page[cpu] =
-		(void *)get_zeroed_page(GFP_ATOMIC);
-
-	if (hv_context.synic_event_page[cpu] == NULL) {
-		pr_err("Unable to allocate SYNIC event page\n");
-		goto cleanup;
-	}
-
 	/* Setup the Synic's message page */
 	rdmsrl(HV_X64_MSR_SIMP, simp.as_uint64);
 	simp.simp_enabled = 1;
@@ -355,14 +384,6 @@ void hv_synic_init(void *arg)
 	rdmsrl(HV_X64_MSR_VP_INDEX, vp_index);
 	hv_context.vp_index[cpu] = (u32)vp_index;
 	return;
-
-cleanup:
-	if (hv_context.synic_event_page[cpu])
-		free_page((unsigned long)hv_context.synic_event_page[cpu]);
-
-	if (hv_context.synic_message_page[cpu])
-		free_page((unsigned long)hv_context.synic_message_page[cpu]);
-	return;
 }
 
 /*
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 12f2f9e989f7..d84918fe19ab 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -527,6 +527,10 @@ extern int hv_post_message(union hv_connection_id connection_id,
 
 extern u16 hv_signal_event(void *con_id);
 
+extern int hv_synic_alloc(void);
+
+extern void hv_synic_free(void);
+
 extern void hv_synic_init(void *irqarg);
 
 extern void hv_synic_cleanup(void *arg);
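The hv.c change splits allocation out of hv_synic_init(): hv_synic_init() runs on every CPU via on_each_cpu() in interrupt context, so the GFP_ATOMIC allocations and the page-freeing cleanup path move into hv_synic_alloc()/hv_synic_free(), called from process context in vmbus_bus_init(). (Note that hv_synic_free_cpu() tests synic_message_page[cpu] twice; the first test was presumably meant to check synic_event_page[cpu].) A user-space C sketch of the allocate-everything-then-enable pattern, with hypothetical names and calloc() standing in for get_zeroed_page():

/* Allocate all per-CPU resources up front; a single free routine
 * unwinds partial allocations on failure. */
#include <stdio.h>
#include <stdlib.h>

#define NCPUS 4

static void *msg_page[NCPUS];

static int synic_alloc(void)
{
	int cpu;

	for (cpu = 0; cpu < NCPUS; cpu++) {
		msg_page[cpu] = calloc(1, 4096);
		if (!msg_page[cpu])
			return -1;	/* caller unwinds via synic_free() */
	}
	return 0;
}

static void synic_free(void)
{
	int cpu;

	for (cpu = 0; cpu < NCPUS; cpu++)
		free(msg_page[cpu]);	/* free(NULL) is a no-op */
}

int main(void)
{
	if (synic_alloc()) {
		synic_free();
		return 1;
	}
	/* per-CPU enable would run here, with no allocation needed */
	synic_free();
	return 0;
}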
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index d6fbb5772b8d..26c93cf9f6be 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -32,7 +32,7 @@
 void hv_begin_read(struct hv_ring_buffer_info *rbi)
 {
 	rbi->ring_buffer->interrupt_mask = 1;
-	smp_mb();
+	mb();
 }
 
 u32 hv_end_read(struct hv_ring_buffer_info *rbi)
@@ -41,7 +41,7 @@ u32 hv_end_read(struct hv_ring_buffer_info *rbi)
 	u32 write;
 
 	rbi->ring_buffer->interrupt_mask = 0;
-	smp_mb();
+	mb();
 
 	/*
 	 * Now check to see if the ring buffer is still empty.
@@ -71,10 +71,12 @@ u32 hv_end_read(struct hv_ring_buffer_info *rbi)
 
 static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
 {
-	smp_mb();
+	mb();
 	if (rbi->ring_buffer->interrupt_mask)
 		return false;
 
+	/* check interrupt_mask before read_index */
+	rmb();
 	/*
 	 * This is the only case we need to signal when the
 	 * ring transitions from being empty to non-empty.
@@ -442,7 +444,7 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
 					     sizeof(u64));
 
 	/* Issue a full memory barrier before updating the write index */
-	smp_mb();
+	mb();
 
 	/* Now, update the write location */
 	hv_set_next_write_location(outring_info, next_write_location);
@@ -549,7 +551,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
 	/* Make sure all reads are done before we update the read index since */
 	/* the writer may start writing to the read area once the read index */
 	/*is updated */
-	smp_mb();
+	mb();
 
 	/* Update the read index */
 	hv_set_next_read_location(inring_info, next_read_location);
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index bf421e0efa1e..a2464bf07c49 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -434,7 +434,7 @@ static void vmbus_on_msg_dpc(unsigned long data)
 		 * will not deliver any more messages since there is
 		 * no empty slot
 		 */
-		smp_mb();
+		mb();
 
 		if (msg->header.message_flags.msg_pending) {
 			/*
@@ -563,6 +563,9 @@ static int vmbus_bus_init(int irq)
 	 */
 	hv_register_vmbus_handler(irq, vmbus_isr);
+	ret = hv_synic_alloc();
+	if (ret)
+		goto err_alloc;
 	/*
 	 * Initialize the per-cpu interrupt state and
 	 * connect to the host.
 	 */
@@ -570,13 +573,14 @@ static int vmbus_bus_init(int irq)
 	on_each_cpu(hv_synic_init, NULL, 1);
 	ret = vmbus_connect();
 	if (ret)
-		goto err_irq;
+		goto err_alloc;
 
 	vmbus_request_offers();
 
 	return 0;
 
-err_irq:
+err_alloc:
+	hv_synic_free();
 	free_irq(irq, hv_acpi_dev);
 
 err_unregister:
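The ring_buffer.c and vmbus_drv.c hunks replace smp_mb() with mb() because the peer on these ring buffers is the host, which runs concurrently on other physical CPUs: even a uniprocessor guest kernel, where smp_mb() degrades to a mere compiler barrier, needs a real fence here. The added rmb() in hv_need_to_signal() additionally orders the interrupt_mask check before the read_index load. A C11-atomics sketch of the signaling protocol those barriers protect follows; the layout is hypothetical and atomic_thread_fence() stands in for the kernel's mb()/rmb():

#include <stdatomic.h>
#include <stdio.h>

struct ring {
	atomic_uint interrupt_mask;
	atomic_uint write_index;
	atomic_uint read_index;
};

/* Reader side of hv_end_read(): unmask, then re-check emptiness.
 * Without the full fence the mask store could be reordered after the
 * index loads and a wakeup could be lost. */
static int reader_unmask_and_check(struct ring *r)
{
	atomic_store_explicit(&r->interrupt_mask, 0, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* kernel: mb() */
	return atomic_load_explicit(&r->write_index, memory_order_relaxed) !=
	       atomic_load_explicit(&r->read_index, memory_order_relaxed);
}

/* Writer side of hv_need_to_signal(): publish data, then test the
 * mask; only the empty->non-empty transition needs a signal. */
static int writer_need_signal(struct ring *r, unsigned int old_write)
{
	atomic_thread_fence(memory_order_seq_cst);	/* kernel: mb() */
	if (atomic_load_explicit(&r->interrupt_mask, memory_order_relaxed))
		return 0;
	/* kernel: rmb() - check interrupt_mask before read_index */
	atomic_thread_fence(memory_order_acquire);
	return atomic_load_explicit(&r->read_index,
				    memory_order_relaxed) == old_write;
}

int main(void)
{
	static struct ring r;	/* zero-initialized: unmasked, empty */

	printf("%d %d\n", reader_unmask_and_check(&r),
	       writer_need_signal(&r, 0));
	return 0;
}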