Diffstat (limited to 'drivers/virt')
-rw-r--r--  drivers/virt/Kconfig                          2
-rw-r--r--  drivers/virt/Makefile                         1
-rw-r--r--  drivers/virt/acrn/Kconfig                    15
-rw-r--r--  drivers/virt/acrn/Makefile                    3
-rw-r--r--  drivers/virt/acrn/acrn_drv.h                227
-rw-r--r--  drivers/virt/acrn/hsm.c                     470
-rw-r--r--  drivers/virt/acrn/hypercall.h               254
-rw-r--r--  drivers/virt/acrn/ioeventfd.c               273
-rw-r--r--  drivers/virt/acrn/ioreq.c                   657
-rw-r--r--  drivers/virt/acrn/irqfd.c                   235
-rw-r--r--  drivers/virt/acrn/mm.c                      306
-rw-r--r--  drivers/virt/acrn/vm.c                      126
-rw-r--r--  drivers/virt/vboxguest/vboxguest_utils.c     18
13 files changed, 2581 insertions, 6 deletions
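
For orientation, the sketch below shows how ACRN userspace might drive the new /dev/acrn_hsm interface that hsm.c (further down in this diff) implements. It is illustrative only and not part of the patch: the ioctl names and struct acrn_vm_creation are taken from the UAPI header <linux/acrn.h> referenced by the patch, error handling is minimal, and the memory, vCPU register and I/O request setup that a real device model performs is omitted.

/* Hypothetical userspace sketch -- not part of this patch. */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/acrn.h>

int main(void)
{
	struct acrn_vm_creation create;
	int fd;

	/* One opened fd of /dev/acrn_hsm represents one VM instance. */
	fd = open("/dev/acrn_hsm", O_RDWR);
	if (fd < 0)
		return 1;

	/* Reserved fields must be zero or ACRN_IOCTL_CREATE_VM fails. */
	memset(&create, 0, sizeof(create));
	if (ioctl(fd, ACRN_IOCTL_CREATE_VM, &create) == 0) {
		ioctl(fd, ACRN_IOCTL_START_VM, 0);	/* start the new VM */
		ioctl(fd, ACRN_IOCTL_DESTROY_VM, 0);	/* tear it down */
	}

	/* Releasing the fd also destroys the VM object in HSM. */
	close(fd);
	return 0;
}
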
diff --git a/drivers/virt/Kconfig b/drivers/virt/Kconfig
index 80c5f9c16ec1..8061e8ef449f 100644
--- a/drivers/virt/Kconfig
+++ b/drivers/virt/Kconfig
@@ -34,4 +34,6 @@ config FSL_HV_MANAGER
source "drivers/virt/vboxguest/Kconfig"
source "drivers/virt/nitro_enclaves/Kconfig"
+
+source "drivers/virt/acrn/Kconfig"
endif
diff --git a/drivers/virt/Makefile b/drivers/virt/Makefile
index f28425ce4b39..3e272ea60cd9 100644
--- a/drivers/virt/Makefile
+++ b/drivers/virt/Makefile
@@ -7,3 +7,4 @@ obj-$(CONFIG_FSL_HV_MANAGER) += fsl_hypervisor.o
obj-y += vboxguest/
obj-$(CONFIG_NITRO_ENCLAVES) += nitro_enclaves/
+obj-$(CONFIG_ACRN_HSM) += acrn/
diff --git a/drivers/virt/acrn/Kconfig b/drivers/virt/acrn/Kconfig
new file mode 100644
index 000000000000..3e1a61c9d8d8
--- /dev/null
+++ b/drivers/virt/acrn/Kconfig
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+config ACRN_HSM
+ tristate "ACRN Hypervisor Service Module"
+ depends on ACRN_GUEST
+ select EVENTFD
+ help
+ ACRN Hypervisor Service Module (HSM) is a kernel module which
+ communicates with ACRN userspace through ioctls and talks to
+ the ACRN Hypervisor through hypercalls. HSM will only run in
+ a privileged management VM, called Service VM, to manage User
+ VMs and do I/O emulation. Not required for simply running
+ under ACRN as a User VM.
+
+ To compile this driver as a module, choose M here; the module
+ will be called acrn. If unsure, say N.
diff --git a/drivers/virt/acrn/Makefile b/drivers/virt/acrn/Makefile
new file mode 100644
index 000000000000..08ce641dcfa1
--- /dev/null
+++ b/drivers/virt/acrn/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_ACRN_HSM) := acrn.o
+acrn-y := hsm.o vm.o mm.o ioreq.o ioeventfd.o irqfd.o
diff --git a/drivers/virt/acrn/acrn_drv.h b/drivers/virt/acrn/acrn_drv.h
new file mode 100644
index 000000000000..1be54efa666c
--- /dev/null
+++ b/drivers/virt/acrn/acrn_drv.h
@@ -0,0 +1,227 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ACRN_HSM_DRV_H
+#define __ACRN_HSM_DRV_H
+
+#include <linux/acrn.h>
+#include <linux/dev_printk.h>
+#include <linux/miscdevice.h>
+#include <linux/types.h>
+
+#include "hypercall.h"
+
+extern struct miscdevice acrn_dev;
+
+#define ACRN_NAME_LEN 16
+#define ACRN_MEM_MAPPING_MAX 256
+
+#define ACRN_MEM_REGION_ADD 0
+#define ACRN_MEM_REGION_DEL 2
+
+struct acrn_vm;
+struct acrn_ioreq_client;
+
+/**
+ * struct vm_memory_region_op - Hypervisor memory operation
+ * @type: Operation type (ACRN_MEM_REGION_*)
+ * @attr: Memory attribute (ACRN_MEM_TYPE_* | ACRN_MEM_ACCESS_*)
+ * @user_vm_pa: Physical address of User VM to be mapped.
+ * @service_vm_pa: Physical address of Service VM to be mapped.
+ * @size: Size of this region.
+ *
+ * Structure containing the information provided to the ACRN Hypervisor to
+ * manage the EPT mappings of a single memory region of the User VM. Several
+ * &struct vm_memory_region_op can be batched and sent to the ACRN Hypervisor;
+ * see &struct vm_memory_region_batch.
+ */
+struct vm_memory_region_op {
+ u32 type;
+ u32 attr;
+ u64 user_vm_pa;
+ u64 service_vm_pa;
+ u64 size;
+};
+
+/**
+ * struct vm_memory_region_batch - A batch of vm_memory_region_op.
+ * @vmid: A User VM ID.
+ * @reserved: Reserved.
+ * @regions_num: The number of vm_memory_region_op.
+ * @regions_gpa: Physical address of a vm_memory_region_op array.
+ *
+ * HC_VM_SET_MEMORY_REGIONS uses this structure to manage EPT mappings of
+ * multiple memory regions of a User VM. A &struct vm_memory_region_batch
+ * contains multiple &struct vm_memory_region_op for batch processing in the
+ * ACRN Hypervisor.
+ */
+struct vm_memory_region_batch {
+ u16 vmid;
+ u16 reserved[3];
+ u32 regions_num;
+ u64 regions_gpa;
+};
+
+/**
+ * struct vm_memory_mapping - Memory map between a User VM and the Service VM
+ * @pages: Pages in Service VM kernel.
+ * @npages: Number of pages.
+ * @service_vm_va: Virtual address in Service VM kernel.
+ * @user_vm_pa: Physical address in User VM.
+ * @size: Size of this memory region.
+ *
+ * HSM maintains memory mappings between a User VM GPA and the Service VM
+ * kernel VA for accelerating the User VM GPA translation.
+ */
+struct vm_memory_mapping {
+ struct page **pages;
+ int npages;
+ void *service_vm_va;
+ u64 user_vm_pa;
+ size_t size;
+};
+
+/**
+ * struct acrn_ioreq_buffer - Data for setting the ioreq buffer of User VM
+ * @ioreq_buf: The GPA of the IO request shared buffer of a VM
+ *
+ * The parameter for the HC_SET_IOREQ_BUFFER hypercall, which sets up
+ * the shared I/O request buffer between the Service VM and the ACRN
+ * Hypervisor.
+ */
+struct acrn_ioreq_buffer {
+ u64 ioreq_buf;
+};
+
+struct acrn_ioreq_range {
+ struct list_head list;
+ u32 type;
+ u64 start;
+ u64 end;
+};
+
+#define ACRN_IOREQ_CLIENT_DESTROYING 0U
+typedef int (*ioreq_handler_t)(struct acrn_ioreq_client *client,
+ struct acrn_io_request *req);
+/**
+ * struct acrn_ioreq_client - Structure of I/O client.
+ * @name: Client name
+ * @vm: The VM that the client belongs to
+ * @list: List node for this acrn_ioreq_client
+ * @is_default: If this client is the default one
+ * @flags: Flags (ACRN_IOREQ_CLIENT_*)
+ * @range_list: I/O ranges
+ * @range_lock: Lock to protect range_list
+ * @ioreqs_map: The pending I/O requests bitmap.
+ * @handler: I/O requests handler of this client
+ * @thread: The thread which executes the handler
+ * @wq: The wait queue for the handler thread parking
+ * @priv: Data for the thread
+ */
+struct acrn_ioreq_client {
+ char name[ACRN_NAME_LEN];
+ struct acrn_vm *vm;
+ struct list_head list;
+ bool is_default;
+ unsigned long flags;
+ struct list_head range_list;
+ rwlock_t range_lock;
+ DECLARE_BITMAP(ioreqs_map, ACRN_IO_REQUEST_MAX);
+ ioreq_handler_t handler;
+ struct task_struct *thread;
+ wait_queue_head_t wq;
+ void *priv;
+};
+
+#define ACRN_INVALID_VMID (0xffffU)
+
+#define ACRN_VM_FLAG_DESTROYED 0U
+#define ACRN_VM_FLAG_CLEARING_IOREQ 1U
+extern struct list_head acrn_vm_list;
+extern rwlock_t acrn_vm_list_lock;
+/**
+ * struct acrn_vm - Properties of ACRN User VM.
+ * @list: Entry within global list of all VMs.
+ * @vmid: User VM ID.
+ * @vcpu_num: Number of virtual CPUs in the VM.
+ * @flags: Flags (ACRN_VM_FLAG_*) of the VM. These flags
+ * are managed by HSM itself and are distinct
+ * from &acrn_vm_creation.vm_flag.
+ * @regions_mapping_lock: Lock to protect &acrn_vm.regions_mapping and
+ * &acrn_vm.regions_mapping_count.
+ * @regions_mapping: Memory mappings of this VM.
+ * @regions_mapping_count: Number of memory mappings of this VM.
+ * @ioreq_clients_lock: Lock to protect ioreq_clients and default_client
+ * @ioreq_clients: The I/O request clients list of this VM
+ * @default_client: The default I/O request client
+ * @ioreq_buf: I/O request shared buffer
+ * @ioreq_page: The page of the I/O request shared buffer
+ * @pci_conf_addr: Address of a PCI configuration access emulation
+ * @monitor_page: Page of interrupt statistics of User VM
+ * @ioeventfds_lock: Lock to protect ioeventfds list
+ * @ioeventfds: List to link all hsm_ioeventfd
+ * @ioeventfd_client: I/O client for ioeventfds of the VM
+ * @irqfds_lock: Lock to protect irqfds list
+ * @irqfds: List to link all hsm_irqfd
+ * @irqfd_wq: Workqueue for irqfd async shutdown
+ */
+struct acrn_vm {
+ struct list_head list;
+ u16 vmid;
+ int vcpu_num;
+ unsigned long flags;
+ struct mutex regions_mapping_lock;
+ struct vm_memory_mapping regions_mapping[ACRN_MEM_MAPPING_MAX];
+ int regions_mapping_count;
+ spinlock_t ioreq_clients_lock;
+ struct list_head ioreq_clients;
+ struct acrn_ioreq_client *default_client;
+ struct acrn_io_request_buffer *ioreq_buf;
+ struct page *ioreq_page;
+ u32 pci_conf_addr;
+ struct page *monitor_page;
+ struct mutex ioeventfds_lock;
+ struct list_head ioeventfds;
+ struct acrn_ioreq_client *ioeventfd_client;
+ struct mutex irqfds_lock;
+ struct list_head irqfds;
+ struct workqueue_struct *irqfd_wq;
+};
+
+struct acrn_vm *acrn_vm_create(struct acrn_vm *vm,
+ struct acrn_vm_creation *vm_param);
+int acrn_vm_destroy(struct acrn_vm *vm);
+int acrn_mm_region_add(struct acrn_vm *vm, u64 user_gpa, u64 service_gpa,
+ u64 size, u32 mem_type, u32 mem_access_right);
+int acrn_mm_region_del(struct acrn_vm *vm, u64 user_gpa, u64 size);
+int acrn_vm_memseg_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap);
+int acrn_vm_memseg_unmap(struct acrn_vm *vm, struct acrn_vm_memmap *memmap);
+int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap);
+void acrn_vm_all_ram_unmap(struct acrn_vm *vm);
+
+int acrn_ioreq_init(struct acrn_vm *vm, u64 buf_vma);
+void acrn_ioreq_deinit(struct acrn_vm *vm);
+int acrn_ioreq_intr_setup(void);
+void acrn_ioreq_intr_remove(void);
+void acrn_ioreq_request_clear(struct acrn_vm *vm);
+int acrn_ioreq_client_wait(struct acrn_ioreq_client *client);
+int acrn_ioreq_request_default_complete(struct acrn_vm *vm, u16 vcpu);
+struct acrn_ioreq_client *acrn_ioreq_client_create(struct acrn_vm *vm,
+ ioreq_handler_t handler,
+ void *data, bool is_default,
+ const char *name);
+void acrn_ioreq_client_destroy(struct acrn_ioreq_client *client);
+int acrn_ioreq_range_add(struct acrn_ioreq_client *client,
+ u32 type, u64 start, u64 end);
+void acrn_ioreq_range_del(struct acrn_ioreq_client *client,
+ u32 type, u64 start, u64 end);
+
+int acrn_msi_inject(struct acrn_vm *vm, u64 msi_addr, u64 msi_data);
+
+int acrn_ioeventfd_init(struct acrn_vm *vm);
+int acrn_ioeventfd_config(struct acrn_vm *vm, struct acrn_ioeventfd *args);
+void acrn_ioeventfd_deinit(struct acrn_vm *vm);
+
+int acrn_irqfd_init(struct acrn_vm *vm);
+int acrn_irqfd_config(struct acrn_vm *vm, struct acrn_irqfd *args);
+void acrn_irqfd_deinit(struct acrn_vm *vm);
+
+#endif /* __ACRN_HSM_DRV_H */
diff --git a/drivers/virt/acrn/hsm.c b/drivers/virt/acrn/hsm.c
new file mode 100644
index 000000000000..1f6b7c54a1a4
--- /dev/null
+++ b/drivers/virt/acrn/hsm.c
@@ -0,0 +1,470 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ACRN Hypervisor Service Module (HSM)
+ *
+ * Copyright (C) 2020 Intel Corporation. All rights reserved.
+ *
+ * Authors:
+ * Fengwei Yin <fengwei.yin@intel.com>
+ * Yakui Zhao <yakui.zhao@intel.com>
+ */
+
+#include <linux/cpu.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <asm/acrn.h>
+#include <asm/hypervisor.h>
+
+#include "acrn_drv.h"
+
+/*
+ * When /dev/acrn_hsm is opened, a 'struct acrn_vm' object is created to
+ * represent a VM instance and continues to be associated with the opened file
+ * descriptor. All ioctl operations on this file descriptor will be targeted to
+ * the VM instance. Release of this file descriptor will destroy the object.
+ */
+static int acrn_dev_open(struct inode *inode, struct file *filp)
+{
+ struct acrn_vm *vm;
+
+ vm = kzalloc(sizeof(*vm), GFP_KERNEL);
+ if (!vm)
+ return -ENOMEM;
+
+ vm->vmid = ACRN_INVALID_VMID;
+ filp->private_data = vm;
+ return 0;
+}
+
+static int pmcmd_ioctl(u64 cmd, void __user *uptr)
+{
+ struct acrn_pstate_data *px_data;
+ struct acrn_cstate_data *cx_data;
+ u64 *pm_info;
+ int ret = 0;
+
+ switch (cmd & PMCMD_TYPE_MASK) {
+ case ACRN_PMCMD_GET_PX_CNT:
+ case ACRN_PMCMD_GET_CX_CNT:
+ pm_info = kmalloc(sizeof(u64), GFP_KERNEL);
+ if (!pm_info)
+ return -ENOMEM;
+
+ ret = hcall_get_cpu_state(cmd, virt_to_phys(pm_info));
+ if (ret < 0) {
+ kfree(pm_info);
+ break;
+ }
+
+ if (copy_to_user(uptr, pm_info, sizeof(u64)))
+ ret = -EFAULT;
+ kfree(pm_info);
+ break;
+ case ACRN_PMCMD_GET_PX_DATA:
+ px_data = kmalloc(sizeof(*px_data), GFP_KERNEL);
+ if (!px_data)
+ return -ENOMEM;
+
+ ret = hcall_get_cpu_state(cmd, virt_to_phys(px_data));
+ if (ret < 0) {
+ kfree(px_data);
+ break;
+ }
+
+ if (copy_to_user(uptr, px_data, sizeof(*px_data)))
+ ret = -EFAULT;
+ kfree(px_data);
+ break;
+ case ACRN_PMCMD_GET_CX_DATA:
+ cx_data = kmalloc(sizeof(*cx_data), GFP_KERNEL);
+ if (!cx_data)
+ return -ENOMEM;
+
+ ret = hcall_get_cpu_state(cmd, virt_to_phys(cx_data));
+ if (ret < 0) {
+ kfree(cx_data);
+ break;
+ }
+
+ if (copy_to_user(uptr, cx_data, sizeof(*cx_data)))
+ ret = -EFAULT;
+ kfree(cx_data);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * HSM relies on the hypercall layer of the ACRN Hypervisor to sanity-check
+ * the input parameters.
+ */
+static long acrn_dev_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long ioctl_param)
+{
+ struct acrn_vm *vm = filp->private_data;
+ struct acrn_vm_creation *vm_param;
+ struct acrn_vcpu_regs *cpu_regs;
+ struct acrn_ioreq_notify notify;
+ struct acrn_ptdev_irq *irq_info;
+ struct acrn_ioeventfd ioeventfd;
+ struct acrn_vm_memmap memmap;
+ struct acrn_msi_entry *msi;
+ struct acrn_pcidev *pcidev;
+ struct acrn_irqfd irqfd;
+ struct page *page;
+ u64 cstate_cmd;
+ int i, ret = 0;
+
+ if (vm->vmid == ACRN_INVALID_VMID && cmd != ACRN_IOCTL_CREATE_VM) {
+ dev_dbg(acrn_dev.this_device,
+ "ioctl 0x%x: Invalid VM state!\n", cmd);
+ return -EINVAL;
+ }
+
+ switch (cmd) {
+ case ACRN_IOCTL_CREATE_VM:
+ vm_param = memdup_user((void __user *)ioctl_param,
+ sizeof(struct acrn_vm_creation));
+ if (IS_ERR(vm_param))
+ return PTR_ERR(vm_param);
+
+ if ((vm_param->reserved0 | vm_param->reserved1) != 0) {
+ kfree(vm_param);
+ return -EINVAL;
+ }
+
+ vm = acrn_vm_create(vm, vm_param);
+ if (!vm) {
+ ret = -EINVAL;
+ kfree(vm_param);
+ break;
+ }
+
+ if (copy_to_user((void __user *)ioctl_param, vm_param,
+ sizeof(struct acrn_vm_creation))) {
+ acrn_vm_destroy(vm);
+ ret = -EFAULT;
+ }
+
+ kfree(vm_param);
+ break;
+ case ACRN_IOCTL_START_VM:
+ ret = hcall_start_vm(vm->vmid);
+ if (ret < 0)
+ dev_dbg(acrn_dev.this_device,
+ "Failed to start VM %u!\n", vm->vmid);
+ break;
+ case ACRN_IOCTL_PAUSE_VM:
+ ret = hcall_pause_vm(vm->vmid);
+ if (ret < 0)
+ dev_dbg(acrn_dev.this_device,
+ "Failed to pause VM %u!\n", vm->vmid);
+ break;
+ case ACRN_IOCTL_RESET_VM:
+ ret = hcall_reset_vm(vm->vmid);
+ if (ret < 0)
+ dev_dbg(acrn_dev.this_device,
+ "Failed to restart VM %u!\n", vm->vmid);
+ break;
+ case ACRN_IOCTL_DESTROY_VM:
+ ret = acrn_vm_destroy(vm);
+ break;
+ case ACRN_IOCTL_SET_VCPU_REGS:
+ cpu_regs = memdup_user((void __user *)ioctl_param,
+ sizeof(struct acrn_vcpu_regs));
+ if (IS_ERR(cpu_regs))
+ return PTR_ERR(cpu_regs);
+
+ /* Reject non-zero reserved fields; free the copy before returning */
+ for (i = 0; i < ARRAY_SIZE(cpu_regs->reserved); i++)
+ if (cpu_regs->reserved[i]) {
+ kfree(cpu_regs);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cpu_regs->vcpu_regs.reserved_32); i++)
+ if (cpu_regs->vcpu_regs.reserved_32[i]) {
+ kfree(cpu_regs);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cpu_regs->vcpu_regs.reserved_64); i++)
+ if (cpu_regs->vcpu_regs.reserved_64[i]) {
+ kfree(cpu_regs);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cpu_regs->vcpu_regs.gdt.reserved); i++)
+ if (cpu_regs->vcpu_regs.gdt.reserved[i] |
+ cpu_regs->vcpu_regs.idt.reserved[i]) {
+ kfree(cpu_regs);
+ return -EINVAL;
+ }
+
+ ret = hcall_set_vcpu_regs(vm->vmid, virt_to_phys(cpu_regs));
+ if (ret < 0)
+ dev_dbg(acrn_dev.this_device,
+ "Failed to set regs state of VM%u!\n",
+ vm->vmid);
+ kfree(cpu_regs);
+ break;
+ case ACRN_IOCTL_SET_MEMSEG:
+ if (copy_from_user(&memmap, (void __user *)ioctl_param,
+ sizeof(memmap)))
+ return -EFAULT;
+
+ ret = acrn_vm_memseg_map(vm, &memmap);
+ break;
+ case ACRN_IOCTL_UNSET_MEMSEG:
+ if (copy_from_user(&memmap, (void __user *)ioctl_param,
+ sizeof(memmap)))
+ return -EFAULT;
+
+ ret = acrn_vm_memseg_unmap(vm, &memmap);
+ break;
+ case ACRN_IOCTL_ASSIGN_PCIDEV:
+ pcidev = memdup_user((void __user *)ioctl_param,
+ sizeof(struct acrn_pcidev));
+ if (IS_ERR(pcidev))
+ return PTR_ERR(pcidev);
+
+ ret = hcall_assign_pcidev(vm->vmid, virt_to_phys(pcidev));
+ if (ret < 0)
+ dev_dbg(acrn_dev.this_device,
+ "Failed to assign pci device!\n");
+ kfree(pcidev);
+ break;
+ case ACRN_IOCTL_DEASSIGN_PCIDEV:
+ pcidev = memdup_user((void __user *)ioctl_param,
+ sizeof(struct acrn_pcidev));
+ if (IS_ERR(pcidev))
+ return PTR_ERR(pcidev);
+
+ ret = hcall_deassign_pcidev(vm->vmid, virt_to_phys(pcidev));
+ if (ret < 0)
+ dev_dbg(acrn_dev.this_device,
+ "Failed to deassign pci device!\n");
+ kfree(pcidev);
+ break;
+ case ACRN_IOCTL_SET_PTDEV_INTR:
+ irq_info = memdup_user((void __user *)ioctl_param,
+ sizeof(struct acrn_ptdev_irq));
+ if (IS_ERR(irq_info))
+ return PTR_ERR(irq_info);
+
+ ret = hcall_set_ptdev_intr(vm->vmid, virt_to_phys(irq_info));
+ if (ret < 0)
+ dev_dbg(acrn_dev.this_device,
+ "Failed to configure intr for ptdev!\n");
+ kfree(irq_info);
+ break;
+ case ACRN_IOCTL_RESET_PTDEV_INTR:
+ irq_info = memdup_user((void __user *)ioctl_param,
+ sizeof(struct acrn_ptdev_irq));
+ if (IS_ERR(irq_info))
+ return PTR_ERR(irq_info);
+
+ ret = hcall_reset_ptdev_intr(vm->vmid, virt_to_phys(irq_info));
+ if (ret < 0)
+ dev_dbg(acrn_dev.this_device,
+ "Failed to reset intr for ptdev!\n");
+ kfree(irq_info);
+ break;
+ case ACRN_IOCTL_SET_IRQLINE:
+ ret = hcall_set_irqline(vm->vmid, ioctl_param);
+ if (ret < 0)
+ dev_dbg(acrn_dev.this_device,
+ "Failed to set interrupt line!\n");
+ break;
+ case ACRN_IOCTL_INJECT_MSI:
+ msi = memdup_user((void __user *)ioctl_param,
+ sizeof(struct acrn_msi_entry));
+ if (IS_ERR(msi))
+ return PTR_ERR(msi);
+
+ ret = hcall_inject_msi(vm->vmid, virt_to_phys(msi));
+ if (ret < 0)
+ dev_dbg(acrn_dev.this_device,
+ "Failed to inject MSI!\n");
+ kfree(msi);
+ break;
+ case ACRN_IOCTL_VM_INTR_MONITOR:
+ ret = pin_user_pages_fast(ioctl_param, 1,
+ FOLL_WRITE | FOLL_LONGTERM, &page);
+ if (unlikely(ret != 1)) {
+ dev_dbg(acrn_dev.this_device,
+ "Failed to pin intr hdr buffer!\n");
+ return -EFAULT;
+ }
+
+ ret = hcall_vm_intr_monitor(vm->vmid, page_to_phys(page));
+ if (ret < 0) {
+ unpin_user_page(page);
+ dev_dbg(acrn_dev.this_device,
+ "Failed to monitor intr data!\n");
+ return ret;
+ }
+ if (vm->monitor_page)
+ unpin_user_page(vm->monitor_page);
+ vm->monitor_page = page;
+ break;
+ case ACRN_IOCTL_CREATE_IOREQ_CLIENT:
+ if (vm->default_client)
+ return -EEXIST;
+ if (!acrn_ioreq_client_create(vm, NULL, NULL, true, "acrndm"))
+ ret = -EINVAL;
+ break;
+ case ACRN_IOCTL_DESTROY_IOREQ_CLIENT:
+ if (vm->default_client)
+ acrn_ioreq_client_destroy(vm->default_client);
+ break;
+ case ACRN_IOCTL_ATTACH_IOREQ_CLIENT:
+ if (vm->default_client)
+ ret = acrn_ioreq_client_wait(vm->default_client);
+ else
+ ret = -ENODEV;
+ break;
+ case ACRN_IOCTL_NOTIFY_REQUEST_FINISH:
+ if (copy_from_user(&notify, (void __user *)ioctl_param,
+ sizeof(struct acrn_ioreq_notify)))
+ return -EFAULT;
+
+ if (notify.reserved != 0)
+ return -EINVAL;
+
+ ret = acrn_ioreq_request_default_complete(vm, notify.vcpu);
+ break;
+ case ACRN_IOCTL_CLEAR_VM_IOREQ:
+ acrn_ioreq_request_clear(vm);
+ break;
+ case ACRN_IOCTL_PM_GET_CPU_STATE:
+ if (copy_from_user(&cstate_cmd, (void __user *)ioctl_param,
+ sizeof(cstate_cmd)))
+ return -EFAULT;
+
+ ret = pmcmd_ioctl(cstate_cmd, (void __user *)ioctl_param);
+ break;
+ case ACRN_IOCTL_IOEVENTFD:
+ if (copy_from_user(&ioeventfd, (void __user *)ioctl_param,
+ sizeof(ioeventfd)))
+ return -EFAULT;
+
+ if (ioeventfd.reserved != 0)
+ return -EINVAL;
+
+ ret = acrn_ioeventfd_config(vm, &ioeventfd);
+ break;
+ case ACRN_IOCTL_IRQFD:
+ if (copy_from_user(&irqfd, (void __user *)ioctl_param,
+ sizeof(irqfd)))
+ return -EFAULT;
+ ret = acrn_irqfd_config(vm, &irqfd);
+ break;
+ default:
+ dev_dbg(acrn_dev.this_device, "Unknown IOCTL 0x%x!\n", cmd);
+ ret = -ENOTTY;
+ }
+
+ return ret;
+}
+
+static int acrn_dev_release(struct inode *inode, struct file *filp)
+{
+ struct acrn_vm *vm = filp->private_data;
+
+ acrn_vm_destroy(vm);
+ kfree(vm);
+ return 0;
+}
+
+static ssize_t remove_cpu_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u64 cpu, lapicid;
+ int ret;
+
+ if (kstrtoull(buf, 0, &cpu) < 0)
+ return -EINVAL;
+
+ if (cpu >= num_possible_cpus() || cpu == 0 || !cpu_is_hotpluggable(cpu))
+ return -EINVAL;
+
+ if (cpu_online(cpu))
+ remove_cpu(cpu);
+
+ lapicid = cpu_data(cpu).apicid;
+ dev_dbg(dev, "Try to remove cpu %lld with lapicid %lld\n", cpu, lapicid);
+ ret = hcall_sos_remove_cpu(lapicid);
+ if (ret < 0) {
+ dev_err(dev, "Failed to remove cpu %lld!\n", cpu);
+ goto fail_remove;
+ }
+
+ return count;
+
+fail_remove:
+ add_cpu(cpu);
+ return ret;
+}
+static DEVICE_ATTR_WO(remove_cpu);
+
+static struct attribute *acrn_attrs[] = {
+ &dev_attr_remove_cpu.attr,
+ NULL
+};
+
+static struct attribute_group acrn_attr_group = {
+ .attrs = acrn_attrs,
+};
+
+static const struct attribute_group *acrn_attr_groups[] = {
+ &acrn_attr_group,
+ NULL
+};
+
+static const struct file_operations acrn_fops = {
+ .owner = THIS_MODULE,
+ .open = acrn_dev_open,
+ .release = acrn_dev_release,
+ .unlocked_ioctl = acrn_dev_ioctl,
+};
+
+struct miscdevice acrn_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "acrn_hsm",
+ .fops = &acrn_fops,
+ .groups = acrn_attr_groups,
+};
+
+static int __init hsm_init(void)
+{
+ int ret;
+
+ if (x86_hyper_type != X86_HYPER_ACRN)
+ return -ENODEV;
+
+ if (!(cpuid_eax(ACRN_CPUID_FEATURES) & ACRN_FEATURE_PRIVILEGED_VM))
+ return -EPERM;
+
+ ret = misc_register(&acrn_dev);
+ if (ret) {
+ pr_err("Create misc dev failed!\n");
+ return ret;
+ }
+
+ ret = acrn_ioreq_intr_setup();
+ if (ret) {
+ pr_err("Setup I/O request handler failed!\n");
+ misc_deregister(&acrn_dev);
+ return ret;
+ }
+ return 0;
+}
+
+static void __exit hsm_exit(void)
+{
+ acrn_ioreq_intr_remove();
+ misc_deregister(&acrn_dev);
+}
+module_init(hsm_init);
+module_exit(hsm_exit);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ACRN Hypervisor Service Module (HSM)");
diff --git a/drivers/virt/acrn/hypercall.h b/drivers/virt/acrn/hypercall.h
new file mode 100644
index 000000000000..0cfad05bd1a9
--- /dev/null
+++ b/drivers/virt/acrn/hypercall.h
@@ -0,0 +1,254 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ACRN HSM: hypercalls of ACRN Hypervisor
+ */
+#ifndef __ACRN_HSM_HYPERCALL_H
+#define __ACRN_HSM_HYPERCALL_H
+#include <asm/acrn.h>
+
+/*
+ * Hypercall IDs of the ACRN Hypervisor
+ */
+#define _HC_ID(x, y) (((x) << 24) | (y))
+
+#define HC_ID 0x80UL
+
+#define HC_ID_GEN_BASE 0x0UL
+#define HC_SOS_REMOVE_CPU _HC_ID(HC_ID, HC_ID_GEN_BASE + 0x01)
+
+#define HC_ID_VM_BASE 0x10UL
+#define HC_CREATE_VM _HC_ID(HC_ID, HC_ID_VM_BASE + 0x00)
+#define HC_DESTROY_VM _HC_ID(HC_ID, HC_ID_VM_BASE + 0x01)
+#define HC_START_VM _HC_ID(HC_ID, HC_ID_VM_BASE + 0x02)
+#define HC_PAUSE_VM _HC_ID(HC_ID, HC_ID_VM_BASE + 0x03)
+#define HC_RESET_VM _HC_ID(HC_ID, HC_ID_VM_BASE + 0x05)
+#define HC_SET_VCPU_REGS _HC_ID(HC_ID, HC_ID_VM_BASE + 0x06)
+
+#define HC_ID_IRQ_BASE 0x20UL
+#define HC_INJECT_MSI _HC_ID(HC_ID, HC_ID_IRQ_BASE + 0x03)
+#define HC_VM_INTR_MONITOR _HC_ID(HC_ID, HC_ID_IRQ_BASE + 0x04)
+#define HC_SET_IRQLINE _HC_ID(HC_ID, HC_ID_IRQ_BASE + 0x05)
+
+#define HC_ID_IOREQ_BASE 0x30UL
+#define HC_SET_IOREQ_BUFFER _HC_ID(HC_ID, HC_ID_IOREQ_BASE + 0x00)
+#define HC_NOTIFY_REQUEST_FINISH _HC_ID(HC_ID, HC_ID_IOREQ_BASE + 0x01)
+
+#define HC_ID_MEM_BASE 0x40UL
+#define HC_VM_SET_MEMORY_REGIONS _HC_ID(HC_ID, HC_ID_MEM_BASE + 0x02)
+
+#define HC_ID_PCI_BASE 0x50UL
+#define HC_SET_PTDEV_INTR _HC_ID(HC_ID, HC_ID_PCI_BASE + 0x03)
+#define HC_RESET_PTDEV_INTR _HC_ID(HC_ID, HC_ID_PCI_BASE + 0x04)
+#define HC_ASSIGN_PCIDEV _HC_ID(HC_ID, HC_ID_PCI_BASE + 0x05)
+#define HC_DEASSIGN_PCIDEV _HC_ID(HC_ID, HC_ID_PCI_BASE + 0x06)
+
+#define HC_ID_PM_BASE 0x80UL
+#define HC_PM_GET_CPU_STATE _HC_ID(HC_ID, HC_ID_PM_BASE + 0x00)
+
+/**
+ * hcall_sos_remove_cpu() - Remove a vCPU of Service VM
+ * @cpu: The vCPU to be removed
+ *
+ * Return: 0 on success, <0 on failure
+ */
+static inline long hcall_sos_remove_cpu(u64 cpu)
+{
+ return acrn_hypercall1(HC_SOS_REMOVE_CPU, cpu);
+}
+
+/**
+ * hcall_create_vm() - Create a User VM
+ * @vminfo: Service VM GPA of info of User VM creation
+ *
+ * Return: 0 on success, <0 on failure
+ */
+static inline long hcall_create_vm(u64 vminfo)
+{
+ return acrn_hypercall1(HC_CREATE_VM, vminfo);
+}
+
+/**
+ * hcall_start_vm() - Start a User VM
+ * @vmid: User VM ID
+ *
+ * Return: 0 on success, <0 on failure
+ */
+static inline long hcall_start_vm(u64 vmid)
+{
+ return acrn_hypercall1(HC_START_VM, vmid);
+}
+
+/**
+ * hcall_pause_vm() - Pause a User VM
+ * @vmid: User VM ID
+ *
+ * Return: 0 on success, <0 on failure
+ */
+static inline long hcall_pause_vm(u64 vmid)
+{
+ return acrn_hypercall1(HC_PAUSE_VM, vmid);
+}
+
+/**
+ * hcall_destroy_vm() - Destroy a User VM
+ * @vmid: User VM ID
+ *
+ * Return: 0 on success, <0 on failure
+ */
+static inline long hcall_destroy_vm(u64 vmid)
+{
+ return acrn_hypercall1(HC_DESTROY_VM, vmid);
+}
+
+/**
+ * hcall_reset_vm() - Reset a User VM
+ * @vmid: User VM ID
+ *
+ * Return: 0 on success, <0 on failure
+ */
+static inline long hcall_reset_vm(u64 vmid)
+{
+ return acrn_hypercall1(HC_RESET_VM, vmid);
+}
+
+/**
+ * hcall_set_vcpu_regs() - Set up registers of virtual CPU of a User VM
+ * @vmid: User VM ID
+ * @regs_state: Service VM GPA of registers state
+ *
+ * Return: 0 on success, <0 on failure
+ */
+static inline long hcall_set_vcpu_regs(u64 vmid, u64 regs_state)
+{
+ return acrn_hypercall2(HC_SET_VCPU_REGS, vmid, regs_state);
+}
+
+/**
+ * hcall_inject_msi() - Deliver a MSI interrupt to a User VM
+ * @vmid: User VM ID
+ * @msi: Service VM GPA of MSI message
+ *
+ * Return: 0 on success, <0 on failure
+ */
+static inline long hcall_inject_msi(u64 vmid, u64 msi)
+{
+ return acrn_hypercall2(HC_INJECT_MSI, vmid, msi);
+}
+
+/**
+ * hcall_vm_intr_monitor() - Set a shared page for User VM interrupt statistics
+ * @vmid: User VM ID
+ * @addr: Service VM GPA of the shared page
+ *
+ * Return: 0 on success, <0 on failure
+ */
+static inline long hcall_vm_intr_monitor(u64 vmid, u64 addr)
+{
+ return acrn_hypercall2(HC_VM_INTR_MONITOR, vmid, addr);
+}
+
+/**
+ * hcall_set_irqline() - Set or clear an interrupt line
+ * @vmid: User VM ID
+ * @op: Service VM GPA of interrupt line operations
+ *
+ * Return: 0 on success, <0 on failure
+ */
+static inline long hcall_set_irqline(u64 vmid, u64 op)
+{
+ return acrn_hypercall2(HC_SET_IRQLINE, vmid, op);
+}
+
+/**
+ * hcall_set_ioreq_buffer() - Set up the shared buffer for I/O Requests.
+ * @vmid: User VM ID
+ * @buffer: Service VM GPA of the shared buffer
+ *
+ * Return: 0 on success, <0 on failure
+ */
+static inline long hcall_set_ioreq_buffer(u64 vmid, u64 buffer)
+{
+ return acrn_hypercall2(HC_SET_IOREQ_BUFFER, vmid, buffer);
+}
+
+/**
+ * hcall_notify_req_finish() - Notify ACRN Hypervisor of I/O request completion.
+ * @vmid: User VM ID
+ * @vcpu: The vCPU which initiated the I/O request
+ *
+ * Return: 0 on success, <0 on failure
+ */
+static inline long hcall_notify_req_finish(u64 vmid, u64 vcpu)
+{
+ return acrn_hypercall2(HC_NOTIFY_REQUEST_FINISH, vmid, vcpu);
+}
+
+/**
+ * hcall_set_memory_regions() - Inform the hypervisor to set up EPT mappings
+ * @regions_pa: Service VM GPA of &struct vm_memory_region_batch
+ *
+ * Return: 0 on success, <0 on failure
+ */
+static inline long hcall_set_memory_regions(u64 regions_pa)
+{
+ return acrn_hypercall1(HC_VM_SET_MEMORY_REGIONS, regions_pa);
+}
+
+/**
+ * hcall_assign_pcidev() - Assign a PCI device to a User VM
+ * @vmid: User VM ID
+ * @addr: Service VM GPA of the &struct acrn_pcidev
+ *
+ * Return: 0 on success, <0 on failure
+ */
+static inline long hcall_assign_pcidev(u64 vmid, u64 addr)
+{
+ return acrn_hypercall2(HC_ASSIGN_PCIDEV, vmid, addr);
+}
+
+/**
+ * hcall_deassign_pcidev() - De-assign a PCI device from a User VM
+ * @vmid: User VM ID
+ * @addr: Service VM GPA of the &struct acrn_pcidev
+ *
+ * Return: 0 on success, <0 on failure
+ */
+static inline long hcall_deassign_pcidev(u64 vmid, u64 addr)
+{
+ return acrn_hypercall2(HC_DEASSIGN_PCIDEV, vmid, addr);
+}
+
+/**
+ * hcall_set_ptdev_intr() - Configure an interrupt for an assigned PCI device.
+ * @vmid: User VM ID
+ * @irq: Service VM GPA of the &struct acrn_ptdev_irq
+ *
+ * Return: 0 on success, <0 on failure
+ */
+static inline long hcall_set_ptdev_intr(u64 vmid, u64 irq)
+{
+ return acrn_hypercall2(HC_SET_PTDEV_INTR, vmid, irq);
+}
+
+/**
+ * hcall_reset_ptdev_intr() - Reset an interrupt for an assigned PCI device.
+ * @vmid: User VM ID
+ * @irq: Service VM GPA of the &struct acrn_ptdev_irq
+ *
+ * Return: 0 on success, <0 on failure
+ */
+static inline long hcall_reset_ptdev_intr(u64 vmid, u64 irq)
+{
+ return acrn_hypercall2(HC_RESET_PTDEV_INTR, vmid, irq);
+}
+
+/**
+ * hcall_get_cpu_state() - Get P-states and C-states info from the hypervisor
+ * @cmd: The query command (ACRN_PMCMD_*)
+ * @state: Service VM GPA of buffer of P-states and C-states
+ *
+ * Return: 0 on success, <0 on failure
+ */
+static inline long hcall_get_cpu_state(u64 cmd, u64 state)
+{
+ return acrn_hypercall2(HC_PM_GET_CPU_STATE, cmd, state);
+}
+
+#endif /* __ACRN_HSM_HYPERCALL_H */
diff --git a/drivers/virt/acrn/ioeventfd.c b/drivers/virt/acrn/ioeventfd.c
new file mode 100644
index 000000000000..ac4037e9f947
--- /dev/null
+++ b/drivers/virt/acrn/ioeventfd.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ACRN HSM eventfd - use eventfd objects to signal expected I/O requests
+ *
+ * Copyright (C) 2020 Intel Corporation. All rights reserved.
+ *
+ * Authors:
+ * Shuo Liu <shuo.a.liu@intel.com>
+ * Yakui Zhao <yakui.zhao@intel.com>
+ */
+
+#include <linux/eventfd.h>
+#include <linux/slab.h>
+
+#include "acrn_drv.h"
+
+/**
+ * struct hsm_ioeventfd - Properties of HSM ioeventfd
+ * @list: Entry within &acrn_vm.ioeventfds of ioeventfds of a VM
+ * @eventfd: Eventfd of the HSM ioeventfd
+ * @addr: Address of I/O range
+ * @data: Data for matching
+ * @length: Length of I/O range
+ * @type: Type of I/O range (ACRN_IOREQ_TYPE_MMIO/ACRN_IOREQ_TYPE_PORTIO)
+ * @wildcard: Data matching or not
+ */
+struct hsm_ioeventfd {
+ struct list_head list;
+ struct eventfd_ctx *eventfd;
+ u64 addr;
+ u64 data;
+ int length;
+ int type;
+ bool wildcard;
+};
+
+static inline int ioreq_type_from_flags(int flags)
+{
+ return flags & ACRN_IOEVENTFD_FLAG_PIO ?
+ ACRN_IOREQ_TYPE_PORTIO : ACRN_IOREQ_TYPE_MMIO;
+}
+
+static void acrn_ioeventfd_shutdown(struct acrn_vm *vm, struct hsm_ioeventfd *p)
+{
+ lockdep_assert_held(&vm->ioeventfds_lock);
+
+ eventfd_ctx_put(p->eventfd);
+ list_del(&p->list);
+ kfree(p);
+}
+
+static bool hsm_ioeventfd_is_conflict(struct acrn_vm *vm,
+ struct hsm_ioeventfd *ioeventfd)
+{
+ struct hsm_ioeventfd *p;
+
+ lockdep_assert_held(&vm->ioeventfds_lock);
+
+ /* If either one is a wildcard, data matching is skipped. */
+ list_for_each_entry(p, &vm->ioeventfds, list)
+ if (p->eventfd == ioeventfd->eventfd &&
+ p->addr == ioeventfd->addr &&
+ p->type == ioeventfd->type &&
+ (p->wildcard || ioeventfd->wildcard ||
+ p->data == ioeventfd->data))
+ return true;
+
+ return false;
+}
+
+/*
+ * Assign an eventfd to a VM and create a HSM ioeventfd associated with the
+ * eventfd. The properties of the HSM ioeventfd are built from a &struct
+ * acrn_ioeventfd.
+ */
+static int acrn_ioeventfd_assign(struct acrn_vm *vm,
+ struct acrn_ioeventfd *args)
+{
+ struct eventfd_ctx *eventfd;
+ struct hsm_ioeventfd *p;
+ int ret;
+
+ /* Check for range overflow */
+ if (args->addr + args->len < args->addr)
+ return -EINVAL;
+
+ /*
+ * Currently, acrn_ioeventfd is used to support vhost. Accesses of
+ * 1, 2, 4 or 8 bytes in width cover vhost's requirements.
+ */
+ if (!(args->len == 1 || args->len == 2 ||
+ args->len == 4 || args->len == 8))
+ return -EINVAL;
+
+ eventfd = eventfd_ctx_fdget(args->fd);
+ if (IS_ERR(eventfd))
+ return PTR_ERR(eventfd);
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ INIT_LIST_HEAD(&p->list);
+ p->addr = args->addr;
+ p->length = args->len;
+ p->eventfd = eventfd;
+ p->type = ioreq_type_from_flags(args->flags);
+
+ /*
+ * ACRN_IOEVENTFD_FLAG_DATAMATCH flag is set in virtio 1.0 support, the
+ * writing of notification register of each virtqueue may trigger the
+ * notification. There is no data matching requirement.
+ */
+ if (args->flags & ACRN_IOEVENTFD_FLAG_DATAMATCH)
+ p->data = args->data;
+ else
+ p->wildcard = true;
+
+ mutex_lock(&vm->ioeventfds_lock);
+
+ if (hsm_ioeventfd_is_conflict(vm, p)) {
+ ret = -EEXIST;
+ goto unlock_fail;
+ }
+
+ /* register the I/O range into ioreq client */
+ ret = acrn_ioreq_range_add(vm->ioeventfd_client, p->type,
+ p->addr, p->addr + p->length - 1);
+ if (ret < 0)
+ goto unlock_fail;
+
+ list_add_tail(&p->list, &vm->ioeventfds);
+ mutex_unlock(&vm->ioeventfds_lock);
+
+ return 0;
+
+unlock_fail:
+ mutex_unlock(&vm->ioeventfds_lock);
+ kfree(p);
+fail:
+ eventfd_ctx_put(eventfd);
+ return ret;
+}
+
+static int acrn_ioeventfd_deassign(struct acrn_vm *vm,
+ struct acrn_ioeventfd *args)
+{
+ struct hsm_ioeventfd *p;
+ struct eventfd_ctx *eventfd;
+
+ eventfd = eventfd_ctx_fdget(args->fd);
+ if (IS_ERR(eventfd))
+ return PTR_ERR(eventfd);
+
+ mutex_lock(&vm->ioeventfds_lock);
+ list_for_each_entry(p, &vm->ioeventfds, list) {
+ if (p->eventfd != eventfd)
+ continue;
+
+ acrn_ioreq_range_del(vm->ioeventfd_client, p->type,
+ p->addr, p->addr + p->length - 1);
+ acrn_ioeventfd_shutdown(vm, p);
+ break;
+ }
+ mutex_unlock(&vm->ioeventfds_lock);
+
+ eventfd_ctx_put(eventfd);
+ return 0;
+}
+
+static struct hsm_ioeventfd *hsm_ioeventfd_match(struct acrn_vm *vm, u64 addr,
+ u64 data, int len, int type)
+{
+ struct hsm_ioeventfd *p = NULL;
+
+ lockdep_assert_held(&vm->ioeventfds_lock);
+
+ list_for_each_entry(p, &vm->ioeventfds, list) {
+ if (p->type == type && p->addr == addr && p->length >= len &&
+ (p->wildcard || p->data == data))
+ return p;
+ }
+
+ return NULL;
+}
+
+static int acrn_ioeventfd_handler(struct acrn_ioreq_client *client,
+ struct acrn_io_request *req)
+{
+ struct hsm_ioeventfd *p;
+ u64 addr, val;
+ int size;
+
+ if (req->type == ACRN_IOREQ_TYPE_MMIO) {
+ /*
+ * I/O requests are dispatched by range check only, so an
+ * acrn_ioreq_client needs to process both READ and WRITE accesses
+ * of the same range. READ accesses are safe to ignore here
+ * because virtio PCI devices write the notify registers for
+ * notification.
+ */
+ if (req->reqs.mmio_request.direction == ACRN_IOREQ_DIR_READ) {
+ /* a read does nothing here and returns 0 */
+ req->reqs.mmio_request.value = 0;
+ return 0;
+ }
+ addr = req->reqs.mmio_request.address;
+ size = req->reqs.mmio_request.size;
+ val = req->reqs.mmio_request.value;
+ } else {
+ if (req->reqs.pio_request.direction == ACRN_IOREQ_DIR_READ) {
+ /* a read does nothing here and returns 0 */
+ req->reqs.pio_request.value = 0;
+ return 0;
+ }
+ addr = req->reqs.pio_request.address;
+ size = req->reqs.pio_request.size;
+ val = req->reqs.pio_request.value;
+ }
+
+ mutex_lock(&client->vm->ioeventfds_lock);
+ p = hsm_ioeventfd_match(client->vm, addr, val, size, req->type);
+ if (p)
+ eventfd_signal(p->eventfd, 1);
+ mutex_unlock(&client->vm->ioeventfds_lock);
+
+ return 0;
+}
+
+int acrn_ioeventfd_config(struct acrn_vm *vm, struct acrn_ioeventfd *args)
+{
+ int ret;
+
+ if (args->flags & ACRN_IOEVENTFD_FLAG_DEASSIGN)
+ ret = acrn_ioeventfd_deassign(vm, args);
+ else
+ ret = acrn_ioeventfd_assign(vm, args);
+
+ return ret;
+}
+
+int acrn_ioeventfd_init(struct acrn_vm *vm)
+{
+ char name[ACRN_NAME_LEN];
+
+ mutex_init(&vm->ioeventfds_lock);
+ INIT_LIST_HEAD(&vm->ioeventfds);
+ snprintf(name, sizeof(name), "ioeventfd-%u", vm->vmid);
+ vm->ioeventfd_client = acrn_ioreq_client_create(vm,
+ acrn_ioeventfd_handler,
+ NULL, false, name);
+ if (!vm->ioeventfd_client) {
+ dev_err(acrn_dev.this_device, "Failed to create ioeventfd ioreq client!\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(acrn_dev.this_device, "VM %u ioeventfd init.\n", vm->vmid);
+ return 0;
+}
+
+void acrn_ioeventfd_deinit(struct acrn_vm *vm)
+{
+ struct hsm_ioeventfd *p, *next;
+
+ dev_dbg(acrn_dev.this_device, "VM %u ioeventfd deinit.\n", vm->vmid);
+ acrn_ioreq_client_destroy(vm->ioeventfd_client);
+ mutex_lock(&vm->ioeventfds_lock);
+ list_for_each_entry_safe(p, next, &vm->ioeventfds, list)
+ acrn_ioeventfd_shutdown(vm, p);
+ mutex_unlock(&vm->ioeventfds_lock);
+}
diff --git a/drivers/virt/acrn/ioreq.c b/drivers/virt/acrn/ioreq.c
new file mode 100644
index 000000000000..80b2e3f0e276
--- /dev/null
+++ b/drivers/virt/acrn/ioreq.c
@@ -0,0 +1,657 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ACRN_HSM: Handle I/O requests
+ *
+ * Copyright (C) 2020 Intel Corporation. All rights reserved.
+ *
+ * Authors:
+ * Jason Chen CJ <jason.cj.chen@intel.com>
+ * Fengwei Yin <fengwei.yin@intel.com>
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kthread.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+#include <asm/acrn.h>
+
+#include "acrn_drv.h"
+
+static void ioreq_pause(void);
+static void ioreq_resume(void);
+
+static void ioreq_dispatcher(struct work_struct *work);
+static struct workqueue_struct *ioreq_wq;
+static DECLARE_WORK(ioreq_work, ioreq_dispatcher);
+
+static inline bool has_pending_request(struct acrn_ioreq_client *client)
+{
+ return !bitmap_empty(client->ioreqs_map, ACRN_IO_REQUEST_MAX);
+}
+
+static inline bool is_destroying(struct acrn_ioreq_client *client)
+{
+ return test_bit(ACRN_IOREQ_CLIENT_DESTROYING, &client->flags);
+}
+
+static int ioreq_complete_request(struct acrn_vm *vm, u16 vcpu,
+ struct acrn_io_request *acrn_req)
+{
+ bool polling_mode;
+ int ret = 0;
+
+ polling_mode = acrn_req->completion_polling;
+ /* The store-release makes sure the writes are done before completion */
+ smp_store_release(&acrn_req->processed, ACRN_IOREQ_STATE_COMPLETE);
+
+ /*
+ * To fulfill the requirement of real-time in several industry
+ * scenarios, like automotive, ACRN can run under the partition mode,
+ * in which User VMs and Service VM are bound to dedicated CPU cores.
+ * Polling mode of handling the I/O request is introduced to achieve a
+ * faster I/O request handling. In polling mode, the hypervisor polls
+ * I/O request's completion. Once an I/O request is marked as
+ * ACRN_IOREQ_STATE_COMPLETE, hypervisor resumes from the polling point
+ * to continue the I/O request flow. Thus, the completion notification
+ * from HSM of I/O request is not needed. Please note,
+ * completion_polling needs to be read before the I/O request being
+ * marked as ACRN_IOREQ_STATE_COMPLETE to avoid racing with the
+ * hypervisor.
+ */
+ if (!polling_mode) {
+ ret = hcall_notify_req_finish(vm->vmid, vcpu);
+ if (ret < 0)
+ dev_err(acrn_dev.this_device,
+ "Notify I/O request finished failed!\n");
+ }
+
+ return ret;
+}
+
+static int acrn_ioreq_complete_request(struct acrn_ioreq_client *client,
+ u16 vcpu,
+ struct acrn_io_request *acrn_req)
+{
+ int ret;
+
+ if (vcpu >= client->vm->vcpu_num)
+ return -EINVAL;
+
+ clear_bit(vcpu, client->ioreqs_map);
+ if (!acrn_req) {
+ acrn_req = (struct acrn_io_request *)client->vm->ioreq_buf;
+ acrn_req += vcpu;
+ }
+
+ ret = ioreq_complete_request(client->vm, vcpu, acrn_req);
+
+ return ret;
+}
+
+int acrn_ioreq_request_default_complete(struct acrn_vm *vm, u16 vcpu)
+{
+ int ret = 0;
+
+ spin_lock_bh(&vm->ioreq_clients_lock);
+ if (vm->default_client)
+ ret = acrn_ioreq_complete_request(vm->default_client,
+ vcpu, NULL);
+ spin_unlock_bh(&vm->ioreq_clients_lock);
+
+ return ret;
+}
+
+/**
+ * acrn_ioreq_range_add() - Add an iorange monitored by an ioreq client
+ * @client: The ioreq client
+ * @type: Type (ACRN_IOREQ_TYPE_MMIO or ACRN_IOREQ_TYPE_PORTIO)
+ * @start: Start address of iorange
+ * @end: End address of iorange
+ *
+ * Return: 0 on success, <0 on error
+ */
+int acrn_ioreq_range_add(struct acrn_ioreq_client *client,
+ u32 type, u64 start, u64 end)
+{
+ struct acrn_ioreq_range *range;
+
+ if (end < start) {
+ dev_err(acrn_dev.this_device,
+ "Invalid IO range [0x%llx,0x%llx]\n", start, end);
+ return -EINVAL;
+ }
+
+ range = kzalloc(sizeof(*range), GFP_KERNEL);
+ if (!range)
+ return -ENOMEM;
+
+ range->type = type;
+ range->start = start;
+ range->end = end;
+
+ write_lock_bh(&client->range_lock);
+ list_add(&range->list, &client->range_list);
+ write_unlock_bh(&client->range_lock);
+
+ return 0;
+}
+
+/**
+ * acrn_ioreq_range_del() - Del an iorange monitored by an ioreq client
+ * @client: The ioreq client
+ * @type: Type (ACRN_IOREQ_TYPE_MMIO or ACRN_IOREQ_TYPE_PORTIO)
+ * @start: Start address of iorange
+ * @end: End address of iorange
+ */
+void acrn_ioreq_range_del(struct acrn_ioreq_client *client,
+ u32 type, u64 start, u64 end)
+{
+ struct acrn_ioreq_range *range;
+
+ write_lock_bh(&client->range_lock);
+ list_for_each_entry(range, &client->range_list, list) {
+ if (type == range->type &&
+ start == range->start &&
+ end == range->end) {
+ list_del(&range->list);
+ kfree(range);
+ break;
+ }
+ }
+ write_unlock_bh(&client->range_lock);
+}
+
+/*
+ * ioreq_task() is the execution entity of handler thread of an I/O client.
+ * The handler callback of the I/O client is called within the handler thread.
+ */
+static int ioreq_task(void *data)
+{
+ struct acrn_ioreq_client *client = data;
+ struct acrn_io_request *req;
+ unsigned long *ioreqs_map;
+ int vcpu, ret;
+
+ /*
+ * Lockless access to ioreqs_map is safe, because
+ * 1) set_bit() and clear_bit() are atomic operations.
+ * 2) I/O requests arrive serialized. The access flow of ioreqs_map is:
+ * set_bit() - in ioreq_work handler
+ * Handler callback handles corresponding I/O request
+ * clear_bit() - in handler thread (including ACRN userspace)
+ * Mark corresponding I/O request completed
+ * Loop again if a new I/O request occurs
+ */
+ ioreqs_map = client->ioreqs_map;
+ while (!kthread_should_stop()) {
+ acrn_ioreq_client_wait(client);
+ while (has_pending_request(client)) {
+ vcpu = find_first_bit(ioreqs_map, client->vm->vcpu_num);
+ req = client->vm->ioreq_buf->req_slot + vcpu;
+ ret = client->handler(client, req);
+ if (ret < 0) {
+ dev_err(acrn_dev.this_device,
+ "IO handle failure: %d\n", ret);
+ break;
+ }
+ acrn_ioreq_complete_request(client, vcpu, req);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * For the non-default I/O clients, give them chance to complete the current
+ * I/O requests if there are any. For the default I/O client, it is safe to
+ * clear all pending I/O requests because the clearing request is from ACRN
+ * userspace.
+ */
+void acrn_ioreq_request_clear(struct acrn_vm *vm)
+{
+ struct acrn_ioreq_client *client;
+ bool has_pending = false;
+ unsigned long vcpu;
+ int retry = 10;
+
+ /*
+ * IO requests of this VM will be completed directly in
+ * acrn_ioreq_dispatch if ACRN_VM_FLAG_CLEARING_IOREQ flag is set.
+ */
+ set_bit(ACRN_VM_FLAG_CLEARING_IOREQ, &vm->flags);
+
+ /*
+ * acrn_ioreq_request_clear() is only called in the VM reset case. Simply
+ * wait up to 100ms in total for the I/O requests to complete.
+ */
+ do {
+ spin_lock_bh(&vm->ioreq_clients_lock);
+ list_for_each_entry(client, &vm->ioreq_clients, list) {
+ has_pending = has_pending_request(client);
+ if (has_pending)
+ break;
+ }
+ spin_unlock_bh(&vm->ioreq_clients_lock);
+
+ if (has_pending)
+ schedule_timeout_interruptible(HZ / 100);
+ } while (has_pending && --retry > 0);
+ if (retry == 0)
+ dev_warn(acrn_dev.this_device,
+ "%s cannot flush pending request!\n", client->name);
+
+ /* Clear all ioreqs belonging to the default client */
+ spin_lock_bh(&vm->ioreq_clients_lock);
+ client = vm->default_client;
+ if (client) {
+ vcpu = find_next_bit(client->ioreqs_map,
+ ACRN_IO_REQUEST_MAX, 0);
+ while (vcpu < ACRN_IO_REQUEST_MAX) {
+ acrn_ioreq_complete_request(client, vcpu, NULL);
+ vcpu = find_next_bit(client->ioreqs_map,
+ ACRN_IO_REQUEST_MAX, vcpu + 1);
+ }
+ }
+ spin_unlock_bh(&vm->ioreq_clients_lock);
+
+ /* Clear ACRN_VM_FLAG_CLEARING_IOREQ flag after the clearing */
+ clear_bit(ACRN_VM_FLAG_CLEARING_IOREQ, &vm->flags);
+}
+
+int acrn_ioreq_client_wait(struct acrn_ioreq_client *client)
+{
+ if (client->is_default) {
+ /*
+ * In the default client, a user space thread waits on the
+ * waitqueue. The is_destroying() check is used to notify user
+ * space the client is going to be destroyed.
+ */
+ wait_event_interruptible(client->wq,
+ has_pending_request(client) ||
+ is_destroying(client));
+ if (is_destroying(client))
+ return -ENODEV;
+ } else {
+ wait_event_interruptible(client->wq,
+ has_pending_request(client) ||
+ kthread_should_stop());
+ }
+
+ return 0;
+}
+
+static bool is_cfg_addr(struct acrn_io_request *req)
+{
+ return ((req->type == ACRN_IOREQ_TYPE_PORTIO) &&
+ (req->reqs.pio_request.address == 0xcf8));
+}
+
+static bool is_cfg_data(struct acrn_io_request *req)
+{
+ return ((req->type == ACRN_IOREQ_TYPE_PORTIO) &&
+ ((req->reqs.pio_request.address >= 0xcfc) &&
+ (req->reqs.pio_request.address < (0xcfc + 4))));
+}
+
+/* The low 8 bits of a supported pci_reg address */
+#define PCI_LOWREG_MASK 0xFC
+/* The high 4 bits of a supported pci_reg address */
+#define PCI_HIGHREG_MASK 0xF00
+/* Max number of supported functions */
+#define PCI_FUNCMAX 7
+/* Max number of supported slots */
+#define PCI_SLOTMAX 31
+/* Max number of supported buses */
+#define PCI_BUSMAX 255
+#define CONF1_ENABLE 0x80000000UL
+/*
+ * A PCI configuration space access via PIO 0xCF8 and 0xCFC normally has the
+ * following two steps:
+ * 1) write the address into the 0xCF8 port
+ * 2) access the data in/from 0xCFC
+ * This function combines such paired PCI configuration space I/O requests into
+ * one ACRN_IOREQ_TYPE_PCICFG type I/O request and continues the processing.
+ */
+static bool handle_cf8cfc(struct acrn_vm *vm,
+ struct acrn_io_request *req, u16 vcpu)
+{
+ int offset, pci_cfg_addr, pci_reg;
+ bool is_handled = false;
+
+ if (is_cfg_addr(req)) {
+ WARN_ON(req->reqs.pio_request.size != 4);
+ if (req->reqs.pio_request.direction == ACRN_IOREQ_DIR_WRITE)
+ vm->pci_conf_addr = req->reqs.pio_request.value;
+ else
+ req->reqs.pio_request.value = vm->pci_conf_addr;
+ is_handled = true;
+ } else if (is_cfg_data(req)) {
+ if (!(vm->pci_conf_addr & CONF1_ENABLE)) {
+ if (req->reqs.pio_request.direction ==
+ ACRN_IOREQ_DIR_READ)
+ req->reqs.pio_request.value = 0xffffffff;
+ is_handled = true;
+ } else {
+ offset = req->reqs.pio_request.address - 0xcfc;
+
+ req->type = ACRN_IOREQ_TYPE_PCICFG;
+ pci_cfg_addr = vm->pci_conf_addr;
+ req->reqs.pci_request.bus =
+ (pci_cfg_addr >> 16) & PCI_BUSMAX;
+ req->reqs.pci_request.dev =
+ (pci_cfg_addr >> 11) & PCI_SLOTMAX;
+ req->reqs.pci_request.func =
+ (pci_cfg_addr >> 8) & PCI_FUNCMAX;
+ pci_reg = (pci_cfg_addr & PCI_LOWREG_MASK) +
+ ((pci_cfg_addr >> 16) & PCI_HIGHREG_MASK);
+ req->reqs.pci_request.reg = pci_reg + offset;
+ }
+ }
+
+ if (is_handled)
+ ioreq_complete_request(vm, vcpu, req);
+
+ return is_handled;
+}
+
+static bool in_range(struct acrn_ioreq_range *range,
+ struct acrn_io_request *req)
+{
+ bool ret = false;
+
+ if (range->type == req->type) {
+ switch (req->type) {
+ case ACRN_IOREQ_TYPE_MMIO:
+ if (req->reqs.mmio_request.address >= range->start &&
+ (req->reqs.mmio_request.address +
+ req->reqs.mmio_request.size - 1) <= range->end)
+ ret = true;
+ break;
+ case ACRN_IOREQ_TYPE_PORTIO:
+ if (req->reqs.pio_request.address >= range->start &&
+ (req->reqs.pio_request.address +
+ req->reqs.pio_request.size - 1) <= range->end)
+ ret = true;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static struct acrn_ioreq_client *find_ioreq_client(struct acrn_vm *vm,
+ struct acrn_io_request *req)
+{
+ struct acrn_ioreq_client *client, *found = NULL;
+ struct acrn_ioreq_range *range;
+
+ lockdep_assert_held(&vm->ioreq_clients_lock);
+
+ list_for_each_entry(client, &vm->ioreq_clients, list) {
+ read_lock_bh(&client->range_lock);
+ list_for_each_entry(range, &client->range_list, list) {
+ if (in_range(range, req)) {
+ found = client;
+ break;
+ }
+ }
+ read_unlock_bh(&client->range_lock);
+ if (found)
+ break;
+ }
+ return found ? found : vm->default_client;
+}
+
+/**
+ * acrn_ioreq_client_create() - Create an ioreq client
+ * @vm: The VM that this client belongs to
+ * @handler: The ioreq_handler of the ioreq client. acrn_hsm will create a
+ * kernel thread and call the handler to handle I/O requests.
+ * @priv: Private data for the handler
+ * @is_default: If it is the default client
+ * @name: The name of ioreq client
+ *
+ * Return: acrn_ioreq_client pointer on success, NULL on error
+ */
+struct acrn_ioreq_client *acrn_ioreq_client_create(struct acrn_vm *vm,
+ ioreq_handler_t handler,
+ void *priv, bool is_default,
+ const char *name)
+{
+ struct acrn_ioreq_client *client;
+
+ if (!handler && !is_default) {
+ dev_dbg(acrn_dev.this_device,
+ "Cannot create non-default client w/o handler!\n");
+ return NULL;
+ }
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
+ if (!client)
+ return NULL;
+
+ client->handler = handler;
+ client->vm = vm;
+ client->priv = priv;
+ client->is_default = is_default;
+ if (name)
+ strncpy(client->name, name, sizeof(client->name) - 1);
+ rwlock_init(&client->range_lock);
+ INIT_LIST_HEAD(&client->range_list);
+ init_waitqueue_head(&client->wq);
+
+ if (client->handler) {
+ client->thread = kthread_run(ioreq_task, client, "VM%u-%s",
+ client->vm->vmid, client->name);
+ if (IS_ERR(client->thread)) {
+ kfree(client);
+ return NULL;
+ }
+ }
+
+ spin_lock_bh(&vm->ioreq_clients_lock);
+ if (is_default)
+ vm->default_client = client;
+ else
+ list_add(&client->list, &vm->ioreq_clients);
+ spin_unlock_bh(&vm->ioreq_clients_lock);
+
+ dev_dbg(acrn_dev.this_device, "Created ioreq client %s.\n", name);
+ return client;
+}
+
+/**
+ * acrn_ioreq_client_destroy() - Destroy an ioreq client
+ * @client: The ioreq client
+ */
+void acrn_ioreq_client_destroy(struct acrn_ioreq_client *client)
+{
+ struct acrn_ioreq_range *range, *next;
+ struct acrn_vm *vm = client->vm;
+
+ dev_dbg(acrn_dev.this_device,
+ "Destroy ioreq client %s.\n", client->name);
+ ioreq_pause();
+ set_bit(ACRN_IOREQ_CLIENT_DESTROYING, &client->flags);
+ if (client->is_default)
+ wake_up_interruptible(&client->wq);
+ else
+ kthread_stop(client->thread);
+
+ spin_lock_bh(&vm->ioreq_clients_lock);
+ if (client->is_default)
+ vm->default_client = NULL;
+ else
+ list_del(&client->list);
+ spin_unlock_bh(&vm->ioreq_clients_lock);
+
+ write_lock_bh(&client->range_lock);
+ list_for_each_entry_safe(range, next, &client->range_list, list) {
+ list_del(&range->list);
+ kfree(range);
+ }
+ write_unlock_bh(&client->range_lock);
+ kfree(client);
+
+ ioreq_resume();
+}
+
+static int acrn_ioreq_dispatch(struct acrn_vm *vm)
+{
+ struct acrn_ioreq_client *client;
+ struct acrn_io_request *req;
+ int i;
+
+ for (i = 0; i < vm->vcpu_num; i++) {
+ req = vm->ioreq_buf->req_slot + i;
+
+ /* barrier the read of processed of acrn_io_request */
+ if (smp_load_acquire(&req->processed) ==
+ ACRN_IOREQ_STATE_PENDING) {
+ /* Complete the IO request directly in clearing stage */
+ if (test_bit(ACRN_VM_FLAG_CLEARING_IOREQ, &vm->flags)) {
+ ioreq_complete_request(vm, i, req);
+ continue;
+ }
+ if (handle_cf8cfc(vm, req, i))
+ continue;
+
+ spin_lock_bh(&vm->ioreq_clients_lock);
+ client = find_ioreq_client(vm, req);
+ if (!client) {
+ dev_err(acrn_dev.this_device,
+ "Failed to find ioreq client!\n");
+ spin_unlock_bh(&vm->ioreq_clients_lock);
+ return -EINVAL;
+ }
+ if (!client->is_default)
+ req->kernel_handled = 1;
+ else
+ req->kernel_handled = 0;
+ /*
+ * The store-release makes sure the writes are done
+ * before setting ACRN_IOREQ_STATE_PROCESSING.
+ */
+ smp_store_release(&req->processed,
+ ACRN_IOREQ_STATE_PROCESSING);
+ set_bit(i, client->ioreqs_map);
+ wake_up_interruptible(&client->wq);
+ spin_unlock_bh(&vm->ioreq_clients_lock);
+ }
+ }
+
+ return 0;
+}
+
+static void ioreq_dispatcher(struct work_struct *work)
+{
+ struct acrn_vm *vm;
+
+ read_lock(&acrn_vm_list_lock);
+ list_for_each_entry(vm, &acrn_vm_list, list) {
+ if (!vm->ioreq_buf)
+ break;
+ acrn_ioreq_dispatch(vm);
+ }
+ read_unlock(&acrn_vm_list_lock);
+}
+
+static void ioreq_intr_handler(void)
+{
+ queue_work(ioreq_wq, &ioreq_work);
+}
+
+static void ioreq_pause(void)
+{
+ /* Flush and unarm the handler to ensure no I/O requests pending */
+ acrn_remove_intr_handler();
+ drain_workqueue(ioreq_wq);
+}
+
+static void ioreq_resume(void)
+{
+ /* Schedule after enabling in case other clients miss interrupt */
+ acrn_setup_intr_handler(ioreq_intr_handler);
+ queue_work(ioreq_wq, &ioreq_work);
+}
+
+int acrn_ioreq_intr_setup(void)
+{
+ acrn_setup_intr_handler(ioreq_intr_handler);
+ ioreq_wq = alloc_workqueue("ioreq_wq",
+ WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
+ if (!ioreq_wq) {
+ dev_err(acrn_dev.this_device, "Failed to alloc workqueue!\n");
+ acrn_remove_intr_handler();
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+void acrn_ioreq_intr_remove(void)
+{
+ if (ioreq_wq)
+ destroy_workqueue(ioreq_wq);
+ acrn_remove_intr_handler();
+}
+
+int acrn_ioreq_init(struct acrn_vm *vm, u64 buf_vma)
+{
+ struct acrn_ioreq_buffer *set_buffer;
+ struct page *page;
+ int ret;
+
+ if (vm->ioreq_buf)
+ return -EEXIST;
+
+ set_buffer = kzalloc(sizeof(*set_buffer), GFP_KERNEL);
+ if (!set_buffer)
+ return -ENOMEM;
+
+ ret = pin_user_pages_fast(buf_vma, 1,
+ FOLL_WRITE | FOLL_LONGTERM, &page);
+ if (unlikely(ret != 1) || !page) {
+ dev_err(acrn_dev.this_device, "Failed to pin ioreq page!\n");
+ ret = -EFAULT;
+ goto free_buf;
+ }
+
+ vm->ioreq_buf = page_address(page);
+ vm->ioreq_page = page;
+ set_buffer->ioreq_buf = page_to_phys(page);
+ ret = hcall_set_ioreq_buffer(vm->vmid, virt_to_phys(set_buffer));
+ if (ret < 0) {
+ dev_err(acrn_dev.this_device, "Failed to init ioreq buffer!\n");
+ unpin_user_page(page);
+ vm->ioreq_buf = NULL;
+ goto free_buf;
+ }
+
+ dev_dbg(acrn_dev.this_device,
+ "Init ioreq buffer %pK!\n", vm->ioreq_buf);
+ ret = 0;
+free_buf:
+ kfree(set_buffer);
+ return ret;
+}
+
+void acrn_ioreq_deinit(struct acrn_vm *vm)
+{
+ struct acrn_ioreq_client *client, *next;
+
+ dev_dbg(acrn_dev.this_device,
+ "Deinit ioreq buffer %pK!\n", vm->ioreq_buf);
+ /* Destroy all clients belonging to this VM */
+ list_for_each_entry_safe(client, next, &vm->ioreq_clients, list)
+ acrn_ioreq_client_destroy(client);
+ if (vm->default_client)
+ acrn_ioreq_client_destroy(vm->default_client);
+
+ if (vm->ioreq_buf && vm->ioreq_page) {
+ unpin_user_page(vm->ioreq_page);
+ vm->ioreq_buf = NULL;
+ }
+}
diff --git a/drivers/virt/acrn/irqfd.c b/drivers/virt/acrn/irqfd.c
new file mode 100644
index 000000000000..a8766d528e29
--- /dev/null
+++ b/drivers/virt/acrn/irqfd.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ACRN HSM irqfd: use eventfd objects to inject virtual interrupts
+ *
+ * Copyright (C) 2020 Intel Corporation. All rights reserved.
+ *
+ * Authors:
+ * Shuo Liu <shuo.a.liu@intel.com>
+ * Yakui Zhao <yakui.zhao@intel.com>
+ */
+
+#include <linux/eventfd.h>
+#include <linux/file.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+
+#include "acrn_drv.h"
+
+static LIST_HEAD(acrn_irqfd_clients);
+static DEFINE_MUTEX(acrn_irqfds_mutex);
+
+/**
+ * struct hsm_irqfd - Properties of HSM irqfd
+ * @vm: Associated VM pointer
+ * @wait: Entry of wait-queue
+ * @shutdown: Async shutdown work
+ * @eventfd: Associated eventfd
+ * @list: Entry within &acrn_vm.irqfds of irqfds of a VM
+ * @pt: Structure for select/poll on the associated eventfd
+ * @msi: MSI data
+ */
+struct hsm_irqfd {
+ struct acrn_vm *vm;
+ wait_queue_entry_t wait;
+ struct work_struct shutdown;
+ struct eventfd_ctx *eventfd;
+ struct list_head list;
+ poll_table pt;
+ struct acrn_msi_entry msi;
+};
+
+static void acrn_irqfd_inject(struct hsm_irqfd *irqfd)
+{
+ struct acrn_vm *vm = irqfd->vm;
+
+ acrn_msi_inject(vm, irqfd->msi.msi_addr,
+ irqfd->msi.msi_data);
+}
+
+static void hsm_irqfd_shutdown(struct hsm_irqfd *irqfd)
+{
+ u64 cnt;
+
+ lockdep_assert_held(&irqfd->vm->irqfds_lock);
+
+ /* remove from wait queue */
+ list_del_init(&irqfd->list);
+ eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);
+ eventfd_ctx_put(irqfd->eventfd);
+ kfree(irqfd);
+}
+
+static void hsm_irqfd_shutdown_work(struct work_struct *work)
+{
+ struct hsm_irqfd *irqfd;
+ struct acrn_vm *vm;
+
+ irqfd = container_of(work, struct hsm_irqfd, shutdown);
+ vm = irqfd->vm;
+ mutex_lock(&vm->irqfds_lock);
+ if (!list_empty(&irqfd->list))
+ hsm_irqfd_shutdown(irqfd);
+ mutex_unlock(&vm->irqfds_lock);
+}
+
+/* Called with wqh->lock held and interrupts disabled */
+static int hsm_irqfd_wakeup(wait_queue_entry_t *wait, unsigned int mode,
+ int sync, void *key)
+{
+ unsigned long poll_bits = (unsigned long)key;
+ struct hsm_irqfd *irqfd;
+ struct acrn_vm *vm;
+
+ irqfd = container_of(wait, struct hsm_irqfd, wait);
+ vm = irqfd->vm;
+ if (poll_bits & POLLIN)
+ /* An event has been signaled, inject an interrupt */
+ acrn_irqfd_inject(irqfd);
+
+ if (poll_bits & POLLHUP)
+ /* Defer the shutdown work to a thread since wqh->lock is held here */
+ queue_work(vm->irqfd_wq, &irqfd->shutdown);
+
+ return 0;
+}
+
+static void hsm_irqfd_poll_func(struct file *file, wait_queue_head_t *wqh,
+ poll_table *pt)
+{
+ struct hsm_irqfd *irqfd;
+
+ irqfd = container_of(pt, struct hsm_irqfd, pt);
+ add_wait_queue(wqh, &irqfd->wait);
+}
+
+/*
+ * Assign an eventfd to a VM and create an HSM irqfd associated with the
+ * eventfd. The properties of the HSM irqfd are built from a &struct
+ * acrn_irqfd.
+ */
+static int acrn_irqfd_assign(struct acrn_vm *vm, struct acrn_irqfd *args)
+{
+ struct eventfd_ctx *eventfd = NULL;
+ struct hsm_irqfd *irqfd, *tmp;
+ unsigned int events;
+ struct fd f;
+ int ret = 0;
+
+ irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
+ if (!irqfd)
+ return -ENOMEM;
+
+ irqfd->vm = vm;
+ memcpy(&irqfd->msi, &args->msi, sizeof(args->msi));
+ INIT_LIST_HEAD(&irqfd->list);
+ INIT_WORK(&irqfd->shutdown, hsm_irqfd_shutdown_work);
+
+ f = fdget(args->fd);
+ if (!f.file) {
+ ret = -EBADF;
+ goto out;
+ }
+
+ eventfd = eventfd_ctx_fileget(f.file);
+ if (IS_ERR(eventfd)) {
+ ret = PTR_ERR(eventfd);
+ goto fail;
+ }
+
+ irqfd->eventfd = eventfd;
+
+ /*
+	 * Install custom wake-up handling to be notified whenever the underlying
+ * eventfd is signaled.
+ */
+ init_waitqueue_func_entry(&irqfd->wait, hsm_irqfd_wakeup);
+ init_poll_funcptr(&irqfd->pt, hsm_irqfd_poll_func);
+
+ mutex_lock(&vm->irqfds_lock);
+ list_for_each_entry(tmp, &vm->irqfds, list) {
+ if (irqfd->eventfd != tmp->eventfd)
+ continue;
+ ret = -EBUSY;
+ mutex_unlock(&vm->irqfds_lock);
+ goto fail;
+ }
+ list_add_tail(&irqfd->list, &vm->irqfds);
+ mutex_unlock(&vm->irqfds_lock);
+
+	/* Check for an event already pending at this stage */
+ events = f.file->f_op->poll(f.file, &irqfd->pt);
+
+ if (events & POLLIN)
+ acrn_irqfd_inject(irqfd);
+
+ fdput(f);
+ return 0;
+fail:
+ if (eventfd && !IS_ERR(eventfd))
+ eventfd_ctx_put(eventfd);
+
+ fdput(f);
+out:
+ kfree(irqfd);
+ return ret;
+}
+
+static int acrn_irqfd_deassign(struct acrn_vm *vm,
+ struct acrn_irqfd *args)
+{
+ struct hsm_irqfd *irqfd, *tmp;
+ struct eventfd_ctx *eventfd;
+
+ eventfd = eventfd_ctx_fdget(args->fd);
+ if (IS_ERR(eventfd))
+ return PTR_ERR(eventfd);
+
+ mutex_lock(&vm->irqfds_lock);
+ list_for_each_entry_safe(irqfd, tmp, &vm->irqfds, list) {
+ if (irqfd->eventfd == eventfd) {
+ hsm_irqfd_shutdown(irqfd);
+ break;
+ }
+ }
+ mutex_unlock(&vm->irqfds_lock);
+ eventfd_ctx_put(eventfd);
+
+ return 0;
+}
+
+int acrn_irqfd_config(struct acrn_vm *vm, struct acrn_irqfd *args)
+{
+ int ret;
+
+ if (args->flags & ACRN_IRQFD_FLAG_DEASSIGN)
+ ret = acrn_irqfd_deassign(vm, args);
+ else
+ ret = acrn_irqfd_assign(vm, args);
+
+ return ret;
+}
+
+int acrn_irqfd_init(struct acrn_vm *vm)
+{
+ INIT_LIST_HEAD(&vm->irqfds);
+ mutex_init(&vm->irqfds_lock);
+ vm->irqfd_wq = alloc_workqueue("acrn_irqfd-%u", 0, 0, vm->vmid);
+ if (!vm->irqfd_wq)
+ return -ENOMEM;
+
+ dev_dbg(acrn_dev.this_device, "VM %u irqfd init.\n", vm->vmid);
+ return 0;
+}
+
+void acrn_irqfd_deinit(struct acrn_vm *vm)
+{
+ struct hsm_irqfd *irqfd, *next;
+
+ dev_dbg(acrn_dev.this_device, "VM %u irqfd deinit.\n", vm->vmid);
+ destroy_workqueue(vm->irqfd_wq);
+ mutex_lock(&vm->irqfds_lock);
+ list_for_each_entry_safe(irqfd, next, &vm->irqfds, list)
+ hsm_irqfd_shutdown(irqfd);
+ mutex_unlock(&vm->irqfds_lock);
+}
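
For context, the irqfd path added above is driven from userspace through the HSM character device. The following is a minimal, illustrative sketch of that caller, assuming the /dev/acrn_hsm node and the ACRN_IOCTL_IRQFD request together with the struct acrn_irqfd / struct acrn_msi_entry layouts from the UAPI header introduced elsewhere in this series (not shown here); VM creation and error handling are omitted.

	/* Hedged userspace sketch: tie an eventfd to an MSI injection. */
	#include <stdint.h>
	#include <sys/eventfd.h>
	#include <sys/ioctl.h>
	#include <linux/acrn.h>

	static int attach_irqfd(int hsm_fd, uint64_t msi_addr, uint64_t msi_data)
	{
		struct acrn_irqfd irqfd = { 0 };
		int evt_fd;

		/* A device-emulation thread signals this eventfd to raise the MSI. */
		evt_fd = eventfd(0, EFD_NONBLOCK);
		if (evt_fd < 0)
			return -1;

		irqfd.fd = evt_fd;
		irqfd.flags = 0;		/* ACRN_IRQFD_FLAG_DEASSIGN tears it down */
		irqfd.msi.msi_addr = msi_addr;	/* consumed by acrn_irqfd_inject() above */
		irqfd.msi.msi_data = msi_data;

		/* hsm_fd is the /dev/acrn_hsm fd on which the VM was created. */
		return ioctl(hsm_fd, ACRN_IOCTL_IRQFD, &irqfd);
	}

Each successful assign ends up as one hsm_irqfd on &acrn_vm.irqfds; signaling the eventfd then triggers hsm_irqfd_wakeup() and the MSI hypercall.
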
diff --git a/drivers/virt/acrn/mm.c b/drivers/virt/acrn/mm.c
new file mode 100644
index 000000000000..c4f2e15c8a2b
--- /dev/null
+++ b/drivers/virt/acrn/mm.c
@@ -0,0 +1,306 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ACRN: Memory mapping management
+ *
+ * Copyright (C) 2020 Intel Corporation. All rights reserved.
+ *
+ * Authors:
+ * Fei Li <lei1.li@intel.com>
+ * Shuo Liu <shuo.a.liu@intel.com>
+ */
+
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+#include "acrn_drv.h"
+
+static int modify_region(struct acrn_vm *vm, struct vm_memory_region_op *region)
+{
+ struct vm_memory_region_batch *regions;
+ int ret;
+
+ regions = kzalloc(sizeof(*regions), GFP_KERNEL);
+ if (!regions)
+ return -ENOMEM;
+
+ regions->vmid = vm->vmid;
+ regions->regions_num = 1;
+ regions->regions_gpa = virt_to_phys(region);
+
+ ret = hcall_set_memory_regions(virt_to_phys(regions));
+ if (ret < 0)
+ dev_dbg(acrn_dev.this_device,
+ "Failed to set memory region for VM[%u]!\n", vm->vmid);
+
+ kfree(regions);
+ return ret;
+}
+
+/**
+ * acrn_mm_region_add() - Set up the EPT mapping of a memory region.
+ * @vm: User VM.
+ * @user_gpa: A GPA of User VM.
+ * @service_gpa: A GPA of Service VM.
+ * @size: Size of the region.
+ * @mem_type: Combination of ACRN_MEM_TYPE_*.
+ * @mem_access_right: Combination of ACRN_MEM_ACCESS_*.
+ *
+ * Return: 0 on success, <0 on error.
+ */
+int acrn_mm_region_add(struct acrn_vm *vm, u64 user_gpa, u64 service_gpa,
+ u64 size, u32 mem_type, u32 mem_access_right)
+{
+ struct vm_memory_region_op *region;
+ int ret = 0;
+
+ region = kzalloc(sizeof(*region), GFP_KERNEL);
+ if (!region)
+ return -ENOMEM;
+
+ region->type = ACRN_MEM_REGION_ADD;
+ region->user_vm_pa = user_gpa;
+ region->service_vm_pa = service_gpa;
+ region->size = size;
+ region->attr = ((mem_type & ACRN_MEM_TYPE_MASK) |
+ (mem_access_right & ACRN_MEM_ACCESS_RIGHT_MASK));
+ ret = modify_region(vm, region);
+
+ dev_dbg(acrn_dev.this_device,
+ "%s: user-GPA[%pK] service-GPA[%pK] size[0x%llx].\n",
+ __func__, (void *)user_gpa, (void *)service_gpa, size);
+ kfree(region);
+ return ret;
+}
+
+/**
+ * acrn_mm_region_del() - Delete the EPT mapping of a memory region.
+ * @vm: User VM.
+ * @user_gpa: A GPA of the User VM.
+ * @size: Size of the region.
+ *
+ * Return: 0 on success, <0 for error.
+ */
+int acrn_mm_region_del(struct acrn_vm *vm, u64 user_gpa, u64 size)
+{
+ struct vm_memory_region_op *region;
+ int ret = 0;
+
+ region = kzalloc(sizeof(*region), GFP_KERNEL);
+ if (!region)
+ return -ENOMEM;
+
+ region->type = ACRN_MEM_REGION_DEL;
+ region->user_vm_pa = user_gpa;
+ region->service_vm_pa = 0UL;
+ region->size = size;
+ region->attr = 0U;
+
+ ret = modify_region(vm, region);
+
+ dev_dbg(acrn_dev.this_device, "%s: user-GPA[%pK] size[0x%llx].\n",
+ __func__, (void *)user_gpa, size);
+ kfree(region);
+ return ret;
+}
+
+int acrn_vm_memseg_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
+{
+ int ret;
+
+ if (memmap->type == ACRN_MEMMAP_RAM)
+ return acrn_vm_ram_map(vm, memmap);
+
+ if (memmap->type != ACRN_MEMMAP_MMIO) {
+ dev_dbg(acrn_dev.this_device,
+ "Invalid memmap type: %u\n", memmap->type);
+ return -EINVAL;
+ }
+
+ ret = acrn_mm_region_add(vm, memmap->user_vm_pa,
+ memmap->service_vm_pa, memmap->len,
+ ACRN_MEM_TYPE_UC, memmap->attr);
+ if (ret < 0)
+ dev_dbg(acrn_dev.this_device,
+ "Add memory region failed, VM[%u]!\n", vm->vmid);
+
+ return ret;
+}
+
+int acrn_vm_memseg_unmap(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
+{
+ int ret;
+
+ if (memmap->type != ACRN_MEMMAP_MMIO) {
+ dev_dbg(acrn_dev.this_device,
+ "Invalid memmap type: %u\n", memmap->type);
+ return -EINVAL;
+ }
+
+ ret = acrn_mm_region_del(vm, memmap->user_vm_pa, memmap->len);
+ if (ret < 0)
+ dev_dbg(acrn_dev.this_device,
+ "Del memory region failed, VM[%u]!\n", vm->vmid);
+
+ return ret;
+}
+
+/**
+ * acrn_vm_ram_map() - Create a RAM EPT mapping of User VM.
+ * @vm: The User VM pointer
+ * @memmap: Info of the EPT mapping
+ *
+ * Return: 0 on success, <0 for error.
+ */
+int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
+{
+ struct vm_memory_region_batch *regions_info;
+ int nr_pages, i = 0, order, nr_regions = 0;
+ struct vm_memory_mapping *region_mapping;
+ struct vm_memory_region_op *vm_region;
+ struct page **pages = NULL, *page;
+ void *remap_vaddr;
+ int ret, pinned;
+ u64 user_vm_pa;
+
+ if (!vm || !memmap)
+ return -EINVAL;
+
+	/* Get the number of pages in the memory map region */
+ nr_pages = memmap->len >> PAGE_SHIFT;
+ pages = vzalloc(nr_pages * sizeof(struct page *));
+ if (!pages)
+ return -ENOMEM;
+
+	/* Pin the pages of the user memory map region */
+ pinned = pin_user_pages_fast(memmap->vma_base,
+ nr_pages, FOLL_WRITE | FOLL_LONGTERM,
+ pages);
+ if (pinned < 0) {
+ ret = pinned;
+ goto free_pages;
+ } else if (pinned != nr_pages) {
+ ret = -EFAULT;
+ goto put_pages;
+ }
+
+ /* Create a kernel map for the map region */
+ remap_vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
+ if (!remap_vaddr) {
+ ret = -ENOMEM;
+ goto put_pages;
+ }
+
+ /* Record Service VM va <-> User VM pa mapping */
+ mutex_lock(&vm->regions_mapping_lock);
+ region_mapping = &vm->regions_mapping[vm->regions_mapping_count];
+ if (vm->regions_mapping_count < ACRN_MEM_MAPPING_MAX) {
+ region_mapping->pages = pages;
+ region_mapping->npages = nr_pages;
+ region_mapping->size = memmap->len;
+ region_mapping->service_vm_va = remap_vaddr;
+ region_mapping->user_vm_pa = memmap->user_vm_pa;
+ vm->regions_mapping_count++;
+ } else {
+ dev_warn(acrn_dev.this_device,
+ "Run out of memory mapping slots!\n");
+ ret = -ENOMEM;
+ mutex_unlock(&vm->regions_mapping_lock);
+ goto unmap_no_count;
+ }
+ mutex_unlock(&vm->regions_mapping_lock);
+
+	/* Calculate the number of vm_memory_region_op entries needed */
+ while (i < nr_pages) {
+ page = pages[i];
+ VM_BUG_ON_PAGE(PageTail(page), page);
+ order = compound_order(page);
+ nr_regions++;
+ i += 1 << order;
+ }
+
+ /* Prepare the vm_memory_region_batch */
+ regions_info = kzalloc(sizeof(*regions_info) +
+ sizeof(*vm_region) * nr_regions,
+ GFP_KERNEL);
+ if (!regions_info) {
+ ret = -ENOMEM;
+ goto unmap_kernel_map;
+ }
+
+ /* Fill each vm_memory_region_op */
+ vm_region = (struct vm_memory_region_op *)(regions_info + 1);
+ regions_info->vmid = vm->vmid;
+ regions_info->regions_num = nr_regions;
+ regions_info->regions_gpa = virt_to_phys(vm_region);
+ user_vm_pa = memmap->user_vm_pa;
+ i = 0;
+ while (i < nr_pages) {
+ u32 region_size;
+
+ page = pages[i];
+ VM_BUG_ON_PAGE(PageTail(page), page);
+ order = compound_order(page);
+ region_size = PAGE_SIZE << order;
+ vm_region->type = ACRN_MEM_REGION_ADD;
+ vm_region->user_vm_pa = user_vm_pa;
+ vm_region->service_vm_pa = page_to_phys(page);
+ vm_region->size = region_size;
+ vm_region->attr = (ACRN_MEM_TYPE_WB & ACRN_MEM_TYPE_MASK) |
+ (memmap->attr & ACRN_MEM_ACCESS_RIGHT_MASK);
+
+ vm_region++;
+ user_vm_pa += region_size;
+ i += 1 << order;
+ }
+
+ /* Inform the ACRN Hypervisor to set up EPT mappings */
+ ret = hcall_set_memory_regions(virt_to_phys(regions_info));
+ if (ret < 0) {
+ dev_dbg(acrn_dev.this_device,
+ "Failed to set regions, VM[%u]!\n", vm->vmid);
+ goto unset_region;
+ }
+ kfree(regions_info);
+
+ dev_dbg(acrn_dev.this_device,
+ "%s: VM[%u] service-GVA[%pK] user-GPA[%pK] size[0x%llx]\n",
+ __func__, vm->vmid,
+ remap_vaddr, (void *)memmap->user_vm_pa, memmap->len);
+ return ret;
+
+unset_region:
+ kfree(regions_info);
+unmap_kernel_map:
+ mutex_lock(&vm->regions_mapping_lock);
+ vm->regions_mapping_count--;
+ mutex_unlock(&vm->regions_mapping_lock);
+unmap_no_count:
+ vunmap(remap_vaddr);
+put_pages:
+ for (i = 0; i < pinned; i++)
+ unpin_user_page(pages[i]);
+free_pages:
+ vfree(pages);
+ return ret;
+}
+
+/**
+ * acrn_vm_all_ram_unmap() - Destroy all RAM EPT mappings of a User VM.
+ * @vm: The User VM
+ */
+void acrn_vm_all_ram_unmap(struct acrn_vm *vm)
+{
+ struct vm_memory_mapping *region_mapping;
+ int i, j;
+
+ mutex_lock(&vm->regions_mapping_lock);
+ for (i = 0; i < vm->regions_mapping_count; i++) {
+ region_mapping = &vm->regions_mapping[i];
+ vunmap(region_mapping->service_vm_va);
+ for (j = 0; j < region_mapping->npages; j++)
+ unpin_user_page(region_mapping->pages[j]);
+ vfree(region_mapping->pages);
+ }
+ mutex_unlock(&vm->regions_mapping_lock);
+}
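
acrn_vm_ram_map() above is reached when userspace maps guest RAM with a memmap of type ACRN_MEMMAP_RAM, passing the base address of its own backing VMA in @vma_base. A minimal sketch of such a caller follows; it assumes the ACRN_IOCTL_SET_MEMSEG request and the ACRN_MEM_ACCESS_RWX attribute from the UAPI header introduced elsewhere in this series, and is illustrative only.

	/* Hedged userspace sketch: back User VM RAM with an anonymous mapping. */
	#include <stddef.h>
	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <linux/acrn.h>

	static int map_guest_ram(int hsm_fd, uint64_t guest_pa, size_t len)
	{
		struct acrn_vm_memmap memmap = { 0 };
		void *backing;

		/* Page-aligned backing memory; acrn_vm_ram_map() pins these pages. */
		backing = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (backing == MAP_FAILED)
			return -1;

		memmap.type = ACRN_MEMMAP_RAM;
		memmap.vma_base = (uint64_t)(uintptr_t)backing;
		memmap.user_vm_pa = guest_pa;
		memmap.len = len;
		memmap.attr = ACRN_MEM_ACCESS_RWX;

		return ioctl(hsm_fd, ACRN_IOCTL_SET_MEMSEG, &memmap);
	}

The pinned pages are grouped by compound-page order into vm_memory_region_op entries, so a hugepage-backed VMA needs far fewer hypercall entries than one backed by 4K pages.
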
diff --git a/drivers/virt/acrn/vm.c b/drivers/virt/acrn/vm.c
new file mode 100644
index 000000000000..7804a2492ad7
--- /dev/null
+++ b/drivers/virt/acrn/vm.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ACRN_HSM: Virtual Machine management
+ *
+ * Copyright (C) 2020 Intel Corporation. All rights reserved.
+ *
+ * Authors:
+ * Jason Chen CJ <jason.cj.chen@intel.com>
+ * Yakui Zhao <yakui.zhao@intel.com>
+ */
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+#include "acrn_drv.h"
+
+/* List of VMs */
+LIST_HEAD(acrn_vm_list);
+/*
+ * acrn_vm_list is read in a worker thread which dispatches I/O requests and
+ * is written in the VM creation ioctl. Use the rwlock mechanism to protect it.
+ */
+DEFINE_RWLOCK(acrn_vm_list_lock);
+
+struct acrn_vm *acrn_vm_create(struct acrn_vm *vm,
+ struct acrn_vm_creation *vm_param)
+{
+ int ret;
+
+ ret = hcall_create_vm(virt_to_phys(vm_param));
+ if (ret < 0 || vm_param->vmid == ACRN_INVALID_VMID) {
+ dev_err(acrn_dev.this_device,
+ "Failed to create VM! Error: %d\n", ret);
+ return NULL;
+ }
+
+ mutex_init(&vm->regions_mapping_lock);
+ INIT_LIST_HEAD(&vm->ioreq_clients);
+ spin_lock_init(&vm->ioreq_clients_lock);
+ vm->vmid = vm_param->vmid;
+ vm->vcpu_num = vm_param->vcpu_num;
+
+ if (acrn_ioreq_init(vm, vm_param->ioreq_buf) < 0) {
+ hcall_destroy_vm(vm_param->vmid);
+ vm->vmid = ACRN_INVALID_VMID;
+ return NULL;
+ }
+
+ write_lock_bh(&acrn_vm_list_lock);
+ list_add(&vm->list, &acrn_vm_list);
+ write_unlock_bh(&acrn_vm_list_lock);
+
+ acrn_ioeventfd_init(vm);
+ acrn_irqfd_init(vm);
+ dev_dbg(acrn_dev.this_device, "VM %u created.\n", vm->vmid);
+ return vm;
+}
+
+int acrn_vm_destroy(struct acrn_vm *vm)
+{
+ int ret;
+
+ if (vm->vmid == ACRN_INVALID_VMID ||
+ test_and_set_bit(ACRN_VM_FLAG_DESTROYED, &vm->flags))
+ return 0;
+
+ /* Remove from global VM list */
+ write_lock_bh(&acrn_vm_list_lock);
+ list_del_init(&vm->list);
+ write_unlock_bh(&acrn_vm_list_lock);
+
+ acrn_ioeventfd_deinit(vm);
+ acrn_irqfd_deinit(vm);
+ acrn_ioreq_deinit(vm);
+
+ if (vm->monitor_page) {
+ put_page(vm->monitor_page);
+ vm->monitor_page = NULL;
+ }
+
+ ret = hcall_destroy_vm(vm->vmid);
+ if (ret < 0) {
+ dev_err(acrn_dev.this_device,
+ "Failed to destroy VM %u\n", vm->vmid);
+ clear_bit(ACRN_VM_FLAG_DESTROYED, &vm->flags);
+ return ret;
+ }
+
+ acrn_vm_all_ram_unmap(vm);
+
+ dev_dbg(acrn_dev.this_device, "VM %u destroyed.\n", vm->vmid);
+ vm->vmid = ACRN_INVALID_VMID;
+ return 0;
+}
+
+/**
+ * acrn_msi_inject() - Inject an MSI interrupt into a User VM
+ * @vm: User VM
+ * @msi_addr: The MSI address
+ * @msi_data: The MSI data
+ *
+ * Return: 0 on success, <0 on error
+ */
+int acrn_msi_inject(struct acrn_vm *vm, u64 msi_addr, u64 msi_data)
+{
+ struct acrn_msi_entry *msi;
+ int ret;
+
+ /* might be used in interrupt context, so use GFP_ATOMIC */
+ msi = kzalloc(sizeof(*msi), GFP_ATOMIC);
+ if (!msi)
+ return -ENOMEM;
+
+ /*
+ * msi_addr: addr[19:12] with dest vcpu id
+ * msi_data: data[7:0] with vector
+ */
+ msi->msi_addr = msi_addr;
+ msi->msi_data = msi_data;
+ ret = hcall_inject_msi(vm->vmid, virt_to_phys(msi));
+ if (ret < 0)
+ dev_err(acrn_dev.this_device,
+ "Failed to inject MSI to VM %u!\n", vm->vmid);
+ kfree(msi);
+ return ret;
+}
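
The msi_addr/msi_data layout noted in acrn_msi_inject() follows the standard x86 MSI format: the destination (here the target vCPU id) sits in address bits 19:12 and the vector in data bits 7:0. A small kernel-side sketch of how a caller such as the irqfd code might compose the pair; the helper names and the 0xfee00000 base constant are illustrative, not part of this patch.

	#include <linux/types.h>

	/* Illustrative only: build the msi_addr/msi_data pair for acrn_msi_inject(). */
	#define ACRN_MSI_ADDR_BASE	0xfee00000UL	/* x86 MSI address window */
	#define ACRN_MSI_DEST_SHIFT	12		/* addr[19:12]: destination (vCPU) id */

	static inline u64 acrn_compose_msi_addr(u32 dest_vcpu_id)
	{
		return ACRN_MSI_ADDR_BASE |
		       ((u64)(dest_vcpu_id & 0xff) << ACRN_MSI_DEST_SHIFT);
	}

	static inline u64 acrn_compose_msi_data(u8 vector)
	{
		return vector;	/* data[7:0] carries the vector */
	}

With these helpers, acrn_msi_inject(vm, acrn_compose_msi_addr(vcpu), acrn_compose_msi_data(vec)) would issue the hypercall shown above.
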
diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c
index ea05af41ec69..8d195e3f8301 100644
--- a/drivers/virt/vboxguest/vboxguest_utils.c
+++ b/drivers/virt/vboxguest/vboxguest_utils.c
@@ -468,7 +468,7 @@ static int hgcm_cancel_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call)
* Cancellation fun.
*/
static int vbg_hgcm_do_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call,
- u32 timeout_ms, bool *leak_it)
+ u32 timeout_ms, bool interruptible, bool *leak_it)
{
int rc, cancel_rc, ret;
long timeout;
@@ -495,10 +495,15 @@ static int vbg_hgcm_do_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call,
else
timeout = msecs_to_jiffies(timeout_ms);
- timeout = wait_event_interruptible_timeout(
- gdev->hgcm_wq,
- hgcm_req_done(gdev, &call->header),
- timeout);
+ if (interruptible) {
+ timeout = wait_event_interruptible_timeout(gdev->hgcm_wq,
+ hgcm_req_done(gdev, &call->header),
+ timeout);
+ } else {
+ timeout = wait_event_timeout(gdev->hgcm_wq,
+ hgcm_req_done(gdev, &call->header),
+ timeout);
+ }
/* timeout > 0 means hgcm_req_done has returned true, so success */
if (timeout > 0)
@@ -631,7 +636,8 @@ int vbg_hgcm_call(struct vbg_dev *gdev, u32 requestor, u32 client_id,
hgcm_call_init_call(call, client_id, function, parms, parm_count,
bounce_bufs);
- ret = vbg_hgcm_do_call(gdev, call, timeout_ms, &leak_it);
+ ret = vbg_hgcm_do_call(gdev, call, timeout_ms,
+ requestor & VMMDEV_REQUESTOR_USERMODE, &leak_it);
if (ret == 0) {
*vbox_status = call->header.result;
ret = hgcm_call_copy_back_result(call, parms, parm_count,