author    | Changbin Du <changbin.du@intel.com>   | 2017-08-15 08:14:04 +0300
committer | Zhenyu Wang <zhenyuw@linux.intel.com> | 2017-09-08 09:21:13 +0300
commit    | f090a00df9ecdab5d066b099c1797e0070e27a36 (patch)
tree      | 72a835f98ed37eb6213133e04bbc712fb90f04e3 /drivers/gpu/drm/i915/gvt/kvmgt.c
parent    | 5d5fe176155e6cfa4a53accb90e4010baa5266d0 (diff)
download  | linux-f090a00df9ecdab5d066b099c1797e0070e27a36.tar.xz
drm/i915/gvt: Add emulation for BAR2 (aperture) with normal file RW approach
For vfio-pci, if a region supports MMAP then it should also support normal
file access. Userspace is free to choose which one to use. For QEMU, we
just need to add the 'x-no-mmap=on' option to vfio-pci.
Currently GVTg only supports MMAP for BAR2, so GVTg will not work when the
user turns on the x-no-mmap option.
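As an aside (not part of the commit message), here is a minimal userspace sketch of the access path that x-no-mmap forces: the client queries the BAR2 region offset and uses pread() on the VFIO device fd instead of an mmap'ed window. The device_fd parameter and the read_bar2 name are assumptions for illustration only; in the kernel this path ends up in intel_vgpu_rw().

#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/vfio.h>

/* Illustrative only: read "len" bytes at offset "bar_off" inside BAR2
 * through the VFIO device fd rather than through an mmap'ed window. */
static int read_bar2(int device_fd, uint64_t bar_off, void *buf, size_t len)
{
	struct vfio_region_info info = {
		.argsz = sizeof(info),
		.index = VFIO_PCI_BAR2_REGION_INDEX,
	};

	if (ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &info) < 0)
		return -1;

	/* This pread() is what the kernel routes into intel_vgpu_rw(). */
	return pread(device_fd, buf, len, info.offset + bar_off) == (ssize_t)len ? 0 : -1;
}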
This patch adds file-style access for BAR2, aka the GPU aperture. We map
the entire aperture partition of the active vGPU into kernel space when the
guest driver tries to enable PCI Memory Space, then redirect file RW
operations from kvmgt to this mapped area.
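The sketch below is illustrative only, not the patch itself; aperture_va, aperture_pa and the sketch_* names are assumptions. It shows the idea in its simplest form: map the vGPU's aperture partition once when the guest enables memory space, then satisfy file-style reads and writes with plain copies into that mapping.

#include <linux/io.h>
#include <linux/errno.h>
#include <linux/types.h>

static void __iomem *aperture_va;	/* set when the guest enables PCI Memory Space */

/* Map the active vGPU's aperture partition into kernel space (sketch). */
static int sketch_map_aperture(phys_addr_t aperture_pa, size_t aperture_sz)
{
	aperture_va = ioremap_wc(aperture_pa, aperture_sz);
	return aperture_va ? 0 : -ENOMEM;
}

/* Redirect a file-style BAR2 access from kvmgt to the mapped area (sketch). */
static void sketch_aperture_rw(u64 off, void *buf, unsigned int count,
			       bool is_write)
{
	if (is_write)
		memcpy_toio(aperture_va + off, buf, count);
	else
		memcpy_fromio(buf, aperture_va + off, count);
}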
Link: https://bugzilla.redhat.com/show_bug.cgi?id=1458032
Signed-off-by: Changbin Du <changbin.du@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Diffstat (limited to 'drivers/gpu/drm/i915/gvt/kvmgt.c')
-rw-r--r-- | drivers/gpu/drm/i915/gvt/kvmgt.c | 42
1 file changed, 25 insertions(+), 17 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 9201db0892f1..ae65268efce3 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -609,21 +609,20 @@ static void intel_vgpu_release_work(struct work_struct *work)
 	__intel_vgpu_release(vgpu);
 }
 
-static uint64_t intel_vgpu_get_bar0_addr(struct intel_vgpu *vgpu)
+static uint64_t intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
 {
 	u32 start_lo, start_hi;
 	u32 mem_type;
-	int pos = PCI_BASE_ADDRESS_0;
 
-	start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + pos)) &
+	start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
 			PCI_BASE_ADDRESS_MEM_MASK;
-	mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + pos)) &
+	mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
 			PCI_BASE_ADDRESS_MEM_TYPE_MASK;
 
 	switch (mem_type) {
 	case PCI_BASE_ADDRESS_MEM_TYPE_64:
 		start_hi = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space
-						+ pos + 4));
+						+ bar + 4));
 		break;
 	case PCI_BASE_ADDRESS_MEM_TYPE_32:
 	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
@@ -637,6 +636,21 @@ static uint64_t intel_vgpu_get_bar0_addr(struct intel_vgpu *vgpu)
 	return ((u64)start_hi << 32) | start_lo;
 }
 
+static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, uint64_t off,
+		void *buf, unsigned int count, bool is_write)
+{
+	uint64_t bar_start = intel_vgpu_get_bar_addr(vgpu, bar);
+	int ret;
+
+	if (is_write)
+		ret = intel_gvt_ops->emulate_mmio_write(vgpu,
+					bar_start + off, buf, count);
+	else
+		ret = intel_gvt_ops->emulate_mmio_read(vgpu,
+					bar_start + off, buf, count);
+	return ret;
+}
+
 static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
 			size_t count, loff_t *ppos, bool is_write)
 {
@@ -661,20 +675,14 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
 					buf, count);
 		break;
 	case VFIO_PCI_BAR0_REGION_INDEX:
-		if (is_write) {
-			uint64_t bar0_start = intel_vgpu_get_bar0_addr(vgpu);
-
-			ret = intel_gvt_ops->emulate_mmio_write(vgpu,
-						bar0_start + pos, buf, count);
-		} else {
-			uint64_t bar0_start = intel_vgpu_get_bar0_addr(vgpu);
-
-			ret = intel_gvt_ops->emulate_mmio_read(vgpu,
-						bar0_start + pos, buf, count);
-		}
+		ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_0, pos,
+					buf, count, is_write);
 		break;
-	case VFIO_PCI_BAR1_REGION_INDEX:
 	case VFIO_PCI_BAR2_REGION_INDEX:
+		ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_2, pos,
+					buf, count, is_write);
+		break;
+	case VFIO_PCI_BAR1_REGION_INDEX:
 	case VFIO_PCI_BAR3_REGION_INDEX:
 	case VFIO_PCI_BAR4_REGION_INDEX:
 	case VFIO_PCI_BAR5_REGION_INDEX:
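A side note on the refactor above, for readers comparing it against the VFIO region indexes: intel_vgpu_bar_rw() takes the BAR's PCI config-space offset (PCI_BASE_ADDRESS_0 is 0x10, PCI_BASE_ADDRESS_2 is 0x18), not the region index used in the switch. A hypothetical helper, shown only to make that relationship explicit, could map one to the other:

#include <linux/errno.h>
#include <linux/pci_regs.h>
#include <linux/vfio.h>

/* Hypothetical helper (not in the patch): VFIO BAR region index -> PCI
 * config-space offset of that BAR's (low) dword. */
static int vfio_bar_index_to_cfg_offset(unsigned int index)
{
	if (index > VFIO_PCI_BAR5_REGION_INDEX)
		return -EINVAL;

	/* BAR registers are 4 bytes each, starting at PCI_BASE_ADDRESS_0 (0x10). */
	return PCI_BASE_ADDRESS_0 + index * 4;
}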