author     Dave Airlie <airlied@redhat.com>	2019-02-18 06:27:15 +0300
committer  Dave Airlie <airlied@redhat.com>	2019-02-18 06:27:15 +0300
commit     c06de56121e3ac0f0f1f4a081c041654ffcacd62 (patch)
tree       3662e7052352b0f1b78e8832b1d3b91ca211b798 /drivers/gpu/drm/i915
parent     8d451a4b6e9f4b52ae3d4cafe17486d8d0c6afb0 (diff)
parent     a3b22b9f11d9fbc48b0291ea92259a5a810e9438 (diff)
Merge v5.0-rc7 into drm-next
Backmerging for nouveau and imx, which needed some fixes for the next pulls.
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c    |  1
-rw-r--r--  drivers/gpu/drm/i915/gvt/hypercall.h   |  2
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c       | 30
-rw-r--r--  drivers/gpu/drm/i915/gvt/mpt.h         |  2
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c   | 75
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.h   |  1
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c  | 38
7 files changed, 111 insertions, 38 deletions
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 9c106e47e640..bc64b810e0d5 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -2799,6 +2799,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
 	MMIO_DFH(_MMIO(0xe2a0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(_MMIO(0xe2b0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(_MMIO(0xe2c0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(_MMIO(0x21f0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index 5e01cc8d9b16..4862fb12778e 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -47,7 +47,7 @@ struct intel_gvt_mpt {
 	int (*host_init)(struct device *dev, void *gvt, const void *ops);
 	void (*host_exit)(struct device *dev);
 	int (*attach_vgpu)(void *vgpu, unsigned long *handle);
-	void (*detach_vgpu)(unsigned long handle);
+	void (*detach_vgpu)(void *vgpu);
 	int (*inject_msi)(unsigned long handle, u32 addr, u16 data);
 	unsigned long (*from_virt_to_mfn)(void *p);
 	int (*enable_page_track)(unsigned long handle, u64 gfn);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 63eef86a2a85..d5fcc447d22f 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -1147,7 +1147,7 @@ static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
 {
 	unsigned int index;
 	u64 virtaddr;
-	unsigned long req_size, pgoff = 0;
+	unsigned long req_size, pgoff, req_start;
 	pgprot_t pg_prot;
 	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
 
@@ -1165,7 +1165,17 @@ static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
 	pg_prot = vma->vm_page_prot;
 	virtaddr = vma->vm_start;
 	req_size = vma->vm_end - vma->vm_start;
-	pgoff = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;
+	pgoff = vma->vm_pgoff &
+		((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
+	req_start = pgoff << PAGE_SHIFT;
+
+	if (!intel_vgpu_in_aperture(vgpu, req_start))
+		return -EINVAL;
+	if (req_start + req_size >
+	    vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu))
+		return -EINVAL;
+
+	pgoff = (gvt_aperture_pa_base(vgpu->gvt) >> PAGE_SHIFT) + pgoff;
 
 	return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot);
 }
@@ -1813,9 +1823,21 @@ static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle)
 	return 0;
 }
 
-static void kvmgt_detach_vgpu(unsigned long handle)
+static void kvmgt_detach_vgpu(void *p_vgpu)
 {
-	/* nothing to do here */
+	int i;
+	struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
+
+	if (!vgpu->vdev.region)
+		return;
+
+	for (i = 0; i < vgpu->vdev.num_regions; i++)
+		if (vgpu->vdev.region[i].ops->release)
+			vgpu->vdev.region[i].ops->release(vgpu,
+					&vgpu->vdev.region[i]);
+	vgpu->vdev.num_regions = 0;
+	kfree(vgpu->vdev.region);
+	vgpu->vdev.region = NULL;
 }
 
 static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index 5d8b8f228d8f..0f9440128123 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -99,7 +99,7 @@ static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu)
 	if (!intel_gvt_host.mpt->detach_vgpu)
 		return;
 
-	intel_gvt_host.mpt->detach_vgpu(vgpu->handle);
+	intel_gvt_host.mpt->detach_vgpu(vgpu);
 }
 
 #define MSI_CAP_CONTROL(offset) (offset + 2)
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index b7957eefb976..1bb8f936fdaa 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -333,6 +333,9 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 
 	i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
 	i915_gem_object_put(wa_ctx->indirect_ctx.obj);
+
+	wa_ctx->indirect_ctx.obj = NULL;
+	wa_ctx->indirect_ctx.shadow_va = NULL;
 }
 
 static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
@@ -357,6 +360,33 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
 	return 0;
 }
 
+static int
+intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload)
+{
+	struct intel_vgpu *vgpu = workload->vgpu;
+	struct intel_vgpu_submission *s = &vgpu->submission;
+	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
+	struct i915_request *rq;
+	int ret = 0;
+
+	lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+	if (workload->req)
+		goto out;
+
+	rq = i915_request_alloc(engine, shadow_ctx);
+	if (IS_ERR(rq)) {
+		gvt_vgpu_err("fail to allocate gem request\n");
+		ret = PTR_ERR(rq);
+		goto out;
+	}
+	workload->req = i915_request_get(rq);
+out:
+	return ret;
+}
+
 /**
  * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
  * shadow it as well, include ringbuffer,wa_ctx and ctx.
@@ -373,12 +403,11 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
 	struct intel_context *ce;
-	struct i915_request *rq;
 	int ret;
 
 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
-	if (workload->req)
+	if (workload->shadow)
 		return 0;
 
 	ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
@@ -418,22 +447,8 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 			goto err_shadow;
 	}
 
-	rq = i915_request_alloc(engine, shadow_ctx);
-	if (IS_ERR(rq)) {
-		gvt_vgpu_err("fail to allocate gem request\n");
-		ret = PTR_ERR(rq);
-		goto err_shadow;
-	}
-	workload->req = i915_request_get(rq);
-
-	ret = populate_shadow_context(workload);
-	if (ret)
-		goto err_req;
-
+	workload->shadow = true;
 	return 0;
-err_req:
-	rq = fetch_and_zero(&workload->req);
-	i915_request_put(rq);
 err_shadow:
 	release_shadow_wa_ctx(&workload->wa_ctx);
 err_unpin:
@@ -672,23 +687,31 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 	mutex_lock(&vgpu->vgpu_lock);
 	mutex_lock(&dev_priv->drm.struct_mutex);
 
+	ret = intel_gvt_workload_req_alloc(workload);
+	if (ret)
+		goto err_req;
+
 	ret = intel_gvt_scan_and_shadow_workload(workload);
 	if (ret)
 		goto out;
 
-	ret = prepare_workload(workload);
+	ret = populate_shadow_context(workload);
+	if (ret) {
+		release_shadow_wa_ctx(&workload->wa_ctx);
+		goto out;
+	}
 
+	ret = prepare_workload(workload);
 out:
-	if (ret)
-		workload->status = ret;
-
 	if (!IS_ERR_OR_NULL(workload->req)) {
 		gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
 			      ring_id, workload->req);
 		i915_request_add(workload->req);
 		workload->dispatched = true;
 	}
-
+err_req:
+	if (ret)
+		workload->status = ret;
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 	mutex_unlock(&vgpu->vgpu_lock);
 	return ret;
@@ -892,11 +915,6 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 
 	list_del_init(&workload->list);
 
-	if (!workload->status) {
-		release_shadow_batch_buffer(workload);
-		release_shadow_wa_ctx(&workload->wa_ctx);
-	}
-
 	if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
 		/* if workload->status is not successful means HW GPU
 		 * has occurred GPU hang or something wrong with i915/GVT,
@@ -1262,6 +1280,9 @@ void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu_submission *s = &workload->vgpu->submission;
 
+	release_shadow_batch_buffer(workload);
+	release_shadow_wa_ctx(&workload->wa_ctx);
+
 	if (workload->shadow_mm)
 		intel_vgpu_mm_put(workload->shadow_mm);
 
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 1e9eec6a32fe..0635b2c4bed7 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -83,6 +83,7 @@ struct intel_vgpu_workload {
 	struct i915_request *req;
 	/* if this workload has been dispatched to i915? */
 	bool dispatched;
+	bool shadow;      /* if workload has done shadow of guest request */
 	int status;
 
 	struct intel_vgpu_mm *shadow_mm;
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 30ae96c5c97c..5e00ee9270b5 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -54,7 +54,12 @@
 struct opregion_header {
 	u8 signature[16];
 	u32 size;
-	u32 opregion_ver;
+	struct {
+		u8 rsvd;
+		u8 revision;
+		u8 minor;
+		u8 major;
+	} __packed over;
 	u8 bios_ver[32];
 	u8 vbios_ver[16];
 	u8 driver_ver[16];
@@ -118,7 +123,8 @@ struct opregion_asle {
 	u64 fdss;
 	u32 fdsp;
 	u32 stat;
-	u64 rvda;	/* Physical address of raw vbt data */
+	u64 rvda;	/* Physical (2.0) or relative from opregion (2.1+)
+			 * address of raw VBT data. */
 	u32 rvds;	/* Size of raw vbt data */
 	u8 rsvd[58];
 } __packed;
@@ -924,6 +930,11 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
 	opregion->header = base;
 	opregion->lid_state = base + ACPI_CLID;
 
+	DRM_DEBUG_DRIVER("ACPI OpRegion version %u.%u.%u\n",
+			 opregion->header->over.major,
+			 opregion->header->over.minor,
+			 opregion->header->over.revision);
+
 	mboxes = opregion->header->mboxes;
 	if (mboxes & MBOX_ACPI) {
 		DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
@@ -952,11 +963,26 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
 	if (dmi_check_system(intel_no_opregion_vbt))
 		goto out;
 
-	if (opregion->header->opregion_ver >= 2 && opregion->asle &&
+	if (opregion->header->over.major >= 2 && opregion->asle &&
 	    opregion->asle->rvda && opregion->asle->rvds) {
-		opregion->rvda = memremap(opregion->asle->rvda,
-					  opregion->asle->rvds,
+		resource_size_t rvda = opregion->asle->rvda;
+
+		/*
+		 * opregion 2.0: rvda is the physical VBT address.
+		 *
+		 * opregion 2.1+: rvda is unsigned, relative offset from
+		 * opregion base, and should never point within opregion.
+		 */
+		if (opregion->header->over.major > 2 ||
+		    opregion->header->over.minor >= 1) {
+			WARN_ON(rvda < OPREGION_SIZE);
+
+			rvda += asls;
+		}
+
+		opregion->rvda = memremap(rvda, opregion->asle->rvds,
 					  MEMREMAP_WB);
+
 		vbt = opregion->rvda;
 		vbt_size = opregion->asle->rvds;
 		if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
@@ -966,6 +992,8 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
 			goto out;
 		} else {
 			DRM_DEBUG_KMS("Invalid VBT in ACPI OpRegion (RVDA)\n");
+			memunmap(opregion->rvda);
+			opregion->rvda = NULL;
 		}
 	}
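
Editor's note: two of the changes above repay a closer look, so standalone restatements of their arithmetic follow. Both sketches are illustrative userspace C, not kernel code; apart from OPREGION_SIZE (the OpRegion really is 8 KiB) and VFIO_PCI_OFFSET_SHIFT (vfio-pci's actual region-index shift), every constant and helper name here is a hypothetical stand-in.

The intel_opregion.c hunks implement the comment added to struct opregion_asle: in OpRegion 2.0 the RVDA field is an absolute physical address of the raw VBT, while in 2.1+ it is an unsigned offset relative to the OpRegion base (asls), which is why the new code adds asls before calling memremap() and warns if the offset would land inside the 8 KiB OpRegion itself. A minimal sketch of that resolution, assuming made-up firmware values:

	#include <stdint.h>
	#include <stdio.h>

	#define OPREGION_SIZE	(8 * 1024)	/* the ACPI OpRegion is 8 KiB */

	/*
	 * Resolve the physical address of the raw VBT from the ASLE rvda field.
	 * 2.0:  rvda is already an absolute physical address.
	 * 2.1+: rvda is an unsigned offset from the OpRegion base (asls) and
	 *       should never point back inside the OpRegion itself.
	 */
	static uint64_t resolve_rvda(uint8_t major, uint8_t minor,
				     uint64_t rvda, uint64_t asls)
	{
		if (major > 2 || (major == 2 && minor >= 1)) {
			if (rvda < OPREGION_SIZE)
				fprintf(stderr, "warn: rvda points inside OpRegion\n");
			return asls + rvda;	/* relative -> absolute */
		}
		return rvda;			/* already absolute */
	}

	int main(void)
	{
		/* Hypothetical firmware-provided values, for illustration only. */
		uint64_t asls = 0x7a000000;	/* OpRegion physical base */

		printf("2.0 VBT at %#llx\n", (unsigned long long)
		       resolve_rvda(2, 0, 0x7b000000, asls));	/* absolute      */
		printf("2.1 VBT at %#llx\n", (unsigned long long)
		       resolve_rvda(2, 1, 0x2000, asls));	/* 8 KiB past base */
		return 0;
	}

The kvmgt.c mmap hunk fixes intel_vgpu_mmap() so it no longer ignores the caller's offset: vfio-pci encodes the region index in the high bits of the mmap offset, so the patch masks vm_pgoff down to the page offset within the region, rejects ranges that fall outside this vGPU's slice of the aperture, and only then rebases onto the host aperture. A sketch of just the mask-and-bounds-check step, with invented aperture geometry:

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT		12
	#define VFIO_PCI_OFFSET_SHIFT	40	/* region index lives above bit 40 */

	/* Hypothetical vGPU slice of the host aperture (illustrative values). */
	#define VGPU_AP_OFFSET	(64ULL << 20)	/* slice starts 64 MiB in */
	#define VGPU_AP_SZ	(128ULL << 20)	/* slice is 128 MiB long  */

	/* Mirror of the check added to intel_vgpu_mmap(): keep only the bits of
	 * vm_pgoff below the region-index field, then verify the requested
	 * [start, start + size) range stays inside the vGPU's aperture slice. */
	static int check_mmap_range(uint64_t vm_pgoff, uint64_t req_size)
	{
		uint64_t pgoff = vm_pgoff &
			((1ULL << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
		uint64_t req_start = pgoff << PAGE_SHIFT;

		if (req_start < VGPU_AP_OFFSET ||
		    req_start >= VGPU_AP_OFFSET + VGPU_AP_SZ)
			return -1;	/* start outside the slice */
		if (req_start + req_size > VGPU_AP_OFFSET + VGPU_AP_SZ)
			return -1;	/* range runs past the slice */
		return 0;		/* caller may now rebase and remap */
	}

	int main(void)
	{
		uint64_t region = 2ULL << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);

		/* 4 MiB map starting exactly at the slice: accepted. */
		printf("in-slice: %s\n", check_mmap_range(
		       region | (VGPU_AP_OFFSET >> PAGE_SHIFT), 4ULL << 20)
		       ? "rejected" : "ok");
		/* 4 MiB map only 32 MiB into the BAR, below the slice: rejected. */
		printf("below:    %s\n", check_mmap_range(
		       region | ((32ULL << 20) >> PAGE_SHIFT), 4ULL << 20)
		       ? "rejected" : "ok");
		return 0;
	}

After the check succeeds, the real code adds the host aperture's physical base page (gvt_aperture_pa_base() >> PAGE_SHIFT) to the validated pgoff before remap_pfn_range(); that rebase step is omitted from the sketch.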