Diffstat (limited to 'drivers')
 drivers/gpu/drm/i915/gvt/cfg_space.c | 21 ++
 drivers/gpu/drm/i915/gvt/handlers.c  | 47 +++-
 drivers/gpu/drm/i915/gvt/mmio.h      |  2 +
 drivers/gpu/drm/i915/gvt/scheduler.c | 22 ++-
 4 files changed, 81 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index ab19545d59a1..4ce2e6bd0680 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -208,6 +208,20 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
 	return 0;
 }
 
+static int emulate_pci_rom_bar_write(struct intel_vgpu *vgpu,
+	unsigned int offset, void *p_data, unsigned int bytes)
+{
+	u32 *pval = (u32 *)(vgpu_cfg_space(vgpu) + offset);
+	u32 new = *(u32 *)(p_data);
+
+	if ((new & PCI_ROM_ADDRESS_MASK) == PCI_ROM_ADDRESS_MASK)
+		/* We don't have rom, return size of 0. */
+		*pval = 0;
+	else
+		vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
+	return 0;
+}
+
 static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
 	void *p_data, unsigned int bytes)
 {
@@ -300,6 +314,11 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
 	}
 
 	switch (rounddown(offset, 4)) {
+	case PCI_ROM_ADDRESS:
+		if (WARN_ON(!IS_ALIGNED(offset, 4)))
+			return -EINVAL;
+		return emulate_pci_rom_bar_write(vgpu, offset, p_data, bytes);
+
 	case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_5:
 		if (WARN_ON(!IS_ALIGNED(offset, 4)))
 			return -EINVAL;
@@ -375,6 +394,8 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
 		pci_resource_len(gvt->dev_priv->drm.pdev, 0);
 	vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size =
 		pci_resource_len(gvt->dev_priv->drm.pdev, 2);
+
+	memset(vgpu_cfg_space(vgpu) + PCI_ROM_ADDRESS, 0, 4);
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 44cd5ff5e97d..1f840f6b81bb 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -137,17 +137,26 @@ static int new_mmio_info(struct intel_gvt *gvt,
 	return 0;
 }
 
-static int render_mmio_to_ring_id(struct intel_gvt *gvt, unsigned int reg)
+/**
+ * intel_gvt_render_mmio_to_ring_id - convert a mmio offset into ring id
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ * Returns:
+ * Ring ID on success, negative error code if failed.
+ */
+int intel_gvt_render_mmio_to_ring_id(struct intel_gvt *gvt,
+		unsigned int offset)
 {
 	enum intel_engine_id id;
 	struct intel_engine_cs *engine;
 
-	reg &= ~GENMASK(11, 0);
+	offset &= ~GENMASK(11, 0);
 	for_each_engine(engine, gvt->dev_priv, id) {
-		if (engine->mmio_base == reg)
+		if (engine->mmio_base == offset)
 			return id;
 	}
-	return -1;
+	return -ENODEV;
 }
 
 #define offset_to_fence_num(offset) \
@@ -1398,18 +1407,36 @@ static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
 static int mmio_read_from_hw(struct intel_vgpu *vgpu,
 		unsigned int offset, void *p_data, unsigned int bytes)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	int ring_id;
+	u32 ring_base;
+
+	ring_id = intel_gvt_render_mmio_to_ring_id(gvt, offset);
+	/**
+	 * Read HW reg in following case
+	 * a. the offset isn't a ring mmio
+	 * b. the offset's ring is running on hw.
+	 * c. the offset is ring time stamp mmio
+	 */
+	if (ring_id >= 0)
+		ring_base = dev_priv->engine[ring_id]->mmio_base;
+
+	if (ring_id < 0 || vgpu == gvt->scheduler.engine_owner[ring_id] ||
+	    offset == i915_mmio_reg_offset(RING_TIMESTAMP(ring_base)) ||
+	    offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(ring_base))) {
+		mmio_hw_access_pre(dev_priv);
+		vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
+		mmio_hw_access_post(dev_priv);
+	}
 
-	mmio_hw_access_pre(dev_priv);
-	vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
-	mmio_hw_access_post(dev_priv);
 	return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
 }
 
 static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes)
 {
-	int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset);
+	int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
 	struct intel_vgpu_execlist *execlist;
 	u32 data = *(u32 *)p_data;
 	int ret = 0;
@@ -1436,7 +1463,7 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes)
 {
 	u32 data = *(u32 *)p_data;
-	int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset);
+	int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
 	bool enable_execlist;
 
 	write_vreg(vgpu, offset, p_data, bytes);
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index 32cd64ddad26..dbc04ad2c7a1 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -65,6 +65,8 @@ struct intel_gvt_mmio_info {
 	struct hlist_node node;
 };
 
+int intel_gvt_render_mmio_to_ring_id(struct intel_gvt *gvt,
+		unsigned int reg);
 unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt);
 bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device);
 
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 3ac1dc97a7a0..69f8f0d155b9 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -131,6 +131,20 @@ static inline bool is_gvt_request(struct drm_i915_gem_request *req)
 	return i915_gem_context_force_single_submission(req->ctx);
 }
 
+static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
+{
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	u32 ring_base = dev_priv->engine[ring_id]->mmio_base;
+	i915_reg_t reg;
+
+	reg = RING_INSTDONE(ring_base);
+	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
+	reg = RING_ACTHD(ring_base);
+	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
+	reg = RING_ACTHD_UDW(ring_base);
+	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
+}
+
 static int shadow_context_status_change(struct notifier_block *nb,
 		unsigned long action, void *data)
 {
@@ -175,9 +189,12 @@ static int shadow_context_status_change(struct notifier_block *nb,
 		atomic_set(&workload->shadow_ctx_active, 1);
 		break;
 	case INTEL_CONTEXT_SCHEDULE_OUT:
-	case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
+		save_ring_hw_state(workload->vgpu, ring_id);
 		atomic_set(&workload->shadow_ctx_active, 0);
 		break;
+	case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
+		save_ring_hw_state(workload->vgpu, ring_id);
+		break;
 	default:
 		WARN_ON(1);
 		return NOTIFY_OK;
@@ -740,6 +757,9 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
 	if (IS_ERR(vgpu->shadow_ctx))
 		return PTR_ERR(vgpu->shadow_ctx);
 
+	if (INTEL_INFO(vgpu->gvt->dev_priv)->has_logical_ring_preemption)
+		vgpu->shadow_ctx->priority = INT_MAX;
+
 	vgpu->shadow_ctx->engine[RCS].initialised = true;
 
 	bitmap_zero(vgpu->shadow_ctx_desc_updated, I915_NUM_ENGINES);
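
The new emulate_pci_rom_bar_write() handler answers the standard PCI expansion ROM sizing handshake: the guest writes all address bits as ones and reads the register back to learn the ROM size. Below is a minimal standalone sketch of that behavior, not the driver code itself; the toy register variable and helper name are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define PCI_ROM_ADDRESS_MASK	(~0x7ffu)	/* address bits of the 32-bit ROM BAR */

static uint32_t rom_bar;	/* toy stand-in for the vGPU's config-space dword */

/* Mirrors the patch's logic: a sizing probe (all address bits set) is answered
 * with 0 because the vGPU exposes no ROM; any other value is stored normally. */
static void rom_bar_write(uint32_t val)
{
	if ((val & PCI_ROM_ADDRESS_MASK) == PCI_ROM_ADDRESS_MASK)
		rom_bar = 0;	/* size 0 -> guest concludes "no expansion ROM" */
	else
		rom_bar = val;
}

int main(void)
{
	rom_bar_write(0xfffff800);	/* guest sizing probe */
	printf("ROM BAR after probe: %#x\n", rom_bar);	/* prints 0 */
	return 0;
}

Clearing the ROM BAR dword in intel_vgpu_init_cfg_space() matches this: the guest sees a zeroed register from the start, and every later probe keeps it at zero.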
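
The reworked mmio_read_from_hw() touches real hardware only when that is safe: the offset is not a ring register at all, the ring is currently owned by this vGPU, or the register is one of the ring timestamps. The sketch below restates that decision in a self-contained form; the ring base values and timestamp offsets are assumptions for illustration, not taken from the patch.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative ring MMIO bases; real values come from dev_priv->engine[]. */
enum { RCS = 0, VCS, BCS, NUM_RINGS };
static const uint32_t ring_base[NUM_RINGS] = { 0x2000, 0x12000, 0x22000 };

/* Assumed per-ring timestamp register offsets for this sketch. */
#define RING_TIMESTAMP(base)		((base) + 0x358)
#define RING_TIMESTAMP_UDW(base)	((base) + 0x35c)

/* Same idea as intel_gvt_render_mmio_to_ring_id(): each ring's register block
 * is 4 KiB, so masking the low 12 bits of the offset yields the ring base. */
static int mmio_to_ring_id(uint32_t offset)
{
	uint32_t base = offset & ~0xfffu;
	int id;

	for (id = 0; id < NUM_RINGS; id++)
		if (ring_base[id] == base)
			return id;
	return -1;
}

/* Decision taken by the patched mmio_read_from_hw(): read the physical
 * register only in the three cases listed in its comment. */
static bool should_read_hw(uint32_t offset, const bool vgpu_owns_ring[NUM_RINGS])
{
	int ring_id = mmio_to_ring_id(offset);

	if (ring_id < 0)			/* a. not a ring register */
		return true;
	if (vgpu_owns_ring[ring_id])		/* b. this vGPU is on the ring now */
		return true;
	return offset == RING_TIMESTAMP(ring_base[ring_id]) ||		/* c. timestamp */
	       offset == RING_TIMESTAMP_UDW(ring_base[ring_id]);
}

In every other case the cached virtual register value is returned, which avoids reading hardware state that belongs to a different vGPU's workload.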
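
save_ring_hw_state() snapshots the ring's progress registers (INSTDONE, ACTHD, ACTHD_UDW) into the vGPU's virtual register file whenever a GVT context is scheduled out or preempted, so a guest hangcheck that polls those registers still observes its own workload's progress. A hedged sketch of the same idea follows; the register offsets, the vreg array, and the hw_read() accessor are stand-ins, not the kernel's API.

#include <stdint.h>

/* Assumed register offsets relative to the ring base, for illustration only. */
#define RING_INSTDONE(base)	((base) + 0x6c)
#define RING_ACTHD(base)	((base) + 0x74)
#define RING_ACTHD_UDW(base)	((base) + 0x5c)

static uint32_t vreg[0x40000];		/* stand-in for the vGPU's virtual regs */

static uint32_t hw_read(uint32_t reg)	/* stand-in for I915_READ_FW() */
{
	return reg;			/* dummy value; real code reads MMIO */
}

/* On CONTEXT_SCHEDULE_OUT / _PREEMPTED the patch copies the live values of
 * these registers into the vGPU view before the ring moves on to other work. */
static void save_ring_hw_state_sketch(uint32_t ring_base)
{
	const uint32_t regs[] = {
		RING_INSTDONE(ring_base),
		RING_ACTHD(ring_base),
		RING_ACTHD_UDW(ring_base),
	};
	unsigned int i;

	for (i = 0; i < sizeof(regs) / sizeof(regs[0]); i++)
		vreg[regs[i] / 4] = hw_read(regs[i]);
}

The related hunk that raises vgpu->shadow_ctx->priority to INT_MAX on parts with logical ring preemption appears to point the same way: GVT shadow contexts are meant to win preemption decisions against ordinary host requests.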