Diffstat (limited to 'drivers/gpu/drm/i915/intel_lrc.c')
-rw-r--r-- | drivers/gpu/drm/i915/intel_lrc.c | 176 |
1 file changed, 83 insertions(+), 93 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 88e12bdf79e2..3aa614731d7e 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -190,16 +190,21 @@
 #define GEN8_CTX_L3LLC_COHERENT (1<<5)
 #define GEN8_CTX_PRIVILEGE (1<<8)

-#define ASSIGN_CTX_PDP(ppgtt, reg_state, n) { \
+#define ASSIGN_CTX_REG(reg_state, pos, reg, val) do { \
+        (reg_state)[(pos)+0] = i915_mmio_reg_offset(reg); \
+        (reg_state)[(pos)+1] = (val); \
+} while (0)
+
+#define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do { \
         const u64 _addr = i915_page_dir_dma_addr((ppgtt), (n)); \
         reg_state[CTX_PDP ## n ## _UDW+1] = upper_32_bits(_addr); \
         reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \
-}
+} while (0)

-#define ASSIGN_CTX_PML4(ppgtt, reg_state) { \
+#define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \
         reg_state[CTX_PDP0_UDW + 1] = upper_32_bits(px_dma(&ppgtt->pml4)); \
         reg_state[CTX_PDP0_LDW + 1] = lower_32_bits(px_dma(&ppgtt->pml4)); \
-}
+} while (0)

 enum {
         ADVANCED_CONTEXT = 0,
@@ -284,8 +289,8 @@ static bool disable_lite_restore_wa(struct intel_engine_cs *ring)
 {
         struct drm_device *dev = ring->dev;

-        return ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) ||
-                (IS_BROXTON(dev) && INTEL_REVID(dev) == BXT_REVID_A0)) &&
+        return (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
+                IS_BXT_REVID(dev, 0, BXT_REVID_A1)) &&
                (ring->id == VCS || ring->id == VCS2);
 }

@@ -367,7 +372,7 @@ static int execlists_update_context(struct drm_i915_gem_request *rq)
         WARN_ON(!i915_gem_obj_is_pinned(ctx_obj));
         WARN_ON(!i915_gem_obj_is_pinned(rb_obj));

-        page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
+        page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
         reg_state = kmap_atomic(page);

         reg_state[CTX_RING_TAIL+1] = rq->tail;
@@ -921,7 +926,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,

                 intel_logical_ring_emit(ringbuf, MI_NOOP);
                 intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
-                intel_logical_ring_emit(ringbuf, INSTPM);
+                intel_logical_ring_emit_reg(ringbuf, INSTPM);
                 intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
                 intel_logical_ring_advance(ringbuf);

@@ -1096,7 +1101,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)

         intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
         for (i = 0; i < w->count; i++) {
-                intel_logical_ring_emit(ringbuf, w->reg[i].addr);
+                intel_logical_ring_emit_reg(ringbuf, w->reg[i].addr);
                 intel_logical_ring_emit(ringbuf, w->reg[i].value);
         }
         intel_logical_ring_emit(ringbuf, MI_NOOP);
@@ -1120,6 +1125,8 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
                 batch[__index] = (cmd); \
         } while (0)

+#define wa_ctx_emit_reg(batch, index, reg) \
+        wa_ctx_emit((batch), (index), i915_mmio_reg_offset(reg))

 /*
  * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after
@@ -1149,17 +1156,17 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring,
          * this batch updates GEN8_L3SQCREG4 with default value we need to
          * set this bit here to retain the WA during flush.
          */
-        if (IS_SKYLAKE(ring->dev) && INTEL_REVID(ring->dev) <= SKL_REVID_E0)
+        if (IS_SKL_REVID(ring->dev, 0, SKL_REVID_E0))
                 l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;

         wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
                                    MI_SRM_LRM_GLOBAL_GTT));
-        wa_ctx_emit(batch, index, GEN8_L3SQCREG4);
+        wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
         wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
         wa_ctx_emit(batch, index, 0);

         wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
-        wa_ctx_emit(batch, index, GEN8_L3SQCREG4);
+        wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
         wa_ctx_emit(batch, index, l3sqc4_flush);

         wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
@@ -1172,7 +1179,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring,

         wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
                                    MI_SRM_LRM_GLOBAL_GTT));
-        wa_ctx_emit(batch, index, GEN8_L3SQCREG4);
+        wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
         wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
         wa_ctx_emit(batch, index, 0);

@@ -1314,8 +1321,8 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *ring,
         uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);

         /* WaDisableCtxRestoreArbitration:skl,bxt */
-        if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_D0)) ||
-            (IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0)))
+        if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
+            IS_BXT_REVID(dev, 0, BXT_REVID_A1))
                 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);

         /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
@@ -1340,18 +1347,18 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *ring,
         uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);

         /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
-        if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_B0)) ||
-            (IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0))) {
+        if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
+            IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
                 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
-                wa_ctx_emit(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
+                wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
                 wa_ctx_emit(batch, index,
                             _MASKED_BIT_ENABLE(DISABLE_PIXEL_MASK_CAMMING));
                 wa_ctx_emit(batch, index, MI_NOOP);
         }

         /* WaDisableCtxRestoreArbitration:skl,bxt */
-        if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_D0)) ||
-            (IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0)))
+        if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
+            IS_BXT_REVID(dev, 0, BXT_REVID_A1))
                 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);

         wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
@@ -1418,7 +1425,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *ring)
                 return ret;
         }

-        page = i915_gem_object_get_page(wa_ctx->obj, 0);
+        page = i915_gem_object_get_dirty_page(wa_ctx->obj, 0);
         batch = kmap_atomic(page);
         offset = 0;

@@ -1472,12 +1479,6 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
         I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
         I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);

-        if (ring->status_page.obj) {
-                I915_WRITE(RING_HWS_PGA(ring->mmio_base),
-                           (u32)ring->status_page.gfx_addr);
-                POSTING_READ(RING_HWS_PGA(ring->mmio_base));
-        }
-
         I915_WRITE(RING_MODE_GEN7(ring),
                    _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
                    _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
@@ -1562,9 +1563,9 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
         for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
                 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);

-                intel_logical_ring_emit(ringbuf, GEN8_RING_PDP_UDW(ring, i));
+                intel_logical_ring_emit_reg(ringbuf, GEN8_RING_PDP_UDW(ring, i));
                 intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr));
-                intel_logical_ring_emit(ringbuf, GEN8_RING_PDP_LDW(ring, i));
+                intel_logical_ring_emit_reg(ringbuf, GEN8_RING_PDP_LDW(ring, i));
                 intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr));
         }

@@ -1893,8 +1894,10 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *ring)

         dev_priv = ring->dev->dev_private;

-        intel_logical_ring_stop(ring);
-        WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
+        if (ring->buffer) {
+                intel_logical_ring_stop(ring);
+                WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
+        }

         if (ring->cleanup)
                 ring->cleanup(ring);
@@ -1908,6 +1911,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
         }

         lrc_destroy_wa_ctx_obj(ring);
+        ring->dev = NULL;
 }

 static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
@@ -1923,17 +1927,18 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin
         i915_gem_batch_pool_init(dev, &ring->batch_pool);
         init_waitqueue_head(&ring->irq_queue);

+        INIT_LIST_HEAD(&ring->buffers);
         INIT_LIST_HEAD(&ring->execlist_queue);
         INIT_LIST_HEAD(&ring->execlist_retired_req_list);
         spin_lock_init(&ring->execlist_lock);

         ret = i915_cmd_parser_init_ring(ring);
         if (ret)
-                return ret;
+                goto error;

         ret = intel_lr_context_deferred_alloc(ring->default_context, ring);
         if (ret)
-                return ret;
+                goto error;

         /* As this is the default context, always pin it */
         ret = intel_lr_context_do_pin(
@@ -1944,9 +1949,13 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin
                 DRM_ERROR(
                         "Failed to pin and map ringbuffer %s: %d\n",
                         ring->name, ret);
-                return ret;
+                goto error;
         }

+        return 0;
+
+error:
+        intel_logical_ring_cleanup(ring);
         return ret;
 }

@@ -1972,7 +1981,7 @@ static int logical_render_ring_init(struct drm_device *dev)
         ring->init_hw = gen8_init_render_ring;
         ring->init_context = gen8_init_rcs_context;
         ring->cleanup = intel_fini_pipe_control;
-        if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
+        if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
                 ring->get_seqno = bxt_a_get_seqno;
                 ring->set_seqno = bxt_a_set_seqno;
         } else {
@@ -2024,7 +2033,7 @@ static int logical_bsd_ring_init(struct drm_device *dev)
                 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;

         ring->init_hw = gen8_init_common_ring;
-        if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
+        if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
                 ring->get_seqno = bxt_a_get_seqno;
                 ring->set_seqno = bxt_a_set_seqno;
         } else {
@@ -2079,7 +2088,7 @@ static int logical_blt_ring_init(struct drm_device *dev)
                 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT;

         ring->init_hw = gen8_init_common_ring;
-        if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
+        if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
                 ring->get_seqno = bxt_a_get_seqno;
                 ring->set_seqno = bxt_a_set_seqno;
         } else {
@@ -2109,7 +2118,7 @@ static int logical_vebox_ring_init(struct drm_device *dev)
                 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT;

         ring->init_hw = gen8_init_common_ring;
-        if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
+        if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
                 ring->get_seqno = bxt_a_get_seqno;
                 ring->set_seqno = bxt_a_set_seqno;
         } else {
@@ -2255,7 +2264,7 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o

         /* The second page of the context object contains some fields which must
          * be set up prior to the first execution. */
-        page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
+        page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
         reg_state = kmap_atomic(page);

         /* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
@@ -2263,46 +2272,31 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
          * only for the first context restore: on a subsequent save, the GPU will
          * recreate this batchbuffer with new values (including all the missing
          * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
-        if (ring->id == RCS)
-                reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(14);
-        else
-                reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(11);
-        reg_state[CTX_LRI_HEADER_0] |= MI_LRI_FORCE_POSTED;
-        reg_state[CTX_CONTEXT_CONTROL] = RING_CONTEXT_CONTROL(ring);
-        reg_state[CTX_CONTEXT_CONTROL+1] =
-                _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
-                                   CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
-                                   CTX_CTRL_RS_CTX_ENABLE);
-        reg_state[CTX_RING_HEAD] = RING_HEAD(ring->mmio_base);
-        reg_state[CTX_RING_HEAD+1] = 0;
-        reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
-        reg_state[CTX_RING_TAIL+1] = 0;
-        reg_state[CTX_RING_BUFFER_START] = RING_START(ring->mmio_base);
+        reg_state[CTX_LRI_HEADER_0] =
+                MI_LOAD_REGISTER_IMM(ring->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED;
+        ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(ring),
+                       _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
                                           CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
                                           CTX_CTRL_RS_CTX_ENABLE));
+        ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(ring->mmio_base), 0);
+        ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(ring->mmio_base), 0);
         /* Ring buffer start address is not known until the buffer is pinned.
          * It is written to the context image in execlists_update_context()
          */
-        reg_state[CTX_RING_BUFFER_CONTROL] = RING_CTL(ring->mmio_base);
-        reg_state[CTX_RING_BUFFER_CONTROL+1] =
-                ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID;
-        reg_state[CTX_BB_HEAD_U] = ring->mmio_base + 0x168;
-        reg_state[CTX_BB_HEAD_U+1] = 0;
-        reg_state[CTX_BB_HEAD_L] = ring->mmio_base + 0x140;
-        reg_state[CTX_BB_HEAD_L+1] = 0;
-        reg_state[CTX_BB_STATE] = ring->mmio_base + 0x110;
-        reg_state[CTX_BB_STATE+1] = (1<<5);
-        reg_state[CTX_SECOND_BB_HEAD_U] = ring->mmio_base + 0x11c;
-        reg_state[CTX_SECOND_BB_HEAD_U+1] = 0;
-        reg_state[CTX_SECOND_BB_HEAD_L] = ring->mmio_base + 0x114;
-        reg_state[CTX_SECOND_BB_HEAD_L+1] = 0;
-        reg_state[CTX_SECOND_BB_STATE] = ring->mmio_base + 0x118;
-        reg_state[CTX_SECOND_BB_STATE+1] = 0;
+        ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START, RING_START(ring->mmio_base), 0);
+        ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL, RING_CTL(ring->mmio_base),
+                       ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
+        ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U, RING_BBADDR_UDW(ring->mmio_base), 0);
+        ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L, RING_BBADDR(ring->mmio_base), 0);
+        ASSIGN_CTX_REG(reg_state, CTX_BB_STATE, RING_BBSTATE(ring->mmio_base),
+                       RING_BB_PPGTT);
+        ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U, RING_SBBADDR_UDW(ring->mmio_base), 0);
+        ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L, RING_SBBADDR(ring->mmio_base), 0);
+        ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE, RING_SBBSTATE(ring->mmio_base), 0);
         if (ring->id == RCS) {
-                reg_state[CTX_BB_PER_CTX_PTR] = ring->mmio_base + 0x1c0;
-                reg_state[CTX_BB_PER_CTX_PTR+1] = 0;
-                reg_state[CTX_RCS_INDIRECT_CTX] = ring->mmio_base + 0x1c4;
-                reg_state[CTX_RCS_INDIRECT_CTX+1] = 0;
-                reg_state[CTX_RCS_INDIRECT_CTX_OFFSET] = ring->mmio_base + 0x1c8;
-                reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] = 0;
+                ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR, RING_BB_PER_CTX_PTR(ring->mmio_base), 0);
+                ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX, RING_INDIRECT_CTX(ring->mmio_base), 0);
+                ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET, RING_INDIRECT_CTX_OFFSET(ring->mmio_base), 0);
                 if (ring->wa_ctx.obj) {
                         struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx;
                         uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj);
@@ -2319,18 +2313,17 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
                                 0x01;
                 }
         }
-        reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9);
-        reg_state[CTX_LRI_HEADER_1] |= MI_LRI_FORCE_POSTED;
-        reg_state[CTX_CTX_TIMESTAMP] = ring->mmio_base + 0x3a8;
-        reg_state[CTX_CTX_TIMESTAMP+1] = 0;
-        reg_state[CTX_PDP3_UDW] = GEN8_RING_PDP_UDW(ring, 3);
-        reg_state[CTX_PDP3_LDW] = GEN8_RING_PDP_LDW(ring, 3);
-        reg_state[CTX_PDP2_UDW] = GEN8_RING_PDP_UDW(ring, 2);
-        reg_state[CTX_PDP2_LDW] = GEN8_RING_PDP_LDW(ring, 2);
-        reg_state[CTX_PDP1_UDW] = GEN8_RING_PDP_UDW(ring, 1);
-        reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(ring, 1);
-        reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0);
-        reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0);
+        reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;
+        ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP, RING_CTX_TIMESTAMP(ring->mmio_base), 0);
+        /* PDP values well be assigned later if needed */
+        ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(ring, 3), 0);
+        ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(ring, 3), 0);
+        ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(ring, 2), 0);
+        ASSIGN_CTX_REG(reg_state, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(ring, 2), 0);
+        ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(ring, 1), 0);
+        ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(ring, 1), 0);
+        ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(ring, 0), 0);
+        ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(ring, 0), 0);

         if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
                 /* 64b PPGTT (48bit canonical)
@@ -2352,14 +2345,11 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o

         if (ring->id == RCS) {
                 reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
-                reg_state[CTX_R_PWR_CLK_STATE] = GEN8_R_PWR_CLK_STATE;
-                reg_state[CTX_R_PWR_CLK_STATE+1] = make_rpcs(dev);
+                ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
+                               make_rpcs(dev));
         }

         kunmap_atomic(reg_state);
-
-        ctx_obj->dirty = 1;
-        set_page_dirty(page);
         i915_gem_object_unpin_pages(ctx_obj);

         return 0;
@@ -2543,7 +2533,7 @@ void intel_lr_context_reset(struct drm_device *dev,
                         WARN(1, "Failed get_pages for context obj\n");
                         continue;
                 }
-                page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
+                page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
                 reg_state = kmap_atomic(page);

                 reg_state[CTX_RING_HEAD+1] = 0;
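
Note on the macro conversions above: ASSIGN_CTX_PDP, ASSIGN_CTX_PML4 and the new ASSIGN_CTX_REG are wrapped in do { ... } while (0) so that a multi-statement macro behaves as a single statement wherever it is called. Below is a minimal, self-contained sketch of why that matters; the names and values are illustrative only, and unlike the driver's helper (which takes a typed i915_reg_t and goes through i915_mmio_reg_offset()), this stand-in takes a raw offset so it compiles on its own.

#include <stdio.h>

/* Hypothetical position of one register/value pair in a context image. */
#define CTX_EXAMPLE_POS 2

/* Same shape as the patch's helper, but with a plain offset argument.
 * The do/while(0) wrapper makes the two stores act as one statement. */
#define ASSIGN_CTX_REG(reg_state, pos, reg_offset, val) do { \
        (reg_state)[(pos) + 0] = (reg_offset); \
        (reg_state)[(pos) + 1] = (val); \
} while (0)

int main(void)
{
        unsigned int reg_state[8] = { 0 };
        int initialise = 1;

        /* With a bare { ... } macro body, the ';' after the macro call would
         * terminate this if before the else and the code would not compile;
         * the do/while(0) form keeps the pair attached to the if branch. */
        if (initialise)
                ASSIGN_CTX_REG(reg_state, CTX_EXAMPLE_POS, 0x2244, 0x1234);
        else
                printf("context image left untouched\n");

        printf("offset=0x%x value=0x%x\n",
               reg_state[CTX_EXAMPLE_POS], reg_state[CTX_EXAMPLE_POS + 1]);
        return 0;
}

The same idea drives wa_ctx_emit_reg() in the patch: it funnels a typed register through the offset helper before emitting, so call sites cannot silently mix register structs with raw DWORD values.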