author     Mauro Carvalho Chehab <mchehab+samsung@kernel.org>  2019-06-11 19:09:28 +0300
committer  Mauro Carvalho Chehab <mchehab+samsung@kernel.org>  2019-06-11 19:09:28 +0300
commit     5800571960234f9d1f1011bf135799b2014d4268 (patch)
tree       11a45f08d0e1e8db0c36195732d822058aa29bb7 /drivers/gpu/drm/i915/gvt
parent     d17589afa97061440c0a161775672a8a7bfa9d12 (diff)
parent     d1fdb6d8f6a4109a4263176c84b899076a5f8008 (diff)
Merge tag 'v5.2-rc4' into media/master
There are some conflicts due to the SPDX changes, and more patches being
merged via the media tree touch the same files. So, let's merge back from
upstream and address those conflicts.

Linux 5.2-rc4

* tag 'v5.2-rc4': (767 commits)
  Linux 5.2-rc4
  MAINTAINERS: Karthikeyan Ramasubramanian is MIA
  i2c: xiic: Add max_read_len quirk
  lockref: Limit number of cmpxchg loop retries
  uaccess: add noop untagged_addr definition
  x86/insn-eval: Fix use-after-free access to LDT entry
  kbuild: use more portable 'command -v' for cc-cross-prefix
  s390/unwind: correct stack switching during unwind
  block, bfq: add weight symlink to the bfq.weight cgroup parameter
  cgroup: let a symlink too be created with a cftype file
  drm/nouveau/secboot/gp10[2467]: support newer FW to fix SEC2 failures on some boards
  drm/nouveau/secboot: enable loading of versioned LS PMU/SEC2 ACR msgqueue FW
  drm/nouveau/secboot: split out FW version-specific LS function pointers
  drm/nouveau/secboot: pass max supported FW version to LS load funcs
  drm/nouveau/core: support versioned firmware loading
  drm/nouveau/core: pass subdev into nvkm_firmware_get, rather than device
  block: free sched's request pool in blk_cleanup_queue
  pktgen: do not sleep with the thread lock held.
  net: mvpp2: Use strscpy to handle stat strings
  net: rds: fix memory leak in rds_ib_flush_mr_pool
  ...

Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
Diffstat (limited to 'drivers/gpu/drm/i915/gvt')
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c   2
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c         38
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c    49
-rw-r--r--  drivers/gpu/drm/i915/gvt/reg.h          2
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c   25
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.h    1
6 files changed, 99 insertions, 18 deletions
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 5cb59c0b4bbe..de5347725564 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -2530,7 +2530,7 @@ static const struct cmd_info cmd_info[] = {
0, 12, NULL},
{"VEB_DI_IECP", OP_VEB_DNDI_IECP_STATE, F_LEN_VAR, R_VECS, D_BDW_PLUS,
- 0, 20, NULL},
+ 0, 12, NULL},
};
static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e)
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 244ad1729764..53115bdae12b 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -53,13 +53,19 @@ static int preallocated_oos_pages = 8192;
*/
bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
{
- if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
- && !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
- gvt_vgpu_err("invalid range gmadr 0x%llx size 0x%x\n",
- addr, size);
- return false;
- }
- return true;
+ if (size == 0)
+ return vgpu_gmadr_is_valid(vgpu, addr);
+
+ if (vgpu_gmadr_is_aperture(vgpu, addr) &&
+ vgpu_gmadr_is_aperture(vgpu, addr + size - 1))
+ return true;
+ else if (vgpu_gmadr_is_hidden(vgpu, addr) &&
+ vgpu_gmadr_is_hidden(vgpu, addr + size - 1))
+ return true;
+
+ gvt_dbg_mm("Invalid ggtt range at 0x%llx, size: 0x%x\n",
+ addr, size);
+ return false;
}
/* translate a guest gmadr to host gmadr */
@@ -942,7 +948,16 @@ static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
&& e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
- cur_pt_type = get_next_pt_type(e->type) + 1;
+ cur_pt_type = get_next_pt_type(e->type);
+
+ if (!gtt_type_is_pt(cur_pt_type) ||
+ !gtt_type_is_pt(cur_pt_type + 1)) {
+ WARN(1, "Invalid page table type, cur_pt_type is: %d\n", cur_pt_type);
+ return -EINVAL;
+ }
+
+ cur_pt_type += 1;
+
if (ops->get_pfn(e) ==
vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
return 0;
@@ -1102,6 +1117,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
err_free_spt:
ppgtt_free_spt(spt);
+ spt = NULL;
err:
gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
spt, we->val64, we->type);
@@ -2183,7 +2199,8 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
unsigned long gma, gfn;
- struct intel_gvt_gtt_entry e, m;
+ struct intel_gvt_gtt_entry e = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
+ struct intel_gvt_gtt_entry m = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
dma_addr_t dma_addr;
int ret;
struct intel_gvt_partial_pte *partial_pte, *pos, *n;
@@ -2250,7 +2267,8 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
if (!partial_update && (ops->test_present(&e))) {
gfn = ops->get_pfn(&e);
- m = e;
+ m.val64 = e.val64;
+ m.type = e.type;
/* one PTE update may be issued in multiple writes and the
* first write may not construct a valid gfn
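
The gtt.c change above tightens intel_gvt_ggtt_validate_range(): a non-empty range is now accepted only when it lies entirely inside a single GGTT window (the mappable aperture or the hidden high region), rather than merely having two individually valid end points. A minimal standalone sketch of that check, using made-up window bounds and an in_window() helper in place of the real vgpu_gmadr_is_aperture()/vgpu_gmadr_is_hidden() helpers:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical window bounds; in GVT-g these come from the vGPU's GM allocation. */
#define APERTURE_BASE 0x00000000ULL
#define APERTURE_SIZE 0x10000000ULL
#define HIDDEN_BASE   0x80000000ULL
#define HIDDEN_SIZE   0x40000000ULL

static bool in_window(uint64_t addr, uint64_t base, uint64_t size)
{
	return addr >= base && addr - base < size;
}

/*
 * Same shape as the new intel_gvt_ggtt_validate_range(): a zero-size range
 * only needs a valid start address; otherwise the first and last byte must
 * fall in the same window, so a range straddling the gap between the
 * aperture and the hidden region is rejected.
 */
static bool ggtt_range_valid(uint64_t addr, uint32_t size)
{
	if (size == 0)
		return in_window(addr, APERTURE_BASE, APERTURE_SIZE) ||
		       in_window(addr, HIDDEN_BASE, HIDDEN_SIZE);

	if (in_window(addr, APERTURE_BASE, APERTURE_SIZE) &&
	    in_window(addr + size - 1, APERTURE_BASE, APERTURE_SIZE))
		return true;

	return in_window(addr, HIDDEN_BASE, HIDDEN_SIZE) &&
	       in_window(addr + size - 1, HIDDEN_BASE, HIDDEN_SIZE);
}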
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index e09bd6e0cc4d..a6ade66349bd 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -464,6 +464,8 @@ static i915_reg_t force_nonpriv_white_list[] = {
_MMIO(0x2690),
_MMIO(0x2694),
_MMIO(0x2698),
+ _MMIO(0x2754),
+ _MMIO(0x28a0),
_MMIO(0x4de0),
_MMIO(0x4de4),
_MMIO(0x4dfc),
@@ -1690,8 +1692,22 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
bool enable_execlist;
int ret;
+ (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(1);
+ if (IS_COFFEELAKE(vgpu->gvt->dev_priv))
+ (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2);
write_vreg(vgpu, offset, p_data, bytes);
+ if (data & _MASKED_BIT_ENABLE(1)) {
+ enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
+ return 0;
+ }
+
+ if (IS_COFFEELAKE(vgpu->gvt->dev_priv) &&
+ data & _MASKED_BIT_ENABLE(2)) {
+ enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
+ return 0;
+ }
+
/* when PPGTT mode enabled, we will check if guest has called
* pvinfo, if not, we will treat this guest as non-gvtg-aware
* guest, and stop emulating its cfg space, mmio, gtt, etc.
@@ -1773,6 +1789,21 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
return 0;
}
+static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data,
+ unsigned int bytes)
+{
+ u32 data = *(u32 *)p_data;
+
+ (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(0x18);
+ write_vreg(vgpu, offset, p_data, bytes);
+
+ if (data & _MASKED_BIT_ENABLE(0x10) || data & _MASKED_BIT_ENABLE(0x8))
+ enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
+
+ return 0;
+}
+
#define MMIO_F(reg, s, f, am, rm, d, r, w) do { \
ret = new_mmio_info(gvt, i915_mmio_reg_offset(reg), \
f, s, am, rm, d, r, w); \
@@ -1893,7 +1924,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(_MMIO(0x20dc), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0x2088), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x20e4), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(FF_SLICE_CS_CHICKEN2, D_ALL,
+ F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0x2470), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
@@ -2997,7 +3029,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(CSR_HTP_SKL, D_SKL_PLUS);
MMIO_D(CSR_LAST_WRITE, D_SKL_PLUS);
- MMIO_D(BDW_SCRATCH1, D_SKL_PLUS);
+ MMIO_DFH(BDW_SCRATCH1, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_D(SKL_DFSM, D_SKL_PLUS);
MMIO_D(DISPIO_CR_TX_BMU_CR0, D_SKL_PLUS);
@@ -3010,8 +3042,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(RPM_CONFIG0, D_SKL_PLUS);
MMIO_D(_MMIO(0xd08), D_SKL_PLUS);
MMIO_D(RC6_LOCATION, D_SKL_PLUS);
- MMIO_DFH(GEN7_FF_SLICE_CS_CHICKEN1, D_SKL_PLUS, F_MODE_MASK,
- NULL, NULL);
+ MMIO_DFH(GEN7_FF_SLICE_CS_CHICKEN1, D_SKL_PLUS,
+ F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GEN9_CS_DEBUG_MODE1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
@@ -3030,7 +3062,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(_MMIO(0x46520), D_SKL_PLUS);
MMIO_D(_MMIO(0xc403c), D_SKL_PLUS);
- MMIO_D(_MMIO(0xb004), D_SKL_PLUS);
+ MMIO_DFH(GEN8_GARBCNTL, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
MMIO_D(_MMIO(0x65900), D_SKL_PLUS);
@@ -3059,7 +3091,10 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_C)), D_SKL_PLUS);
MMIO_D(_MMIO(0x44500), D_SKL_PLUS);
- MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+#define CSFE_CHICKEN1_REG(base) _MMIO((base) + 0xD4)
+ MMIO_RING_DFH(CSFE_CHICKEN1_REG, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
+ NULL, csfe_chicken1_mmio_write);
+#undef CSFE_CHICKEN1_REG
MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
@@ -3239,7 +3274,7 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt)
MMIO_D(GEN8_PUSHBUS_ENABLE, D_BXT);
MMIO_D(GEN8_PUSHBUS_SHIFT, D_BXT);
MMIO_D(GEN6_GFXPAUSE, D_BXT);
- MMIO_D(GEN8_L3SQCREG1, D_BXT);
+ MMIO_DFH(GEN8_L3SQCREG1, D_BXT, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GEN9_CTX_PREEMPT_REG, D_BXT, F_CMD_ACCESS, NULL, NULL);
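
The new handlers above (the ring_mode_mmio_write() additions and csfe_chicken1_mmio_write()) rely on the i915 masked-register convention, where the upper 16 bits of a 32-bit write select which of the lower 16 bits the write actually changes. A rough standalone sketch of that convention, assuming the usual (bits << 16) | bits encoding behind _MASKED_BIT_ENABLE(); the masked_write() helper here is illustrative, not the kernel's:

#include <stdint.h>

/*
 * i915-style "masked" register write (illustrative, not the kernel macros):
 * the upper 16 bits of the written value say which of the lower 16 bits are
 * allowed to change, so MASKED_BIT_ENABLE(bits) is (bits << 16) | bits and
 * MASKED_BIT_DISABLE(bits) is bits << 16.
 */
#define MASKED_BIT_ENABLE(bits)  ((((uint32_t)(bits)) << 16) | (uint32_t)(bits))
#define MASKED_BIT_DISABLE(bits) (((uint32_t)(bits)) << 16)

/* Apply a masked write on top of the register's current contents. */
static uint32_t masked_write(uint32_t cur, uint32_t wr)
{
	uint32_t mask = wr >> 16;

	return (cur & ~mask) | (wr & mask);
}

The handlers use the same idea in reverse: before the guest's value reaches the vreg they strip enable requests for bits GVT-g does not emulate, and a guest write that did try to set them drops the vGPU into failsafe mode.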
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
index 33aaa14bfdde..5b66e14c5b7b 100644
--- a/drivers/gpu/drm/i915/gvt/reg.h
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -102,6 +102,8 @@
#define FORCEWAKE_ACK_MEDIA_GEN9_REG 0x0D88
#define FORCEWAKE_ACK_HSW_REG 0x130044
+#define RB_HEAD_WRAP_CNT_MAX ((1 << 11) - 1)
+#define RB_HEAD_WRAP_CNT_OFF 21
#define RB_HEAD_OFF_MASK ((1U << 21) - (1U << 2))
#define RB_TAIL_OFF_MASK ((1U << 21) - (1U << 3))
#define RB_TAIL_SIZE_MASK ((1U << 21) - (1U << 12))
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 13632dba8b2a..0f919f0a43d4 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -812,10 +812,31 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
void *src;
unsigned long context_gpa, context_page_num;
int i;
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ u32 ring_base;
+ u32 head, tail;
+ u16 wrap_count;
gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
workload->ctx_desc.lrca);
+ head = workload->rb_head;
+ tail = workload->rb_tail;
+ wrap_count = workload->guest_rb_head >> RB_HEAD_WRAP_CNT_OFF;
+
+ if (tail < head) {
+ if (wrap_count == RB_HEAD_WRAP_CNT_MAX)
+ wrap_count = 0;
+ else
+ wrap_count += 1;
+ }
+
+ head = (wrap_count << RB_HEAD_WRAP_CNT_OFF) | tail;
+
+ ring_base = dev_priv->engine[workload->ring_id]->mmio_base;
+ vgpu_vreg_t(vgpu, RING_TAIL(ring_base)) = tail;
+ vgpu_vreg_t(vgpu, RING_HEAD(ring_base)) = head;
+
context_page_num = rq->engine->context_size;
context_page_num = context_page_num >> PAGE_SHIFT;
@@ -1415,6 +1436,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
u64 ring_context_gpa;
u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
+ u32 guest_head;
int ret;
ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
@@ -1430,6 +1452,8 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(ring_tail.val), &tail, 4);
+ guest_head = head;
+
head &= RB_HEAD_OFF_MASK;
tail &= RB_TAIL_OFF_MASK;
@@ -1462,6 +1486,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
workload->ctx_desc = *desc;
workload->ring_context_gpa = ring_context_gpa;
workload->rb_head = head;
+ workload->guest_rb_head = guest_head;
workload->rb_tail = tail;
workload->rb_start = start;
workload->rb_ctl = ctl;
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 90c6756f5453..c50d14a9ce85 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -100,6 +100,7 @@ struct intel_vgpu_workload {
struct execlist_ctx_descriptor_format ctx_desc;
struct execlist_ring_context *ring_context;
unsigned long rb_head, rb_tail, rb_ctl, rb_start, rb_len;
+ unsigned long guest_rb_head;
bool restore_inhibit;
struct intel_vgpu_elsp_dwords elsp_dwords;
bool emulate_schedule_in;