Diffstat (limited to 'drivers/gpu/drm/i915/gvt/cmd_parser.c')
-rw-r--r-- | drivers/gpu/drm/i915/gvt/cmd_parser.c | 264
1 file changed, 153 insertions, 111 deletions
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 49af94627c8a..c8454ac43fae 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -709,18 +709,13 @@ static void parser_exec_state_dump(struct parser_exec_state *s)
 
 	print_opcode(cmd_val(s, 0), s->ring_id);
 
-	/* print the whole page to trace */
-	pr_err("        ip_va=%p: %08x %08x %08x %08x\n",
-			s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
-			cmd_val(s, 2), cmd_val(s, 3));
-
 	s->ip_va = (u32 *)((((u64)s->ip_va) >> 12) << 12);
 
 	while (cnt < 1024) {
-		pr_err("ip_va=%p: ", s->ip_va);
+		gvt_dbg_cmd("ip_va=%p: ", s->ip_va);
 		for (i = 0; i < 8; i++)
-			pr_err("%08x ", cmd_val(s, i));
-		pr_err("\n");
+			gvt_dbg_cmd("%08x ", cmd_val(s, i));
+		gvt_dbg_cmd("\n");
 
 		s->ip_va += 8 * sizeof(u32);
 		cnt += 8;
@@ -825,11 +820,26 @@ static int force_nonpriv_reg_handler(struct parser_exec_state *s,
 	if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data)) {
 		gvt_err("Unexpected forcenonpriv 0x%x LRI write, value=0x%x\n",
 			offset, data);
-		return -EINVAL;
+		return -EPERM;
 	}
 
 	return 0;
 }
 
+static inline bool is_mocs_mmio(unsigned int offset)
+{
+	return ((offset >= 0xc800) && (offset <= 0xcff8)) ||
+		((offset >= 0xb020) && (offset <= 0xb0a0));
+}
+
+static int mocs_cmd_reg_handler(struct parser_exec_state *s,
+				unsigned int offset, unsigned int index)
+{
+	if (!is_mocs_mmio(offset))
+		return -EINVAL;
+	vgpu_vreg(s->vgpu, offset) = cmd_val(s, index + 1);
+	return 0;
+}
+
 static int cmd_reg_handler(struct parser_exec_state *s,
 		unsigned int offset, unsigned int index, char *cmd)
@@ -839,7 +849,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
 	if (offset + 4 > gvt->device_info.mmio_size) {
 		gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
 				cmd, offset);
-		return -EINVAL;
+		return -EFAULT;
 	}
 
 	if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
@@ -853,10 +863,14 @@ static int cmd_reg_handler(struct parser_exec_state *s,
 		return 0;
 	}
 
-	if (is_force_nonpriv_mmio(offset) &&
-	    force_nonpriv_reg_handler(s, offset, index))
+	if (is_mocs_mmio(offset) &&
+	    mocs_cmd_reg_handler(s, offset, index))
 		return -EINVAL;
 
+	if (is_force_nonpriv_mmio(offset) &&
+	    force_nonpriv_reg_handler(s, offset, index))
+		return -EPERM;
+
 	if (offset == i915_mmio_reg_offset(DERRMR) ||
 		offset == i915_mmio_reg_offset(FORCEWAKE_MT)) {
 		/* Writing to HW VGT_PVINFO_PAGE offset will be discarded */
@@ -894,11 +908,14 @@ static int cmd_handler_lri(struct parser_exec_state *s)
 				i915_mmio_reg_offset(DERRMR))
 				ret |= 0;
 			else
-				ret |= (cmd_reg_inhibit(s, i)) ? -EINVAL : 0;
+				ret |= (cmd_reg_inhibit(s, i)) ?
+					-EBADRQC : 0;
 		}
 		if (ret)
 			break;
 		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lri");
+		if (ret)
+			break;
 	}
 	return ret;
 }
@@ -912,11 +929,15 @@ static int cmd_handler_lrr(struct parser_exec_state *s)
 		if (IS_BROADWELL(s->vgpu->gvt->dev_priv))
 			ret |= ((cmd_reg_inhibit(s, i) ||
 					(cmd_reg_inhibit(s, i + 1)))) ?
-				-EINVAL : 0;
+				-EBADRQC : 0;
 		if (ret)
 			break;
 		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrr-src");
+		if (ret)
+			break;
 		ret |= cmd_reg_handler(s, cmd_reg(s, i + 1), i, "lrr-dst");
+		if (ret)
+			break;
 	}
 	return ret;
 }
@@ -934,15 +955,19 @@ static int cmd_handler_lrm(struct parser_exec_state *s)
 
 	for (i = 1; i < cmd_len;) {
 		if (IS_BROADWELL(gvt->dev_priv))
-			ret |= (cmd_reg_inhibit(s, i)) ? -EINVAL : 0;
+			ret |= (cmd_reg_inhibit(s, i)) ?
+				-EBADRQC : 0;
 		if (ret)
 			break;
 		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrm");
+		if (ret)
+			break;
 		if (cmd_val(s, 0) & (1 << 22)) {
 			gma = cmd_gma(s, i + 1);
 			if (gmadr_bytes == 8)
 				gma |= (cmd_gma_hi(s, i + 2)) << 32;
 			ret |= cmd_address_audit(s, gma, sizeof(u32), false);
+			if (ret)
+				break;
 		}
 		i += gmadr_dw_number(s) + 1;
 	}
@@ -958,11 +983,15 @@ static int cmd_handler_srm(struct parser_exec_state *s)
 
 	for (i = 1; i < cmd_len;) {
 		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "srm");
+		if (ret)
+			break;
 		if (cmd_val(s, 0) & (1 << 22)) {
 			gma = cmd_gma(s, i + 1);
 			if (gmadr_bytes == 8)
 				gma |= (cmd_gma_hi(s, i + 2)) << 32;
 			ret |= cmd_address_audit(s, gma, sizeof(u32), false);
+			if (ret)
+				break;
 		}
 		i += gmadr_dw_number(s) + 1;
 	}
@@ -1116,7 +1145,7 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
 
 	v = (dword0 & GENMASK(21, 19)) >> 19;
 	if (WARN_ON(v >= ARRAY_SIZE(gen8_plane_code)))
-		return -EINVAL;
+		return -EBADRQC;
 
 	info->pipe = gen8_plane_code[v].pipe;
 	info->plane = gen8_plane_code[v].plane;
@@ -1136,7 +1165,7 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
 		info->surf_reg = SPRSURF(info->pipe);
 	} else {
 		WARN_ON(1);
-		return -EINVAL;
+		return -EBADRQC;
 	}
 	return 0;
 }
@@ -1185,7 +1214,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
 
 	default:
 		gvt_vgpu_err("unknown plane code %d\n", plane);
-		return -EINVAL;
+		return -EBADRQC;
 	}
 
 	info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
@@ -1210,13 +1239,13 @@ static int gen8_check_mi_display_flip(struct parser_exec_state *s,
 		return 0;
 
 	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
-		stride = vgpu_vreg(s->vgpu, info->stride_reg) & GENMASK(9, 0);
-		tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) &
+		stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0);
+		tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) &
 				GENMASK(12, 10)) >> 10;
 	} else {
-		stride = (vgpu_vreg(s->vgpu, info->stride_reg) &
+		stride = (vgpu_vreg_t(s->vgpu, info->stride_reg) &
 				GENMASK(15, 6)) >> 6;
-		tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) & (1 << 10)) >> 10;
+		tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) & (1 << 10)) >> 10;
 	}
 
 	if (stride != info->stride_val)
@@ -1235,21 +1264,21 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
 	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
 	struct intel_vgpu *vgpu = s->vgpu;
 
-	set_mask_bits(&vgpu_vreg(vgpu, info->surf_reg), GENMASK(31, 12),
+	set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12),
 		      info->surf_val << 12);
 	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
-		set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(9, 0),
+		set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(9, 0),
 			      info->stride_val);
-		set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(12, 10),
+		set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(12, 10),
 			      info->tile_val << 10);
 	} else {
-		set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(15, 6),
+		set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(15, 6),
 			      info->stride_val << 6);
-		set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(10, 10),
+		set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(10, 10),
 			      info->tile_val << 10);
 	}
 
-	vgpu_vreg(vgpu, PIPE_FRMCOUNT_G4X(info->pipe))++;
+	vgpu_vreg_t(vgpu, PIPE_FRMCOUNT_G4X(info->pipe))++;
 	intel_vgpu_trigger_virtual_event(vgpu, info->event);
 	return 0;
 }
@@ -1348,10 +1377,13 @@ static unsigned long get_gma_bb_from_cmd(struct parser_exec_state *s, int index)
 {
 	unsigned long addr;
 	unsigned long gma_high, gma_low;
-	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+	struct intel_vgpu *vgpu = s->vgpu;
+	int gmadr_bytes = vgpu->gvt->device_info.gmadr_bytes_in_cmd;
 
-	if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
+	if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8)) {
+		gvt_vgpu_err("invalid gma bytes %d\n", gmadr_bytes);
 		return INTEL_GVT_INVALID_ADDR;
+	}
 
 	gma_low = cmd_val(s, index) & BATCH_BUFFER_ADDR_MASK;
 	if (gmadr_bytes == 4) {
@@ -1374,16 +1406,16 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
 	if (op_size > max_surface_size) {
 		gvt_vgpu_err("command address audit fail name %s\n",
 			s->info->name);
-		return -EINVAL;
+		return -EFAULT;
 	}
 
 	if (index_mode)	{
-		if (guest_gma >= GTT_PAGE_SIZE / sizeof(u64)) {
-			ret = -EINVAL;
+		if (guest_gma >= I915_GTT_PAGE_SIZE / sizeof(u64)) {
+			ret = -EFAULT;
 			goto err;
 		}
 	} else if (!intel_gvt_ggtt_validate_range(vgpu, guest_gma, op_size)) {
-		ret = -EINVAL;
+		ret = -EFAULT;
 		goto err;
 	}
 
@@ -1439,7 +1471,7 @@ static inline int unexpected_cmd(struct parser_exec_state *s)
 
 	gvt_vgpu_err("Unexpected %s in command buffer!\n", s->info->name);
 
-	return -EINVAL;
+	return -EBADRQC;
 }
 
 static int cmd_handler_mi_semaphore_wait(struct parser_exec_state *s)
@@ -1545,10 +1577,10 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
 			return -EFAULT;
 		}
 
-		offset = gma & (GTT_PAGE_SIZE - 1);
+		offset = gma & (I915_GTT_PAGE_SIZE - 1);
 
-		copy_len = (end_gma - gma) >= (GTT_PAGE_SIZE - offset) ?
-			GTT_PAGE_SIZE - offset : end_gma - gma;
+		copy_len = (end_gma - gma) >= (I915_GTT_PAGE_SIZE - offset) ?
+			I915_GTT_PAGE_SIZE - offset : end_gma - gma;
 
 		intel_gvt_hypervisor_read_gpa(vgpu, gpa, va + len, copy_len);
 
@@ -1576,110 +1608,113 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
 	return 1;
 }
 
-static int find_bb_size(struct parser_exec_state *s)
+static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size)
 {
 	unsigned long gma = 0;
 	struct cmd_info *info;
-	int bb_size = 0;
 	uint32_t cmd_len = 0;
-	bool met_bb_end = false;
+	bool bb_end = false;
 	struct intel_vgpu *vgpu = s->vgpu;
 	u32 cmd;
 
+	*bb_size = 0;
+
 	/* get the start gm address of the batch buffer */
 	gma = get_gma_bb_from_cmd(s, 1);
-	cmd = cmd_val(s, 0);
+	if (gma == INTEL_GVT_INVALID_ADDR)
+		return -EFAULT;
 
+	cmd = cmd_val(s, 0);
 	info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
 	if (info == NULL) {
 		gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
 				cmd, get_opcode(cmd, s->ring_id));
-		return -EINVAL;
+		return -EBADRQC;
 	}
 	do {
-		copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
-				gma, gma + 4, &cmd);
+		if (copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
+				gma, gma + 4, &cmd) < 0)
+			return -EFAULT;
 		info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
 		if (info == NULL) {
 			gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
 				cmd, get_opcode(cmd, s->ring_id));
-			return -EINVAL;
+			return -EBADRQC;
 		}
 
 		if (info->opcode == OP_MI_BATCH_BUFFER_END) {
-			met_bb_end = true;
+			bb_end = true;
 		} else if (info->opcode == OP_MI_BATCH_BUFFER_START) {
-			if (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0) {
+			if (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0)
 				/* chained batch buffer */
-				met_bb_end = true;
-			}
+				bb_end = true;
 		}
 		cmd_len = get_cmd_length(info, cmd) << 2;
-		bb_size += cmd_len;
+		*bb_size += cmd_len;
 		gma += cmd_len;
+	} while (!bb_end);
 
-	} while (!met_bb_end);
-
-	return bb_size;
+	return 0;
 }
 
 static int perform_bb_shadow(struct parser_exec_state *s)
 {
-	struct intel_shadow_bb_entry *entry_obj;
 	struct intel_vgpu *vgpu = s->vgpu;
+	struct intel_vgpu_shadow_bb *bb;
 	unsigned long gma = 0;
-	int bb_size;
-	void *dst = NULL;
+	unsigned long bb_size;
 	int ret = 0;
 
 	/* get the start gm address of the batch buffer */
 	gma = get_gma_bb_from_cmd(s, 1);
+	if (gma == INTEL_GVT_INVALID_ADDR)
+		return -EFAULT;
 
-	/* get the size of the batch buffer */
-	bb_size = find_bb_size(s);
-	if (bb_size < 0)
-		return -EINVAL;
+	ret = find_bb_size(s, &bb_size);
+	if (ret)
+		return ret;
 
-	/* allocate shadow batch buffer */
-	entry_obj = kmalloc(sizeof(*entry_obj), GFP_KERNEL);
-	if (entry_obj == NULL)
+	bb = kzalloc(sizeof(*bb), GFP_KERNEL);
+	if (!bb)
 		return -ENOMEM;
 
-	entry_obj->obj =
-		i915_gem_object_create(s->vgpu->gvt->dev_priv,
-				       roundup(bb_size, PAGE_SIZE));
-	if (IS_ERR(entry_obj->obj)) {
-		ret = PTR_ERR(entry_obj->obj);
-		goto free_entry;
+	bb->obj = i915_gem_object_create(s->vgpu->gvt->dev_priv,
+					 roundup(bb_size, PAGE_SIZE));
+	if (IS_ERR(bb->obj)) {
+		ret = PTR_ERR(bb->obj);
+		goto err_free_bb;
 	}
-	entry_obj->len = bb_size;
-	INIT_LIST_HEAD(&entry_obj->list);
 
-	dst = i915_gem_object_pin_map(entry_obj->obj, I915_MAP_WB);
-	if (IS_ERR(dst)) {
-		ret = PTR_ERR(dst);
-		goto put_obj;
-	}
+	ret = i915_gem_obj_prepare_shmem_write(bb->obj, &bb->clflush);
+	if (ret)
+		goto err_free_obj;
 
-	ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false);
-	if (ret) {
-		gvt_vgpu_err("failed to set shadow batch to CPU\n");
-		goto unmap_src;
+	bb->va = i915_gem_object_pin_map(bb->obj, I915_MAP_WB);
+	if (IS_ERR(bb->va)) {
+		ret = PTR_ERR(bb->va);
+		goto err_finish_shmem_access;
 	}
 
-	entry_obj->va = dst;
-	entry_obj->bb_start_cmd_va = s->ip_va;
+	if (bb->clflush & CLFLUSH_BEFORE) {
+		drm_clflush_virt_range(bb->va, bb->obj->base.size);
+		bb->clflush &= ~CLFLUSH_BEFORE;
+	}
 
-	/* copy batch buffer to shadow batch buffer*/
 	ret = copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
 			      gma, gma + bb_size,
-			      dst);
+			      bb->va);
 	if (ret < 0) {
 		gvt_vgpu_err("fail to copy guest ring buffer\n");
-		goto unmap_src;
+		ret = -EFAULT;
+		goto err_unmap;
 	}
 
-	list_add(&entry_obj->list, &s->workload->shadow_bb);
+	INIT_LIST_HEAD(&bb->list);
+	list_add(&bb->list, &s->workload->shadow_bb);
+
+	bb->accessing = true;
+	bb->bb_start_cmd_va = s->ip_va;
+
 	/*
 	 * ip_va saves the virtual address of the shadow batch buffer, while
 	 * ip_gma saves the graphics address of the original batch buffer.
@@ -1688,17 +1723,17 @@ static int perform_bb_shadow(struct parser_exec_state *s)
 	 * buffer's gma in pair. After all, we don't want to pin the shadow
 	 * buffer here (too early).
 	 */
-	s->ip_va = dst;
+	s->ip_va = bb->va;
 	s->ip_gma = gma;
-
 	return 0;
-
-unmap_src:
-	i915_gem_object_unpin_map(entry_obj->obj);
-put_obj:
-	i915_gem_object_put(entry_obj->obj);
-free_entry:
-	kfree(entry_obj);
+err_unmap:
+	i915_gem_object_unpin_map(bb->obj);
+err_finish_shmem_access:
+	i915_gem_obj_finish_shmem_access(bb->obj);
+err_free_obj:
+	i915_gem_object_put(bb->obj);
+err_free_bb:
+	kfree(bb);
 	return ret;
 }
@@ -1710,13 +1745,13 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
 
 	if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
 		gvt_vgpu_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
-		return -EINVAL;
+		return -EFAULT;
 	}
 
 	second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1;
 	if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) {
 		gvt_vgpu_err("Jumping to 2nd level BB from RB is not allowed\n");
-		return -EINVAL;
+		return -EFAULT;
 	}
 
 	s->saved_buf_addr_type = s->buf_addr_type;
@@ -1740,7 +1775,6 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
 		if (ret < 0)
 			return ret;
 	}
-
 	return ret;
 }
@@ -2430,7 +2464,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
 	if (info == NULL) {
 		gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
 				cmd, get_opcode(cmd, s->ring_id));
-		return -EINVAL;
+		return -EBADRQC;
 	}
 
 	s->info = info;
@@ -2465,6 +2499,10 @@ static inline bool gma_out_of_range(unsigned long gma,
 		return (gma > gma_tail) && (gma < gma_head);
 }
 
+/* Keep the consistent return type, e.g EBADRQC for unknown
+ * cmd, EFAULT for invalid address, EPERM for nonpriv. later
+ * works as the input of VM healthy status.
+ */
 static int command_scan(struct parser_exec_state *s,
 		unsigned long rb_head, unsigned long rb_tail,
 		unsigned long rb_start, unsigned long rb_len)
@@ -2487,7 +2525,7 @@ static int command_scan(struct parser_exec_state *s,
 				s->ip_gma, rb_start,
 				gma_bottom);
 			parser_exec_state_dump(s);
-			return -EINVAL;
+			return -EFAULT;
 		}
 		if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) {
 			gvt_vgpu_err("ip_gma %lx out of range."
@@ -2516,7 +2554,7 @@ static int scan_workload(struct intel_vgpu_workload *workload)
 	int ret = 0;
 
 	/* ring base is page aligned */
-	if (WARN_ON(!IS_ALIGNED(workload->rb_start, GTT_PAGE_SIZE)))
+	if (WARN_ON(!IS_ALIGNED(workload->rb_start, I915_GTT_PAGE_SIZE)))
 		return -EINVAL;
 
 	gma_head = workload->rb_start + workload->rb_head;
@@ -2565,7 +2603,8 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 			wa_ctx);
 
 	/* ring base is page aligned */
-	if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma, GTT_PAGE_SIZE)))
+	if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma,
+			I915_GTT_PAGE_SIZE)))
 		return -EINVAL;
 
 	ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(uint32_t);
@@ -2604,6 +2643,7 @@ out:
 static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
+	struct intel_vgpu_submission *s = &vgpu->submission;
 	unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
 	void *shadow_ring_buffer_va;
 	int ring_id = workload->ring_id;
@@ -2619,19 +2659,21 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
 	gma_tail = workload->rb_start + workload->rb_tail;
 	gma_top = workload->rb_start + guest_rb_size;
 
-	if (workload->rb_len > vgpu->reserve_ring_buffer_size[ring_id]) {
-		void *va = vgpu->reserve_ring_buffer_va[ring_id];
+	if (workload->rb_len > s->ring_scan_buffer_size[ring_id]) {
+		void *p;
+
 		/* realloc the new ring buffer if needed */
-		vgpu->reserve_ring_buffer_va[ring_id] =
-			krealloc(va, workload->rb_len, GFP_KERNEL);
-		if (!vgpu->reserve_ring_buffer_va[ring_id]) {
-			gvt_vgpu_err("fail to alloc reserve ring buffer\n");
+		p = krealloc(s->ring_scan_buffer[ring_id], workload->rb_len,
+				GFP_KERNEL);
+		if (!p) {
+			gvt_vgpu_err("fail to re-alloc ring scan buffer\n");
 			return -ENOMEM;
 		}
-		vgpu->reserve_ring_buffer_size[ring_id] = workload->rb_len;
+		s->ring_scan_buffer[ring_id] = p;
+		s->ring_scan_buffer_size[ring_id] = workload->rb_len;
 	}
 
-	shadow_ring_buffer_va = vgpu->reserve_ring_buffer_va[ring_id];
+	shadow_ring_buffer_va = s->ring_scan_buffer[ring_id];
 
 	/* get shadow ring buffer va */
 	workload->shadow_ring_buffer_va = shadow_ring_buffer_va;
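
The comment added above command_scan() documents the error-code convention this diff establishes across the parser: -EBADRQC for an unknown or ring-inhibited command, -EFAULT for an address outside the valid MMIO/GGTT range, and -EPERM for an LRI write to a non-whitelisted force-nonpriv register, so callers can later derive a VM health status from the scan result. Below is a minimal standalone C sketch of how a caller might act on that convention; the vm_health enum, classify_scan_error() and the main() harness are invented for illustration and are not part of this patch.

/* Hypothetical illustration only -- not from this patch. It mirrors the
 * convention documented above command_scan(): guest-triggered errors
 * (EBADRQC/EFAULT/EPERM) flag the guest, while host-side failures such
 * as ENOMEM do not. Uses Linux errno values (EBADRQC is Linux-specific).
 */
#include <errno.h>
#include <stdio.h>

enum vm_health { VM_HEALTHY, VM_GUEST_BUG, VM_HOST_ERROR };

/* Invented helper: classify a command_scan()-style return value. */
static enum vm_health classify_scan_error(int ret)
{
	switch (ret) {
	case 0:
		return VM_HEALTHY;	/* scan completed */
	case -EBADRQC:			/* unknown or inhibited command */
	case -EFAULT:			/* address outside MMIO/GGTT range */
	case -EPERM:			/* non-whitelisted force-nonpriv LRI */
		return VM_GUEST_BUG;	/* guest misbehaved: flag the vGPU */
	default:
		return VM_HOST_ERROR;	/* e.g. -ENOMEM: do not blame guest */
	}
}

int main(void)
{
	static const int results[] = { 0, -EBADRQC, -EFAULT, -EPERM, -ENOMEM };
	static const char * const names[] = {
		"HEALTHY", "GUEST_BUG", "HOST_ERROR"
	};
	size_t i;

	for (i = 0; i < sizeof(results) / sizeof(results[0]); i++)
		printf("scan ret %d -> %s\n", results[i],
		       names[classify_scan_error(results[i])]);
	return 0;
}

The sketch shows why the change matters: under the old code every failure collapsed to -EINVAL, so a caller could not distinguish a guest-triggered scan failure (which should mark the vGPU unhealthy) from a host-side failure like a failed allocation (which should not).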