author | Paolo Bonzini <pbonzini@redhat.com> | 2022-02-05 08:58:25 +0300
committer | Paolo Bonzini <pbonzini@redhat.com> | 2022-02-05 08:58:25 +0300
commit | 7e6a6b400db8048bd1c06e497e338388413cf5bc (patch)
tree | 794f9fcdc7a1bfb9a2812e90fc76809d810203b2 /drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
parent | 6e37ec8825a113bc2dd1b280be10e5ac6eb4f6b1 (diff)
parent | 1dd498e5e26ad71e3e9130daf72cfb6a693fee03 (diff)
download | linux-7e6a6b400db8048bd1c06e497e338388413cf5bc.tar.xz
Merge tag 'kvmarm-fixes-5.17-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
KVM/arm64 fixes for 5.17, take #2
- A couple of fixes when handling an exception while an SError has been delivered
- Workaround for Cortex-A510's single-step erratum
Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c')
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 117
1 file changed, 86 insertions(+), 31 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 5f2ffa9de5c8..dd2ff441068e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1171,14 +1171,13 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 	int ret;
 
 	vmw_validation_preload_bo(sw_context->ctx);
-	vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
-	if (IS_ERR(vmw_bo)) {
+	vmw_bo = vmw_user_bo_noref_lookup(sw_context->filp, handle);
+	if (IS_ERR_OR_NULL(vmw_bo)) {
 		VMW_DEBUG_USER("Could not find or use MOB buffer.\n");
 		return PTR_ERR(vmw_bo);
 	}
-
 	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
-	vmw_user_bo_noref_release();
+	ttm_bo_put(&vmw_bo->base);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -1226,14 +1225,13 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 	int ret;
 
 	vmw_validation_preload_bo(sw_context->ctx);
-	vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
-	if (IS_ERR(vmw_bo)) {
+	vmw_bo = vmw_user_bo_noref_lookup(sw_context->filp, handle);
+	if (IS_ERR_OR_NULL(vmw_bo)) {
 		VMW_DEBUG_USER("Could not find or use GMR region.\n");
 		return PTR_ERR(vmw_bo);
 	}
-
 	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
-	vmw_user_bo_noref_release();
+	ttm_bo_put(&vmw_bo->base);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -2166,6 +2164,44 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
 }
 
 /**
+ * vmw_cmd_dx_set_constant_buffer_offset - Validate
+ * SVGA_3D_CMD_DX_SET_VS/PS/GS/HS/DS/CS_CONSTANT_BUFFER_OFFSET command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int
+vmw_cmd_dx_set_constant_buffer_offset(struct vmw_private *dev_priv,
+				      struct vmw_sw_context *sw_context,
+				      SVGA3dCmdHeader *header)
+{
+	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetConstantBufferOffset);
+
+	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
+	u32 shader_slot;
+
+	if (!has_sm5_context(dev_priv))
+		return -EINVAL;
+
+	if (!ctx_node)
+		return -EINVAL;
+
+	cmd = container_of(header, typeof(*cmd), header);
+	if (cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
+		VMW_DEBUG_USER("Illegal const buffer slot %u.\n",
+			       (unsigned int) cmd->body.slot);
+		return -EINVAL;
+	}
+
+	shader_slot = cmd->header.id - SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET;
+	vmw_binding_cb_offset_update(ctx_node->staged, shader_slot,
+				     cmd->body.slot, cmd->body.offsetInBytes);
+
+	return 0;
+}
+
+/**
  * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
  * command
  *
@@ -2918,7 +2954,7 @@ static int vmw_cmd_set_uav(struct vmw_private *dev_priv,
 	if (!has_sm5_context(dev_priv))
 		return -EINVAL;
 
-	if (num_uav > SVGA3D_MAX_UAVIEWS) {
+	if (num_uav > vmw_max_num_uavs(dev_priv)) {
 		VMW_DEBUG_USER("Invalid UAV binding.\n");
 		return -EINVAL;
 	}
@@ -2950,7 +2986,7 @@ static int vmw_cmd_set_cs_uav(struct vmw_private *dev_priv,
 	if (!has_sm5_context(dev_priv))
 		return -EINVAL;
 
-	if (num_uav > SVGA3D_MAX_UAVIEWS) {
+	if (num_uav > vmw_max_num_uavs(dev_priv)) {
 		VMW_DEBUG_USER("Invalid UAV binding.\n");
 		return -EINVAL;
 	}
@@ -3528,6 +3564,24 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
 	VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
 		    &vmw_cmd_dx_transfer_from_buffer,
 		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET,
+		    &vmw_cmd_dx_set_constant_buffer_offset,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET,
+		    &vmw_cmd_dx_set_constant_buffer_offset,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET,
+		    &vmw_cmd_dx_set_constant_buffer_offset,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_HS_CONSTANT_BUFFER_OFFSET,
+		    &vmw_cmd_dx_set_constant_buffer_offset,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DS_CONSTANT_BUFFER_OFFSET,
+		    &vmw_cmd_dx_set_constant_buffer_offset,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_CONSTANT_BUFFER_OFFSET,
+		    &vmw_cmd_dx_set_constant_buffer_offset,
+		    true, false, true),
 	VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
 		    true, false, true),
 
@@ -3561,6 +3615,8 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
 		    &vmw_cmd_dx_define_streamoutput, true, false, true),
 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_STREAMOUTPUT,
 		    &vmw_cmd_dx_bind_streamoutput, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE_V2,
+		    &vmw_cmd_dx_so_define, true, false, true),
 };
 
 bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
@@ -3823,17 +3879,17 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
  * Also if copying fails, user-space will be unable to signal the fence object
  * so we wait for it immediately, and then unreference the user-space reference.
  */
-void
+int
 vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
 			    struct vmw_fpriv *vmw_fp, int ret,
 			    struct drm_vmw_fence_rep __user *user_fence_rep,
 			    struct vmw_fence_obj *fence, uint32_t fence_handle,
-			    int32_t out_fence_fd, struct sync_file *sync_file)
+			    int32_t out_fence_fd)
 {
 	struct drm_vmw_fence_rep fence_rep;
 
 	if (user_fence_rep == NULL)
-		return;
+		return 0;
 
 	memset(&fence_rep, 0, sizeof(fence_rep));
 
@@ -3861,20 +3917,13 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
 	 * handle.
 	 */
 	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
-		if (sync_file)
-			fput(sync_file->file);
-
-		if (fence_rep.fd != -1) {
-			put_unused_fd(fence_rep.fd);
-			fence_rep.fd = -1;
-		}
-
-		ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle,
-					  TTM_REF_USAGE);
+		ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle);
 		VMW_DEBUG_USER("Fence copy error. Syncing.\n");
 		(void) vmw_fence_obj_wait(fence, false, false,
 					  VMW_FENCE_WAIT_TIMEOUT);
 	}
+
+	return ret ? -EFAULT : 0;
 }
 
 /**
@@ -4054,8 +4103,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	struct sync_file *sync_file = NULL;
 	DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
 
-	vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm);
-
 	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
 		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
 		if (out_fence_fd < 0) {
@@ -4101,6 +4148,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 		sw_context->kernel = true;
 	}
 
+	sw_context->filp = file_priv;
 	sw_context->fp = vmw_fpriv(file_priv);
 	INIT_LIST_HEAD(&sw_context->ctx_list);
 	sw_context->cur_query_bo = dev_priv->pinned_bo;
@@ -4117,7 +4165,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	vmw_binding_state_reset(sw_context->staged_bindings);
 
 	if (!sw_context->res_ht_initialized) {
-		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
+		ret = vmwgfx_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
 		if (unlikely(ret != 0))
 			goto out_unlock;
 
@@ -4212,16 +4260,23 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 
 			(void) vmw_fence_obj_wait(fence, false, false,
 						  VMW_FENCE_WAIT_TIMEOUT);
+		}
+	}
+
+	ret = vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
+					  user_fence_rep, fence, handle, out_fence_fd);
+
+	if (sync_file) {
+		if (ret) {
+			/* usercopy of fence failed, put the file object */
+			fput(sync_file->file);
+			put_unused_fd(out_fence_fd);
 		} else {
 			/* Link the fence with the FD created earlier */
 			fd_install(out_fence_fd, sync_file->file);
 		}
 	}
 
-	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
-				    user_fence_rep, fence, handle, out_fence_fd,
-				    sync_file);
-
 	/* Don't unreference when handing fence out */
 	if (unlikely(out_fence != NULL)) {
 		*out_fence = fence;
@@ -4239,7 +4294,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	 */
 	vmw_validation_unref_lists(&val_ctx);
 
-	return 0;
+	return ret;
 
 out_unlock_binding:
 	mutex_unlock(&dev_priv->binding_mutex);
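A note on the new vmw_cmd_dx_set_constant_buffer_offset() handler above: because the six SVGA_3D_CMD_DX_SET_*_CONSTANT_BUFFER_OFFSET command IDs are contiguous, a single validator serves every shader stage by deriving the stage index from the distance to the VS command ID, after range-checking the slot. The following is a minimal user-space sketch of that dispatch pattern; the enum values and the MAX_CONSTBUFFERS bound are illustrative stand-ins, not the actual SVGA3D header values.

#include <stdio.h>

/* Illustrative command IDs, contiguous per shader stage, mirroring the
 * layout the handler relies on; the real values live in the SVGA3D headers. */
enum cmd_id {
        CMD_SET_VS_CB_OFFSET = 100,     /* vertex */
        CMD_SET_PS_CB_OFFSET,           /* pixel */
        CMD_SET_GS_CB_OFFSET,           /* geometry */
        CMD_SET_HS_CB_OFFSET,           /* hull */
        CMD_SET_DS_CB_OFFSET,           /* domain */
        CMD_SET_CS_CB_OFFSET,           /* compute */
};

#define MAX_CONSTBUFFERS 16     /* stand-in for SVGA3D_DX_MAX_CONSTBUFFERS */

static int validate_cb_offset(enum cmd_id id, unsigned int slot)
{
        /* Contiguous IDs let one handler recover the stage index. */
        unsigned int shader_slot = id - CMD_SET_VS_CB_OFFSET;

        if (slot >= MAX_CONSTBUFFERS)
                return -1;      /* the driver returns -EINVAL here */

        printf("stage %u, slot %u: accepted\n", shader_slot, slot);
        return 0;
}

int main(void)
{
        validate_cb_offset(CMD_SET_GS_CB_OFFSET, 3);    /* stage 2, ok */
        if (validate_cb_offset(CMD_SET_CS_CB_OFFSET, 99) != 0)
                printf("slot 99 rejected before touching bindings\n");
        return 0;
}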
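The reordered tail of vmw_execbuf_process() is the substance of the fence fix: fd_install() is irrevocable, so the fallible copy of the fence reply to user space must happen before the sync-file descriptor is published, and a failed copy must release both the file and the still-unused fd. The sketch below is a self-contained user-space analogue of that ordering, not the driver's code: struct fence_rep, copy_rep() and publish_fence_fd() are made-up names, and a pipe stands in for the sync_file.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

struct fence_rep { int fd; int error; };

/* Stand-in for copy_to_user(): fails when the destination is bad. */
static int copy_rep(struct fence_rep *dst, const struct fence_rep *src)
{
        if (!dst)
                return -EFAULT;
        memcpy(dst, src, sizeof(*dst));
        return 0;
}

static int publish_fence_fd(struct fence_rep *user_rep)
{
        int fds[2];
        struct fence_rep rep;

        if (pipe(fds))                  /* fds[0] plays the sync-file fd */
                return -errno;
        rep.fd = fds[0];
        rep.error = 0;

        /* Fallible step first: tell the caller which fd it will get. */
        if (copy_rep(user_rep, &rep)) {
                /* The caller never learned of the fd, so it must not
                 * survive; this mirrors fput() + put_unused_fd() in
                 * the patched error path. */
                close(fds[0]);
                close(fds[1]);
                return -EFAULT;
        }

        /* Only after a successful copy is the fd handed out for good,
         * the analogue of fd_install() in the driver. */
        close(fds[1]);
        return rep.fd;
}

int main(void)
{
        struct fence_rep rep;

        printf("got fd %d\n", publish_fence_fd(&rep));
        if (publish_fence_fd(NULL) == -EFAULT)
                printf("failed copy left no stale fd behind\n");
        return 0;
}

Before this change the descriptor was installed first, and the error path in vmw_execbuf_copy_fence_user() then called put_unused_fd() on an fd already live in the process's file table, which is exactly the stale-descriptor case the reordering removes.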