-rw-r--r--   drivers/gpu/drm/drm_bufs.c                  |   5
-rw-r--r--   drivers/gpu/drm/drm_ioc32.c                 |   5
-rw-r--r--   drivers/gpu/drm/panfrost/panfrost_gem.c     |   3
-rw-r--r--   drivers/gpu/drm/panfrost/panfrost_gem.h     |   1
-rw-r--r--   drivers/gpu/drm/panfrost/panfrost_mmu.c     |   8
-rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_drv.c         |   3
-rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_msg.c         | 146
-rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c  |  10
8 files changed, 144 insertions, 37 deletions
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index bfc419ed9d6c..ceca79b1468d 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -1340,7 +1340,10 @@ static int copy_one_buf(void *data, int count, struct drm_buf_entry *from)
 			.size = from->buf_size,
 			.low_mark = from->low_mark,
 			.high_mark = from->high_mark};
-	return copy_to_user(to, &v, offsetof(struct drm_buf_desc, flags));
+
+	if (copy_to_user(to, &v, offsetof(struct drm_buf_desc, flags)))
+		return -EFAULT;
+	return 0;
 }
 
 int drm_legacy_infobufs(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 374b372da58a..3972ebe48463 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -375,7 +375,10 @@ static int copy_one_buf32(void *data, int count, struct drm_buf_entry *from)
 			.size = from->buf_size,
 			.low_mark = from->low_mark,
 			.high_mark = from->high_mark};
-	return copy_to_user(to + count, &v, offsetof(drm_buf_desc32_t, flags));
+
+	if (copy_to_user(to + count, &v, offsetof(drm_buf_desc32_t, flags)))
+		return -EFAULT;
+	return 0;
 }
 
 static int drm_legacy_infobufs32(struct drm_device *dev, void *data,
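The two hunks above fix the same bug in both the native and the 32-bit-compat ioctl paths: copy_to_user() returns the number of bytes it could not copy (0 on success), not a negative errno, so returning its result directly hands a positive byte count back through the ioctl instead of -EFAULT. A minimal sketch of the corrected pattern in isolation; struct info and copy_info() are illustrative stand-ins, not the DRM names:

#include <linux/uaccess.h>	/* copy_to_user() */
#include <linux/errno.h>	/* EFAULT */

/* Illustrative stand-in for struct drm_buf_desc. */
struct info {
	int count;
	int size;
};

/*
 * Buggy form: copy_to_user() reports "bytes not copied", so a partial
 * copy leaks a positive count to user space as if it were a return code.
 */
static int copy_info_bad(struct info __user *to, const struct info *from)
{
	return copy_to_user(to, from, sizeof(*from));
}

/* Fixed form, as in the hunks above: collapse any short copy to -EFAULT. */
static int copy_info(struct info __user *to, const struct info *from)
{
	if (copy_to_user(to, from, sizeof(*from)))
		return -EFAULT;
	return 0;
}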
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index a5528a360ef4..97d64ceedbb4 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -19,7 +19,8 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
 	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
 	struct panfrost_device *pfdev = obj->dev->dev_private;
 
-	panfrost_mmu_unmap(bo);
+	if (bo->is_mapped)
+		panfrost_mmu_unmap(bo);
 
 	spin_lock(&pfdev->mm_lock);
 	drm_mm_remove_node(&bo->node);
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
index 045000eb5fcf..6dbcaba020fc 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.h
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
@@ -11,6 +11,7 @@ struct panfrost_gem_object {
 	struct drm_gem_shmem_object base;
 
 	struct drm_mm_node node;
+	bool is_mapped;
 };
 
 static inline
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 762b1bd2a8c2..92ac995dd9c6 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -156,6 +156,9 @@ int panfrost_mmu_map(struct panfrost_gem_object *bo)
 	struct sg_table *sgt;
 	int ret;
 
+	if (WARN_ON(bo->is_mapped))
+		return 0;
+
 	sgt = drm_gem_shmem_get_pages_sgt(obj);
 	if (WARN_ON(IS_ERR(sgt)))
 		return PTR_ERR(sgt);
@@ -189,6 +192,7 @@ int panfrost_mmu_map(struct panfrost_gem_object *bo)
 
 	pm_runtime_mark_last_busy(pfdev->dev);
 	pm_runtime_put_autosuspend(pfdev->dev);
+	bo->is_mapped = true;
 
 	return 0;
 }
@@ -203,6 +207,9 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
 	size_t unmapped_len = 0;
 	int ret;
 
+	if (WARN_ON(!bo->is_mapped))
+		return;
+
 	dev_dbg(pfdev->dev, "unmap: iova=%llx, len=%zx", iova, len);
 
 	ret = pm_runtime_get_sync(pfdev->dev);
@@ -230,6 +237,7 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
 
 	pm_runtime_mark_last_busy(pfdev->dev);
 	pm_runtime_put_autosuspend(pfdev->dev);
+	bo->is_mapped = false;
 }
 
 static void mmu_tlb_inv_context_s1(void *cookie)
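The panfrost change pairs a new is_mapped flag with guards on both sides, so panfrost_gem_free_object() can no longer unmap a BO whose MMU mapping was never set up, and a double map becomes a warning instead of duplicate page-table entries. A reduced sketch of the idiom with hypothetical names (struct buffer, buffer_map(), buffer_unmap()); the real driver tracks the flag on struct panfrost_gem_object as the hunks show:

#include <linux/bug.h>		/* WARN_ON() */
#include <linux/types.h>	/* bool */

struct buffer {
	bool is_mapped;
	/* ... backing pages, GPU VA node, etc. ... */
};

static int buffer_map(struct buffer *bo)
{
	/* Mapping twice would install the same range again. */
	if (WARN_ON(bo->is_mapped))
		return 0;

	/* ... insert the MMU mapping here ... */

	bo->is_mapped = true;
	return 0;
}

static void buffer_unmap(struct buffer *bo)
{
	/* Never mapped (e.g. setup failed early): nothing to tear down. */
	if (WARN_ON(!bo->is_mapped))
		return;

	/* ... remove the MMU mapping here ... */

	bo->is_mapped = false;
}

Making both operations idempotent keeps teardown simple: the free path does not need to know how far object setup got before it failed.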
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 4ff11a0077e1..9506190a0300 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -747,6 +747,9 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	if (unlikely(ret != 0))
 		goto out_err0;
 
+	dma_set_max_seg_size(dev->dev, min_t(unsigned int, U32_MAX & PAGE_MASK,
+					     SCATTERLIST_MAX_SEGMENT));
+
 	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
 		DRM_INFO("Max GMR ids is %u\n",
 			 (unsigned)dev_priv->max_gmr_ids);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index 8b9270f31409..e4e09d47c5c0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -136,6 +136,114 @@ static int vmw_close_channel(struct rpc_channel *channel)
 	return 0;
 }
 
+/**
+ * vmw_port_hb_out - Send the message payload either through the
+ * high-bandwidth port if available, or through the backdoor otherwise.
+ * @channel: The rpc channel.
+ * @msg: NULL-terminated message.
+ * @hb: Whether the high-bandwidth port is available.
+ *
+ * Return: The port status.
+ */
+static unsigned long vmw_port_hb_out(struct rpc_channel *channel,
+				     const char *msg, bool hb)
+{
+	unsigned long si, di, eax, ebx, ecx, edx;
+	unsigned long msg_len = strlen(msg);
+
+	if (hb) {
+		unsigned long bp = channel->cookie_high;
+
+		si = (uintptr_t) msg;
+		di = channel->cookie_low;
+
+		VMW_PORT_HB_OUT(
+			(MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
+			msg_len, si, di,
+			VMW_HYPERVISOR_HB_PORT | (channel->channel_id << 16),
+			VMW_HYPERVISOR_MAGIC, bp,
+			eax, ebx, ecx, edx, si, di);
+
+		return ebx;
+	}
+
+	/* HB port not available. Send the message 4 bytes at a time. */
+	ecx = MESSAGE_STATUS_SUCCESS << 16;
+	while (msg_len && (HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS)) {
+		unsigned int bytes = min_t(size_t, msg_len, 4);
+		unsigned long word = 0;
+
+		memcpy(&word, msg, bytes);
+		msg_len -= bytes;
+		msg += bytes;
+		si = channel->cookie_high;
+		di = channel->cookie_low;
+
+		VMW_PORT(VMW_PORT_CMD_MSG | (MSG_TYPE_SENDPAYLOAD << 16),
+			 word, si, di,
+			 VMW_HYPERVISOR_PORT | (channel->channel_id << 16),
+			 VMW_HYPERVISOR_MAGIC,
+			 eax, ebx, ecx, edx, si, di);
+	}
+
+	return ecx;
+}
+
+/**
+ * vmw_port_hb_in - Receive the message payload either through the
+ * high-bandwidth port if available, or through the backdoor otherwise.
+ * @channel: The rpc channel.
+ * @reply: Pointer to buffer holding reply.
+ * @reply_len: Length of the reply.
+ * @hb: Whether the high-bandwidth port is available.
+ *
+ * Return: The port status.
+ */
+static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply,
+				    unsigned long reply_len, bool hb)
+{
+	unsigned long si, di, eax, ebx, ecx, edx;
+
+	if (hb) {
+		unsigned long bp = channel->cookie_low;
+
+		si = channel->cookie_high;
+		di = (uintptr_t) reply;
+
+		VMW_PORT_HB_IN(
+			(MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
+			reply_len, si, di,
+			VMW_HYPERVISOR_HB_PORT | (channel->channel_id << 16),
+			VMW_HYPERVISOR_MAGIC, bp,
+			eax, ebx, ecx, edx, si, di);
+
+		return ebx;
+	}
+
+	/* HB port not available. Retrieve the message 4 bytes at a time. */
+	ecx = MESSAGE_STATUS_SUCCESS << 16;
+	while (reply_len) {
+		unsigned int bytes = min_t(unsigned long, reply_len, 4);
+
+		si = channel->cookie_high;
+		di = channel->cookie_low;
+
+		VMW_PORT(VMW_PORT_CMD_MSG | (MSG_TYPE_RECVPAYLOAD << 16),
+			 MESSAGE_STATUS_SUCCESS, si, di,
+			 VMW_HYPERVISOR_PORT | (channel->channel_id << 16),
+			 VMW_HYPERVISOR_MAGIC,
+			 eax, ebx, ecx, edx, si, di);
+
+		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0)
+			break;
+
+		memcpy(reply, &ebx, bytes);
+		reply_len -= bytes;
+		reply += bytes;
+	}
+
+	return ecx;
+}
 
 /**
@@ -148,11 +256,10 @@ static int vmw_close_channel(struct rpc_channel *channel)
  */
 static int vmw_send_msg(struct rpc_channel *channel, const char *msg)
 {
-	unsigned long eax, ebx, ecx, edx, si, di, bp;
+	unsigned long eax, ebx, ecx, edx, si, di;
 	size_t msg_len = strlen(msg);
 	int retries = 0;
 
-
 	while (retries < RETRIES) {
 		retries++;
 
@@ -166,23 +273,14 @@ static int vmw_send_msg(struct rpc_channel *channel, const char *msg)
 			VMW_HYPERVISOR_MAGIC,
 			eax, ebx, ecx, edx, si, di);
 
-		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0 ||
-		    (HIGH_WORD(ecx) & MESSAGE_STATUS_HB) == 0) {
-			/* Expected success + high-bandwidth. Give up. */
+		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
+			/* Expected success. Give up. */
 			return -EINVAL;
 		}
 
 		/* Send msg */
-		si = (uintptr_t) msg;
-		di = channel->cookie_low;
-		bp = channel->cookie_high;
-
-		VMW_PORT_HB_OUT(
-			(MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
-			msg_len, si, di,
-			VMW_HYPERVISOR_HB_PORT | (channel->channel_id << 16),
-			VMW_HYPERVISOR_MAGIC, bp,
-			eax, ebx, ecx, edx, si, di);
+		ebx = vmw_port_hb_out(channel, msg,
+				      !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB));
 
 		if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) != 0) {
 			return 0;
@@ -211,7 +309,7 @@ STACK_FRAME_NON_STANDARD(vmw_send_msg);
 static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
 			size_t *msg_len)
 {
-	unsigned long eax, ebx, ecx, edx, si, di, bp;
+	unsigned long eax, ebx, ecx, edx, si, di;
 	char *reply;
 	size_t reply_len;
 	int retries = 0;
@@ -233,8 +331,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
 			VMW_HYPERVISOR_MAGIC,
 			eax, ebx, ecx, edx, si, di);
 
-		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0 ||
-		    (HIGH_WORD(ecx) & MESSAGE_STATUS_HB) == 0) {
+		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
 			DRM_ERROR("Failed to get reply size for host message.\n");
 			return -EINVAL;
 		}
@@ -252,17 +349,8 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
 
 		/* Receive buffer */
-		si = channel->cookie_high;
-		di = (uintptr_t) reply;
-		bp = channel->cookie_low;
-
-		VMW_PORT_HB_IN(
-			(MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
-			reply_len, si, di,
-			VMW_HYPERVISOR_HB_PORT | (channel->channel_id << 16),
-			VMW_HYPERVISOR_MAGIC, bp,
-			eax, ebx, ecx, edx, si, di);
-
+		ebx = vmw_port_hb_in(channel, reply, reply_len,
+				     !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB));
 		if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) == 0) {
 			kfree(reply);
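vmw_port_hb_out() and vmw_port_hb_in() restore guest/host messaging on hosts that do not advertise the high-bandwidth port: instead of failing with -EINVAL, the payload falls back to the classic backdoor and moves through a register four bytes per hypercall. The marshalling half of that fallback can be sketched in plain C; send_word() is a hypothetical stand-in for the VMW_PORT() hypercall, returning nonzero on success the way MESSAGE_STATUS_SUCCESS is tested above:

#include <string.h>

/*
 * Send msg_len bytes four at a time, like the non-HB branch of
 * vmw_port_hb_out(). Returns 0 on success, -1 once the host
 * reports a failed word.
 */
static int backdoor_send(const char *msg, size_t msg_len,
			 int (*send_word)(unsigned long word))
{
	while (msg_len) {
		unsigned long word = 0;
		size_t bytes = msg_len < 4 ? msg_len : 4;

		/* A short tail is zero-padded, exactly as the driver's
		 * memcpy() into a zero-initialized word does. */
		memcpy(&word, msg, bytes);
		if (!send_word(word))
			return -1;

		msg += bytes;
		msg_len -= bytes;
	}
	return 0;
}

The receive side mirrors this loop, copying each returned register word back into the reply buffer until the requested length is consumed or the host stops reporting success.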
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index a6ea75b58a83..d8ea3dd10af0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -441,11 +441,11 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
 		if (unlikely(ret != 0))
 			return ret;
 
-		ret = sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
-						vsgt->num_pages, 0,
-						(unsigned long)
-						vsgt->num_pages << PAGE_SHIFT,
-						GFP_KERNEL);
+		ret = __sg_alloc_table_from_pages
+			(&vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
+			 (unsigned long) vsgt->num_pages << PAGE_SHIFT,
+			 dma_get_max_seg_size(dev_priv->dev->dev),
+			 GFP_KERNEL);
 		if (unlikely(ret != 0))
 			goto out_sg_alloc_fail;
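The vmwgfx_drv.c and vmwgfx_ttm_buffer.c hunks work as a pair: the driver first advertises a maximum DMA segment size (page-aligned and no larger than a 32-bit scatterlist segment length can hold), then passes that same limit to __sg_alloc_table_from_pages() so page coalescing can never build an oversized segment. A sketch of the pairing under the kernel API of this era, as used in the diff; example_probe() and example_map() are hypothetical helpers with error handling trimmed:

#include <linux/dma-mapping.h>	/* dma_set_max_seg_size(), dma_get_max_seg_size() */
#include <linux/scatterlist.h>	/* __sg_alloc_table_from_pages(), SCATTERLIST_MAX_SEGMENT */
#include <linux/kernel.h>	/* min_t(), U32_MAX */
#include <linux/mm.h>		/* PAGE_MASK, PAGE_SHIFT */

/* Probe time: declare the largest segment the device accepts. */
static void example_probe(struct device *dev)
{
	dma_set_max_seg_size(dev, min_t(unsigned int, U32_MAX & PAGE_MASK,
					SCATTERLIST_MAX_SEGMENT));
}

/* Map time: cap every coalesced segment at the advertised limit. */
static int example_map(struct device *dev, struct sg_table *sgt,
		       struct page **pages, unsigned int num_pages)
{
	return __sg_alloc_table_from_pages(sgt, pages, num_pages, 0,
					   (unsigned long)num_pages << PAGE_SHIFT,
					   dma_get_max_seg_size(dev),
					   GFP_KERNEL);
}

Without the explicit cap, physically contiguous pages are merged into segments bounded only by the scatterlist code's own maximum, which can exceed the device's advertised limit and trip the DMA layer's max_seg_size checks.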