From 735581a0a13c58e6ff7eaf7a1087e1e5d917cabe Mon Sep 17 00:00:00 2001 From: Gerd Hoffmann Date: Fri, 20 Apr 2018 09:19:01 +0200 Subject: qxl: remove qxl_io_log() qxl_io_log() sends messages over to the host (qemu) for logging. Remove the function and all callers, we can just use standard DRM_DEBUG calls (and if needed a serial console). Signed-off-by: Gerd Hoffmann Reviewed-by: Dave Airlie Link: http://patchwork.freedesktop.org/patch/msgid/20180420071904.24276-2-kraxel@redhat.com --- drivers/gpu/drm/qxl/qxl_cmd.c | 34 ++-------------------------------- drivers/gpu/drm/qxl/qxl_display.c | 27 ++++----------------------- drivers/gpu/drm/qxl/qxl_drv.h | 3 --- drivers/gpu/drm/qxl/qxl_fb.c | 2 -- drivers/gpu/drm/qxl/qxl_irq.c | 3 +-- 5 files changed, 7 insertions(+), 62 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c index c0fb52c6d4ca..850f8d7d37ce 100644 --- a/drivers/gpu/drm/qxl/qxl_cmd.c +++ b/drivers/gpu/drm/qxl/qxl_cmd.c @@ -341,12 +341,9 @@ int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf, surface_height = surf->surf.height; if (area->left < 0 || area->top < 0 || - area->right > surface_width || area->bottom > surface_height) { - qxl_io_log(qdev, "%s: not doing area update for " - "%d, (%d,%d,%d,%d) (%d,%d)\n", __func__, surface_id, area->left, - area->top, area->right, area->bottom, surface_width, surface_height); + area->right > surface_width || area->bottom > surface_height) return -EINVAL; - } + mutex_lock(&qdev->update_area_mutex); qdev->ram_header->update_area = *area; qdev->ram_header->update_surface = surface_id; @@ -407,20 +404,6 @@ void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id) wait_for_io_cmd(qdev, id, QXL_IO_MEMSLOT_ADD_ASYNC); } -void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...) -{ - va_list args; - - va_start(args, fmt); - vsnprintf(qdev->ram_header->log_buf, QXL_LOG_BUF_SIZE, fmt, args); - va_end(args); - /* - * DO not do a DRM output here - this will call printk, which will - * call back into qxl for rendering (qxl_fb) - */ - outb(0, qdev->io_base + QXL_IO_LOG); -} - void qxl_io_reset(struct qxl_device *qdev) { outb(0, qdev->io_base + QXL_IO_RESET); @@ -428,19 +411,6 @@ void qxl_io_reset(struct qxl_device *qdev) void qxl_io_monitors_config(struct qxl_device *qdev) { - qxl_io_log(qdev, "%s: %d [%dx%d+%d+%d]\n", __func__, - qdev->monitors_config ? - qdev->monitors_config->count : -1, - qdev->monitors_config && qdev->monitors_config->count ? - qdev->monitors_config->heads[0].width : -1, - qdev->monitors_config && qdev->monitors_config->count ? - qdev->monitors_config->heads[0].height : -1, - qdev->monitors_config && qdev->monitors_config->count ? - qdev->monitors_config->heads[0].x : -1, - qdev->monitors_config && qdev->monitors_config->count ? 
- qdev->monitors_config->heads[0].y : -1 - ); - wait_for_io_cmd(qdev, 0, QXL_IO_MONITORS_CONFIG_ASYNC); } diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 820cbca3bf6e..5809c6c6e7b7 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -48,12 +48,8 @@ static void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned c qdev->client_monitors_config = kzalloc( sizeof(struct qxl_monitors_config) + sizeof(struct qxl_head) * count, GFP_KERNEL); - if (!qdev->client_monitors_config) { - qxl_io_log(qdev, - "%s: allocation failure for %u heads\n", - __func__, count); + if (!qdev->client_monitors_config) return; - } } qdev->client_monitors_config->count = count; } @@ -74,12 +70,8 @@ static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev) num_monitors = qdev->rom->client_monitors_config.count; crc = crc32(0, (const uint8_t *)&qdev->rom->client_monitors_config, sizeof(qdev->rom->client_monitors_config)); - if (crc != qdev->rom->client_monitors_config_crc) { - qxl_io_log(qdev, "crc mismatch: have %X (%zd) != %X\n", crc, - sizeof(qdev->rom->client_monitors_config), - qdev->rom->client_monitors_config_crc); + if (crc != qdev->rom->client_monitors_config_crc) return MONITORS_CONFIG_BAD_CRC; - } if (!num_monitors) { DRM_DEBUG_KMS("no client monitors configured\n"); return status; @@ -170,12 +162,10 @@ void qxl_display_read_client_monitors_config(struct qxl_device *qdev) udelay(5); } if (status == MONITORS_CONFIG_BAD_CRC) { - qxl_io_log(qdev, "config: bad crc\n"); DRM_DEBUG_KMS("ignoring client monitors config: bad crc"); return; } if (status == MONITORS_CONFIG_UNCHANGED) { - qxl_io_log(qdev, "config: unchanged\n"); DRM_DEBUG_KMS("ignoring client monitors config: unchanged"); return; } @@ -385,14 +375,6 @@ static bool qxl_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { - struct drm_device *dev = crtc->dev; - struct qxl_device *qdev = dev->dev_private; - - qxl_io_log(qdev, "%s: (%d,%d) => (%d,%d)\n", - __func__, - mode->hdisplay, mode->vdisplay, - adjusted_mode->hdisplay, - adjusted_mode->vdisplay); return true; } @@ -403,10 +385,9 @@ qxl_send_monitors_config(struct qxl_device *qdev) BUG_ON(!qdev->ram_header->monitors_config); - if (qdev->monitors_config->count == 0) { - qxl_io_log(qdev, "%s: 0 monitors??\n", __func__); + if (qdev->monitors_config->count == 0) return; - } + for (i = 0 ; i < qdev->monitors_config->count ; ++i) { struct qxl_head *head = &qdev->monitors_config->heads[i]; diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 00a1a66b052a..4b8984017373 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -298,9 +298,6 @@ struct qxl_device { int monitors_config_height; }; -/* forward declaration for QXL_INFO_IO */ -__printf(2,3) void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...); - extern const struct drm_ioctl_desc qxl_ioctls[]; extern int qxl_max_ioctl; diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c index 338891401f35..9a6752606079 100644 --- a/drivers/gpu/drm/qxl/qxl_fb.c +++ b/drivers/gpu/drm/qxl/qxl_fb.c @@ -185,8 +185,6 @@ static int qxlfb_framebuffer_dirty(struct drm_framebuffer *fb, /* * we are using a shadow draw buffer, at qdev->surface0_shadow */ - qxl_io_log(qdev, "dirty x[%d, %d], y[%d, %d]\n", clips->x1, clips->x2, - clips->y1, clips->y2); image->dx = clips->x1; image->dy = clips->y1; image->width = clips->x2 - 
clips->x1; diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c index 23a40106ab53..3bb31add6350 100644 --- a/drivers/gpu/drm/qxl/qxl_irq.c +++ b/drivers/gpu/drm/qxl/qxl_irq.c @@ -57,10 +57,9 @@ irqreturn_t qxl_irq_handler(int irq, void *arg) * to avoid endless loops). */ qdev->irq_received_error++; - qxl_io_log(qdev, "%s: driver is in bug mode.\n", __func__); + DRM_WARN("driver is in bug mode\n"); } if (pending & QXL_INTERRUPT_CLIENT_MONITORS_CONFIG) { - qxl_io_log(qdev, "QXL_INTERRUPT_CLIENT_MONITORS_CONFIG\n"); schedule_work(&qdev->client_monitors_config_work); } qdev->ram_header->int_mask = QXL_INTERRUPT_MASK; -- cgit v1.2.3 From 998010bfae6ebaac68af905bef9f6e276f775254 Mon Sep 17 00:00:00 2001 From: Gerd Hoffmann Date: Fri, 20 Apr 2018 09:19:02 +0200 Subject: qxl: move qxl_send_monitors_config() Needed to avoid a forward declaration in a followup patch. Pure code move, no functional change. Signed-off-by: Gerd Hoffmann Reviewed-by: Dave Airlie Link: http://patchwork.freedesktop.org/patch/msgid/20180420071904.24276-3-kraxel@redhat.com --- drivers/gpu/drm/qxl/qxl_display.c | 47 +++++++++++++++++++-------------------- 1 file changed, 23 insertions(+), 24 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 5809c6c6e7b7..7d08a26c3a8b 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -258,6 +258,29 @@ static int qxl_add_common_modes(struct drm_connector *connector, return i - 1; } +static void qxl_send_monitors_config(struct qxl_device *qdev) +{ + int i; + + BUG_ON(!qdev->ram_header->monitors_config); + + if (qdev->monitors_config->count == 0) + return; + + for (i = 0 ; i < qdev->monitors_config->count ; ++i) { + struct qxl_head *head = &qdev->monitors_config->heads[i]; + + if (head->y > 8192 || head->x > 8192 || + head->width > 8192 || head->height > 8192) { + DRM_ERROR("head %d wrong: %dx%d+%d+%d\n", + i, head->width, head->height, + head->x, head->y); + return; + } + } + qxl_io_monitors_config(qdev); +} + static void qxl_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state) { @@ -378,30 +401,6 @@ static bool qxl_crtc_mode_fixup(struct drm_crtc *crtc, return true; } -static void -qxl_send_monitors_config(struct qxl_device *qdev) -{ - int i; - - BUG_ON(!qdev->ram_header->monitors_config); - - if (qdev->monitors_config->count == 0) - return; - - for (i = 0 ; i < qdev->monitors_config->count ; ++i) { - struct qxl_head *head = &qdev->monitors_config->heads[i]; - - if (head->y > 8192 || head->x > 8192 || - head->width > 8192 || head->height > 8192) { - DRM_ERROR("head %d wrong: %dx%d+%d+%d\n", - i, head->width, head->height, - head->x, head->y); - return; - } - } - qxl_io_monitors_config(qdev); -} - static void qxl_monitors_config_set(struct qxl_device *qdev, int index, unsigned x, unsigned y, -- cgit v1.2.3 From a6d3c4d79822658e7f2f9c4b73237fe2b057ed67 Mon Sep 17 00:00:00 2001 From: Gerd Hoffmann Date: Fri, 20 Apr 2018 09:19:03 +0200 Subject: qxl: hook monitors_config updates into crtc, not encoder. The encoder callbacks are only called in case the video mode changes. So any layout changes without mode changes will go unnoticed. Add qxl_crtc_update_monitors_config(), based on the old qxl_write_monitors_config_for_encoder() function. Hook it into the enable, disable and flush atomic crtc callbacks. Remove monitors_config updates from all other places. 
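As a minimal sketch of the new wiring (names taken from this patch, bodies otherwise elided), the three atomic CRTC helpers now funnel into the same update routine, so a layout change reaches the host even when no mode set happens:

    static void qxl_crtc_atomic_enable(struct drm_crtc *crtc,
                                       struct drm_crtc_state *old_state)
    {
            qxl_crtc_update_monitors_config(crtc, "enable");
    }

    static void qxl_crtc_atomic_disable(struct drm_crtc *crtc,
                                        struct drm_crtc_state *old_state)
    {
            qxl_crtc_update_monitors_config(crtc, "disable");
    }

    /* qxl_crtc_atomic_flush() ends with the same call, tagged "flush". */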
Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=1544322 Signed-off-by: Gerd Hoffmann Reviewed-by: Dave Airlie Link: http://patchwork.freedesktop.org/patch/msgid/20180420071904.24276-4-kraxel@redhat.com --- drivers/gpu/drm/qxl/qxl_cmd.c | 2 + drivers/gpu/drm/qxl/qxl_display.c | 156 ++++++++++++++++---------------------- 2 files changed, 66 insertions(+), 92 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c index 850f8d7d37ce..95db20f2145f 100644 --- a/drivers/gpu/drm/qxl/qxl_cmd.c +++ b/drivers/gpu/drm/qxl/qxl_cmd.c @@ -371,6 +371,7 @@ void qxl_io_flush_surfaces(struct qxl_device *qdev) void qxl_io_destroy_primary(struct qxl_device *qdev) { wait_for_io_cmd(qdev, 0, QXL_IO_DESTROY_PRIMARY_ASYNC); + qdev->primary_created = false; } void qxl_io_create_primary(struct qxl_device *qdev, @@ -396,6 +397,7 @@ void qxl_io_create_primary(struct qxl_device *qdev, create->type = QXL_SURF_TYPE_PRIMARY; wait_for_io_cmd(qdev, 0, QXL_IO_CREATE_PRIMARY_ASYNC); + qdev->primary_created = true; } void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id) diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 7d08a26c3a8b..58959733ae16 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -281,6 +281,66 @@ static void qxl_send_monitors_config(struct qxl_device *qdev) qxl_io_monitors_config(qdev); } +static void qxl_crtc_update_monitors_config(struct drm_crtc *crtc, + const char *reason) +{ + struct drm_device *dev = crtc->dev; + struct qxl_device *qdev = dev->dev_private; + struct qxl_crtc *qcrtc = to_qxl_crtc(crtc); + struct qxl_head head; + int oldcount, i = qcrtc->index; + + if (!qdev->primary_created) { + DRM_DEBUG_KMS("no primary surface, skip (%s)\n", reason); + return; + } + + if (!qdev->monitors_config || + qdev->monitors_config->max_allowed <= i) + return; + + head.id = i; + head.flags = 0; + oldcount = qdev->monitors_config->count; + if (crtc->state->active) { + struct drm_display_mode *mode = &crtc->mode; + head.width = mode->hdisplay; + head.height = mode->vdisplay; + head.x = crtc->x; + head.y = crtc->y; + if (qdev->monitors_config->count < i + 1) + qdev->monitors_config->count = i + 1; + } else if (i > 0) { + head.width = 0; + head.height = 0; + head.x = 0; + head.y = 0; + if (qdev->monitors_config->count == i + 1) + qdev->monitors_config->count = i; + } else { + DRM_DEBUG_KMS("inactive head 0, skip (%s)\n", reason); + return; + } + + if (head.width == qdev->monitors_config->heads[i].width && + head.height == qdev->monitors_config->heads[i].height && + head.x == qdev->monitors_config->heads[i].x && + head.y == qdev->monitors_config->heads[i].y && + oldcount == qdev->monitors_config->count) + return; + + DRM_DEBUG_KMS("head %d, %dx%d, at +%d+%d, %s (%s)\n", + i, head.width, head.height, head.x, head.y, + crtc->state->active ? 
"on" : "off", reason); + if (oldcount != qdev->monitors_config->count) + DRM_DEBUG_KMS("active heads %d -> %d (%d total)\n", + oldcount, qdev->monitors_config->count, + qdev->monitors_config->max_allowed); + + qdev->monitors_config->heads[i] = head; + qxl_send_monitors_config(qdev); +} + static void qxl_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state) { @@ -296,6 +356,8 @@ static void qxl_crtc_atomic_flush(struct drm_crtc *crtc, drm_crtc_send_vblank_event(crtc, event); spin_unlock_irqrestore(&dev->event_lock, flags); } + + qxl_crtc_update_monitors_config(crtc, "flush"); } static void qxl_crtc_destroy(struct drm_crtc *crtc) @@ -401,55 +463,20 @@ static bool qxl_crtc_mode_fixup(struct drm_crtc *crtc, return true; } -static void qxl_monitors_config_set(struct qxl_device *qdev, - int index, - unsigned x, unsigned y, - unsigned width, unsigned height, - unsigned surf_id) -{ - DRM_DEBUG_KMS("%d:%dx%d+%d+%d\n", index, width, height, x, y); - qdev->monitors_config->heads[index].x = x; - qdev->monitors_config->heads[index].y = y; - qdev->monitors_config->heads[index].width = width; - qdev->monitors_config->heads[index].height = height; - qdev->monitors_config->heads[index].surface_id = surf_id; - -} - -static void qxl_mode_set_nofb(struct drm_crtc *crtc) -{ - struct qxl_device *qdev = crtc->dev->dev_private; - struct qxl_crtc *qcrtc = to_qxl_crtc(crtc); - struct drm_display_mode *mode = &crtc->mode; - - DRM_DEBUG("Mode set (%d,%d)\n", - mode->hdisplay, mode->vdisplay); - - qxl_monitors_config_set(qdev, qcrtc->index, 0, 0, - mode->hdisplay, mode->vdisplay, 0); - -} - static void qxl_crtc_atomic_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state) { - DRM_DEBUG("\n"); + qxl_crtc_update_monitors_config(crtc, "enable"); } static void qxl_crtc_atomic_disable(struct drm_crtc *crtc, struct drm_crtc_state *old_state) { - struct qxl_crtc *qcrtc = to_qxl_crtc(crtc); - struct qxl_device *qdev = crtc->dev->dev_private; - - qxl_monitors_config_set(qdev, qcrtc->index, 0, 0, 0, 0, 0); - - qxl_send_monitors_config(qdev); + qxl_crtc_update_monitors_config(crtc, "disable"); } static const struct drm_crtc_helper_funcs qxl_crtc_helper_funcs = { .mode_fixup = qxl_crtc_mode_fixup, - .mode_set_nofb = qxl_mode_set_nofb, .atomic_flush = qxl_crtc_atomic_flush, .atomic_enable = qxl_crtc_atomic_enable, .atomic_disable = qxl_crtc_atomic_disable, @@ -939,61 +966,8 @@ static void qxl_enc_prepare(struct drm_encoder *encoder) DRM_DEBUG("\n"); } -static void qxl_write_monitors_config_for_encoder(struct qxl_device *qdev, - struct drm_encoder *encoder) -{ - int i; - struct qxl_output *output = drm_encoder_to_qxl_output(encoder); - struct qxl_head *head; - struct drm_display_mode *mode; - - BUG_ON(!encoder); - /* TODO: ugly, do better */ - i = output->index; - if (!qdev->monitors_config || - qdev->monitors_config->max_allowed <= i) { - DRM_ERROR( - "head number too large or missing monitors config: %p, %d", - qdev->monitors_config, - qdev->monitors_config ? 
- qdev->monitors_config->max_allowed : -1); - return; - } - if (!encoder->crtc) { - DRM_ERROR("missing crtc on encoder %p\n", encoder); - return; - } - if (i != 0) - DRM_DEBUG("missing for multiple monitors: no head holes\n"); - head = &qdev->monitors_config->heads[i]; - head->id = i; - if (encoder->crtc->enabled) { - mode = &encoder->crtc->mode; - head->width = mode->hdisplay; - head->height = mode->vdisplay; - head->x = encoder->crtc->x; - head->y = encoder->crtc->y; - if (qdev->monitors_config->count < i + 1) - qdev->monitors_config->count = i + 1; - } else { - head->width = 0; - head->height = 0; - head->x = 0; - head->y = 0; - } - DRM_DEBUG_KMS("setting head %d to +%d+%d %dx%d out of %d\n", - i, head->x, head->y, head->width, head->height, qdev->monitors_config->count); - head->flags = 0; - /* TODO - somewhere else to call this for multiple monitors - * (config_commit?) */ - qxl_send_monitors_config(qdev); -} - static void qxl_enc_commit(struct drm_encoder *encoder) { - struct qxl_device *qdev = encoder->dev->dev_private; - - qxl_write_monitors_config_for_encoder(qdev, encoder); DRM_DEBUG("\n"); } @@ -1080,8 +1054,6 @@ static enum drm_connector_status qxl_conn_detect( qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]); DRM_DEBUG("#%d connected: %d\n", output->index, connected); - if (!connected) - qxl_monitors_config_set(qdev, output->index, 0, 0, 0, 0, 0); return connected ? connector_status_connected : connector_status_disconnected; -- cgit v1.2.3 From cc4e44d5156933272df02d27f99322100e1edd1d Mon Sep 17 00:00:00 2001 From: Gerd Hoffmann Date: Fri, 20 Apr 2018 09:19:04 +0200 Subject: qxl: drop dummy functions These days drm core checks function pointers everywhere before calling them. So we can drop a bunch of dummy functions now. 
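For illustration only — a simplified sketch of the core-side pattern, not the exact drm core source — the helpers skip hooks that a driver leaves NULL, which is what makes the empty stubs removed below unnecessary:

    const struct drm_crtc_helper_funcs *funcs = crtc->helper_private;

    if (funcs && funcs->mode_fixup &&
        !funcs->mode_fixup(crtc, mode, adjusted_mode))
            return false;   /* hook present and it rejected the mode */
    /* hook absent: treated as "nothing to fix up", same as the old dummy */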
Signed-off-by: Gerd Hoffmann Reviewed-by: Daniel Vetter Reviewed-by: Dave Airlie Link: http://patchwork.freedesktop.org/patch/msgid/20180420071904.24276-5-kraxel@redhat.com --- drivers/gpu/drm/qxl/qxl_display.c | 50 --------------------------------------- 1 file changed, 50 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 58959733ae16..b8cda9449241 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -456,13 +456,6 @@ qxl_framebuffer_init(struct drm_device *dev, return 0; } -static bool qxl_crtc_mode_fixup(struct drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - static void qxl_crtc_atomic_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state) { @@ -476,7 +469,6 @@ static void qxl_crtc_atomic_disable(struct drm_crtc *crtc, } static const struct drm_crtc_helper_funcs qxl_crtc_helper_funcs = { - .mode_fixup = qxl_crtc_mode_fixup, .atomic_flush = qxl_crtc_atomic_flush, .atomic_enable = qxl_crtc_atomic_enable, .atomic_disable = qxl_crtc_atomic_disable, @@ -620,12 +612,6 @@ static void qxl_primary_atomic_disable(struct drm_plane *plane, } } -static int qxl_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) -{ - return 0; -} - static void qxl_cursor_atomic_update(struct drm_plane *plane, struct drm_plane_state *old_state) { @@ -831,7 +817,6 @@ static const uint32_t qxl_cursor_plane_formats[] = { }; static const struct drm_plane_helper_funcs qxl_cursor_helper_funcs = { - .atomic_check = qxl_plane_atomic_check, .atomic_update = qxl_cursor_atomic_update, .atomic_disable = qxl_cursor_atomic_disable, .prepare_fb = qxl_plane_prepare_fb, @@ -956,28 +941,6 @@ free_mem: return r; } -static void qxl_enc_dpms(struct drm_encoder *encoder, int mode) -{ - DRM_DEBUG("\n"); -} - -static void qxl_enc_prepare(struct drm_encoder *encoder) -{ - DRM_DEBUG("\n"); -} - -static void qxl_enc_commit(struct drm_encoder *encoder) -{ - DRM_DEBUG("\n"); -} - -static void qxl_enc_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - DRM_DEBUG("\n"); -} - static int qxl_conn_get_modes(struct drm_connector *connector) { unsigned pwidth = 1024; @@ -1023,10 +986,6 @@ static struct drm_encoder *qxl_best_encoder(struct drm_connector *connector) static const struct drm_encoder_helper_funcs qxl_enc_helper_funcs = { - .dpms = qxl_enc_dpms, - .prepare = qxl_enc_prepare, - .mode_set = qxl_enc_mode_set, - .commit = qxl_enc_commit, }; static const struct drm_connector_helper_funcs qxl_connector_helper_funcs = { @@ -1059,14 +1018,6 @@ static enum drm_connector_status qxl_conn_detect( : connector_status_disconnected; } -static int qxl_conn_set_property(struct drm_connector *connector, - struct drm_property *property, - uint64_t value) -{ - DRM_DEBUG("\n"); - return 0; -} - static void qxl_conn_destroy(struct drm_connector *connector) { struct qxl_output *qxl_output = @@ -1081,7 +1032,6 @@ static const struct drm_connector_funcs qxl_connector_funcs = { .dpms = drm_helper_connector_dpms, .detect = qxl_conn_detect, .fill_modes = drm_helper_probe_single_connector_modes, - .set_property = qxl_conn_set_property, .destroy = qxl_conn_destroy, .reset = drm_atomic_helper_connector_reset, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, -- cgit v1.2.3 From c2af73645d3a67cc2e4a750179048a4c6d5110a1 Mon Sep 17 00:00:00 2001 From: Philippe CORNU Date: 
Tue, 17 Apr 2018 13:34:41 +0200 Subject: drm/stm: ltdc: fix deferred endpoint management MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When a driver related to one of the endpoints is deferred due to probe dependencies (i2c, spi...) but the other one is ready, ltdc probe continues and the deferred driver will never be probed again. The fix consists in waiting for all deferred endpoints before continuing the ltdc probe. Signed-off-by: Philippe Cornu Reviewed-by: Yannick Fertré Link: https://patchwork.freedesktop.org/patch/msgid/20180417113441.8214-1-philippe.cornu@st.com --- drivers/gpu/drm/stm/ltdc.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c index e3121d9e4230..014cef8cef37 100644 --- a/drivers/gpu/drm/stm/ltdc.c +++ b/drivers/gpu/drm/stm/ltdc.c @@ -987,14 +987,13 @@ int ltdc_load(struct drm_device *ddev) &bridge[i]); /* - * If at least one endpoint is ready, continue probing, - * else if at least one endpoint is -EPROBE_DEFER and - * there is no previous ready endpoints, defer probing. + * If at least one endpoint is -EPROBE_DEFER, defer probing, + * else if at least one endpoint is ready, continue probing. */ - if (!ret) + if (ret == -EPROBE_DEFER) + return ret; + else if (!ret) endpoint_not_ready = 0; - else if (ret == -EPROBE_DEFER && endpoint_not_ready) - endpoint_not_ready = -EPROBE_DEFER; } if (endpoint_not_ready) -- cgit v1.2.3 From 0cefff963bf2af9ec9b2e2b537c3d3e6a43ceb9b Mon Sep 17 00:00:00 2001 From: Philippe CORNU Date: Tue, 17 Apr 2018 13:40:26 +0200 Subject: drm/stm: ltdc: add mode_valid() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add mode_valid() function to filter modes according to available pll clock values and "preferred" modes. It is particularly useful for hdmi modes that require precise pixel clocks. Note that "preferred" modes are always accepted: - this is important for panels because panel clock tolerances are bigger than hdmi ones and there is no reason to not accept them (the fps may vary a little but it is not a problem). - the hdmi preferred mode will be accepted too, but userland will be able to use others hdmi "valid" modes if necessary. Signed-off-by: Philippe Cornu Reviewed-by: Yannick Fertré Link: https://patchwork.freedesktop.org/patch/msgid/20180417114026.8709-1-philippe.cornu@st.com --- drivers/gpu/drm/stm/ltdc.c | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c index 014cef8cef37..616191fe98ae 100644 --- a/drivers/gpu/drm/stm/ltdc.c +++ b/drivers/gpu/drm/stm/ltdc.c @@ -445,6 +445,43 @@ static void ltdc_crtc_atomic_disable(struct drm_crtc *crtc, reg_set(ldev->regs, LTDC_SRCR, SRCR_IMR); } +#define CLK_TOLERANCE_HZ 50 + +static enum drm_mode_status +ltdc_crtc_mode_valid(struct drm_crtc *crtc, + const struct drm_display_mode *mode) +{ + struct ltdc_device *ldev = crtc_to_ltdc(crtc); + int target = mode->clock * 1000; + int target_min = target - CLK_TOLERANCE_HZ; + int target_max = target + CLK_TOLERANCE_HZ; + int result; + + /* + * Accept all "preferred" modes: + * - this is important for panels because panel clock tolerances are + * bigger than hdmi ones and there is no reason to not accept them + * (the fps may vary a little but it is not a problem). 
+ * - the hdmi preferred mode will be accepted too, but userland will + * be able to use others hdmi "valid" modes if necessary. + */ + if (mode->type & DRM_MODE_TYPE_PREFERRED) + return MODE_OK; + + result = clk_round_rate(ldev->pixel_clk, target); + + DRM_DEBUG_DRIVER("clk rate target %d, available %d\n", target, result); + + /* + * Filter modes according to the clock value, particularly useful for + * hdmi modes that require precise pixel clocks. + */ + if (result < target_min || result > target_max) + return MODE_CLOCK_RANGE; + + return MODE_OK; +} + static bool ltdc_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -559,6 +596,7 @@ static void ltdc_crtc_atomic_flush(struct drm_crtc *crtc, } static const struct drm_crtc_helper_funcs ltdc_crtc_helper_funcs = { + .mode_valid = ltdc_crtc_mode_valid, .mode_fixup = ltdc_crtc_mode_fixup, .mode_set_nofb = ltdc_crtc_mode_set_nofb, .atomic_flush = ltdc_crtc_atomic_flush, -- cgit v1.2.3 From cccb57d8fdc9332c14f451e96a9604fa02a5bed2 Mon Sep 17 00:00:00 2001 From: Philippe CORNU Date: Thu, 19 Apr 2018 15:28:04 +0200 Subject: drm/stm: ltdc: fix warnings in ltdc_plane_create() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit "make C=1" returns 2 warnings in ltdc_plane_create() ("Using plain integer as NULL pointer"). This patch fixes them. Signed-off-by: Philippe Cornu Reviewed-by: Yannick Fertré Link: https://patchwork.freedesktop.org/patch/msgid/20180419132804.8317-1-philippe.cornu@st.com --- drivers/gpu/drm/stm/ltdc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c index 616191fe98ae..d997a6014d6c 100644 --- a/drivers/gpu/drm/stm/ltdc.c +++ b/drivers/gpu/drm/stm/ltdc.c @@ -860,13 +860,13 @@ static struct drm_plane *ltdc_plane_create(struct drm_device *ddev, plane = devm_kzalloc(dev, sizeof(*plane), GFP_KERNEL); if (!plane) - return 0; + return NULL; ret = drm_universal_plane_init(ddev, plane, possible_crtcs, <dc_plane_funcs, formats, nb_fmt, NULL, type, NULL); if (ret < 0) - return 0; + return NULL; drm_plane_helper_add(plane, <dc_plane_helper_funcs); -- cgit v1.2.3 From 30e9db6d046ba667070e5a011a13951830d60a6e Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 16 Mar 2018 21:04:20 +0200 Subject: drm: Don't pass the index to drm_property_add_enum() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit drm_property_add_enum() can calculate the index itself just fine, so no point in having the caller pass it in. 
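The caller-visible difference, as a quick sketch (the enum value and name here are placeholders; the real call sites are converted in the diff below):

    /* before: every caller tracked a running index by hand */
    drm_property_add_enum(prop, i++, DRM_MODE_SCALE_FULLSCREEN, "Full");

    /* after: the property keeps count of its own enum entries */
    drm_property_add_enum(prop, DRM_MODE_SCALE_FULLSCREEN, "Full");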
Cc: Patrik Jakobsson Cc: Ben Skeggs Cc: nouveau@lists.freedesktop.org Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20180316190420.26734-1-ville.syrjala@linux.intel.com Reviewed-by: Stanislav Lisovskiy --- drivers/gpu/drm/drm_connector.c | 6 +++--- drivers/gpu/drm/drm_property.c | 27 +++++++++++++-------------- drivers/gpu/drm/gma500/cdv_device.c | 4 ++-- drivers/gpu/drm/gma500/psb_intel_sdvo.c | 2 +- drivers/gpu/drm/i915/intel_sdvo.c | 5 ++--- drivers/gpu/drm/nouveau/nouveau_display.c | 4 +--- include/drm/drm_property.h | 2 +- 7 files changed, 23 insertions(+), 27 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index b3cde897cd80..dfc8ca1e9413 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -1069,7 +1069,7 @@ int drm_mode_create_tv_properties(struct drm_device *dev, goto nomem; for (i = 0; i < num_modes; i++) - drm_property_add_enum(dev->mode_config.tv_mode_property, i, + drm_property_add_enum(dev->mode_config.tv_mode_property, i, modes[i]); dev->mode_config.tv_brightness_property = @@ -1156,7 +1156,7 @@ int drm_connector_attach_scaling_mode_property(struct drm_connector *connector, { struct drm_device *dev = connector->dev; struct drm_property *scaling_mode_property; - int i, j = 0; + int i; const unsigned valid_scaling_mode_mask = (1U << ARRAY_SIZE(drm_scaling_mode_enum_list)) - 1; @@ -1177,7 +1177,7 @@ int drm_connector_attach_scaling_mode_property(struct drm_connector *connector, if (!(BIT(i) & scaling_mode_mask)) continue; - ret = drm_property_add_enum(scaling_mode_property, j++, + ret = drm_property_add_enum(scaling_mode_property, drm_scaling_mode_enum_list[i].type, drm_scaling_mode_enum_list[i].name); diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c index 8f4672daac7f..1f8031e30f53 100644 --- a/drivers/gpu/drm/drm_property.c +++ b/drivers/gpu/drm/drm_property.c @@ -169,9 +169,9 @@ struct drm_property *drm_property_create_enum(struct drm_device *dev, return NULL; for (i = 0; i < num_values; i++) { - ret = drm_property_add_enum(property, i, - props[i].type, - props[i].name); + ret = drm_property_add_enum(property, + props[i].type, + props[i].name); if (ret) { drm_property_destroy(dev, property); return NULL; @@ -209,7 +209,7 @@ struct drm_property *drm_property_create_bitmask(struct drm_device *dev, uint64_t supported_bits) { struct drm_property *property; - int i, ret, index = 0; + int i, ret; int num_values = hweight64(supported_bits); flags |= DRM_MODE_PROP_BITMASK; @@ -221,14 +221,9 @@ struct drm_property *drm_property_create_bitmask(struct drm_device *dev, if (!(supported_bits & (1ULL << props[i].type))) continue; - if (WARN_ON(index >= num_values)) { - drm_property_destroy(dev, property); - return NULL; - } - - ret = drm_property_add_enum(property, index++, - props[i].type, - props[i].name); + ret = drm_property_add_enum(property, + props[i].type, + props[i].name); if (ret) { drm_property_destroy(dev, property); return NULL; @@ -376,7 +371,6 @@ EXPORT_SYMBOL(drm_property_create_bool); /** * drm_property_add_enum - add a possible value to an enumeration property * @property: enumeration property to change - * @index: index of the new enumeration * @value: value of the new enumeration * @name: symbolic name of the new enumeration * @@ -388,10 +382,11 @@ EXPORT_SYMBOL(drm_property_create_bool); * Returns: * Zero on success, error code on failure. 
*/ -int drm_property_add_enum(struct drm_property *property, int index, +int drm_property_add_enum(struct drm_property *property, uint64_t value, const char *name) { struct drm_property_enum *prop_enum; + int index = 0; if (WARN_ON(strlen(name) >= DRM_PROP_NAME_LEN)) return -EINVAL; @@ -411,8 +406,12 @@ int drm_property_add_enum(struct drm_property *property, int index, list_for_each_entry(prop_enum, &property->enum_list, head) { if (WARN_ON(prop_enum->value == value)) return -EINVAL; + index++; } + if (WARN_ON(index >= property->num_values)) + return -EINVAL; + prop_enum = kzalloc(sizeof(struct drm_property_enum), GFP_KERNEL); if (!prop_enum) return -ENOMEM; diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c index 3a3bf752e03a..34b85767e4da 100644 --- a/drivers/gpu/drm/gma500/cdv_device.c +++ b/drivers/gpu/drm/gma500/cdv_device.c @@ -485,7 +485,7 @@ void cdv_intel_attach_force_audio_property(struct drm_connector *connector) return; for (i = 0; i < ARRAY_SIZE(force_audio_names); i++) - drm_property_add_enum(prop, i, i-1, force_audio_names[i]); + drm_property_add_enum(prop, i-1, force_audio_names[i]); dev_priv->force_audio_property = prop; } @@ -514,7 +514,7 @@ void cdv_intel_attach_broadcast_rgb_property(struct drm_connector *connector) return; for (i = 0; i < ARRAY_SIZE(broadcast_rgb_names); i++) - drm_property_add_enum(prop, i, i, broadcast_rgb_names[i]); + drm_property_add_enum(prop, i, broadcast_rgb_names[i]); dev_priv->broadcast_rgb_property = prop; } diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c index 8dc2b19f913b..f2ee6aa10afa 100644 --- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c +++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c @@ -2281,7 +2281,7 @@ static bool psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_s for (i = 0; i < psb_intel_sdvo_connector->format_supported_num; i++) drm_property_add_enum( - psb_intel_sdvo_connector->tv_format, i, + psb_intel_sdvo_connector->tv_format, i, tv_format_names[psb_intel_sdvo_connector->tv_format_supported[i]]); psb_intel_sdvo->tv_format_index = psb_intel_sdvo_connector->tv_format_supported[0]; diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 96e213ec202d..25005023c243 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -2779,9 +2779,8 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo, return false; for (i = 0; i < intel_sdvo_connector->format_supported_num; i++) - drm_property_add_enum( - intel_sdvo_connector->tv_format, i, - i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]); + drm_property_add_enum(intel_sdvo_connector->tv_format, i, + tv_format_names[intel_sdvo_connector->tv_format_supported[i]]); intel_sdvo_connector->base.base.state->tv.mode = intel_sdvo_connector->tv_format_supported[0]; drm_object_attach_property(&intel_sdvo_connector->base.base.base, diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 009713404cc4..7d0bec8dd03d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -338,11 +338,9 @@ static struct nouveau_drm_prop_enum_list dither_depth[] = { if (c) { \ p = drm_property_create(dev, DRM_MODE_PROP_ENUM, n, c); \ l = (list); \ - c = 0; \ while (p && l->gen_mask) { \ if (l->gen_mask & (1 << (gen))) { \ - drm_property_add_enum(p, c, l->type, l->name); \ - c++; \ + drm_property_add_enum(p, l->type, l->name); \ 
} \ l++; \ } \ diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h index ab8167baade5..1d5c0b2a8956 100644 --- a/include/drm/drm_property.h +++ b/include/drm/drm_property.h @@ -260,7 +260,7 @@ struct drm_property *drm_property_create_object(struct drm_device *dev, uint32_t type); struct drm_property *drm_property_create_bool(struct drm_device *dev, u32 flags, const char *name); -int drm_property_add_enum(struct drm_property *property, int index, +int drm_property_add_enum(struct drm_property *property, uint64_t value, const char *name); void drm_property_destroy(struct drm_device *dev, struct drm_property *property); -- cgit v1.2.3 From 9f99963a43b735160052debbef3d99ad344db61d Mon Sep 17 00:00:00 2001 From: Tom Callaway Date: Mon, 23 Apr 2018 12:16:39 -0400 Subject: drm/tinydrm/mi0283qt: Always set rotation value MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The PiTFT (ili9340) has a hardware reset circuit that resets only on power-on and not on each reboot through a gpio like the rpi-display does. As a result, we need to always apply the rotation value regardless of the display "on/off" state. Moved the rotation setting code below out_enable:. Signed-off-by: Tom Callaway Reviewed-by: Noralf Trønnes Signed-off-by: Noralf Trønnes Link: https://patchwork.freedesktop.org/patch/msgid/20180423161639.14420-1-tcallawa@redhat.com --- drivers/gpu/drm/tinydrm/mi0283qt.c | 41 +++++++++++++++++++++----------------- 1 file changed, 23 insertions(+), 18 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/tinydrm/mi0283qt.c b/drivers/gpu/drm/tinydrm/mi0283qt.c index d5ef65179c16..015d03f2acba 100644 --- a/drivers/gpu/drm/tinydrm/mi0283qt.c +++ b/drivers/gpu/drm/tinydrm/mi0283qt.c @@ -85,24 +85,6 @@ static void mi0283qt_enable(struct drm_simple_display_pipe *pipe, /* Memory Access Control */ mipi_dbi_command(mipi, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_16BIT); - switch (mipi->rotation) { - default: - addr_mode = ILI9341_MADCTL_MV | ILI9341_MADCTL_MY | - ILI9341_MADCTL_MX; - break; - case 90: - addr_mode = ILI9341_MADCTL_MY; - break; - case 180: - addr_mode = ILI9341_MADCTL_MV; - break; - case 270: - addr_mode = ILI9341_MADCTL_MX; - break; - } - addr_mode |= ILI9341_MADCTL_BGR; - mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode); - /* Frame Rate */ mipi_dbi_command(mipi, ILI9341_FRMCTR1, 0x00, 0x1b); @@ -128,6 +110,29 @@ static void mi0283qt_enable(struct drm_simple_display_pipe *pipe, msleep(100); out_enable: + /* The PiTFT (ili9340) has a hardware reset circuit that + * resets only on power-on and not on each reboot through + * a gpio like the rpi-display does. + * As a result, we need to always apply the rotation value + * regardless of the display "on/off" state. + */ + switch (mipi->rotation) { + default: + addr_mode = ILI9341_MADCTL_MV | ILI9341_MADCTL_MY | + ILI9341_MADCTL_MX; + break; + case 90: + addr_mode = ILI9341_MADCTL_MY; + break; + case 180: + addr_mode = ILI9341_MADCTL_MV; + break; + case 270: + addr_mode = ILI9341_MADCTL_MX; + break; + } + addr_mode |= ILI9341_MADCTL_BGR; + mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode); mipi_dbi_enable_flush(mipi, crtc_state, plane_state); } -- cgit v1.2.3 From d34deab907605a81eec83afe006fad2e5b4673b4 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Mon, 23 Apr 2018 17:46:08 -0700 Subject: drm: Make the prime vmap/vunmap hooks optional. Some drivers leave these unimplemented, so don't make them have unimplemented stubs. 
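The whole change boils down to guarding the hook call, as in the sketch below (this is the pattern applied in the diff that follows):

    if (dev->driver->gem_prime_vmap)
            return dev->driver->gem_prime_vmap(obj);
    return NULL;    /* driver offers no kernel mapping for this object */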
Signed-off-by: Eric Anholt Link: https://patchwork.freedesktop.org/patch/msgid/20180424004610.4637-2-eric@anholt.net Reviewed-by: Daniel Vetter --- drivers/gpu/drm/drm_prime.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index caf675e3e692..397b46b33739 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -409,7 +409,10 @@ void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf) struct drm_gem_object *obj = dma_buf->priv; struct drm_device *dev = obj->dev; - return dev->driver->gem_prime_vmap(obj); + if (dev->driver->gem_prime_vmap) + return dev->driver->gem_prime_vmap(obj); + else + return NULL; } EXPORT_SYMBOL(drm_gem_dmabuf_vmap); @@ -426,7 +429,8 @@ void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr) struct drm_gem_object *obj = dma_buf->priv; struct drm_device *dev = obj->dev; - dev->driver->gem_prime_vunmap(obj, vaddr); + if (dev->driver->gem_prime_vunmap) + dev->driver->gem_prime_vunmap(obj, vaddr); } EXPORT_SYMBOL(drm_gem_dmabuf_vunmap); -- cgit v1.2.3 From 5e7854bd036ec9b8d8a861def32ecbfd97ca4e77 Mon Sep 17 00:00:00 2001 From: Vaishali Thakkar Date: Wed, 25 Apr 2018 12:39:53 +0530 Subject: drm/vc4: make function vc4_allocate_bin_bo static Sparse complains with following warning: drivers/gpu/drm/vc4/vc4_v3d.c:222:1: warning: symbol 'vc4_allocate_bin_bo' was not declared. Should it be static? Make vc4_allocate_bin static as it is not used outside of vc4_v3d.c. Signed-off-by: Vaishali Thakkar Signed-off-by: Eric Anholt Link: https://patchwork.freedesktop.org/patch/msgid/20180425070953.17933-1-vthakkar1994@gmail.com --- drivers/gpu/drm/vc4/vc4_v3d.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c index bfc2fa73d2ae..e47e29426078 100644 --- a/drivers/gpu/drm/vc4/vc4_v3d.c +++ b/drivers/gpu/drm/vc4/vc4_v3d.c @@ -218,8 +218,7 @@ try_again: * overall CMA pool before they make scenes complicated enough to run * out of bin space. */ -int -vc4_allocate_bin_bo(struct drm_device *drm) +static int vc4_allocate_bin_bo(struct drm_device *drm) { struct vc4_dev *vc4 = to_vc4_dev(drm); struct vc4_v3d *v3d = vc4->v3d; -- cgit v1.2.3 From 1825067e2b49c984d4b1a77f3720a7ae2576d226 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Tue, 31 Oct 2017 12:32:57 -0700 Subject: drm/vc4: Skip ULPS latching when we're in that ULPS state already. It seems that trying to go from unlatched to unlatched will time out waiting for STOP, and we can just skip that. Signed-off-by: Eric Anholt Link: https://patchwork.freedesktop.org/patch/msgid/20171031193258.17373-1-eric@anholt.net Reviewed-by: Boris Brezillon --- drivers/gpu/drm/vc4/vc4_dsi.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c index 94085f8bcd68..8aa897835118 100644 --- a/drivers/gpu/drm/vc4/vc4_dsi.c +++ b/drivers/gpu/drm/vc4/vc4_dsi.c @@ -753,6 +753,11 @@ static void vc4_dsi_ulps(struct vc4_dsi *dsi, bool ulps) (dsi->lanes > 2 ? DSI1_STAT_PHY_D2_STOP : 0) | (dsi->lanes > 3 ? 
DSI1_STAT_PHY_D3_STOP : 0)); int ret; + bool ulps_currently_enabled = (DSI_PORT_READ(PHY_AFEC0) & + DSI_PORT_BIT(PHY_AFEC0_LATCH_ULPS)); + + if (ulps == ulps_currently_enabled) + return; DSI_PORT_WRITE(STAT, stat_ulps); DSI_PORT_WRITE(PHYC, DSI_PORT_READ(PHYC) | phyc_ulps); -- cgit v1.2.3 From 3481fe768faeae3f1d2a929e401748893460d82e Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Tue, 31 Oct 2017 12:32:58 -0700 Subject: drm/panel: Enable DSI transactions on the RPi panel. It turns out that I had just mistaken what type of write the register writes were supposed to be, using DCS instead of generic long writes. Switching to transactions instead of using the atmel as a bridge also seems to resolve the sparkling pixels problem I've had. Signed-off-by: Eric Anholt Fixes: 2f733d6194bd ("drm/panel: Add support for the Raspberry Pi 7" Touchscreen.") Link: https://patchwork.freedesktop.org/patch/msgid/20171031193258.17373-2-eric@anholt.net Reviewed-by: Boris Brezillon Acked-by: Thierry Reding --- drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c index d964d454e4ae..2c9c9722734f 100644 --- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c +++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c @@ -238,12 +238,6 @@ static void rpi_touchscreen_i2c_write(struct rpi_touchscreen *ts, static int rpi_touchscreen_write(struct rpi_touchscreen *ts, u16 reg, u32 val) { -#if 0 - /* The firmware uses LP DSI transactions like this to bring up - * the hardware, which should be faster than using I2C to then - * pass to the Toshiba. However, I was unable to get it to - * work. - */ u8 msg[] = { reg, reg >> 8, @@ -253,13 +247,7 @@ static int rpi_touchscreen_write(struct rpi_touchscreen *ts, u16 reg, u32 val) val >> 24, }; - mipi_dsi_dcs_write_buffer(ts->dsi, msg, sizeof(msg)); -#else - rpi_touchscreen_i2c_write(ts, REG_WR_ADDRH, reg >> 8); - rpi_touchscreen_i2c_write(ts, REG_WR_ADDRL, reg); - rpi_touchscreen_i2c_write(ts, REG_WRITEH, val >> 8); - rpi_touchscreen_i2c_write(ts, REG_WRITEL, val); -#endif + mipi_dsi_generic_write(ts->dsi, msg, sizeof(msg)); return 0; } -- cgit v1.2.3 From 818f5c8f4cd27747e8218e8a5fb230c322e02d1e Mon Sep 17 00:00:00 2001 From: Stefan Schake Date: Wed, 25 Apr 2018 00:03:45 +0200 Subject: drm/vc4: Syncobj import support Allow userland to specify a syncobj that is waited on before a render job starts processing. 
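A rough userspace sketch of the new field — handle and fd names are hypothetical, see the uapi hunk below; leaving the field at 0 keeps the old behaviour:

    struct drm_vc4_submit_cl submit = { 0 };

    /* ... fill in bin/render CLs and BO handles as before ... */
    submit.in_sync = in_syncobj_handle;  /* wait on this syncobj first, 0 = no wait */

    drmIoctl(fd, DRM_IOCTL_VC4_SUBMIT_CL, &submit);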
v2: Use 0 as invalid syncobj to drop flag (Eric) Drop extra newline (Eric) Signed-off-by: Stefan Schake Signed-off-by: Eric Anholt Reviewed-by: Eric Anholt Link: https://patchwork.freedesktop.org/patch/msgid/1524607427-12876-2-git-send-email-stschake@gmail.com --- drivers/gpu/drm/vc4/vc4_drv.h | 1 + drivers/gpu/drm/vc4/vc4_gem.c | 30 +++++++++++++++++++++++++----- include/uapi/drm/vc4_drm.h | 7 +++---- 3 files changed, 29 insertions(+), 9 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h index 22589d39083c..554a4e810d5b 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.h +++ b/drivers/gpu/drm/vc4/vc4_drv.h @@ -11,6 +11,7 @@ #include #include #include +#include #include "uapi/drm/vc4_drm.h" diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c index 2107b0daf8ef..e305ccdedf47 100644 --- a/drivers/gpu/drm/vc4/vc4_gem.c +++ b/drivers/gpu/drm/vc4/vc4_gem.c @@ -27,6 +27,7 @@ #include #include #include +#include #include "uapi/drm/vc4_drm.h" #include "vc4_drv.h" @@ -1115,6 +1116,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data, struct drm_vc4_submit_cl *args = data; struct vc4_exec_info *exec; struct ww_acquire_ctx acquire_ctx; + struct dma_fence *in_fence; int ret = 0; if ((args->flags & ~(VC4_SUBMIT_CL_USE_CLEAR_COLOR | @@ -1125,11 +1127,6 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data, return -EINVAL; } - if (args->pad2 != 0) { - DRM_DEBUG("->pad2 must be set to zero\n"); - return -EINVAL; - } - exec = kcalloc(1, sizeof(*exec), GFP_KERNEL); if (!exec) { DRM_ERROR("malloc failure on exec struct\n"); @@ -1164,6 +1161,29 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data, } } + if (args->in_sync) { + ret = drm_syncobj_find_fence(file_priv, args->in_sync, + &in_fence); + if (ret) + goto fail; + + /* When the fence (or fence array) is exclusively from our + * context we can skip the wait since jobs are executed in + * order of their submission through this ioctl and this can + * only have fences from a prior job. + */ + if (!dma_fence_match_context(in_fence, + vc4->dma_fence_context)) { + ret = dma_fence_wait(in_fence, true); + if (ret) { + dma_fence_put(in_fence); + goto fail; + } + } + + dma_fence_put(in_fence); + } + if (exec->args->bin_cl_size != 0) { ret = vc4_get_bcl(dev, exec); if (ret) diff --git a/include/uapi/drm/vc4_drm.h b/include/uapi/drm/vc4_drm.h index b95a0e11cb07..d97065b86431 100644 --- a/include/uapi/drm/vc4_drm.h +++ b/include/uapi/drm/vc4_drm.h @@ -183,11 +183,10 @@ struct drm_vc4_submit_cl { /* ID of the perfmon to attach to this job. 0 means no perfmon. */ __u32 perfmonid; - /* Unused field to align this struct on 64 bits. Must be set to 0. - * If one ever needs to add an u32 field to this struct, this field - * can be used. + /* Syncobj handle to wait on. If set, processing of this render job + * will not start until the syncobj is signaled. 0 means ignore. */ - __u32 pad2; + __u32 in_sync; }; /** -- cgit v1.2.3 From e84fcb95e07442edd7ce3b13973523646dbc581a Mon Sep 17 00:00:00 2001 From: Stefan Schake Date: Wed, 25 Apr 2018 00:03:46 +0200 Subject: drm/vc4: Export fence through syncobj Allow specifying a syncobj on render job submission where we store the fence for the job. This gives userland flexible access to the fence. 
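Continuing the userspace sketch from the previous patch (still hypothetical handles, and assuming libdrm's drmSyncobjWait() wrapper), the exported fence can then be waited on or handed to another process:

    submit.out_sync = out_syncobj_handle; /* receives the job's fence, 0 = don't export */
    drmIoctl(fd, DRM_IOCTL_VC4_SUBMIT_CL, &submit);

    /* later: block until the render job has signalled */
    drmSyncobjWait(fd, &out_syncobj_handle, 1, INT64_MAX, 0, NULL);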
v2: Use 0 as invalid syncobj to drop flag (Eric) Don't reintroduce the padding (Eric) Signed-off-by: Stefan Schake Signed-off-by: Eric Anholt Reviewed-by: Eric Anholt Link: https://patchwork.freedesktop.org/patch/msgid/1524607427-12876-3-git-send-email-stschake@gmail.com --- drivers/gpu/drm/vc4/vc4_gem.c | 30 ++++++++++++++++++++++++++++-- include/uapi/drm/vc4_drm.h | 6 ++++++ 2 files changed, 34 insertions(+), 2 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c index e305ccdedf47..a4c4be3ac6af 100644 --- a/drivers/gpu/drm/vc4/vc4_gem.c +++ b/drivers/gpu/drm/vc4/vc4_gem.c @@ -656,7 +656,8 @@ retry: */ static int vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec, - struct ww_acquire_ctx *acquire_ctx) + struct ww_acquire_ctx *acquire_ctx, + struct drm_syncobj *out_sync) { struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_exec_info *renderjob; @@ -679,6 +680,9 @@ vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec, fence->seqno = exec->seqno; exec->fence = &fence->base; + if (out_sync) + drm_syncobj_replace_fence(out_sync, exec->fence); + vc4_update_bo_seqnos(exec, seqno); vc4_unlock_bo_reservations(dev, exec, acquire_ctx); @@ -1114,6 +1118,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data, struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_file *vc4file = file_priv->driver_priv; struct drm_vc4_submit_cl *args = data; + struct drm_syncobj *out_sync = NULL; struct vc4_exec_info *exec; struct ww_acquire_ctx acquire_ctx; struct dma_fence *in_fence; @@ -1201,12 +1206,33 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data, if (ret) goto fail; + if (args->out_sync) { + out_sync = drm_syncobj_find(file_priv, args->out_sync); + if (!out_sync) { + ret = -EINVAL; + goto fail; + } + + /* We replace the fence in out_sync in vc4_queue_submit since + * the render job could execute immediately after that call. + * If it finishes before our ioctl processing resumes the + * render job fence could already have been freed. + */ + } + /* Clear this out of the struct we'll be putting in the queue, * since it's part of our stack. */ exec->args = NULL; - ret = vc4_queue_submit(dev, exec, &acquire_ctx); + ret = vc4_queue_submit(dev, exec, &acquire_ctx, out_sync); + + /* The syncobj isn't part of the exec data and we need to free our + * reference even if job submission failed. + */ + if (out_sync) + drm_syncobj_put(out_sync); + if (ret) goto fail; diff --git a/include/uapi/drm/vc4_drm.h b/include/uapi/drm/vc4_drm.h index d97065b86431..2be4fe3610b8 100644 --- a/include/uapi/drm/vc4_drm.h +++ b/include/uapi/drm/vc4_drm.h @@ -187,6 +187,12 @@ struct drm_vc4_submit_cl { * will not start until the syncobj is signaled. 0 means ignore. */ __u32 in_sync; + + /* Syncobj handle to export fence to. If set, the fence in the syncobj + * will be replaced with a fence that signals upon completion of this + * render job. 0 means ignore. + */ + __u32 out_sync; }; /** -- cgit v1.2.3 From c720d8914397fe8efc568eea71e0dd240755a2d9 Mon Sep 17 00:00:00 2001 From: Stefan Schake Date: Wed, 25 Apr 2018 00:03:47 +0200 Subject: drm/vc4: Enable syncobj support This doesn't require any additional functionality from the driver but is a prerequisite to userland calling the syncobj ioctls. 
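As a small sketch (assuming libdrm), userland can probe for the flag through the standard capability ioctl before relying on the in_sync/out_sync fields added in the previous two patches:

    uint64_t cap = 0;

    if (drmGetCap(fd, DRM_CAP_SYNCOBJ, &cap) == 0 && cap) {
            /* syncobj create/destroy/fd-export ioctls are available */
    }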
Signed-off-by: Stefan Schake Signed-off-by: Eric Anholt Reviewed-by: Eric Anholt Link: https://patchwork.freedesktop.org/patch/msgid/1524607427-12876-4-git-send-email-stschake@gmail.com --- drivers/gpu/drm/vc4/vc4_drv.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index 40ddeaafd65f..d9b8b701d2ce 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c @@ -175,7 +175,8 @@ static struct drm_driver vc4_drm_driver = { DRIVER_GEM | DRIVER_HAVE_IRQ | DRIVER_RENDER | - DRIVER_PRIME), + DRIVER_PRIME | + DRIVER_SYNCOBJ), .lastclose = drm_fb_helper_lastclose, .open = vc4_open, .postclose = vc4_close, -- cgit v1.2.3 From 58badaa7783dc341daa1586235823fff94d3f96a Mon Sep 17 00:00:00 2001 From: "Kristian H. Kristensen" Date: Wed, 18 Apr 2018 10:31:52 -0700 Subject: drm/rockchip: Disable blending for win0 Blending win0 with the background color doesn't seem to work correctly. We only get the background color, no matter the contents of the win0 framebuffer. However, blending pre-multiplied color with the default opaque black default background color is a no-op, so we can just disable blending to get the correct result. Signed-off-by: Kristian H. Kristensen Cc: Sandy Huang Cc: Sean Paul Signed-off-by: Sean Paul Link: https://patchwork.freedesktop.org/patch/msgid/20180418173152.93246-1-hoegsberg@chromium.org --- drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index fe3faa7c38d9..2121345a61af 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -76,6 +76,9 @@ #define VOP_WIN_GET_YRGBADDR(vop, win) \ vop_readl(vop, win->base + win->phy->yrgb_mst.offset) +#define VOP_WIN_TO_INDEX(vop_win) \ + ((vop_win) - (vop_win)->vop->win) + #define to_vop(x) container_of(x, struct vop, crtc) #define to_vop_win(x) container_of(x, struct vop_win, base) @@ -708,6 +711,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane, dma_addr_t dma_addr; uint32_t val; bool rb_swap; + int win_index = VOP_WIN_TO_INDEX(vop_win); int format; /* @@ -777,7 +781,14 @@ static void vop_plane_atomic_update(struct drm_plane *plane, rb_swap = has_rb_swapped(fb->format->format); VOP_WIN_SET(vop, win, rb_swap, rb_swap); - if (fb->format->has_alpha) { + /* + * Blending win0 with the background color doesn't seem to work + * correctly. We only get the background color, no matter the contents + * of the win0 framebuffer. However, blending pre-multiplied color + * with the default opaque black default background color is a no-op, + * so we can just disable blending to get the correct result. 
+ */ + if (fb->format->has_alpha && win_index > 0) { VOP_WIN_SET(vop, win, dst_alpha_ctl, DST_FACTOR_M0(ALPHA_SRC_INVERSE)); val = SRC_ALPHA_EN(1) | SRC_COLOR_M0(ALPHA_SRC_PRE_MUL) | -- cgit v1.2.3 From 304f72e5947d63682159d2f575f56607592df500 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 2 May 2018 11:10:48 +0100 Subject: gpu: drm: sti: fix spelling mistake: "initialze" -> "initialize" Trivial fix to spelling mistake in DRM_ERROR error message Signed-off-by: Colin Ian King Signed-off-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20180502101048.8442-1-colin.king@canonical.com --- drivers/gpu/drm/sti/sti_crtc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c index 21e50d7b1f86..5824e6aca8f4 100644 --- a/drivers/gpu/drm/sti/sti_crtc.c +++ b/drivers/gpu/drm/sti/sti_crtc.c @@ -357,7 +357,7 @@ int sti_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer, res = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor, &sti_crtc_funcs, NULL); if (res) { - DRM_ERROR("Can't initialze CRTC\n"); + DRM_ERROR("Can't initialize CRTC\n"); return -EINVAL; } -- cgit v1.2.3 From ca454bd42dc24374150febf83a443e8c1d9cf28a Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Wed, 2 May 2018 15:47:18 +0200 Subject: drm/pl111: Support the Versatile Express The Versatile Express uses a special configuration controller deeply embedded in the system motherboard FPGA to multiplex the two to three (!) display controller instances out to the single SiI9022 bridge. Set up an extra file with the logic to probe to the FPGA mux register on the system controller bus, then parse the device tree to see if there is a CLCD or HDLCD instance on the core tile (also known as the daughterboard) by looking in the root of the device tree for compatible nodes. - If there is a HDLCD on the core tile, and there is a driver for it, we exit probe and deactivate the motherboard CLCD. We do not touch the DVI mux in this case, to make sure we don't break HDLCD. - If there is a CLCD on both the motherboard and the core tile (only the CA9 has this) the core tile CLCD takes precedence and get muxed to the DVI connector. - Only if there is no working graphics on the core tile, the motherboard CLCD is probed and muxed to the DVI connector. Core tile graphics should always take precedence as it can address all memory and is also faster, however the motherboard CLCD is good to have around for diagnostics and testing. It is possible to test the motherboard CLCD by setting the status = "disabled" property on the core tile CLCD or HDLCD. Scale down the Versatile Express to 16BPP so we can support a 1024x768 display despite the bus bandwidth restrictions on this platform. (The motherboard CLCD supports slightly lower resolution.) 
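On the register level the mux selection is a single write on the Vexpress configuration bus; a sketch of the happy path, using the constants defined further down in this patch:

    /* route the chosen CLCD instance to the DVI bridge on the motherboard */
    regmap_write(map, 0, mux_motherboard ?
                         VEXPRESS_FPGAMUX_MOTHERBOARD :
                         VEXPRESS_FPGAMUX_DAUGHTERBOARD_1);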
Cc: Liviu Dudau Cc: Pawel Moll Acked-by: Eric Anholt Tested-by: Robin Murphy Signed-off-by: Linus Walleij Link: https://patchwork.freedesktop.org/patch/msgid/20180502134719.8388-1-linus.walleij@linaro.org --- drivers/gpu/drm/pl111/Makefile | 1 + drivers/gpu/drm/pl111/pl111_versatile.c | 49 ++++++++++++- drivers/gpu/drm/pl111/pl111_vexpress.c | 125 ++++++++++++++++++++++++++++++++ drivers/gpu/drm/pl111/pl111_vexpress.h | 22 ++++++ 4 files changed, 196 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/pl111/pl111_vexpress.c create mode 100644 drivers/gpu/drm/pl111/pl111_vexpress.h (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/pl111/Makefile b/drivers/gpu/drm/pl111/Makefile index 9c5e8dba8ac6..19a8189dc54f 100644 --- a/drivers/gpu/drm/pl111/Makefile +++ b/drivers/gpu/drm/pl111/Makefile @@ -3,6 +3,7 @@ pl111_drm-y += pl111_display.o \ pl111_versatile.o \ pl111_drv.o +pl111_drm-$(CONFIG_ARCH_VEXPRESS) += pl111_vexpress.o pl111_drm-$(CONFIG_DEBUG_FS) += pl111_debugfs.o obj-$(CONFIG_DRM_PL111) += pl111_drm.o diff --git a/drivers/gpu/drm/pl111/pl111_versatile.c b/drivers/gpu/drm/pl111/pl111_versatile.c index 9302f516045e..78ddf8534fd2 100644 --- a/drivers/gpu/drm/pl111/pl111_versatile.c +++ b/drivers/gpu/drm/pl111/pl111_versatile.c @@ -1,12 +1,14 @@ #include #include #include +#include #include #include #include #include #include #include "pl111_versatile.h" +#include "pl111_vexpress.h" #include "pl111_drm.h" static struct regmap *versatile_syscon_map; @@ -22,6 +24,7 @@ enum versatile_clcd { REALVIEW_CLCD_PB11MP, REALVIEW_CLCD_PBA8, REALVIEW_CLCD_PBX, + VEXPRESS_CLCD_V2M, }; static const struct of_device_id versatile_clcd_of_match[] = { @@ -53,6 +56,10 @@ static const struct of_device_id versatile_clcd_of_match[] = { .compatible = "arm,realview-pbx-syscon", .data = (void *)REALVIEW_CLCD_PBX, }, + { + .compatible = "arm,vexpress-muxfpga", + .data = (void *)VEXPRESS_CLCD_V2M, + }, {}, }; @@ -286,12 +293,26 @@ static const struct pl111_variant_data pl111_realview = { .fb_bpp = 16, }; +/* + * Versatile Express PL111 variant, again we just push the maximum + * BPP to 16 to be able to get 1024x768 without saturating the memory + * bus. The clockdivider also seems broken on the Versatile Express. 
+ */ +static const struct pl111_variant_data pl111_vexpress = { + .name = "PL111 Versatile Express", + .formats = pl111_realview_pixel_formats, + .nformats = ARRAY_SIZE(pl111_realview_pixel_formats), + .fb_bpp = 16, + .broken_clockdivider = true, +}; + int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv) { const struct of_device_id *clcd_id; enum versatile_clcd versatile_clcd_type; struct device_node *np; struct regmap *map; + int ret; np = of_find_matching_node_and_match(NULL, versatile_clcd_of_match, &clcd_id); @@ -301,7 +322,26 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv) } versatile_clcd_type = (enum versatile_clcd)clcd_id->data; - map = syscon_node_to_regmap(np); + /* Versatile Express special handling */ + if (versatile_clcd_type == VEXPRESS_CLCD_V2M) { + struct platform_device *pdev; + + /* Call into deep Vexpress configuration API */ + pdev = of_find_device_by_node(np); + if (!pdev) { + dev_err(dev, "can't find the sysreg device, deferring\n"); + return -EPROBE_DEFER; + } + map = dev_get_drvdata(&pdev->dev); + if (!map) { + dev_err(dev, "sysreg has not yet probed\n"); + platform_device_put(pdev); + return -EPROBE_DEFER; + } + } else { + map = syscon_node_to_regmap(np); + } + if (IS_ERR(map)) { dev_err(dev, "no Versatile syscon regmap\n"); return PTR_ERR(map); @@ -340,6 +380,13 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv) priv->variant_display_disable = pl111_realview_clcd_disable; dev_info(dev, "set up callbacks for RealView PL111\n"); break; + case VEXPRESS_CLCD_V2M: + priv->variant = &pl111_vexpress; + dev_info(dev, "initializing Versatile Express PL111\n"); + ret = pl111_vexpress_clcd_init(dev, priv, map); + if (ret) + return ret; + break; default: dev_info(dev, "unknown Versatile system controller\n"); break; diff --git a/drivers/gpu/drm/pl111/pl111_vexpress.c b/drivers/gpu/drm/pl111/pl111_vexpress.c new file mode 100644 index 000000000000..c9fee625faf1 --- /dev/null +++ b/drivers/gpu/drm/pl111/pl111_vexpress.c @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Versatile Express PL111 handling + * Copyright (C) 2018 Linus Walleij + * + * This module binds to the "arm,vexpress-muxfpga" device on the + * Versatile Express configuration bus and sets up which CLCD instance + * gets muxed out on the DVI bridge. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include "pl111_drm.h" +#include "pl111_vexpress.h" + +#define VEXPRESS_FPGAMUX_MOTHERBOARD 0x00 +#define VEXPRESS_FPGAMUX_DAUGHTERBOARD_1 0x01 +#define VEXPRESS_FPGAMUX_DAUGHTERBOARD_2 0x02 + +int pl111_vexpress_clcd_init(struct device *dev, + struct pl111_drm_dev_private *priv, + struct regmap *map) +{ + struct device_node *root; + struct device_node *child; + struct device_node *ct_clcd = NULL; + bool has_coretile_clcd = false; + bool has_coretile_hdlcd = false; + bool mux_motherboard = true; + u32 val; + int ret; + + /* + * Check if we have a CLCD or HDLCD on the core tile by checking if a + * CLCD or HDLCD is available in the root of the device tree. 
+ */ + root = of_find_node_by_path("/"); + if (!root) + return -EINVAL; + + for_each_available_child_of_node(root, child) { + if (of_device_is_compatible(child, "arm,pl111")) { + has_coretile_clcd = true; + ct_clcd = child; + break; + } + if (of_device_is_compatible(child, "arm,hdlcd")) { + has_coretile_hdlcd = true; + break; + } + } + + /* + * If there is a coretile HDLCD and it has a driver, + * do not mux the CLCD on the motherboard to the DVI. + */ + if (has_coretile_hdlcd && IS_ENABLED(CONFIG_DRM_HDLCD)) + mux_motherboard = false; + + /* + * On the Vexpress CA9 we let the CLCD on the coretile + * take precedence, so also in this case do not mux the + * motherboard to the DVI. + */ + if (has_coretile_clcd) + mux_motherboard = false; + + if (mux_motherboard) { + dev_info(dev, "DVI muxed to motherboard CLCD\n"); + val = VEXPRESS_FPGAMUX_MOTHERBOARD; + } else if (ct_clcd == dev->of_node) { + dev_info(dev, + "DVI muxed to daughterboard 1 (core tile) CLCD\n"); + val = VEXPRESS_FPGAMUX_DAUGHTERBOARD_1; + } else { + dev_info(dev, "core tile graphics present\n"); + dev_info(dev, "this device will be deactivated\n"); + return -ENODEV; + } + + ret = regmap_write(map, 0, val); + if (ret) { + dev_err(dev, "error setting DVI muxmode\n"); + return -ENODEV; + } + + return 0; +} + +/* + * This sets up the regmap pointer that will then be retrieved by + * the detection code in pl111_versatile.c and passed in to the + * pl111_vexpress_clcd_init() function above. + */ +static int vexpress_muxfpga_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct regmap *map; + + map = devm_regmap_init_vexpress_config(&pdev->dev); + if (IS_ERR(map)) + return PTR_ERR(map); + dev_set_drvdata(dev, map); + + return 0; +} + +static const struct of_device_id vexpress_muxfpga_match[] = { + { .compatible = "arm,vexpress-muxfpga", } +}; + +static struct platform_driver vexpress_muxfpga_driver = { + .driver = { + .name = "vexpress-muxfpga", + .of_match_table = of_match_ptr(vexpress_muxfpga_match), + }, + .probe = vexpress_muxfpga_probe, +}; + +builtin_platform_driver(vexpress_muxfpga_driver); diff --git a/drivers/gpu/drm/pl111/pl111_vexpress.h b/drivers/gpu/drm/pl111/pl111_vexpress.h new file mode 100644 index 000000000000..bb54864ca91e --- /dev/null +++ b/drivers/gpu/drm/pl111/pl111_vexpress.h @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0 + +struct device; +struct pl111_drm_dev_private; +struct regmap; + +#ifdef CONFIG_ARCH_VEXPRESS + +int pl111_vexpress_clcd_init(struct device *dev, + struct pl111_drm_dev_private *priv, + struct regmap *map); + +#else + +static inline int pl111_vexpress_clcd_init(struct device *dev, + struct pl111_drm_dev_private *priv, + struct regmap *map) +{ + return -ENODEV; +} + +#endif -- cgit v1.2.3 From 57450671776b37d7c81cd52a89982c14bca46cfc Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Wed, 2 May 2018 15:47:19 +0200 Subject: drm/pl111: Enable device-specific assigned memory MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The Versatile Express has 8 MB of dedicated video RAM (VRAM) on the motherboard, which is what we should be using for the PL111 if available. On this platform, the memory backplane is constructed so that only this memory will work properly with the CLCD on the motherboard, using any other memory area just gives random snow on the display. The CA9 Versatile Express also has a PL111 instance on its core tile that can address all memory, and this does not have the restriction. 
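Condensed to its essentials, the pattern this patch applies (the full change is in pl111_drv.c below) is: opt the device into its reserved region at probe time, remember that this succeeded, and then refuse PRIME imports, because imported dma-bufs can sit anywhere in system memory while this scanout engine can only reach the dedicated VRAM. A simplified sketch with error handling trimmed:

#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/of_reserved_mem.h>
#include <drm/drm_gem_cma_helper.h>
#include "pl111_drm.h"

/* Probe time: bind the DT-assigned VRAM pool to this device, if any. */
static void example_bind_vram(struct device *dev,
			      struct pl111_drm_dev_private *priv)
{
	if (!of_reserved_mem_device_init(dev))
		priv->use_device_memory = true;	/* CMA now allocates from VRAM */
}

/* PRIME import: foreign buffers may sit outside the VRAM, so reject them. */
static struct drm_gem_object *
example_import(struct drm_device *dev, struct dma_buf_attachment *attach,
	       struct sg_table *sgt)
{
	struct pl111_drm_dev_private *priv = dev->dev_private;

	if (priv->use_device_memory)
		return ERR_PTR(-EINVAL);
	return drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
}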
The memory is assigned to the device using the memory-region device tree property and a "shared-dma-pool" reserved memory pool like this: reserved-memory { #address-cells = <1>; #size-cells = <1>; ranges; vram: vram@48000000 { compatible = "shared-dma-pool"; reg = <0x48000000 0x00800000>; no-map; }; }; clcd@1f000 { compatible = "arm,pl111", "arm,primecell"; (...) memory-region = <&vram>; }; Cc: Liviu Dudau Cc: Mali DP Maintainers Reviewed-by: Eric Anholt Tested-by: Robin Murphy Signed-off-by: Linus Walleij Link: https://patchwork.freedesktop.org/patch/msgid/20180502134719.8388-2-linus.walleij@linaro.org --- drivers/gpu/drm/pl111/pl111_drm.h | 1 + drivers/gpu/drm/pl111/pl111_drv.c | 34 ++++++++++++++++++++++++++++++++-- 2 files changed, 33 insertions(+), 2 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/pl111/pl111_drm.h b/drivers/gpu/drm/pl111/pl111_drm.h index 8639b2d4ddf7..ce4501d0ab48 100644 --- a/drivers/gpu/drm/pl111/pl111_drm.h +++ b/drivers/gpu/drm/pl111/pl111_drm.h @@ -79,6 +79,7 @@ struct pl111_drm_dev_private { const struct pl111_variant_data *variant; void (*variant_display_enable) (struct drm_device *drm, u32 format); void (*variant_display_disable) (struct drm_device *drm); + bool use_device_memory; }; int pl111_display_init(struct drm_device *dev); diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c index 4621259d5387..454ff0804642 100644 --- a/drivers/gpu/drm/pl111/pl111_drv.c +++ b/drivers/gpu/drm/pl111/pl111_drv.c @@ -60,6 +60,7 @@ #include #include #include +#include #include #include @@ -207,6 +208,24 @@ finish: return ret; } +static struct drm_gem_object * +pl111_gem_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sgt) +{ + struct pl111_drm_dev_private *priv = dev->dev_private; + + /* + * When using device-specific reserved memory we can't import + * DMA buffers: those are passed by reference in any global + * memory and we can only handle a specific range of memory.
+ */ + if (priv->use_device_memory) + return ERR_PTR(-EINVAL); + + return drm_gem_cma_prime_import_sg_table(dev, attach, sgt); +} + DEFINE_DRM_GEM_CMA_FOPS(drm_fops); static struct drm_driver pl111_drm_driver = { @@ -227,7 +246,7 @@ static struct drm_driver pl111_drm_driver = { .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import = drm_gem_prime_import, - .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table, + .gem_prime_import_sg_table = pl111_gem_import_sg_table, .gem_prime_export = drm_gem_prime_export, .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table, @@ -257,6 +276,12 @@ static int pl111_amba_probe(struct amba_device *amba_dev, drm->dev_private = priv; priv->variant = variant; + ret = of_reserved_mem_device_init(dev); + if (!ret) { + dev_info(dev, "using device-specific reserved memory\n"); + priv->use_device_memory = true; + } + if (of_property_read_u32(dev->of_node, "max-memory-bandwidth", &priv->memory_bw)) { dev_info(dev, "no max memory bandwidth specified, assume unlimited\n"); @@ -275,7 +300,8 @@ static int pl111_amba_probe(struct amba_device *amba_dev, priv->regs = devm_ioremap_resource(dev, &amba_dev->res); if (IS_ERR(priv->regs)) { dev_err(dev, "%s failed mmio\n", __func__); - return PTR_ERR(priv->regs); + ret = PTR_ERR(priv->regs); + goto dev_unref; } /* This may override some variant settings */ @@ -305,11 +331,14 @@ static int pl111_amba_probe(struct amba_device *amba_dev, dev_unref: drm_dev_unref(drm); + of_reserved_mem_device_release(dev); + return ret; } static int pl111_amba_remove(struct amba_device *amba_dev) { + struct device *dev = &amba_dev->dev; struct drm_device *drm = amba_get_drvdata(amba_dev); struct pl111_drm_dev_private *priv = drm->dev_private; @@ -319,6 +348,7 @@ static int pl111_amba_remove(struct amba_device *amba_dev) drm_panel_bridge_remove(priv->bridge); drm_mode_config_cleanup(drm); drm_dev_unref(drm); + of_reserved_mem_device_release(dev); return 0; } -- cgit v1.2.3 From ec66723197103eebd7f7099df6d5ea23deff679b Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Thu, 26 Apr 2018 17:16:31 +0300 Subject: drm/rect: Fix drm_rect_rotation_inv() docs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit An overeager sed has corrupted the drm_rect_rotation_inv() documentation. Fix it up. Looks like it wasn't entirely correct before the sed fail either. We were missing _rect_ from the function names, which also explains why the sed hit these by accident. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20180426141631.15798-1-ville.syrjala@linux.intel.com Reviewed-by: Daniel Vetter --- drivers/gpu/drm/drm_rect.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/drm_rect.c b/drivers/gpu/drm/drm_rect.c index 9817c1445ba9..a3783ecea297 100644 --- a/drivers/gpu/drm/drm_rect.c +++ b/drivers/gpu/drm/drm_rect.c @@ -373,8 +373,8 @@ EXPORT_SYMBOL(drm_rect_rotate); * them when doing a rotatation and its inverse. * That is, if you do :: * - * DRM_MODE_PROP_ROTATE(&r, width, height, rotation); - * DRM_MODE_ROTATE_inv(&r, width, height, rotation); + * drm_rect_rotate(&r, width, height, rotation); + * drm_rect_rotate_inv(&r, width, height, rotation); * * you will always get back the original rectangle. 
*/ -- cgit v1.2.3 From 5bb562f829bf69223e90e158f654b063d964a291 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 20 Apr 2018 08:51:56 +0200 Subject: drm: Drop DRM_CONTROL_ALLOW from ioctls We've disabled control nodes in commit 8a357d10043c75e980e7fcdb60d2b913491564af Author: Daniel Vetter Date: Fri Oct 28 10:10:50 2016 +0200 drm: Nerf DRM_CONTROL nodes and there was only a minor uapi break that we've paper over with commit 6449b088dd51dd5aa6b38455888bbf538d21f2fc Author: Daniel Vetter Date: Fri Dec 9 14:56:56 2016 +0100 drm: Add fake controlD* symlinks for backwards compat Since then Keith has also added real control nodes with a proper&useable uapi in the form of drm leases. It's time to remove the control node leftovers. Cc: Keith Packard Reviewed-by: Sean Paul Signed-off-by: Daniel Vetter Cc: Gustavo Padovan Cc: Maarten Lankhorst Cc: Sean Paul Cc: David Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20180420065159.4531-1-daniel.vetter@ffwll.ch --- drivers/gpu/drm/drm_ioc32.c | 4 +-- drivers/gpu/drm/drm_ioctl.c | 68 ++++++++++++++++++++++----------------------- 2 files changed, 36 insertions(+), 36 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c index f8e96e648acf..67b1fca39aa6 100644 --- a/drivers/gpu/drm/drm_ioc32.c +++ b/drivers/gpu/drm/drm_ioc32.c @@ -105,7 +105,7 @@ static int compat_drm_version(struct file *file, unsigned int cmd, .desc = compat_ptr(v32.desc), }; err = drm_ioctl_kernel(file, drm_version, &v, - DRM_UNLOCKED|DRM_RENDER_ALLOW|DRM_CONTROL_ALLOW); + DRM_UNLOCKED|DRM_RENDER_ALLOW); if (err) return err; @@ -885,7 +885,7 @@ static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd, return -EFAULT; err = drm_ioctl_kernel(file, drm_mode_addfb2, &req64, - DRM_CONTROL_ALLOW|DRM_UNLOCKED); + DRM_UNLOCKED); if (err) return err; diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index af782911c505..43f7e2e81294 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -539,7 +539,7 @@ EXPORT_SYMBOL(drm_ioctl_permit); /* Ioctl table */ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, - DRM_UNLOCKED|DRM_RENDER_ALLOW|DRM_CONTROL_ALLOW), + DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY), @@ -613,41 +613,41 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_CONTROL_ALLOW|DRM_UNLOCKED), - 
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATOMIC, drm_mode_atomic_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATEPROPBLOB, drm_mode_createblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROYPROPBLOB, drm_mode_destroyblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, 
DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATOMIC, drm_mode_atomic_ioctl, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATEPROPBLOB, drm_mode_createblob_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROYPROPBLOB, drm_mode_destroyblob_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_CREATE, drm_syncobj_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), @@ -665,10 +665,10 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_CRTC_GET_SEQUENCE, drm_crtc_get_sequence_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_CRTC_QUEUE_SEQUENCE, drm_crtc_queue_sequence_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_LEASE, drm_mode_create_lease_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_LIST_LESSEES, drm_mode_list_lessees_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GET_LEASE, drm_mode_get_lease_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_REVOKE_LEASE, drm_mode_revoke_lease_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_LEASE, drm_mode_create_lease_ioctl, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_LIST_LESSEES, drm_mode_list_lessees_ioctl, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GET_LEASE, drm_mode_get_lease_ioctl, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_REVOKE_LEASE, drm_mode_revoke_lease_ioctl, DRM_MASTER|DRM_UNLOCKED), }; #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) -- cgit v1.2.3 From 0cd54b039537767cc12c4d7b6a62a98d01d99403 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 20 Apr 2018 08:51:57 +0200 Subject: drm/i915: Drop DRM_CONTROL_ALLOW Control nodes are no more! 
Reviewed-by: Joonas Lahtinen Reviewed-by: Sean Paul Signed-off-by: Daniel Vetter Cc: Jani Nikula Cc: Joonas Lahtinen Cc: Rodrigo Vivi Cc: intel-gfx@lists.freedesktop.org Link: https://patchwork.freedesktop.org/patch/msgid/20180420065159.4531-2-daniel.vetter@ffwll.ch --- drivers/gpu/drm/i915/i915_drv.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 07c07d55398b..154414832d86 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -2816,10 +2816,10 @@ static const struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0), DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW), + DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER), + DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER), + DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER), + DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER), DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW), -- cgit v1.2.3 From 190c462d5be19ba622a82f5fd0625087c870a1e6 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 20 Apr 2018 08:51:58 +0200 Subject: drm/vmwgfx: Drop DRM_CONTROL_ALLOW Control nodes are no more! 
Reviewed-by: Thomas Hellstrom Reviewed-by: Sean Paul Signed-off-by: Daniel Vetter Cc: VMware Graphics Cc: Sinclair Yeh Cc: Thomas Hellstrom Link: https://patchwork.freedesktop.org/patch/msgid/20180420065159.4531-3-daniel.vetter@ffwll.ch --- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 70e1a8820a7c..97f37c3c16f2 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -159,14 +159,14 @@ static const struct drm_ioctl_desc vmw_ioctls[] = { DRM_RENDER_ALLOW), VMW_IOCTL_DEF(VMW_CURSOR_BYPASS, vmw_kms_cursor_bypass_ioctl, - DRM_MASTER | DRM_CONTROL_ALLOW), + DRM_MASTER), VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl, - DRM_MASTER | DRM_CONTROL_ALLOW), + DRM_MASTER), VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl, - DRM_MASTER | DRM_CONTROL_ALLOW), + DRM_MASTER), VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl, - DRM_MASTER | DRM_CONTROL_ALLOW), + DRM_MASTER), VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl, DRM_AUTH | DRM_RENDER_ALLOW), -- cgit v1.2.3 From 95aee35fe10dc6c86498c5dd5f06bd5fb5af723a Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 27 Apr 2018 08:17:14 +0200 Subject: drm: Remove unecessary dma_fence_ops dma_fence_default_wait is the default now, same for the trivial enable_signaling implementation. Reviewed-by: Eric Anholt Signed-off-by: Daniel Vetter Cc: Gustavo Padovan Cc: Maarten Lankhorst Cc: Sean Paul Cc: David Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20180427061724.28497-8-daniel.vetter@ffwll.ch --- drivers/gpu/drm/drm_crtc.c | 7 ------- drivers/gpu/drm/drm_syncobj.c | 1 - drivers/gpu/drm/scheduler/sched_fence.c | 11 ----------- 3 files changed, 19 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index a231dd5dce16..e4d3285f4191 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -225,16 +225,9 @@ static const char *drm_crtc_fence_get_timeline_name(struct dma_fence *fence) return crtc->timeline_name; } -static bool drm_crtc_fence_enable_signaling(struct dma_fence *fence) -{ - return true; -} - static const struct dma_fence_ops drm_crtc_fence_ops = { .get_driver_name = drm_crtc_fence_get_driver_name, .get_timeline_name = drm_crtc_fence_get_timeline_name, - .enable_signaling = drm_crtc_fence_enable_signaling, - .wait = dma_fence_default_wait, }; struct dma_fence *drm_crtc_create_fence(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index d4f4ce484529..adb3cb27d31e 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -207,7 +207,6 @@ static const struct dma_fence_ops drm_syncobj_null_fence_ops = { .get_driver_name = drm_syncobj_null_fence_get_name, .get_timeline_name = drm_syncobj_null_fence_get_name, .enable_signaling = drm_syncobj_null_fence_enable_signaling, - .wait = dma_fence_default_wait, .release = NULL, }; diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c index 69aab086b913..4843289cc8f0 100644 --- a/drivers/gpu/drm/scheduler/sched_fence.c +++ b/drivers/gpu/drm/scheduler/sched_fence.c @@ -81,11 +81,6 @@ static const char *drm_sched_fence_get_timeline_name(struct dma_fence *f) return (const char *)fence->sched->name; } -static bool drm_sched_fence_enable_signaling(struct dma_fence *f) -{ - return true; -} - 
/** * amd_sched_fence_free - free up the fence memory * @@ -134,18 +129,12 @@ static void drm_sched_fence_release_finished(struct dma_fence *f) const struct dma_fence_ops drm_sched_fence_ops_scheduled = { .get_driver_name = drm_sched_fence_get_driver_name, .get_timeline_name = drm_sched_fence_get_timeline_name, - .enable_signaling = drm_sched_fence_enable_signaling, - .signaled = NULL, - .wait = dma_fence_default_wait, .release = drm_sched_fence_release_scheduled, }; const struct dma_fence_ops drm_sched_fence_ops_finished = { .get_driver_name = drm_sched_fence_get_driver_name, .get_timeline_name = drm_sched_fence_get_timeline_name, - .enable_signaling = drm_sched_fence_enable_signaling, - .signaled = NULL, - .wait = dma_fence_default_wait, .release = drm_sched_fence_release_finished, }; -- cgit v1.2.3 From bf3012ada1b2222e770de5c35c1bb16f73b3a01d Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 2 May 2018 10:23:25 +0200 Subject: drm/qxl: Remove unecessary dma_fence_ops The trivial enable_signaling implementation matches the default code. v2: Fix up commit message to match patch better (Eric). Cc: Eric Anholt Reviewed-by: Eric Anholt Signed-off-by: Daniel Vetter Cc: Dave Airlie Cc: Gerd Hoffmann Cc: virtualization@lists.linux-foundation.org Link: https://patchwork.freedesktop.org/patch/msgid/20180502082325.30264-1-daniel.vetter@ffwll.ch --- drivers/gpu/drm/qxl/qxl_release.c | 7 ------- 1 file changed, 7 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c index 5d84a66fed36..04f3605ac42a 100644 --- a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c @@ -50,12 +50,6 @@ static const char *qxl_get_timeline_name(struct dma_fence *fence) return "release"; } -static bool qxl_nop_signaling(struct dma_fence *fence) -{ - /* fences are always automatically signaled, so just pretend we did this.. 
*/ - return true; -} - static long qxl_fence_wait(struct dma_fence *fence, bool intr, signed long timeout) { @@ -119,7 +113,6 @@ signaled: static const struct dma_fence_ops qxl_fence_ops = { .get_driver_name = qxl_get_driver_name, .get_timeline_name = qxl_get_timeline_name, - .enable_signaling = qxl_nop_signaling, .wait = qxl_fence_wait, }; -- cgit v1.2.3 From 51f170a544bdb06d93316d8ff0814a52daa24a6c Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 3 May 2018 12:31:38 +0200 Subject: Revert 190c462d5be19ba622a82f5fd0625087c870a1e6..bf3012ada1b2222e770de5c35c1bb16f73b3a01d" I shouldn't have pushed this, CI was right - I failed to remove the BUG_ON(!ops->wait); Reported-by: Chris Wilson Acked-by: Chris Wilson Signed-off-by: Daniel Vetter --- drivers/dma-buf/dma-fence-array.c | 1 + drivers/dma-buf/dma-fence.c | 23 +++++++---------------- drivers/dma-buf/sw_sync.c | 1 + drivers/gpu/drm/drm_crtc.c | 7 +++++++ drivers/gpu/drm/drm_syncobj.c | 1 + drivers/gpu/drm/qxl/qxl_release.c | 7 +++++++ drivers/gpu/drm/scheduler/sched_fence.c | 11 +++++++++++ include/linux/dma-fence.h | 32 +++++++++++++++++++------------- 8 files changed, 54 insertions(+), 29 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c index a8c254497251..dd1edfb27b61 100644 --- a/drivers/dma-buf/dma-fence-array.c +++ b/drivers/dma-buf/dma-fence-array.c @@ -104,6 +104,7 @@ const struct dma_fence_ops dma_fence_array_ops = { .get_timeline_name = dma_fence_array_get_timeline_name, .enable_signaling = dma_fence_array_enable_signaling, .signaled = dma_fence_array_signaled, + .wait = dma_fence_default_wait, .release = dma_fence_array_release, }; EXPORT_SYMBOL(dma_fence_array_ops); diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c index 30fcbe415ff4..4edb9fd3cf47 100644 --- a/drivers/dma-buf/dma-fence.c +++ b/drivers/dma-buf/dma-fence.c @@ -158,10 +158,7 @@ dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout) return -EINVAL; trace_dma_fence_wait_start(fence); - if (fence->ops->wait) - ret = fence->ops->wait(fence, intr, timeout); - else - ret = dma_fence_default_wait(fence, intr, timeout); + ret = fence->ops->wait(fence, intr, timeout); trace_dma_fence_wait_end(fence); return ret; } @@ -184,13 +181,6 @@ void dma_fence_release(struct kref *kref) } EXPORT_SYMBOL(dma_fence_release); -/** - * dma_fence_free - default release function for &dma_fence. - * @fence: fence to release - * - * This is the default implementation for &dma_fence_ops.release. It calls - * kfree_rcu() on @fence. 
- */ void dma_fence_free(struct dma_fence *fence) { kfree_rcu(fence, rcu); @@ -506,6 +496,11 @@ dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count, for (i = 0; i < count; ++i) { struct dma_fence *fence = fences[i]; + if (fence->ops->wait != dma_fence_default_wait) { + ret = -EINVAL; + goto fence_rm_cb; + } + cb[i].task = current; if (dma_fence_add_callback(fence, &cb[i].base, dma_fence_default_wait_cb)) { @@ -565,7 +560,7 @@ dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops, spinlock_t *lock, u64 context, unsigned seqno) { BUG_ON(!lock); - BUG_ON(!ops || !ops->wait || + BUG_ON(!ops || !ops->wait || !ops->enable_signaling || !ops->get_driver_name || !ops->get_timeline_name); kref_init(&fence->refcount); @@ -577,10 +572,6 @@ dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops, fence->flags = 0UL; fence->error = 0; - if (!ops->enable_signaling) - set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, - &fence->flags); - trace_dma_fence_init(fence); } EXPORT_SYMBOL(dma_fence_init); diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c index 53c1d6d36a64..3d78ca89a605 100644 --- a/drivers/dma-buf/sw_sync.c +++ b/drivers/dma-buf/sw_sync.c @@ -188,6 +188,7 @@ static const struct dma_fence_ops timeline_fence_ops = { .get_timeline_name = timeline_fence_get_timeline_name, .enable_signaling = timeline_fence_enable_signaling, .signaled = timeline_fence_signaled, + .wait = dma_fence_default_wait, .release = timeline_fence_release, .fence_value_str = timeline_fence_value_str, .timeline_value_str = timeline_fence_timeline_value_str, diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index e4d3285f4191..a231dd5dce16 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -225,9 +225,16 @@ static const char *drm_crtc_fence_get_timeline_name(struct dma_fence *fence) return crtc->timeline_name; } +static bool drm_crtc_fence_enable_signaling(struct dma_fence *fence) +{ + return true; +} + static const struct dma_fence_ops drm_crtc_fence_ops = { .get_driver_name = drm_crtc_fence_get_driver_name, .get_timeline_name = drm_crtc_fence_get_timeline_name, + .enable_signaling = drm_crtc_fence_enable_signaling, + .wait = dma_fence_default_wait, }; struct dma_fence *drm_crtc_create_fence(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index adb3cb27d31e..d4f4ce484529 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -207,6 +207,7 @@ static const struct dma_fence_ops drm_syncobj_null_fence_ops = { .get_driver_name = drm_syncobj_null_fence_get_name, .get_timeline_name = drm_syncobj_null_fence_get_name, .enable_signaling = drm_syncobj_null_fence_enable_signaling, + .wait = dma_fence_default_wait, .release = NULL, }; diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c index 04f3605ac42a..5d84a66fed36 100644 --- a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c @@ -50,6 +50,12 @@ static const char *qxl_get_timeline_name(struct dma_fence *fence) return "release"; } +static bool qxl_nop_signaling(struct dma_fence *fence) +{ + /* fences are always automatically signaled, so just pretend we did this.. 
*/ + return true; +} + static long qxl_fence_wait(struct dma_fence *fence, bool intr, signed long timeout) { @@ -113,6 +119,7 @@ signaled: static const struct dma_fence_ops qxl_fence_ops = { .get_driver_name = qxl_get_driver_name, .get_timeline_name = qxl_get_timeline_name, + .enable_signaling = qxl_nop_signaling, .wait = qxl_fence_wait, }; diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c index 4843289cc8f0..69aab086b913 100644 --- a/drivers/gpu/drm/scheduler/sched_fence.c +++ b/drivers/gpu/drm/scheduler/sched_fence.c @@ -81,6 +81,11 @@ static const char *drm_sched_fence_get_timeline_name(struct dma_fence *f) return (const char *)fence->sched->name; } +static bool drm_sched_fence_enable_signaling(struct dma_fence *f) +{ + return true; +} + /** * amd_sched_fence_free - free up the fence memory * @@ -129,12 +134,18 @@ static void drm_sched_fence_release_finished(struct dma_fence *f) const struct dma_fence_ops drm_sched_fence_ops_scheduled = { .get_driver_name = drm_sched_fence_get_driver_name, .get_timeline_name = drm_sched_fence_get_timeline_name, + .enable_signaling = drm_sched_fence_enable_signaling, + .signaled = NULL, + .wait = dma_fence_default_wait, .release = drm_sched_fence_release_scheduled, }; const struct dma_fence_ops drm_sched_fence_ops_finished = { .get_driver_name = drm_sched_fence_get_driver_name, .get_timeline_name = drm_sched_fence_get_timeline_name, + .enable_signaling = drm_sched_fence_enable_signaling, + .signaled = NULL, + .wait = dma_fence_default_wait, .release = drm_sched_fence_release_finished, }; diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index 02dba8cd033d..eb9b05aa5aea 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h @@ -166,8 +166,7 @@ struct dma_fence_ops { * released when the fence is signalled (through e.g. the interrupt * handler). * - * This callback is optional. If this callback is not present, then the - * driver must always have signaling enabled. + * This callback is mandatory. */ bool (*enable_signaling)(struct dma_fence *fence); @@ -191,14 +190,11 @@ struct dma_fence_ops { /** * @wait: * - * Custom wait implementation, defaults to dma_fence_default_wait() if - * not set. + * Custom wait implementation, or dma_fence_default_wait. * - * The dma_fence_default_wait implementation should work for any fence, as long - * as @enable_signaling works correctly. This hook allows drivers to - * have an optimized version for the case where a process context is - * already available, e.g. if @enable_signaling for the general case - * needs to set up a worker thread. + * Must not be NULL, set to dma_fence_default_wait for default implementation. + * the dma_fence_default_wait implementation should work for any fence, as long + * as enable_signaling works correctly. * * Must return -ERESTARTSYS if the wait is intr = true and the wait was * interrupted, and remaining jiffies if fence has signaled, or 0 if wait @@ -206,7 +202,7 @@ struct dma_fence_ops { * which should be treated as if the fence is signaled. For example a hardware * lockup could be reported like that. * - * This callback is optional. + * This callback is mandatory. */ signed long (*wait)(struct dma_fence *fence, bool intr, signed long timeout); @@ -221,6 +217,17 @@ struct dma_fence_ops { */ void (*release)(struct dma_fence *fence); + /** + * @fill_driver_data: + * + * Callback to fill in free-form debug info. + * + * Returns amount of bytes filled, or negative error on failure. + * + * This callback is optional. 
+ */ + int (*fill_driver_data)(struct dma_fence *fence, void *data, int size); + /** * @fence_value_str: * @@ -235,9 +242,8 @@ struct dma_fence_ops { * @timeline_value_str: * * Fills in the current value of the timeline as a string, like the - * sequence number. Note that the specific fence passed to this function - * should not matter, drivers should only use it to look up the - * corresponding timeline structures. + * sequence number. This should match what @fill_driver_data prints for + * the most recently signalled fence (assuming no delayed signalling). */ void (*timeline_value_str)(struct dma_fence *fence, char *str, int size); -- cgit v1.2.3 From fc2a69f3903dfd97cd47f593e642b47918c949df Mon Sep 17 00:00:00 2001 From: Satendra Singh Thakur Date: Thu, 3 May 2018 11:19:32 +0530 Subject: drm/atomic: Handling the case when setting old crtc for plane In the func drm_atomic_set_crtc_for_plane, with the current code, if the crtc of the plane_state and the crtc passed as an argument to the func are the same, the entire func will be executed in vain. It will get the state of the crtc and clear and set the bits in plane_mask. None of these steps are required for the same old crtc. Ideally, we should do nothing in this case; this patch handles that and makes the function return without doing anything in such a scenario. Signed-off-by: Satendra Singh Thakur Cc: Madhur Verma Cc: Hemanshu Srivastava Signed-off-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/1525326572-25854-1-git-send-email-satendra.t@samsung.com --- drivers/gpu/drm/drm_atomic.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 9bdd67781917..dc850b4b6e21 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -1425,7 +1425,9 @@ drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state, { struct drm_plane *plane = plane_state->plane; struct drm_crtc_state *crtc_state; - + /* Nothing to do for same crtc*/ + if (plane_state->crtc == crtc) + return 0; if (plane_state->crtc) { crtc_state = drm_atomic_get_crtc_state(plane_state->state, plane_state->crtc); -- cgit v1.2.3 From 9e1de9002190b712a264a21f31ee9692f6d0bc2e Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 3 May 2018 11:31:07 +0200 Subject: drm/msm: Don't setup control node debugfs files It's going away. v2: Try harder to find them all.
Reviewed-by: Sean Paul Signed-off-by: Daniel Vetter Cc: Rob Clark Cc: Jordan Crouse Cc: Nicolas Dechesne Cc: Archit Taneja Cc: Bjorn Andersson Cc: Arnd Bergmann Cc: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20180503093107.25955-1-daniel.vetter@ffwll.ch --- drivers/gpu/drm/msm/adreno/adreno_device.c | 1 - drivers/gpu/drm/msm/msm_debugfs.c | 3 --- 2 files changed, 4 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index 8e0cb161754b..0ae5ace65462 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -168,7 +168,6 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev) if (gpu->funcs->debugfs_init) { gpu->funcs->debugfs_init(gpu, dev->primary); gpu->funcs->debugfs_init(gpu, dev->render); - gpu->funcs->debugfs_init(gpu, dev->control); } #endif diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c index ba74cb4f94df..1ff3fda245d1 100644 --- a/drivers/gpu/drm/msm/msm_debugfs.c +++ b/drivers/gpu/drm/msm/msm_debugfs.c @@ -140,9 +140,6 @@ int msm_debugfs_late_init(struct drm_device *dev) if (ret) return ret; ret = late_init_minor(dev->render); - if (ret) - return ret; - ret = late_init_minor(dev->control); return ret; } -- cgit v1.2.3 From 0d49f303e8a7006e0af3b58ed3809e1cad0900fb Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 20 Apr 2018 08:51:59 +0200 Subject: drm: remove all control node code With the ioctl and driver prep done, we can remove everything else. Reviewed-by: Sean Paul Signed-off-by: Daniel Vetter Cc: Gustavo Padovan Cc: Maarten Lankhorst Cc: Sean Paul Cc: David Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20180420065159.4531-4-daniel.vetter@ffwll.ch --- drivers/gpu/drm/drm_drv.c | 10 ---------- drivers/gpu/drm/drm_framebuffer.c | 3 +-- drivers/gpu/drm/drm_ioctl.c | 8 +------- drivers/gpu/drm/drm_sysfs.c | 4 +--- include/drm/drm_device.h | 1 - include/drm/drm_file.h | 13 ------------- include/drm/drm_ioctl.h | 7 ------- 7 files changed, 3 insertions(+), 43 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 32a83b41ab61..f6910ebe4d0e 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -99,8 +99,6 @@ static struct drm_minor **drm_minor_get_slot(struct drm_device *dev, return &dev->primary; case DRM_MINOR_RENDER: return &dev->render; - case DRM_MINOR_CONTROL: - return &dev->control; default: BUG(); } @@ -567,7 +565,6 @@ err_ctxbitmap: err_minors: drm_minor_free(dev, DRM_MINOR_PRIMARY); drm_minor_free(dev, DRM_MINOR_RENDER); - drm_minor_free(dev, DRM_MINOR_CONTROL); drm_fs_inode_free(dev->anon_inode); err_free: mutex_destroy(&dev->master_mutex); @@ -603,7 +600,6 @@ void drm_dev_fini(struct drm_device *dev) drm_minor_free(dev, DRM_MINOR_PRIMARY); drm_minor_free(dev, DRM_MINOR_RENDER); - drm_minor_free(dev, DRM_MINOR_CONTROL); mutex_destroy(&dev->master_mutex); mutex_destroy(&dev->ctxlist_mutex); @@ -796,10 +792,6 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags) mutex_lock(&drm_global_mutex); - ret = drm_minor_register(dev, DRM_MINOR_CONTROL); - if (ret) - goto err_minors; - ret = drm_minor_register(dev, DRM_MINOR_RENDER); if (ret) goto err_minors; @@ -837,7 +829,6 @@ err_minors: remove_compat_control_link(dev); drm_minor_unregister(dev, DRM_MINOR_PRIMARY); drm_minor_unregister(dev, DRM_MINOR_RENDER); - drm_minor_unregister(dev, DRM_MINOR_CONTROL); out_unlock: 
mutex_unlock(&drm_global_mutex); return ret; @@ -882,7 +873,6 @@ void drm_dev_unregister(struct drm_device *dev) remove_compat_control_link(dev); drm_minor_unregister(dev, DRM_MINOR_PRIMARY); drm_minor_unregister(dev, DRM_MINOR_RENDER); - drm_minor_unregister(dev, DRM_MINOR_CONTROL); } EXPORT_SYMBOL(drm_dev_unregister); diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index 8c4d32adcc17..bfedceff87bb 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -484,8 +484,7 @@ int drm_mode_getfb(struct drm_device *dev, * backwards-compatibility reasons, we cannot make GET_FB() privileged, * so just return an invalid handle for non-masters. */ - if (!drm_is_current_master(file_priv) && !capable(CAP_SYS_ADMIN) && - !drm_is_control_client(file_priv)) { + if (!drm_is_current_master(file_priv) && !capable(CAP_SYS_ADMIN)) { r->handle = 0; ret = 0; goto out; diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 43f7e2e81294..eadeabc393f0 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -510,13 +510,7 @@ int drm_ioctl_permit(u32 flags, struct drm_file *file_priv) /* MASTER is only for master or control clients */ if (unlikely((flags & DRM_MASTER) && - !drm_is_current_master(file_priv) && - !drm_is_control_client(file_priv))) - return -EACCES; - - /* Control clients must be explicitly allowed */ - if (unlikely(!(flags & DRM_CONTROL_ALLOW) && - drm_is_control_client(file_priv))) + !drm_is_current_master(file_priv))) return -EACCES; /* Render clients must be explicitly allowed */ diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index 1c5b5ce1fd7f..b3c1daad1169 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -331,9 +331,7 @@ struct device *drm_sysfs_minor_alloc(struct drm_minor *minor) struct device *kdev; int r; - if (minor->type == DRM_MINOR_CONTROL) - minor_str = "controlD%d"; - else if (minor->type == DRM_MINOR_RENDER) + if (minor->type == DRM_MINOR_RENDER) minor_str = "renderD%d"; else minor_str = "card%d"; diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h index 3a0eac2885b7..858ba19a3e29 100644 --- a/include/drm/drm_device.h +++ b/include/drm/drm_device.h @@ -38,7 +38,6 @@ struct drm_device { struct device *dev; /**< Device structure of bus-device */ struct drm_driver *driver; /**< DRM driver managing the device */ void *dev_private; /**< DRM driver private data */ - struct drm_minor *control; /**< Control node */ struct drm_minor *primary; /**< Primary node */ struct drm_minor *render; /**< Render node */ bool registered; diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h index 5176c3797680..99ab50cbab00 100644 --- a/include/drm/drm_file.h +++ b/include/drm/drm_file.h @@ -49,7 +49,6 @@ struct device; enum drm_minor_type { DRM_MINOR_PRIMARY, - DRM_MINOR_CONTROL, DRM_MINOR_RENDER, }; @@ -348,18 +347,6 @@ static inline bool drm_is_render_client(const struct drm_file *file_priv) return file_priv->minor->type == DRM_MINOR_RENDER; } -/** - * drm_is_control_client - is this an open file of the control node - * @file_priv: DRM file - * - * Control nodes are deprecated and in the process of getting removed from the - * DRM userspace API. Do not ever use! 
- */ -static inline bool drm_is_control_client(const struct drm_file *file_priv) -{ - return file_priv->minor->type == DRM_MINOR_CONTROL; -} - int drm_open(struct inode *inode, struct file *filp); ssize_t drm_read(struct file *filp, char __user *buffer, size_t count, loff_t *offset); diff --git a/include/drm/drm_ioctl.h b/include/drm/drm_ioctl.h index add42809642a..fafb6f592c4b 100644 --- a/include/drm/drm_ioctl.h +++ b/include/drm/drm_ioctl.h @@ -108,13 +108,6 @@ enum drm_ioctl_flags { * This is equivalent to callers with the SYSADMIN capability. */ DRM_ROOT_ONLY = BIT(2), - /** - * @DRM_CONTROL_ALLOW: - * - * Deprecated, do not use. Control nodes are in the process of getting - * removed. - */ - DRM_CONTROL_ALLOW = BIT(3), /** * @DRM_UNLOCKED: * -- cgit v1.2.3 From 0a4587a034a43e5076770df10446214cfb3de8f8 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Thu, 3 May 2018 16:04:31 +0200 Subject: drm/pl111: Fix module probe bug Commit a30933c27602 ("drm/pl111: Support the Versatile Express") Added a second module using the builtin_platform_driver() call, which works fine as long as you do not try to build the PL111 driver as a module, because a module can only have one initcall and cause the following build bug: (...) multiple definition of `init_module' (...) Reported-by: Daniel Vetter Cc: Liviu Dudau Cc: Pawel Moll Cc: Eric Anholt Cc: Robin Murphy Reviewed-by: Sean Paul Reviewed-by: Daniel Vetter Fixes: a30933c27602 ("drm/pl111: Support the Versatile Express") Signed-off-by: Linus Walleij Link: https://patchwork.freedesktop.org/patch/msgid/20180503140431.5798-1-linus.walleij@linaro.org --- drivers/gpu/drm/pl111/pl111_versatile.c | 7 +++++++ drivers/gpu/drm/pl111/pl111_vexpress.c | 11 ++++++++++- drivers/gpu/drm/pl111/pl111_vexpress.h | 7 +++++++ 3 files changed, 24 insertions(+), 1 deletion(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/pl111/pl111_versatile.c b/drivers/gpu/drm/pl111/pl111_versatile.c index 78ddf8534fd2..b9baefdba38a 100644 --- a/drivers/gpu/drm/pl111/pl111_versatile.c +++ b/drivers/gpu/drm/pl111/pl111_versatile.c @@ -326,6 +326,13 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv) if (versatile_clcd_type == VEXPRESS_CLCD_V2M) { struct platform_device *pdev; + /* Registers a driver for the muxfpga */ + ret = vexpress_muxfpga_init(); + if (ret) { + dev_err(dev, "unable to initialize muxfpga driver\n"); + return ret; + } + /* Call into deep Vexpress configuration API */ pdev = of_find_device_by_node(np); if (!pdev) { diff --git a/drivers/gpu/drm/pl111/pl111_vexpress.c b/drivers/gpu/drm/pl111/pl111_vexpress.c index c9fee625faf1..a534b225e31b 100644 --- a/drivers/gpu/drm/pl111/pl111_vexpress.c +++ b/drivers/gpu/drm/pl111/pl111_vexpress.c @@ -122,4 +122,13 @@ static struct platform_driver vexpress_muxfpga_driver = { .probe = vexpress_muxfpga_probe, }; -builtin_platform_driver(vexpress_muxfpga_driver); +int vexpress_muxfpga_init(void) +{ + int ret; + + ret = platform_driver_register(&vexpress_muxfpga_driver); + /* -EBUSY just means this driver is already registered */ + if (ret == -EBUSY) + ret = 0; + return ret; +} diff --git a/drivers/gpu/drm/pl111/pl111_vexpress.h b/drivers/gpu/drm/pl111/pl111_vexpress.h index bb54864ca91e..5d3681bb4c00 100644 --- a/drivers/gpu/drm/pl111/pl111_vexpress.h +++ b/drivers/gpu/drm/pl111/pl111_vexpress.h @@ -10,6 +10,8 @@ int pl111_vexpress_clcd_init(struct device *dev, struct pl111_drm_dev_private *priv, struct regmap *map); +int vexpress_muxfpga_init(void); + #else static inline int 
pl111_vexpress_clcd_init(struct device *dev, @@ -19,4 +21,9 @@ static inline int pl111_vexpress_clcd_init(struct device *dev, return -ENODEV; } +static inline int vexpress_muxfpga_init(void) +{ + return 0; +} + #endif -- cgit v1.2.3 From 4c70ac7639f6af6d7c2d01f0307665a4b9afada7 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Mon, 30 Apr 2018 16:59:27 -0700 Subject: drm/vc4: Add a pad field to align drm_vc4_submit_cl to 64 bits. I had originally asked Stefan Schake to drop the pad field from the syncobj changes that just landed, because I couldn't come up with a reason to align to 64 bits. Talking with Dave Airlie about the new v3d driver's submit ioctl, we came up with a reason: sizeof() on 64-bit platforms may align to 64 bits, in which case the userspace will be submitting the aligned size and the final 32 bits won't be zero-padded by the kernel. If userspace doesn't zero-fill, then a future ABI change adding a 32-bit field at the end could potentially cause the kernel to read undefined data from old userspace (our userspace happens to use structure initialization that zero-fills, but as a general rule we try not to rely on that in the kernel). Signed-off-by: Eric Anholt Link: https://patchwork.freedesktop.org/patch/msgid/20180430235927.28712-1-eric@anholt.net Reviewed-by: Stefan Schake --- drivers/gpu/drm/vc4/vc4_gem.c | 5 +++++ include/uapi/drm/vc4_drm.h | 2 ++ 2 files changed, 7 insertions(+) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c index a4c4be3ac6af..7910b9acedd6 100644 --- a/drivers/gpu/drm/vc4/vc4_gem.c +++ b/drivers/gpu/drm/vc4/vc4_gem.c @@ -1132,6 +1132,11 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data, return -EINVAL; } + if (args->pad2 != 0) { + DRM_DEBUG("Invalid pad: 0x%08x\n", args->pad2); + return -EINVAL; + } + exec = kcalloc(1, sizeof(*exec), GFP_KERNEL); if (!exec) { DRM_ERROR("malloc failure on exec struct\n"); diff --git a/include/uapi/drm/vc4_drm.h b/include/uapi/drm/vc4_drm.h index 2be4fe3610b8..2cac6277a1d7 100644 --- a/include/uapi/drm/vc4_drm.h +++ b/include/uapi/drm/vc4_drm.h @@ -193,6 +193,8 @@ struct drm_vc4_submit_cl { * render job. 0 means ignore. */ __u32 out_sync; + + __u32 pad2; }; /** -- cgit v1.2.3 From 57692c94dcbe99a1e0444409a3da13fb3443562c Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Mon, 30 Apr 2018 11:10:58 -0700 Subject: drm/v3d: Introduce a new DRM driver for Broadcom V3D V3.x+ This driver will be used to support Mesa on the Broadcom 7268 and 7278 platforms. V3D 3.3 introduces an MMU, which means we no longer need CMA or vc4's complicated CL/shader validation scheme. This massively changes the GEM behavior, so I've forked off to a new driver. v2: Mark SUBMIT_CL as needing DRM_AUTH. coccinelle fixes from kbuild test robot. Drop personal git link from MAINTAINERS. Don't double-map dma-buf imported BOs. Add kerneldoc about needing MMU eviction. Drop prime vmap/unmap stubs. Delay mmap offset setup to mmap time. Use drm_dev_init instead of _alloc. Use ktime_get() for wait_bo timeouts. Drop drm_can_sleep() usage, since we don't modeset. Switch page tables back to WC (debug change to coherent had slipped in). Switch drm_gem_object_unreference_unlocked() to drm_gem_object_put_unlocked(). Simplify overflow mem handling by not sharing overflow mem between jobs. v3: no changes v4: align submit_cl to 64 bits (review by airlied), check zero flags in other ioctls. 
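The zero-padding rule referenced in the v4 note above (and applied to vc4's new pad2 field earlier in this series) deserves a concrete illustration. The struct and check below are a generic, hypothetical sketch of the idiom, not the actual vc4 or v3d uapi:

#include <linux/errno.h>
#include <linux/types.h>

/*
 * Hypothetical ioctl argument struct. The trailing pad keeps sizeof()
 * a multiple of 64 bits on every ABI, so userspace always submits the
 * same, fully initialized size.
 */
struct example_submit {
	__u64 bo_handles;
	__u32 in_sync;
	__u32 flags;		/* no flag bits defined yet in this example */
	__u32 out_sync;
	__u32 pad;		/* must be zero; can become a real field later */
};

static int example_check_args(const struct example_submit *args)
{
	/*
	 * Rejecting non-zero padding and undefined flags now means a future
	 * kernel can give these bits meaning without old userspace silently
	 * passing garbage in them.
	 */
	if (args->pad != 0 || args->flags != 0)
		return -EINVAL;
	return 0;
}

vc4_submit_cl_ioctl() now performs exactly this kind of check on pad2, as the hunk above shows.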
Signed-off-by: Eric Anholt Acked-by: Daniel Vetter (v4) Acked-by: Dave Airlie (v3, requested submit_cl change) Link: https://patchwork.freedesktop.org/patch/msgid/20180430181058.30181-3-eric@anholt.net --- Documentation/gpu/drivers.rst | 1 + MAINTAINERS | 8 + drivers/gpu/drm/Kconfig | 2 + drivers/gpu/drm/Makefile | 1 + drivers/gpu/drm/v3d/Kconfig | 9 + drivers/gpu/drm/v3d/Makefile | 18 + drivers/gpu/drm/v3d/v3d_bo.c | 389 +++++++++++++++++++ drivers/gpu/drm/v3d/v3d_debugfs.c | 191 ++++++++++ drivers/gpu/drm/v3d/v3d_drv.c | 371 ++++++++++++++++++ drivers/gpu/drm/v3d/v3d_drv.h | 294 +++++++++++++++ drivers/gpu/drm/v3d/v3d_fence.c | 58 +++ drivers/gpu/drm/v3d/v3d_gem.c | 668 +++++++++++++++++++++++++++++++++ drivers/gpu/drm/v3d/v3d_irq.c | 206 ++++++++++ drivers/gpu/drm/v3d/v3d_mmu.c | 122 ++++++ drivers/gpu/drm/v3d/v3d_regs.h | 295 +++++++++++++++ drivers/gpu/drm/v3d/v3d_sched.c | 228 +++++++++++ drivers/gpu/drm/v3d/v3d_trace.h | 82 ++++ drivers/gpu/drm/v3d/v3d_trace_points.c | 9 + include/uapi/drm/v3d_drm.h | 194 ++++++++++ 19 files changed, 3146 insertions(+) create mode 100644 drivers/gpu/drm/v3d/Kconfig create mode 100644 drivers/gpu/drm/v3d/Makefile create mode 100644 drivers/gpu/drm/v3d/v3d_bo.c create mode 100644 drivers/gpu/drm/v3d/v3d_debugfs.c create mode 100644 drivers/gpu/drm/v3d/v3d_drv.c create mode 100644 drivers/gpu/drm/v3d/v3d_drv.h create mode 100644 drivers/gpu/drm/v3d/v3d_fence.c create mode 100644 drivers/gpu/drm/v3d/v3d_gem.c create mode 100644 drivers/gpu/drm/v3d/v3d_irq.c create mode 100644 drivers/gpu/drm/v3d/v3d_mmu.c create mode 100644 drivers/gpu/drm/v3d/v3d_regs.h create mode 100644 drivers/gpu/drm/v3d/v3d_sched.c create mode 100644 drivers/gpu/drm/v3d/v3d_trace.h create mode 100644 drivers/gpu/drm/v3d/v3d_trace_points.c create mode 100644 include/uapi/drm/v3d_drm.h (limited to 'drivers/gpu') diff --git a/Documentation/gpu/drivers.rst b/Documentation/gpu/drivers.rst index d3ab6abae838..f982558fc25d 100644 --- a/Documentation/gpu/drivers.rst +++ b/Documentation/gpu/drivers.rst @@ -10,6 +10,7 @@ GPU Driver Documentation tegra tinydrm tve200 + v3d vc4 bridge/dw-hdmi xen-front diff --git a/MAINTAINERS b/MAINTAINERS index 4af7f6119530..631a16f7fa19 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4795,6 +4795,14 @@ S: Maintained F: drivers/gpu/drm/omapdrm/ F: Documentation/devicetree/bindings/display/ti/ +DRM DRIVERS FOR V3D +M: Eric Anholt +S: Supported +F: drivers/gpu/drm/v3d/ +F: include/uapi/drm/v3d_drm.h +F: Documentation/devicetree/bindings/display/brcm,bcm-v3d.txt +T: git git://anongit.freedesktop.org/drm/drm-misc + DRM DRIVERS FOR VC4 M: Eric Anholt T: git git://github.com/anholt/linux diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 757825ac60df..1c73a455fdb1 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -267,6 +267,8 @@ source "drivers/gpu/drm/amd/amdkfd/Kconfig" source "drivers/gpu/drm/imx/Kconfig" +source "drivers/gpu/drm/v3d/Kconfig" + source "drivers/gpu/drm/vc4/Kconfig" source "drivers/gpu/drm/etnaviv/Kconfig" diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 9d66657ea117..7a401edd8761 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -61,6 +61,7 @@ obj-$(CONFIG_DRM_MGA) += mga/ obj-$(CONFIG_DRM_I810) += i810/ obj-$(CONFIG_DRM_I915) += i915/ obj-$(CONFIG_DRM_MGAG200) += mgag200/ +obj-$(CONFIG_DRM_V3D) += v3d/ obj-$(CONFIG_DRM_VC4) += vc4/ obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus/ obj-$(CONFIG_DRM_SIS) += sis/ diff --git a/drivers/gpu/drm/v3d/Kconfig 
b/drivers/gpu/drm/v3d/Kconfig new file mode 100644 index 000000000000..a0c0259355bd --- /dev/null +++ b/drivers/gpu/drm/v3d/Kconfig @@ -0,0 +1,9 @@ +config DRM_V3D + tristate "Broadcom V3D 3.x and newer" + depends on ARCH_BCM || ARCH_BCMSTB || COMPILE_TEST + depends on DRM + depends on COMMON_CLK + select DRM_SCHED + help + Choose this option if you have a system that has a Broadcom + V3D 3.x or newer GPU, such as BCM7268. diff --git a/drivers/gpu/drm/v3d/Makefile b/drivers/gpu/drm/v3d/Makefile new file mode 100644 index 000000000000..34446e1de64f --- /dev/null +++ b/drivers/gpu/drm/v3d/Makefile @@ -0,0 +1,18 @@ +# Please keep these build lists sorted! + +# core driver code +v3d-y := \ + v3d_bo.o \ + v3d_drv.o \ + v3d_fence.o \ + v3d_gem.o \ + v3d_irq.o \ + v3d_mmu.o \ + v3d_trace_points.o \ + v3d_sched.o + +v3d-$(CONFIG_DEBUG_FS) += v3d_debugfs.o + +obj-$(CONFIG_DRM_V3D) += v3d.o + +CFLAGS_v3d_trace_points.o := -I$(src) diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c new file mode 100644 index 000000000000..7b1e2a549a71 --- /dev/null +++ b/drivers/gpu/drm/v3d/v3d_bo.c @@ -0,0 +1,389 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2015-2018 Broadcom */ + +/** + * DOC: V3D GEM BO management support + * + * Compared to VC4 (V3D 2.x), V3D 3.3 introduces an MMU between the + * GPU and the bus, allowing us to use shmem objects for our storage + * instead of CMA. + * + * Physically contiguous objects may still be imported to V3D, but the + * driver doesn't allocate physically contiguous objects on its own. + * Display engines requiring physically contiguous allocations should + * look into Mesa's "renderonly" support (as used by the Mesa pl111 + * driver) for an example of how to integrate with V3D. + * + * Long term, we should support evicting pages from the MMU when under + * memory pressure (thus the v3d_bo_get_pages() refcounting), but + * that's not a high priority since our systems tend to not have swap. + */ + +#include +#include + +#include "v3d_drv.h" +#include "uapi/drm/v3d_drm.h" + +/* Pins the shmem pages, fills in the .pages and .sgt fields of the BO, and maps + * it for DMA. + */ +static int +v3d_bo_get_pages(struct v3d_bo *bo) +{ + struct drm_gem_object *obj = &bo->base; + struct drm_device *dev = obj->dev; + int npages = obj->size >> PAGE_SHIFT; + int ret = 0; + + mutex_lock(&bo->lock); + if (bo->pages_refcount++ != 0) + goto unlock; + + if (!obj->import_attach) { + bo->pages = drm_gem_get_pages(obj); + if (IS_ERR(bo->pages)) { + ret = PTR_ERR(bo->pages); + goto unlock; + } + + bo->sgt = drm_prime_pages_to_sg(bo->pages, npages); + if (IS_ERR(bo->sgt)) { + ret = PTR_ERR(bo->sgt); + goto put_pages; + } + + /* Map the pages for use by the GPU. */ + dma_map_sg(dev->dev, bo->sgt->sgl, + bo->sgt->nents, DMA_BIDIRECTIONAL); + } else { + bo->pages = kcalloc(npages, sizeof(*bo->pages), GFP_KERNEL); + if (!bo->pages) + goto put_pages; + + drm_prime_sg_to_page_addr_arrays(bo->sgt, bo->pages, + NULL, npages); + + /* Note that dma-bufs come in mapped. 
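+ * The exporter's sg_table arrives already DMA-mapped, so only the pages[] array is built here.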
*/ + } + + mutex_unlock(&bo->lock); + + return 0; + +put_pages: + drm_gem_put_pages(obj, bo->pages, true, true); + bo->pages = NULL; +unlock: + bo->pages_refcount--; + mutex_unlock(&bo->lock); + return ret; +} + +static void +v3d_bo_put_pages(struct v3d_bo *bo) +{ + struct drm_gem_object *obj = &bo->base; + + mutex_lock(&bo->lock); + if (--bo->pages_refcount == 0) { + if (!obj->import_attach) { + dma_unmap_sg(obj->dev->dev, bo->sgt->sgl, + bo->sgt->nents, DMA_BIDIRECTIONAL); + sg_free_table(bo->sgt); + kfree(bo->sgt); + drm_gem_put_pages(obj, bo->pages, true, true); + } else { + kfree(bo->pages); + } + } + mutex_unlock(&bo->lock); +} + +static struct v3d_bo *v3d_bo_create_struct(struct drm_device *dev, + size_t unaligned_size) +{ + struct v3d_dev *v3d = to_v3d_dev(dev); + struct drm_gem_object *obj; + struct v3d_bo *bo; + size_t size = roundup(unaligned_size, PAGE_SIZE); + int ret; + + if (size == 0) + return ERR_PTR(-EINVAL); + + bo = kzalloc(sizeof(*bo), GFP_KERNEL); + if (!bo) + return ERR_PTR(-ENOMEM); + obj = &bo->base; + + INIT_LIST_HEAD(&bo->vmas); + INIT_LIST_HEAD(&bo->unref_head); + mutex_init(&bo->lock); + + ret = drm_gem_object_init(dev, obj, size); + if (ret) + goto free_bo; + + spin_lock(&v3d->mm_lock); + ret = drm_mm_insert_node_generic(&v3d->mm, &bo->node, + obj->size >> PAGE_SHIFT, + GMP_GRANULARITY >> PAGE_SHIFT, 0, 0); + spin_unlock(&v3d->mm_lock); + if (ret) + goto free_obj; + + return bo; + +free_obj: + drm_gem_object_release(obj); +free_bo: + kfree(bo); + return ERR_PTR(ret); +} + +struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv, + size_t unaligned_size) +{ + struct v3d_dev *v3d = to_v3d_dev(dev); + struct drm_gem_object *obj; + struct v3d_bo *bo; + int ret; + + bo = v3d_bo_create_struct(dev, unaligned_size); + if (IS_ERR(bo)) + return bo; + obj = &bo->base; + + bo->resv = &bo->_resv; + reservation_object_init(bo->resv); + + ret = v3d_bo_get_pages(bo); + if (ret) + goto free_mm; + + v3d_mmu_insert_ptes(bo); + + mutex_lock(&v3d->bo_lock); + v3d->bo_stats.num_allocated++; + v3d->bo_stats.pages_allocated += obj->size >> PAGE_SHIFT; + mutex_unlock(&v3d->bo_lock); + + return bo; + +free_mm: + spin_lock(&v3d->mm_lock); + drm_mm_remove_node(&bo->node); + spin_unlock(&v3d->mm_lock); + + drm_gem_object_release(obj); + kfree(bo); + return ERR_PTR(ret); +} + +/* Called DRM core on the last userspace/kernel unreference of the + * BO. 
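+ * Tears down the MMU mapping, releases the backing pages and the drm_mm node, and updates the allocation stats.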
+ */ +void v3d_free_object(struct drm_gem_object *obj) +{ + struct v3d_dev *v3d = to_v3d_dev(obj->dev); + struct v3d_bo *bo = to_v3d_bo(obj); + + mutex_lock(&v3d->bo_lock); + v3d->bo_stats.num_allocated--; + v3d->bo_stats.pages_allocated -= obj->size >> PAGE_SHIFT; + mutex_unlock(&v3d->bo_lock); + + reservation_object_fini(&bo->_resv); + + v3d_bo_put_pages(bo); + + if (obj->import_attach) + drm_prime_gem_destroy(obj, bo->sgt); + + v3d_mmu_remove_ptes(bo); + spin_lock(&v3d->mm_lock); + drm_mm_remove_node(&bo->node); + spin_unlock(&v3d->mm_lock); + + mutex_destroy(&bo->lock); + + drm_gem_object_release(obj); + kfree(bo); +} + +struct reservation_object *v3d_prime_res_obj(struct drm_gem_object *obj) +{ + struct v3d_bo *bo = to_v3d_bo(obj); + + return bo->resv; +} + +static void +v3d_set_mmap_vma_flags(struct vm_area_struct *vma) +{ + vma->vm_flags &= ~VM_PFNMAP; + vma->vm_flags |= VM_MIXEDMAP; + vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); +} + +int v3d_gem_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + struct drm_gem_object *obj = vma->vm_private_data; + struct v3d_bo *bo = to_v3d_bo(obj); + unsigned long pfn; + pgoff_t pgoff; + int ret; + + /* We don't use vmf->pgoff since that has the fake offset: */ + pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT; + pfn = page_to_pfn(bo->pages[pgoff]); + + ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV)); + + switch (ret) { + case -EAGAIN: + case 0: + case -ERESTARTSYS: + case -EINTR: + case -EBUSY: + /* + * EBUSY is ok: this just means that another thread + * already did the job. + */ + return VM_FAULT_NOPAGE; + case -ENOMEM: + return VM_FAULT_OOM; + default: + return VM_FAULT_SIGBUS; + } +} + +int v3d_mmap(struct file *filp, struct vm_area_struct *vma) +{ + int ret; + + ret = drm_gem_mmap(filp, vma); + if (ret) + return ret; + + v3d_set_mmap_vma_flags(vma); + + return ret; +} + +int v3d_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) +{ + int ret; + + ret = drm_gem_mmap_obj(obj, obj->size, vma); + if (ret < 0) + return ret; + + v3d_set_mmap_vma_flags(vma); + + return 0; +} + +struct sg_table * +v3d_prime_get_sg_table(struct drm_gem_object *obj) +{ + struct v3d_bo *bo = to_v3d_bo(obj); + int npages = obj->size >> PAGE_SHIFT; + + return drm_prime_pages_to_sg(bo->pages, npages); +} + +struct drm_gem_object * +v3d_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sgt) +{ + struct drm_gem_object *obj; + struct v3d_bo *bo; + + bo = v3d_bo_create_struct(dev, attach->dmabuf->size); + if (IS_ERR(bo)) + return ERR_CAST(bo); + obj = &bo->base; + + bo->resv = attach->dmabuf->resv; + + bo->sgt = sgt; + v3d_bo_get_pages(bo); + + v3d_mmu_insert_ptes(bo); + + return obj; +} + +int v3d_create_bo_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_v3d_create_bo *args = data; + struct v3d_bo *bo = NULL; + int ret; + + if (args->flags != 0) { + DRM_INFO("unknown create_bo flags: %d\n", args->flags); + return -EINVAL; + } + + bo = v3d_bo_create(dev, file_priv, PAGE_ALIGN(args->size)); + if (IS_ERR(bo)) + return PTR_ERR(bo); + + args->offset = bo->node.start << PAGE_SHIFT; + + ret = drm_gem_handle_create(file_priv, &bo->base, &args->handle); + drm_gem_object_put_unlocked(&bo->base); + + return ret; +} + +int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_v3d_mmap_bo *args = data; + struct drm_gem_object *gem_obj; + int ret; + + if 
(args->flags != 0) { + DRM_INFO("unknown mmap_bo flags: %d\n", args->flags); + return -EINVAL; + } + + gem_obj = drm_gem_object_lookup(file_priv, args->handle); + if (!gem_obj) { + DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle); + return -ENOENT; + } + + ret = drm_gem_create_mmap_offset(gem_obj); + if (ret == 0) + args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node); + drm_gem_object_put_unlocked(gem_obj); + + return ret; +} + +int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_v3d_get_bo_offset *args = data; + struct drm_gem_object *gem_obj; + struct v3d_bo *bo; + + gem_obj = drm_gem_object_lookup(file_priv, args->handle); + if (!gem_obj) { + DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle); + return -ENOENT; + } + bo = to_v3d_bo(gem_obj); + + args->offset = bo->node.start << PAGE_SHIFT; + + drm_gem_object_put_unlocked(gem_obj); + return 0; +} diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c new file mode 100644 index 000000000000..4db62c545748 --- /dev/null +++ b/drivers/gpu/drm/v3d/v3d_debugfs.c @@ -0,0 +1,191 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2014-2018 Broadcom */ + +#include +#include +#include +#include +#include +#include + +#include "v3d_drv.h" +#include "v3d_regs.h" + +#define REGDEF(reg) { reg, #reg } +struct v3d_reg_def { + u32 reg; + const char *name; +}; + +static const struct v3d_reg_def v3d_hub_reg_defs[] = { + REGDEF(V3D_HUB_AXICFG), + REGDEF(V3D_HUB_UIFCFG), + REGDEF(V3D_HUB_IDENT0), + REGDEF(V3D_HUB_IDENT1), + REGDEF(V3D_HUB_IDENT2), + REGDEF(V3D_HUB_IDENT3), + REGDEF(V3D_HUB_INT_STS), + REGDEF(V3D_HUB_INT_MSK_STS), +}; + +static const struct v3d_reg_def v3d_gca_reg_defs[] = { + REGDEF(V3D_GCA_SAFE_SHUTDOWN), + REGDEF(V3D_GCA_SAFE_SHUTDOWN_ACK), +}; + +static const struct v3d_reg_def v3d_core_reg_defs[] = { + REGDEF(V3D_CTL_IDENT0), + REGDEF(V3D_CTL_IDENT1), + REGDEF(V3D_CTL_IDENT2), + REGDEF(V3D_CTL_MISCCFG), + REGDEF(V3D_CTL_INT_STS), + REGDEF(V3D_CTL_INT_MSK_STS), + REGDEF(V3D_CLE_CT0CS), + REGDEF(V3D_CLE_CT0CA), + REGDEF(V3D_CLE_CT0EA), + REGDEF(V3D_CLE_CT1CS), + REGDEF(V3D_CLE_CT1CA), + REGDEF(V3D_CLE_CT1EA), + + REGDEF(V3D_PTB_BPCA), + REGDEF(V3D_PTB_BPCS), + + REGDEF(V3D_MMU_CTL), + REGDEF(V3D_MMU_VIO_ADDR), + + REGDEF(V3D_GMP_STATUS), + REGDEF(V3D_GMP_CFG), + REGDEF(V3D_GMP_VIO_ADDR), +}; + +static int v3d_v3d_debugfs_regs(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *)m->private; + struct drm_device *dev = node->minor->dev; + struct v3d_dev *v3d = to_v3d_dev(dev); + int i, core; + + for (i = 0; i < ARRAY_SIZE(v3d_hub_reg_defs); i++) { + seq_printf(m, "%s (0x%04x): 0x%08x\n", + v3d_hub_reg_defs[i].name, v3d_hub_reg_defs[i].reg, + V3D_READ(v3d_hub_reg_defs[i].reg)); + } + + for (i = 0; i < ARRAY_SIZE(v3d_gca_reg_defs); i++) { + seq_printf(m, "%s (0x%04x): 0x%08x\n", + v3d_gca_reg_defs[i].name, v3d_gca_reg_defs[i].reg, + V3D_GCA_READ(v3d_gca_reg_defs[i].reg)); + } + + for (core = 0; core < v3d->cores; core++) { + for (i = 0; i < ARRAY_SIZE(v3d_core_reg_defs); i++) { + seq_printf(m, "core %d %s (0x%04x): 0x%08x\n", + core, + v3d_core_reg_defs[i].name, + v3d_core_reg_defs[i].reg, + V3D_CORE_READ(core, + v3d_core_reg_defs[i].reg)); + } + } + + return 0; +} + +static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *)m->private; + struct drm_device *dev = node->minor->dev; + struct v3d_dev *v3d = to_v3d_dev(dev); + u32 
ident0, ident1, ident2, ident3, cores; + int ret, core; + + ret = pm_runtime_get_sync(v3d->dev); + if (ret < 0) + return ret; + + ident0 = V3D_READ(V3D_HUB_IDENT0); + ident1 = V3D_READ(V3D_HUB_IDENT1); + ident2 = V3D_READ(V3D_HUB_IDENT2); + ident3 = V3D_READ(V3D_HUB_IDENT3); + cores = V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_NCORES); + + seq_printf(m, "Revision: %d.%d.%d.%d\n", + V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_TVER), + V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_REV), + V3D_GET_FIELD(ident3, V3D_HUB_IDENT3_IPREV), + V3D_GET_FIELD(ident3, V3D_HUB_IDENT3_IPIDX)); + seq_printf(m, "MMU: %s\n", + (ident2 & V3D_HUB_IDENT2_WITH_MMU) ? "yes" : "no"); + seq_printf(m, "TFU: %s\n", + (ident1 & V3D_HUB_IDENT1_WITH_TFU) ? "yes" : "no"); + seq_printf(m, "TSY: %s\n", + (ident1 & V3D_HUB_IDENT1_WITH_TSY) ? "yes" : "no"); + seq_printf(m, "MSO: %s\n", + (ident1 & V3D_HUB_IDENT1_WITH_MSO) ? "yes" : "no"); + seq_printf(m, "L3C: %s (%dkb)\n", + (ident1 & V3D_HUB_IDENT1_WITH_L3C) ? "yes" : "no", + V3D_GET_FIELD(ident2, V3D_HUB_IDENT2_L3C_NKB)); + + for (core = 0; core < cores; core++) { + u32 misccfg; + u32 nslc, ntmu, qups; + + ident0 = V3D_CORE_READ(core, V3D_CTL_IDENT0); + ident1 = V3D_CORE_READ(core, V3D_CTL_IDENT1); + ident2 = V3D_CORE_READ(core, V3D_CTL_IDENT2); + misccfg = V3D_CORE_READ(core, V3D_CTL_MISCCFG); + + nslc = V3D_GET_FIELD(ident1, V3D_IDENT1_NSLC); + ntmu = V3D_GET_FIELD(ident1, V3D_IDENT1_NTMU); + qups = V3D_GET_FIELD(ident1, V3D_IDENT1_QUPS); + + seq_printf(m, "Core %d:\n", core); + seq_printf(m, " Revision: %d.%d\n", + V3D_GET_FIELD(ident0, V3D_IDENT0_VER), + V3D_GET_FIELD(ident1, V3D_IDENT1_REV)); + seq_printf(m, " Slices: %d\n", nslc); + seq_printf(m, " TMUs: %d\n", nslc * ntmu); + seq_printf(m, " QPUs: %d\n", nslc * qups); + seq_printf(m, " Semaphores: %d\n", + V3D_GET_FIELD(ident1, V3D_IDENT1_NSEM)); + seq_printf(m, " BCG int: %d\n", + (ident2 & V3D_IDENT2_BCG_INT) != 0); + seq_printf(m, " Override TMU: %d\n", + (misccfg & V3D_MISCCFG_OVRTMUOUT) != 0); + } + + pm_runtime_mark_last_busy(v3d->dev); + pm_runtime_put_autosuspend(v3d->dev); + + return 0; +} + +static int v3d_debugfs_bo_stats(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *)m->private; + struct drm_device *dev = node->minor->dev; + struct v3d_dev *v3d = to_v3d_dev(dev); + + mutex_lock(&v3d->bo_lock); + seq_printf(m, "allocated bos: %d\n", + v3d->bo_stats.num_allocated); + seq_printf(m, "allocated bo size (kb): %ld\n", + (long)v3d->bo_stats.pages_allocated << (PAGE_SHIFT - 10)); + mutex_unlock(&v3d->bo_lock); + + return 0; +} + +static const struct drm_info_list v3d_debugfs_list[] = { + {"v3d_ident", v3d_v3d_debugfs_ident, 0}, + {"v3d_regs", v3d_v3d_debugfs_regs, 0}, + {"bo_stats", v3d_debugfs_bo_stats, 0}, +}; + +int +v3d_debugfs_init(struct drm_minor *minor) +{ + return drm_debugfs_create_files(v3d_debugfs_list, + ARRAY_SIZE(v3d_debugfs_list), + minor->debugfs_root, minor); +} diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c new file mode 100644 index 000000000000..38e8041b5f0c --- /dev/null +++ b/drivers/gpu/drm/v3d/v3d_drv.c @@ -0,0 +1,371 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2014-2018 Broadcom */ + +/** + * DOC: Broadcom V3D Graphics Driver + * + * This driver supports the Broadcom V3D 3.3 and 4.1 OpenGL ES GPUs. + * For V3D 2.x support, see the VC4 driver. + * + * Currently only single-core rendering using the binner and renderer + * is supported. 
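+ * Each of those two jobs is driven by its own DRM GPU scheduler queue (see v3d_sched.c).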
The TFU (texture formatting unit) and V3D 4.x's CSD + * (compute shader dispatch) are not yet supported. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "uapi/drm/v3d_drm.h" +#include "v3d_drv.h" +#include "v3d_regs.h" + +#define DRIVER_NAME "v3d" +#define DRIVER_DESC "Broadcom V3D graphics" +#define DRIVER_DATE "20180419" +#define DRIVER_MAJOR 1 +#define DRIVER_MINOR 0 +#define DRIVER_PATCHLEVEL 0 + +#ifdef CONFIG_PM +static int v3d_runtime_suspend(struct device *dev) +{ + struct drm_device *drm = dev_get_drvdata(dev); + struct v3d_dev *v3d = to_v3d_dev(drm); + + v3d_irq_disable(v3d); + + clk_disable_unprepare(v3d->clk); + + return 0; +} + +static int v3d_runtime_resume(struct device *dev) +{ + struct drm_device *drm = dev_get_drvdata(dev); + struct v3d_dev *v3d = to_v3d_dev(drm); + int ret; + + ret = clk_prepare_enable(v3d->clk); + if (ret != 0) + return ret; + + /* XXX: VPM base */ + + v3d_mmu_set_page_table(v3d); + v3d_irq_enable(v3d); + + return 0; +} +#endif + +static const struct dev_pm_ops v3d_v3d_pm_ops = { + SET_RUNTIME_PM_OPS(v3d_runtime_suspend, v3d_runtime_resume, NULL) +}; + +static int v3d_get_param_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct v3d_dev *v3d = to_v3d_dev(dev); + struct drm_v3d_get_param *args = data; + int ret; + static const u32 reg_map[] = { + [DRM_V3D_PARAM_V3D_UIFCFG] = V3D_HUB_UIFCFG, + [DRM_V3D_PARAM_V3D_HUB_IDENT1] = V3D_HUB_IDENT1, + [DRM_V3D_PARAM_V3D_HUB_IDENT2] = V3D_HUB_IDENT2, + [DRM_V3D_PARAM_V3D_HUB_IDENT3] = V3D_HUB_IDENT3, + [DRM_V3D_PARAM_V3D_CORE0_IDENT0] = V3D_CTL_IDENT0, + [DRM_V3D_PARAM_V3D_CORE0_IDENT1] = V3D_CTL_IDENT1, + [DRM_V3D_PARAM_V3D_CORE0_IDENT2] = V3D_CTL_IDENT2, + }; + + if (args->pad != 0) + return -EINVAL; + + /* Note that DRM_V3D_PARAM_V3D_CORE0_IDENT0 is 0, so we need + * to explicitly allow it in the "the register in our + * parameter map" check. + */ + if (args->param < ARRAY_SIZE(reg_map) && + (reg_map[args->param] || + args->param == DRM_V3D_PARAM_V3D_CORE0_IDENT0)) { + u32 offset = reg_map[args->param]; + + if (args->value != 0) + return -EINVAL; + + ret = pm_runtime_get_sync(v3d->dev); + if (args->param >= DRM_V3D_PARAM_V3D_CORE0_IDENT0 && + args->param <= DRM_V3D_PARAM_V3D_CORE0_IDENT2) { + args->value = V3D_CORE_READ(0, offset); + } else { + args->value = V3D_READ(offset); + } + pm_runtime_mark_last_busy(v3d->dev); + pm_runtime_put_autosuspend(v3d->dev); + return 0; + } + + /* Any params that aren't just register reads would go here. 
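+ * None exist yet, so anything that reaches this point is reported as unknown.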
*/ + + DRM_DEBUG("Unknown parameter %d\n", args->param); + return -EINVAL; +} + +static int +v3d_open(struct drm_device *dev, struct drm_file *file) +{ + struct v3d_dev *v3d = to_v3d_dev(dev); + struct v3d_file_priv *v3d_priv; + int i; + + v3d_priv = kzalloc(sizeof(*v3d_priv), GFP_KERNEL); + if (!v3d_priv) + return -ENOMEM; + + v3d_priv->v3d = v3d; + + for (i = 0; i < V3D_MAX_QUEUES; i++) { + drm_sched_entity_init(&v3d->queue[i].sched, + &v3d_priv->sched_entity[i], + &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL], + 32, NULL); + } + + file->driver_priv = v3d_priv; + + return 0; +} + +static void +v3d_postclose(struct drm_device *dev, struct drm_file *file) +{ + struct v3d_dev *v3d = to_v3d_dev(dev); + struct v3d_file_priv *v3d_priv = file->driver_priv; + enum v3d_queue q; + + for (q = 0; q < V3D_MAX_QUEUES; q++) { + drm_sched_entity_fini(&v3d->queue[q].sched, + &v3d_priv->sched_entity[q]); + } + + kfree(v3d_priv); +} + +static const struct file_operations v3d_drm_fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .unlocked_ioctl = drm_ioctl, + .mmap = v3d_mmap, + .poll = drm_poll, + .read = drm_read, + .compat_ioctl = drm_compat_ioctl, + .llseek = noop_llseek, +}; + +/* DRM_AUTH is required on SUBMIT_CL for now, while we don't have GMP + * protection between clients. Note that render nodes would be be + * able to submit CLs that could access BOs from clients authenticated + * with the master node. + */ +static const struct drm_ioctl_desc v3d_drm_ioctls[] = { + DRM_IOCTL_DEF_DRV(V3D_SUBMIT_CL, v3d_submit_cl_ioctl, DRM_RENDER_ALLOW | DRM_AUTH), + DRM_IOCTL_DEF_DRV(V3D_WAIT_BO, v3d_wait_bo_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(V3D_CREATE_BO, v3d_create_bo_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(V3D_MMAP_BO, v3d_mmap_bo_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(V3D_GET_PARAM, v3d_get_param_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(V3D_GET_BO_OFFSET, v3d_get_bo_offset_ioctl, DRM_RENDER_ALLOW), +}; + +static const struct vm_operations_struct v3d_vm_ops = { + .fault = v3d_gem_fault, + .open = drm_gem_vm_open, + .close = drm_gem_vm_close, +}; + +static struct drm_driver v3d_drm_driver = { + .driver_features = (DRIVER_GEM | + DRIVER_RENDER | + DRIVER_PRIME | + DRIVER_SYNCOBJ), + + .open = v3d_open, + .postclose = v3d_postclose, + +#if defined(CONFIG_DEBUG_FS) + .debugfs_init = v3d_debugfs_init, +#endif + + .gem_free_object_unlocked = v3d_free_object, + .gem_vm_ops = &v3d_vm_ops, + + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, + .gem_prime_import = drm_gem_prime_import, + .gem_prime_export = drm_gem_prime_export, + .gem_prime_res_obj = v3d_prime_res_obj, + .gem_prime_get_sg_table = v3d_prime_get_sg_table, + .gem_prime_import_sg_table = v3d_prime_import_sg_table, + .gem_prime_mmap = v3d_prime_mmap, + + .ioctls = v3d_drm_ioctls, + .num_ioctls = ARRAY_SIZE(v3d_drm_ioctls), + .fops = &v3d_drm_fops, + + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, + .patchlevel = DRIVER_PATCHLEVEL, +}; + +static const struct of_device_id v3d_of_match[] = { + { .compatible = "brcm,7268-v3d" }, + { .compatible = "brcm,7278-v3d" }, + {}, +}; +MODULE_DEVICE_TABLE(of, v3d_of_match); + +static int +map_regs(struct v3d_dev *v3d, void __iomem **regs, const char *name) +{ + struct resource *res = + platform_get_resource_byname(v3d->pdev, IORESOURCE_MEM, name); + + *regs = devm_ioremap_resource(v3d->dev, res); + return PTR_ERR_OR_ZERO(*regs); 
+} + +static int v3d_platform_drm_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct drm_device *drm; + struct v3d_dev *v3d; + int ret; + u32 ident1; + + dev->coherent_dma_mask = DMA_BIT_MASK(36); + + v3d = kzalloc(sizeof(*v3d), GFP_KERNEL); + if (!v3d) + return -ENOMEM; + v3d->dev = dev; + v3d->pdev = pdev; + drm = &v3d->drm; + + ret = map_regs(v3d, &v3d->bridge_regs, "bridge"); + if (ret) + goto dev_free; + + ret = map_regs(v3d, &v3d->hub_regs, "hub"); + if (ret) + goto dev_free; + + ret = map_regs(v3d, &v3d->core_regs[0], "core0"); + if (ret) + goto dev_free; + + ident1 = V3D_READ(V3D_HUB_IDENT1); + v3d->ver = (V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_TVER) * 10 + + V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_REV)); + v3d->cores = V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_NCORES); + WARN_ON(v3d->cores > 1); /* multicore not yet implemented */ + + if (v3d->ver < 41) { + ret = map_regs(v3d, &v3d->gca_regs, "gca"); + if (ret) + goto dev_free; + } + + v3d->mmu_scratch = dma_alloc_wc(dev, 4096, &v3d->mmu_scratch_paddr, + GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO); + if (!v3d->mmu_scratch) { + dev_err(dev, "Failed to allocate MMU scratch page\n"); + ret = -ENOMEM; + goto dev_free; + } + + pm_runtime_use_autosuspend(dev); + pm_runtime_set_autosuspend_delay(dev, 50); + pm_runtime_enable(dev); + + ret = drm_dev_init(&v3d->drm, &v3d_drm_driver, dev); + if (ret) + goto dma_free; + + platform_set_drvdata(pdev, drm); + drm->dev_private = v3d; + + ret = v3d_gem_init(drm); + if (ret) + goto dev_destroy; + + v3d_irq_init(v3d); + + ret = drm_dev_register(drm, 0); + if (ret) + goto gem_destroy; + + return 0; + +gem_destroy: + v3d_gem_destroy(drm); +dev_destroy: + drm_dev_put(drm); +dma_free: + dma_free_wc(dev, 4096, v3d->mmu_scratch, v3d->mmu_scratch_paddr); +dev_free: + kfree(v3d); + return ret; +} + +static int v3d_platform_drm_remove(struct platform_device *pdev) +{ + struct drm_device *drm = platform_get_drvdata(pdev); + struct v3d_dev *v3d = to_v3d_dev(drm); + + drm_dev_unregister(drm); + + v3d_gem_destroy(drm); + + drm_dev_put(drm); + + dma_free_wc(v3d->dev, 4096, v3d->mmu_scratch, v3d->mmu_scratch_paddr); + + return 0; +} + +static struct platform_driver v3d_platform_driver = { + .probe = v3d_platform_drm_probe, + .remove = v3d_platform_drm_remove, + .driver = { + .name = "v3d", + .of_match_table = v3d_of_match, + }, +}; + +static int __init v3d_drm_register(void) +{ + return platform_driver_register(&v3d_platform_driver); +} + +static void __exit v3d_drm_unregister(void) +{ + platform_driver_unregister(&v3d_platform_driver); +} + +module_init(v3d_drm_register); +module_exit(v3d_drm_unregister); + +MODULE_ALIAS("platform:v3d-drm"); +MODULE_DESCRIPTION("Broadcom V3D DRM Driver"); +MODULE_AUTHOR("Eric Anholt "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h new file mode 100644 index 000000000000..a043ac3aae98 --- /dev/null +++ b/drivers/gpu/drm/v3d/v3d_drv.h @@ -0,0 +1,294 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2015-2018 Broadcom */ + +#include +#include +#include +#include +#include + +#define GMP_GRANULARITY (128 * 1024) + +/* Enum for each of the V3D queues. We maintain various queue + * tracking as an array because at some point we'll want to support + * the TFU (texture formatting unit) as another queue. 
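+ * For now there are just two queues, binning and rendering.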
+ */ +enum v3d_queue { + V3D_BIN, + V3D_RENDER, +}; + +#define V3D_MAX_QUEUES (V3D_RENDER + 1) + +struct v3d_queue_state { + struct drm_gpu_scheduler sched; + + u64 fence_context; + u64 emit_seqno; + u64 finished_seqno; +}; + +struct v3d_dev { + struct drm_device drm; + + /* Short representation (e.g. 33, 41) of the V3D tech version + * and revision. + */ + int ver; + + struct device *dev; + struct platform_device *pdev; + void __iomem *hub_regs; + void __iomem *core_regs[3]; + void __iomem *bridge_regs; + void __iomem *gca_regs; + struct clk *clk; + + /* Virtual and DMA addresses of the single shared page table. */ + volatile u32 *pt; + dma_addr_t pt_paddr; + + /* Virtual and DMA addresses of the MMU's scratch page. When + * a read or write is invalid in the MMU, it will be + * redirected here. + */ + void *mmu_scratch; + dma_addr_t mmu_scratch_paddr; + + /* Number of V3D cores. */ + u32 cores; + + /* Allocator managing the address space. All units are in + * number of pages. + */ + struct drm_mm mm; + spinlock_t mm_lock; + + struct work_struct overflow_mem_work; + + struct v3d_exec_info *bin_job; + struct v3d_exec_info *render_job; + + struct v3d_queue_state queue[V3D_MAX_QUEUES]; + + /* Spinlock used to synchronize the overflow memory + * management against bin job submission. + */ + spinlock_t job_lock; + + /* Protects bo_stats */ + struct mutex bo_lock; + + /* Lock taken when resetting the GPU, to keep multiple + * processes from trying to park the scheduler threads and + * reset at once. + */ + struct mutex reset_lock; + + struct { + u32 num_allocated; + u32 pages_allocated; + } bo_stats; +}; + +static inline struct v3d_dev * +to_v3d_dev(struct drm_device *dev) +{ + return (struct v3d_dev *)dev->dev_private; +} + +/* The per-fd struct, which tracks the MMU mappings. 
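+ * It also holds this client's drm_sched entities, one per V3D queue.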
*/ +struct v3d_file_priv { + struct v3d_dev *v3d; + + struct drm_sched_entity sched_entity[V3D_MAX_QUEUES]; +}; + +/* Tracks a mapping of a BO into a per-fd address space */ +struct v3d_vma { + struct v3d_page_table *pt; + struct list_head list; /* entry in v3d_bo.vmas */ +}; + +struct v3d_bo { + struct drm_gem_object base; + + struct mutex lock; + + struct drm_mm_node node; + + u32 pages_refcount; + struct page **pages; + struct sg_table *sgt; + void *vaddr; + + struct list_head vmas; /* list of v3d_vma */ + + /* List entry for the BO's position in + * v3d_exec_info->unref_list + */ + struct list_head unref_head; + + /* normally (resv == &_resv) except for imported bo's */ + struct reservation_object *resv; + struct reservation_object _resv; +}; + +static inline struct v3d_bo * +to_v3d_bo(struct drm_gem_object *bo) +{ + return (struct v3d_bo *)bo; +} + +struct v3d_fence { + struct dma_fence base; + struct drm_device *dev; + /* v3d seqno for signaled() test */ + u64 seqno; + enum v3d_queue queue; +}; + +static inline struct v3d_fence * +to_v3d_fence(struct dma_fence *fence) +{ + return (struct v3d_fence *)fence; +} + +#define V3D_READ(offset) readl(v3d->hub_regs + offset) +#define V3D_WRITE(offset, val) writel(val, v3d->hub_regs + offset) + +#define V3D_BRIDGE_READ(offset) readl(v3d->bridge_regs + offset) +#define V3D_BRIDGE_WRITE(offset, val) writel(val, v3d->bridge_regs + offset) + +#define V3D_GCA_READ(offset) readl(v3d->gca_regs + offset) +#define V3D_GCA_WRITE(offset, val) writel(val, v3d->gca_regs + offset) + +#define V3D_CORE_READ(core, offset) readl(v3d->core_regs[core] + offset) +#define V3D_CORE_WRITE(core, offset, val) writel(val, v3d->core_regs[core] + offset) + +struct v3d_job { + struct drm_sched_job base; + + struct v3d_exec_info *exec; + + /* An optional fence userspace can pass in for the job to depend on. */ + struct dma_fence *in_fence; + + /* v3d fence to be signaled by IRQ handler when the job is complete. */ + struct dma_fence *done_fence; + + /* GPU virtual addresses of the start/end of the CL job. */ + u32 start, end; +}; + +struct v3d_exec_info { + struct v3d_dev *v3d; + + struct v3d_job bin, render; + + /* Fence for when the scheduler considers the binner to be + * done, for render to depend on. + */ + struct dma_fence *bin_done_fence; + + struct kref refcount; + + /* This is the array of BOs that were looked up at the start of exec. */ + struct v3d_bo **bo; + u32 bo_count; + + /* List of overflow BOs used in the job that need to be + * released once the job is complete. + */ + struct list_head unref_list; + + /* Submitted tile memory allocation start/size, tile state. */ + u32 qma, qms, qts; +}; + +/** + * _wait_for - magic (register) wait macro + * + * Does the right thing for modeset paths when run under kdgb or similar atomic + * contexts. Note that it's important that we check the condition again after + * having timed out, since the timeout could be due to preemption or similar and + * we've never had a chance to check the condition before the timeout. 
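+ * Example (see v3d_mmu.c): wait_for(!(V3D_READ(V3D_MMU_CTL) & V3D_MMU_CTL_TLB_CLEARING), 100) returns 0 once the TLB clear completes, or -ETIMEDOUT after roughly 100ms of polling.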
+ */ +#define wait_for(COND, MS) ({ \ + unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \ + int ret__ = 0; \ + while (!(COND)) { \ + if (time_after(jiffies, timeout__)) { \ + if (!(COND)) \ + ret__ = -ETIMEDOUT; \ + break; \ + } \ + msleep(1); \ + } \ + ret__; \ +}) + +static inline unsigned long nsecs_to_jiffies_timeout(const u64 n) +{ + /* nsecs_to_jiffies64() does not guard against overflow */ + if (NSEC_PER_SEC % HZ && + div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ) + return MAX_JIFFY_OFFSET; + + return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1); +} + +/* v3d_bo.c */ +void v3d_free_object(struct drm_gem_object *gem_obj); +struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv, + size_t size); +int v3d_create_bo_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int v3d_gem_fault(struct vm_fault *vmf); +int v3d_mmap(struct file *filp, struct vm_area_struct *vma); +struct reservation_object *v3d_prime_res_obj(struct drm_gem_object *obj); +int v3d_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); +struct sg_table *v3d_prime_get_sg_table(struct drm_gem_object *obj); +struct drm_gem_object *v3d_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sgt); + +/* v3d_debugfs.c */ +int v3d_debugfs_init(struct drm_minor *minor); + +/* v3d_fence.c */ +extern const struct dma_fence_ops v3d_fence_ops; +struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue); + +/* v3d_gem.c */ +int v3d_gem_init(struct drm_device *dev); +void v3d_gem_destroy(struct drm_device *dev); +int v3d_submit_cl_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int v3d_wait_bo_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +void v3d_exec_put(struct v3d_exec_info *exec); +void v3d_reset(struct v3d_dev *v3d); +void v3d_invalidate_caches(struct v3d_dev *v3d); +void v3d_flush_caches(struct v3d_dev *v3d); + +/* v3d_irq.c */ +void v3d_irq_init(struct v3d_dev *v3d); +void v3d_irq_enable(struct v3d_dev *v3d); +void v3d_irq_disable(struct v3d_dev *v3d); +void v3d_irq_reset(struct v3d_dev *v3d); + +/* v3d_mmu.c */ +int v3d_mmu_get_offset(struct drm_file *file_priv, struct v3d_bo *bo, + u32 *offset); +int v3d_mmu_set_page_table(struct v3d_dev *v3d); +void v3d_mmu_insert_ptes(struct v3d_bo *bo); +void v3d_mmu_remove_ptes(struct v3d_bo *bo); + +/* v3d_sched.c */ +int v3d_sched_init(struct v3d_dev *v3d); +void v3d_sched_fini(struct v3d_dev *v3d); diff --git a/drivers/gpu/drm/v3d/v3d_fence.c b/drivers/gpu/drm/v3d/v3d_fence.c new file mode 100644 index 000000000000..087d49c8cb12 --- /dev/null +++ b/drivers/gpu/drm/v3d/v3d_fence.c @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2017-2018 Broadcom */ + +#include "v3d_drv.h" + +struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue) +{ + struct v3d_fence *fence; + + fence = kzalloc(sizeof(*fence), GFP_KERNEL); + if (!fence) + return ERR_PTR(-ENOMEM); + + fence->dev = &v3d->drm; + fence->queue = queue; + fence->seqno = ++v3d->queue[queue].emit_seqno; + dma_fence_init(&fence->base, &v3d_fence_ops, &v3d->job_lock, + v3d->queue[queue].fence_context, fence->seqno); + + return &fence->base; +} + +static const char *v3d_fence_get_driver_name(struct dma_fence 
*fence) +{ + return "v3d"; +} + +static const char *v3d_fence_get_timeline_name(struct dma_fence *fence) +{ + struct v3d_fence *f = to_v3d_fence(fence); + + if (f->queue == V3D_BIN) + return "v3d-bin"; + else + return "v3d-render"; +} + +static bool v3d_fence_enable_signaling(struct dma_fence *fence) +{ + return true; +} + +static bool v3d_fence_signaled(struct dma_fence *fence) +{ + struct v3d_fence *f = to_v3d_fence(fence); + struct v3d_dev *v3d = to_v3d_dev(f->dev); + + return v3d->queue[f->queue].finished_seqno >= f->seqno; +} + +const struct dma_fence_ops v3d_fence_ops = { + .get_driver_name = v3d_fence_get_driver_name, + .get_timeline_name = v3d_fence_get_timeline_name, + .enable_signaling = v3d_fence_enable_signaling, + .signaled = v3d_fence_signaled, + .wait = dma_fence_default_wait, + .release = dma_fence_free, +}; diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c new file mode 100644 index 000000000000..b513f9189caf --- /dev/null +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -0,0 +1,668 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2014-2018 Broadcom */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "uapi/drm/v3d_drm.h" +#include "v3d_drv.h" +#include "v3d_regs.h" +#include "v3d_trace.h" + +static void +v3d_init_core(struct v3d_dev *v3d, int core) +{ + /* Set OVRTMUOUT, which means that the texture sampler uniform + * configuration's tmu output type field is used, instead of + * using the hardware default behavior based on the texture + * type. If you want the default behavior, you can still put + * "2" in the indirect texture state's output_type field. + */ + V3D_CORE_WRITE(core, V3D_CTL_MISCCFG, V3D_MISCCFG_OVRTMUOUT); + + /* Whenever we flush the L2T cache, we always want to flush + * the whole thing. + */ + V3D_CORE_WRITE(core, V3D_CTL_L2TFLSTA, 0); + V3D_CORE_WRITE(core, V3D_CTL_L2TFLEND, ~0); +} + +/* Sets invariant state for the HW. */ +static void +v3d_init_hw_state(struct v3d_dev *v3d) +{ + v3d_init_core(v3d, 0); +} + +static void +v3d_idle_axi(struct v3d_dev *v3d, int core) +{ + V3D_CORE_WRITE(core, V3D_GMP_CFG, V3D_GMP_CFG_STOP_REQ); + + if (wait_for((V3D_CORE_READ(core, V3D_GMP_STATUS) & + (V3D_GMP_STATUS_RD_COUNT_MASK | + V3D_GMP_STATUS_WR_COUNT_MASK | + V3D_GMP_STATUS_CFG_BUSY)) == 0, 100)) { + DRM_ERROR("Failed to wait for safe GMP shutdown\n"); + } +} + +static void +v3d_idle_gca(struct v3d_dev *v3d) +{ + if (v3d->ver >= 41) + return; + + V3D_GCA_WRITE(V3D_GCA_SAFE_SHUTDOWN, V3D_GCA_SAFE_SHUTDOWN_EN); + + if (wait_for((V3D_GCA_READ(V3D_GCA_SAFE_SHUTDOWN_ACK) & + V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED) == + V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED, 100)) { + DRM_ERROR("Failed to wait for safe GCA shutdown\n"); + } +} + +static void +v3d_reset_v3d(struct v3d_dev *v3d) +{ + int version = V3D_BRIDGE_READ(V3D_TOP_GR_BRIDGE_REVISION); + + if (V3D_GET_FIELD(version, V3D_TOP_GR_BRIDGE_MAJOR) == 2) { + V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_0, + V3D_TOP_GR_BRIDGE_SW_INIT_0_V3D_CLK_108_SW_INIT); + V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_0, 0); + + /* GFXH-1383: The SW_INIT may cause a stray write to address 0 + * of the unit, so reset it to its power-on value here. 
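+ * (the write below sets the AXICFG MAX_LEN field back to its maximum)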
+ */ + V3D_WRITE(V3D_HUB_AXICFG, V3D_HUB_AXICFG_MAX_LEN_MASK); + } else { + WARN_ON_ONCE(V3D_GET_FIELD(version, + V3D_TOP_GR_BRIDGE_MAJOR) != 7); + V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1, + V3D_TOP_GR_BRIDGE_SW_INIT_1_V3D_CLK_108_SW_INIT); + V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1, 0); + } + + v3d_init_hw_state(v3d); +} + +void +v3d_reset(struct v3d_dev *v3d) +{ + struct drm_device *dev = &v3d->drm; + + DRM_ERROR("Resetting GPU.\n"); + trace_v3d_reset_begin(dev); + + /* XXX: only needed for safe powerdown, not reset. */ + if (false) + v3d_idle_axi(v3d, 0); + + v3d_idle_gca(v3d); + v3d_reset_v3d(v3d); + + v3d_mmu_set_page_table(v3d); + v3d_irq_reset(v3d); + + trace_v3d_reset_end(dev); +} + +static void +v3d_flush_l3(struct v3d_dev *v3d) +{ + if (v3d->ver < 41) { + u32 gca_ctrl = V3D_GCA_READ(V3D_GCA_CACHE_CTRL); + + V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL, + gca_ctrl | V3D_GCA_CACHE_CTRL_FLUSH); + + if (v3d->ver < 33) { + V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL, + gca_ctrl & ~V3D_GCA_CACHE_CTRL_FLUSH); + } + } +} + +/* Invalidates the (read-only) L2 cache. */ +static void +v3d_invalidate_l2(struct v3d_dev *v3d, int core) +{ + V3D_CORE_WRITE(core, V3D_CTL_L2CACTL, + V3D_L2CACTL_L2CCLR | + V3D_L2CACTL_L2CENA); +} + +static void +v3d_invalidate_l1td(struct v3d_dev *v3d, int core) +{ + V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL, V3D_L2TCACTL_TMUWCF); + if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) & + V3D_L2TCACTL_L2TFLS), 100)) { + DRM_ERROR("Timeout waiting for L1T write combiner flush\n"); + } +} + +/* Invalidates texture L2 cachelines */ +static void +v3d_flush_l2t(struct v3d_dev *v3d, int core) +{ + v3d_invalidate_l1td(v3d, core); + + V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL, + V3D_L2TCACTL_L2TFLS | + V3D_SET_FIELD(V3D_L2TCACTL_FLM_FLUSH, V3D_L2TCACTL_FLM)); + if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) & + V3D_L2TCACTL_L2TFLS), 100)) { + DRM_ERROR("Timeout waiting for L2T flush\n"); + } +} + +/* Invalidates the slice caches. These are read-only caches. */ +static void +v3d_invalidate_slices(struct v3d_dev *v3d, int core) +{ + V3D_CORE_WRITE(core, V3D_CTL_SLCACTL, + V3D_SET_FIELD(0xf, V3D_SLCACTL_TVCCS) | + V3D_SET_FIELD(0xf, V3D_SLCACTL_TDCCS) | + V3D_SET_FIELD(0xf, V3D_SLCACTL_UCC) | + V3D_SET_FIELD(0xf, V3D_SLCACTL_ICC)); +} + +/* Invalidates texture L2 cachelines */ +static void +v3d_invalidate_l2t(struct v3d_dev *v3d, int core) +{ + V3D_CORE_WRITE(core, + V3D_CTL_L2TCACTL, + V3D_L2TCACTL_L2TFLS | + V3D_SET_FIELD(V3D_L2TCACTL_FLM_CLEAR, V3D_L2TCACTL_FLM)); + if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) & + V3D_L2TCACTL_L2TFLS), 100)) { + DRM_ERROR("Timeout waiting for L2T invalidate\n"); + } +} + +void +v3d_invalidate_caches(struct v3d_dev *v3d) +{ + v3d_flush_l3(v3d); + + v3d_invalidate_l2(v3d, 0); + v3d_invalidate_slices(v3d, 0); + v3d_flush_l2t(v3d, 0); +} + +void +v3d_flush_caches(struct v3d_dev *v3d) +{ + v3d_invalidate_l1td(v3d, 0); + v3d_invalidate_l2t(v3d, 0); +} + +static void +v3d_attach_object_fences(struct v3d_exec_info *exec) +{ + struct dma_fence *out_fence = &exec->render.base.s_fence->finished; + struct v3d_bo *bo; + int i; + + for (i = 0; i < exec->bo_count; i++) { + bo = to_v3d_bo(&exec->bo[i]->base); + + /* XXX: Use shared fences for read-only objects. 
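+ * For now the render job's finished fence is attached as the exclusive fence on every BO, which also serializes read-only users.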
*/ + reservation_object_add_excl_fence(bo->resv, out_fence); + } +} + +static void +v3d_unlock_bo_reservations(struct drm_device *dev, + struct v3d_exec_info *exec, + struct ww_acquire_ctx *acquire_ctx) +{ + int i; + + for (i = 0; i < exec->bo_count; i++) { + struct v3d_bo *bo = to_v3d_bo(&exec->bo[i]->base); + + ww_mutex_unlock(&bo->resv->lock); + } + + ww_acquire_fini(acquire_ctx); +} + +/* Takes the reservation lock on all the BOs being referenced, so that + * at queue submit time we can update the reservations. + * + * We don't lock the RCL the tile alloc/state BOs, or overflow memory + * (all of which are on exec->unref_list). They're entirely private + * to v3d, so we don't attach dma-buf fences to them. + */ +static int +v3d_lock_bo_reservations(struct drm_device *dev, + struct v3d_exec_info *exec, + struct ww_acquire_ctx *acquire_ctx) +{ + int contended_lock = -1; + int i, ret; + struct v3d_bo *bo; + + ww_acquire_init(acquire_ctx, &reservation_ww_class); + +retry: + if (contended_lock != -1) { + bo = to_v3d_bo(&exec->bo[contended_lock]->base); + ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock, + acquire_ctx); + if (ret) { + ww_acquire_done(acquire_ctx); + return ret; + } + } + + for (i = 0; i < exec->bo_count; i++) { + if (i == contended_lock) + continue; + + bo = to_v3d_bo(&exec->bo[i]->base); + + ret = ww_mutex_lock_interruptible(&bo->resv->lock, acquire_ctx); + if (ret) { + int j; + + for (j = 0; j < i; j++) { + bo = to_v3d_bo(&exec->bo[j]->base); + ww_mutex_unlock(&bo->resv->lock); + } + + if (contended_lock != -1 && contended_lock >= i) { + bo = to_v3d_bo(&exec->bo[contended_lock]->base); + + ww_mutex_unlock(&bo->resv->lock); + } + + if (ret == -EDEADLK) { + contended_lock = i; + goto retry; + } + + ww_acquire_done(acquire_ctx); + return ret; + } + } + + ww_acquire_done(acquire_ctx); + + /* Reserve space for our shared (read-only) fence references, + * before we commit the CL to the hardware. + */ + for (i = 0; i < exec->bo_count; i++) { + bo = to_v3d_bo(&exec->bo[i]->base); + + ret = reservation_object_reserve_shared(bo->resv); + if (ret) { + v3d_unlock_bo_reservations(dev, exec, acquire_ctx); + return ret; + } + } + + return 0; +} + +/** + * v3d_cl_lookup_bos() - Sets up exec->bo[] with the GEM objects + * referenced by the job. + * @dev: DRM device + * @file_priv: DRM file for this fd + * @exec: V3D job being set up + * + * The command validator needs to reference BOs by their index within + * the submitted job's BO list. This does the validation of the job's + * BO list and reference counting for the lifetime of the job. + * + * Note that this function doesn't need to unreference the BOs on + * failure, because that will happen at v3d_exec_cleanup() time. + */ +static int +v3d_cl_lookup_bos(struct drm_device *dev, + struct drm_file *file_priv, + struct drm_v3d_submit_cl *args, + struct v3d_exec_info *exec) +{ + u32 *handles; + int ret = 0; + int i; + + exec->bo_count = args->bo_handle_count; + + if (!exec->bo_count) { + /* See comment on bo_index for why we have to check + * this. 
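+ * A submit that references no BOs has nothing to draw into, so reject it.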
+ */ + DRM_DEBUG("Rendering requires BOs\n"); + return -EINVAL; + } + + exec->bo = kvmalloc_array(exec->bo_count, + sizeof(struct drm_gem_cma_object *), + GFP_KERNEL | __GFP_ZERO); + if (!exec->bo) { + DRM_DEBUG("Failed to allocate validated BO pointers\n"); + return -ENOMEM; + } + + handles = kvmalloc_array(exec->bo_count, sizeof(u32), GFP_KERNEL); + if (!handles) { + ret = -ENOMEM; + DRM_DEBUG("Failed to allocate incoming GEM handles\n"); + goto fail; + } + + if (copy_from_user(handles, + (void __user *)(uintptr_t)args->bo_handles, + exec->bo_count * sizeof(u32))) { + ret = -EFAULT; + DRM_DEBUG("Failed to copy in GEM handles\n"); + goto fail; + } + + spin_lock(&file_priv->table_lock); + for (i = 0; i < exec->bo_count; i++) { + struct drm_gem_object *bo = idr_find(&file_priv->object_idr, + handles[i]); + if (!bo) { + DRM_DEBUG("Failed to look up GEM BO %d: %d\n", + i, handles[i]); + ret = -ENOENT; + spin_unlock(&file_priv->table_lock); + goto fail; + } + drm_gem_object_get(bo); + exec->bo[i] = to_v3d_bo(bo); + } + spin_unlock(&file_priv->table_lock); + +fail: + kvfree(handles); + return ret; +} + +static void +v3d_exec_cleanup(struct kref *ref) +{ + struct v3d_exec_info *exec = container_of(ref, struct v3d_exec_info, + refcount); + struct v3d_dev *v3d = exec->v3d; + unsigned int i; + struct v3d_bo *bo, *save; + + dma_fence_put(exec->bin.in_fence); + dma_fence_put(exec->render.in_fence); + + dma_fence_put(exec->bin.done_fence); + dma_fence_put(exec->render.done_fence); + + dma_fence_put(exec->bin_done_fence); + + for (i = 0; i < exec->bo_count; i++) + drm_gem_object_put_unlocked(&exec->bo[i]->base); + kvfree(exec->bo); + + list_for_each_entry_safe(bo, save, &exec->unref_list, unref_head) { + drm_gem_object_put_unlocked(&bo->base); + } + + pm_runtime_mark_last_busy(v3d->dev); + pm_runtime_put_autosuspend(v3d->dev); + + kfree(exec); +} + +void v3d_exec_put(struct v3d_exec_info *exec) +{ + kref_put(&exec->refcount, v3d_exec_cleanup); +} + +int +v3d_wait_bo_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + int ret; + struct drm_v3d_wait_bo *args = data; + struct drm_gem_object *gem_obj; + struct v3d_bo *bo; + ktime_t start = ktime_get(); + u64 delta_ns; + unsigned long timeout_jiffies = + nsecs_to_jiffies_timeout(args->timeout_ns); + + if (args->pad != 0) + return -EINVAL; + + gem_obj = drm_gem_object_lookup(file_priv, args->handle); + if (!gem_obj) { + DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle); + return -EINVAL; + } + bo = to_v3d_bo(gem_obj); + + ret = reservation_object_wait_timeout_rcu(bo->resv, + true, true, + timeout_jiffies); + + if (ret == 0) + ret = -ETIME; + else if (ret > 0) + ret = 0; + + /* Decrement the user's timeout, in case we got interrupted + * such that the ioctl will be restarted. + */ + delta_ns = ktime_to_ns(ktime_sub(ktime_get(), start)); + if (delta_ns < args->timeout_ns) + args->timeout_ns -= delta_ns; + else + args->timeout_ns = 0; + + /* Asked to wait beyond the jiffie/scheduler precision? */ + if (ret == -ETIME && args->timeout_ns) + ret = -EAGAIN; + + drm_gem_object_put_unlocked(gem_obj); + + return ret; +} + +/** + * v3d_submit_cl_ioctl() - Submits a job (frame) to the V3D. + * @dev: DRM device + * @data: ioctl argument + * @file_priv: DRM file for this fd + * + * This is the main entrypoint for userspace to submit a 3D frame to + * the GPU. 
Userspace provides the binner command list (if + * applicable), and the kernel sets up the render command list to draw + * to the framebuffer described in the ioctl, using the command lists + * that the 3D engine's binner will produce. + */ +int +v3d_submit_cl_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct v3d_dev *v3d = to_v3d_dev(dev); + struct v3d_file_priv *v3d_priv = file_priv->driver_priv; + struct drm_v3d_submit_cl *args = data; + struct v3d_exec_info *exec; + struct ww_acquire_ctx acquire_ctx; + struct drm_syncobj *sync_out; + int ret = 0; + + if (args->pad != 0) { + DRM_INFO("pad must be zero: %d\n", args->pad); + return -EINVAL; + } + + exec = kcalloc(1, sizeof(*exec), GFP_KERNEL); + if (!exec) + return -ENOMEM; + + ret = pm_runtime_get_sync(v3d->dev); + if (ret < 0) { + kfree(exec); + return ret; + } + + kref_init(&exec->refcount); + + ret = drm_syncobj_find_fence(file_priv, args->in_sync_bcl, + &exec->bin.in_fence); + if (ret == -EINVAL) + goto fail; + + ret = drm_syncobj_find_fence(file_priv, args->in_sync_rcl, + &exec->render.in_fence); + if (ret == -EINVAL) + goto fail; + + exec->qma = args->qma; + exec->qms = args->qms; + exec->qts = args->qts; + exec->bin.exec = exec; + exec->bin.start = args->bcl_start; + exec->bin.end = args->bcl_end; + exec->render.exec = exec; + exec->render.start = args->rcl_start; + exec->render.end = args->rcl_end; + exec->v3d = v3d; + INIT_LIST_HEAD(&exec->unref_list); + + ret = v3d_cl_lookup_bos(dev, file_priv, args, exec); + if (ret) + goto fail; + + ret = v3d_lock_bo_reservations(dev, exec, &acquire_ctx); + if (ret) + goto fail; + + if (exec->bin.start != exec->bin.end) { + ret = drm_sched_job_init(&exec->bin.base, + &v3d->queue[V3D_BIN].sched, + &v3d_priv->sched_entity[V3D_BIN], + v3d_priv); + if (ret) + goto fail_unreserve; + + exec->bin_done_fence = + dma_fence_get(&exec->bin.base.s_fence->finished); + + kref_get(&exec->refcount); /* put by scheduler job completion */ + drm_sched_entity_push_job(&exec->bin.base, + &v3d_priv->sched_entity[V3D_BIN]); + } + + ret = drm_sched_job_init(&exec->render.base, + &v3d->queue[V3D_RENDER].sched, + &v3d_priv->sched_entity[V3D_RENDER], + v3d_priv); + if (ret) + goto fail_unreserve; + + kref_get(&exec->refcount); /* put by scheduler job completion */ + drm_sched_entity_push_job(&exec->render.base, + &v3d_priv->sched_entity[V3D_RENDER]); + + v3d_attach_object_fences(exec); + + v3d_unlock_bo_reservations(dev, exec, &acquire_ctx); + + /* Update the return sync object for the */ + sync_out = drm_syncobj_find(file_priv, args->out_sync); + if (sync_out) { + drm_syncobj_replace_fence(sync_out, + &exec->render.base.s_fence->finished); + drm_syncobj_put(sync_out); + } + + v3d_exec_put(exec); + + return 0; + +fail_unreserve: + v3d_unlock_bo_reservations(dev, exec, &acquire_ctx); +fail: + v3d_exec_put(exec); + + return ret; +} + +int +v3d_gem_init(struct drm_device *dev) +{ + struct v3d_dev *v3d = to_v3d_dev(dev); + u32 pt_size = 4096 * 1024; + int ret, i; + + for (i = 0; i < V3D_MAX_QUEUES; i++) + v3d->queue[i].fence_context = dma_fence_context_alloc(1); + + spin_lock_init(&v3d->mm_lock); + spin_lock_init(&v3d->job_lock); + mutex_init(&v3d->bo_lock); + mutex_init(&v3d->reset_lock); + + /* Note: We don't allocate address 0. Various bits of HW + * treat 0 as special, such as the occlusion query counters + * where 0 means "disabled". 
+ */ + drm_mm_init(&v3d->mm, 1, pt_size / sizeof(u32) - 1); + + v3d->pt = dma_alloc_wc(v3d->dev, pt_size, + &v3d->pt_paddr, + GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO); + if (!v3d->pt) { + drm_mm_takedown(&v3d->mm); + dev_err(v3d->dev, + "Failed to allocate page tables. " + "Please ensure you have CMA enabled.\n"); + return -ENOMEM; + } + + v3d_init_hw_state(v3d); + v3d_mmu_set_page_table(v3d); + + ret = v3d_sched_init(v3d); + if (ret) { + drm_mm_takedown(&v3d->mm); + dma_free_coherent(v3d->dev, 4096 * 1024, (void *)v3d->pt, + v3d->pt_paddr); + } + + return 0; +} + +void +v3d_gem_destroy(struct drm_device *dev) +{ + struct v3d_dev *v3d = to_v3d_dev(dev); + enum v3d_queue q; + + v3d_sched_fini(v3d); + + /* Waiting for exec to finish would need to be done before + * unregistering V3D. + */ + for (q = 0; q < V3D_MAX_QUEUES; q++) { + WARN_ON(v3d->queue[q].emit_seqno != + v3d->queue[q].finished_seqno); + } + + drm_mm_takedown(&v3d->mm); + + dma_free_coherent(v3d->dev, 4096 * 1024, (void *)v3d->pt, v3d->pt_paddr); +} diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c new file mode 100644 index 000000000000..77e1fa046c10 --- /dev/null +++ b/drivers/gpu/drm/v3d/v3d_irq.c @@ -0,0 +1,206 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2014-2018 Broadcom */ + +/** + * DOC: Interrupt management for the V3D engine + * + * When we take a binning or rendering flush done interrupt, we need + * to signal the fence for that job so that the scheduler can queue up + * the next one and unblock any waiters. + * + * When we take the binner out of memory interrupt, we need to + * allocate some new memory and pass it to the binner so that the + * current job can make progress. + */ + +#include "v3d_drv.h" +#include "v3d_regs.h" + +#define V3D_CORE_IRQS ((u32)(V3D_INT_OUTOMEM | \ + V3D_INT_FLDONE | \ + V3D_INT_FRDONE | \ + V3D_INT_GMPV)) + +#define V3D_HUB_IRQS ((u32)(V3D_HUB_INT_MMU_WRV | \ + V3D_HUB_INT_MMU_PTI | \ + V3D_HUB_INT_MMU_CAP)) + +static void +v3d_overflow_mem_work(struct work_struct *work) +{ + struct v3d_dev *v3d = + container_of(work, struct v3d_dev, overflow_mem_work); + struct drm_device *dev = &v3d->drm; + struct v3d_bo *bo = v3d_bo_create(dev, NULL /* XXX: GMP */, 256 * 1024); + unsigned long irqflags; + + if (IS_ERR(bo)) { + DRM_ERROR("Couldn't allocate binner overflow mem\n"); + return; + } + + /* We lost a race, and our work task came in after the bin job + * completed and exited. This can happen because the HW + * signals OOM before it's fully OOM, so the binner might just + * barely complete. + * + * If we lose the race and our work task comes in after a new + * bin job got scheduled, that's fine. We'll just give them + * some binner pool anyway. + */ + spin_lock_irqsave(&v3d->job_lock, irqflags); + if (!v3d->bin_job) { + spin_unlock_irqrestore(&v3d->job_lock, irqflags); + goto out; + } + + drm_gem_object_get(&bo->base); + list_add_tail(&bo->unref_head, &v3d->bin_job->unref_list); + spin_unlock_irqrestore(&v3d->job_lock, irqflags); + + V3D_CORE_WRITE(0, V3D_PTB_BPOA, bo->node.start << PAGE_SHIFT); + V3D_CORE_WRITE(0, V3D_PTB_BPOS, bo->base.size); + +out: + drm_gem_object_put_unlocked(&bo->base); +} + +static irqreturn_t +v3d_irq(int irq, void *arg) +{ + struct v3d_dev *v3d = arg; + u32 intsts; + irqreturn_t status = IRQ_NONE; + + intsts = V3D_CORE_READ(0, V3D_CTL_INT_STS); + + /* Acknowledge the interrupts we're handling here. 
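+ * (writing the latched status bits back into the clear register)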
*/ + V3D_CORE_WRITE(0, V3D_CTL_INT_CLR, intsts); + + if (intsts & V3D_INT_OUTOMEM) { + /* Note that the OOM status is edge signaled, so the + * interrupt won't happen again until the we actually + * add more memory. + */ + schedule_work(&v3d->overflow_mem_work); + status = IRQ_HANDLED; + } + + if (intsts & V3D_INT_FLDONE) { + v3d->queue[V3D_BIN].finished_seqno++; + dma_fence_signal(v3d->bin_job->bin.done_fence); + status = IRQ_HANDLED; + } + + if (intsts & V3D_INT_FRDONE) { + v3d->queue[V3D_RENDER].finished_seqno++; + dma_fence_signal(v3d->render_job->render.done_fence); + + status = IRQ_HANDLED; + } + + /* We shouldn't be triggering these if we have GMP in + * always-allowed mode. + */ + if (intsts & V3D_INT_GMPV) + dev_err(v3d->dev, "GMP violation\n"); + + return status; +} + +static irqreturn_t +v3d_hub_irq(int irq, void *arg) +{ + struct v3d_dev *v3d = arg; + u32 intsts; + irqreturn_t status = IRQ_NONE; + + intsts = V3D_READ(V3D_HUB_INT_STS); + + /* Acknowledge the interrupts we're handling here. */ + V3D_WRITE(V3D_HUB_INT_CLR, intsts); + + if (intsts & (V3D_HUB_INT_MMU_WRV | + V3D_HUB_INT_MMU_PTI | + V3D_HUB_INT_MMU_CAP)) { + u32 axi_id = V3D_READ(V3D_MMU_VIO_ID); + u64 vio_addr = (u64)V3D_READ(V3D_MMU_VIO_ADDR) << 8; + + dev_err(v3d->dev, "MMU error from client %d at 0x%08llx%s%s%s\n", + axi_id, (long long)vio_addr, + ((intsts & V3D_HUB_INT_MMU_WRV) ? + ", write violation" : ""), + ((intsts & V3D_HUB_INT_MMU_PTI) ? + ", pte invalid" : ""), + ((intsts & V3D_HUB_INT_MMU_CAP) ? + ", cap exceeded" : "")); + status = IRQ_HANDLED; + } + + return status; +} + +void +v3d_irq_init(struct v3d_dev *v3d) +{ + int ret, core; + + INIT_WORK(&v3d->overflow_mem_work, v3d_overflow_mem_work); + + /* Clear any pending interrupts someone might have left around + * for us. + */ + for (core = 0; core < v3d->cores; core++) + V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS); + V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS); + + ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0), + v3d_hub_irq, IRQF_SHARED, + "v3d_hub", v3d); + ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 1), + v3d_irq, IRQF_SHARED, + "v3d_core0", v3d); + if (ret) + dev_err(v3d->dev, "IRQ setup failed: %d\n", ret); + + v3d_irq_enable(v3d); +} + +void +v3d_irq_enable(struct v3d_dev *v3d) +{ + int core; + + /* Enable our set of interrupts, masking out any others. */ + for (core = 0; core < v3d->cores; core++) { + V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~V3D_CORE_IRQS); + V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_CLR, V3D_CORE_IRQS); + } + + V3D_WRITE(V3D_HUB_INT_MSK_SET, ~V3D_HUB_IRQS); + V3D_WRITE(V3D_HUB_INT_MSK_CLR, V3D_HUB_IRQS); +} + +void +v3d_irq_disable(struct v3d_dev *v3d) +{ + int core; + + /* Disable all interrupts. */ + for (core = 0; core < v3d->cores; core++) + V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~0); + V3D_WRITE(V3D_HUB_INT_MSK_SET, ~0); + + /* Clear any pending interrupts we might have left. */ + for (core = 0; core < v3d->cores; core++) + V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS); + V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS); + + cancel_work_sync(&v3d->overflow_mem_work); +} + +/** Reinitializes interrupt registers when a GPU reset is performed. 
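+ * The masks are restored simply by re-running the normal enable path.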
*/ +void v3d_irq_reset(struct v3d_dev *v3d) +{ + v3d_irq_enable(v3d); +} diff --git a/drivers/gpu/drm/v3d/v3d_mmu.c b/drivers/gpu/drm/v3d/v3d_mmu.c new file mode 100644 index 000000000000..b00f97c31b70 --- /dev/null +++ b/drivers/gpu/drm/v3d/v3d_mmu.c @@ -0,0 +1,122 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2017-2018 Broadcom */ + +/** + * DOC: Broadcom V3D MMU + * + * The V3D 3.x hardware (compared to VC4) now includes an MMU. It has + * a single level of page tables for the V3D's 4GB address space to + * map to AXI bus addresses, thus it could need up to 4MB of + * physically contiguous memory to store the PTEs. + * + * Because the 4MB of contiguous memory for page tables is precious, + * and switching between them is expensive, we load all BOs into the + * same 4GB address space. + * + * To protect clients from each other, we should use the GMP to + * quickly mask out (at 128kb granularity) what pages are available to + * each client. This is not yet implemented. + */ + +#include "v3d_drv.h" +#include "v3d_regs.h" + +#define V3D_MMU_PAGE_SHIFT 12 + +/* Note: All PTEs for the 1MB superpage must be filled with the + * superpage bit set. + */ +#define V3D_PTE_SUPERPAGE BIT(31) +#define V3D_PTE_WRITEABLE BIT(29) +#define V3D_PTE_VALID BIT(28) + +static int v3d_mmu_flush_all(struct v3d_dev *v3d) +{ + int ret; + + /* Make sure that another flush isn't already running when we + * start this one. + */ + ret = wait_for(!(V3D_READ(V3D_MMU_CTL) & + V3D_MMU_CTL_TLB_CLEARING), 100); + if (ret) + dev_err(v3d->dev, "TLB clear wait idle pre-wait failed\n"); + + V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL) | + V3D_MMU_CTL_TLB_CLEAR); + + V3D_WRITE(V3D_MMUC_CONTROL, + V3D_MMUC_CONTROL_FLUSH | + V3D_MMUC_CONTROL_ENABLE); + + ret = wait_for(!(V3D_READ(V3D_MMU_CTL) & + V3D_MMU_CTL_TLB_CLEARING), 100); + if (ret) { + dev_err(v3d->dev, "TLB clear wait idle failed\n"); + return ret; + } + + ret = wait_for(!(V3D_READ(V3D_MMUC_CONTROL) & + V3D_MMUC_CONTROL_FLUSHING), 100); + if (ret) + dev_err(v3d->dev, "MMUC flush wait idle failed\n"); + + return ret; +} + +int v3d_mmu_set_page_table(struct v3d_dev *v3d) +{ + V3D_WRITE(V3D_MMU_PT_PA_BASE, v3d->pt_paddr >> V3D_MMU_PAGE_SHIFT); + V3D_WRITE(V3D_MMU_CTL, + V3D_MMU_CTL_ENABLE | + V3D_MMU_CTL_PT_INVALID | + V3D_MMU_CTL_PT_INVALID_ABORT | + V3D_MMU_CTL_WRITE_VIOLATION_ABORT | + V3D_MMU_CTL_CAP_EXCEEDED_ABORT); + V3D_WRITE(V3D_MMU_ILLEGAL_ADDR, + (v3d->mmu_scratch_paddr >> V3D_MMU_PAGE_SHIFT) | + V3D_MMU_ILLEGAL_ADDR_ENABLE); + V3D_WRITE(V3D_MMUC_CONTROL, V3D_MMUC_CONTROL_ENABLE); + + return v3d_mmu_flush_all(v3d); +} + +void v3d_mmu_insert_ptes(struct v3d_bo *bo) +{ + struct v3d_dev *v3d = to_v3d_dev(bo->base.dev); + u32 page = bo->node.start; + u32 page_prot = V3D_PTE_WRITEABLE | V3D_PTE_VALID; + unsigned int count; + struct scatterlist *sgl; + + for_each_sg(bo->sgt->sgl, sgl, bo->sgt->nents, count) { + u32 page_address = sg_dma_address(sgl) >> V3D_MMU_PAGE_SHIFT; + u32 pte = page_prot | page_address; + u32 i; + + BUG_ON(page_address + (sg_dma_len(sgl) >> V3D_MMU_PAGE_SHIFT) >= + BIT(24)); + + for (i = 0; i < sg_dma_len(sgl) >> V3D_MMU_PAGE_SHIFT; i++) + v3d->pt[page++] = pte + i; + } + + WARN_ON_ONCE(page - bo->node.start != + bo->base.size >> V3D_MMU_PAGE_SHIFT); + + if (v3d_mmu_flush_all(v3d)) + dev_err(v3d->dev, "MMU flush timeout\n"); +} + +void v3d_mmu_remove_ptes(struct v3d_bo *bo) +{ + struct v3d_dev *v3d = to_v3d_dev(bo->base.dev); + u32 npages = bo->base.size >> V3D_MMU_PAGE_SHIFT; + u32 page; + + for (page = bo->node.start; page < 
bo->node.start + npages; page++) + v3d->pt[page] = 0; + + if (v3d_mmu_flush_all(v3d)) + dev_err(v3d->dev, "MMU flush timeout\n"); +} diff --git a/drivers/gpu/drm/v3d/v3d_regs.h b/drivers/gpu/drm/v3d/v3d_regs.h new file mode 100644 index 000000000000..fc13282dfc2f --- /dev/null +++ b/drivers/gpu/drm/v3d/v3d_regs.h @@ -0,0 +1,295 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2017-2018 Broadcom */ + +#ifndef V3D_REGS_H +#define V3D_REGS_H + +#include + +#define V3D_MASK(high, low) ((u32)GENMASK(high, low)) +/* Using the GNU statement expression extension */ +#define V3D_SET_FIELD(value, field) \ + ({ \ + u32 fieldval = (value) << field##_SHIFT; \ + WARN_ON((fieldval & ~field##_MASK) != 0); \ + fieldval & field##_MASK; \ + }) + +#define V3D_GET_FIELD(word, field) (((word) & field##_MASK) >> \ + field##_SHIFT) + +/* Hub registers for shared hardware between V3D cores. */ + +#define V3D_HUB_AXICFG 0x00000 +# define V3D_HUB_AXICFG_MAX_LEN_MASK V3D_MASK(3, 0) +# define V3D_HUB_AXICFG_MAX_LEN_SHIFT 0 +#define V3D_HUB_UIFCFG 0x00004 +#define V3D_HUB_IDENT0 0x00008 + +#define V3D_HUB_IDENT1 0x0000c +# define V3D_HUB_IDENT1_WITH_MSO BIT(19) +# define V3D_HUB_IDENT1_WITH_TSY BIT(18) +# define V3D_HUB_IDENT1_WITH_TFU BIT(17) +# define V3D_HUB_IDENT1_WITH_L3C BIT(16) +# define V3D_HUB_IDENT1_NHOSTS_MASK V3D_MASK(15, 12) +# define V3D_HUB_IDENT1_NHOSTS_SHIFT 12 +# define V3D_HUB_IDENT1_NCORES_MASK V3D_MASK(11, 8) +# define V3D_HUB_IDENT1_NCORES_SHIFT 8 +# define V3D_HUB_IDENT1_REV_MASK V3D_MASK(7, 4) +# define V3D_HUB_IDENT1_REV_SHIFT 4 +# define V3D_HUB_IDENT1_TVER_MASK V3D_MASK(3, 0) +# define V3D_HUB_IDENT1_TVER_SHIFT 0 + +#define V3D_HUB_IDENT2 0x00010 +# define V3D_HUB_IDENT2_WITH_MMU BIT(8) +# define V3D_HUB_IDENT2_L3C_NKB_MASK V3D_MASK(7, 0) +# define V3D_HUB_IDENT2_L3C_NKB_SHIFT 0 + +#define V3D_HUB_IDENT3 0x00014 +# define V3D_HUB_IDENT3_IPREV_MASK V3D_MASK(15, 8) +# define V3D_HUB_IDENT3_IPREV_SHIFT 8 +# define V3D_HUB_IDENT3_IPIDX_MASK V3D_MASK(7, 0) +# define V3D_HUB_IDENT3_IPIDX_SHIFT 0 + +#define V3D_HUB_INT_STS 0x00050 +#define V3D_HUB_INT_SET 0x00054 +#define V3D_HUB_INT_CLR 0x00058 +#define V3D_HUB_INT_MSK_STS 0x0005c +#define V3D_HUB_INT_MSK_SET 0x00060 +#define V3D_HUB_INT_MSK_CLR 0x00064 +# define V3D_HUB_INT_MMU_WRV BIT(5) +# define V3D_HUB_INT_MMU_PTI BIT(4) +# define V3D_HUB_INT_MMU_CAP BIT(3) +# define V3D_HUB_INT_MSO BIT(2) +# define V3D_HUB_INT_TFUC BIT(1) +# define V3D_HUB_INT_TFUF BIT(0) + +#define V3D_GCA_CACHE_CTRL 0x0000c +# define V3D_GCA_CACHE_CTRL_FLUSH BIT(0) + +#define V3D_GCA_SAFE_SHUTDOWN 0x000b0 +# define V3D_GCA_SAFE_SHUTDOWN_EN BIT(0) + +#define V3D_GCA_SAFE_SHUTDOWN_ACK 0x000b4 +# define V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED 3 + +# define V3D_TOP_GR_BRIDGE_REVISION 0x00000 +# define V3D_TOP_GR_BRIDGE_MAJOR_MASK V3D_MASK(15, 8) +# define V3D_TOP_GR_BRIDGE_MAJOR_SHIFT 8 +# define V3D_TOP_GR_BRIDGE_MINOR_MASK V3D_MASK(7, 0) +# define V3D_TOP_GR_BRIDGE_MINOR_SHIFT 0 + +/* 7268 reset reg */ +# define V3D_TOP_GR_BRIDGE_SW_INIT_0 0x00008 +# define V3D_TOP_GR_BRIDGE_SW_INIT_0_V3D_CLK_108_SW_INIT BIT(0) +/* 7278 reset reg */ +# define V3D_TOP_GR_BRIDGE_SW_INIT_1 0x0000c +# define V3D_TOP_GR_BRIDGE_SW_INIT_1_V3D_CLK_108_SW_INIT BIT(0) + +/* Per-MMU registers. 
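The DOC comment above notes that the single-level page table may need up to 4MB of physically contiguous memory, and v3d_mmu_insert_ptes() packs each PTE from a valid bit, a writeable bit and the bus address shifted by the 4KB page size. A standalone sketch of that arithmetic, plain C only, with the bit positions copied from the defines above and a made-up DMA address:

/* Standalone arithmetic sketch for the numbers quoted in the DOC
 * comment and the PTE packing in v3d_mmu_insert_ptes().  Illustration
 * only; bit positions follow the defines in v3d_mmu.c (VALID=28,
 * WRITEABLE=29, 4 KB page shift).
 */
#include <stdint.h>
#include <stdio.h>

#define MMU_PAGE_SHIFT 12
#define PTE_WRITEABLE  (1u << 29)
#define PTE_VALID      (1u << 28)

int main(void)
{
        /* 4 GB address space / 4 KB pages = 1M PTEs; 4 bytes each = 4 MB. */
        uint64_t nptes = (1ull << 32) >> MMU_PAGE_SHIFT;
        printf("PTEs: %llu, table size: %llu MB\n",
               (unsigned long long)nptes,
               ((unsigned long long)(nptes * sizeof(uint32_t))) >> 20);

        /* Pack a PTE for a made-up, page-aligned DMA (bus) address. */
        uint64_t dma_addr = 0x12345000ull;
        uint32_t pte = PTE_VALID | PTE_WRITEABLE |
                       (uint32_t)(dma_addr >> MMU_PAGE_SHIFT);
        printf("pte: 0x%08x\n", pte);   /* 0x30012345 */
        return 0;
}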
*/ + +#define V3D_MMUC_CONTROL 0x01000 +# define V3D_MMUC_CONTROL_CLEAR BIT(3) +# define V3D_MMUC_CONTROL_FLUSHING BIT(2) +# define V3D_MMUC_CONTROL_FLUSH BIT(1) +# define V3D_MMUC_CONTROL_ENABLE BIT(0) + +#define V3D_MMU_CTL 0x01200 +# define V3D_MMU_CTL_CAP_EXCEEDED BIT(27) +# define V3D_MMU_CTL_CAP_EXCEEDED_ABORT BIT(26) +# define V3D_MMU_CTL_CAP_EXCEEDED_INT BIT(25) +# define V3D_MMU_CTL_CAP_EXCEEDED_EXCEPTION BIT(24) +# define V3D_MMU_CTL_PT_INVALID BIT(20) +# define V3D_MMU_CTL_PT_INVALID_ABORT BIT(19) +# define V3D_MMU_CTL_PT_INVALID_INT BIT(18) +# define V3D_MMU_CTL_PT_INVALID_EXCEPTION BIT(17) +# define V3D_MMU_CTL_WRITE_VIOLATION BIT(16) +# define V3D_MMU_CTL_WRITE_VIOLATION_ABORT BIT(11) +# define V3D_MMU_CTL_WRITE_VIOLATION_INT BIT(10) +# define V3D_MMU_CTL_WRITE_VIOLATION_EXCEPTION BIT(9) +# define V3D_MMU_CTL_TLB_CLEARING BIT(7) +# define V3D_MMU_CTL_TLB_STATS_CLEAR BIT(3) +# define V3D_MMU_CTL_TLB_CLEAR BIT(2) +# define V3D_MMU_CTL_TLB_STATS_ENABLE BIT(1) +# define V3D_MMU_CTL_ENABLE BIT(0) + +#define V3D_MMU_PT_PA_BASE 0x01204 +#define V3D_MMU_HIT 0x01208 +#define V3D_MMU_MISSES 0x0120c +#define V3D_MMU_STALLS 0x01210 + +#define V3D_MMU_ADDR_CAP 0x01214 +# define V3D_MMU_ADDR_CAP_ENABLE BIT(31) +# define V3D_MMU_ADDR_CAP_MPAGE_MASK V3D_MASK(11, 0) +# define V3D_MMU_ADDR_CAP_MPAGE_SHIFT 0 + +#define V3D_MMU_SHOOT_DOWN 0x01218 +# define V3D_MMU_SHOOT_DOWN_SHOOTING BIT(29) +# define V3D_MMU_SHOOT_DOWN_SHOOT BIT(28) +# define V3D_MMU_SHOOT_DOWN_PAGE_MASK V3D_MASK(27, 0) +# define V3D_MMU_SHOOT_DOWN_PAGE_SHIFT 0 + +#define V3D_MMU_BYPASS_START 0x0121c +#define V3D_MMU_BYPASS_END 0x01220 + +/* AXI ID of the access that faulted */ +#define V3D_MMU_VIO_ID 0x0122c + +/* Address for illegal PTEs to return */ +#define V3D_MMU_ILLEGAL_ADDR 0x01230 +# define V3D_MMU_ILLEGAL_ADDR_ENABLE BIT(31) + +/* Address that faulted */ +#define V3D_MMU_VIO_ADDR 0x01234 + +/* Per-V3D-core registers */ + +#define V3D_CTL_IDENT0 0x00000 +# define V3D_IDENT0_VER_MASK V3D_MASK(31, 24) +# define V3D_IDENT0_VER_SHIFT 24 + +#define V3D_CTL_IDENT1 0x00004 +/* Multiples of 1kb */ +# define V3D_IDENT1_VPM_SIZE_MASK V3D_MASK(31, 28) +# define V3D_IDENT1_VPM_SIZE_SHIFT 28 +# define V3D_IDENT1_NSEM_MASK V3D_MASK(23, 16) +# define V3D_IDENT1_NSEM_SHIFT 16 +# define V3D_IDENT1_NTMU_MASK V3D_MASK(15, 12) +# define V3D_IDENT1_NTMU_SHIFT 12 +# define V3D_IDENT1_QUPS_MASK V3D_MASK(11, 8) +# define V3D_IDENT1_QUPS_SHIFT 8 +# define V3D_IDENT1_NSLC_MASK V3D_MASK(7, 4) +# define V3D_IDENT1_NSLC_SHIFT 4 +# define V3D_IDENT1_REV_MASK V3D_MASK(3, 0) +# define V3D_IDENT1_REV_SHIFT 0 + +#define V3D_CTL_IDENT2 0x00008 +# define V3D_IDENT2_BCG_INT BIT(28) + +#define V3D_CTL_MISCCFG 0x00018 +# define V3D_MISCCFG_OVRTMUOUT BIT(0) + +#define V3D_CTL_L2CACTL 0x00020 +# define V3D_L2CACTL_L2CCLR BIT(2) +# define V3D_L2CACTL_L2CDIS BIT(1) +# define V3D_L2CACTL_L2CENA BIT(0) + +#define V3D_CTL_SLCACTL 0x00024 +# define V3D_SLCACTL_TVCCS_MASK V3D_MASK(27, 24) +# define V3D_SLCACTL_TVCCS_SHIFT 24 +# define V3D_SLCACTL_TDCCS_MASK V3D_MASK(19, 16) +# define V3D_SLCACTL_TDCCS_SHIFT 16 +# define V3D_SLCACTL_UCC_MASK V3D_MASK(11, 8) +# define V3D_SLCACTL_UCC_SHIFT 8 +# define V3D_SLCACTL_ICC_MASK V3D_MASK(3, 0) +# define V3D_SLCACTL_ICC_SHIFT 0 + +#define V3D_CTL_L2TCACTL 0x00030 +# define V3D_L2TCACTL_TMUWCF BIT(8) +# define V3D_L2TCACTL_L2T_NO_WM BIT(4) +# define V3D_L2TCACTL_FLM_FLUSH 0 +# define V3D_L2TCACTL_FLM_CLEAR 1 +# define V3D_L2TCACTL_FLM_CLEAN 2 +# define V3D_L2TCACTL_FLM_MASK V3D_MASK(2, 1) +# define V3D_L2TCACTL_FLM_SHIFT 1 +# 
define V3D_L2TCACTL_L2TFLS BIT(0) +#define V3D_CTL_L2TFLSTA 0x00034 +#define V3D_CTL_L2TFLEND 0x00038 + +#define V3D_CTL_INT_STS 0x00050 +#define V3D_CTL_INT_SET 0x00054 +#define V3D_CTL_INT_CLR 0x00058 +#define V3D_CTL_INT_MSK_STS 0x0005c +#define V3D_CTL_INT_MSK_SET 0x00060 +#define V3D_CTL_INT_MSK_CLR 0x00064 +# define V3D_INT_QPU_MASK V3D_MASK(27, 16) +# define V3D_INT_QPU_SHIFT 16 +# define V3D_INT_GMPV BIT(5) +# define V3D_INT_TRFB BIT(4) +# define V3D_INT_SPILLUSE BIT(3) +# define V3D_INT_OUTOMEM BIT(2) +# define V3D_INT_FLDONE BIT(1) +# define V3D_INT_FRDONE BIT(0) + +#define V3D_CLE_CT0CS 0x00100 +#define V3D_CLE_CT1CS 0x00104 +#define V3D_CLE_CTNCS(n) (V3D_CLE_CT0CS + 4 * n) +#define V3D_CLE_CT0EA 0x00108 +#define V3D_CLE_CT1EA 0x0010c +#define V3D_CLE_CTNEA(n) (V3D_CLE_CT0EA + 4 * n) +#define V3D_CLE_CT0CA 0x00110 +#define V3D_CLE_CT1CA 0x00114 +#define V3D_CLE_CTNCA(n) (V3D_CLE_CT0CA + 4 * n) +#define V3D_CLE_CT0RA 0x00118 +#define V3D_CLE_CT1RA 0x0011c +#define V3D_CLE_CT0LC 0x00120 +#define V3D_CLE_CT1LC 0x00124 +#define V3D_CLE_CT0PC 0x00128 +#define V3D_CLE_CT1PC 0x0012c +#define V3D_CLE_PCS 0x00130 +#define V3D_CLE_BFC 0x00134 +#define V3D_CLE_RFC 0x00138 +#define V3D_CLE_TFBC 0x0013c +#define V3D_CLE_TFIT 0x00140 +#define V3D_CLE_CT1CFG 0x00144 +#define V3D_CLE_CT1TILECT 0x00148 +#define V3D_CLE_CT1TSKIP 0x0014c +#define V3D_CLE_CT1PTCT 0x00150 +#define V3D_CLE_CT0SYNC 0x00154 +#define V3D_CLE_CT1SYNC 0x00158 +#define V3D_CLE_CT0QTS 0x0015c +# define V3D_CLE_CT0QTS_ENABLE BIT(1) +#define V3D_CLE_CT0QBA 0x00160 +#define V3D_CLE_CT1QBA 0x00164 +#define V3D_CLE_CTNQBA(n) (V3D_CLE_CT0QBA + 4 * n) +#define V3D_CLE_CT0QEA 0x00168 +#define V3D_CLE_CT1QEA 0x0016c +#define V3D_CLE_CTNQEA(n) (V3D_CLE_CT0QEA + 4 * n) +#define V3D_CLE_CT0QMA 0x00170 +#define V3D_CLE_CT0QMS 0x00174 +#define V3D_CLE_CT1QCFG 0x00178 +/* If set without ETPROC, entirely skip tiles with no primitives. */ +# define V3D_CLE_QCFG_ETFILT BIT(7) +/* If set with ETFILT, just write the clear color to tiles with no + * primitives. 
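The V3D_MASK(), V3D_SET_FIELD() and V3D_GET_FIELD() helpers at the top of this header do GENMASK-style field packing keyed off the *_MASK/*_SHIFT pairs. A minimal standalone sketch of the same pack/unpack idea, using the NCORES field of V3D_HUB_IDENT1 (bits 11:8) from above and a made-up register value:

/* Standalone sketch of the V3D_MASK/V3D_SET_FIELD/V3D_GET_FIELD idea
 * from the top of v3d_regs.h, in plain C instead of the kernel's
 * GENMASK/WARN_ON.  The field is NCORES (bits 11:8) of V3D_HUB_IDENT1
 * as defined above; the register value is made up.
 */
#include <stdint.h>
#include <stdio.h>

#define MASK(high, low) \
        ((uint32_t)(((1ull << ((high) - (low) + 1)) - 1) << (low)))

#define NCORES_MASK   MASK(11, 8)
#define NCORES_SHIFT  8

#define SET_FIELD(val, f)  (((uint32_t)(val) << f##_SHIFT) & f##_MASK)
#define GET_FIELD(word, f) (((word) & f##_MASK) >> f##_SHIFT)

int main(void)
{
        uint32_t ident1 = 0;

        ident1 |= SET_FIELD(2, NCORES);                     /* pack: 2 cores */
        printf("ident1 = 0x%08x\n", ident1);                /* 0x00000200   */
        printf("ncores = %u\n", GET_FIELD(ident1, NCORES)); /* 2            */
        return 0;
}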
+ */ +# define V3D_CLE_QCFG_ETPROC BIT(6) +# define V3D_CLE_QCFG_ETSFLUSH BIT(1) +# define V3D_CLE_QCFG_MCDIS BIT(0) + +#define V3D_PTB_BPCA 0x00300 +#define V3D_PTB_BPCS 0x00304 +#define V3D_PTB_BPOA 0x00308 +#define V3D_PTB_BPOS 0x0030c + +#define V3D_PTB_BXCF 0x00310 +# define V3D_PTB_BXCF_RWORDERDISA BIT(1) +# define V3D_PTB_BXCF_CLIPDISA BIT(0) + +#define V3D_GMP_STATUS 0x00800 +# define V3D_GMP_STATUS_GMPRST BIT(31) +# define V3D_GMP_STATUS_WR_COUNT_MASK V3D_MASK(30, 24) +# define V3D_GMP_STATUS_WR_COUNT_SHIFT 24 +# define V3D_GMP_STATUS_RD_COUNT_MASK V3D_MASK(22, 16) +# define V3D_GMP_STATUS_RD_COUNT_SHIFT 16 +# define V3D_GMP_STATUS_WR_ACTIVE BIT(5) +# define V3D_GMP_STATUS_RD_ACTIVE BIT(4) +# define V3D_GMP_STATUS_CFG_BUSY BIT(3) +# define V3D_GMP_STATUS_CNTOVF BIT(2) +# define V3D_GMP_STATUS_INVPROT BIT(1) +# define V3D_GMP_STATUS_VIO BIT(0) + +#define V3D_GMP_CFG 0x00804 +# define V3D_GMP_CFG_LBURSTEN BIT(3) +# define V3D_GMP_CFG_PGCRSEN BIT() +# define V3D_GMP_CFG_STOP_REQ BIT(1) +# define V3D_GMP_CFG_PROT_ENABLE BIT(0) + +#define V3D_GMP_VIO_ADDR 0x00808 +#define V3D_GMP_VIO_TYPE 0x0080c +#define V3D_GMP_TABLE_ADDR 0x00810 +#define V3D_GMP_CLEAR_LOAD 0x00814 +#define V3D_GMP_PRESERVE_LOAD 0x00818 +#define V3D_GMP_VALID_LINES 0x00820 + +#endif /* V3D_REGS_H */ diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c new file mode 100644 index 000000000000..b07bece9417d --- /dev/null +++ b/drivers/gpu/drm/v3d/v3d_sched.c @@ -0,0 +1,228 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2018 Broadcom */ + +/** + * DOC: Broadcom V3D scheduling + * + * The shared DRM GPU scheduler is used to coordinate submitting jobs + * to the hardware. Each DRM fd (roughly a client process) gets its + * own scheduler entity, which will process jobs in order. The GPU + * scheduler will round-robin between clients to submit the next job. + * + * For simplicity, and in order to keep latency low for interactive + * jobs when bulk background jobs are queued up, we submit a new job + * to the HW only when it has completed the last one, instead of + * filling up the CT[01]Q FIFOs with jobs. Similarly, we use + * v3d_job_dependency() to manage the dependency between bin and + * render, instead of having the clients submit jobs with using the + * HW's semaphores to interlock between them. + */ + +#include + +#include "v3d_drv.h" +#include "v3d_regs.h" +#include "v3d_trace.h" + +static struct v3d_job * +to_v3d_job(struct drm_sched_job *sched_job) +{ + return container_of(sched_job, struct v3d_job, base); +} + +static void +v3d_job_free(struct drm_sched_job *sched_job) +{ + struct v3d_job *job = to_v3d_job(sched_job); + + v3d_exec_put(job->exec); +} + +/** + * Returns the fences that the bin job depends on, one by one. + * v3d_job_run() won't be called until all of them have been signaled. + */ +static struct dma_fence * +v3d_job_dependency(struct drm_sched_job *sched_job, + struct drm_sched_entity *s_entity) +{ + struct v3d_job *job = to_v3d_job(sched_job); + struct v3d_exec_info *exec = job->exec; + enum v3d_queue q = job == &exec->bin ? V3D_BIN : V3D_RENDER; + struct dma_fence *fence; + + fence = job->in_fence; + if (fence) { + job->in_fence = NULL; + return fence; + } + + if (q == V3D_RENDER) { + /* If we had a bin job, the render job definitely depends on + * it. We first have to wait for bin to be scheduled, so that + * its done_fence is created. 
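v3d_job_dependency() hands the scheduler one unresolved fence per call, first the user-supplied in-fence and then, for a render job, the bin job's done-fence, returning NULL once there is nothing left to wait on. A plain-C sketch of that drain-one-dependency-per-call shape, with the fence reduced to an opaque stand-in rather than a dma_fence:

/* Plain-C sketch of the "hand back one dependency per call" shape of
 * v3d_job_dependency().  struct fence here is just a stand-in tag,
 * not the kernel's dma_fence.
 */
#include <stddef.h>
#include <stdio.h>

struct fence { const char *name; };

struct render_job {
        struct fence *in_fence;        /* user-supplied wait, may be NULL   */
        struct fence *bin_done_fence;  /* set once the bin job is scheduled */
};

/* Returns the next fence to wait on, or NULL when the job may run. */
static struct fence *job_dependency(struct render_job *job)
{
        struct fence *f;

        if ((f = job->in_fence)) {
                job->in_fence = NULL;
                return f;
        }
        if ((f = job->bin_done_fence)) {
                job->bin_done_fence = NULL;
                return f;
        }
        return NULL;
}

int main(void)
{
        struct fence in = { "in" }, bin = { "bin-done" };
        struct render_job job = { &in, &bin };
        struct fence *f;

        while ((f = job_dependency(&job)))
                printf("wait on %s\n", f->name);  /* "in", then "bin-done" */
        printf("ready to run\n");
        return 0;
}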
+ */ + fence = exec->bin_done_fence; + if (fence) { + exec->bin_done_fence = NULL; + return fence; + } + } + + /* XXX: Wait on a fence for switching the GMP if necessary, + * and then do so. + */ + + return fence; +} + +static struct dma_fence *v3d_job_run(struct drm_sched_job *sched_job) +{ + struct v3d_job *job = to_v3d_job(sched_job); + struct v3d_exec_info *exec = job->exec; + enum v3d_queue q = job == &exec->bin ? V3D_BIN : V3D_RENDER; + struct v3d_dev *v3d = exec->v3d; + struct drm_device *dev = &v3d->drm; + struct dma_fence *fence; + unsigned long irqflags; + + if (unlikely(job->base.s_fence->finished.error)) + return NULL; + + /* Lock required around bin_job update vs + * v3d_overflow_mem_work(). + */ + spin_lock_irqsave(&v3d->job_lock, irqflags); + if (q == V3D_BIN) { + v3d->bin_job = job->exec; + + /* Clear out the overflow allocation, so we don't + * reuse the overflow attached to a previous job. + */ + V3D_CORE_WRITE(0, V3D_PTB_BPOS, 0); + } else { + v3d->render_job = job->exec; + } + spin_unlock_irqrestore(&v3d->job_lock, irqflags); + + /* Can we avoid this flush when q==RENDER? We need to be + * careful of scheduling, though -- imagine job0 rendering to + * texture and job1 reading, and them being executed as bin0, + * bin1, render0, render1, so that render1's flush at bin time + * wasn't enough. + */ + v3d_invalidate_caches(v3d); + + fence = v3d_fence_create(v3d, q); + if (!fence) + return fence; + + if (job->done_fence) + dma_fence_put(job->done_fence); + job->done_fence = dma_fence_get(fence); + + trace_v3d_submit_cl(dev, q == V3D_RENDER, to_v3d_fence(fence)->seqno, + job->start, job->end); + + if (q == V3D_BIN) { + if (exec->qma) { + V3D_CORE_WRITE(0, V3D_CLE_CT0QMA, exec->qma); + V3D_CORE_WRITE(0, V3D_CLE_CT0QMS, exec->qms); + } + if (exec->qts) { + V3D_CORE_WRITE(0, V3D_CLE_CT0QTS, + V3D_CLE_CT0QTS_ENABLE | + exec->qts); + } + } else { + /* XXX: Set the QCFG */ + } + + /* Set the current and end address of the control list. + * Writing the end register is what starts the job. + */ + V3D_CORE_WRITE(0, V3D_CLE_CTNQBA(q), job->start); + V3D_CORE_WRITE(0, V3D_CLE_CTNQEA(q), job->end); + + return fence; +} + +static void +v3d_job_timedout(struct drm_sched_job *sched_job) +{ + struct v3d_job *job = to_v3d_job(sched_job); + struct v3d_exec_info *exec = job->exec; + struct v3d_dev *v3d = exec->v3d; + enum v3d_queue q; + + mutex_lock(&v3d->reset_lock); + + /* block scheduler */ + for (q = 0; q < V3D_MAX_QUEUES; q++) { + struct drm_gpu_scheduler *sched = &v3d->queue[q].sched; + + kthread_park(sched->thread); + drm_sched_hw_job_reset(sched, (sched_job->sched == sched ? + sched_job : NULL)); + } + + /* get the GPU back into the init state */ + v3d_reset(v3d); + + /* Unblock schedulers and restart their jobs. 
*/ + for (q = 0; q < V3D_MAX_QUEUES; q++) { + drm_sched_job_recovery(&v3d->queue[q].sched); + kthread_unpark(v3d->queue[q].sched.thread); + } + + mutex_unlock(&v3d->reset_lock); +} + +static const struct drm_sched_backend_ops v3d_sched_ops = { + .dependency = v3d_job_dependency, + .run_job = v3d_job_run, + .timedout_job = v3d_job_timedout, + .free_job = v3d_job_free +}; + +int +v3d_sched_init(struct v3d_dev *v3d) +{ + int hw_jobs_limit = 1; + int job_hang_limit = 0; + int hang_limit_ms = 500; + int ret; + + ret = drm_sched_init(&v3d->queue[V3D_BIN].sched, + &v3d_sched_ops, + hw_jobs_limit, job_hang_limit, + msecs_to_jiffies(hang_limit_ms), + "v3d_bin"); + if (ret) { + dev_err(v3d->dev, "Failed to create bin scheduler: %d.", ret); + return ret; + } + + ret = drm_sched_init(&v3d->queue[V3D_RENDER].sched, + &v3d_sched_ops, + hw_jobs_limit, job_hang_limit, + msecs_to_jiffies(hang_limit_ms), + "v3d_render"); + if (ret) { + dev_err(v3d->dev, "Failed to create render scheduler: %d.", + ret); + drm_sched_fini(&v3d->queue[V3D_BIN].sched); + return ret; + } + + return 0; +} + +void +v3d_sched_fini(struct v3d_dev *v3d) +{ + enum v3d_queue q; + + for (q = 0; q < V3D_MAX_QUEUES; q++) + drm_sched_fini(&v3d->queue[q].sched); +} diff --git a/drivers/gpu/drm/v3d/v3d_trace.h b/drivers/gpu/drm/v3d/v3d_trace.h new file mode 100644 index 000000000000..85dd351e1e09 --- /dev/null +++ b/drivers/gpu/drm/v3d/v3d_trace.h @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2015-2018 Broadcom */ + +#if !defined(_V3D_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) +#define _V3D_TRACE_H_ + +#include +#include +#include + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM v3d +#define TRACE_INCLUDE_FILE v3d_trace + +TRACE_EVENT(v3d_submit_cl, + TP_PROTO(struct drm_device *dev, bool is_render, + uint64_t seqno, + u32 ctnqba, u32 ctnqea), + TP_ARGS(dev, is_render, seqno, ctnqba, ctnqea), + + TP_STRUCT__entry( + __field(u32, dev) + __field(bool, is_render) + __field(u64, seqno) + __field(u32, ctnqba) + __field(u32, ctnqea) + ), + + TP_fast_assign( + __entry->dev = dev->primary->index; + __entry->is_render = is_render; + __entry->seqno = seqno; + __entry->ctnqba = ctnqba; + __entry->ctnqea = ctnqea; + ), + + TP_printk("dev=%u, %s, seqno=%llu, 0x%08x..0x%08x", + __entry->dev, + __entry->is_render ? "RCL" : "BCL", + __entry->seqno, + __entry->ctnqba, + __entry->ctnqea) +); + +TRACE_EVENT(v3d_reset_begin, + TP_PROTO(struct drm_device *dev), + TP_ARGS(dev), + + TP_STRUCT__entry( + __field(u32, dev) + ), + + TP_fast_assign( + __entry->dev = dev->primary->index; + ), + + TP_printk("dev=%u", + __entry->dev) +); + +TRACE_EVENT(v3d_reset_end, + TP_PROTO(struct drm_device *dev), + TP_ARGS(dev), + + TP_STRUCT__entry( + __field(u32, dev) + ), + + TP_fast_assign( + __entry->dev = dev->primary->index; + ), + + TP_printk("dev=%u", + __entry->dev) +); + +#endif /* _V3D_TRACE_H_ */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . 
+#include diff --git a/drivers/gpu/drm/v3d/v3d_trace_points.c b/drivers/gpu/drm/v3d/v3d_trace_points.c new file mode 100644 index 000000000000..482922d7c7e1 --- /dev/null +++ b/drivers/gpu/drm/v3d/v3d_trace_points.c @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2015 Broadcom */ + +#include "v3d_drv.h" + +#ifndef __CHECKER__ +#define CREATE_TRACE_POINTS +#include "v3d_trace.h" +#endif diff --git a/include/uapi/drm/v3d_drm.h b/include/uapi/drm/v3d_drm.h new file mode 100644 index 000000000000..7b6627783608 --- /dev/null +++ b/include/uapi/drm/v3d_drm.h @@ -0,0 +1,194 @@ +/* + * Copyright © 2014-2018 Broadcom + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifndef _V3D_DRM_H_ +#define _V3D_DRM_H_ + +#include "drm.h" + +#if defined(__cplusplus) +extern "C" { +#endif + +#define DRM_V3D_SUBMIT_CL 0x00 +#define DRM_V3D_WAIT_BO 0x01 +#define DRM_V3D_CREATE_BO 0x02 +#define DRM_V3D_MMAP_BO 0x03 +#define DRM_V3D_GET_PARAM 0x04 +#define DRM_V3D_GET_BO_OFFSET 0x05 + +#define DRM_IOCTL_V3D_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CL, struct drm_v3d_submit_cl) +#define DRM_IOCTL_V3D_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_WAIT_BO, struct drm_v3d_wait_bo) +#define DRM_IOCTL_V3D_CREATE_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_CREATE_BO, struct drm_v3d_create_bo) +#define DRM_IOCTL_V3D_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_MMAP_BO, struct drm_v3d_mmap_bo) +#define DRM_IOCTL_V3D_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_GET_PARAM, struct drm_v3d_get_param) +#define DRM_IOCTL_V3D_GET_BO_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_GET_BO_OFFSET, struct drm_v3d_get_bo_offset) + +/** + * struct drm_v3d_submit_cl - ioctl argument for submitting commands to the 3D + * engine. + * + * This asks the kernel to have the GPU execute an optional binner + * command list, and a render command list. + */ +struct drm_v3d_submit_cl { + /* Pointer to the binner command list. + * + * This is the first set of commands executed, which runs the + * coordinate shader to determine where primitives land on the screen, + * then writes out the state updates and draw calls necessary per tile + * to the tile allocation BO. + */ + __u32 bcl_start; + + /** End address of the BCL (first byte after the BCL) */ + __u32 bcl_end; + + /* Offset of the render command list. 
+ * + * This is the second set of commands executed, which will either + * execute the tiles that have been set up by the BCL, or a fixed set + * of tiles (in the case of RCL-only blits). + */ + __u32 rcl_start; + + /** End address of the RCL (first byte after the RCL) */ + __u32 rcl_end; + + /** An optional sync object to wait on before starting the BCL. */ + __u32 in_sync_bcl; + /** An optional sync object to wait on before starting the RCL. */ + __u32 in_sync_rcl; + /** An optional sync object to place the completion fence in. */ + __u32 out_sync; + + /* Offset of the tile alloc memory + * + * This is optional on V3D 3.3 (where the CL can set the value) but + * required on V3D 4.1. + */ + __u32 qma; + + /** Size of the tile alloc memory. */ + __u32 qms; + + /** Offset of the tile state data array. */ + __u32 qts; + + /* Pointer to a u32 array of the BOs that are referenced by the job. + */ + __u64 bo_handles; + + /* Number of BO handles passed in (size is that times 4). */ + __u32 bo_handle_count; + + /* Pad, must be zero-filled. */ + __u32 pad; +}; + +/** + * struct drm_v3d_wait_bo - ioctl argument for waiting for + * completion of the last DRM_V3D_SUBMIT_CL on a BO. + * + * This is useful for cases where multiple processes might be + * rendering to a BO and you want to wait for all rendering to be + * completed. + */ +struct drm_v3d_wait_bo { + __u32 handle; + __u32 pad; + __u64 timeout_ns; +}; + +/** + * struct drm_v3d_create_bo - ioctl argument for creating V3D BOs. + * + * There are currently no values for the flags argument, but it may be + * used in a future extension. + */ +struct drm_v3d_create_bo { + __u32 size; + __u32 flags; + /** Returned GEM handle for the BO. */ + __u32 handle; + /** + * Returned offset for the BO in the V3D address space. This offset + * is private to the DRM fd and is valid for the lifetime of the GEM + * handle. + * + * This offset value will always be nonzero, since various HW + * units treat 0 specially. + */ + __u32 offset; +}; + +/** + * struct drm_v3d_mmap_bo - ioctl argument for mapping V3D BOs. + * + * This doesn't actually perform an mmap. Instead, it returns the + * offset you need to use in an mmap on the DRM device node. This + * means that tools like valgrind end up knowing about the mapped + * memory. + * + * There are currently no values for the flags argument, but it may be + * used in a future extension. + */ +struct drm_v3d_mmap_bo { + /** Handle for the object being mapped. */ + __u32 handle; + __u32 flags; + /** offset into the drm node to use for subsequent mmap call. */ + __u64 offset; +}; + +enum drm_v3d_param { + DRM_V3D_PARAM_V3D_UIFCFG, + DRM_V3D_PARAM_V3D_HUB_IDENT1, + DRM_V3D_PARAM_V3D_HUB_IDENT2, + DRM_V3D_PARAM_V3D_HUB_IDENT3, + DRM_V3D_PARAM_V3D_CORE0_IDENT0, + DRM_V3D_PARAM_V3D_CORE0_IDENT1, + DRM_V3D_PARAM_V3D_CORE0_IDENT2, +}; + +struct drm_v3d_get_param { + __u32 param; + __u32 pad; + __u64 value; +}; + +/** + * Returns the offset for the BO in the V3D address space for this DRM fd. + * This is the same value returned by drm_v3d_create_bo, if that was called + * from this DRM fd. + */ +struct drm_v3d_get_bo_offset { + __u32 handle; + __u32 offset; +}; + +#if defined(__cplusplus) +} +#endif + +#endif /* _V3D_DRM_H_ */ -- cgit v1.2.3 From 7bd2d2ecedff26b3a87b026b98acc4b7110c9ee6 Mon Sep 17 00:00:00 2001 From: Peter Rosin Date: Thu, 26 Apr 2018 23:36:44 +0200 Subject: drm/bridge: adv7511: fix spelling of driver name in Kconfig Could perhaps prevent some confusion. 
Signed-off-by: Peter Rosin Reviewed-by: Laurent Pinchart Signed-off-by: Archit Taneja Link: https://patchwork.freedesktop.org/patch/msgid/20180426213644.29318-1-peda@axentia.se --- drivers/gpu/drm/bridge/adv7511/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/bridge/adv7511/Kconfig b/drivers/gpu/drm/bridge/adv7511/Kconfig index 592b9d2ec034..944e440c4fde 100644 --- a/drivers/gpu/drm/bridge/adv7511/Kconfig +++ b/drivers/gpu/drm/bridge/adv7511/Kconfig @@ -1,5 +1,5 @@ config DRM_I2C_ADV7511 - tristate "AV7511 encoder" + tristate "ADV7511 encoder" depends on OF select DRM_KMS_HELPER select REGMAP_I2C -- cgit v1.2.3 From 5f27314141757794378abb2907fb7116947d644b Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Wed, 11 Apr 2018 16:33:42 +0800 Subject: gpu: drm: bridge: adv7511: Replace mdelay with usleep_range in adv7511_probe adv7511_probe() is never called in atomic context. This function is only set as ".probe" in struct i2c_driver. Despite never getting called from atomic context, adv7511_probe() calls mdelay() to busily wait. This is not necessary and can be replaced with usleep_range() to avoid busy waiting. This is found by a static analysis tool named DCNS written by myself. And I also manually check it. Signed-off-by: Jia-Ju Bai Reviewed-by: Laurent Pinchart Signed-off-by: Archit Taneja Link: https://patchwork.freedesktop.org/patch/msgid/1523435622-4329-1-git-send-email-baijiaju1990@gmail.com --- drivers/gpu/drm/bridge/adv7511/adv7511_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index 2614cea538e2..73021b388e12 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c @@ -1127,7 +1127,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id) } if (adv7511->gpio_pd) { - mdelay(5); + usleep_range(5000, 6000); gpiod_set_value_cansleep(adv7511->gpio_pd, 0); } -- cgit v1.2.3 From 6f96f2000ac27b0f5f769b0bc2f0440ebfa1c3a3 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Thu, 3 May 2018 13:22:13 +0200 Subject: drm/rect: Round above 1 << 16 upwards to correct scale calculation functions. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When calculating limits we want to be as pessimistic as possible, so we have to explicitly say whether we want to round up or down to accurately calculate whether we are below min_scale or above max_scale. Signed-off-by: Maarten Lankhorst [mlankhorst: Fix wording in documentation. (Ville)] Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20180503112217.37292-2-maarten.lankhorst@linux.intel.com --- drivers/gpu/drm/drm_rect.c | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/drm_rect.c b/drivers/gpu/drm/drm_rect.c index a3783ecea297..a8e934795c7d 100644 --- a/drivers/gpu/drm/drm_rect.c +++ b/drivers/gpu/drm/drm_rect.c @@ -106,7 +106,10 @@ static int drm_calc_scale(int src, int dst) if (dst == 0) return 0; - scale = src / dst; + if (src > (dst << 16)) + return DIV_ROUND_UP(src, dst); + else + scale = src / dst; return scale; } @@ -121,6 +124,10 @@ static int drm_calc_scale(int src, int dst) * Calculate the horizontal scaling factor as * (@src width) / (@dst width). * + * If the scale is below 1 << 16, round down. 
If the scale is above + * 1 << 16, round up. This will calculate the scale with the most + * pessimistic limit calculation. + * * RETURNS: * The horizontal scaling factor, or errno of out of limits. */ @@ -152,6 +159,10 @@ EXPORT_SYMBOL(drm_rect_calc_hscale); * Calculate the vertical scaling factor as * (@src height) / (@dst height). * + * If the scale is below 1 << 16, round down. If the scale is above + * 1 << 16, round up. This will calculate the scale with the most + * pessimistic limit calculation. + * * RETURNS: * The vertical scaling factor, or errno of out of limits. */ @@ -189,6 +200,10 @@ EXPORT_SYMBOL(drm_rect_calc_vscale); * If the calculated scaling factor is above @max_vscale, * decrease the height of rectangle @src to compensate. * + * If the scale is below 1 << 16, round down. If the scale is above + * 1 << 16, round up. This will calculate the scale with the most + * pessimistic limit calculation. + * * RETURNS: * The horizontal scaling factor. */ @@ -239,6 +254,10 @@ EXPORT_SYMBOL(drm_rect_calc_hscale_relaxed); * If the calculated scaling factor is above @max_vscale, * decrease the height of rectangle @src to compensate. * + * If the scale is below 1 << 16, round down. If the scale is above + * 1 << 16, round up. This will calculate the scale with the most + * pessimistic limit calculation. + * * RETURNS: * The vertical scaling factor. */ -- cgit v1.2.3 From f96bdf564f3e7511aecdd4c35cc18ac5e0750a2f Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Thu, 3 May 2018 13:22:14 +0200 Subject: drm/rect: Handle rounding errors in drm_rect_clip_scaled, v3. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of relying on a scale which may increase rounding errors, clip src by doing: src * (dst - clip) / dst and rounding the result away from 1, so the new coordinates get closer to 1. We won't need to fix up with a magic macro afterwards, because our scaling factor will never go to the other side of 1. Changes since v1: - Adjust dst immediately, else drm_rect_width/height on dst gives bogus results. Change since v2: - Get rid of macros and use 64-bits math. Signed-off-by: Maarten Lankhorst [mlankhorst: Add Villes comment, and rename newsrc to tmp. 
(Ville)] Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20180503112217.37292-3-maarten.lankhorst@linux.intel.com --- drivers/gpu/drm/drm_atomic_helper.c | 2 +- drivers/gpu/drm/drm_rect.c | 49 +++++++++++++++++++++++++++---------- drivers/gpu/drm/i915/intel_sprite.c | 2 +- include/drm/drm_rect.h | 3 +-- 4 files changed, 39 insertions(+), 17 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 9cb2209f6fc8..130da5195f3b 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -766,7 +766,7 @@ int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state, if (crtc_state->enable) drm_mode_get_hv_timing(&crtc_state->mode, &clip.x2, &clip.y2); - plane_state->visible = drm_rect_clip_scaled(src, dst, &clip, hscale, vscale); + plane_state->visible = drm_rect_clip_scaled(src, dst, &clip); drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation); diff --git a/drivers/gpu/drm/drm_rect.c b/drivers/gpu/drm/drm_rect.c index a8e934795c7d..8c057829b804 100644 --- a/drivers/gpu/drm/drm_rect.c +++ b/drivers/gpu/drm/drm_rect.c @@ -50,13 +50,25 @@ bool drm_rect_intersect(struct drm_rect *r1, const struct drm_rect *r2) } EXPORT_SYMBOL(drm_rect_intersect); +static u32 clip_scaled(u32 src, u32 dst, u32 clip) +{ + u64 tmp = mul_u32_u32(src, dst - clip); + + /* + * Round toward 1.0 when clipping so that we don't accidentally + * change upscaling to downscaling or vice versa. + */ + if (src < (dst << 16)) + return DIV_ROUND_UP_ULL(tmp, dst); + else + return DIV_ROUND_DOWN_ULL(tmp, dst); +} + /** * drm_rect_clip_scaled - perform a scaled clip operation * @src: source window rectangle * @dst: destination window rectangle * @clip: clip rectangle - * @hscale: horizontal scaling factor - * @vscale: vertical scaling factor * * Clip rectangle @dst by rectangle @clip. Clip rectangle @src by the * same amounts multiplied by @hscale and @vscale. 
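clip_scaled() above shrinks a 16.16 source span by the clipped fraction of the destination and rounds toward a 1:1 scale so clipping can never flip upscaling into downscaling. A standalone sketch of that arithmetic, assuming DIV_ROUND_UP_ULL()/DIV_ROUND_DOWN_ULL() behave as ceiling and floor division, fed with the same numbers the "Testing rounding errors" cases in the selftest later in this series check:

/* Standalone sketch of the clip_scaled() arithmetic: shrink a 16.16
 * source span by the clipped fraction of the destination and round
 * toward a 1:1 scale.  Inputs match the "Testing rounding errors"
 * cases in the selftest added later in this series.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t clip_scaled(uint32_t src, uint32_t dst, uint32_t clip)
{
        uint64_t tmp = (uint64_t)src * (dst - clip);

        /* Round toward 1.0: up when upscaling, down when downscaling. */
        if (src < ((uint64_t)dst << 16))
                return (uint32_t)((tmp + dst - 1) / dst);  /* ceiling */
        else
                return (uint32_t)(tmp / dst);              /* floor   */
}

int main(void)
{
        /* A 4-pixel destination clipped down to 2 pixels. */
        printf("0x%x\n", clip_scaled(0x40001, 4, 2)); /* 0x20000: rounded down */
        printf("0x%x\n", clip_scaled(0x3ffff, 4, 2)); /* 0x20000: rounded up,
                                                       * not 0x1ffff          */
        return 0;
}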
@@ -66,33 +78,44 @@ EXPORT_SYMBOL(drm_rect_intersect); * %false otherwise */ bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst, - const struct drm_rect *clip, - int hscale, int vscale) + const struct drm_rect *clip) { int diff; diff = clip->x1 - dst->x1; if (diff > 0) { - int64_t tmp = src->x1 + (int64_t) diff * hscale; - src->x1 = clamp_t(int64_t, tmp, INT_MIN, INT_MAX); + u32 new_src_w = clip_scaled(drm_rect_width(src), + drm_rect_width(dst), diff); + + src->x1 = clamp_t(int64_t, src->x2 - new_src_w, INT_MIN, INT_MAX); + dst->x1 = clip->x1; } diff = clip->y1 - dst->y1; if (diff > 0) { - int64_t tmp = src->y1 + (int64_t) diff * vscale; - src->y1 = clamp_t(int64_t, tmp, INT_MIN, INT_MAX); + u32 new_src_h = clip_scaled(drm_rect_height(src), + drm_rect_height(dst), diff); + + src->y1 = clamp_t(int64_t, src->y2 - new_src_h, INT_MIN, INT_MAX); + dst->y1 = clip->y1; } diff = dst->x2 - clip->x2; if (diff > 0) { - int64_t tmp = src->x2 - (int64_t) diff * hscale; - src->x2 = clamp_t(int64_t, tmp, INT_MIN, INT_MAX); + u32 new_src_w = clip_scaled(drm_rect_width(src), + drm_rect_width(dst), diff); + + src->x2 = clamp_t(int64_t, src->x1 + new_src_w, INT_MIN, INT_MAX); + dst->x2 = clip->x2; } diff = dst->y2 - clip->y2; if (diff > 0) { - int64_t tmp = src->y2 - (int64_t) diff * vscale; - src->y2 = clamp_t(int64_t, tmp, INT_MIN, INT_MAX); + u32 new_src_h = clip_scaled(drm_rect_height(src), + drm_rect_height(dst), diff); + + src->y2 = clamp_t(int64_t, src->y1 + new_src_h, INT_MIN, INT_MAX); + dst->y2 = clip->y2; } - return drm_rect_intersect(dst, clip); + return drm_rect_visible(dst); } EXPORT_SYMBOL(drm_rect_clip_scaled); diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index dbdcf85032df..e17c26a1cff1 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -1003,7 +1003,7 @@ intel_check_sprite_plane(struct intel_plane *plane, drm_mode_get_hv_timing(&crtc_state->base.mode, &clip.x2, &clip.y2); - state->base.visible = drm_rect_clip_scaled(src, dst, &clip, hscale, vscale); + state->base.visible = drm_rect_clip_scaled(src, dst, &clip); crtc_x = dst->x1; crtc_y = dst->y1; diff --git a/include/drm/drm_rect.h b/include/drm/drm_rect.h index 44bc122b9ee0..6c54544a4be7 100644 --- a/include/drm/drm_rect.h +++ b/include/drm/drm_rect.h @@ -175,8 +175,7 @@ static inline bool drm_rect_equals(const struct drm_rect *r1, bool drm_rect_intersect(struct drm_rect *r, const struct drm_rect *clip); bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst, - const struct drm_rect *clip, - int hscale, int vscale); + const struct drm_rect *clip); int drm_rect_calc_hscale(const struct drm_rect *src, const struct drm_rect *dst, int min_hscale, int max_hscale); -- cgit v1.2.3 From 9c1659ebe77d7e111dac4bdc7e082136d223ffb5 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Thu, 3 May 2018 13:22:15 +0200 Subject: drm/i915: Do not adjust scale when out of bounds, v2. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With the previous patch drm_atomic_helper_check_plane_state correctly calculates clipping and the xf86-video-intel ddx is fixed to fall back to GPU correctly when SetPlane fails, we can remove the hack where we try to pan/zoom when out of min/max scaling range. This was already poor behavior where the screen didn't show what was requested, and now instead we reject it outright. This simplifies check_sprite_plane a lot. Changes since v1: - Set crtc_h to the height correctly. 
- Reject < 3x3 rectangles instead of making them invisible for Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20180503112217.37292-4-maarten.lankhorst@linux.intel.com Acked-by: Jani Nikula --- drivers/gpu/drm/i915/intel_sprite.c | 144 +++++++++--------------------------- 1 file changed, 35 insertions(+), 109 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index e17c26a1cff1..344228b640b9 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -935,21 +935,11 @@ intel_check_sprite_plane(struct intel_plane *plane, struct drm_i915_private *dev_priv = to_i915(plane->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_framebuffer *fb = state->base.fb; - int crtc_x, crtc_y; - unsigned int crtc_w, crtc_h; - uint32_t src_x, src_y, src_w, src_h; - struct drm_rect *src = &state->base.src; - struct drm_rect *dst = &state->base.dst; - struct drm_rect clip = {}; int max_stride = INTEL_GEN(dev_priv) >= 9 ? 32768 : 16384; - int hscale, vscale; int max_scale, min_scale; bool can_scale; int ret; - *src = drm_plane_state_src(&state->base); - *dst = drm_plane_state_dest(&state->base); - if (!fb) { state->base.visible = false; return 0; @@ -985,64 +975,19 @@ intel_check_sprite_plane(struct intel_plane *plane, min_scale = plane->can_scale ? 1 : (1 << 16); } - /* - * FIXME the following code does a bunch of fuzzy adjustments to the - * coordinates and sizes. We probably need some way to decide whether - * more strict checking should be done instead. - */ - drm_rect_rotate(src, fb->width << 16, fb->height << 16, - state->base.rotation); - - hscale = drm_rect_calc_hscale_relaxed(src, dst, min_scale, max_scale); - BUG_ON(hscale < 0); - - vscale = drm_rect_calc_vscale_relaxed(src, dst, min_scale, max_scale); - BUG_ON(vscale < 0); - - if (crtc_state->base.enable) - drm_mode_get_hv_timing(&crtc_state->base.mode, - &clip.x2, &clip.y2); - - state->base.visible = drm_rect_clip_scaled(src, dst, &clip); - - crtc_x = dst->x1; - crtc_y = dst->y1; - crtc_w = drm_rect_width(dst); - crtc_h = drm_rect_height(dst); + ret = drm_atomic_helper_check_plane_state(&state->base, + &crtc_state->base, + min_scale, max_scale, + true, true); + if (ret) + return ret; if (state->base.visible) { - /* check again in case clipping clamped the results */ - hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale); - if (hscale < 0) { - DRM_DEBUG_KMS("Horizontal scaling factor out of limits\n"); - drm_rect_debug_print("src: ", src, true); - drm_rect_debug_print("dst: ", dst, false); - - return hscale; - } - - vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale); - if (vscale < 0) { - DRM_DEBUG_KMS("Vertical scaling factor out of limits\n"); - drm_rect_debug_print("src: ", src, true); - drm_rect_debug_print("dst: ", dst, false); - - return vscale; - } - - /* Make the source viewport size an exact multiple of the scaling factors. 
*/ - drm_rect_adjust_size(src, - drm_rect_width(dst) * hscale - drm_rect_width(src), - drm_rect_height(dst) * vscale - drm_rect_height(src)); - - drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, - state->base.rotation); - - /* sanity check to make sure the src viewport wasn't enlarged */ - WARN_ON(src->x1 < (int) state->base.src_x || - src->y1 < (int) state->base.src_y || - src->x2 > (int) state->base.src_x + state->base.src_w || - src->y2 > (int) state->base.src_y + state->base.src_h); + struct drm_rect *src = &state->base.src; + struct drm_rect *dst = &state->base.dst; + unsigned int crtc_w = drm_rect_width(dst); + unsigned int crtc_h = drm_rect_height(dst); + uint32_t src_x, src_y, src_w, src_h; /* * Hardware doesn't handle subpixel coordinates. @@ -1055,58 +1000,39 @@ intel_check_sprite_plane(struct intel_plane *plane, src_y = src->y1 >> 16; src_h = drm_rect_height(src) >> 16; - if (intel_format_is_yuv(fb->format->format)) { - src_x &= ~1; - src_w &= ~1; - - /* - * Must keep src and dst the - * same if we can't scale. - */ - if (!can_scale) - crtc_w &= ~1; + src->x1 = src_x << 16; + src->x2 = (src_x + src_w) << 16; + src->y1 = src_y << 16; + src->y2 = (src_y + src_h) << 16; - if (crtc_w == 0) - state->base.visible = false; + if (intel_format_is_yuv(fb->format->format) && + (src_x % 2 || src_w % 2)) { + DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of 2 for YUV planes\n", + src_x, src_w); + return -EINVAL; } - } - /* Check size restrictions when scaling */ - if (state->base.visible && (src_w != crtc_w || src_h != crtc_h)) { - unsigned int width_bytes; - int cpp = fb->format->cpp[0]; + /* Check size restrictions when scaling */ + if (src_w != crtc_w || src_h != crtc_h) { + unsigned int width_bytes; + int cpp = fb->format->cpp[0]; - WARN_ON(!can_scale); + WARN_ON(!can_scale); - /* FIXME interlacing min height is 6 */ + width_bytes = ((src_x * cpp) & 63) + src_w * cpp; - if (crtc_w < 3 || crtc_h < 3) - state->base.visible = false; - - if (src_w < 3 || src_h < 3) - state->base.visible = false; - - width_bytes = ((src_x * cpp) & 63) + src_w * cpp; - - if (INTEL_GEN(dev_priv) < 9 && (src_w > 2048 || src_h > 2048 || - width_bytes > 4096 || fb->pitches[0] > 4096)) { - DRM_DEBUG_KMS("Source dimensions exceed hardware limits\n"); - return -EINVAL; + /* FIXME interlacing min height is 6 */ + if (INTEL_GEN(dev_priv) < 9 && ( + src_w < 3 || src_h < 3 || + src_w > 2048 || src_h > 2048 || + crtc_w < 3 || crtc_h < 3 || + width_bytes > 4096 || fb->pitches[0] > 4096)) { + DRM_DEBUG_KMS("Source dimensions exceed hardware limits\n"); + return -EINVAL; + } } } - if (state->base.visible) { - src->x1 = src_x << 16; - src->x2 = (src_x + src_w) << 16; - src->y1 = src_y << 16; - src->y2 = (src_y + src_h) << 16; - } - - dst->x1 = crtc_x; - dst->x2 = crtc_x + crtc_w; - dst->y1 = crtc_y; - dst->y2 = crtc_y + crtc_h; - if (INTEL_GEN(dev_priv) >= 9) { ret = skl_check_plane_surface(crtc_state, state); if (ret) -- cgit v1.2.3 From 34b13e5e4641c0e9e0aad471a6d8dfb7999276f1 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Thu, 3 May 2018 13:22:16 +0200 Subject: drm/selftests: Rename the Kconfig option to CONFIG_DRM_DEBUG_SELFTEST MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We want to add more DRM selftests, and there's not much point in having a Kconfig option for every single one of them, so make a generic one. 
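The plane-state selftest added in the following patch drives drm_atomic_helper_check_plane_state() with 16.16 fixed-point scale limits such as 0x8000, 0x10001 and 0x20000. A tiny standalone sketch of how such values relate to source and destination sizes (scale = src/dst in 16.16, so values below 1 << 16 mean upscaling), plain C with made-up sizes:

/* Quick illustration of the 16.16 fixed-point scale values used by
 * the plane-state selftest below: scale = source size / destination
 * size, so 0x8000 is 2x upscaling, 0x10000 is 1:1 and 0x20000 is 2x
 * downscaling.  Plain C, no DRM headers.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t calc_scale(uint32_t src, uint32_t dst)
{
        return (uint32_t)(((uint64_t)src << 16) / dst);
}

int main(void)
{
        printf("512 -> 1024:  0x%05x\n", calc_scale(512, 1024));  /* 0x08000 */
        printf("1024 -> 1024: 0x%05x\n", calc_scale(1024, 1024)); /* 0x10000 */
        printf("2048 -> 1024: 0x%05x\n", calc_scale(2048, 1024)); /* 0x20000 */
        return 0;
}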
Signed-off-by: Maarten Lankhorst Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20180503112217.37292-5-maarten.lankhorst@linux.intel.com [mlankhorst: Fix i915/Kconfig.debug (ickle)] Acked-by: Chris Wilson Acked-by: Daniel Vetter --- drivers/gpu/drm/Kconfig | 8 ++++---- drivers/gpu/drm/Makefile | 2 +- drivers/gpu/drm/i915/Kconfig.debug | 2 +- drivers/gpu/drm/selftests/Makefile | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 1c73a455fdb1..aa0b0d830beb 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -49,16 +49,16 @@ config DRM_DEBUG_MM If in doubt, say "N". -config DRM_DEBUG_MM_SELFTEST - tristate "kselftests for DRM range manager (struct drm_mm)" +config DRM_DEBUG_SELFTEST + tristate "kselftests for DRM" depends on DRM depends on DEBUG_KERNEL select PRIME_NUMBERS select DRM_LIB_RANDOM default n help - This option provides a kernel module that can be used to test - the DRM range manager (drm_mm) and its API. This option is not + This option provides kernel modules that can be used to run + various selftests on parts of the DRM api. This option is not useful for distributions or general kernels, but only for kernel developers working on DRM and associated drivers. diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 7a401edd8761..ef9f3dab287f 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -43,7 +43,7 @@ drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o drm_kms_helper-$(CONFIG_DRM_DP_AUX_CHARDEV) += drm_dp_aux_dev.o obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o -obj-$(CONFIG_DRM_DEBUG_MM_SELFTEST) += selftests/ +obj-$(CONFIG_DRM_DEBUG_SELFTEST) += selftests/ obj-$(CONFIG_DRM) += drm.o obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug index 108d21f34777..8c7972df9f0f 100644 --- a/drivers/gpu/drm/i915/Kconfig.debug +++ b/drivers/gpu/drm/i915/Kconfig.debug @@ -25,7 +25,7 @@ config DRM_I915_DEBUG select X86_MSR # used by igt/pm_rpm select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks) select DRM_DEBUG_MM if DRM=y - select DRM_DEBUG_MM_SELFTEST + select DRM_DEBUG_SELFTEST select SW_SYNC # signaling validation framework (igt/syncobj*) select DRM_I915_SW_FENCE_DEBUG_OBJECTS select DRM_I915_SELFTEST diff --git a/drivers/gpu/drm/selftests/Makefile b/drivers/gpu/drm/selftests/Makefile index 4aebfc7f27d4..f7dd66e859a9 100644 --- a/drivers/gpu/drm/selftests/Makefile +++ b/drivers/gpu/drm/selftests/Makefile @@ -1 +1 @@ -obj-$(CONFIG_DRM_DEBUG_MM_SELFTEST) += test-drm_mm.o +obj-$(CONFIG_DRM_DEBUG_SELFTEST) += test-drm_mm.o -- cgit v1.2.3 From 7420e04963587dc8f6d4e8b7e79b3ad7ab5c5300 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Thu, 3 May 2018 13:22:17 +0200 Subject: drm/selftests: Add drm helper selftest MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Maarten Lankhorst Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20180503112217.37292-6-maarten.lankhorst@linux.intel.com Acked-by: Daniel Vetter --- drivers/gpu/drm/Kconfig | 1 + drivers/gpu/drm/selftests/Makefile | 2 +- drivers/gpu/drm/selftests/drm_helper_selftests.h | 9 + drivers/gpu/drm/selftests/test-drm-helper.c | 247 +++++++++++++++++++++++ 4 files changed, 258 insertions(+), 1 deletion(-) create mode 100644 
drivers/gpu/drm/selftests/drm_helper_selftests.h create mode 100644 drivers/gpu/drm/selftests/test-drm-helper.c (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index aa0b0d830beb..2a72d2feb76d 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -55,6 +55,7 @@ config DRM_DEBUG_SELFTEST depends on DEBUG_KERNEL select PRIME_NUMBERS select DRM_LIB_RANDOM + select DRM_KMS_HELPER default n help This option provides kernel modules that can be used to run diff --git a/drivers/gpu/drm/selftests/Makefile b/drivers/gpu/drm/selftests/Makefile index f7dd66e859a9..9fc349fa18e9 100644 --- a/drivers/gpu/drm/selftests/Makefile +++ b/drivers/gpu/drm/selftests/Makefile @@ -1 +1 @@ -obj-$(CONFIG_DRM_DEBUG_SELFTEST) += test-drm_mm.o +obj-$(CONFIG_DRM_DEBUG_SELFTEST) += test-drm_mm.o test-drm-helper.o diff --git a/drivers/gpu/drm/selftests/drm_helper_selftests.h b/drivers/gpu/drm/selftests/drm_helper_selftests.h new file mode 100644 index 000000000000..9771290ed228 --- /dev/null +++ b/drivers/gpu/drm/selftests/drm_helper_selftests.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* List each unit test as selftest(name, function) + * + * The name is used as both an enum and expanded as igt__name to create + * a module parameter. It must be unique and legal for a C identifier. + * + * Tests are executed in order by igt/drm_selftests_helper + */ +selftest(check_plane_state, igt_check_plane_state) diff --git a/drivers/gpu/drm/selftests/test-drm-helper.c b/drivers/gpu/drm/selftests/test-drm-helper.c new file mode 100644 index 000000000000..a015712b43e8 --- /dev/null +++ b/drivers/gpu/drm/selftests/test-drm-helper.c @@ -0,0 +1,247 @@ +/* + * Test cases for the drm_kms_helper functions + */ + +#define pr_fmt(fmt) "drm_kms_helper: " fmt + +#include + +#include +#include +#include + +#define TESTS "drm_helper_selftests.h" +#include "drm_selftest.h" + +#define FAIL(test, msg, ...) 
\ + do { \ + if (test) { \ + pr_err("%s/%u: " msg, __FUNCTION__, __LINE__, ##__VA_ARGS__); \ + return -EINVAL; \ + } \ + } while (0) + +#define FAIL_ON(x) FAIL((x), "%s", "FAIL_ON(" __stringify(x) ")\n") + +static void set_src(struct drm_plane_state *plane_state, + unsigned src_x, unsigned src_y, + unsigned src_w, unsigned src_h) +{ + plane_state->src_x = src_x; + plane_state->src_y = src_y; + plane_state->src_w = src_w; + plane_state->src_h = src_h; +} + +static bool check_src_eq(struct drm_plane_state *plane_state, + unsigned src_x, unsigned src_y, + unsigned src_w, unsigned src_h) +{ + if (plane_state->src.x1 < 0) { + pr_err("src x coordinate %x should never be below 0.\n", plane_state->src.x1); + drm_rect_debug_print("src: ", &plane_state->src, true); + return false; + } + if (plane_state->src.y1 < 0) { + pr_err("src y coordinate %x should never be below 0.\n", plane_state->src.y1); + drm_rect_debug_print("src: ", &plane_state->src, true); + return false; + } + + if (plane_state->src.x1 != src_x || + plane_state->src.y1 != src_y || + drm_rect_width(&plane_state->src) != src_w || + drm_rect_height(&plane_state->src) != src_h) { + drm_rect_debug_print("src: ", &plane_state->src, true); + return false; + } + + return true; +} + +static void set_crtc(struct drm_plane_state *plane_state, + int crtc_x, int crtc_y, + unsigned crtc_w, unsigned crtc_h) +{ + plane_state->crtc_x = crtc_x; + plane_state->crtc_y = crtc_y; + plane_state->crtc_w = crtc_w; + plane_state->crtc_h = crtc_h; +} + +static bool check_crtc_eq(struct drm_plane_state *plane_state, + int crtc_x, int crtc_y, + unsigned crtc_w, unsigned crtc_h) +{ + if (plane_state->dst.x1 != crtc_x || + plane_state->dst.y1 != crtc_y || + drm_rect_width(&plane_state->dst) != crtc_w || + drm_rect_height(&plane_state->dst) != crtc_h) { + drm_rect_debug_print("dst: ", &plane_state->dst, false); + + return false; + } + + return true; +} + +static int igt_check_plane_state(void *ignored) +{ + int ret; + + const struct drm_crtc_state crtc_state = { + .crtc = ZERO_SIZE_PTR, + .enable = true, + .active = true, + .mode = { + DRM_MODE("1024x768", 0, 65000, 1024, 1048, + 1184, 1344, 0, 768, 771, 777, 806, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) + }, + }; + struct drm_framebuffer fb = { + .width = 2048, + .height = 2048 + }; + struct drm_plane_state plane_state = { + .crtc = ZERO_SIZE_PTR, + .fb = &fb, + .rotation = DRM_MODE_ROTATE_0 + }; + + /* Simple clipping, no scaling. */ + set_src(&plane_state, 0, 0, fb.width << 16, fb.height << 16); + set_crtc(&plane_state, 0, 0, fb.width, fb.height); + ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + false, false); + FAIL(ret < 0, "Simple clipping check should pass\n"); + FAIL_ON(!plane_state.visible); + FAIL_ON(!check_src_eq(&plane_state, 0, 0, 1024 << 16, 768 << 16)); + FAIL_ON(!check_crtc_eq(&plane_state, 0, 0, 1024, 768)); + + /* Rotated clipping + reflection, no scaling. */ + plane_state.rotation = DRM_MODE_ROTATE_90 | DRM_MODE_REFLECT_X; + ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + false, false); + FAIL(ret < 0, "Rotated clipping check should pass\n"); + FAIL_ON(!plane_state.visible); + FAIL_ON(!check_src_eq(&plane_state, 0, 0, 768 << 16, 1024 << 16)); + FAIL_ON(!check_crtc_eq(&plane_state, 0, 0, 1024, 768)); + plane_state.rotation = DRM_MODE_ROTATE_0; + + /* Check whether positioning works correctly. 
*/ + set_src(&plane_state, 0, 0, 1023 << 16, 767 << 16); + set_crtc(&plane_state, 0, 0, 1023, 767); + ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + false, false); + FAIL(!ret, "Should not be able to position on the crtc with can_position=false\n"); + + ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + true, false); + FAIL(ret < 0, "Simple positioning should work\n"); + FAIL_ON(!plane_state.visible); + FAIL_ON(!check_src_eq(&plane_state, 0, 0, 1023 << 16, 767 << 16)); + FAIL_ON(!check_crtc_eq(&plane_state, 0, 0, 1023, 767)); + + /* Simple scaling tests. */ + set_src(&plane_state, 0, 0, 512 << 16, 384 << 16); + set_crtc(&plane_state, 0, 0, 1024, 768); + ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state, + 0x8001, + DRM_PLANE_HELPER_NO_SCALING, + false, false); + FAIL(!ret, "Upscaling out of range should fail.\n"); + ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state, + 0x8000, + DRM_PLANE_HELPER_NO_SCALING, + false, false); + FAIL(ret < 0, "Upscaling exactly 2x should work\n"); + FAIL_ON(!plane_state.visible); + FAIL_ON(!check_src_eq(&plane_state, 0, 0, 512 << 16, 384 << 16)); + FAIL_ON(!check_crtc_eq(&plane_state, 0, 0, 1024, 768)); + + set_src(&plane_state, 0, 0, 2048 << 16, 1536 << 16); + ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state, + DRM_PLANE_HELPER_NO_SCALING, + 0x1ffff, false, false); + FAIL(!ret, "Downscaling out of range should fail.\n"); + ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state, + DRM_PLANE_HELPER_NO_SCALING, + 0x20000, false, false); + FAIL(ret < 0, "Should succeed with exact scaling limit\n"); + FAIL_ON(!plane_state.visible); + FAIL_ON(!check_src_eq(&plane_state, 0, 0, 2048 << 16, 1536 << 16)); + FAIL_ON(!check_crtc_eq(&plane_state, 0, 0, 1024, 768)); + + /* Testing rounding errors. */ + set_src(&plane_state, 0, 0, 0x40001, 0x40001); + set_crtc(&plane_state, 1022, 766, 4, 4); + ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state, + DRM_PLANE_HELPER_NO_SCALING, + 0x10001, + true, false); + FAIL(ret < 0, "Should succeed by clipping to exact multiple"); + FAIL_ON(!plane_state.visible); + FAIL_ON(!check_src_eq(&plane_state, 0, 0, 2 << 16, 2 << 16)); + FAIL_ON(!check_crtc_eq(&plane_state, 1022, 766, 2, 2)); + + set_src(&plane_state, 0x20001, 0x20001, 0x4040001, 0x3040001); + set_crtc(&plane_state, -2, -2, 1028, 772); + ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state, + DRM_PLANE_HELPER_NO_SCALING, + 0x10001, + false, false); + FAIL(ret < 0, "Should succeed by clipping to exact multiple"); + FAIL_ON(!plane_state.visible); + FAIL_ON(!check_src_eq(&plane_state, 0x40002, 0x40002, 1024 << 16, 768 << 16)); + FAIL_ON(!check_crtc_eq(&plane_state, 0, 0, 1024, 768)); + + set_src(&plane_state, 0, 0, 0x3ffff, 0x3ffff); + set_crtc(&plane_state, 1022, 766, 4, 4); + ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state, + 0xffff, + DRM_PLANE_HELPER_NO_SCALING, + true, false); + FAIL(ret < 0, "Should succeed by clipping to exact multiple"); + FAIL_ON(!plane_state.visible); + /* Should not be rounded to 0x20001, which would be upscaling. 
*/ + FAIL_ON(!check_src_eq(&plane_state, 0, 0, 2 << 16, 2 << 16)); + FAIL_ON(!check_crtc_eq(&plane_state, 1022, 766, 2, 2)); + + set_src(&plane_state, 0x1ffff, 0x1ffff, 0x403ffff, 0x303ffff); + set_crtc(&plane_state, -2, -2, 1028, 772); + ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state, + 0xffff, + DRM_PLANE_HELPER_NO_SCALING, + false, false); + FAIL(ret < 0, "Should succeed by clipping to exact multiple"); + FAIL_ON(!plane_state.visible); + FAIL_ON(!check_src_eq(&plane_state, 0x3fffe, 0x3fffe, 1024 << 16, 768 << 16)); + FAIL_ON(!check_crtc_eq(&plane_state, 0, 0, 1024, 768)); + + return 0; +} + +#include "drm_selftest.c" + +static int __init test_drm_helper_init(void) +{ + int err; + + err = run_selftests(selftests, ARRAY_SIZE(selftests), NULL); + + return err > 0 ? 0 : err; +} + +module_init(test_drm_helper_init); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_LICENSE("GPL"); -- cgit v1.2.3 From e30ca4bcf0d9ad2c6f5716d6098b935f0d584c76 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 8 May 2018 12:26:50 +0300 Subject: drm/xen-front: checking for NULL instead of IS_ERR drm_dev_alloc() returns error pointers, it never returns NULL. Fixes: c575b7eeb89f ("drm/xen-front: Add support for Xen PV display frontend") Signed-off-by: Dan Carpenter Reviewed-by: Oleksandr Andrushchenko Signed-off-by: Oleksandr Andrushchenko Link: https://patchwork.freedesktop.org/patch/msgid/20180508092650.GA661@mwanda --- drivers/gpu/drm/xen/xen_drm_front.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c index 1b0ea9ac330e..8615e8522c7a 100644 --- a/drivers/gpu/drm/xen/xen_drm_front.c +++ b/drivers/gpu/drm/xen/xen_drm_front.c @@ -543,8 +543,8 @@ static int xen_drm_drv_init(struct xen_drm_front_info *front_info) front_info->drm_info = drm_info; drm_dev = drm_dev_alloc(&xen_drm_driver, dev); - if (!drm_dev) { - ret = -ENOMEM; + if (IS_ERR(drm_dev)) { + ret = PTR_ERR(drm_dev); goto fail; } -- cgit v1.2.3 From 18f20bc5303cf6276cce9ae1742f3835244ad087 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 8 May 2018 12:27:39 +0300 Subject: drm/xen-front: fix xen_drm_front_shbuf_alloc() error handling The xen_drm_front_shbuf_alloc() function was returning a mix of error pointers and NULL and the the caller wasn't checking correctly. I've changed it to always return error pointer consistently. 
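Both xen-front fixes above come down to the kernel convention of encoding a small negative errno in the returned pointer itself, which a plain NULL check cannot catch. A standalone sketch of that ERR_PTR()/IS_ERR()/PTR_ERR() idea, assuming the usual 4095-value error window of include/linux/err.h; illustration only, not the kernel implementation:

/* Standalone sketch of the ERR_PTR/IS_ERR/PTR_ERR convention the two
 * fixes above rely on: an errno is smuggled into the top 4095 values
 * of the pointer range, so callers must test with IS_ERR(), never
 * against NULL.
 */
#include <stdio.h>

#define MAX_ERRNO 4095
#define ENOMEM    12

static void *err_ptr(long error)      { return (void *)error; }
static long  ptr_err(const void *ptr) { return (long)ptr; }
static int   is_err(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Made-up allocator that fails like xen_drm_front_shbuf_alloc() now does. */
static void *alloc_thing(int fail)
{
        return fail ? err_ptr(-ENOMEM) : (void *)0x1000;
}

int main(void)
{
        void *p = alloc_thing(1);

        if (is_err(p))                          /* a NULL check would miss this */
                printf("error %ld\n", ptr_err(p));      /* -12 */
        return 0;
}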
Fixes: c575b7eeb89f ("drm/xen-front: Add support for Xen PV display frontend") Signed-off-by: Dan Carpenter Reviewed-by: Oleksandr Andrushchenko Signed-off-by: Oleksandr Andrushchenko Link: https://patchwork.freedesktop.org/patch/msgid/20180508092739.GB661@mwanda --- drivers/gpu/drm/xen/xen_drm_front.c | 4 ++-- drivers/gpu/drm/xen/xen_drm_front_shbuf.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c index 8615e8522c7a..378cb7ce0db5 100644 --- a/drivers/gpu/drm/xen/xen_drm_front.c +++ b/drivers/gpu/drm/xen/xen_drm_front.c @@ -188,8 +188,8 @@ int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info, buf_cfg.be_alloc = front_info->cfg.be_alloc; shbuf = xen_drm_front_shbuf_alloc(&buf_cfg); - if (!shbuf) - return -ENOMEM; + if (IS_ERR(shbuf)) + return PTR_ERR(shbuf); ret = dbuf_add_to_list(front_info, shbuf, dbuf_cookie); if (ret < 0) { diff --git a/drivers/gpu/drm/xen/xen_drm_front_shbuf.c b/drivers/gpu/drm/xen/xen_drm_front_shbuf.c index d5705251a0d6..8099cb343ae3 100644 --- a/drivers/gpu/drm/xen/xen_drm_front_shbuf.c +++ b/drivers/gpu/drm/xen/xen_drm_front_shbuf.c @@ -383,7 +383,7 @@ xen_drm_front_shbuf_alloc(struct xen_drm_front_shbuf_cfg *cfg) buf = kzalloc(sizeof(*buf), GFP_KERNEL); if (!buf) - return NULL; + return ERR_PTR(-ENOMEM); if (cfg->be_alloc) buf->ops = &backend_ops; -- cgit v1.2.3 From f45140df31717bfab0974a722800d3ac0587b3f0 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 8 May 2018 12:28:29 +0300 Subject: drm/xen-front: Fix loop timeout If the loop times out then we want to exit with "to" set to zero, but in the current code it's set to -1. Fixes: c575b7eeb89f ("drm/xen-front: Add support for Xen PV display frontend") Signed-off-by: Dan Carpenter Reviewed-by: Oleksandr Andrushchenko Signed-off-by: Oleksandr Andrushchenko Link: https://patchwork.freedesktop.org/patch/msgid/20180508092829.GC661@mwanda --- drivers/gpu/drm/xen/xen_drm_front.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c index 378cb7ce0db5..3345ac71b391 100644 --- a/drivers/gpu/drm/xen/xen_drm_front.c +++ b/drivers/gpu/drm/xen/xen_drm_front.c @@ -778,7 +778,7 @@ static int xen_drv_remove(struct xenbus_device *dev) */ while ((xenbus_read_unsigned(front_info->xb_dev->otherend, "state", XenbusStateUnknown) != XenbusStateInitWait) && - to--) + --to) msleep(10); if (!to) { -- cgit v1.2.3 From 2f065d8ae918159791474049ab67a0cb85723b81 Mon Sep 17 00:00:00 2001 From: Matt Atwood Date: Fri, 4 May 2018 15:18:00 -0700 Subject: drm/dp: Correctly mask DP_TRAINING_AUX_RD_INTERVAL values for DP 1.4 DP_TRAINING_AUX_RD_INTERVAL with the DP 1.3 spec changed the bit scheme from 8 bits to 7 in DPCD 0x000e. The 8th bit is used to identify extended receiver capabilities. For panels that use this new feature, the wait interval would be increased by 512 ms, while the spec allows a maximum of 16 ms. This behavior is described in table 2-158 of DP 1.4 spec address 0000eh. With the introduction of the DP 1.4 spec, main link clock recovery was standardized to 100 us regardless of the TRAINING_AUX_RD_INTERVAL value. To avoid breaking panels that are not spec compliant, we now warn on invalid values. V2: commit title/message, masking all 7 bits, warn on out of spec values. V3: commit message, make link train clock recovery follow DP 1.4 spec.
V4: style changes V5: typo V6: print statement revisions, DP_REV to DPCD_REV, comment correction V7: typo V8: Style V9: Strip out DPCD_REV_XX into a separate patch v10: DPCD_REV_XX to DP_DPCD_REV_XX Signed-off-by: Matt Atwood Reviewed-by: Rodrigo Vivi Signed-off-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20180504221800.17830-2-matthew.s.atwood@intel.com --- drivers/gpu/drm/drm_dp_helper.c | 22 ++++++++++++++++++---- include/drm/drm_dp_helper.h | 1 + 2 files changed, 19 insertions(+), 4 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index ffe14ec3e7f2..36c7609a4bd5 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c @@ -119,18 +119,32 @@ u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SI EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis); void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) { - if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0) + int rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] & + DP_TRAINING_AUX_RD_MASK; + + if (rd_interval > 4) + DRM_DEBUG_KMS("AUX interval %d, out of range (max 4)\n", + rd_interval); + + if (rd_interval == 0 || dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14) udelay(100); else - mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4); + mdelay(rd_interval * 4); } EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay); void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) { - if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0) + int rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] & + DP_TRAINING_AUX_RD_MASK; + + if (rd_interval > 4) + DRM_DEBUG_KMS("AUX interval %d, out of range (max 4)\n", + rd_interval); + + if (rd_interval == 0) udelay(400); else - mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4); + mdelay(rd_interval * 4); } EXPORT_SYMBOL(drm_dp_link_train_channel_eq_delay); diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index fc01341a46fa..c7b285637f86 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -124,6 +124,7 @@ # define DP_DPCD_DISPLAY_CONTROL_CAPABLE (1 << 3) /* edp v1.2 or higher */ #define DP_TRAINING_AUX_RD_INTERVAL 0x00e /* XXX 1.2? */ +# define DP_TRAINING_AUX_RD_MASK 0x7F /* XXX 1.2? */ #define DP_ADAPTER_CAP 0x00f /* 1.2 */ # define DP_FORCE_LOAD_SENSE_CAP (1 << 0) -- cgit v1.2.3 From 818c05d8e2679e27800b9ce5bc2b0a89ecd164e8 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 3 May 2018 21:41:19 +0300 Subject: drm: panel-orientation-quirks: Convert to use match_string() helper The new helper returns the index of the matching string in an array. We are going to use it here.
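For reference, match_string() scans an array of strings (NULL-terminated when the size is passed as -1) and returns the index of the first match, or -EINVAL when nothing matches. A minimal sketch of the semantics, with made-up dates rather than the real quirk-table entries:

	static const char * const bios_dates[] = { "01/01/2017", "06/15/2017", NULL };

	static int lookup_bios_date(const char *bios_date)
	{
		/* returns 0 or 1 on a match, -EINVAL if the date is not in the table */
		return match_string(bios_dates, -1, bios_date);
	}
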
Acked-by: Hans de Goede Signed-off-by: Andy Shevchenko Signed-off-by: Sean Paul Link: https://patchwork.freedesktop.org/patch/msgid/20180503184119.22355-1-andriy.shevchenko@linux.intel.com --- drivers/gpu/drm/drm_panel_orientation_quirks.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index caebddda8bce..fe9c6c731e87 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -172,10 +172,9 @@ int drm_get_panel_orientation_quirk(int width, int height) if (!bios_date) continue; - for (i = 0; data->bios_dates[i]; i++) { - if (!strcmp(data->bios_dates[i], bios_date)) - return data->orientation; - } + i = match_string(data->bios_dates, -1, bios_date); + if (i >= 0) + return data->orientation; } return DRM_MODE_PANEL_ORIENTATION_UNKNOWN; -- cgit v1.2.3 From dd7c2626329468c0344a794187b467d34c3640cb Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Tue, 8 May 2018 16:39:36 +0530 Subject: drm/modes: Introduce drm_mode_match() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make mode matching less confusing by allowing the caller to specify which parts of the modes should match via some flags. Signed-off-by: Ville Syrjälä Reviewed-by: Shashank Sharma Signed-off-by: Maarten Lankhorst Link: https://patchwork.freedesktop.org/patch/msgid/1525777785-9740-2-git-send-email-ankit.k.nautiyal@intel.com --- drivers/gpu/drm/drm_modes.c | 134 ++++++++++++++++++++++++++++++++++---------- include/drm/drm_modes.h | 9 +++ 2 files changed, 112 insertions(+), 31 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index e82b61e08f8c..c395a244f665 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -939,17 +939,68 @@ struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev, } EXPORT_SYMBOL(drm_mode_duplicate); +static bool drm_mode_match_timings(const struct drm_display_mode *mode1, + const struct drm_display_mode *mode2) +{ + return mode1->hdisplay == mode2->hdisplay && + mode1->hsync_start == mode2->hsync_start && + mode1->hsync_end == mode2->hsync_end && + mode1->htotal == mode2->htotal && + mode1->hskew == mode2->hskew && + mode1->vdisplay == mode2->vdisplay && + mode1->vsync_start == mode2->vsync_start && + mode1->vsync_end == mode2->vsync_end && + mode1->vtotal == mode2->vtotal && + mode1->vscan == mode2->vscan; +} + +static bool drm_mode_match_clock(const struct drm_display_mode *mode1, + const struct drm_display_mode *mode2) +{ + /* + * do clock check convert to PICOS + * so fb modes get matched the same + */ + if (mode1->clock && mode2->clock) + return KHZ2PICOS(mode1->clock) == KHZ2PICOS(mode2->clock); + else + return mode1->clock == mode2->clock; +} + +static bool drm_mode_match_flags(const struct drm_display_mode *mode1, + const struct drm_display_mode *mode2) +{ + return (mode1->flags & ~DRM_MODE_FLAG_3D_MASK) == + (mode2->flags & ~DRM_MODE_FLAG_3D_MASK); +} + +static bool drm_mode_match_3d_flags(const struct drm_display_mode *mode1, + const struct drm_display_mode *mode2) +{ + return (mode1->flags & DRM_MODE_FLAG_3D_MASK) == + (mode2->flags & DRM_MODE_FLAG_3D_MASK); +} + +static bool drm_mode_match_aspect_ratio(const struct drm_display_mode *mode1, + const struct drm_display_mode *mode2) +{ + return mode1->picture_aspect_ratio == mode2->picture_aspect_ratio; +} + /** 
- * drm_mode_equal - test modes for equality + * drm_mode_match - test modes for (partial) equality * @mode1: first mode * @mode2: second mode + * @match_flags: which parts need to match (DRM_MODE_MATCH_*) * * Check to see if @mode1 and @mode2 are equivalent. * * Returns: - * True if the modes are equal, false otherwise. + * True if the modes are (partially) equal, false otherwise. */ -bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2) +bool drm_mode_match(const struct drm_display_mode *mode1, + const struct drm_display_mode *mode2, + unsigned int match_flags) { if (!mode1 && !mode2) return true; @@ -957,15 +1008,48 @@ bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_displ if (!mode1 || !mode2) return false; - /* do clock check convert to PICOS so fb modes get matched - * the same */ - if (mode1->clock && mode2->clock) { - if (KHZ2PICOS(mode1->clock) != KHZ2PICOS(mode2->clock)) - return false; - } else if (mode1->clock != mode2->clock) + if (match_flags & DRM_MODE_MATCH_TIMINGS && + !drm_mode_match_timings(mode1, mode2)) return false; - return drm_mode_equal_no_clocks(mode1, mode2); + if (match_flags & DRM_MODE_MATCH_CLOCK && + !drm_mode_match_clock(mode1, mode2)) + return false; + + if (match_flags & DRM_MODE_MATCH_FLAGS && + !drm_mode_match_flags(mode1, mode2)) + return false; + + if (match_flags & DRM_MODE_MATCH_3D_FLAGS && + !drm_mode_match_3d_flags(mode1, mode2)) + return false; + + if (match_flags & DRM_MODE_MATCH_ASPECT_RATIO && + !drm_mode_match_aspect_ratio(mode1, mode2)) + return false; + + return true; +} +EXPORT_SYMBOL(drm_mode_match); + +/** + * drm_mode_equal - test modes for equality + * @mode1: first mode + * @mode2: second mode + * + * Check to see if @mode1 and @mode2 are equivalent. + * + * Returns: + * True if the modes are equal, false otherwise. + */ +bool drm_mode_equal(const struct drm_display_mode *mode1, + const struct drm_display_mode *mode2) +{ + return drm_mode_match(mode1, mode2, + DRM_MODE_MATCH_TIMINGS | + DRM_MODE_MATCH_CLOCK | + DRM_MODE_MATCH_FLAGS | + DRM_MODE_MATCH_3D_FLAGS); } EXPORT_SYMBOL(drm_mode_equal); @@ -980,13 +1064,13 @@ EXPORT_SYMBOL(drm_mode_equal); * Returns: * True if the modes are equal, false otherwise. 
*/ -bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2) +bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, + const struct drm_display_mode *mode2) { - if ((mode1->flags & DRM_MODE_FLAG_3D_MASK) != - (mode2->flags & DRM_MODE_FLAG_3D_MASK)) - return false; - - return drm_mode_equal_no_clocks_no_stereo(mode1, mode2); + return drm_mode_match(mode1, mode2, + DRM_MODE_MATCH_TIMINGS | + DRM_MODE_MATCH_FLAGS | + DRM_MODE_MATCH_3D_FLAGS); } EXPORT_SYMBOL(drm_mode_equal_no_clocks); @@ -1004,21 +1088,9 @@ EXPORT_SYMBOL(drm_mode_equal_no_clocks); bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2) { - if (mode1->hdisplay == mode2->hdisplay && - mode1->hsync_start == mode2->hsync_start && - mode1->hsync_end == mode2->hsync_end && - mode1->htotal == mode2->htotal && - mode1->hskew == mode2->hskew && - mode1->vdisplay == mode2->vdisplay && - mode1->vsync_start == mode2->vsync_start && - mode1->vsync_end == mode2->vsync_end && - mode1->vtotal == mode2->vtotal && - mode1->vscan == mode2->vscan && - (mode1->flags & ~DRM_MODE_FLAG_3D_MASK) == - (mode2->flags & ~DRM_MODE_FLAG_3D_MASK)) - return true; - - return false; + return drm_mode_match(mode1, mode2, + DRM_MODE_MATCH_TIMINGS | + DRM_MODE_MATCH_FLAGS); } EXPORT_SYMBOL(drm_mode_equal_no_clocks_no_stereo); diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h index 0d310beae6af..2f78b7ee4824 100644 --- a/include/drm/drm_modes.h +++ b/include/drm/drm_modes.h @@ -147,6 +147,12 @@ enum drm_mode_status { #define DRM_MODE_FLAG_3D_MAX DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF +#define DRM_MODE_MATCH_TIMINGS (1 << 0) +#define DRM_MODE_MATCH_CLOCK (1 << 1) +#define DRM_MODE_MATCH_FLAGS (1 << 2) +#define DRM_MODE_MATCH_3D_FLAGS (1 << 3) +#define DRM_MODE_MATCH_ASPECT_RATIO (1 << 4) + /** * struct drm_display_mode - DRM kernel-internal display mode structure * @hdisplay: horizontal display size @@ -490,6 +496,9 @@ void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src); struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev, const struct drm_display_mode *mode); +bool drm_mode_match(const struct drm_display_mode *mode1, + const struct drm_display_mode *mode2, + unsigned int match_flags); bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2); bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, -- cgit v1.2.3 From a2328fd657017557606264c61074e609adfbb3ce Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Tue, 8 May 2018 16:39:37 +0530 Subject: drm/edid: Use drm_mode_equal_no_clocks_no_stereo() for consistency MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use drm_mode_equal_no_clocks_no_stereo() in drm_match_hdmi_mode_clock_tolerance() for consistency as we also use it in drm_match_hdmi_mode() and the cea mode matching functions. This doesn't actually change anything since the input mode comes from detailed timings and we match it against edid_4k_modes[], so none of those modes can have stereo flags set.
Signed-off-by: Ville Syrjälä Reviewed-by: Shashank Sharma Signed-off-by: Maarten Lankhorst Link: https://patchwork.freedesktop.org/patch/msgid/1525777785-9740-3-git-send-email-ankit.k.nautiyal@intel.com --- drivers/gpu/drm/drm_edid.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 61dd9a2fbe5b..aa70da86ef2c 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -3047,7 +3047,7 @@ static u8 drm_match_hdmi_mode_clock_tolerance(const struct drm_display_mode *to_ abs(to_match->clock - clock2) > clock_tolerance) continue; - if (drm_mode_equal_no_clocks(to_match, hdmi_mode)) + if (drm_mode_equal_no_clocks_no_stereo(to_match, hdmi_mode)) return vic; } -- cgit v1.2.3 From 357768cc9e3fdacf6551da0ae1483bc87dbcb4e8 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Tue, 8 May 2018 16:39:38 +0530 Subject: drm/edid: Fix cea mode aspect ratio handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 6dffd431e229 ("drm: Add aspect ratio parsing in DRM layer") caused us to not send out any VICs in the AVI infoframes. That commit was since reverted, but if and when we add aspect ratio handling back we need to be more careful. Let's handle this by considering the aspect ratio as a requirement for cea mode matching only if the passed in mode actually has a non-zero aspect ratio field. This will keep userspace that doesn't provide an aspect ratio working as before by matching it to the first otherwise equal cea mode. And once userspace starts to provide the aspect ratio it will be considered a hard requirement for the match. Also change the hdmi mode matching to use drm_mode_match() for consistency, but we don't match on aspect ratio there since the spec doesn't list a specific aspect ratio for those modes.
Cc: Shashank Sharma Cc: "Lin, Jia" Cc: Akashdeep Sharma Cc: Jim Bride Cc: Jose Abreu Cc: Daniel Vetter Cc: Emil Velikov Signed-off-by: Ville Syrjälä Reviewed-by: Shashank Sharma Signed-off-by: Maarten Lankhorst Link: https://patchwork.freedesktop.org/patch/msgid/1525777785-9740-4-git-send-email-ankit.k.nautiyal@intel.com --- drivers/gpu/drm/drm_edid.c | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index aa70da86ef2c..ba68ff94d3b3 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -2930,11 +2930,15 @@ cea_mode_alternate_timings(u8 vic, struct drm_display_mode *mode) static u8 drm_match_cea_mode_clock_tolerance(const struct drm_display_mode *to_match, unsigned int clock_tolerance) { + unsigned int match_flags = DRM_MODE_MATCH_TIMINGS | DRM_MODE_MATCH_FLAGS; u8 vic; if (!to_match->clock) return 0; + if (to_match->picture_aspect_ratio) + match_flags |= DRM_MODE_MATCH_ASPECT_RATIO; + for (vic = 1; vic < ARRAY_SIZE(edid_cea_modes); vic++) { struct drm_display_mode cea_mode = edid_cea_modes[vic]; unsigned int clock1, clock2; @@ -2948,7 +2952,7 @@ static u8 drm_match_cea_mode_clock_tolerance(const struct drm_display_mode *to_m continue; do { - if (drm_mode_equal_no_clocks_no_stereo(to_match, &cea_mode)) + if (drm_mode_match(to_match, &cea_mode, match_flags)) return vic; } while (cea_mode_alternate_timings(vic, &cea_mode)); } @@ -2965,11 +2969,15 @@ static u8 drm_match_cea_mode_clock_tolerance(const struct drm_display_mode *to_m */ u8 drm_match_cea_mode(const struct drm_display_mode *to_match) { + unsigned int match_flags = DRM_MODE_MATCH_TIMINGS | DRM_MODE_MATCH_FLAGS; u8 vic; if (!to_match->clock) return 0; + if (to_match->picture_aspect_ratio) + match_flags |= DRM_MODE_MATCH_ASPECT_RATIO; + for (vic = 1; vic < ARRAY_SIZE(edid_cea_modes); vic++) { struct drm_display_mode cea_mode = edid_cea_modes[vic]; unsigned int clock1, clock2; @@ -2983,7 +2991,7 @@ u8 drm_match_cea_mode(const struct drm_display_mode *to_match) continue; do { - if (drm_mode_equal_no_clocks_no_stereo(to_match, &cea_mode)) + if (drm_mode_match(to_match, &cea_mode, match_flags)) return vic; } while (cea_mode_alternate_timings(vic, &cea_mode)); } @@ -3030,6 +3038,7 @@ hdmi_mode_alternate_clock(const struct drm_display_mode *hdmi_mode) static u8 drm_match_hdmi_mode_clock_tolerance(const struct drm_display_mode *to_match, unsigned int clock_tolerance) { + unsigned int match_flags = DRM_MODE_MATCH_TIMINGS | DRM_MODE_MATCH_FLAGS; u8 vic; if (!to_match->clock) @@ -3047,7 +3056,7 @@ static u8 drm_match_hdmi_mode_clock_tolerance(const struct drm_display_mode *to_ abs(to_match->clock - clock2) > clock_tolerance) continue; - if (drm_mode_equal_no_clocks_no_stereo(to_match, hdmi_mode)) + if (drm_mode_match(to_match, hdmi_mode, match_flags)) return vic; } @@ -3064,6 +3073,7 @@ static u8 drm_match_hdmi_mode_clock_tolerance(const struct drm_display_mode *to_ */ static u8 drm_match_hdmi_mode(const struct drm_display_mode *to_match) { + unsigned int match_flags = DRM_MODE_MATCH_TIMINGS | DRM_MODE_MATCH_FLAGS; u8 vic; if (!to_match->clock) @@ -3079,7 +3089,7 @@ static u8 drm_match_hdmi_mode(const struct drm_display_mode *to_match) if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) || KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) && - drm_mode_equal_no_clocks_no_stereo(to_match, hdmi_mode)) + drm_mode_match(to_match, hdmi_mode, match_flags)) return vic; } return 0; -- cgit v1.2.3 From 
a9c266c27ee568e3028b804a447b1fea58209618 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Tue, 8 May 2018 16:39:39 +0530 Subject: drm/edid: Don't send bogus aspect ratios in AVI infoframes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If the user mode would specify an aspect ratio other than 4:3 or 16:9, we now silently ignore it. Maybe a better approach is to return an error? Let's try that. Also we must be careful that we don't try to send an illegal picture aspect in the infoframe as it's only capable of signalling none, 4:3, and 16:9. Currently we're sending these bogus infoframes whenever the cea mode specifies some other aspect ratio. Cc: Shashank Sharma Cc: Sean Paul Cc: Jose Abreu Cc: Daniel Vetter Cc: Emil Velikov Signed-off-by: Ville Syrjälä Reviewed-by: Shashank Sharma Signed-off-by: Maarten Lankhorst Link: https://patchwork.freedesktop.org/patch/msgid/1525777785-9740-5-git-send-email-ankit.k.nautiyal@intel.com --- drivers/gpu/drm/drm_edid.c | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index ba68ff94d3b3..42a7e871aa2a 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -4838,6 +4838,7 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame, const struct drm_display_mode *mode, bool is_hdmi2_sink) { + enum hdmi_picture_aspect picture_aspect; int err; if (!frame || !mode) @@ -4880,13 +4881,23 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame, * Populate picture aspect ratio from either * user input (if specified) or from the CEA mode list. */ - if (mode->picture_aspect_ratio == HDMI_PICTURE_ASPECT_4_3 || - mode->picture_aspect_ratio == HDMI_PICTURE_ASPECT_16_9) - frame->picture_aspect = mode->picture_aspect_ratio; - else if (frame->video_code > 0) - frame->picture_aspect = drm_get_cea_aspect_ratio( - frame->video_code); + picture_aspect = mode->picture_aspect_ratio; + if (picture_aspect == HDMI_PICTURE_ASPECT_NONE) + picture_aspect = drm_get_cea_aspect_ratio(frame->video_code); + /* + * The infoframe can't convey anything but none, 4:3 + * and 16:9, so if the user has asked for anything else + * we can only satisfy it by specifying the right VIC. + */ + if (picture_aspect > HDMI_PICTURE_ASPECT_16_9) { + if (picture_aspect != + drm_get_cea_aspect_ratio(frame->video_code)) + return -EINVAL; + picture_aspect = HDMI_PICTURE_ASPECT_NONE; + } + + frame->picture_aspect = picture_aspect; frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE; frame->scan_mode = HDMI_SCAN_MODE_UNDERSCAN; -- cgit v1.2.3 From 7595bda2fb4378ccbb8db1d0e8de56d15ea7f7fa Mon Sep 17 00:00:00 2001 From: Ankit Nautiyal Date: Tue, 8 May 2018 16:39:41 +0530 Subject: drm: Add DRM client cap for aspect-ratio To enable aspect-ratio support in DRM, blindly exposing the aspect ratio information along with the mode can break things in existing non-atomic user-spaces which have no intention or support to use this aspect ratio information. To avoid this, a new drm client cap is required to enable a non-atomic user-space to advertise if it supports modes with aspect-ratio. Based on this cap value, the kernel will decide whether or not to expose the aspect ratio info in modes. This patch adds the client cap for aspect-ratio. Since no atomic-userspaces blow up on receiving aspect-ratio information, the client cap for aspect-ratio is always enabled for atomic clients.
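From the user-space side, a non-atomic client would opt in through the SET_CLIENT_CAP ioctl; a rough sketch using the libdrm wrapper drmSetClientCap() (the device path is illustrative, and a libdrm that already defines DRM_CLIENT_CAP_ASPECT_RATIO is assumed):

	#include <fcntl.h>
	#include <xf86drm.h>

	static int open_card_with_aspect_ratio(void)
	{
		int fd = open("/dev/dri/card0", O_RDWR | O_CLOEXEC);

		if (fd < 0)
			return fd;

		/* Tell the kernel we understand DRM_MODE_FLAG_PIC_AR_* bits in modes;
		 * older kernels reject the cap and we simply carry on without it. */
		drmSetClientCap(fd, DRM_CLIENT_CAP_ASPECT_RATIO, 1);

		return fd;
	}
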
Cc: Ville Syrjala Cc: Shashank Sharma Signed-off-by: Ankit Nautiyal V3: rebase V4: As suggested by Maarten Lankhorst, modified the commit message explaining the need to use the DRM cap for aspect-ratio. Also, tweaked the comment lines in the code for better understanding and clarity, as recommended by Shashank Sharma. V5: rebase V6: rebase V7: rebase V8: rebase V9: rebase V10: rebase V11: rebase V12: As suggested by Daniel Vetter and Ville Syrjala, always enable aspect-ratio client cap for atomic userspaces, if no atomic userspace breaks on aspect-ratio bits. V13: rebase V14: rebase Reviewed-by: Shashank Sharma Reviewed-by: Daniel Vetter Signed-off-by: Maarten Lankhorst Link: https://patchwork.freedesktop.org/patch/msgid/1525777785-9740-7-git-send-email-ankit.k.nautiyal@intel.com --- drivers/gpu/drm/drm_ioctl.c | 9 +++++++++ include/drm/drm_file.h | 8 ++++++++ include/uapi/drm/drm.h | 7 +++++++ 3 files changed, 24 insertions(+) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index eadeabc393f0..0d4cfb232576 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -324,6 +324,15 @@ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv) return -EINVAL; file_priv->atomic = req->value; file_priv->universal_planes = req->value; + /* + * No atomic user-space blows up on aspect ratio mode bits. + */ + file_priv->aspect_ratio_allowed = req->value; + break; + case DRM_CLIENT_CAP_ASPECT_RATIO: + if (req->value > 1) + return -EINVAL; + file_priv->aspect_ratio_allowed = req->value; break; default: return -EINVAL; diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h index 99ab50cbab00..91a65a360079 100644 --- a/include/drm/drm_file.h +++ b/include/drm/drm_file.h @@ -180,6 +180,14 @@ struct drm_file { /** @atomic: True if client understands atomic properties. */ unsigned atomic:1; + /** + * @aspect_ratio_allowed: + * + * True, if client can handle picture aspect ratios, and has requested + * to pass this information along with the mode. + */ + unsigned aspect_ratio_allowed:1; + /** * @is_master: * diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index 6fdff5945c8a..9c660e1688ab 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -680,6 +680,13 @@ struct drm_get_cap { */ #define DRM_CLIENT_CAP_ATOMIC 3 +/** + * DRM_CLIENT_CAP_ASPECT_RATIO + * + * If set to 1, the DRM core will provide aspect ratio information in modes. + */ +#define DRM_CLIENT_CAP_ASPECT_RATIO 4 + /** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */ struct drm_set_client_cap { __u64 capability; -- cgit v1.2.3 From ace5bf0e254b10585efa938d05e95ea05ae15326 Mon Sep 17 00:00:00 2001 From: Ankit Nautiyal Date: Tue, 8 May 2018 16:39:42 +0530 Subject: drm: Handle aspect ratio info in legacy modeset path If the user-space does not support aspect-ratio and requests a modeset with a mode having aspect ratio bits set, then the given user-mode must be rejected. Secondly, while preparing a user-mode from kernel mode, the aspect-ratio info must not be given, if aspect-ratio is not supported by the user. This patch: 1. rejects the modes with aspect-ratio info, during modeset, if the user does not support aspect ratio. 2. does not load the aspect-ratio info in the user-mode structure, if aspect ratio is not supported. 3. adds helper functions for determining if aspect-ratio is expected in user-mode and for allowing/disallowing the aspect-ratio, if it's not expected.
Signed-off-by: Ankit Nautiyal V3: Addressed review comments from Ville: Do not corrupt the current crtc state by updating aspect-ratio on the fly. V4: rebase V5: As suggested by Ville, rejected the modeset calls for modes with aspect ratio, if the user does not set aspect-ratio cap. V6: Used the helper functions for determining if aspect-ratio is expected in the user-mode. V7: rebase V8: rebase V9: rebase V10: Modified the commit-message V11: rebase V12: Merged the patch for adding aspect-ratio helper functions with this patch. V13: Minor modifications as suggested by Ville. V14: Removed helper functions, as they were used only once in legacy modeset path, as suggested by Daniel Vetter. Acked-by: Daniel Vetter Signed-off-by: Maarten Lankhorst Link: https://patchwork.freedesktop.org/patch/msgid/1525777785-9740-8-git-send-email-ankit.k.nautiyal@intel.com --- drivers/gpu/drm/drm_crtc.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index a231dd5dce16..98a36e6c69ad 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -449,6 +449,8 @@ int drm_mode_getcrtc(struct drm_device *dev, crtc_resp->mode_valid = 0; } } + if (!file_priv->aspect_ratio_allowed) + crtc_resp->mode.flags &= ~DRM_MODE_FLAG_PIC_AR_MASK; drm_modeset_unlock(&crtc->mutex); return 0; @@ -628,6 +630,13 @@ retry: ret = -ENOMEM; goto out; } + if (!file_priv->aspect_ratio_allowed && + (crtc_req->mode.flags & DRM_MODE_FLAG_PIC_AR_MASK) != DRM_MODE_FLAG_PIC_AR_NONE) { + DRM_DEBUG_KMS("Unexpected aspect-ratio flag bits\n"); + ret = -EINVAL; + goto out; + } + ret = drm_mode_convert_umode(dev, mode, &crtc_req->mode); if (ret) { -- cgit v1.2.3 From c3ff0cdb354f89a5b877eee61af70e6ae51de50b Mon Sep 17 00:00:00 2001 From: Ankit Nautiyal Date: Tue, 8 May 2018 16:39:43 +0530 Subject: drm: Expose modes with aspect ratio, only if requested We parse the EDID and add all the modes to the connector's modelist. This adds CEA modes with aspect ratio information too, regardless of whether user space requested this information or not. This patch: -prunes the modes with aspect-ratio information, from the drm_mode_get_connector modelist supplied to the user, if the user-space has not set the aspect ratio DRM client cap. However if such a mode is unique in the list, it is kept in the list, with aspect-ratio flags reset. -prepares a list of exposed modes, which is used to find unique modes if aspect-ratio is not allowed. -adds a new list_head 'export_head' in struct drm_display_mode, to traverse the list of exposed modes. Cc: Ville Syrjala Cc: Shashank Sharma Cc: Jose Abreu Signed-off-by: Ankit Nautiyal V3: As suggested by Ville, modified the mechanism of pruning of modes with aspect-ratio, if the aspect-ratio is not supported. Instead of straight away pruning such a mode, the mode is retained with aspect ratio bits set to zero, provided it is unique. V4: rebase V5: Addressed review comments from Ville: -used a pointer to store last valid mode. -avoided modifying picture_aspect_ratio in the kernel mode; instead only the flag bits of the user mode are reset (if aspect-ratio is not supported). V6: As suggested by Ville, corrected the mode pruning logic and elaborated the mode pruning logic and the assumptions taken. V7: rebase V8: rebase V9: rebase V10: rebase V11: Fixed the issue caused in kms_3d test, and enhanced the pruning logic to correctly identify and prune modes with aspect-ratio, if aspect-ratio cap is not set.
V12: As suggested by Ville, added another list_head in drm_mode_display to traverse the list of exposed modes and avoided duplication of modes. V13: Minor modifications, as suggested by Ville. v14: As suggested by Daniel Vetter and Ville Syrjala, corrected the pruning logic to avoid any dependency in the order of mode with aspect-ratio. Reviewed-by: Daniel Vetter Signed-off-by: Maarten Lankhorst Link: https://patchwork.freedesktop.org/patch/msgid/1525777785-9740-9-git-send-email-ankit.k.nautiyal@intel.com --- drivers/gpu/drm/drm_connector.c | 44 ++++++++++++++++++++++++++++++++++------- include/drm/drm_modes.h | 13 ++++++++++++ 2 files changed, 50 insertions(+), 7 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index dfc8ca1e9413..9b9ba5d5ec0c 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -1531,8 +1531,10 @@ static struct drm_encoder *drm_connector_get_encoder(struct drm_connector *conne return connector->encoder; } -static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode, - const struct drm_file *file_priv) +static bool +drm_mode_expose_to_userspace(const struct drm_display_mode *mode, + const struct list_head *export_list, + const struct drm_file *file_priv) { /* * If user-space hasn't configured the driver to expose the stereo 3D @@ -1540,6 +1542,23 @@ static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode, */ if (!file_priv->stereo_allowed && drm_mode_is_stereo(mode)) return false; + /* + * If user-space hasn't configured the driver to expose the modes + * with aspect-ratio, don't expose them. However if such a mode + * is unique, let it be exposed, but reset the aspect-ratio flags + * while preparing the list of user-modes. + */ + if (!file_priv->aspect_ratio_allowed) { + struct drm_display_mode *mode_itr; + + list_for_each_entry(mode_itr, export_list, export_head) + if (drm_mode_match(mode_itr, mode, + DRM_MODE_MATCH_TIMINGS | + DRM_MODE_MATCH_CLOCK | + DRM_MODE_MATCH_FLAGS | + DRM_MODE_MATCH_3D_FLAGS)) + return false; + } return true; } @@ -1559,6 +1578,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, struct drm_mode_modeinfo u_mode; struct drm_mode_modeinfo __user *mode_ptr; uint32_t __user *encoder_ptr; + LIST_HEAD(export_list); if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; @@ -1607,21 +1627,31 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, /* delayed so we get modes regardless of pre-fill_modes state */ list_for_each_entry(mode, &connector->modes, head) - if (drm_mode_expose_to_userspace(mode, file_priv)) + if (drm_mode_expose_to_userspace(mode, &export_list, + file_priv)) { + list_add_tail(&mode->export_head, &export_list); mode_count++; + } /* * This ioctl is called twice, once to determine how much space is * needed, and the 2nd time to fill it. + * The modes that need to be exposed to the user are maintained in the + * 'export_list'. When the ioctl is called first time to determine the, + * space, the export_list gets filled, to find the no.of modes. In the + * 2nd time, the user modes are filled, one by one from the export_list. 
*/ if ((out_resp->count_modes >= mode_count) && mode_count) { copied = 0; mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr; - list_for_each_entry(mode, &connector->modes, head) { - if (!drm_mode_expose_to_userspace(mode, file_priv)) - continue; - + list_for_each_entry(mode, &export_list, export_head) { drm_mode_convert_to_umode(&u_mode, mode); + /* + * Reset aspect ratio flags of user-mode, if modes with + * aspect-ratio are not supported. + */ + if (!file_priv->aspect_ratio_allowed) + u_mode.flags &= ~DRM_MODE_FLAG_PIC_AR_MASK; if (copy_to_user(mode_ptr + copied, &u_mode, sizeof(u_mode))) { ret = -EFAULT; diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h index 2f78b7ee4824..b159fe07fcf9 100644 --- a/include/drm/drm_modes.h +++ b/include/drm/drm_modes.h @@ -411,6 +411,19 @@ struct drm_display_mode { * Field for setting the HDMI picture aspect ratio of a mode. */ enum hdmi_picture_aspect picture_aspect_ratio; + + /** + * @export_head: + * + * struct list_head for modes to be exposed to the userspace. + * This is to maintain a list of exposed modes while preparing + * user-mode's list in drm_mode_getconnector ioctl. The purpose of this + * list_head only lies in the ioctl function, and is not expected to be + * used outside the function. + * Once used, the stale pointers are not reset, but left as it is, to + * avoid overhead of protecting it by mode_config.mutex. + */ + struct list_head export_head; }; /** -- cgit v1.2.3 From 222ec1618c3aceca1e61e1e73e559c647c2b946f Mon Sep 17 00:00:00 2001 From: Shashank Sharma Date: Tue, 8 May 2018 16:39:44 +0530 Subject: drm: Add aspect ratio parsing in DRM layer Current DRM layer functions don't parse aspect ratio information while converting a user mode->kernel mode or vice versa. This causes modeset to pick a mode with the wrong aspect ratio, eventually causing failures in HDMI compliance test cases due to a wrong VIC. This patch adds aspect ratio information to DRM's mode conversion and mode comparison functions, to make sure the kernel picks the mode with the right aspect ratio (as per the VIC). Background: This patch was once reviewed and merged, and later reverted due to lack of DRM cap protection. This is a re-spin of this patch, this time with DRM cap protection, to avoid exposing aspect ratio information when the client doesn't request it. Review link: https://pw-emeril.freedesktop.org/patch/104068/ Background discussion: https://patchwork.kernel.org/patch/9379057/ Signed-off-by: Shashank Sharma Signed-off-by: Lin, Jia Signed-off-by: Akashdeep Sharma Reviewed-by: Jim Bride (V2) Reviewed-by: Jose Abreu (V4) Cc: Ville Syrjala Cc: Jim Bride Cc: Jose Abreu Cc: Ankit Nautiyal V3: modified the aspect-ratio check in drm_mode_equal as per new flags provided by Ville. https://patchwork.freedesktop.org/patch/188043/ V4: rebase V5: rebase V6: As recommended by Ville, avoided matching of aspect-ratio in drm_fb_helper, while trying to find a common mode among connectors for the target clone mode.
V7: rebase V8: rebase V9: rebase V10: rebase V11: rebase V12: rebase V13: rebase V14: rebase Acked-by: Daniel Vetter Signed-off-by: Maarten Lankhorst Link: https://patchwork.freedesktop.org/patch/msgid/1525777785-9740-10-git-send-email-ankit.k.nautiyal@intel.com --- drivers/gpu/drm/drm_fb_helper.c | 12 ++++++++++-- drivers/gpu/drm/drm_modes.c | 35 ++++++++++++++++++++++++++++++++++- 2 files changed, 44 insertions(+), 3 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 0646b108030b..2ee1eaa66188 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -2183,7 +2183,11 @@ static bool drm_target_cloned(struct drm_fb_helper *fb_helper, for (j = 0; j < i; j++) { if (!enabled[j]) continue; - if (!drm_mode_equal(modes[j], modes[i])) + if (!drm_mode_match(modes[j], modes[i], + DRM_MODE_MATCH_TIMINGS | + DRM_MODE_MATCH_CLOCK | + DRM_MODE_MATCH_FLAGS | + DRM_MODE_MATCH_3D_FLAGS)) can_clone = false; } } @@ -2203,7 +2207,11 @@ static bool drm_target_cloned(struct drm_fb_helper *fb_helper, fb_helper_conn = fb_helper->connector_info[i]; list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) { - if (drm_mode_equal(mode, dmt_mode)) + if (drm_mode_match(mode, dmt_mode, + DRM_MODE_MATCH_TIMINGS | + DRM_MODE_MATCH_CLOCK | + DRM_MODE_MATCH_FLAGS | + DRM_MODE_MATCH_3D_FLAGS)) modes[i] = mode; } if (!modes[i]) diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index c395a244f665..7dfabdd6bcc8 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -1049,7 +1049,8 @@ bool drm_mode_equal(const struct drm_display_mode *mode1, DRM_MODE_MATCH_TIMINGS | DRM_MODE_MATCH_CLOCK | DRM_MODE_MATCH_FLAGS | - DRM_MODE_MATCH_3D_FLAGS); + DRM_MODE_MATCH_3D_FLAGS| + DRM_MODE_MATCH_ASPECT_RATIO); } EXPORT_SYMBOL(drm_mode_equal); @@ -1647,6 +1648,20 @@ void drm_mode_convert_to_umode(struct drm_mode_modeinfo *out, out->vrefresh = in->vrefresh; out->flags = in->flags; out->type = in->type; + + switch (in->picture_aspect_ratio) { + case HDMI_PICTURE_ASPECT_4_3: + out->flags |= DRM_MODE_FLAG_PIC_AR_4_3; + break; + case HDMI_PICTURE_ASPECT_16_9: + out->flags |= DRM_MODE_FLAG_PIC_AR_16_9; + break; + case HDMI_PICTURE_ASPECT_RESERVED: + default: + out->flags |= DRM_MODE_FLAG_PIC_AR_NONE; + break; + } + strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN); out->name[DRM_DISPLAY_MODE_LEN-1] = 0; } @@ -1693,6 +1708,24 @@ int drm_mode_convert_umode(struct drm_device *dev, strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN); out->name[DRM_DISPLAY_MODE_LEN-1] = 0; + /* Clearing picture aspect ratio bits from out flags, + * as the aspect-ratio information is not stored in + * flags for kernel-mode, but in picture_aspect_ratio. 
+ */ + out->flags &= ~DRM_MODE_FLAG_PIC_AR_MASK; + + switch (in->flags & DRM_MODE_FLAG_PIC_AR_MASK) { + case DRM_MODE_FLAG_PIC_AR_4_3: + out->picture_aspect_ratio |= HDMI_PICTURE_ASPECT_4_3; + break; + case DRM_MODE_FLAG_PIC_AR_16_9: + out->picture_aspect_ratio |= HDMI_PICTURE_ASPECT_16_9; + break; + default: + out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE; + break; + } + out->status = drm_mode_validate_driver(dev, out); if (out->status != MODE_OK) return -EINVAL; -- cgit v1.2.3 From 900aa8ad21587e909603f471b6cd81fd5338ec45 Mon Sep 17 00:00:00 2001 From: Shashank Sharma Date: Tue, 8 May 2018 16:39:45 +0530 Subject: drm: Add and handle new aspect ratios in DRM layer HDMI 2.0/CEA-861-F introduces two new aspect ratios: - 64:27 - 256:135 This patch: - Adds new DRM flags to represent these new aspect ratios. - Adds new cases to handle these aspect ratios while converting from user->kernel mode or vice versa. This patch was once reviewed and merged, and later reverted due to lack of DRM client protection, while adding aspect ratio bits in user modes. This is a re-spin of the series, with DRM client cap protection. The previous series can be found here: https://pw-emeril.freedesktop.org/series/10850/ Signed-off-by: Shashank Sharma Reviewed-by: Sean Paul (V2) Reviewed-by: Jose Abreu (V2) Cc: Ville Syrjala Cc: Sean Paul Cc: Jose Abreu Cc: Ankit Nautiyal V3: rebase V4: rebase V5: corrected the macro name for an aspect ratio, in a switch case. V6: rebase V7: rebase V8: rebase V9: rebase V10: rebase V11: rebase V12: rebase V13: rebase V14: rebase Acked-by: Daniel Vetter Signed-off-by: Maarten Lankhorst Link: https://patchwork.freedesktop.org/patch/msgid/1525777785-9740-11-git-send-email-ankit.k.nautiyal@intel.com --- drivers/gpu/drm/drm_modes.c | 12 ++++++++++++ include/uapi/drm/drm_mode.h | 6 ++++++ 2 files changed, 18 insertions(+) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 7dfabdd6bcc8..c78ca0e84ffd 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -1656,6 +1656,12 @@ void drm_mode_convert_to_umode(struct drm_mode_modeinfo *out, case HDMI_PICTURE_ASPECT_16_9: out->flags |= DRM_MODE_FLAG_PIC_AR_16_9; break; + case HDMI_PICTURE_ASPECT_64_27: + out->flags |= DRM_MODE_FLAG_PIC_AR_64_27; + break; + case HDMI_PICTURE_ASPECT_256_135: + out->flags |= DRM_MODE_FLAG_PIC_AR_256_135; + break; case HDMI_PICTURE_ASPECT_RESERVED: default: out->flags |= DRM_MODE_FLAG_PIC_AR_NONE; @@ -1721,6 +1727,12 @@ int drm_mode_convert_umode(struct drm_device *dev, case DRM_MODE_FLAG_PIC_AR_16_9: out->picture_aspect_ratio |= HDMI_PICTURE_ASPECT_16_9; break; + case DRM_MODE_FLAG_PIC_AR_64_27: + out->picture_aspect_ratio |= HDMI_PICTURE_ASPECT_64_27; + break; + case DRM_MODE_FLAG_PIC_AR_256_135: + out->picture_aspect_ratio |= HDMI_PICTURE_ASPECT_256_135; + break; default: out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE; break; diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h index 50bcf4214ff9..4b3a1bb58e68 100644 --- a/include/uapi/drm/drm_mode.h +++ b/include/uapi/drm/drm_mode.h @@ -93,6 +93,8 @@ extern "C" { #define DRM_MODE_PICTURE_ASPECT_NONE 0 #define DRM_MODE_PICTURE_ASPECT_4_3 1 #define DRM_MODE_PICTURE_ASPECT_16_9 2 +#define DRM_MODE_PICTURE_ASPECT_64_27 3 +#define DRM_MODE_PICTURE_ASPECT_256_135 4 /* Aspect ratio flag bitmask (4 bits 22:19) */ #define DRM_MODE_FLAG_PIC_AR_MASK (0x0F<<19) @@ -102,6 +104,10 @@ extern "C" { (DRM_MODE_PICTURE_ASPECT_4_3<<19) #define DRM_MODE_FLAG_PIC_AR_16_9 \
(DRM_MODE_PICTURE_ASPECT_16_9<<19) +#define DRM_MODE_FLAG_PIC_AR_64_27 \ + (DRM_MODE_PICTURE_ASPECT_64_27<<19) +#define DRM_MODE_FLAG_PIC_AR_256_135 \ + (DRM_MODE_PICTURE_ASPECT_256_135<<19) #define DRM_MODE_FLAG_ALL (DRM_MODE_FLAG_PHSYNC | \ DRM_MODE_FLAG_NHSYNC | \ -- cgit v1.2.3 From 68266f1c08db731fa7c3a0903bf890fc76ce9345 Mon Sep 17 00:00:00 2001 From: Souptick Joarder Date: Tue, 17 Apr 2018 19:08:44 +0530 Subject: gpu: drm: qxl: Adding new typedef vm_fault_t Use new return type vm_fault_t for fault handler. For now, this is just documenting that the function returns a VM_FAULT value rather than an errno. Once all instances are converted, vm_fault_t will become a distinct type. Reference id -> 1c8f422059ae ("mm: change return type to vm_fault_t") Signed-off-by: Souptick Joarder Link: http://patchwork.freedesktop.org/patch/msgid/20180417133844.GA30256@jordon-HP-15-Notebook-PC Signed-off-by: Gerd Hoffmann --- drivers/gpu/drm/qxl/qxl_ttm.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index ee2340e31f06..86a1fb32f6db 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -105,16 +105,16 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev) static struct vm_operations_struct qxl_ttm_vm_ops; static const struct vm_operations_struct *ttm_vm_ops; -static int qxl_ttm_fault(struct vm_fault *vmf) +static vm_fault_t qxl_ttm_fault(struct vm_fault *vmf) { struct ttm_buffer_object *bo; - int r; + vm_fault_t ret; bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data; if (bo == NULL) return VM_FAULT_NOPAGE; - r = ttm_vm_ops->fault(vmf); - return r; + ret = ttm_vm_ops->fault(vmf); + return ret; } int qxl_mmap(struct file *filp, struct vm_area_struct *vma) -- cgit v1.2.3
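As background for the vm_fault_t conversion above: fault handlers report VM_FAULT_* status bits rather than negative errnos, which is what the new typedef documents. A minimal hypothetical handler (not the qxl code; my_drv_lookup_page() is an assumed helper) looks roughly like this:

	static vm_fault_t my_drv_fault(struct vm_fault *vmf)
	{
		struct page *page = my_drv_lookup_page(vmf->vma, vmf->pgoff);

		if (!page)
			return VM_FAULT_SIGBUS;	/* a VM_FAULT_* code, never -EFAULT */

		get_page(page);
		vmf->page = page;	/* the core maps this page for us */
		return 0;
	}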