-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic.c         |   1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic_plane.c   |   7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crtc.c           |   4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c        | 122
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_types.h  |   6
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gtt.c                 |   3
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c                    |  15
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c                     |  30
8 files changed, 136 insertions(+), 52 deletions(-)
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index e0667d163266..40da7910f845 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -262,6 +262,7 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
crtc_state->preload_luts = false;
crtc_state->inherited = false;
crtc_state->wm.need_postvbl_update = false;
+ crtc_state->do_async_flip = false;
crtc_state->fb_bits = 0;
crtc_state->update_planes = 0;
crtc_state->dsb = NULL;
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index c53aa6a4c7a0..5712688232fb 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -110,7 +110,6 @@ intel_plane_duplicate_state(struct drm_plane *plane)
intel_state->ggtt_vma = NULL;
intel_state->dpt_vma = NULL;
intel_state->flags = 0;
- intel_state->do_async_flip = false;
/* add reference to fb */
if (intel_state->hw.fb)
@@ -506,7 +505,7 @@ static int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_cr
new_crtc_state->disable_lp_wm = true;
if (intel_plane_do_async_flip(plane, old_crtc_state, new_crtc_state))
- new_plane_state->do_async_flip = true;
+ new_crtc_state->do_async_flip = true;
return 0;
}
@@ -678,7 +677,7 @@ void intel_plane_update_arm(struct intel_plane *plane,
trace_intel_plane_update_arm(&plane->base, crtc);
- if (plane_state->do_async_flip)
+ if (crtc_state->do_async_flip && plane->async_flip)
plane->async_flip(plane, crtc_state, plane_state, true);
else
plane->update_arm(plane, crtc_state, plane_state);
@@ -703,7 +702,7 @@ void intel_crtc_planes_update_noarm(struct intel_atomic_state *state,
struct intel_plane *plane;
int i;
- if (new_crtc_state->uapi.async_flip)
+ if (new_crtc_state->do_async_flip)
return;
/*
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
index 08ee3e17ee5c..65827481c1b1 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -485,7 +485,7 @@ void intel_pipe_update_start(struct intel_crtc_state *new_crtc_state)
intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI);
DEFINE_WAIT(wait);
- if (new_crtc_state->uapi.async_flip)
+ if (new_crtc_state->do_async_flip)
return;
if (intel_crtc_needs_vblank_work(new_crtc_state))
@@ -630,7 +630,7 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
ktime_t end_vbl_time = ktime_get();
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- if (new_crtc_state->uapi.async_flip)
+ if (new_crtc_state->do_async_flip)
return;
trace_intel_pipe_update_end(crtc, end_vbl_count, scanline_end);
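Taken together, the intel_atomic_plane.c and intel_crtc.c hunks key everything off the new per-CRTC flag rather than the per-plane one or the raw uapi request: when do_async_flip is set, vblank evasion in intel_pipe_update_start()/end() is skipped and a plane is armed through its async_flip() hook only if it actually implements one. The following standalone sketch models that dispatch with invented userspace types and names; it is not the driver's code:

/* Minimal userspace model of the new arm dispatch -- illustrative only. */
#include <stdbool.h>
#include <stdio.h>

struct crtc_state { bool do_async_flip; };          /* stands in for intel_crtc_state */
struct plane {
	const char *name;
	void (*async_flip)(const struct plane *);    /* NULL if the plane can't async flip */
	void (*update_arm)(const struct plane *);
};

static void hw_async_flip(const struct plane *p) { printf("%s: async flip\n", p->name); }
static void hw_update_arm(const struct plane *p) { printf("%s: full arm\n", p->name); }

/* Mirrors the intel_plane_update_arm() change: the CRTC-wide flag picks the
 * path, and only planes that actually implement async_flip() take it. */
static void plane_update_arm(const struct plane *p, const struct crtc_state *cs)
{
	if (cs->do_async_flip && p->async_flip)
		p->async_flip(p);
	else
		p->update_arm(p);
}

int main(void)
{
	struct crtc_state cs = { .do_async_flip = true };
	struct plane primary = { "primary", hw_async_flip, hw_update_arm };
	struct plane cursor  = { "cursor",  NULL,          hw_update_arm };

	plane_update_arm(&primary, &cs); /* takes the async path */
	plane_update_arm(&cursor, &cs);  /* falls back to a normal arm */
	return 0;
}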
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 80b19c304c43..7dfeb458aa65 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -346,7 +346,10 @@ static enum pipe bigjoiner_master_pipe(const struct intel_crtc_state *crtc_state
u8 intel_crtc_bigjoiner_slave_pipes(const struct intel_crtc_state *crtc_state)
{
- return crtc_state->bigjoiner_pipes & ~BIT(bigjoiner_master_pipe(crtc_state));
+ if (crtc_state->bigjoiner_pipes)
+ return crtc_state->bigjoiner_pipes & ~BIT(bigjoiner_master_pipe(crtc_state));
+ else
+ return 0;
}
bool intel_crtc_is_bigjoiner_slave(const struct intel_crtc_state *crtc_state)
@@ -1260,10 +1263,8 @@ static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
int i;
for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
- if (plane->enable_flip_done &&
- plane->pipe == crtc->pipe &&
- update_planes & BIT(plane->id) &&
- plane_state->do_async_flip)
+ if (plane->pipe == crtc->pipe &&
+ update_planes & BIT(plane->id))
plane->enable_flip_done(plane);
}
}
@@ -1279,10 +1280,8 @@ static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
int i;
for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
- if (plane->disable_flip_done &&
- plane->pipe == crtc->pipe &&
- update_planes & BIT(plane->id) &&
- plane_state->do_async_flip)
+ if (plane->pipe == crtc->pipe &&
+ update_planes & BIT(plane->id))
plane->disable_flip_done(plane);
}
}
@@ -7398,7 +7397,7 @@ static void kill_bigjoiner_slave(struct intel_atomic_state *state,
* Correspondingly, support is currently added for primary plane only.
*
* Async flip can only change the plane surface address, so anything else
- * changing is rejected from the intel_atomic_check_async() function.
+ * changing is rejected from the intel_async_flip_check_hw() function.
* Once this check is cleared, flip done interrupt is enabled using
* the intel_crtc_enable_flip_done() function.
*
@@ -7408,7 +7407,65 @@ static void kill_bigjoiner_slave(struct intel_atomic_state *state,
* correspond to the last vblank and have no relation to the actual time when
* the flip done event was sent.
*/
-static int intel_atomic_check_async(struct intel_atomic_state *state, struct intel_crtc *crtc)
+static int intel_async_flip_check_uapi(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_plane_state *old_plane_state;
+ struct intel_plane_state *new_plane_state;
+ struct intel_plane *plane;
+ int i;
+
+ if (!new_crtc_state->uapi.async_flip)
+ return 0;
+
+ if (!new_crtc_state->uapi.active) {
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] not active\n",
+ crtc->base.base.id, crtc->base.name);
+ return -EINVAL;
+ }
+
+ if (intel_crtc_needs_modeset(new_crtc_state)) {
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] modeset required\n",
+ crtc->base.base.id, crtc->base.name);
+ return -EINVAL;
+ }
+
+ for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
+ new_plane_state, i) {
+ if (plane->pipe != crtc->pipe)
+ continue;
+
+ /*
+ * TODO: Async flip is currently only supported through the page flip
+ * IOCTL, so support is added for the primary plane only. Support for
+ * other planes on the platforms that can do this (vlv/chv and icl+)
+ * should be added once async flip is enabled in the atomic IOCTL path.
+ */
+ if (!plane->async_flip) {
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] async flip not supported\n",
+ plane->base.base.id, plane->base.name);
+ return -EINVAL;
+ }
+
+ if (!old_plane_state->uapi.fb || !new_plane_state->uapi.fb) {
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] no old or new framebuffer\n",
+ plane->base.base.id, plane->base.name);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct intel_crtc *crtc)
{
struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
@@ -7419,6 +7476,9 @@ static int intel_atomic_check_async(struct intel_atomic_state *state, struct int
old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+ if (!new_crtc_state->uapi.async_flip)
+ return 0;
+
if (intel_crtc_needs_modeset(new_crtc_state)) {
drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n");
return -EINVAL;
@@ -7440,16 +7500,26 @@ static int intel_atomic_check_async(struct intel_atomic_state *state, struct int
continue;
/*
- * TODO: Async flip is only supported through the page flip IOCTL
- * as of now. So support currently added for primary plane only.
- * Support for other planes on platforms on which supports
- * this(vlv/chv and icl+) should be added when async flip is
- * enabled in the atomic IOCTL path.
+ * Only async flip capable planes should be in the state
+ * if we're really about to ask the hardware to perform
+ * an async flip. We should never get this far otherwise.
*/
- if (!plane->async_flip)
+ if (drm_WARN_ON(&i915->drm,
+ new_crtc_state->do_async_flip && !plane->async_flip))
return -EINVAL;
/*
+ * Only check async flip capable planes; other planes may be
+ * involved in the initial commit due to the wm0/ddb optimization.
+ *
+ * TODO maybe should track which planes actually
+ * were requested to do the async flip...
+ */
+ if (!plane->async_flip)
+ continue;
+
+ /*
* FIXME: This check is kept generic for all platforms.
* Need to verify this for all gen9 platforms to enable
* this selectively if required.
@@ -7613,6 +7683,12 @@ static int intel_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ ret = intel_async_flip_check_uapi(state, crtc);
+ if (ret)
+ return ret;
+ }
+
ret = intel_bigjoiner_add_affected_crtcs(state);
if (ret)
goto fail;
@@ -7769,11 +7845,9 @@ static int intel_atomic_check(struct drm_device *dev,
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (new_crtc_state->uapi.async_flip) {
- ret = intel_atomic_check_async(state, crtc);
- if (ret)
- goto fail;
- }
+ ret = intel_async_flip_check_hw(state, crtc);
+ if (ret)
+ goto fail;
if (!intel_crtc_needs_modeset(new_crtc_state) &&
!new_crtc_state->update_pipe)
@@ -8395,7 +8469,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
intel_dbuf_pre_plane_update(state);
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
- if (new_crtc_state->uapi.async_flip)
+ if (new_crtc_state->do_async_flip)
intel_crtc_enable_flip_done(state, crtc);
}
@@ -8421,7 +8495,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
drm_atomic_helper_wait_for_flip_done(dev, &state->base);
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
- if (new_crtc_state->uapi.async_flip)
+ if (new_crtc_state->do_async_flip)
intel_crtc_disable_flip_done(state, crtc);
}
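The intel_display.c changes split the old intel_atomic_check_async() into intel_async_flip_check_uapi(), which runs early on exactly what userspace requested, and intel_async_flip_check_hw(), which runs after the driver may have pulled additional planes into the state and therefore skips planes without an async_flip hook instead of rejecting the commit. A compact standalone model of that ordering, using invented names and plain userspace C rather than the driver's structures:

/* Userspace model of the two-phase async flip checks -- illustrative only. */
#include <stdbool.h>
#include <stdio.h>

struct plane { const char *name; bool can_async_flip; };

/* Phase 1 (uapi): only the plane(s) userspace asked to flip are in the
 * state, so an async-incapable plane is a hard error. */
static int check_uapi(const struct plane *p)
{
	if (!p->can_async_flip) {
		printf("%s: async flip not supported -> reject\n", p->name);
		return -1;
	}
	return 0;
}

/* Phase 2 (hw): the wm0/ddb optimization may have added extra planes to
 * the commit by now; skip the ones that cannot async flip instead of
 * failing the whole commit. */
static void check_hw(const struct plane *planes, int n)
{
	for (int i = 0; i < n; i++) {
		if (!planes[i].can_async_flip) {
			printf("%s: added by ddb optimization, skipped\n", planes[i].name);
			continue;
		}
		printf("%s: verified against hw constraints\n", planes[i].name);
	}
}

int main(void)
{
	struct plane requested = { "primary", true };
	struct plane commit[]  = { requested, { "sprite", false } };

	if (check_uapi(&requested) == 0)  /* early, on the uapi request */
		check_hw(commit, 2);      /* late, on the full commit */
	return 0;
}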
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index b50d0e6efe21..776b3e6662f2 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -613,9 +613,6 @@ struct intel_plane_state {
struct intel_fb_view view;
- /* Indicates if async flip is required */
- bool do_async_flip;
-
/* Plane pxp decryption state */
bool decrypt;
@@ -951,6 +948,9 @@ struct intel_crtc_state {
bool preload_luts;
bool inherited; /* state inherited from BIOS? */
+ /* Ask the hardware to actually async flip? */
+ bool do_async_flip;
+
/* Pipe source size (ie. panel fitter input size)
* All planes will be positioned inside this space,
* and get clipped at the edges. */
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index 4bcdfcab3642..a5f5b2dda332 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -234,7 +234,8 @@ void i915_address_space_init(struct i915_address_space *vm, int subclass)
memset64(vm->min_alignment, I915_GTT_MIN_ALIGNMENT,
ARRAY_SIZE(vm->min_alignment));
- if (HAS_64K_PAGES(vm->i915) && NEEDS_COMPACT_PT(vm->i915)) {
+ if (HAS_64K_PAGES(vm->i915) && NEEDS_COMPACT_PT(vm->i915) &&
+ subclass == VM_CLASS_PPGTT) {
vm->min_alignment[INTEL_MEMORY_LOCAL] = I915_GTT_PAGE_SIZE_2M;
vm->min_alignment[INTEL_MEMORY_STOLEN_LOCAL] = I915_GTT_PAGE_SIZE_2M;
} else if (HAS_64K_PAGES(vm->i915)) {
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index e8d6c76e9234..057ec4490104 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -188,14 +188,29 @@ static ssize_t description_show(struct mdev_type *mtype,
type->weight);
}
+static ssize_t name_show(struct mdev_type *mtype,
+ struct mdev_type_attribute *attr, char *buf)
+{
+ struct intel_vgpu_type *type;
+ struct intel_gvt *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
+
+ type = &gvt->types[mtype_get_type_group_id(mtype)];
+ if (!type)
+ return 0;
+
+ return sprintf(buf, "%s\n", type->name);
+}
+
static MDEV_TYPE_ATTR_RO(available_instances);
static MDEV_TYPE_ATTR_RO(device_api);
static MDEV_TYPE_ATTR_RO(description);
+static MDEV_TYPE_ATTR_RO(name);
static struct attribute *gvt_type_attrs[] = {
&mdev_type_attr_available_instances.attr,
&mdev_type_attr_device_api.attr,
&mdev_type_attr_description.attr,
+ &mdev_type_attr_name.attr,
NULL,
};
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 5af16ca4dabd..71f7fba2c9e2 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -5145,12 +5145,15 @@ skl_allocate_plane_ddb(struct skl_plane_ddb_iter *iter,
const struct skl_wm_level *wm,
u64 data_rate)
{
- u16 extra;
+ u16 extra = 0;
- extra = min_t(u16, iter->size,
- DIV64_U64_ROUND_UP(iter->size * data_rate, iter->data_rate));
- iter->size -= extra;
- iter->data_rate -= data_rate;
+ if (data_rate) {
+ extra = min_t(u16, iter->size,
+ DIV64_U64_ROUND_UP(iter->size * data_rate,
+ iter->data_rate));
+ iter->size -= extra;
+ iter->data_rate -= data_rate;
+ }
return wm->min_ddb_alloc + extra;
}
@@ -5193,9 +5196,6 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR],
alloc->end - iter.total[PLANE_CURSOR], alloc->end);
- if (iter.data_rate == 0)
- return 0;
-
/*
* Find the highest watermark level for which we can satisfy the block
* requirement of active planes.
@@ -5234,6 +5234,10 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
return -EINVAL;
}
+ /* avoid the WARN later when we don't allocate any extra DDB */
+ if (iter.data_rate == 0)
+ iter.size = 0;
+
/*
* Grant each plane the blocks it requires at the highest achievable
* watermark level, plus an extra share of the leftover blocks
@@ -5246,20 +5250,10 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
if (plane_id == PLANE_CURSOR)
continue;
- /*
- * We've accounted for all active planes; remaining planes are
- * all disabled.
- */
- if (iter.data_rate == 0)
- break;
-
iter.total[plane_id] =
skl_allocate_plane_ddb(&iter, &wm->wm[level],
crtc_state->plane_data_rate[plane_id]);
- if (iter.data_rate == 0)
- break;
-
iter.uv_total[plane_id] =
skl_allocate_plane_ddb(&iter, &wm->uv_wm[level],
crtc_state->uv_plane_data_rate[plane_id]);
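The intel_pm.c hunks keep the proportional split -- each plane gets its min_ddb_alloc plus min(size, DIV64_U64_ROUND_UP(size * data_rate, total data_rate)) of the leftover blocks -- but a plane with a zero data rate now simply gets no extra share, so the loop no longer has to break out early and the later WARN about unallocated DDB is avoided. A standalone worked example of that split, using plain C arithmetic and invented names rather than the driver's u16/u64 helpers:

/* Standalone model of the proportional DDB split -- illustrative only. */
#include <stdint.h>
#include <stdio.h>

/* Same shape as the patched skl_allocate_plane_ddb(): each plane gets its
 * guaranteed minimum plus a share of the leftover blocks proportional to
 * its data rate; planes with a zero data rate get no extra share. */
static unsigned int allocate_plane_ddb(unsigned int *size, uint64_t *total_rate,
				       unsigned int min_ddb_alloc, uint64_t data_rate)
{
	unsigned int extra = 0;

	if (data_rate) {
		/* DIV64_U64_ROUND_UP(size * data_rate, total_rate), clamped to what's left */
		extra = (unsigned int)((*size * data_rate + *total_rate - 1) / *total_rate);
		if (extra > *size)
			extra = *size;
		*size -= extra;
		*total_rate -= data_rate;
	}

	return min_ddb_alloc + extra;
}

int main(void)
{
	unsigned int leftover = 100;  /* blocks left after the per-plane minimums */
	uint64_t total_rate = 3000;   /* sum of all plane data rates */

	printf("plane0: %u\n", allocate_plane_ddb(&leftover, &total_rate, 8, 2000)); /* 8 + 67 */
	printf("plane1: %u\n", allocate_plane_ddb(&leftover, &total_rate, 8, 1000)); /* 8 + 33 */
	printf("cursor: %u\n", allocate_plane_ddb(&leftover, &total_rate, 4, 0));    /* 4 + 0  */
	return 0;
}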