author    Rodrigo Vivi <rodrigo.vivi@intel.com>  2019-08-22 08:47:35 +0300
committer Rodrigo Vivi <rodrigo.vivi@intel.com>  2019-08-22 10:10:36 +0300
commit    829e8def7bd7b1e58028113ee5c2877da89d8f27 (patch)
tree      3958810e8e17c508172889c02e75d9dc2ec6080c /drivers/gpu/drm/i915
parent    8e40983dec63300013ff5755c36e3ac455e351a5 (diff)
parent    ae4530062620561d24683b1bd3438b8397693429 (diff)
download  linux-829e8def7bd7b1e58028113ee5c2877da89d8f27.tar.xz
Merge drm/drm-next into drm-intel-next-queued
We need the rename of reservation_object to dma_resv.
The solution for this merge came from linux-next:

From: Stephen Rothwell <sfr@canb.auug.org.au>
Date: Wed, 14 Aug 2019 12:48:39 +1000
Subject: [PATCH] drm: fix up fallout from "dma-buf: rename reservation_object to dma_resv"

Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
---
 drivers/gpu/drm/i915/gt/intel_engine_pool.c | 8 ++++----
 3 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.c b/drivers/gpu/drm/i915/gt/intel_engine_pool.c
index 03d90b49584a..4cd54c569911 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pool.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pool.c
@@ -43,12 +43,12 @@ static int pool_active(struct i915_active *ref)
 {
 	struct intel_engine_pool_node *node =
 		container_of(ref, typeof(*node), active);
-	struct reservation_object *resv = node->obj->base.resv;
+	struct dma_resv *resv = node->obj->base.resv;
 	int err;

-	if (reservation_object_trylock(resv)) {
-		reservation_object_add_excl_fence(resv, NULL);
-		reservation_object_unlock(resv);
+	if (dma_resv_trylock(resv)) {
+		dma_resv_add_excl_fence(resv, NULL);
+		dma_resv_unlock(resv);
 	}

 	err = i915_gem_object_pin_pages(node->obj);

which is a simplified version from a previous one which had:
Reviewed-by: Christian König <christian.koenig@amd.com>

Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
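For reference, the rename is purely mechanical: struct reservation_object becomes struct dma_resv, <linux/reservation.h> becomes <linux/dma-resv.h>, and every reservation_object_* call maps 1:1 onto a dma_resv_* call. A minimal sketch of the pattern follows; the helper below is hypothetical, not code from this merge:

#include <linux/dma-resv.h>

/*
 * Hypothetical helper illustrating the post-rename API; the old
 * reservation_object_* spelling is noted alongside each call.
 */
static void example_set_excl_fence(struct dma_resv *resv,
				   struct dma_fence *fence)
{
	dma_resv_lock(resv, NULL);		/* was reservation_object_lock() */
	dma_resv_add_excl_fence(resv, fence);	/* was reservation_object_add_excl_fence() */
	dma_resv_unlock(resv);			/* was reservation_object_unlock() */
}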
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.c | 39
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp.c | 53
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_busy.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_clflush.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_client_blt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 7
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_fence.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_mman.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.h | 10
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_pages.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_wait.c | 24
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_breadcrumbs.c | 13
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_pool.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gvt/dmabuf.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/fb_decoder.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c | 9
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 12
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 59
-rw-r--r--  drivers/gpu/drm/i915/gvt/trace_points.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_batch_pool.c | 132
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_request.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_sw_fence.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_sw_fence.h | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.h | 8
36 files changed, 337 insertions(+), 132 deletions(-)
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 02cede79e552..45add812048b 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -16,7 +16,6 @@ subdir-ccflags-y := -Wall -Wextra
subdir-ccflags-y += $(call cc-disable-warning, unused-parameter)
subdir-ccflags-y += $(call cc-disable-warning, type-limits)
subdir-ccflags-y += $(call cc-disable-warning, missing-field-initializers)
-subdir-ccflags-y += $(call cc-disable-warning, implicit-fallthrough)
subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable)
# clang warnings
subdir-ccflags-y += $(call cc-disable-warning, sign-compare)
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index b5ee84437f50..8eb2b3ec01ed 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -3575,7 +3575,8 @@ static void intel_enable_ddi(struct intel_encoder *encoder,
/* Enable hdcp if it's desired */
if (conn_state->content_protection ==
DRM_MODE_CONTENT_PROTECTION_DESIRED)
- intel_hdcp_enable(to_intel_connector(conn_state->connector));
+ intel_hdcp_enable(to_intel_connector(conn_state->connector),
+ (u8)conn_state->hdcp_content_type);
}
static void intel_disable_ddi_dp(struct intel_encoder *encoder,
@@ -3644,15 +3645,41 @@ static void intel_ddi_update_pipe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ bool content_protection_type_changed =
+ (conn_state->hdcp_content_type != hdcp->content_type &&
+ conn_state->content_protection !=
+ DRM_MODE_CONTENT_PROTECTION_UNDESIRED);
+
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
intel_ddi_update_pipe_dp(encoder, crtc_state, conn_state);
+ /*
+ * If a Type change is requested during the HDCP encryption session,
+ * disable HDCP and re-enable it with the new Type value.
+ */
if (conn_state->content_protection ==
- DRM_MODE_CONTENT_PROTECTION_DESIRED)
- intel_hdcp_enable(to_intel_connector(conn_state->connector));
- else if (conn_state->content_protection ==
- DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
- intel_hdcp_disable(to_intel_connector(conn_state->connector));
+ DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
+ content_protection_type_changed)
+ intel_hdcp_disable(connector);
+
+ /*
+ * Mark the hdcp state as DESIRED after the hdcp disable step of the
+ * type change procedure.
+ */
+ if (content_protection_type_changed) {
+ mutex_lock(&hdcp->mutex);
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ schedule_work(&hdcp->prop_work);
+ mutex_unlock(&hdcp->mutex);
+ }
+
+ if (conn_state->content_protection ==
+ DRM_MODE_CONTENT_PROTECTION_DESIRED ||
+ content_protection_type_changed)
+ intel_hdcp_enable(connector, (u8)conn_state->hdcp_content_type);
}
static void
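Condensing the intel_ddi_update_pipe() changes above: when a different HDCP content type is requested mid-session, the driver tears the link protection down and brings it back up with the new type, keeping the property at DESIRED in between. A hedged summary sketch; this helper does not exist in the tree, although the field and function names follow the patch:

/*
 * Hypothetical condensed form of the type-change handling added to
 * intel_ddi_update_pipe() above.
 */
static void hdcp_handle_type_change(struct intel_connector *connector,
				    const struct drm_connector_state *state)
{
	struct intel_hdcp *hdcp = &connector->hdcp;
	bool type_changed =
		state->hdcp_content_type != hdcp->content_type &&
		state->content_protection !=
			DRM_MODE_CONTENT_PROTECTION_UNDESIRED;

	/* 1. Tear down the session on UNDESIRED or on a type change. */
	if (state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_UNDESIRED || type_changed)
		intel_hdcp_disable(connector);

	/* 2. A type change still wants protection: flip back to DESIRED. */
	if (type_changed) {
		mutex_lock(&hdcp->mutex);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		mutex_unlock(&hdcp->mutex);
	}

	/* 3. Re-enable with the newly requested content type. */
	if (state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_DESIRED || type_changed)
		intel_hdcp_enable(connector, (u8)state->hdcp_content_type);
}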
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index fa60e64a15ff..b51d1ceb8739 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -29,7 +29,7 @@
#include <linux/intel-iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
@@ -12185,7 +12185,7 @@ static bool check_digital_port_conflicts(struct intel_atomic_state *state)
case INTEL_OUTPUT_DDI:
if (WARN_ON(!HAS_DDI(to_i915(dev))))
break;
- /* else: fall through */
+ /* else, fall through */
case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_HDMI:
case INTEL_OUTPUT_EDP:
@@ -14414,7 +14414,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
if (ret < 0)
return ret;
- fence = reservation_object_get_excl_rcu(obj->base.resv);
+ fence = dma_resv_get_excl_rcu(obj->base.resv);
if (fence) {
add_rps_boost_after_vblank(new_state->crtc, fence);
dma_fence_put(fence);
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index db1db61fea24..6ec5ceeab601 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -867,7 +867,6 @@ static void intel_hdcp_prop_work(struct work_struct *work)
prop_work);
struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
struct drm_device *dev = connector->base.dev;
- struct drm_connector_state *state;
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
mutex_lock(&hdcp->mutex);
@@ -877,10 +876,9 @@ static void intel_hdcp_prop_work(struct work_struct *work)
* those to UNDESIRED is handled by core. If value == UNDESIRED,
* we're running just after hdcp has been disabled, so just exit
*/
- if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
- state = connector->base.state;
- state->content_protection = hdcp->value;
- }
+ if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
+ drm_hdcp_update_content_protection(&connector->base,
+ hdcp->value);
mutex_unlock(&hdcp->mutex);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
@@ -1751,14 +1749,15 @@ static const struct component_ops i915_hdcp_component_ops = {
.unbind = i915_hdcp_component_unbind,
};
-static inline int initialize_hdcp_port_data(struct intel_connector *connector)
+static inline int initialize_hdcp_port_data(struct intel_connector *connector,
+ const struct intel_hdcp_shim *shim)
{
struct intel_hdcp *hdcp = &connector->hdcp;
struct hdcp_port_data *data = &hdcp->port_data;
data->port = connector->encoder->port;
data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
- data->protocol = (u8)hdcp->shim->protocol;
+ data->protocol = (u8)shim->protocol;
data->k = 1;
if (!data->streams)
@@ -1808,12 +1807,13 @@ void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
}
}
-static void intel_hdcp2_init(struct intel_connector *connector)
+static void intel_hdcp2_init(struct intel_connector *connector,
+ const struct intel_hdcp_shim *shim)
{
struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
- ret = initialize_hdcp_port_data(connector);
+ ret = initialize_hdcp_port_data(connector, shim);
if (ret) {
DRM_DEBUG_KMS("Mei hdcp data init failed\n");
return;
@@ -1832,23 +1832,28 @@ int intel_hdcp_init(struct intel_connector *connector,
if (!shim)
return -EINVAL;
- ret = drm_connector_attach_content_protection_property(&connector->base);
- if (ret)
+ if (is_hdcp2_supported(dev_priv))
+ intel_hdcp2_init(connector, shim);
+
+ ret =
+ drm_connector_attach_content_protection_property(&connector->base,
+ hdcp->hdcp2_supported);
+ if (ret) {
+ hdcp->hdcp2_supported = false;
+ kfree(hdcp->port_data.streams);
return ret;
+ }
hdcp->shim = shim;
mutex_init(&hdcp->mutex);
INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
-
- if (is_hdcp2_supported(dev_priv))
- intel_hdcp2_init(connector);
init_waitqueue_head(&hdcp->cp_irq_queue);
return 0;
}
-int intel_hdcp_enable(struct intel_connector *connector)
+int intel_hdcp_enable(struct intel_connector *connector, u8 content_type)
{
struct intel_hdcp *hdcp = &connector->hdcp;
unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
@@ -1859,6 +1864,7 @@ int intel_hdcp_enable(struct intel_connector *connector)
mutex_lock(&hdcp->mutex);
WARN_ON(hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
+ hdcp->content_type = content_type;
/*
* Considering that HDCP2.2 is more secure than HDCP1.4, If the setup
@@ -1870,8 +1876,12 @@ int intel_hdcp_enable(struct intel_connector *connector)
check_link_interval = DRM_HDCP2_CHECK_PERIOD_MS;
}
- /* When HDCP2.2 fails, HDCP1.4 will be attempted */
- if (ret && intel_hdcp_capable(connector)) {
+ /*
+ * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will
+ * be attempted.
+ */
+ if (ret && intel_hdcp_capable(connector) &&
+ hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
ret = _intel_hdcp_enable(connector);
}
@@ -1953,12 +1963,15 @@ void intel_hdcp_atomic_check(struct drm_connector *connector,
/*
* Nothing to do if the state didn't change, or HDCP was activated since
- * the last commit
+ * the last commit, and the hdcp content type did not change.
*/
if (old_cp == new_cp ||
(old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
- new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED))
- return;
+ new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) {
+ if (old_state->hdcp_content_type ==
+ new_state->hdcp_content_type)
+ return;
+ }
crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
new_state->crtc);
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h
index be8da85c866a..13555b054930 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.h
@@ -21,7 +21,7 @@ void intel_hdcp_atomic_check(struct drm_connector *connector,
struct drm_connector_state *new_state);
int intel_hdcp_init(struct intel_connector *connector,
const struct intel_hdcp_shim *hdcp_shim);
-int intel_hdcp_enable(struct intel_connector *connector);
+int intel_hdcp_enable(struct intel_connector *connector, u8 content_type);
int intel_hdcp_disable(struct intel_connector *connector);
bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);
bool intel_hdcp_capable(struct intel_connector *connector);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
index 6ad93a09968c..3d4f5775a4ba 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
@@ -82,7 +82,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_busy *args = data;
struct drm_i915_gem_object *obj;
- struct reservation_object_list *list;
+ struct dma_resv_list *list;
unsigned int seq;
int err;
@@ -105,7 +105,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
* Alternatively, we can trade that extra information on read/write
* activity with
* args->busy =
- * !reservation_object_test_signaled_rcu(obj->resv, true);
+ * !dma_resv_test_signaled_rcu(obj->resv, true);
* to report the overall busyness. This is what the wait-ioctl does.
*
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
index 835d6756ba49..fb0ef176ba5b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
@@ -147,7 +147,7 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
true, I915_FENCE_TIMEOUT,
I915_FENCE_GFP);
- reservation_object_add_excl_fence(obj->base.resv,
+ dma_resv_add_excl_fence(obj->base.resv,
&clflush->dma);
i915_sw_fence_commit(&clflush->wait);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
index 61d0ca5c5741..f99920652751 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
@@ -300,7 +300,7 @@ int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
if (err < 0) {
dma_fence_set_error(&work->dma, err);
} else {
- reservation_object_add_excl_fence(obj->base.resv, &work->dma);
+ dma_resv_add_excl_fence(obj->base.resv, &work->dma);
err = 0;
}
i915_gem_object_unlock(obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index cbf1701d3acc..96ce95c8ac5a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -6,7 +6,7 @@
#include <linux/dma-buf.h>
#include <linux/highmem.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
#include "i915_drv.h"
#include "i915_gem_object.h"
@@ -204,8 +204,7 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
.end_cpu_access = i915_gem_end_cpu_access,
};
-struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *gem_obj, int flags)
+struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
@@ -222,7 +221,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
return ERR_PTR(ret);
}
- return drm_gem_dmabuf_export(dev, &exp_info);
+ return drm_gem_dmabuf_export(gem_obj->dev, &exp_info);
}
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index e0693bb81452..a5c3fbb53b63 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -5,7 +5,7 @@
*/
#include <linux/intel-iommu.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>
@@ -1266,7 +1266,7 @@ relocate_entry(struct i915_vma *vma,
if (!eb->reloc_cache.vaddr &&
(DBG_FORCE_RELOC == FORCE_GPU_RELOC ||
- !reservation_object_test_signaled_rcu(vma->resv, true))) {
+ !dma_resv_test_signaled_rcu(vma->resv, true))) {
const unsigned int gen = eb->reloc_cache.gen;
unsigned int len;
u32 *batch;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_fence.c b/drivers/gpu/drm/i915/gem/i915_gem_fence.c
index c788f86faba3..2f6100ec2608 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_fence.c
@@ -77,7 +77,7 @@ i915_gem_object_lock_fence(struct drm_i915_gem_object *obj)
I915_FENCE_GFP) < 0)
goto err;
- reservation_object_add_excl_fence(obj->base.resv, &stub->dma);
+ dma_resv_add_excl_fence(obj->base.resv, &stub->dma);
return &stub->dma;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index acf6f9dc907e..dba5dd779149 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -339,7 +339,7 @@ err:
*/
if (!intel_gt_is_wedged(ggtt->vm.gt))
return VM_FAULT_SIGBUS;
- /* else: fall through */
+ /* else, fall through */
case -EAGAIN:
/*
* EAGAIN means the gpu is hung and we'll wait for the error
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 0807bb5464cf..d7855dc5a5c5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -140,7 +140,7 @@ static void __i915_gem_free_object_rcu(struct rcu_head *head)
container_of(head, typeof(*obj), rcu);
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- reservation_object_fini(&obj->base._resv);
+ dma_resv_fini(&obj->base._resv);
i915_gem_object_free(obj);
GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index abc23e7e13a7..5efb9936e05b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -99,22 +99,22 @@ i915_gem_object_put(struct drm_i915_gem_object *obj)
__drm_gem_object_put(&obj->base);
}
-#define assert_object_held(obj) reservation_object_assert_held((obj)->base.resv)
+#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)
static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
{
- reservation_object_lock(obj->base.resv, NULL);
+ dma_resv_lock(obj->base.resv, NULL);
}
static inline int
i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj)
{
- return reservation_object_lock_interruptible(obj->base.resv, NULL);
+ return dma_resv_lock_interruptible(obj->base.resv, NULL);
}
static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
- reservation_object_unlock(obj->base.resv);
+ dma_resv_unlock(obj->base.resv);
}
struct dma_fence *
@@ -367,7 +367,7 @@ i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
struct dma_fence *fence;
rcu_read_lock();
- fence = reservation_object_get_excl_rcu(obj->base.resv);
+ fence = dma_resv_get_excl_rcu(obj->base.resv);
rcu_read_unlock();
if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 92ad3cc220e3..18f0ce0135c1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -257,7 +257,7 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
switch (type) {
default:
MISSING_CASE(type);
- /* fallthrough to use PAGE_KERNEL anyway */
+ /* fallthrough - to use PAGE_KERNEL anyway */
case I915_MAP_WB:
pgprot = PAGE_KERNEL;
break;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index 26ec6579b7cd..8af55cd3e690 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -31,11 +31,10 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
}
static long
-i915_gem_object_wait_reservation(struct reservation_object *resv,
+i915_gem_object_wait_reservation(struct dma_resv *resv,
unsigned int flags,
long timeout)
{
- unsigned int seq = __read_seqcount_begin(&resv->seq);
struct dma_fence *excl;
bool prune_fences = false;
@@ -44,7 +43,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
unsigned int count, i;
int ret;
- ret = reservation_object_get_fences_rcu(resv,
+ ret = dma_resv_get_fences_rcu(resv,
&excl, &count, &shared);
if (ret)
return ret;
@@ -73,7 +72,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
*/
prune_fences = count && timeout >= 0;
} else {
- excl = reservation_object_get_excl_rcu(resv);
+ excl = dma_resv_get_excl_rcu(resv);
}
if (excl && timeout >= 0)
@@ -83,15 +82,12 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
/*
* Opportunistically prune the fences iff we know they have *all* been
- * signaled and that the reservation object has not been changed (i.e.
- * no new fences have been added).
+ * signaled.
*/
- if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) {
- if (reservation_object_trylock(resv)) {
- if (!__read_seqcount_retry(&resv->seq, seq))
- reservation_object_add_excl_fence(resv, NULL);
- reservation_object_unlock(resv);
- }
+ if (prune_fences && dma_resv_trylock(resv)) {
+ if (dma_resv_test_signaled_rcu(resv, true))
+ dma_resv_add_excl_fence(resv, NULL);
+ dma_resv_unlock(resv);
}
return timeout;
@@ -144,7 +140,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
unsigned int count, i;
int ret;
- ret = reservation_object_get_fences_rcu(obj->base.resv,
+ ret = dma_resv_get_fences_rcu(obj->base.resv,
&excl, &count, &shared);
if (ret)
return ret;
@@ -156,7 +152,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
kfree(shared);
} else {
- excl = reservation_object_get_excl_rcu(obj->base.resv);
+ excl = dma_resv_get_excl_rcu(obj->base.resv);
}
if (excl) {
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index e3a64edef918..d85d1ce273ca 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -20,7 +20,7 @@ static int igt_dmabuf_export(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
+ dmabuf = i915_gem_prime_export(&obj->base, 0);
i915_gem_object_put(obj);
if (IS_ERR(dmabuf)) {
pr_err("i915_gem_prime_export failed with err=%d\n",
@@ -44,7 +44,7 @@ static int igt_dmabuf_import_self(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
+ dmabuf = i915_gem_prime_export(&obj->base, 0);
if (IS_ERR(dmabuf)) {
pr_err("i915_gem_prime_export failed with err=%d\n",
(int)PTR_ERR(dmabuf));
@@ -219,7 +219,7 @@ static int igt_dmabuf_export_vmap(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
+ dmabuf = i915_gem_prime_export(&obj->base, 0);
if (IS_ERR(dmabuf)) {
pr_err("i915_gem_prime_export failed with err=%d\n",
(int)PTR_ERR(dmabuf));
@@ -266,7 +266,7 @@ static int igt_dmabuf_export_kmap(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
+ dmabuf = i915_gem_prime_export(&obj->base, 0);
i915_gem_object_put(obj);
if (IS_ERR(dmabuf)) {
err = PTR_ERR(dmabuf);
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index 2bc9c460e78d..09c68dda2098 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -114,18 +114,18 @@ __dma_fence_signal__timestamp(struct dma_fence *fence, ktime_t timestamp)
}
static void
-__dma_fence_signal__notify(struct dma_fence *fence)
+__dma_fence_signal__notify(struct dma_fence *fence,
+ const struct list_head *list)
{
struct dma_fence_cb *cur, *tmp;
lockdep_assert_held(fence->lock);
lockdep_assert_irqs_disabled();
- list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
+ list_for_each_entry_safe(cur, tmp, list, node) {
INIT_LIST_HEAD(&cur->node);
cur->func(fence, cur);
}
- INIT_LIST_HEAD(&fence->cb_list);
}
void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
@@ -187,11 +187,12 @@ void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
list_for_each_safe(pos, next, &signal) {
struct i915_request *rq =
list_entry(pos, typeof(*rq), signal_link);
-
- __dma_fence_signal__timestamp(&rq->fence, timestamp);
+ struct list_head cb_list;
spin_lock(&rq->lock);
- __dma_fence_signal__notify(&rq->fence);
+ list_replace(&rq->fence.cb_list, &cb_list);
+ __dma_fence_signal__timestamp(&rq->fence, timestamp);
+ __dma_fence_signal__notify(&rq->fence, &cb_list);
spin_unlock(&rq->lock);
i915_request_put(rq);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.c b/drivers/gpu/drm/i915/gt/intel_engine_pool.c
index 03d90b49584a..4cd54c569911 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pool.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pool.c
@@ -43,12 +43,12 @@ static int pool_active(struct i915_active *ref)
{
struct intel_engine_pool_node *node =
container_of(ref, typeof(*node), active);
- struct reservation_object *resv = node->obj->base.resv;
+ struct dma_resv *resv = node->obj->base.resv;
int err;
- if (reservation_object_trylock(resv)) {
- reservation_object_add_excl_fence(resv, NULL);
- reservation_object_unlock(resv);
+ if (dma_resv_trylock(resv)) {
+ dma_resv_add_excl_fence(resv, NULL);
+ dma_resv_unlock(resv);
}
err = i915_gem_object_pin_pages(node->obj);
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 58afd36617db..e753b1e706e2 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -2792,11 +2792,6 @@ static int scan_workload(struct intel_vgpu_workload *workload)
gma_head == gma_tail)
return 0;
- if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
- ret = -EINVAL;
- goto out;
- }
-
ret = ip_gma_set(&s, gma_head);
if (ret)
goto out;
@@ -2842,11 +2837,6 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
s.workload = workload;
s.is_ctx_wa = true;
- if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
- ret = -EINVAL;
- goto out;
- }
-
ret = ip_gma_set(&s, gma_head);
if (ret)
goto out;
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 41c8ebc60c63..13044c027f27 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -491,7 +491,7 @@ int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
obj->gvt_info = dmabuf_obj->info;
- dmabuf = i915_gem_prime_export(dev, &obj->base, DRM_CLOEXEC | DRM_RDWR);
+ dmabuf = i915_gem_prime_export(&obj->base, DRM_CLOEXEC | DRM_RDWR);
if (IS_ERR(dmabuf)) {
gvt_vgpu_err("export dma-buf failed\n");
ret = PTR_ERR(dmabuf);
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index 65e847392aea..8bb292b01271 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -245,7 +245,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
plane->hw_format = fmt;
plane->base = vgpu_vreg_t(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK;
- if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0))
+ if (!vgpu_gmadr_is_valid(vgpu, plane->base))
return -EINVAL;
plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
@@ -368,7 +368,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
alpha_plane, alpha_force);
plane->base = vgpu_vreg_t(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK;
- if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0))
+ if (!vgpu_gmadr_is_valid(vgpu, plane->base))
return -EINVAL;
plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
@@ -472,7 +472,7 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu,
plane->drm_format = drm_format;
plane->base = vgpu_vreg_t(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK;
- if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0))
+ if (!vgpu_gmadr_is_valid(vgpu, plane->base))
return -EINVAL;
plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 53115bdae12b..4b04af569c05 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -2141,11 +2141,20 @@ static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
unsigned long index = off >> info->gtt_entry_size_shift;
+ unsigned long gma;
struct intel_gvt_gtt_entry e;
if (bytes != 4 && bytes != 8)
return -EINVAL;
+ gma = index << I915_GTT_PAGE_SHIFT;
+ if (!intel_gvt_ggtt_validate_range(vgpu,
+ gma, 1 << I915_GTT_PAGE_SHIFT)) {
+ gvt_dbg_mm("read invalid ggtt at 0x%lx\n", gma);
+ memset(p_data, 0, bytes);
+ return 0;
+ }
+
ggtt_get_guest_entry(ggtt_mm, &e, index);
memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)),
bytes);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index d91136b39dad..343d79c1cb7e 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -1906,6 +1906,18 @@ static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
if (ret)
goto err_unmap;
+ } else if (entry->size != size) {
+ /* the same gfn with different size: unmap and re-map */
+ gvt_dma_unmap_page(vgpu, gfn, entry->dma_addr, entry->size);
+ __gvt_cache_remove_entry(vgpu, entry);
+
+ ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
+ if (ret)
+ goto err_unlock;
+
+ ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
+ if (ret)
+ goto err_unmap;
} else {
kref_get(&entry->ref);
*dma_addr = entry->dma_addr;
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index dfb64a6ec838..1a28e3666951 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -361,16 +361,13 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
wa_ctx->indirect_ctx.shadow_va = NULL;
}
-static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
- struct i915_gem_context *ctx)
+static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
+ struct i915_gem_context *ctx)
{
struct intel_vgpu_mm *mm = workload->shadow_mm;
struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ctx->vm);
int i = 0;
- if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed)
- return -EINVAL;
-
if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
px_dma(ppgtt->pd) = mm->ppgtt_mm.shadow_pdps[0];
} else {
@@ -381,8 +378,6 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i];
}
}
-
- return 0;
}
static int
@@ -611,6 +606,8 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
static int prepare_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ int ring = workload->ring_id;
int ret = 0;
ret = intel_vgpu_pin_mm(workload->shadow_mm);
@@ -619,8 +616,16 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
return ret;
}
+ if (workload->shadow_mm->type != INTEL_GVT_MM_PPGTT ||
+ !workload->shadow_mm->ppgtt_mm.shadowed) {
+ gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
+ return -EINVAL;
+ }
+
update_shadow_pdps(workload);
+ set_context_ppgtt_from_shadow(workload, s->shadow[ring]->gem_context);
+
ret = intel_vgpu_sync_oos_pages(workload->vgpu);
if (ret) {
gvt_vgpu_err("fail to vgpu sync oos pages\n");
@@ -671,7 +676,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- struct intel_vgpu_submission *s = &vgpu->submission;
struct i915_request *rq;
int ring_id = workload->ring_id;
int ret;
@@ -682,13 +686,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
mutex_lock(&vgpu->vgpu_lock);
mutex_lock(&dev_priv->drm.struct_mutex);
- ret = set_context_ppgtt_from_shadow(workload,
- s->shadow[ring_id]->gem_context);
- if (ret < 0) {
- gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
- goto err_req;
- }
-
ret = intel_gvt_workload_req_alloc(workload);
if (ret)
goto err_req;
@@ -987,6 +984,7 @@ static int workload_thread(void *priv)
int ret;
bool need_force_wake = (INTEL_GEN(gvt->dev_priv) >= 9);
DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ struct intel_runtime_pm *rpm = &gvt->dev_priv->runtime_pm;
kfree(p);
@@ -1010,6 +1008,8 @@ static int workload_thread(void *priv)
workload->ring_id, workload,
workload->vgpu->id);
+ intel_runtime_pm_get(rpm);
+
gvt_dbg_sched("ring id %d will dispatch workload %p\n",
workload->ring_id, workload);
@@ -1039,6 +1039,7 @@ complete:
intel_uncore_forcewake_put(&gvt->dev_priv->uncore,
FORCEWAKE_ALL);
+ intel_runtime_pm_put_unchecked(rpm);
if (ret && (vgpu_is_vm_unhealthy(ret)))
enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
}
@@ -1506,6 +1507,12 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
+ if (!intel_gvt_ggtt_validate_range(vgpu, start,
+ _RING_CTL_BUF_SIZE(ctl))) {
+ gvt_vgpu_err("context contain invalid rb at: 0x%x\n", start);
+ return ERR_PTR(-EINVAL);
+ }
+
workload = alloc_workload(vgpu);
if (IS_ERR(workload))
return workload;
@@ -1530,9 +1537,31 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
workload->wa_ctx.indirect_ctx.size =
(indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
CACHELINE_BYTES;
+
+ if (workload->wa_ctx.indirect_ctx.size != 0) {
+ if (!intel_gvt_ggtt_validate_range(vgpu,
+ workload->wa_ctx.indirect_ctx.guest_gma,
+ workload->wa_ctx.indirect_ctx.size)) {
+ kmem_cache_free(s->workloads, workload);
+ gvt_vgpu_err("invalid wa_ctx at: 0x%lx\n",
+ workload->wa_ctx.indirect_ctx.guest_gma);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
workload->wa_ctx.per_ctx.guest_gma =
per_ctx & PER_CTX_ADDR_MASK;
workload->wa_ctx.per_ctx.valid = per_ctx & 1;
+ if (workload->wa_ctx.per_ctx.valid) {
+ if (!intel_gvt_ggtt_validate_range(vgpu,
+ workload->wa_ctx.per_ctx.guest_gma,
+ CACHELINE_BYTES)) {
+ kmem_cache_free(s->workloads, workload);
+ gvt_vgpu_err("invalid per_ctx at: 0x%lx\n",
+ workload->wa_ctx.per_ctx.guest_gma);
+ return ERR_PTR(-EINVAL);
+ }
+ }
}
gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
diff --git a/drivers/gpu/drm/i915/gvt/trace_points.c b/drivers/gpu/drm/i915/gvt/trace_points.c
index a3deed692b9c..fe552e877e09 100644
--- a/drivers/gpu/drm/i915/gvt/trace_points.c
+++ b/drivers/gpu/drm/i915/gvt/trace_points.c
@@ -28,8 +28,6 @@
*
*/
-#include "trace.h"
-
#ifndef __CHECKER__
#define CREATE_TRACE_POINTS
#include "trace.h"
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index defe27bfcde5..b5b2a64753e6 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -2838,9 +2838,9 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
};
@@ -2850,7 +2850,7 @@ static struct drm_driver driver = {
* deal with them for Intel hardware.
*/
.driver_features =
- DRIVER_GEM | DRIVER_PRIME |
+ DRIVER_GEM |
DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ,
.release = i915_driver_release,
.open = i915_driver_open,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f506cda92f1e..eb31c1656cea 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -43,7 +43,7 @@
#include <linux/mm_types.h>
#include <linux/perf_event.h>
#include <linux/pm_qos.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
#include <linux/shmem_fs.h>
#include <linux/stackdepot.h>
@@ -2332,8 +2332,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf);
-struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *gem_obj, int flags);
+struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags);
static inline struct i915_gem_context *
__i915_gem_context_lookup_rcu(struct drm_i915_file_private *file_priv, u32 id)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 763b3c92e3e3..68976689d569 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -29,7 +29,7 @@
#include <drm/i915_drm.h>
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
new file mode 100644
index 000000000000..8675a608a6fe
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
@@ -0,0 +1,132 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#include "i915_gem_batch_pool.h"
+#include "i915_drv.h"
+
+/**
+ * DOC: batch pool
+ *
+ * In order to submit batch buffers as 'secure', the software command parser
+ * must ensure that a batch buffer cannot be modified after parsing. It does
+ * this by copying the user provided batch buffer contents to a kernel owned
+ * buffer from which the hardware will actually execute, and by carefully
+ * managing the address space bindings for such buffers.
+ *
+ * The batch pool framework provides a mechanism for the driver to manage a
+ * set of scratch buffers to use for this purpose. The framework can be
+ * extended to support other use cases should they arise.
+ */
+
+/**
+ * i915_gem_batch_pool_init() - initialize a batch buffer pool
+ * @pool: the batch buffer pool
+ * @engine: the associated request submission engine
+ */
+void i915_gem_batch_pool_init(struct i915_gem_batch_pool *pool,
+ struct intel_engine_cs *engine)
+{
+ int n;
+
+ pool->engine = engine;
+
+ for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
+ INIT_LIST_HEAD(&pool->cache_list[n]);
+}
+
+/**
+ * i915_gem_batch_pool_fini() - clean up a batch buffer pool
+ * @pool: the pool to clean up
+ *
+ * Note: Callers must hold the struct_mutex.
+ */
+void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
+{
+ int n;
+
+ lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);
+
+ for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
+ struct drm_i915_gem_object *obj, *next;
+
+ list_for_each_entry_safe(obj, next,
+ &pool->cache_list[n],
+ batch_pool_link)
+ i915_gem_object_put(obj);
+
+ INIT_LIST_HEAD(&pool->cache_list[n]);
+ }
+}
+
+/**
+ * i915_gem_batch_pool_get() - allocate a buffer from the pool
+ * @pool: the batch buffer pool
+ * @size: the minimum desired size of the returned buffer
+ *
+ * Returns an inactive buffer from @pool with at least @size bytes,
+ * with the pages pinned. The caller must i915_gem_object_unpin_pages()
+ * on the returned object.
+ *
+ * Note: Callers must hold the struct_mutex
+ *
+ * Return: the buffer object or an error pointer
+ */
+struct drm_i915_gem_object *
+i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
+ size_t size)
+{
+ struct drm_i915_gem_object *obj;
+ struct list_head *list;
+ int n, ret;
+
+ lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);
+
+ /* Compute a power-of-two bucket, but throw everything greater than
+ * 16KiB into the same bucket: i.e. the buckets hold objects of
+ * (1 page, 2 pages, 4 pages, 8+ pages).
+ */
+ n = fls(size >> PAGE_SHIFT) - 1;
+ if (n >= ARRAY_SIZE(pool->cache_list))
+ n = ARRAY_SIZE(pool->cache_list) - 1;
+ list = &pool->cache_list[n];
+
+ list_for_each_entry(obj, list, batch_pool_link) {
+ struct dma_resv *resv = obj->base.resv;
+
+ /* The batches are strictly LRU ordered */
+ if (!dma_resv_test_signaled_rcu(resv, true))
+ break;
+
+ /*
+ * The object is now idle, clear the array of shared
+ * fences before we add a new request. Although, we
+ * remain on the same engine, we may be on a different
+ * timeline and so may continually grow the array,
+ * trapping a reference to all the old fences, rather
+ * than replace the existing fence.
+ */
+ if (rcu_access_pointer(resv->fence)) {
+ dma_resv_lock(resv, NULL);
+ dma_resv_add_excl_fence(resv, NULL);
+ dma_resv_unlock(resv);
+ }
+
+ if (obj->base.size >= size)
+ goto found;
+ }
+
+ obj = i915_gem_object_create_internal(pool->engine->i915, size);
+ if (IS_ERR(obj))
+ return obj;
+
+found:
+ ret = i915_gem_object_pin_pages(obj);
+ if (ret)
+ return ERR_PTR(ret);
+
+ list_move_tail(&obj->batch_pool_link, list);
+ return obj;
+}
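As a usage note for the batch-pool API re-added above, here is a minimal caller sketch under the documented struct_mutex rule. The function below is hypothetical and assumes the engine embeds its pool as engine->batch_pool, as elsewhere in the driver at this point in history:

/*
 * Hypothetical caller of the batch pool API. Per the kerneldoc,
 * struct_mutex must be held, and the returned object comes back with
 * its pages pinned.
 */
static struct drm_i915_gem_object *
copy_batch_into_pool(struct intel_engine_cs *engine, size_t len)
{
	struct drm_i915_gem_object *obj;

	lockdep_assert_held(&engine->i915->drm.struct_mutex);

	obj = i915_gem_batch_pool_get(&engine->batch_pool, len);
	if (IS_ERR(obj))
		return obj;

	/* ... copy and parse the user-supplied batch into obj here ... */

	/* Balance the pin taken by i915_gem_batch_pool_get() when done. */
	i915_gem_object_unpin_pages(obj);
	return obj;
}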
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 52f22e88402e..e284bd76fa86 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1111,6 +1111,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
switch (engine->id) {
default:
MISSING_CASE(engine->id);
+ /* fall through */
case RCS0:
mmio = RENDER_HWS_PGA_GEN7;
break;
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 934ea9592908..f1a0a57fc6fc 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1014,7 +1014,7 @@ i915_request_await_object(struct i915_request *to,
struct dma_fence **shared;
unsigned int count, i;
- ret = reservation_object_get_fences_rcu(obj->base.resv,
+ ret = dma_resv_get_fences_rcu(obj->base.resv,
&excl, &count, &shared);
if (ret)
return ret;
@@ -1031,7 +1031,7 @@ i915_request_await_object(struct i915_request *to,
dma_fence_put(shared[i]);
kfree(shared);
} else {
- excl = reservation_object_get_excl_rcu(obj->base.resv);
+ excl = dma_resv_get_excl_rcu(obj->base.resv);
}
if (excl) {
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index dedacafc9442..6a88db291252 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -7,7 +7,7 @@
#include <linux/slab.h>
#include <linux/dma-fence.h>
#include <linux/irq_work.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
#include "i915_sw_fence.h"
#include "i915_selftest.h"
@@ -523,7 +523,7 @@ int __i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
}
int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
- struct reservation_object *resv,
+ struct dma_resv *resv,
const struct dma_fence_ops *exclude,
bool write,
unsigned long timeout,
@@ -539,7 +539,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
struct dma_fence **shared;
unsigned int count, i;
- ret = reservation_object_get_fences_rcu(resv,
+ ret = dma_resv_get_fences_rcu(resv,
&excl, &count, &shared);
if (ret)
return ret;
@@ -564,7 +564,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
dma_fence_put(shared[i]);
kfree(shared);
} else {
- excl = reservation_object_get_excl_rcu(resv);
+ excl = dma_resv_get_excl_rcu(resv);
}
if (ret >= 0 && excl && excl->ops != exclude) {
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index 518cbaad9bea..ab7d58bd0b9d 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -16,7 +16,7 @@
#include <linux/wait.h>
struct completion;
-struct reservation_object;
+struct dma_resv;
struct i915_sw_fence {
wait_queue_head_t wait;
@@ -83,7 +83,7 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
gfp_t gfp);
int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
- struct reservation_object *resv,
+ struct dma_resv *resv,
const struct dma_fence_ops *exclude,
bool write,
unsigned long timeout,
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 252edef6c59e..79f9d1fb7611 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -912,15 +912,15 @@ int i915_vma_move_to_active(struct i915_vma *vma,
rq->timeline,
rq);
- reservation_object_add_excl_fence(vma->resv, &rq->fence);
+ dma_resv_add_excl_fence(vma->resv, &rq->fence);
obj->write_domain = I915_GEM_DOMAIN_RENDER;
obj->read_domains = 0;
} else {
- err = reservation_object_reserve_shared(vma->resv, 1);
+ err = dma_resv_reserve_shared(vma->resv, 1);
if (unlikely(err))
return err;
- reservation_object_add_shared_fence(vma->resv, &rq->fence);
+ dma_resv_add_shared_fence(vma->resv, &rq->fence);
obj->write_domain = 0;
}
obj->read_domains |= I915_GEM_GPU_DOMAINS;
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 59dfe5246001..9e7d8f4154b2 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -55,7 +55,7 @@ struct i915_vma {
struct i915_address_space *vm;
const struct i915_vma_ops *ops;
struct i915_fence_reg *fence;
- struct reservation_object *resv; /** Alias of obj->resv */
+ struct dma_resv *resv; /** Alias of obj->resv */
struct sg_table *pages;
void __iomem *iomap;
void *private; /* owned by creator */
@@ -306,16 +306,16 @@ void i915_vma_close(struct i915_vma *vma);
void i915_vma_reopen(struct i915_vma *vma);
void i915_vma_destroy(struct i915_vma *vma);
-#define assert_vma_held(vma) reservation_object_assert_held((vma)->resv)
+#define assert_vma_held(vma) dma_resv_assert_held((vma)->resv)
static inline void i915_vma_lock(struct i915_vma *vma)
{
- reservation_object_lock(vma->resv, NULL);
+ dma_resv_lock(vma->resv, NULL);
}
static inline void i915_vma_unlock(struct i915_vma *vma)
{
- reservation_object_unlock(vma->resv);
+ dma_resv_unlock(vma->resv);
}
int __i915_vma_do_pin(struct i915_vma *vma,