 drivers/gpu/drm/i915/i915_cmd_parser.c     | 148
 drivers/gpu/drm/i915/i915_drv.h            |  16
 drivers/gpu/drm/i915/i915_gem_context.c    |   5
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  35
 4 files changed, 175 insertions(+), 29 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index d37c6d272a2e..80e7154f17c7 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -385,6 +385,17 @@ static const struct drm_i915_cmd_descriptor gen9_blt_cmds[] = {
.reg = { .offset = 1, .mask = 0x007FFFFC } ),
CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W,
.reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ),
+
+ /*
+ * We allow BB_START but apply further checks. We just sanitize the
+ * basic fields here.
+ */
+ CMD( MI_BATCH_BUFFER_START_GEN8, SMI, !F, 0xFF, B,
+ .bits = {{
+ .offset = 0,
+ .mask = ~SMI,
+ .expected = (MI_BATCH_PPGTT_HSW | 1),
+ }}, ),
};
#undef CMD
@@ -1012,7 +1023,7 @@ unpin_src:
return ret ? ERR_PTR(ret) : dst;
}
-static bool check_cmd(const struct intel_engine_cs *ring,
+static int check_cmd(const struct intel_engine_cs *ring,
const struct drm_i915_cmd_descriptor *desc,
const u32 *cmd, u32 length,
bool *oacontrol_set)
@@ -1122,15 +1133,114 @@ static bool check_cmd(const struct intel_engine_cs *ring,
return true;
}
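+ /*
+ * check_bbstart() - validate a recursive MI_BATCH_BUFFER_START.
+ *
+ * Checks that the jump target lies inside the user batch and lands on
+ * the start of a previously validated command, then patches the target
+ * so the jump resolves to the matching offset in the shadow copy.
+ */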
+static int check_bbstart(struct intel_context *ctx,
+ u32 *cmd, u64 offset, u32 length,
+ u32 batch_len,
+ u64 batch_start,
+ u64 shadow_batch_start)
+{
+ u64 jump_offset, jump_target;
+ u32 target_cmd_offset, target_cmd_index;
+
+ /* For igt compatibility on older platforms */
+ if (CMDPARSER_USES_GGTT(ctx->i915)) {
+ DRM_DEBUG("CMD: Rejecting BB_START for ggtt based submission\n");
+ return -EACCES;
+ }
+
+ if (length != 3) {
+ DRM_DEBUG("CMD: Recursive BB_START with bad length(%u)\n",
+ length);
+ return -EINVAL;
+ }
+
+ jump_target = *(u64 *)(cmd + 1);
+ jump_offset = jump_target - batch_start;
+
+ /*
+ * Any underflow of jump_target is guaranteed to be outside the range
+ * of a u32, so >= test catches both too large and too small
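+ * (e.g. a jump_target 4 bytes below batch_start wraps jump_offset to
+ * U64_MAX - 3, which can never be below a u32 batch_len)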
+ */
+ if (jump_offset >= batch_len) {
+ DRM_DEBUG("CMD: BB_START to 0x%llx jumps out of BB\n",
+ jump_target);
+ return -EINVAL;
+ }
+
+ /*
+ * This cannot overflow a u32 because we already checked jump_offset
+ * is within the BB, and the batch_len is a u32
+ */
+ target_cmd_offset = lower_32_bits(jump_offset);
+ target_cmd_index = target_cmd_offset / sizeof(u32);
+
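+ /* Patch the jump to land at the same offset in the shadow copy */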
+ *(u64 *)(cmd + 1) = shadow_batch_start + target_cmd_offset;
+
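+ /* Jumping back to this very BB_START needs no whitelist lookup */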
+ if (target_cmd_index == offset)
+ return 0;
+
+ if (ctx->jump_whitelist_cmds <= target_cmd_index) {
+ DRM_DEBUG("CMD: Rejecting BB_START - truncated whitelist array\n");
+ return -EINVAL;
+ } else if (!test_bit(target_cmd_index, ctx->jump_whitelist)) {
+ DRM_DEBUG("CMD: BB_START to 0x%llx not a previously executed cmd\n",
+ jump_target);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
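+ /*
+ * init_whitelist() - prepare the per-context jump whitelist for a batch.
+ *
+ * The bitmap tracks one bit per potential dword-aligned command start.
+ * Reuse and clear the existing allocation when it is large enough,
+ * otherwise grow it. GGTT platforms reject BB_START, so keep nothing.
+ */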
+static void init_whitelist(struct intel_context *ctx, u32 batch_len)
+{
+ const u32 batch_cmds = DIV_ROUND_UP(batch_len, sizeof(u32));
+ const u32 exact_size = BITS_TO_LONGS(batch_cmds);
+ u32 next_size = BITS_TO_LONGS(roundup_pow_of_two(batch_cmds));
+ unsigned long *next_whitelist;
+
+ if (CMDPARSER_USES_GGTT(ctx->i915))
+ return;
+
+ if (batch_cmds <= ctx->jump_whitelist_cmds) {
+ memset(ctx->jump_whitelist, 0, exact_size * sizeof(long));
+ return;
+ }
+
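+ /*
+ * Prefer the power-of-two size so a later, slightly larger batch can
+ * reuse the allocation; fall back to the exact size if that fails.
+ */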
+again:
+ next_whitelist = kcalloc(next_size, sizeof(long), GFP_KERNEL);
+ if (next_whitelist) {
+ kfree(ctx->jump_whitelist);
+ ctx->jump_whitelist = next_whitelist;
+ ctx->jump_whitelist_cmds =
+ next_size * BITS_PER_BYTE * sizeof(long);
+ return;
+ }
+
+ if (next_size > exact_size) {
+ next_size = exact_size;
+ goto again;
+ }
+
+ DRM_DEBUG("CMD: Failed to extend whitelist. BB_START may be disallowed\n");
+ memset(ctx->jump_whitelist, 0,
+ BITS_TO_LONGS(ctx->jump_whitelist_cmds) * sizeof(long));
+
+ return;
+}
+
#define LENGTH_BIAS 2
/**
* i915_parse_cmds() - parse a submitted batch buffer for privilege violations
+ * @ctx: the context in which the batch is to execute
* @ring: the ring on which the batch is to execute
* @batch_obj: the batch buffer in question
- * @shadow_batch_obj: copy of the batch buffer in question
+ * @user_batch_start: Canonical base address of original user batch
* @batch_start_offset: byte offset in the batch at which execution starts
* @batch_len: length of the commands in batch_obj
+ * @shadow_batch_obj: copy of the batch buffer in question
+ * @shadow_batch_start: Canonical base address of shadow_batch_obj
*
* Parses the specified batch buffer looking for privilege violations as
* described in the overview.
@@ -1138,13 +1248,16 @@ static bool check_cmd(const struct intel_engine_cs *ring,
* Return: non-zero if the parser finds violations or otherwise fails; -EACCES
* if the batch appears legal but should use hardware parsing
*/
-int i915_parse_cmds(struct intel_engine_cs *ring,
+int i915_parse_cmds(struct intel_context *ctx,
+ struct intel_engine_cs *ring,
struct drm_i915_gem_object *batch_obj,
- struct drm_i915_gem_object *shadow_batch_obj,
+ u64 user_batch_start,
u32 batch_start_offset,
- u32 batch_len)
+ u32 batch_len,
+ struct drm_i915_gem_object *shadow_batch_obj,
+ u64 shadow_batch_start)
{
- u32 *cmd, *batch_base, *batch_end;
+ u32 *cmd, *batch_base, *batch_end, offset = 0;
struct drm_i915_cmd_descriptor default_desc = { 0 };
bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */
int ret = 0;
@@ -1156,6 +1269,8 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
return PTR_ERR(batch_base);
}
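+ /* Reset (growing if necessary) the jump whitelist for this batch */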
+ init_whitelist(ctx, batch_len);
+
/*
* We use the batch length as size because the shadow object is as
* large or larger and copy_batch() will write MI_NOPs to the extra
@@ -1179,16 +1294,6 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
break;
}
- /*
- * We don't try to handle BATCH_BUFFER_START because it adds
- * non-trivial complexity. Instead we abort the scan and return
- * and error to indicate that the batch is unsafe.
- */
- if (desc->cmd.value == MI_BATCH_BUFFER_START) {
- ret = -EACCES;
- break;
- }
-
if (desc->flags & CMD_DESC_FIXED)
length = desc->length.fixed;
else
@@ -1208,7 +1313,18 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
break;
}
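+ /*
+ * A recursive BB_START hands execution to its target, so validate
+ * the jump and stop scanning the rest of this batch.
+ */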
+ if (desc->cmd.value == MI_BATCH_BUFFER_START) {
+ ret = check_bbstart(ctx, cmd, offset, length,
+ batch_len, user_batch_start,
+ shadow_batch_start);
+ break;
+ }
+
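+ /*
+ * Mark where this validated command starts so a later BB_START may
+ * jump back to it.
+ */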
+ if (ctx->jump_whitelist_cmds > offset)
+ set_bit(offset, ctx->jump_whitelist);
+
cmd += length;
+ offset += length;
}
if (oacontrol_set) {
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 6064ca4aaaef..2a815dc2af91 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -891,6 +891,12 @@ struct intel_context {
int pin_count;
} engine[I915_NUM_RINGS];
+ /* jump_whitelist: Bit array for tracking cmds during cmdparsing */
+ unsigned long *jump_whitelist;
+
+ /* jump_whitelist_cmds: Number of cmd slots the whitelist can track */
+ uint32_t jump_whitelist_cmds;
+
struct list_head link;
};
@@ -3289,11 +3295,15 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
int i915_cmd_parser_init_ring(struct intel_engine_cs *ring);
void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring);
bool i915_needs_cmd_parser(struct intel_engine_cs *ring);
-int i915_parse_cmds(struct intel_engine_cs *ring,
+int i915_parse_cmds(struct intel_context *ctx,
+ struct intel_engine_cs *ring,
struct drm_i915_gem_object *batch_obj,
- struct drm_i915_gem_object *shadow_batch_obj,
+ u64 user_batch_start,
u32 batch_start_offset,
- u32 batch_len);
+ u32 batch_len,
+ struct drm_i915_gem_object *shadow_batch_obj,
+ u64 shadow_batch_start);
+
/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 0433d25f9d23..20fb0ee1df4f 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -157,6 +157,8 @@ void i915_gem_context_free(struct kref *ctx_ref)
if (i915.enable_execlists)
intel_lr_context_free(ctx);
+ kfree(ctx->jump_whitelist);
+
/*
* This context is going away and we need to remove all VMAs still
* around. This is to handle imported shared objects for which
@@ -246,6 +248,9 @@ __create_hw_context(struct drm_device *dev,
ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD;
+ ctx->jump_whitelist = NULL;
+ ctx->jump_whitelist_cmds = 0;
+
return ctx;
err_out:
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 556849350f27..c373c45ae3d3 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1154,7 +1154,8 @@ shadow_batch_pin(struct drm_i915_gem_object *obj, struct i915_address_space *vm)
}
static struct drm_i915_gem_object*
-i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
+i915_gem_execbuffer_parse(struct intel_context *ctx,
+ struct intel_engine_cs *ring,
struct drm_i915_gem_exec_object2 *shadow_exec_entry,
struct eb_vmas *eb,
struct i915_address_space *vm,
@@ -1164,6 +1165,10 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
{
struct drm_i915_gem_object *shadow_batch_obj;
struct i915_vma *vma;
+ struct i915_vma *user_vma = list_entry(eb->vmas.prev,
+ typeof(*user_vma), exec_list);
+ u64 batch_start;
+ u64 shadow_batch_start;
int ret;
shadow_batch_obj = i915_gem_batch_pool_get(&ring->batch_pool,
@@ -1171,20 +1176,30 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
if (IS_ERR(shadow_batch_obj))
return shadow_batch_obj;
- ret = i915_parse_cmds(ring,
- batch_obj,
- shadow_batch_obj,
- batch_start_offset,
- batch_len);
- if (ret)
- goto err;
-
vma = shadow_batch_pin(shadow_batch_obj, vm);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err;
}
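+ /*
+ * GPU addresses of the user batch and of the shadow copy, used by
+ * check_bbstart() to translate and patch BB_START jump targets.
+ */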
+ batch_start = user_vma->node.start + batch_start_offset;
+
+ shadow_batch_start = vma->node.start;
+
+ ret = i915_parse_cmds(ctx,
+ ring,
+ batch_obj,
+ batch_start,
+ batch_start_offset,
+ batch_len,
+ shadow_batch_obj,
+ shadow_batch_start);
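+ /* On parse failure, drop the pin taken by shadow_batch_pin() */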
+ if (ret) {
+ WARN_ON(vma->pin_count == 0);
+ vma->pin_count--;
+ goto err;
+ }
+
i915_gem_object_unpin_pages(shadow_batch_obj);
memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
@@ -1545,7 +1560,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (batch_len == 0)
batch_len = batch_obj->base.size - batch_off;
- parsed_batch_obj = i915_gem_execbuffer_parse(ring,
+ parsed_batch_obj = i915_gem_execbuffer_parse(ctx, ring,
&shadow_exec_entry,
eb, vm,
batch_obj,