path: root/drivers/gpu/drm/i915/gvt/scheduler.c
author    Yan Zhao <yan.y.zhao@intel.com>    2020-04-15 06:58:27 +0300
committer Zhenyu Wang <zhenyuw@linux.intel.com>    2020-04-15 09:02:55 +0300
commit    e5e113079efdffb9a39e16a88d109c3d47efdfcc (patch)
tree      40b2d8b4f0cbb6d0a8f3ea0d388ad53665e3b30a    /drivers/gpu/drm/i915/gvt/scheduler.c
parent    6c2f73e26a253ae827d9754572bfee4a912e559c (diff)
download  linux-e5e113079efdffb9a39e16a88d109c3d47efdfcc.tar.xz
drm/i915/gvt: combine access to consecutive guest context pages
The IOVAs (GPAs) of the guest context pages are checked, and when they are consecutive they are read/written together in a single intel_gvt_hypervisor_read_gpa() / intel_gvt_hypervisor_write_gpa() call instead of one call per page.

Signed-off-by: Yan Zhao <yan.y.zhao@intel.com>
Reviewed-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20200415035827.26476-1-yan.y.zhao@intel.com
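The coalescing pattern the patch applies can be sketched as a small standalone C program. This is a minimal illustration of the idea only, not the driver code: translate_page() and read_range() are hypothetical stand-ins for intel_vgpu_gma_to_gpa() and intel_gvt_hypervisor_read_gpa(), and flushing inside the loop is a simplification of the goto-based flow in the diff below.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* hypothetical: map a guest page index to its guest physical address;
 * a gap is faked at page 5 so one run of pages ends and another begins */
static uint64_t translate_page(unsigned int i)
{
	return (i < 5 ? 0x100000UL : 0x400000UL) + ((uint64_t)i << PAGE_SHIFT);
}

/* hypothetical: one bulk copy of 'size' bytes starting at guest address 'gpa' */
static void read_range(uint64_t gpa, void *dst, uint64_t size)
{
	printf("read %lu page(s) at gpa 0x%llx into %p\n",
	       (unsigned long)(size >> PAGE_SHIFT),
	       (unsigned long long)gpa, dst);
}

int main(void)
{
	static char context[8 * PAGE_SIZE];	/* local shadow of the guest pages */
	uint64_t gpa_base = 0, gpa_size = 0, gpa;
	void *dst = context;
	unsigned int i, npages = 8;

	for (i = 0; i < npages; i++) {
		gpa = translate_page(i);

		if (gpa_size == 0) {
			/* start a new run of consecutive pages */
			gpa_base = gpa;
			dst = context + ((size_t)i << PAGE_SHIFT);
		} else if (gpa != gpa_base + gpa_size) {
			/* run broken: flush it with one bulk read, restart here */
			read_range(gpa_base, dst, gpa_size);
			gpa_base = gpa;
			gpa_size = 0;
			dst = context + ((size_t)i << PAGE_SHIFT);
		}
		gpa_size += PAGE_SIZE;
	}
	if (gpa_size)
		read_range(gpa_base, dst, gpa_size);	/* flush the final run */

	return 0;
}

With the fake mapping above this issues two bulk reads (pages 0-4, then pages 5-7) where a per-page loop would issue eight separate calls; update_guest_context() in the patch applies the same run-building logic with a bulk write.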
Diffstat (limited to 'drivers/gpu/drm/i915/gvt/scheduler.c')
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c  58
1 file changed, 48 insertions, 10 deletions
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 4639a56f9a3c..f650ad3367b6 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -133,6 +133,8 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
void *dst;
void *context_base;
unsigned long context_gpa, context_page_num;
+ unsigned long gpa_base; /* first gpa of consecutive GPAs */
+ unsigned long gpa_size; /* size of consecutive GPAs */
int i;
GEM_BUG_ON(!intel_context_is_pinned(ctx));
@@ -186,8 +188,11 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
if (IS_BROADWELL(gvt->gt->i915) && workload->engine->id == RCS0)
context_page_num = 19;
- i = 2;
- while (i < context_page_num) {
+ /* find consecutive GPAs from gma until the first inconsecutive GPA.
+ * read from the continuous GPAs into dst virtual address
+ */
+ gpa_size = 0;
+ for (i = 2; i < context_page_num; i++) {
context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
(u32)((workload->ctx_desc.lrca + i) <<
I915_GTT_PAGE_SHIFT));
@@ -196,10 +201,24 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
return -EFAULT;
}
+ if (gpa_size == 0) {
+ gpa_base = context_gpa;
+ dst = context_base + (i << I915_GTT_PAGE_SHIFT);
+ } else if (context_gpa != gpa_base + gpa_size)
+ goto read;
+
+ gpa_size += I915_GTT_PAGE_SIZE;
+
+ if (i == context_page_num - 1)
+ goto read;
+
+ continue;
+
+read:
+ intel_gvt_hypervisor_read_gpa(vgpu, gpa_base, dst, gpa_size);
+ gpa_base = context_gpa;
+ gpa_size = I915_GTT_PAGE_SIZE;
dst = context_base + (i << I915_GTT_PAGE_SHIFT);
- intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
- I915_GTT_PAGE_SIZE);
- i++;
}
return 0;
}
@@ -789,6 +808,8 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
void *context_base;
void *src;
unsigned long context_gpa, context_page_num;
+ unsigned long gpa_base; /* first gpa of consecutive GPAs */
+ unsigned long gpa_size; /* size of consecutive GPAs*/
int i;
u32 ring_base;
u32 head, tail;
@@ -822,11 +843,14 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
if (IS_BROADWELL(rq->i915) && rq->engine->id == RCS0)
context_page_num = 19;
- i = 2;
context_base = (void *) ctx->lrc_reg_state -
(LRC_STATE_PN << I915_GTT_PAGE_SHIFT);
- while (i < context_page_num) {
+ /* find consecutive GPAs from gma until the first inconsecutive GPA.
+ * write to the consecutive GPAs from src virtual address
+ */
+ gpa_size = 0;
+ for (i = 2; i < context_page_num; i++) {
context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
(u32)((workload->ctx_desc.lrca + i) <<
I915_GTT_PAGE_SHIFT));
@@ -835,10 +859,24 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
return;
}
+ if (gpa_size == 0) {
+ gpa_base = context_gpa;
+ src = context_base + (i << I915_GTT_PAGE_SHIFT);
+ } else if (context_gpa != gpa_base + gpa_size)
+ goto write;
+
+ gpa_size += I915_GTT_PAGE_SIZE;
+
+ if (i == context_page_num - 1)
+ goto write;
+
+ continue;
+
+write:
+ intel_gvt_hypervisor_write_gpa(vgpu, gpa_base, src, gpa_size);
+ gpa_base = context_gpa;
+ gpa_size = I915_GTT_PAGE_SIZE;
src = context_base + (i << I915_GTT_PAGE_SHIFT);
- intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
- I915_GTT_PAGE_SIZE);
- i++;
}
intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +