author    Linus Torvalds <torvalds@linux-foundation.org>  2019-03-08 19:23:15 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2019-03-08 19:23:15 +0300
commit    851ca779d110f694b5d078bc4af06d3ad37169e8 (patch)
tree      3d03de09e44ef02a6f73924f32fa21646347e64e /drivers/gpu/drm/i915/gvt
parent    b5dd0c658c31b469ccff1b637e5124851e7a4a1c (diff)
parent    4b057e73f28f1df13b77b77a52094238ffdf8abd (diff)
Merge tag 'drm-next-2019-03-06' of git://anongit.freedesktop.org/drm/drm
Pull drm updates from Dave Airlie:
 "This is the main drm pull request for the 5.1 merge window.

  The big changes I'd highlight are:

   - nouveau has HMM support now, there is finally an in-tree user so
     we can quieten down the rip-it-out people.

   - i915 now enables fastboot by default on Skylake+

   - Displayport Multistream support has been refactored and should
     hopefully be more reliable.

  Core:
   - header cleanups aiming towards removing drmP.h
   - dma-buf fence seqnos to 64-bits
   - common helper for DP mst hotplug for radeon, i915, amdgpu + new
     refcounting scheme
   - MST i2c improvements
   - drm_syncobj_cb removal
   - ARM FB compression fourcc
   - P010 + P016 fourcc
   - allwinner tiled format modifier
   - i2c over aux I2C_M_STOP support
   - DRM_AUTH handling fixes

  TTM:
   - ref/unref renaming

  New driver:
   - ARM komeda display driver

  scheduler:
   - refactor mirror list handling
   - rework hw fence processing
   - 0 run queue entity fix

  bridge:
   - TI DS90C185 LVDS bridge
   - thc631lvdm83d bridge improvements
   - cadence + allwinner DSI ported to generic phy

  panels:
   - Sitronix ST7701 panel
   - Kingdisplay KD097D04
   - LeMaker BL035-RGB-002
   - PDA 91-00156-A0
   - Innolux EE101IA-01D

  i915:
   - Enable fastboot by default on SKL+/VLV/CHV
   - Export RPCS configuration for ICL media driver
   - Coffeelake PCI ID
   - CNL clocks setup fixes
   - ACPI/PMIC support for MIPI/DSI
   - Per-engine WA init for all engines
   - Shrinker locking fixes
   - Kerneldoc updates
   - Lots of ring improvements and reset fixes
   - Coffeelake GVT Support
   - VFIO GVT EDID Region support
   - runtime PM wakeref tracking
   - ILK->IVB primary plane enable delays
   - userptr mutex locking fixes
   - DSI fixes
   - LVDS/TV cleanups
   - HW readout fixes
   - LUT robustness fixes
   - ICL display and watermark fixes
   - gem mmap race fix

  amdgpu:
   - add scheduled dependencies interface
   - DCC on scanout surfaces
   - vega10/20 BACO support
   - Multiple IH rings on soc15
   - XGMI locking fixes
   - DC i2c/aux cleanups
   - runtime SMU debug interface
   - Kexec improvements
   - SR-IOV fixes
   - DC freesync + ABM fixes
   - GDS fixes
   - GPUVM fixes
   - vega20 PCIE DPM switching fixes
   - Context priority handling fixes

  radeon:
   - fix missing break in evergreen parser

  nouveau:
   - SVM support via HMM

  msm:
   - QCOM Compressed modifier support

  exynos:
   - s5pv210 rotator support

  imx:
   - zpos property support
   - pending update fixes

  v3d:
   - cache flush improvements

  vc4:
   - reflection support
   - HDMI overscan support

  tegra:
   - CEC refactoring
   - HDMI audio fixes
   - Tegra186 prep work
   - SOR crossbar device tree fixes

  sun4i:
   - implicit fencing support
   - YUV and scaler support improvements
   - A23 support
   - tiling fixes

  atmel-hlcdc:
   - clipping and rotation property fixes

  qxl:
   - BO and PRIME improvements
   - generic fbdev emulation

  dw-hdmi:
   - HDMI 2.0 2160p
   - YUV420 output

  rockchip:
   - implicit fencing support
   - reflection properties

  virtio-gpu:
   - use generic fbdev emulation

  tilcdc:
   - cpufreq vs crtc init fix

  rcar-du:
   - R8A774C0 support
   - D3/E3 RGB output routing fixes and DPAD0 support
   - R8A7744 LVDS support

  bochs:
   - atomic and generic fbdev emulation
   - ID mismatch error on bochs load

  meson:
   - remove firmware fbs"

* tag 'drm-next-2019-03-06' of git://anongit.freedesktop.org/drm/drm: (1130 commits)
  drm/amd/display: Use vrr friendly pageflip throttling in DC.
  drm/imx: only send commit done event when all state has been applied
  drm/imx: allow building under COMPILE_TEST
  drm/imx: imx-tve: depend on COMMON_CLK
  drm/imx: ipuv3-plane: add zpos property
  drm/imx: ipuv3-plane: add function to query atomic update status
  gpu: ipu-v3: prg: add function to get channel configure status
  gpu: ipu-v3: pre: add double buffer status readback
  drm/amdgpu: Bump amdgpu version for context priority override.
  drm/amdgpu/powerplay: fix typo in BACO header guards
  drm/amdgpu/powerplay: fix return codes in BACO code
  drm/amdgpu: add missing license on baco files
  drm/bochs: Fix the ID mismatch error
  drm/nouveau/dmem: use dma addresses during migration copies
  drm/nouveau/dmem: use physical vram addresses during migration copies
  drm/nouveau/dmem: extend copy function to allow direct use of physical addresses
  drm/nouveau/svm: new ioctl to migrate process memory to GPU memory
  drm/nouveau/dmem: device memory helpers for SVM
  drm/nouveau/svm: initial support for shared virtual memory
  drm/nouveau: prepare for enabling svm with existing userspace interfaces
  ...
Diffstat (limited to 'drivers/gpu/drm/i915/gvt')
-rw-r--r--  drivers/gpu/drm/i915/gvt/Makefile        |    1
-rw-r--r--  drivers/gpu/drm/i915/gvt/aperture_gm.c   |    8
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c    |   83
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.c       |   43
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.h       |   37
-rw-r--r--  drivers/gpu/drm/i915/gvt/dmabuf.c        |    5
-rw-r--r--  drivers/gpu/drm/i915/gvt/edid.c          |   32
-rw-r--r--  drivers/gpu/drm/i915/gvt/fb_decoder.c    |   12
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.c           |  109
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h           |   11
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c      |   29
-rw-r--r--  drivers/gpu/drm/i915/gvt/hypercall.h     |   10
-rw-r--r--  drivers/gpu/drm/i915/gvt/interrupt.c     |    4
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c         |  185
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.c          |    6
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.h          |   11
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio_context.c  |   18
-rw-r--r--  drivers/gpu/drm/i915/gvt/mpt.h           |   30
-rw-r--r--  drivers/gpu/drm/i915/gvt/sched_policy.c  |    4
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c     |   11
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.h     |    2
-rw-r--r--  drivers/gpu/drm/i915/gvt/trace.h         |    2
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c          |   10
23 files changed, 443 insertions(+), 220 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile
index b016dc753db9..271fb46d4dd0 100644
--- a/drivers/gpu/drm/i915/gvt/Makefile
+++ b/drivers/gpu/drm/i915/gvt/Makefile
@@ -7,4 +7,3 @@ GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
ccflags-y += -I$(src) -I$(src)/$(GVT_DIR)
i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
-obj-$(CONFIG_DRM_I915_GVT_KVMGT) += $(GVT_DIR)/kvmgt.o
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 359d37d5c958..1fa2f65c3cd1 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -180,7 +180,7 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu)
}
mutex_unlock(&dev_priv->drm.struct_mutex);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
}
static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
@@ -206,7 +206,7 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
_clear_vgpu_fence(vgpu);
mutex_unlock(&dev_priv->drm.struct_mutex);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
return 0;
out_free_fence:
gvt_vgpu_err("Failed to alloc fences\n");
@@ -219,7 +219,7 @@ out_free_fence:
vgpu->fence.regs[i] = NULL;
}
mutex_unlock(&dev_priv->drm.struct_mutex);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
return -ENOSPC;
}
@@ -317,7 +317,7 @@ void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
intel_runtime_pm_get(dev_priv);
_clear_vgpu_fence(vgpu);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
}
/**
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 77ae634eb11c..35b4ec3f7618 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -55,10 +55,10 @@ struct sub_op_bits {
int low;
};
struct decode_info {
- char *name;
+ const char *name;
int op_len;
int nr_sub_op;
- struct sub_op_bits *sub_op;
+ const struct sub_op_bits *sub_op;
};
#define MAX_CMD_BUDGET 0x7fffffff
@@ -375,7 +375,7 @@ typedef int (*parser_cmd_handler)(struct parser_exec_state *s);
#define ADDR_FIX_5(x1, x2, x3, x4, x5) (ADDR_FIX_1(x1) | ADDR_FIX_4(x2, x3, x4, x5))
struct cmd_info {
- char *name;
+ const char *name;
u32 opcode;
#define F_LEN_MASK (1U<<0)
@@ -399,10 +399,10 @@ struct cmd_info {
#define R_VECS (1 << VECS)
#define R_ALL (R_RCS | R_VCS | R_BCS | R_VECS)
/* rings that support this cmd: BLT/RCS/VCS/VECS */
- uint16_t rings;
+ u16 rings;
/* devices that support this cmd: SNB/IVB/HSW/... */
- uint16_t devices;
+ u16 devices;
/* which DWords are address that need fix up.
* bit 0 means a 32-bit non address operand in command
@@ -412,20 +412,20 @@ struct cmd_info {
* No matter the address length, each address only takes
* one bit in the bitmap.
*/
- uint16_t addr_bitmap;
+ u16 addr_bitmap;
/* flag == F_LEN_CONST : command length
* flag == F_LEN_VAR : length bias bits
* Note: length is in DWord
*/
- uint8_t len;
+ u8 len;
parser_cmd_handler handler;
};
struct cmd_entry {
struct hlist_node hlist;
- struct cmd_info *info;
+ const struct cmd_info *info;
};
enum {
@@ -474,7 +474,7 @@ struct parser_exec_state {
int saved_buf_addr_type;
bool is_ctx_wa;
- struct cmd_info *info;
+ const struct cmd_info *info;
struct intel_vgpu_workload *workload;
};
@@ -485,12 +485,12 @@ struct parser_exec_state {
static unsigned long bypass_scan_mask = 0;
/* ring ALL, type = 0 */
-static struct sub_op_bits sub_op_mi[] = {
+static const struct sub_op_bits sub_op_mi[] = {
{31, 29},
{28, 23},
};
-static struct decode_info decode_info_mi = {
+static const struct decode_info decode_info_mi = {
"MI",
OP_LEN_MI,
ARRAY_SIZE(sub_op_mi),
@@ -498,12 +498,12 @@ static struct decode_info decode_info_mi = {
};
/* ring RCS, command type 2 */
-static struct sub_op_bits sub_op_2d[] = {
+static const struct sub_op_bits sub_op_2d[] = {
{31, 29},
{28, 22},
};
-static struct decode_info decode_info_2d = {
+static const struct decode_info decode_info_2d = {
"2D",
OP_LEN_2D,
ARRAY_SIZE(sub_op_2d),
@@ -511,14 +511,14 @@ static struct decode_info decode_info_2d = {
};
/* ring RCS, command type 3 */
-static struct sub_op_bits sub_op_3d_media[] = {
+static const struct sub_op_bits sub_op_3d_media[] = {
{31, 29},
{28, 27},
{26, 24},
{23, 16},
};
-static struct decode_info decode_info_3d_media = {
+static const struct decode_info decode_info_3d_media = {
"3D_Media",
OP_LEN_3D_MEDIA,
ARRAY_SIZE(sub_op_3d_media),
@@ -526,7 +526,7 @@ static struct decode_info decode_info_3d_media = {
};
/* ring VCS, command type 3 */
-static struct sub_op_bits sub_op_mfx_vc[] = {
+static const struct sub_op_bits sub_op_mfx_vc[] = {
{31, 29},
{28, 27},
{26, 24},
@@ -534,7 +534,7 @@ static struct sub_op_bits sub_op_mfx_vc[] = {
{20, 16},
};
-static struct decode_info decode_info_mfx_vc = {
+static const struct decode_info decode_info_mfx_vc = {
"MFX_VC",
OP_LEN_MFX_VC,
ARRAY_SIZE(sub_op_mfx_vc),
@@ -542,7 +542,7 @@ static struct decode_info decode_info_mfx_vc = {
};
/* ring VECS, command type 3 */
-static struct sub_op_bits sub_op_vebox[] = {
+static const struct sub_op_bits sub_op_vebox[] = {
{31, 29},
{28, 27},
{26, 24},
@@ -550,14 +550,14 @@ static struct sub_op_bits sub_op_vebox[] = {
{20, 16},
};
-static struct decode_info decode_info_vebox = {
+static const struct decode_info decode_info_vebox = {
"VEBOX",
OP_LEN_VEBOX,
ARRAY_SIZE(sub_op_vebox),
sub_op_vebox,
};
-static struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
+static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
[RCS] = {
&decode_info_mi,
NULL,
@@ -616,7 +616,7 @@ static struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
static inline u32 get_opcode(u32 cmd, int ring_id)
{
- struct decode_info *d_info;
+ const struct decode_info *d_info;
d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
if (d_info == NULL)
@@ -625,7 +625,7 @@ static inline u32 get_opcode(u32 cmd, int ring_id)
return cmd >> (32 - d_info->op_len);
}
-static inline struct cmd_info *find_cmd_entry(struct intel_gvt *gvt,
+static inline const struct cmd_info *find_cmd_entry(struct intel_gvt *gvt,
unsigned int opcode, int ring_id)
{
struct cmd_entry *e;
@@ -638,7 +638,7 @@ static inline struct cmd_info *find_cmd_entry(struct intel_gvt *gvt,
return NULL;
}
-static inline struct cmd_info *get_cmd_info(struct intel_gvt *gvt,
+static inline const struct cmd_info *get_cmd_info(struct intel_gvt *gvt,
u32 cmd, int ring_id)
{
u32 opcode;
@@ -657,7 +657,7 @@ static inline u32 sub_op_val(u32 cmd, u32 hi, u32 low)
static inline void print_opcode(u32 cmd, int ring_id)
{
- struct decode_info *d_info;
+ const struct decode_info *d_info;
int i;
d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
@@ -776,7 +776,7 @@ static inline int ip_gma_advance(struct parser_exec_state *s,
return 0;
}
-static inline int get_cmd_length(struct cmd_info *info, u32 cmd)
+static inline int get_cmd_length(const struct cmd_info *info, u32 cmd)
{
if ((info->flag & F_LEN_MASK) == F_LEN_CONST)
return info->len;
@@ -901,7 +901,8 @@ static int cmd_reg_handler(struct parser_exec_state *s,
* It's good enough to support initializing mmio by lri command in
* vgpu inhibit context on KBL.
*/
- if (IS_KABYLAKE(s->vgpu->gvt->dev_priv) &&
+ if ((IS_KABYLAKE(s->vgpu->gvt->dev_priv)
+ || IS_COFFEELAKE(s->vgpu->gvt->dev_priv)) &&
intel_gvt_mmio_is_in_ctx(gvt, offset) &&
!strncmp(cmd, "lri", 3)) {
intel_gvt_hypervisor_read_gpa(s->vgpu,
@@ -1280,9 +1281,7 @@ static int gen8_check_mi_display_flip(struct parser_exec_state *s,
if (!info->async_flip)
return 0;
- if (IS_SKYLAKE(dev_priv)
- || IS_KABYLAKE(dev_priv)
- || IS_BROXTON(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 9) {
stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0);
tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) &
GENMASK(12, 10)) >> 10;
@@ -1310,9 +1309,7 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12),
info->surf_val << 12);
- if (IS_SKYLAKE(dev_priv)
- || IS_KABYLAKE(dev_priv)
- || IS_BROXTON(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 9) {
set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(9, 0),
info->stride_val);
set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(12, 10),
@@ -1336,9 +1333,7 @@ static int decode_mi_display_flip(struct parser_exec_state *s,
if (IS_BROADWELL(dev_priv))
return gen8_decode_mi_display_flip(s, info);
- if (IS_SKYLAKE(dev_priv)
- || IS_KABYLAKE(dev_priv)
- || IS_BROXTON(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 9)
return skl_decode_mi_display_flip(s, info);
return -ENODEV;
@@ -1643,8 +1638,8 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size)
{
unsigned long gma = 0;
- struct cmd_info *info;
- uint32_t cmd_len = 0;
+ const struct cmd_info *info;
+ u32 cmd_len = 0;
bool bb_end = false;
struct intel_vgpu *vgpu = s->vgpu;
u32 cmd;
@@ -1842,7 +1837,7 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
static int mi_noop_index;
-static struct cmd_info cmd_info[] = {
+static const struct cmd_info cmd_info[] = {
{"MI_NOOP", OP_MI_NOOP, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
{"MI_SET_PREDICATE", OP_MI_SET_PREDICATE, F_LEN_CONST, R_ALL, D_ALL,
@@ -2521,7 +2516,7 @@ static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e)
static int cmd_parser_exec(struct parser_exec_state *s)
{
struct intel_vgpu *vgpu = s->vgpu;
- struct cmd_info *info;
+ const struct cmd_info *info;
u32 cmd;
int ret = 0;
@@ -2683,7 +2678,7 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
I915_GTT_PAGE_SIZE)))
return -EINVAL;
- ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(uint32_t);
+ ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(u32);
ring_size = round_up(wa_ctx->indirect_ctx.size + CACHELINE_BYTES,
PAGE_SIZE);
gma_head = wa_ctx->indirect_ctx.guest_gma;
@@ -2850,7 +2845,7 @@ put_obj:
static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
- uint32_t per_ctx_start[CACHELINE_DWORDS] = {0};
+ u32 per_ctx_start[CACHELINE_DWORDS] = {0};
unsigned char *bb_start_sva;
if (!wa_ctx->per_ctx.valid)
@@ -2895,10 +2890,10 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
return 0;
}
-static struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt,
+static const struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt,
unsigned int opcode, unsigned long rings)
{
- struct cmd_info *info = NULL;
+ const struct cmd_info *info = NULL;
unsigned int ring;
for_each_set_bit(ring, &rings, I915_NUM_ENGINES) {
@@ -2913,7 +2908,7 @@ static int init_cmd_table(struct intel_gvt *gvt)
{
int i;
struct cmd_entry *e;
- struct cmd_info *info;
+ const struct cmd_info *info;
unsigned int gen_type;
gen_type = intel_gvt_get_device_type(gvt);
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index df1e14145747..035479e273be 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -198,7 +198,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
SDE_PORTC_HOTPLUG_CPT |
SDE_PORTD_HOTPLUG_CPT);
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
+ IS_COFFEELAKE(dev_priv)) {
vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
SDE_PORTE_HOTPLUG_SPT);
vgpu_vreg_t(vgpu, SKL_FUSE_STATUS) |=
@@ -273,7 +274,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
}
- if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
+ if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
+ IS_COFFEELAKE(dev_priv)) &&
intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) {
vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT;
}
@@ -340,6 +342,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
port->dpcd->data_valid = true;
port->dpcd->data[DPCD_SINK_COUNT] = 0x1;
port->type = type;
+ port->id = resolution;
emulate_monitor_status_change(vgpu);
@@ -443,6 +446,36 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
}
/**
+ * intel_vgpu_emulate_hotplug - trigger hotplug event for vGPU
+ * @vgpu: a vGPU
+ * @connected: link state
+ *
+ * This function is used to trigger a hotplug interrupt for the vGPU
+ *
+ */
+void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+ /* TODO: add more platforms support */
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (connected) {
+ vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
+ SFUSE_STRAP_DDID_DETECTED;
+ vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
+ } else {
+ vgpu_vreg_t(vgpu, SFUSE_STRAP) &=
+ ~SFUSE_STRAP_DDID_DETECTED;
+ vgpu_vreg_t(vgpu, SDEISR) &= ~SDE_PORTD_HOTPLUG_CPT;
+ }
+ vgpu_vreg_t(vgpu, SDEIIR) |= SDE_PORTD_HOTPLUG_CPT;
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+ PORTD_HOTPLUG_STATUS_MASK;
+ intel_vgpu_trigger_virtual_event(vgpu, DP_D_HOTPLUG);
+ }
+}
+
+/**
* intel_vgpu_clean_display - clean vGPU virtual display emulation
* @vgpu: a vGPU
*
@@ -453,7 +486,8 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
+ IS_COFFEELAKE(dev_priv))
clean_virtual_dp_monitor(vgpu, PORT_D);
else
clean_virtual_dp_monitor(vgpu, PORT_B);
@@ -476,7 +510,8 @@ int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution)
intel_vgpu_init_i2c_edid(vgpu);
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
+ IS_COFFEELAKE(dev_priv))
return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D,
resolution);
else
diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h
index ea7c1c525b8c..a87f33e6a23c 100644
--- a/drivers/gpu/drm/i915/gvt/display.h
+++ b/drivers/gpu/drm/i915/gvt/display.h
@@ -146,18 +146,19 @@ enum intel_vgpu_port_type {
GVT_PORT_MAX
};
+enum intel_vgpu_edid {
+ GVT_EDID_1024_768,
+ GVT_EDID_1920_1200,
+ GVT_EDID_NUM,
+};
+
struct intel_vgpu_port {
/* per display EDID information */
struct intel_vgpu_edid_data *edid;
/* per display DPCD information */
struct intel_vgpu_dpcd_data *dpcd;
int type;
-};
-
-enum intel_vgpu_edid {
- GVT_EDID_1024_768,
- GVT_EDID_1920_1200,
- GVT_EDID_NUM,
+ enum intel_vgpu_edid id;
};
static inline char *vgpu_edid_str(enum intel_vgpu_edid id)
@@ -172,6 +173,30 @@ static inline char *vgpu_edid_str(enum intel_vgpu_edid id)
}
}
+static inline unsigned int vgpu_edid_xres(enum intel_vgpu_edid id)
+{
+ switch (id) {
+ case GVT_EDID_1024_768:
+ return 1024;
+ case GVT_EDID_1920_1200:
+ return 1920;
+ default:
+ return 0;
+ }
+}
+
+static inline unsigned int vgpu_edid_yres(enum intel_vgpu_edid id)
+{
+ switch (id) {
+ case GVT_EDID_1024_768:
+ return 768;
+ case GVT_EDID_1920_1200:
+ return 1200;
+ default:
+ return 0;
+ }
+}
+
void intel_gvt_emulate_vblank(struct intel_gvt *gvt);
void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt);
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 51ed99a37803..3e7e2b80c857 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -29,7 +29,6 @@
*/
#include <linux/dma-buf.h>
-#include <drm/drmP.h>
#include <linux/vfio.h>
#include "i915_drv.h"
@@ -164,9 +163,7 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
obj->read_domains = I915_GEM_DOMAIN_GTT;
obj->write_domain = 0;
- if (IS_SKYLAKE(dev_priv)
- || IS_KABYLAKE(dev_priv)
- || IS_BROXTON(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 9) {
unsigned int tiling_mode = 0;
unsigned int stride = 0;
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
index 5d4bb35bb889..1fe6124918f1 100644
--- a/drivers/gpu/drm/i915/gvt/edid.c
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -77,16 +77,32 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
return chr;
}
+static inline int cnp_get_port_from_gmbus0(u32 gmbus0)
+{
+ int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK;
+ int port = -EINVAL;
+
+ if (port_select == GMBUS_PIN_1_BXT)
+ port = PORT_B;
+ else if (port_select == GMBUS_PIN_2_BXT)
+ port = PORT_C;
+ else if (port_select == GMBUS_PIN_3_BXT)
+ port = PORT_D;
+ else if (port_select == GMBUS_PIN_4_CNP)
+ port = PORT_E;
+ return port;
+}
+
static inline int bxt_get_port_from_gmbus0(u32 gmbus0)
{
int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK;
int port = -EINVAL;
- if (port_select == 1)
+ if (port_select == GMBUS_PIN_1_BXT)
port = PORT_B;
- else if (port_select == 2)
+ else if (port_select == GMBUS_PIN_2_BXT)
port = PORT_C;
- else if (port_select == 3)
+ else if (port_select == GMBUS_PIN_3_BXT)
port = PORT_D;
return port;
}
@@ -96,13 +112,13 @@ static inline int get_port_from_gmbus0(u32 gmbus0)
int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK;
int port = -EINVAL;
- if (port_select == 2)
+ if (port_select == GMBUS_PIN_VGADDC)
port = PORT_E;
- else if (port_select == 4)
+ else if (port_select == GMBUS_PIN_DPC)
port = PORT_C;
- else if (port_select == 5)
+ else if (port_select == GMBUS_PIN_DPB)
port = PORT_B;
- else if (port_select == 6)
+ else if (port_select == GMBUS_PIN_DPD)
port = PORT_D;
return port;
}
@@ -133,6 +149,8 @@ static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
if (IS_BROXTON(dev_priv))
port = bxt_get_port_from_gmbus0(pin_select);
+ else if (IS_COFFEELAKE(dev_priv))
+ port = cnp_get_port_from_gmbus0(pin_select);
else
port = get_port_from_gmbus0(pin_select);
if (WARN_ON(port < 0))
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index 85e6736f0a32..65e847392aea 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -151,9 +151,7 @@ static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe,
u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(pipe)) & stride_mask;
u32 stride = stride_reg;
- if (IS_SKYLAKE(dev_priv)
- || IS_KABYLAKE(dev_priv)
- || IS_BROXTON(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 9) {
switch (tiled) {
case PLANE_CTL_TILED_LINEAR:
stride = stride_reg * 64;
@@ -217,9 +215,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
if (!plane->enabled)
return -ENODEV;
- if (IS_SKYLAKE(dev_priv)
- || IS_KABYLAKE(dev_priv)
- || IS_BROXTON(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 9) {
plane->tiled = val & PLANE_CTL_TILED_MASK;
fmt = skl_format_to_drm(
val & PLANE_CTL_FORMAT_MASK,
@@ -260,9 +256,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
}
plane->stride = intel_vgpu_get_stride(vgpu, pipe, plane->tiled,
- (IS_SKYLAKE(dev_priv)
- || IS_KABYLAKE(dev_priv)
- || IS_BROXTON(dev_priv)) ?
+ (INTEL_GEN(dev_priv) >= 9) ?
(_PRI_PLANE_STRIDE_MASK >> 6) :
_PRI_PLANE_STRIDE_MASK, plane->bpp);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 733a2a0d0c30..43f4242062dd 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -185,54 +185,9 @@ static const struct intel_gvt_ops intel_gvt_ops = {
.vgpu_query_plane = intel_vgpu_query_plane,
.vgpu_get_dmabuf = intel_vgpu_get_dmabuf,
.write_protect_handler = intel_vgpu_page_track_handler,
+ .emulate_hotplug = intel_vgpu_emulate_hotplug,
};
-/**
- * intel_gvt_init_host - Load MPT modules and detect if we're running in host
- *
- * This function is called at the driver loading stage. If failed to find a
- * loadable MPT module or detect currently we're running in a VM, then GVT-g
- * will be disabled
- *
- * Returns:
- * Zero on success, negative error code if failed.
- *
- */
-int intel_gvt_init_host(void)
-{
- if (intel_gvt_host.initialized)
- return 0;
-
- /* Xen DOM U */
- if (xen_domain() && !xen_initial_domain())
- return -ENODEV;
-
- /* Try to load MPT modules for hypervisors */
- if (xen_initial_domain()) {
- /* In Xen dom0 */
- intel_gvt_host.mpt = try_then_request_module(
- symbol_get(xengt_mpt), "xengt");
- intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_XEN;
- } else {
-#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
- /* not in Xen. Try KVMGT */
- intel_gvt_host.mpt = try_then_request_module(
- symbol_get(kvmgt_mpt), "kvmgt");
- intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_KVM;
-#endif
- }
-
- /* Fail to load MPT modules - bail out */
- if (!intel_gvt_host.mpt)
- return -EINVAL;
-
- gvt_dbg_core("Running with hypervisor %s in host mode\n",
- supported_hypervisors[intel_gvt_host.hypervisor_type]);
-
- intel_gvt_host.initialized = true;
- return 0;
-}
-
static void init_device_info(struct intel_gvt *gvt)
{
struct intel_gvt_device_info *info = &gvt->device_info;
@@ -316,7 +271,6 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
return;
intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
- intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
intel_gvt_cleanup_vgpu_type_groups(gvt);
intel_gvt_clean_vgpu_types(gvt);
@@ -352,13 +306,6 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
struct intel_vgpu *vgpu;
int ret;
- /*
- * Cannot initialize GVT device without intel_gvt_host gets
- * initialized first.
- */
- if (WARN_ON(!intel_gvt_host.initialized))
- return -EINVAL;
-
if (WARN_ON(dev_priv->gvt))
return -EEXIST;
@@ -420,13 +367,6 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
goto out_clean_types;
}
- ret = intel_gvt_hypervisor_host_init(&dev_priv->drm.pdev->dev, gvt,
- &intel_gvt_ops);
- if (ret) {
- gvt_err("failed to register gvt-g host device: %d\n", ret);
- goto out_clean_types;
- }
-
vgpu = intel_gvt_create_idle_vgpu(gvt);
if (IS_ERR(vgpu)) {
ret = PTR_ERR(vgpu);
@@ -441,6 +381,8 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
gvt_dbg_core("gvt device initialization is done\n");
dev_priv->gvt = gvt;
+ intel_gvt_host.dev = &dev_priv->drm.pdev->dev;
+ intel_gvt_host.initialized = true;
return 0;
out_clean_types:
@@ -467,6 +409,45 @@ out_clean_idr:
return ret;
}
-#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
-MODULE_SOFTDEP("pre: kvmgt");
-#endif
+int
+intel_gvt_register_hypervisor(struct intel_gvt_mpt *m)
+{
+ int ret;
+ void *gvt;
+
+ if (!intel_gvt_host.initialized)
+ return -ENODEV;
+
+ if (m->type != INTEL_GVT_HYPERVISOR_KVM &&
+ m->type != INTEL_GVT_HYPERVISOR_XEN)
+ return -EINVAL;
+
+ /* Get a reference for device model module */
+ if (!try_module_get(THIS_MODULE))
+ return -ENODEV;
+
+ intel_gvt_host.mpt = m;
+ intel_gvt_host.hypervisor_type = m->type;
+ gvt = (void *)kdev_to_i915(intel_gvt_host.dev)->gvt;
+
+ ret = intel_gvt_hypervisor_host_init(intel_gvt_host.dev, gvt,
+ &intel_gvt_ops);
+ if (ret < 0) {
+ gvt_err("Failed to init %s hypervisor module\n",
+ supported_hypervisors[intel_gvt_host.hypervisor_type]);
+ module_put(THIS_MODULE);
+ return -ENODEV;
+ }
+ gvt_dbg_core("Running with hypervisor %s in host mode\n",
+ supported_hypervisors[intel_gvt_host.hypervisor_type]);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(intel_gvt_register_hypervisor);
+
+void
+intel_gvt_unregister_hypervisor(void)
+{
+ intel_gvt_hypervisor_host_exit(intel_gvt_host.dev);
+ module_put(THIS_MODULE);
+}
+EXPORT_SYMBOL_GPL(intel_gvt_unregister_hypervisor);
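The two exports above invert the old intel_gvt_init_host() probing: GVT-g no longer loads an MPT module itself; instead a hypervisor backend registers its intel_gvt_mpt table once the device model is up. A minimal sketch of that contract, using a hypothetical "xyzgt" backend (kvmgt.c below is the real in-tree user):

#include <linux/module.h>
#include <linux/device.h>
#include "gvt.h"	/* intel_gvt_mpt, register/unregister declarations */

/* Sketch only: "xyzgt" and its callbacks are hypothetical. */
static int xyzgt_host_init(struct device *dev, void *gvt, const void *ops)
{
	return 0;	/* host_init is now mandatory: -ENODEV if absent */
}

static void xyzgt_host_exit(struct device *dev)
{
}

static struct intel_gvt_mpt xyzgt_mpt = {
	.type      = INTEL_GVT_HYPERVISOR_KVM,	/* or INTEL_GVT_HYPERVISOR_XEN */
	.host_init = xyzgt_host_init,
	.host_exit = xyzgt_host_exit,
	/* ... remaining MPT callbacks ... */
};

static int __init xyzgt_init(void)
{
	/* Fails with -ENODEV until intel_gvt_init_device() has marked
	 * intel_gvt_host.initialized; success pins the device model
	 * module via try_module_get(). */
	return intel_gvt_register_hypervisor(&xyzgt_mpt);
}

static void __exit xyzgt_exit(void)
{
	intel_gvt_unregister_hypervisor();	/* drops the module reference */
}

module_init(xyzgt_init);
module_exit(xyzgt_exit);
MODULE_LICENSE("GPL");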
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index b4ab1dad0143..8bce09de4b82 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -52,12 +52,8 @@
#define GVT_MAX_VGPU 8
-enum {
- INTEL_GVT_HYPERVISOR_XEN = 0,
- INTEL_GVT_HYPERVISOR_KVM,
-};
-
struct intel_gvt_host {
+ struct device *dev;
bool initialized;
int hypervisor_type;
struct intel_gvt_mpt *mpt;
@@ -540,6 +536,8 @@ int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes);
+void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected);
+
static inline u64 intel_vgpu_get_bar_gpa(struct intel_vgpu *vgpu, int bar)
{
/* We are 64bit bar. */
@@ -581,6 +579,7 @@ struct intel_gvt_ops {
int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
unsigned int);
+ void (*emulate_hotplug)(struct intel_vgpu *vgpu, bool connected);
};
@@ -597,7 +596,7 @@ static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv)
static inline void mmio_hw_access_post(struct drm_i915_private *dev_priv)
{
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
}
/**
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index e9f343b124b0..bc64b810e0d5 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -57,6 +57,8 @@ unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
return D_KBL;
else if (IS_BROXTON(gvt->dev_priv))
return D_BXT;
+ else if (IS_COFFEELAKE(gvt->dev_priv))
+ return D_CFL;
return 0;
}
@@ -276,14 +278,12 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
u32 old, new;
- uint32_t ack_reg_offset;
+ u32 ack_reg_offset;
old = vgpu_vreg(vgpu, offset);
new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);
- if (IS_SKYLAKE(vgpu->gvt->dev_priv)
- || IS_KABYLAKE(vgpu->gvt->dev_priv)
- || IS_BROXTON(vgpu->gvt->dev_priv)) {
+ if (INTEL_GEN(vgpu->gvt->dev_priv) >= 9) {
switch (offset) {
case FORCEWAKE_RENDER_GEN9_REG:
ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
@@ -833,7 +833,7 @@ static int dp_aux_ch_ctl_trans_done(struct intel_vgpu *vgpu, u32 value,
}
static void dp_aux_ch_ctl_link_training(struct intel_vgpu_dpcd_data *dpcd,
- uint8_t t)
+ u8 t)
{
if ((t & DPCD_TRAINING_PATTERN_SET_MASK) == DPCD_TRAINING_PATTERN_1) {
/* training pattern 1 for CR */
@@ -889,9 +889,7 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
write_vreg(vgpu, offset, p_data, bytes);
data = vgpu_vreg(vgpu, offset);
- if ((IS_SKYLAKE(vgpu->gvt->dev_priv)
- || IS_KABYLAKE(vgpu->gvt->dev_priv)
- || IS_BROXTON(vgpu->gvt->dev_priv))
+ if ((INTEL_GEN(vgpu->gvt->dev_priv) >= 9)
&& offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
/* SKL DPB/C/D aux ctl register changed */
return 0;
@@ -919,7 +917,7 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
if (op == GVT_AUX_NATIVE_WRITE) {
int t;
- uint8_t buf[16];
+ u8 buf[16];
if ((addr + len + 1) >= DPCD_SIZE) {
/*
@@ -1407,7 +1405,8 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
switch (cmd) {
case GEN9_PCODE_READ_MEM_LATENCY:
if (IS_SKYLAKE(vgpu->gvt->dev_priv)
- || IS_KABYLAKE(vgpu->gvt->dev_priv)) {
+ || IS_KABYLAKE(vgpu->gvt->dev_priv)
+ || IS_COFFEELAKE(vgpu->gvt->dev_priv)) {
/**
* "Read memory latency" command on gen9.
* Below memory latency values are read
@@ -1431,7 +1430,8 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
break;
case SKL_PCODE_CDCLK_CONTROL:
if (IS_SKYLAKE(vgpu->gvt->dev_priv)
- || IS_KABYLAKE(vgpu->gvt->dev_priv))
+ || IS_KABYLAKE(vgpu->gvt->dev_priv)
+ || IS_COFFEELAKE(vgpu->gvt->dev_priv))
*data0 = SKL_CDCLK_READY_FOR_CHANGE;
break;
case GEN6_PCODE_READ_RC6VIDS:
@@ -3042,8 +3042,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
- MMIO_D(_MMIO(0x4ab8), D_KBL);
- MMIO_D(_MMIO(0x2248), D_KBL | D_SKL);
+ MMIO_D(_MMIO(0x4ab8), D_KBL | D_CFL);
+ MMIO_D(_MMIO(0x2248), D_SKL_PLUS);
return 0;
}
@@ -3303,7 +3303,8 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
if (ret)
goto err;
} else if (IS_SKYLAKE(dev_priv)
- || IS_KABYLAKE(dev_priv)) {
+ || IS_KABYLAKE(dev_priv)
+ || IS_COFFEELAKE(dev_priv)) {
ret = init_broadwell_mmio_info(gvt);
if (ret)
goto err;
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index e1675a00df12..4862fb12778e 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -33,13 +33,19 @@
#ifndef _GVT_HYPERCALL_H_
#define _GVT_HYPERCALL_H_
+enum hypervisor_type {
+ INTEL_GVT_HYPERVISOR_XEN = 0,
+ INTEL_GVT_HYPERVISOR_KVM,
+};
+
/*
* Specific GVT-g MPT modules function collections. Currently GVT-g supports
* both Xen and KVM by providing dedicated hypervisor-related MPT modules.
*/
struct intel_gvt_mpt {
+ enum hypervisor_type type;
int (*host_init)(struct device *dev, void *gvt, const void *ops);
- void (*host_exit)(struct device *dev, void *gvt);
+ void (*host_exit)(struct device *dev);
int (*attach_vgpu)(void *vgpu, unsigned long *handle);
void (*detach_vgpu)(void *vgpu);
int (*inject_msi)(unsigned long handle, u32 addr, u16 data);
@@ -61,12 +67,12 @@ struct intel_gvt_mpt {
int (*set_trap_area)(unsigned long handle, u64 start, u64 end,
bool map);
int (*set_opregion)(void *vgpu);
+ int (*set_edid)(void *vgpu, int port_num);
int (*get_vfio_device)(void *vgpu);
void (*put_vfio_device)(void *vgpu);
bool (*is_valid_gfn)(unsigned long handle, unsigned long gfn);
};
extern struct intel_gvt_mpt xengt_mpt;
-extern struct intel_gvt_mpt kvmgt_mpt;
#endif /* _GVT_HYPERCALL_H_ */
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index 6b9d1354ff29..67125c5eec6e 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -581,9 +581,7 @@ static void gen8_init_irq(
SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
- } else if (IS_SKYLAKE(gvt->dev_priv)
- || IS_KABYLAKE(gvt->dev_priv)
- || IS_BROXTON(gvt->dev_priv)) {
+ } else if (INTEL_GEN(gvt->dev_priv) >= 9) {
SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT);
SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT);
SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index dd3dfd00f4e6..d5fcc447d22f 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -57,6 +57,8 @@ static const struct intel_gvt_ops *intel_gvt_ops;
#define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_OFFSET_MASK (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)
+#define EDID_BLOB_OFFSET (PAGE_SIZE/2)
+
#define OPREGION_SIGNATURE "IntelGraphicsMem"
struct vfio_region;
@@ -76,6 +78,11 @@ struct vfio_region {
void *data;
};
+struct vfio_edid_region {
+ struct vfio_region_gfx_edid vfio_edid_regs;
+ void *edid_blob;
+};
+
struct kvmgt_pgfn {
gfn_t gfn;
struct hlist_node hnode;
@@ -427,6 +434,111 @@ static const struct intel_vgpu_regops intel_vgpu_regops_opregion = {
.release = intel_vgpu_reg_release_opregion,
};
+static int handle_edid_regs(struct intel_vgpu *vgpu,
+ struct vfio_edid_region *region, char *buf,
+ size_t count, u16 offset, bool is_write)
+{
+ struct vfio_region_gfx_edid *regs = &region->vfio_edid_regs;
+ unsigned int data;
+
+ if (offset + count > sizeof(*regs))
+ return -EINVAL;
+
+ if (count != 4)
+ return -EINVAL;
+
+ if (is_write) {
+ data = *((unsigned int *)buf);
+ switch (offset) {
+ case offsetof(struct vfio_region_gfx_edid, link_state):
+ if (data == VFIO_DEVICE_GFX_LINK_STATE_UP) {
+ if (!drm_edid_block_valid(
+ (u8 *)region->edid_blob,
+ 0,
+ true,
+ NULL)) {
+ gvt_vgpu_err("invalid EDID blob\n");
+ return -EINVAL;
+ }
+ intel_gvt_ops->emulate_hotplug(vgpu, true);
+ } else if (data == VFIO_DEVICE_GFX_LINK_STATE_DOWN)
+ intel_gvt_ops->emulate_hotplug(vgpu, false);
+ else {
+ gvt_vgpu_err("invalid EDID link state %d\n",
+ regs->link_state);
+ return -EINVAL;
+ }
+ regs->link_state = data;
+ break;
+ case offsetof(struct vfio_region_gfx_edid, edid_size):
+ if (data > regs->edid_max_size) {
+ gvt_vgpu_err("EDID size is bigger than %d!\n",
+ regs->edid_max_size);
+ return -EINVAL;
+ }
+ regs->edid_size = data;
+ break;
+ default:
+ /* read-only regs */
+ gvt_vgpu_err("write read-only EDID region at offset %d\n",
+ offset);
+ return -EPERM;
+ }
+ } else {
+ memcpy(buf, (char *)regs + offset, count);
+ }
+
+ return count;
+}
+
+static int handle_edid_blob(struct vfio_edid_region *region, char *buf,
+ size_t count, u16 offset, bool is_write)
+{
+ if (offset + count > region->vfio_edid_regs.edid_size)
+ return -EINVAL;
+
+ if (is_write)
+ memcpy(region->edid_blob + offset, buf, count);
+ else
+ memcpy(buf, region->edid_blob + offset, count);
+
+ return count;
+}
+
+static size_t intel_vgpu_reg_rw_edid(struct intel_vgpu *vgpu, char *buf,
+ size_t count, loff_t *ppos, bool iswrite)
+{
+ int ret;
+ unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
+ VFIO_PCI_NUM_REGIONS;
+ struct vfio_edid_region *region =
+ (struct vfio_edid_region *)vgpu->vdev.region[i].data;
+ loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+
+ if (pos < region->vfio_edid_regs.edid_offset) {
+ ret = handle_edid_regs(vgpu, region, buf, count, pos, iswrite);
+ } else {
+ pos -= EDID_BLOB_OFFSET;
+ ret = handle_edid_blob(region, buf, count, pos, iswrite);
+ }
+
+ if (ret < 0)
+ gvt_vgpu_err("failed to access EDID region\n");
+
+ return ret;
+}
+
+static void intel_vgpu_reg_release_edid(struct intel_vgpu *vgpu,
+ struct vfio_region *region)
+{
+ kfree(region->data);
+}
+
+static const struct intel_vgpu_regops intel_vgpu_regops_edid = {
+ .rw = intel_vgpu_reg_rw_edid,
+ .release = intel_vgpu_reg_release_edid,
+};
+
static int intel_vgpu_register_reg(struct intel_vgpu *vgpu,
unsigned int type, unsigned int subtype,
const struct intel_vgpu_regops *ops,
@@ -493,6 +605,36 @@ static int kvmgt_set_opregion(void *p_vgpu)
return ret;
}
+static int kvmgt_set_edid(void *p_vgpu, int port_num)
+{
+ struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
+ struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
+ struct vfio_edid_region *base;
+ int ret;
+
+ base = kzalloc(sizeof(*base), GFP_KERNEL);
+ if (!base)
+ return -ENOMEM;
+
+ /* TODO: Add multi-port and EDID extension block support */
+ base->vfio_edid_regs.edid_offset = EDID_BLOB_OFFSET;
+ base->vfio_edid_regs.edid_max_size = EDID_SIZE;
+ base->vfio_edid_regs.edid_size = EDID_SIZE;
+ base->vfio_edid_regs.max_xres = vgpu_edid_xres(port->id);
+ base->vfio_edid_regs.max_yres = vgpu_edid_yres(port->id);
+ base->edid_blob = port->edid->edid_block;
+
+ ret = intel_vgpu_register_reg(vgpu,
+ VFIO_REGION_TYPE_GFX,
+ VFIO_REGION_SUBTYPE_GFX_EDID,
+ &intel_vgpu_regops_edid, EDID_SIZE,
+ VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE |
+ VFIO_REGION_INFO_FLAG_CAPS, base);
+
+ return ret;
+}
+
static void kvmgt_put_vfio_device(void *vgpu)
{
if (WARN_ON(!((struct intel_vgpu *)vgpu)->vdev.vfio_device))
@@ -627,6 +769,12 @@ static int intel_vgpu_open(struct mdev_device *mdev)
goto undo_iommu;
}
+ /* Take a module reference as mdev core doesn't take
+ * a reference for vendor driver.
+ */
+ if (!try_module_get(THIS_MODULE))
+ goto undo_group;
+
ret = kvmgt_guest_init(mdev);
if (ret)
goto undo_group;
@@ -679,6 +827,9 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
&vgpu->vdev.group_notifier);
WARN(ret, "vfio_unregister_notifier for group failed: %d\n", ret);
+ /* dereference module reference taken at open */
+ module_put(THIS_MODULE);
+
info = (struct kvmgt_guest_info *)vgpu->handle;
kvmgt_guest_exit(info);
@@ -703,7 +854,7 @@ static void intel_vgpu_release_work(struct work_struct *work)
__intel_vgpu_release(vgpu);
}
-static uint64_t intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
+static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
{
u32 start_lo, start_hi;
u32 mem_type;
@@ -730,10 +881,10 @@ static uint64_t intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
return ((u64)start_hi << 32) | start_lo;
}
-static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, uint64_t off,
+static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, u64 off,
void *buf, unsigned int count, bool is_write)
{
- uint64_t bar_start = intel_vgpu_get_bar_addr(vgpu, bar);
+ u64 bar_start = intel_vgpu_get_bar_addr(vgpu, bar);
int ret;
if (is_write)
@@ -745,13 +896,13 @@ static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, uint64_t off,
return ret;
}
-static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, uint64_t off)
+static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, u64 off)
{
return off >= vgpu_aperture_offset(vgpu) &&
off < vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu);
}
-static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, uint64_t off,
+static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
void *buf, unsigned long count, bool is_write)
{
void *aperture_va;
@@ -783,7 +934,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
{
struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
- uint64_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+ u64 pos = *ppos & VFIO_PCI_OFFSET_MASK;
int ret = -EINVAL;
@@ -1039,7 +1190,7 @@ static int intel_vgpu_get_irq_count(struct intel_vgpu *vgpu, int type)
static int intel_vgpu_set_intx_mask(struct intel_vgpu *vgpu,
unsigned int index, unsigned int start,
- unsigned int count, uint32_t flags,
+ unsigned int count, u32 flags,
void *data)
{
return 0;
@@ -1047,21 +1198,21 @@ static int intel_vgpu_set_intx_mask(struct intel_vgpu *vgpu,
static int intel_vgpu_set_intx_unmask(struct intel_vgpu *vgpu,
unsigned int index, unsigned int start,
- unsigned int count, uint32_t flags, void *data)
+ unsigned int count, u32 flags, void *data)
{
return 0;
}
static int intel_vgpu_set_intx_trigger(struct intel_vgpu *vgpu,
unsigned int index, unsigned int start, unsigned int count,
- uint32_t flags, void *data)
+ u32 flags, void *data)
{
return 0;
}
static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
unsigned int index, unsigned int start, unsigned int count,
- uint32_t flags, void *data)
+ u32 flags, void *data)
{
struct eventfd_ctx *trigger;
@@ -1080,12 +1231,12 @@ static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
return 0;
}
-static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, uint32_t flags,
+static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, u32 flags,
unsigned int index, unsigned int start, unsigned int count,
void *data)
{
int (*func)(struct intel_vgpu *vgpu, unsigned int index,
- unsigned int start, unsigned int count, uint32_t flags,
+ unsigned int start, unsigned int count, u32 flags,
void *data) = NULL;
switch (index) {
@@ -1477,7 +1628,7 @@ static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
return mdev_register_device(dev, &intel_vgpu_ops);
}
-static void kvmgt_host_exit(struct device *dev, void *gvt)
+static void kvmgt_host_exit(struct device *dev)
{
mdev_unregister_device(dev);
}
@@ -1871,7 +2022,8 @@ static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
return ret;
}
-struct intel_gvt_mpt kvmgt_mpt = {
+static struct intel_gvt_mpt kvmgt_mpt = {
+ .type = INTEL_GVT_HYPERVISOR_KVM,
.host_init = kvmgt_host_init,
.host_exit = kvmgt_host_exit,
.attach_vgpu = kvmgt_attach_vgpu,
@@ -1886,19 +2038,22 @@ struct intel_gvt_mpt kvmgt_mpt = {
.dma_map_guest_page = kvmgt_dma_map_guest_page,
.dma_unmap_guest_page = kvmgt_dma_unmap_guest_page,
.set_opregion = kvmgt_set_opregion,
+ .set_edid = kvmgt_set_edid,
.get_vfio_device = kvmgt_get_vfio_device,
.put_vfio_device = kvmgt_put_vfio_device,
.is_valid_gfn = kvmgt_is_valid_gfn,
};
-EXPORT_SYMBOL_GPL(kvmgt_mpt);
static int __init kvmgt_init(void)
{
+ if (intel_gvt_register_hypervisor(&kvmgt_mpt) < 0)
+ return -ENODEV;
return 0;
}
static void __exit kvmgt_exit(void)
{
+ intel_gvt_unregister_hypervisor();
}
module_init(kvmgt_init);
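With kvmgt registering the region above, a VMM drives EDID entirely through read/write on the vfio device fd: the vfio_region_gfx_edid register block sits at offset 0 of the region and the blob at edid_offset (PAGE_SIZE/2 here). A hedged userspace sketch, assuming region_offset has already been discovered via VFIO_DEVICE_GET_REGION_INFO with the VFIO_REGION_TYPE_GFX/VFIO_REGION_SUBTYPE_GFX_EDID capability:

#include <stddef.h>
#include <sys/types.h>
#include <unistd.h>
#include <linux/vfio.h>

/* Illustrative only; error handling trimmed. device_fd is the vfio
 * device fd, region_offset the start of the EDID region within it. */
static int set_guest_edid(int device_fd, off_t region_offset,
			  const void *edid, __u32 edid_len)
{
	struct vfio_region_gfx_edid hdr;
	__u32 state;

	if (pread(device_fd, &hdr, sizeof(hdr), region_offset) != sizeof(hdr))
		return -1;
	if (edid_len > hdr.edid_max_size)
		return -1;

	/* Link down, update size + blob, then link up: register writes
	 * must be 4 bytes wide, and the final write lands in
	 * handle_edid_regs() above, which validates the blob and calls
	 * emulate_hotplug(). */
	state = VFIO_DEVICE_GFX_LINK_STATE_DOWN;
	pwrite(device_fd, &state, 4, region_offset +
	       offsetof(struct vfio_region_gfx_edid, link_state));
	pwrite(device_fd, &edid_len, 4, region_offset +
	       offsetof(struct vfio_region_gfx_edid, edid_size));
	pwrite(device_fd, edid, edid_len, region_offset + hdr.edid_offset);
	state = VFIO_DEVICE_GFX_LINK_STATE_UP;
	return pwrite(device_fd, &state, 4, region_offset +
		      offsetof(struct vfio_region_gfx_edid, link_state)) == 4
		? 0 : -1;
}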
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 43f65848ecd6..ed4df2f6d60b 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -57,7 +57,7 @@ int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
(reg >= gvt->device_info.gtt_start_offset \
&& reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))
-static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
+static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, u64 pa,
void *p_data, unsigned int bytes, bool read)
{
struct intel_gvt *gvt = NULL;
@@ -99,7 +99,7 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
* Returns:
* Zero on success, negative error code if failed
*/
-int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
+int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
void *p_data, unsigned int bytes)
{
struct intel_gvt *gvt = vgpu->gvt;
@@ -171,7 +171,7 @@ out:
* Returns:
* Zero on success, negative error code if failed
*/
-int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
+int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
void *p_data, unsigned int bytes)
{
struct intel_gvt *gvt = vgpu->gvt;
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index 1ffc69eba30e..5874f1cb4306 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -43,15 +43,16 @@ struct intel_vgpu;
#define D_SKL (1 << 1)
#define D_KBL (1 << 2)
#define D_BXT (1 << 3)
+#define D_CFL (1 << 4)
-#define D_GEN9PLUS (D_SKL | D_KBL | D_BXT)
-#define D_GEN8PLUS (D_BDW | D_SKL | D_KBL | D_BXT)
+#define D_GEN9PLUS (D_SKL | D_KBL | D_BXT | D_CFL)
+#define D_GEN8PLUS (D_BDW | D_SKL | D_KBL | D_BXT | D_CFL)
-#define D_SKL_PLUS (D_SKL | D_KBL | D_BXT)
-#define D_BDW_PLUS (D_BDW | D_SKL | D_KBL | D_BXT)
+#define D_SKL_PLUS (D_SKL | D_KBL | D_BXT | D_CFL)
+#define D_BDW_PLUS (D_BDW | D_SKL | D_KBL | D_BXT | D_CFL)
#define D_PRE_SKL (D_BDW)
-#define D_ALL (D_BDW | D_SKL | D_KBL | D_BXT)
+#define D_ALL (D_BDW | D_SKL | D_KBL | D_BXT | D_CFL)
typedef int (*gvt_mmio_func)(struct intel_vgpu *, unsigned int, void *,
unsigned int);
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index d6e02c15ef97..7d84cfb9051a 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -353,8 +353,7 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
*/
fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
FW_REG_READ | FW_REG_WRITE);
- if (ring_id == RCS && (IS_SKYLAKE(dev_priv) ||
- IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)))
+ if (ring_id == RCS && (INTEL_GEN(dev_priv) >= 9))
fw |= FORCEWAKE_RENDER;
intel_uncore_forcewake_get(dev_priv, fw);
@@ -391,7 +390,8 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
return;
- if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)) && ring_id == RCS)
+ if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)
+ || IS_COFFEELAKE(dev_priv)) && ring_id == RCS)
return;
if (!pre && !gen9_render_mocs.initialized)
@@ -457,9 +457,7 @@ static void switch_mmio(struct intel_vgpu *pre,
u32 old_v, new_v;
dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
- if (IS_SKYLAKE(dev_priv)
- || IS_KABYLAKE(dev_priv)
- || IS_BROXTON(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 9)
switch_mocs(pre, next, ring_id);
for (mmio = dev_priv->gvt->engine_mmio_list.mmio;
@@ -471,8 +469,8 @@ static void switch_mmio(struct intel_vgpu *pre,
* state image on kabylake, it's initialized by lri command and
* save or restore with context together.
*/
- if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv))
- && mmio->in_context)
+ if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)
+ || IS_COFFEELAKE(dev_priv)) && mmio->in_context)
continue;
// save
@@ -565,9 +563,7 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
{
struct engine_mmio *mmio;
- if (IS_SKYLAKE(gvt->dev_priv) ||
- IS_KABYLAKE(gvt->dev_priv) ||
- IS_BROXTON(gvt->dev_priv))
+ if (INTEL_GEN(gvt->dev_priv) >= 9)
gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
else
gvt->engine_mmio_list.mmio = gen8_engine_mmio_list;
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index 3ed34123d8d1..0f9440128123 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -50,11 +50,10 @@
* Zero on success, negative error code if failed
*/
static inline int intel_gvt_hypervisor_host_init(struct device *dev,
- void *gvt, const void *ops)
+ void *gvt, const void *ops)
{
- /* optional to provide */
if (!intel_gvt_host.mpt->host_init)
- return 0;
+ return -ENODEV;
return intel_gvt_host.mpt->host_init(dev, gvt, ops);
}
@@ -62,14 +61,13 @@ static inline int intel_gvt_hypervisor_host_init(struct device *dev,
/**
* intel_gvt_hypervisor_host_exit - exit GVT-g host side
*/
-static inline void intel_gvt_hypervisor_host_exit(struct device *dev,
- void *gvt)
+static inline void intel_gvt_hypervisor_host_exit(struct device *dev)
{
/* optional to provide */
if (!intel_gvt_host.mpt->host_exit)
return;
- intel_gvt_host.mpt->host_exit(dev, gvt);
+ intel_gvt_host.mpt->host_exit(dev);
}
/**
@@ -316,6 +314,23 @@ static inline int intel_gvt_hypervisor_set_opregion(struct intel_vgpu *vgpu)
}
/**
+ * intel_gvt_hypervisor_set_edid - Set EDID region for guest
+ * @vgpu: a vGPU
+ * @port_num: display port number
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_set_edid(struct intel_vgpu *vgpu,
+ int port_num)
+{
+ if (!intel_gvt_host.mpt->set_edid)
+ return 0;
+
+ return intel_gvt_host.mpt->set_edid(vgpu, port_num);
+}
+
+/**
* intel_gvt_hypervisor_get_vfio_device - increase vfio device ref count
* @vgpu: a vGPU
*
@@ -362,4 +377,7 @@ static inline bool intel_gvt_hypervisor_is_valid_gfn(
return intel_gvt_host.mpt->is_valid_gfn(vgpu->handle, gfn);
}
+int intel_gvt_register_hypervisor(struct intel_gvt_mpt *);
+void intel_gvt_unregister_hypervisor(void);
+
#endif /* _GVT_MPT_H_ */
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index c32e7d5e8629..1c763a27a412 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -94,7 +94,7 @@ static void gvt_balance_timeslice(struct gvt_sched_data *sched_data)
{
struct vgpu_sched_data *vgpu_data;
struct list_head *pos;
- static uint64_t stage_check;
+ static u64 stage_check;
int stage = stage_check++ % GVT_TS_BALANCE_STAGE_NUM;
/* The timeslice accumulation reset at stage 0, which is
@@ -474,6 +474,6 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
}
}
spin_unlock_bh(&scheduler->mmio_context_lock);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
mutex_unlock(&vgpu->gvt->sched_lock);
}
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 55bb7885e228..1bb8f936fdaa 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -299,7 +299,8 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
void *shadow_ring_buffer_va;
u32 *cs;
- if ((IS_KABYLAKE(req->i915) || IS_BROXTON(req->i915))
+ if ((IS_KABYLAKE(req->i915) || IS_BROXTON(req->i915)
+ || IS_COFFEELAKE(req->i915))
&& is_inhibit_context(req->hw_context))
intel_vgpu_restore_inhibit_context(vgpu, req);
@@ -957,9 +958,7 @@ static int workload_thread(void *priv)
struct intel_vgpu_workload *workload = NULL;
struct intel_vgpu *vgpu = NULL;
int ret;
- bool need_force_wake = IS_SKYLAKE(gvt->dev_priv)
- || IS_KABYLAKE(gvt->dev_priv)
- || IS_BROXTON(gvt->dev_priv);
+ bool need_force_wake = (INTEL_GEN(gvt->dev_priv) >= 9);
DEFINE_WAIT_FUNC(wait, woken_wake_function);
kfree(p);
@@ -1015,7 +1014,7 @@ complete:
intel_uncore_forcewake_put(gvt->dev_priv,
FORCEWAKE_ALL);
- intel_runtime_pm_put(gvt->dev_priv);
+ intel_runtime_pm_put_unchecked(gvt->dev_priv);
if (ret && (vgpu_is_vm_unhealthy(ret)))
enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
}
@@ -1472,7 +1471,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
mutex_lock(&dev_priv->drm.struct_mutex);
ret = intel_gvt_scan_and_shadow_workload(workload);
mutex_unlock(&dev_priv->drm.struct_mutex);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
}
if (ret && (vgpu_is_vm_unhealthy(ret))) {
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 2065cba59aab..0635b2c4bed7 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -61,7 +61,7 @@ struct shadow_indirect_ctx {
unsigned long guest_gma;
unsigned long shadow_gma;
void *shadow_va;
- uint32_t size;
+ u32 size;
};
#define PER_CTX_ADDR_MASK 0xfffff000
diff --git a/drivers/gpu/drm/i915/gvt/trace.h b/drivers/gpu/drm/i915/gvt/trace.h
index 1fd64202d74e..6d787750d279 100644
--- a/drivers/gpu/drm/i915/gvt/trace.h
+++ b/drivers/gpu/drm/i915/gvt/trace.h
@@ -228,7 +228,7 @@ TRACE_EVENT(oos_sync,
TRACE_EVENT(gvt_command,
TP_PROTO(u8 vgpu_id, u8 ring_id, u32 ip_gma, u32 *cmd_va,
u32 cmd_len, u32 buf_type, u32 buf_addr_type,
- void *workload, char *cmd_name),
+ void *workload, const char *cmd_name),
TP_ARGS(vgpu_id, ring_id, ip_gma, cmd_va, cmd_len, buf_type,
buf_addr_type, workload, cmd_name),
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index c628be05fbfe..720e2b10adaa 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -148,10 +148,10 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
high_avail / vgpu_types[i].high_mm);
- if (IS_GEN8(gvt->dev_priv))
+ if (IS_GEN(gvt->dev_priv, 8))
sprintf(gvt->types[i].name, "GVTg_V4_%s",
vgpu_types[i].name);
- else if (IS_GEN9(gvt->dev_priv))
+ else if (IS_GEN(gvt->dev_priv, 9))
sprintf(gvt->types[i].name, "GVTg_V5_%s",
vgpu_types[i].name);
@@ -428,6 +428,12 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret)
goto out_clean_sched_policy;
+ /*TODO: add more platforms support */
+ if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv))
+ ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
+ if (ret)
+ goto out_clean_sched_policy;
+
return vgpu;
out_clean_sched_policy: