author    Linus Torvalds <torvalds@linux-foundation.org>  2019-03-08 19:23:15 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2019-03-08 19:23:15 +0300
commit    851ca779d110f694b5d078bc4af06d3ad37169e8 (patch)
tree      3d03de09e44ef02a6f73924f32fa21646347e64e  /drivers/gpu/drm/i915
parent    b5dd0c658c31b469ccff1b637e5124851e7a4a1c (diff)
parent    4b057e73f28f1df13b77b77a52094238ffdf8abd (diff)
download  linux-851ca779d110f694b5d078bc4af06d3ad37169e8.tar.xz
Merge tag 'drm-next-2019-03-06' of git://anongit.freedesktop.org/drm/drm
Pull drm updates from Dave Airlie:
 "This is the main drm pull request for the 5.1 merge window. The big
  changes I'd highlight are:

   - nouveau has HMM support now, there is finally an in-tree user so
     we can quieten down the rip it out people.

   - i915 now enables fastboot by default on Skylake+

   - Displayport Multistream support has been refactored and should
     hopefully be more reliable.

  Core:
   - header cleanups aiming towards removing drmP.h
   - dma-buf fence seqnos to 64-bits
   - common helper for DP mst hotplug for radeon,i915,amdgpu + new
     refcounting scheme
   - MST i2c improvements
   - drm_syncobj_cb removal
   - ARM FB compression fourcc
   - P010 + P016 fourcc
   - allwinner tiled format modifier
   - i2c over aux I2C_M_STOP support
   - DRM_AUTH handling fixes

  TTM:
   - ref/unref renaming

  New driver:
   - ARM komeda display driver

  scheduler:
   - refactor mirror list handling
   - rework hw fence processing
   - 0 run queue entity fix

  bridge:
   - TI DS90C185 LVDS bridge
   - thc631lvdm83d bridge improvements
   - cadence + allwinner DSI ported to generic phy

  panels:
   - Sitronix ST7701 panel
   - Kingdisplay KD097D04
   - LeMaker BL035-RGB-002
   - PDA 91-00156-A0
   - Innolux EE101IA-01D

  i915:
   - Enable fastboot by default on SKL+/VLV/CHV
   - Export RPCS configuration for ICL media driver
   - Coffeelake PCI ID
   - CNL clocks setup fixes
   - ACPI/PMIC support for MIPI/DSI
   - Per-engine WA init for all engines
   - Shrinker locking fixes
   - Kerneldoc updates
   - Lots of ring improvements and reset fixes
   - Coffeelake GVT Support
   - VFIO GVT EDID Region support
   - runtime PM wakeref tracking
   - ILK->IVB primary plane enable delays
   - userptr mutex locking fixes
   - DSI fixes
   - LVDS/TV cleanups
   - HW readout fixes
   - LUT robustness fixes
   - ICL display and watermark fixes
   - gem mmap race fix

  amdgpu:
   - add scheduled dependencies interface
   - DCC on scanout surfaces
   - vega10/20 BACO support
   - Multiple IH rings on soc15
   - XGMI locking fixes
   - DC i2c/aux cleanups
   - runtime SMU debug interface
   - Kexec improvements
   - SR-IOV fixes
   - DC freesync + ABM fixes
   - GDS fixes
   - GPUVM fixes
   - vega20 PCIE DPM switching fixes
   - Context priority handling fixes

  radeon:
   - fix missing break in evergreen parser

  nouveau:
   - SVM support via HMM

  msm:
   - QCOM Compressed modifier support

  exynos:
   - s5pv210 rotator support

  imx:
   - zpos property support
   - pending update fixes

  v3d:
   - cache flush improvements

  vc4:
   - reflection support
   - HDMI overscan support

  tegra:
   - CEC refactoring
   - HDMI audio fixes
   - Tegra186 prep work
   - SOR crossbar device tree fixes

  sun4i:
   - implicit fencing support
   - YUV and scaler support improvements
   - A23 support
   - tiling fixes

  atmel-hlcdc:
   - clipping and rotation property fixes

  qxl:
   - BO and PRIME improvements
   - generic fbdev emulation

  dw-hdmi:
   - HDMI 2.0 2160p
   - YUV420 output

  rockchip:
   - implicit fencing support
   - reflection properties

  virtio-gpu:
   - use generic fbdev emulation

  tilcdc:
   - cpufreq vs crtc init fix

  rcar-du:
   - R8A774C0 support
   - D3/E3 RGB output routing fixes and DPAD0 support
   - RA87744 LVDS support

  bochs:
   - atomic and generic fbdev emulation
   - ID mismatch error on bochs load

  meson:
   - remove firmware fbs"

* tag 'drm-next-2019-03-06' of git://anongit.freedesktop.org/drm/drm: (1130 commits)
  drm/amd/display: Use vrr friendly pageflip throttling in DC.
  drm/imx: only send commit done event when all state has been applied
  drm/imx: allow building under COMPILE_TEST
  drm/imx: imx-tve: depend on COMMON_CLK
  drm/imx: ipuv3-plane: add zpos property
  drm/imx: ipuv3-plane: add function to query atomic update status
  gpu: ipu-v3: prg: add function to get channel configure status
  gpu: ipu-v3: pre: add double buffer status readback
  drm/amdgpu: Bump amdgpu version for context priority override.
  drm/amdgpu/powerplay: fix typo in BACO header guards
  drm/amdgpu/powerplay: fix return codes in BACO code
  drm/amdgpu: add missing license on baco files
  drm/bochs: Fix the ID mismatch error
  drm/nouveau/dmem: use dma addresses during migration copies
  drm/nouveau/dmem: use physical vram addresses during migration copies
  drm/nouveau/dmem: extend copy function to allow direct use of physical addresses
  drm/nouveau/svm: new ioctl to migrate process memory to GPU memory
  drm/nouveau/dmem: device memory helpers for SVM
  drm/nouveau/svm: initial support for shared virtual memory
  drm/nouveau: prepare for enabling svm with existing userspace interfaces
  ...
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/Kconfig.debug | 3
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 12
-rw-r--r--  drivers/gpu/drm/i915/dvo.h | 1
-rw-r--r--  drivers/gpu/drm/i915/gvt/Makefile | 1
-rw-r--r--  drivers/gpu/drm/i915/gvt/aperture_gm.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c | 83
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.c | 43
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.h | 37
-rw-r--r--  drivers/gpu/drm/i915/gvt/dmabuf.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gvt/edid.c | 32
-rw-r--r--  drivers/gpu/drm/i915/gvt/fb_decoder.c | 12
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.c | 109
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h | 11
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 29
-rw-r--r--  drivers/gpu/drm/i915/gvt/hypercall.h | 10
-rw-r--r--  drivers/gpu/drm/i915/gvt/interrupt.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 185
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.h | 11
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio_context.c | 18
-rw-r--r--  drivers/gpu/drm/i915/gvt/mpt.h | 30
-rw-r--r--  drivers/gpu/drm/i915/gvt/sched_policy.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 11
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/trace.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_active.c | 286
-rw-r--r--  drivers/gpu/drm/i915/i915_active.h | 425
-rw-r--r--  drivers/gpu/drm/i915/i915_active_types.h | 36
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 1064
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 283
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 519
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 961
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 388
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.h | 26
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 90
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 89
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_fence_reg.c | 29
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_fence_reg.h | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 229
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h | 57
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_internal.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_object.h | 57
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_shrinker.c | 175
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 13
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c | 225
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 296
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.h | 41
-rw-r--r--  drivers/gpu/drm/i915/i915_ioc32.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 475
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c | 38
-rw-r--r--  drivers/gpu/drm/i915/i915_params.h | 13
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c | 33
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c | 33
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.c | 23
-rw-r--r--  drivers/gpu/drm/i915/i915_query.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 300
-rw-r--r--  drivers/gpu/drm/i915/i915_request.c | 452
-rw-r--r--  drivers/gpu/drm/i915/i915_request.h | 497
-rw-r--r--  drivers/gpu/drm/i915/i915_reset.c | 1349
-rw-r--r--  drivers/gpu/drm/i915/i915_reset.h | 59
-rw-r--r--  drivers/gpu/drm/i915/i915_scheduler.c | 29
-rw-r--r--  drivers/gpu/drm/i915/i915_selftest.h | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 17
-rw-r--r--  drivers/gpu/drm/i915/i915_sw_fence.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_timeline.c | 257
-rw-r--r--  drivers/gpu/drm/i915/i915_timeline.h | 61
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 57
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c | 261
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.h | 58
-rw-r--r--  drivers/gpu/drm/i915/icl_dsi.c | 45
-rw-r--r--  drivers/gpu/drm/i915/intel_acpi.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic.c | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic_plane.c | 43
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 30
-rw-r--r--  drivers/gpu/drm/i915/intel_breadcrumbs.c | 911
-rw-r--r--  drivers/gpu/drm/i915/intel_cdclk.c | 60
-rw-r--r--  drivers/gpu/drm/i915/intel_color.c | 411
-rw-r--r--  drivers/gpu/drm/i915/intel_connector.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 91
-rw-r--r--  drivers/gpu/drm/i915/intel_csr.c | 93
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 202
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.c | 107
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.h | 35
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 876
-rw-r--r--  drivers/gpu/drm/i915/intel_display.h | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 643
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_link_training.c | 32
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c | 97
-rw-r--r--  drivers/gpu/drm/i915/intel_dpio_phy.c | 18
-rw-r--r--  drivers/gpu/drm/i915/intel_dpll_mgr.c | 302
-rw-r--r--  drivers/gpu/drm/i915/intel_dpll_mgr.h | 55
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 231
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.h | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_vbt.c | 24
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_engine_cs.c | 430
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c | 35
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_fifo_underrun.c | 24
-rw-r--r--  drivers/gpu/drm/i915/intel_frontbuffer.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_gpu_commands.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_guc.h | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_fw.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_log.c | 32
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_submission.c | 25
-rw-r--r--  drivers/gpu/drm/i915/intel_gvt.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_hangcheck.c | 207
-rw-r--r--  drivers/gpu/drm/i915/intel_hdcp.c | 21
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 59
-rw-r--r--  drivers/gpu/drm/i915/intel_hotplug.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_huc.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_huc_fw.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 23
-rw-r--r--  drivers/gpu/drm/i915/intel_lpe_audio.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 703
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.h | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_lspcon.c | 36
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 49
-rw-r--r--  drivers/gpu/drm/i915/intel_mocs.c | 408
-rw-r--r--  drivers/gpu/drm/i915/intel_mocs.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 48
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 102
-rw-r--r--  drivers/gpu/drm/i915/intel_pipe_crc.c | 32
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 1229
-rw-r--r--  drivers/gpu/drm/i915/intel_psr.c | 83
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 699
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 289
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 594
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 123
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 145
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 727
-rw-r--r--  drivers/gpu/drm/i915/intel_uc.c | 15
-rw-r--r--  drivers/gpu/drm/i915/intel_uc.h | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_uc_fw.c | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 511
-rw-r--r--  drivers/gpu/drm/i915/intel_vdsc.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_wopcm.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_workarounds.c | 199
-rw-r--r--  drivers/gpu/drm/i915/selftests/huge_pages.c | 42
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_active.c | 157
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem.c | 47
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_coherency.c | 5
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_context.c | 607
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_evict.c | 105
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 123
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_object.c | 22
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_live_selftests.h | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_mock_selftests.h | 3
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_random.c | 33
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_random.h | 3
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_request.c | 524
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_selftest.c | 47
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_timeline.c | 464
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_vma.c | 81
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_live_test.c | 78
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_live_test.h | 35
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_spinner.c | 91
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c | 470
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_guc.c | 10
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_hangcheck.c | 417
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_lrc.c | 272
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_workarounds.c | 123
-rw-r--r--  drivers/gpu/drm/i915/selftests/lib_sw_fence.c | 54
-rw-r--r--  drivers/gpu/drm/i915/selftests/lib_sw_fence.h | 3
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_context.c | 7
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_engine.c | 162
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_engine.h | 6
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gem_device.c | 26
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gtt.c | 15
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gtt.h | 4
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_timeline.c | 6
-rw-r--r--  drivers/gpu/drm/i915/vlv_dsi.c | 55
-rw-r--r--  drivers/gpu/drm/i915/vlv_dsi_pll.c | 31
180 files changed, 14420 insertions, 11034 deletions
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 9e36ffb5eb7c..ad4d71161dda 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -21,11 +21,11 @@ config DRM_I915_DEBUG
select DEBUG_FS
select PREEMPT_COUNT
select I2C_CHARDEV
+ select STACKDEPOT
select DRM_DP_AUX_CHARDEV
select X86_MSR # used by igt/pm_rpm
select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
select DRM_DEBUG_MM if DRM=y
- select STACKDEPOT if DRM=y # for DRM_DEBUG_MM
select DRM_DEBUG_SELFTEST
select SW_SYNC # signaling validation framework (igt/syncobj*)
select DRM_I915_SW_FENCE_DEBUG_OBJECTS
@@ -173,6 +173,7 @@ config DRM_I915_DEBUG_RUNTIME_PM
bool "Enable extra state checking for runtime PM"
depends on DRM_I915
default n
+ select STACKDEPOT
help
Choose this option to turn on extra state checking for the
runtime PM functionality. This may introduce overhead during
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 19b5fe5016bf..1787e1299b1b 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -22,6 +22,7 @@ subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable)
subdir-ccflags-y += $(call cc-disable-warning, sign-compare)
subdir-ccflags-y += $(call cc-disable-warning, sometimes-uninitialized)
subdir-ccflags-y += $(call cc-disable-warning, initializer-overrides)
+subdir-ccflags-y += $(call cc-disable-warning, uninitialized)
subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
# Fine grained warnings disable
@@ -40,9 +41,10 @@ i915-y := i915_drv.o \
i915_mm.o \
i915_params.o \
i915_pci.o \
- i915_suspend.o \
- i915_syncmap.o \
+ i915_reset.o \
+ i915_suspend.o \
i915_sw_fence.o \
+ i915_syncmap.o \
i915_sysfs.o \
intel_csr.o \
intel_device_info.o \
@@ -55,7 +57,9 @@ i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o intel_pipe_crc.o
i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o
# GEM code
-i915-y += i915_cmd_parser.o \
+i915-y += \
+ i915_active.o \
+ i915_cmd_parser.o \
i915_gem_batch_pool.o \
i915_gem_clflush.o \
i915_gem_context.o \
@@ -166,6 +170,7 @@ i915-$(CONFIG_DRM_I915_SELFTEST) += \
selftests/i915_random.o \
selftests/i915_selftest.o \
selftests/igt_flush_test.o \
+ selftests/igt_live_test.o \
selftests/igt_reset.o \
selftests/igt_spinner.o
@@ -198,3 +203,4 @@ endif
i915-y += intel_lpe_audio.o
obj-$(CONFIG_DRM_I915) += i915.o
+obj-$(CONFIG_DRM_I915_GVT_KVMGT) += gvt/kvmgt.o
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index 5e6a3013da49..16e0345b711f 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -24,7 +24,6 @@
#define _INTEL_DVO_H
#include <linux/i2c.h>
-#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include "intel_drv.h"
diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile
index b016dc753db9..271fb46d4dd0 100644
--- a/drivers/gpu/drm/i915/gvt/Makefile
+++ b/drivers/gpu/drm/i915/gvt/Makefile
@@ -7,4 +7,3 @@ GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
ccflags-y += -I$(src) -I$(src)/$(GVT_DIR)
i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
-obj-$(CONFIG_DRM_I915_GVT_KVMGT) += $(GVT_DIR)/kvmgt.o
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 359d37d5c958..1fa2f65c3cd1 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -180,7 +180,7 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu)
}
mutex_unlock(&dev_priv->drm.struct_mutex);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
}
static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
@@ -206,7 +206,7 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
_clear_vgpu_fence(vgpu);
mutex_unlock(&dev_priv->drm.struct_mutex);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
return 0;
out_free_fence:
gvt_vgpu_err("Failed to alloc fences\n");
@@ -219,7 +219,7 @@ out_free_fence:
vgpu->fence.regs[i] = NULL;
}
mutex_unlock(&dev_priv->drm.struct_mutex);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
return -ENOSPC;
}
@@ -317,7 +317,7 @@ void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
intel_runtime_pm_get(dev_priv);
_clear_vgpu_fence(vgpu);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
}
/**
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 77ae634eb11c..35b4ec3f7618 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -55,10 +55,10 @@ struct sub_op_bits {
int low;
};
struct decode_info {
- char *name;
+ const char *name;
int op_len;
int nr_sub_op;
- struct sub_op_bits *sub_op;
+ const struct sub_op_bits *sub_op;
};
#define MAX_CMD_BUDGET 0x7fffffff
@@ -375,7 +375,7 @@ typedef int (*parser_cmd_handler)(struct parser_exec_state *s);
#define ADDR_FIX_5(x1, x2, x3, x4, x5) (ADDR_FIX_1(x1) | ADDR_FIX_4(x2, x3, x4, x5))
struct cmd_info {
- char *name;
+ const char *name;
u32 opcode;
#define F_LEN_MASK (1U<<0)
@@ -399,10 +399,10 @@ struct cmd_info {
#define R_VECS (1 << VECS)
#define R_ALL (R_RCS | R_VCS | R_BCS | R_VECS)
/* rings that support this cmd: BLT/RCS/VCS/VECS */
- uint16_t rings;
+ u16 rings;
/* devices that support this cmd: SNB/IVB/HSW/... */
- uint16_t devices;
+ u16 devices;
/* which DWords are address that need fix up.
* bit 0 means a 32-bit non address operand in command
@@ -412,20 +412,20 @@ struct cmd_info {
* No matter the address length, each address only takes
* one bit in the bitmap.
*/
- uint16_t addr_bitmap;
+ u16 addr_bitmap;
/* flag == F_LEN_CONST : command length
* flag == F_LEN_VAR : length bias bits
* Note: length is in DWord
*/
- uint8_t len;
+ u8 len;
parser_cmd_handler handler;
};
struct cmd_entry {
struct hlist_node hlist;
- struct cmd_info *info;
+ const struct cmd_info *info;
};
enum {
@@ -474,7 +474,7 @@ struct parser_exec_state {
int saved_buf_addr_type;
bool is_ctx_wa;
- struct cmd_info *info;
+ const struct cmd_info *info;
struct intel_vgpu_workload *workload;
};
@@ -485,12 +485,12 @@ struct parser_exec_state {
static unsigned long bypass_scan_mask = 0;
/* ring ALL, type = 0 */
-static struct sub_op_bits sub_op_mi[] = {
+static const struct sub_op_bits sub_op_mi[] = {
{31, 29},
{28, 23},
};
-static struct decode_info decode_info_mi = {
+static const struct decode_info decode_info_mi = {
"MI",
OP_LEN_MI,
ARRAY_SIZE(sub_op_mi),
@@ -498,12 +498,12 @@ static struct decode_info decode_info_mi = {
};
/* ring RCS, command type 2 */
-static struct sub_op_bits sub_op_2d[] = {
+static const struct sub_op_bits sub_op_2d[] = {
{31, 29},
{28, 22},
};
-static struct decode_info decode_info_2d = {
+static const struct decode_info decode_info_2d = {
"2D",
OP_LEN_2D,
ARRAY_SIZE(sub_op_2d),
@@ -511,14 +511,14 @@ static struct decode_info decode_info_2d = {
};
/* ring RCS, command type 3 */
-static struct sub_op_bits sub_op_3d_media[] = {
+static const struct sub_op_bits sub_op_3d_media[] = {
{31, 29},
{28, 27},
{26, 24},
{23, 16},
};
-static struct decode_info decode_info_3d_media = {
+static const struct decode_info decode_info_3d_media = {
"3D_Media",
OP_LEN_3D_MEDIA,
ARRAY_SIZE(sub_op_3d_media),
@@ -526,7 +526,7 @@ static struct decode_info decode_info_3d_media = {
};
/* ring VCS, command type 3 */
-static struct sub_op_bits sub_op_mfx_vc[] = {
+static const struct sub_op_bits sub_op_mfx_vc[] = {
{31, 29},
{28, 27},
{26, 24},
@@ -534,7 +534,7 @@ static struct sub_op_bits sub_op_mfx_vc[] = {
{20, 16},
};
-static struct decode_info decode_info_mfx_vc = {
+static const struct decode_info decode_info_mfx_vc = {
"MFX_VC",
OP_LEN_MFX_VC,
ARRAY_SIZE(sub_op_mfx_vc),
@@ -542,7 +542,7 @@ static struct decode_info decode_info_mfx_vc = {
};
/* ring VECS, command type 3 */
-static struct sub_op_bits sub_op_vebox[] = {
+static const struct sub_op_bits sub_op_vebox[] = {
{31, 29},
{28, 27},
{26, 24},
@@ -550,14 +550,14 @@ static struct sub_op_bits sub_op_vebox[] = {
{20, 16},
};
-static struct decode_info decode_info_vebox = {
+static const struct decode_info decode_info_vebox = {
"VEBOX",
OP_LEN_VEBOX,
ARRAY_SIZE(sub_op_vebox),
sub_op_vebox,
};
-static struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
+static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
[RCS] = {
&decode_info_mi,
NULL,
@@ -616,7 +616,7 @@ static struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
static inline u32 get_opcode(u32 cmd, int ring_id)
{
- struct decode_info *d_info;
+ const struct decode_info *d_info;
d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
if (d_info == NULL)
@@ -625,7 +625,7 @@ static inline u32 get_opcode(u32 cmd, int ring_id)
return cmd >> (32 - d_info->op_len);
}
-static inline struct cmd_info *find_cmd_entry(struct intel_gvt *gvt,
+static inline const struct cmd_info *find_cmd_entry(struct intel_gvt *gvt,
unsigned int opcode, int ring_id)
{
struct cmd_entry *e;
@@ -638,7 +638,7 @@ static inline struct cmd_info *find_cmd_entry(struct intel_gvt *gvt,
return NULL;
}
-static inline struct cmd_info *get_cmd_info(struct intel_gvt *gvt,
+static inline const struct cmd_info *get_cmd_info(struct intel_gvt *gvt,
u32 cmd, int ring_id)
{
u32 opcode;
@@ -657,7 +657,7 @@ static inline u32 sub_op_val(u32 cmd, u32 hi, u32 low)
static inline void print_opcode(u32 cmd, int ring_id)
{
- struct decode_info *d_info;
+ const struct decode_info *d_info;
int i;
d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
@@ -776,7 +776,7 @@ static inline int ip_gma_advance(struct parser_exec_state *s,
return 0;
}
-static inline int get_cmd_length(struct cmd_info *info, u32 cmd)
+static inline int get_cmd_length(const struct cmd_info *info, u32 cmd)
{
if ((info->flag & F_LEN_MASK) == F_LEN_CONST)
return info->len;
@@ -901,7 +901,8 @@ static int cmd_reg_handler(struct parser_exec_state *s,
* It's good enough to support initializing mmio by lri command in
* vgpu inhibit context on KBL.
*/
- if (IS_KABYLAKE(s->vgpu->gvt->dev_priv) &&
+ if ((IS_KABYLAKE(s->vgpu->gvt->dev_priv)
+ || IS_COFFEELAKE(s->vgpu->gvt->dev_priv)) &&
intel_gvt_mmio_is_in_ctx(gvt, offset) &&
!strncmp(cmd, "lri", 3)) {
intel_gvt_hypervisor_read_gpa(s->vgpu,
@@ -1280,9 +1281,7 @@ static int gen8_check_mi_display_flip(struct parser_exec_state *s,
if (!info->async_flip)
return 0;
- if (IS_SKYLAKE(dev_priv)
- || IS_KABYLAKE(dev_priv)
- || IS_BROXTON(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 9) {
stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0);
tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) &
GENMASK(12, 10)) >> 10;
@@ -1310,9 +1309,7 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12),
info->surf_val << 12);
- if (IS_SKYLAKE(dev_priv)
- || IS_KABYLAKE(dev_priv)
- || IS_BROXTON(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 9) {
set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(9, 0),
info->stride_val);
set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(12, 10),
@@ -1336,9 +1333,7 @@ static int decode_mi_display_flip(struct parser_exec_state *s,
if (IS_BROADWELL(dev_priv))
return gen8_decode_mi_display_flip(s, info);
- if (IS_SKYLAKE(dev_priv)
- || IS_KABYLAKE(dev_priv)
- || IS_BROXTON(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 9)
return skl_decode_mi_display_flip(s, info);
return -ENODEV;
@@ -1643,8 +1638,8 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size)
{
unsigned long gma = 0;
- struct cmd_info *info;
- uint32_t cmd_len = 0;
+ const struct cmd_info *info;
+ u32 cmd_len = 0;
bool bb_end = false;
struct intel_vgpu *vgpu = s->vgpu;
u32 cmd;
@@ -1842,7 +1837,7 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
static int mi_noop_index;
-static struct cmd_info cmd_info[] = {
+static const struct cmd_info cmd_info[] = {
{"MI_NOOP", OP_MI_NOOP, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
{"MI_SET_PREDICATE", OP_MI_SET_PREDICATE, F_LEN_CONST, R_ALL, D_ALL,
@@ -2521,7 +2516,7 @@ static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e)
static int cmd_parser_exec(struct parser_exec_state *s)
{
struct intel_vgpu *vgpu = s->vgpu;
- struct cmd_info *info;
+ const struct cmd_info *info;
u32 cmd;
int ret = 0;
@@ -2683,7 +2678,7 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
I915_GTT_PAGE_SIZE)))
return -EINVAL;
- ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(uint32_t);
+ ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(u32);
ring_size = round_up(wa_ctx->indirect_ctx.size + CACHELINE_BYTES,
PAGE_SIZE);
gma_head = wa_ctx->indirect_ctx.guest_gma;
@@ -2850,7 +2845,7 @@ put_obj:
static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
- uint32_t per_ctx_start[CACHELINE_DWORDS] = {0};
+ u32 per_ctx_start[CACHELINE_DWORDS] = {0};
unsigned char *bb_start_sva;
if (!wa_ctx->per_ctx.valid)
@@ -2895,10 +2890,10 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
return 0;
}
-static struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt,
+static const struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt,
unsigned int opcode, unsigned long rings)
{
- struct cmd_info *info = NULL;
+ const struct cmd_info *info = NULL;
unsigned int ring;
for_each_set_bit(ring, &rings, I915_NUM_ENGINES) {
@@ -2913,7 +2908,7 @@ static int init_cmd_table(struct intel_gvt *gvt)
{
int i;
struct cmd_entry *e;
- struct cmd_info *info;
+ const struct cmd_info *info;
unsigned int gen_type;
gen_type = intel_gvt_get_device_type(gvt);
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index df1e14145747..035479e273be 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -198,7 +198,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
SDE_PORTC_HOTPLUG_CPT |
SDE_PORTD_HOTPLUG_CPT);
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
+ IS_COFFEELAKE(dev_priv)) {
vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
SDE_PORTE_HOTPLUG_SPT);
vgpu_vreg_t(vgpu, SKL_FUSE_STATUS) |=
@@ -273,7 +274,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
}
- if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
+ if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
+ IS_COFFEELAKE(dev_priv)) &&
intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) {
vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT;
}
@@ -340,6 +342,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
port->dpcd->data_valid = true;
port->dpcd->data[DPCD_SINK_COUNT] = 0x1;
port->type = type;
+ port->id = resolution;
emulate_monitor_status_change(vgpu);
@@ -443,6 +446,36 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
}
/**
+ * intel_vgpu_emulate_hotplug - trigger hotplug event for vGPU
+ * @vgpu: a vGPU
+ * @conncted: link state
+ *
+ * This function is used to trigger hotplug interrupt for vGPU
+ *
+ */
+void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+ /* TODO: add more platforms support */
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (connected) {
+ vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
+ SFUSE_STRAP_DDID_DETECTED;
+ vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
+ } else {
+ vgpu_vreg_t(vgpu, SFUSE_STRAP) &=
+ ~SFUSE_STRAP_DDID_DETECTED;
+ vgpu_vreg_t(vgpu, SDEISR) &= ~SDE_PORTD_HOTPLUG_CPT;
+ }
+ vgpu_vreg_t(vgpu, SDEIIR) |= SDE_PORTD_HOTPLUG_CPT;
+ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+ PORTD_HOTPLUG_STATUS_MASK;
+ intel_vgpu_trigger_virtual_event(vgpu, DP_D_HOTPLUG);
+ }
+}
+
+/**
* intel_vgpu_clean_display - clean vGPU virtual display emulation
* @vgpu: a vGPU
*
@@ -453,7 +486,8 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
+ IS_COFFEELAKE(dev_priv))
clean_virtual_dp_monitor(vgpu, PORT_D);
else
clean_virtual_dp_monitor(vgpu, PORT_B);
@@ -476,7 +510,8 @@ int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution)
intel_vgpu_init_i2c_edid(vgpu);
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
+ IS_COFFEELAKE(dev_priv))
return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D,
resolution);
else
diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h
index ea7c1c525b8c..a87f33e6a23c 100644
--- a/drivers/gpu/drm/i915/gvt/display.h
+++ b/drivers/gpu/drm/i915/gvt/display.h
@@ -146,18 +146,19 @@ enum intel_vgpu_port_type {
GVT_PORT_MAX
};
+enum intel_vgpu_edid {
+ GVT_EDID_1024_768,
+ GVT_EDID_1920_1200,
+ GVT_EDID_NUM,
+};
+
struct intel_vgpu_port {
/* per display EDID information */
struct intel_vgpu_edid_data *edid;
/* per display DPCD information */
struct intel_vgpu_dpcd_data *dpcd;
int type;
-};
-
-enum intel_vgpu_edid {
- GVT_EDID_1024_768,
- GVT_EDID_1920_1200,
- GVT_EDID_NUM,
+ enum intel_vgpu_edid id;
};
static inline char *vgpu_edid_str(enum intel_vgpu_edid id)
@@ -172,6 +173,30 @@ static inline char *vgpu_edid_str(enum intel_vgpu_edid id)
}
}
+static inline unsigned int vgpu_edid_xres(enum intel_vgpu_edid id)
+{
+ switch (id) {
+ case GVT_EDID_1024_768:
+ return 1024;
+ case GVT_EDID_1920_1200:
+ return 1920;
+ default:
+ return 0;
+ }
+}
+
+static inline unsigned int vgpu_edid_yres(enum intel_vgpu_edid id)
+{
+ switch (id) {
+ case GVT_EDID_1024_768:
+ return 768;
+ case GVT_EDID_1920_1200:
+ return 1200;
+ default:
+ return 0;
+ }
+}
+
void intel_gvt_emulate_vblank(struct intel_gvt *gvt);
void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt);
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 51ed99a37803..3e7e2b80c857 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -29,7 +29,6 @@
*/
#include <linux/dma-buf.h>
-#include <drm/drmP.h>
#include <linux/vfio.h>
#include "i915_drv.h"
@@ -164,9 +163,7 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
obj->read_domains = I915_GEM_DOMAIN_GTT;
obj->write_domain = 0;
- if (IS_SKYLAKE(dev_priv)
- || IS_KABYLAKE(dev_priv)
- || IS_BROXTON(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 9) {
unsigned int tiling_mode = 0;
unsigned int stride = 0;
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
index 5d4bb35bb889..1fe6124918f1 100644
--- a/drivers/gpu/drm/i915/gvt/edid.c
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -77,16 +77,32 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
return chr;
}
+static inline int cnp_get_port_from_gmbus0(u32 gmbus0)
+{
+ int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK;
+ int port = -EINVAL;
+
+ if (port_select == GMBUS_PIN_1_BXT)
+ port = PORT_B;
+ else if (port_select == GMBUS_PIN_2_BXT)
+ port = PORT_C;
+ else if (port_select == GMBUS_PIN_3_BXT)
+ port = PORT_D;
+ else if (port_select == GMBUS_PIN_4_CNP)
+ port = PORT_E;
+ return port;
+}
+
static inline int bxt_get_port_from_gmbus0(u32 gmbus0)
{
int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK;
int port = -EINVAL;
- if (port_select == 1)
+ if (port_select == GMBUS_PIN_1_BXT)
port = PORT_B;
- else if (port_select == 2)
+ else if (port_select == GMBUS_PIN_2_BXT)
port = PORT_C;
- else if (port_select == 3)
+ else if (port_select == GMBUS_PIN_3_BXT)
port = PORT_D;
return port;
}
@@ -96,13 +112,13 @@ static inline int get_port_from_gmbus0(u32 gmbus0)
int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK;
int port = -EINVAL;
- if (port_select == 2)
+ if (port_select == GMBUS_PIN_VGADDC)
port = PORT_E;
- else if (port_select == 4)
+ else if (port_select == GMBUS_PIN_DPC)
port = PORT_C;
- else if (port_select == 5)
+ else if (port_select == GMBUS_PIN_DPB)
port = PORT_B;
- else if (port_select == 6)
+ else if (port_select == GMBUS_PIN_DPD)
port = PORT_D;
return port;
}
@@ -133,6 +149,8 @@ static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
if (IS_BROXTON(dev_priv))
port = bxt_get_port_from_gmbus0(pin_select);
+ else if (IS_COFFEELAKE(dev_priv))
+ port = cnp_get_port_from_gmbus0(pin_select);
else
port = get_port_from_gmbus0(pin_select);
if (WARN_ON(port < 0))
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index 85e6736f0a32..65e847392aea 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -151,9 +151,7 @@ static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe,
u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(pipe)) & stride_mask;
u32 stride = stride_reg;
- if (IS_SKYLAKE(dev_priv)
- || IS_KABYLAKE(dev_priv)
- || IS_BROXTON(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 9) {
switch (tiled) {
case PLANE_CTL_TILED_LINEAR:
stride = stride_reg * 64;
@@ -217,9 +215,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
if (!plane->enabled)
return -ENODEV;
- if (IS_SKYLAKE(dev_priv)
- || IS_KABYLAKE(dev_priv)
- || IS_BROXTON(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 9) {
plane->tiled = val & PLANE_CTL_TILED_MASK;
fmt = skl_format_to_drm(
val & PLANE_CTL_FORMAT_MASK,
@@ -260,9 +256,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
}
plane->stride = intel_vgpu_get_stride(vgpu, pipe, plane->tiled,
- (IS_SKYLAKE(dev_priv)
- || IS_KABYLAKE(dev_priv)
- || IS_BROXTON(dev_priv)) ?
+ (INTEL_GEN(dev_priv) >= 9) ?
(_PRI_PLANE_STRIDE_MASK >> 6) :
_PRI_PLANE_STRIDE_MASK, plane->bpp);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 733a2a0d0c30..43f4242062dd 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -185,54 +185,9 @@ static const struct intel_gvt_ops intel_gvt_ops = {
.vgpu_query_plane = intel_vgpu_query_plane,
.vgpu_get_dmabuf = intel_vgpu_get_dmabuf,
.write_protect_handler = intel_vgpu_page_track_handler,
+ .emulate_hotplug = intel_vgpu_emulate_hotplug,
};
-/**
- * intel_gvt_init_host - Load MPT modules and detect if we're running in host
- *
- * This function is called at the driver loading stage. If failed to find a
- * loadable MPT module or detect currently we're running in a VM, then GVT-g
- * will be disabled
- *
- * Returns:
- * Zero on success, negative error code if failed.
- *
- */
-int intel_gvt_init_host(void)
-{
- if (intel_gvt_host.initialized)
- return 0;
-
- /* Xen DOM U */
- if (xen_domain() && !xen_initial_domain())
- return -ENODEV;
-
- /* Try to load MPT modules for hypervisors */
- if (xen_initial_domain()) {
- /* In Xen dom0 */
- intel_gvt_host.mpt = try_then_request_module(
- symbol_get(xengt_mpt), "xengt");
- intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_XEN;
- } else {
-#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
- /* not in Xen. Try KVMGT */
- intel_gvt_host.mpt = try_then_request_module(
- symbol_get(kvmgt_mpt), "kvmgt");
- intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_KVM;
-#endif
- }
-
- /* Fail to load MPT modules - bail out */
- if (!intel_gvt_host.mpt)
- return -EINVAL;
-
- gvt_dbg_core("Running with hypervisor %s in host mode\n",
- supported_hypervisors[intel_gvt_host.hypervisor_type]);
-
- intel_gvt_host.initialized = true;
- return 0;
-}
-
static void init_device_info(struct intel_gvt *gvt)
{
struct intel_gvt_device_info *info = &gvt->device_info;
@@ -316,7 +271,6 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
return;
intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
- intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
intel_gvt_cleanup_vgpu_type_groups(gvt);
intel_gvt_clean_vgpu_types(gvt);
@@ -352,13 +306,6 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
struct intel_vgpu *vgpu;
int ret;
- /*
- * Cannot initialize GVT device without intel_gvt_host gets
- * initialized first.
- */
- if (WARN_ON(!intel_gvt_host.initialized))
- return -EINVAL;
-
if (WARN_ON(dev_priv->gvt))
return -EEXIST;
@@ -420,13 +367,6 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
goto out_clean_types;
}
- ret = intel_gvt_hypervisor_host_init(&dev_priv->drm.pdev->dev, gvt,
- &intel_gvt_ops);
- if (ret) {
- gvt_err("failed to register gvt-g host device: %d\n", ret);
- goto out_clean_types;
- }
-
vgpu = intel_gvt_create_idle_vgpu(gvt);
if (IS_ERR(vgpu)) {
ret = PTR_ERR(vgpu);
@@ -441,6 +381,8 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
gvt_dbg_core("gvt device initialization is done\n");
dev_priv->gvt = gvt;
+ intel_gvt_host.dev = &dev_priv->drm.pdev->dev;
+ intel_gvt_host.initialized = true;
return 0;
out_clean_types:
@@ -467,6 +409,45 @@ out_clean_idr:
return ret;
}
-#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
-MODULE_SOFTDEP("pre: kvmgt");
-#endif
+int
+intel_gvt_register_hypervisor(struct intel_gvt_mpt *m)
+{
+ int ret;
+ void *gvt;
+
+ if (!intel_gvt_host.initialized)
+ return -ENODEV;
+
+ if (m->type != INTEL_GVT_HYPERVISOR_KVM &&
+ m->type != INTEL_GVT_HYPERVISOR_XEN)
+ return -EINVAL;
+
+ /* Get a reference for device model module */
+ if (!try_module_get(THIS_MODULE))
+ return -ENODEV;
+
+ intel_gvt_host.mpt = m;
+ intel_gvt_host.hypervisor_type = m->type;
+ gvt = (void *)kdev_to_i915(intel_gvt_host.dev)->gvt;
+
+ ret = intel_gvt_hypervisor_host_init(intel_gvt_host.dev, gvt,
+ &intel_gvt_ops);
+ if (ret < 0) {
+ gvt_err("Failed to init %s hypervisor module\n",
+ supported_hypervisors[intel_gvt_host.hypervisor_type]);
+ module_put(THIS_MODULE);
+ return -ENODEV;
+ }
+ gvt_dbg_core("Running with hypervisor %s in host mode\n",
+ supported_hypervisors[intel_gvt_host.hypervisor_type]);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(intel_gvt_register_hypervisor);
+
+void
+intel_gvt_unregister_hypervisor(void)
+{
+ intel_gvt_hypervisor_host_exit(intel_gvt_host.dev);
+ module_put(THIS_MODULE);
+}
+EXPORT_SYMBOL_GPL(intel_gvt_unregister_hypervisor);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index b4ab1dad0143..8bce09de4b82 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -52,12 +52,8 @@
#define GVT_MAX_VGPU 8
-enum {
- INTEL_GVT_HYPERVISOR_XEN = 0,
- INTEL_GVT_HYPERVISOR_KVM,
-};
-
struct intel_gvt_host {
+ struct device *dev;
bool initialized;
int hypervisor_type;
struct intel_gvt_mpt *mpt;
@@ -540,6 +536,8 @@ int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes);
+void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected);
+
static inline u64 intel_vgpu_get_bar_gpa(struct intel_vgpu *vgpu, int bar)
{
/* We are 64bit bar. */
@@ -581,6 +579,7 @@ struct intel_gvt_ops {
int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
unsigned int);
+ void (*emulate_hotplug)(struct intel_vgpu *vgpu, bool connected);
};
@@ -597,7 +596,7 @@ static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv)
static inline void mmio_hw_access_post(struct drm_i915_private *dev_priv)
{
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
}
/**
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index e9f343b124b0..bc64b810e0d5 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -57,6 +57,8 @@ unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
return D_KBL;
else if (IS_BROXTON(gvt->dev_priv))
return D_BXT;
+ else if (IS_COFFEELAKE(gvt->dev_priv))
+ return D_CFL;
return 0;
}
@@ -276,14 +278,12 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
u32 old, new;
- uint32_t ack_reg_offset;
+ u32 ack_reg_offset;
old = vgpu_vreg(vgpu, offset);
new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);
- if (IS_SKYLAKE(vgpu->gvt->dev_priv)
- || IS_KABYLAKE(vgpu->gvt->dev_priv)
- || IS_BROXTON(vgpu->gvt->dev_priv)) {
+ if (INTEL_GEN(vgpu->gvt->dev_priv) >= 9) {
switch (offset) {
case FORCEWAKE_RENDER_GEN9_REG:
ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
@@ -833,7 +833,7 @@ static int dp_aux_ch_ctl_trans_done(struct intel_vgpu *vgpu, u32 value,
}
static void dp_aux_ch_ctl_link_training(struct intel_vgpu_dpcd_data *dpcd,
- uint8_t t)
+ u8 t)
{
if ((t & DPCD_TRAINING_PATTERN_SET_MASK) == DPCD_TRAINING_PATTERN_1) {
/* training pattern 1 for CR */
@@ -889,9 +889,7 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
write_vreg(vgpu, offset, p_data, bytes);
data = vgpu_vreg(vgpu, offset);
- if ((IS_SKYLAKE(vgpu->gvt->dev_priv)
- || IS_KABYLAKE(vgpu->gvt->dev_priv)
- || IS_BROXTON(vgpu->gvt->dev_priv))
+ if ((INTEL_GEN(vgpu->gvt->dev_priv) >= 9)
&& offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
/* SKL DPB/C/D aux ctl register changed */
return 0;
@@ -919,7 +917,7 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
if (op == GVT_AUX_NATIVE_WRITE) {
int t;
- uint8_t buf[16];
+ u8 buf[16];
if ((addr + len + 1) >= DPCD_SIZE) {
/*
@@ -1407,7 +1405,8 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
switch (cmd) {
case GEN9_PCODE_READ_MEM_LATENCY:
if (IS_SKYLAKE(vgpu->gvt->dev_priv)
- || IS_KABYLAKE(vgpu->gvt->dev_priv)) {
+ || IS_KABYLAKE(vgpu->gvt->dev_priv)
+ || IS_COFFEELAKE(vgpu->gvt->dev_priv)) {
/**
* "Read memory latency" command on gen9.
* Below memory latency values are read
@@ -1431,7 +1430,8 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
break;
case SKL_PCODE_CDCLK_CONTROL:
if (IS_SKYLAKE(vgpu->gvt->dev_priv)
- || IS_KABYLAKE(vgpu->gvt->dev_priv))
+ || IS_KABYLAKE(vgpu->gvt->dev_priv)
+ || IS_COFFEELAKE(vgpu->gvt->dev_priv))
*data0 = SKL_CDCLK_READY_FOR_CHANGE;
break;
case GEN6_PCODE_READ_RC6VIDS:
@@ -3042,8 +3042,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
- MMIO_D(_MMIO(0x4ab8), D_KBL);
- MMIO_D(_MMIO(0x2248), D_KBL | D_SKL);
+ MMIO_D(_MMIO(0x4ab8), D_KBL | D_CFL);
+ MMIO_D(_MMIO(0x2248), D_SKL_PLUS);
return 0;
}
@@ -3303,7 +3303,8 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
if (ret)
goto err;
} else if (IS_SKYLAKE(dev_priv)
- || IS_KABYLAKE(dev_priv)) {
+ || IS_KABYLAKE(dev_priv)
+ || IS_COFFEELAKE(dev_priv)) {
ret = init_broadwell_mmio_info(gvt);
if (ret)
goto err;
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index e1675a00df12..4862fb12778e 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -33,13 +33,19 @@
#ifndef _GVT_HYPERCALL_H_
#define _GVT_HYPERCALL_H_
+enum hypervisor_type {
+ INTEL_GVT_HYPERVISOR_XEN = 0,
+ INTEL_GVT_HYPERVISOR_KVM,
+};
+
/*
* Specific GVT-g MPT modules function collections. Currently GVT-g supports
* both Xen and KVM by providing dedicated hypervisor-related MPT modules.
*/
struct intel_gvt_mpt {
+ enum hypervisor_type type;
int (*host_init)(struct device *dev, void *gvt, const void *ops);
- void (*host_exit)(struct device *dev, void *gvt);
+ void (*host_exit)(struct device *dev);
int (*attach_vgpu)(void *vgpu, unsigned long *handle);
void (*detach_vgpu)(void *vgpu);
int (*inject_msi)(unsigned long handle, u32 addr, u16 data);
@@ -61,12 +67,12 @@ struct intel_gvt_mpt {
int (*set_trap_area)(unsigned long handle, u64 start, u64 end,
bool map);
int (*set_opregion)(void *vgpu);
+ int (*set_edid)(void *vgpu, int port_num);
int (*get_vfio_device)(void *vgpu);
void (*put_vfio_device)(void *vgpu);
bool (*is_valid_gfn)(unsigned long handle, unsigned long gfn);
};
extern struct intel_gvt_mpt xengt_mpt;
-extern struct intel_gvt_mpt kvmgt_mpt;
#endif /* _GVT_HYPERCALL_H_ */
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index 6b9d1354ff29..67125c5eec6e 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -581,9 +581,7 @@ static void gen8_init_irq(
SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
- } else if (IS_SKYLAKE(gvt->dev_priv)
- || IS_KABYLAKE(gvt->dev_priv)
- || IS_BROXTON(gvt->dev_priv)) {
+ } else if (INTEL_GEN(gvt->dev_priv) >= 9) {
SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT);
SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT);
SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index dd3dfd00f4e6..d5fcc447d22f 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -57,6 +57,8 @@ static const struct intel_gvt_ops *intel_gvt_ops;
#define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_OFFSET_MASK (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)
+#define EDID_BLOB_OFFSET (PAGE_SIZE/2)
+
#define OPREGION_SIGNATURE "IntelGraphicsMem"
struct vfio_region;
@@ -76,6 +78,11 @@ struct vfio_region {
void *data;
};
+struct vfio_edid_region {
+ struct vfio_region_gfx_edid vfio_edid_regs;
+ void *edid_blob;
+};
+
struct kvmgt_pgfn {
gfn_t gfn;
struct hlist_node hnode;
@@ -427,6 +434,111 @@ static const struct intel_vgpu_regops intel_vgpu_regops_opregion = {
.release = intel_vgpu_reg_release_opregion,
};
+static int handle_edid_regs(struct intel_vgpu *vgpu,
+ struct vfio_edid_region *region, char *buf,
+ size_t count, u16 offset, bool is_write)
+{
+ struct vfio_region_gfx_edid *regs = &region->vfio_edid_regs;
+ unsigned int data;
+
+ if (offset + count > sizeof(*regs))
+ return -EINVAL;
+
+ if (count != 4)
+ return -EINVAL;
+
+ if (is_write) {
+ data = *((unsigned int *)buf);
+ switch (offset) {
+ case offsetof(struct vfio_region_gfx_edid, link_state):
+ if (data == VFIO_DEVICE_GFX_LINK_STATE_UP) {
+ if (!drm_edid_block_valid(
+ (u8 *)region->edid_blob,
+ 0,
+ true,
+ NULL)) {
+ gvt_vgpu_err("invalid EDID blob\n");
+ return -EINVAL;
+ }
+ intel_gvt_ops->emulate_hotplug(vgpu, true);
+ } else if (data == VFIO_DEVICE_GFX_LINK_STATE_DOWN)
+ intel_gvt_ops->emulate_hotplug(vgpu, false);
+ else {
+ gvt_vgpu_err("invalid EDID link state %d\n",
+ regs->link_state);
+ return -EINVAL;
+ }
+ regs->link_state = data;
+ break;
+ case offsetof(struct vfio_region_gfx_edid, edid_size):
+ if (data > regs->edid_max_size) {
+ gvt_vgpu_err("EDID size is bigger than %d!\n",
+ regs->edid_max_size);
+ return -EINVAL;
+ }
+ regs->edid_size = data;
+ break;
+ default:
+ /* read-only regs */
+ gvt_vgpu_err("write read-only EDID region at offset %d\n",
+ offset);
+ return -EPERM;
+ }
+ } else {
+ memcpy(buf, (char *)regs + offset, count);
+ }
+
+ return count;
+}
+
+static int handle_edid_blob(struct vfio_edid_region *region, char *buf,
+ size_t count, u16 offset, bool is_write)
+{
+ if (offset + count > region->vfio_edid_regs.edid_size)
+ return -EINVAL;
+
+ if (is_write)
+ memcpy(region->edid_blob + offset, buf, count);
+ else
+ memcpy(buf, region->edid_blob + offset, count);
+
+ return count;
+}
+
+static size_t intel_vgpu_reg_rw_edid(struct intel_vgpu *vgpu, char *buf,
+ size_t count, loff_t *ppos, bool iswrite)
+{
+ int ret;
+ unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
+ VFIO_PCI_NUM_REGIONS;
+ struct vfio_edid_region *region =
+ (struct vfio_edid_region *)vgpu->vdev.region[i].data;
+ loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+
+ if (pos < region->vfio_edid_regs.edid_offset) {
+ ret = handle_edid_regs(vgpu, region, buf, count, pos, iswrite);
+ } else {
+ pos -= EDID_BLOB_OFFSET;
+ ret = handle_edid_blob(region, buf, count, pos, iswrite);
+ }
+
+ if (ret < 0)
+ gvt_vgpu_err("failed to access EDID region\n");
+
+ return ret;
+}
+
+static void intel_vgpu_reg_release_edid(struct intel_vgpu *vgpu,
+ struct vfio_region *region)
+{
+ kfree(region->data);
+}
+
+static const struct intel_vgpu_regops intel_vgpu_regops_edid = {
+ .rw = intel_vgpu_reg_rw_edid,
+ .release = intel_vgpu_reg_release_edid,
+};
+
static int intel_vgpu_register_reg(struct intel_vgpu *vgpu,
unsigned int type, unsigned int subtype,
const struct intel_vgpu_regops *ops,
@@ -493,6 +605,36 @@ static int kvmgt_set_opregion(void *p_vgpu)
return ret;
}
+static int kvmgt_set_edid(void *p_vgpu, int port_num)
+{
+ struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
+ struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
+ struct vfio_edid_region *base;
+ int ret;
+
+ base = kzalloc(sizeof(*base), GFP_KERNEL);
+ if (!base)
+ return -ENOMEM;
+
+ /* TODO: Add multi-port and EDID extension block support */
+ base->vfio_edid_regs.edid_offset = EDID_BLOB_OFFSET;
+ base->vfio_edid_regs.edid_max_size = EDID_SIZE;
+ base->vfio_edid_regs.edid_size = EDID_SIZE;
+ base->vfio_edid_regs.max_xres = vgpu_edid_xres(port->id);
+ base->vfio_edid_regs.max_yres = vgpu_edid_yres(port->id);
+ base->edid_blob = port->edid->edid_block;
+
+ ret = intel_vgpu_register_reg(vgpu,
+ VFIO_REGION_TYPE_GFX,
+ VFIO_REGION_SUBTYPE_GFX_EDID,
+ &intel_vgpu_regops_edid, EDID_SIZE,
+ VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE |
+ VFIO_REGION_INFO_FLAG_CAPS, base);
+
+ return ret;
+}
+
static void kvmgt_put_vfio_device(void *vgpu)
{
if (WARN_ON(!((struct intel_vgpu *)vgpu)->vdev.vfio_device))
@@ -627,6 +769,12 @@ static int intel_vgpu_open(struct mdev_device *mdev)
goto undo_iommu;
}
+ /* Take a module reference as mdev core doesn't take
+ * a reference for vendor driver.
+ */
+ if (!try_module_get(THIS_MODULE))
+ goto undo_group;
+
ret = kvmgt_guest_init(mdev);
if (ret)
goto undo_group;
@@ -679,6 +827,9 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
&vgpu->vdev.group_notifier);
WARN(ret, "vfio_unregister_notifier for group failed: %d\n", ret);
+ /* dereference module reference taken at open */
+ module_put(THIS_MODULE);
+
info = (struct kvmgt_guest_info *)vgpu->handle;
kvmgt_guest_exit(info);
@@ -703,7 +854,7 @@ static void intel_vgpu_release_work(struct work_struct *work)
__intel_vgpu_release(vgpu);
}
-static uint64_t intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
+static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
{
u32 start_lo, start_hi;
u32 mem_type;
@@ -730,10 +881,10 @@ static uint64_t intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
return ((u64)start_hi << 32) | start_lo;
}
-static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, uint64_t off,
+static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, u64 off,
void *buf, unsigned int count, bool is_write)
{
- uint64_t bar_start = intel_vgpu_get_bar_addr(vgpu, bar);
+ u64 bar_start = intel_vgpu_get_bar_addr(vgpu, bar);
int ret;
if (is_write)
@@ -745,13 +896,13 @@ static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, uint64_t off,
return ret;
}
-static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, uint64_t off)
+static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, u64 off)
{
return off >= vgpu_aperture_offset(vgpu) &&
off < vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu);
}
-static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, uint64_t off,
+static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
void *buf, unsigned long count, bool is_write)
{
void *aperture_va;
@@ -783,7 +934,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
{
struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
- uint64_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+ u64 pos = *ppos & VFIO_PCI_OFFSET_MASK;
int ret = -EINVAL;
@@ -1039,7 +1190,7 @@ static int intel_vgpu_get_irq_count(struct intel_vgpu *vgpu, int type)
static int intel_vgpu_set_intx_mask(struct intel_vgpu *vgpu,
unsigned int index, unsigned int start,
- unsigned int count, uint32_t flags,
+ unsigned int count, u32 flags,
void *data)
{
return 0;
@@ -1047,21 +1198,21 @@ static int intel_vgpu_set_intx_mask(struct intel_vgpu *vgpu,
static int intel_vgpu_set_intx_unmask(struct intel_vgpu *vgpu,
unsigned int index, unsigned int start,
- unsigned int count, uint32_t flags, void *data)
+ unsigned int count, u32 flags, void *data)
{
return 0;
}
static int intel_vgpu_set_intx_trigger(struct intel_vgpu *vgpu,
unsigned int index, unsigned int start, unsigned int count,
- uint32_t flags, void *data)
+ u32 flags, void *data)
{
return 0;
}
static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
unsigned int index, unsigned int start, unsigned int count,
- uint32_t flags, void *data)
+ u32 flags, void *data)
{
struct eventfd_ctx *trigger;
@@ -1080,12 +1231,12 @@ static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
return 0;
}
-static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, uint32_t flags,
+static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, u32 flags,
unsigned int index, unsigned int start, unsigned int count,
void *data)
{
int (*func)(struct intel_vgpu *vgpu, unsigned int index,
- unsigned int start, unsigned int count, uint32_t flags,
+ unsigned int start, unsigned int count, u32 flags,
void *data) = NULL;
switch (index) {
@@ -1477,7 +1628,7 @@ static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
return mdev_register_device(dev, &intel_vgpu_ops);
}
-static void kvmgt_host_exit(struct device *dev, void *gvt)
+static void kvmgt_host_exit(struct device *dev)
{
mdev_unregister_device(dev);
}
@@ -1871,7 +2022,8 @@ static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
return ret;
}
-struct intel_gvt_mpt kvmgt_mpt = {
+static struct intel_gvt_mpt kvmgt_mpt = {
+ .type = INTEL_GVT_HYPERVISOR_KVM,
.host_init = kvmgt_host_init,
.host_exit = kvmgt_host_exit,
.attach_vgpu = kvmgt_attach_vgpu,
@@ -1886,19 +2038,22 @@ struct intel_gvt_mpt kvmgt_mpt = {
.dma_map_guest_page = kvmgt_dma_map_guest_page,
.dma_unmap_guest_page = kvmgt_dma_unmap_guest_page,
.set_opregion = kvmgt_set_opregion,
+ .set_edid = kvmgt_set_edid,
.get_vfio_device = kvmgt_get_vfio_device,
.put_vfio_device = kvmgt_put_vfio_device,
.is_valid_gfn = kvmgt_is_valid_gfn,
};
-EXPORT_SYMBOL_GPL(kvmgt_mpt);
static int __init kvmgt_init(void)
{
+ if (intel_gvt_register_hypervisor(&kvmgt_mpt) < 0)
+ return -ENODEV;
return 0;
}
static void __exit kvmgt_exit(void)
{
+ intel_gvt_unregister_hypervisor();
}
module_init(kvmgt_init);
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 43f65848ecd6..ed4df2f6d60b 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -57,7 +57,7 @@ int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
(reg >= gvt->device_info.gtt_start_offset \
&& reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))
-static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
+static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, u64 pa,
void *p_data, unsigned int bytes, bool read)
{
struct intel_gvt *gvt = NULL;
@@ -99,7 +99,7 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
* Returns:
* Zero on success, negative error code if failed
*/
-int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
+int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
void *p_data, unsigned int bytes)
{
struct intel_gvt *gvt = vgpu->gvt;
@@ -171,7 +171,7 @@ out:
* Returns:
* Zero on success, negative error code if failed
*/
-int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
+int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
void *p_data, unsigned int bytes)
{
struct intel_gvt *gvt = vgpu->gvt;
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index 1ffc69eba30e..5874f1cb4306 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -43,15 +43,16 @@ struct intel_vgpu;
#define D_SKL (1 << 1)
#define D_KBL (1 << 2)
#define D_BXT (1 << 3)
+#define D_CFL (1 << 4)
-#define D_GEN9PLUS (D_SKL | D_KBL | D_BXT)
-#define D_GEN8PLUS (D_BDW | D_SKL | D_KBL | D_BXT)
+#define D_GEN9PLUS (D_SKL | D_KBL | D_BXT | D_CFL)
+#define D_GEN8PLUS (D_BDW | D_SKL | D_KBL | D_BXT | D_CFL)
-#define D_SKL_PLUS (D_SKL | D_KBL | D_BXT)
-#define D_BDW_PLUS (D_BDW | D_SKL | D_KBL | D_BXT)
+#define D_SKL_PLUS (D_SKL | D_KBL | D_BXT | D_CFL)
+#define D_BDW_PLUS (D_BDW | D_SKL | D_KBL | D_BXT | D_CFL)
#define D_PRE_SKL (D_BDW)
-#define D_ALL (D_BDW | D_SKL | D_KBL | D_BXT)
+#define D_ALL (D_BDW | D_SKL | D_KBL | D_BXT | D_CFL)
typedef int (*gvt_mmio_func)(struct intel_vgpu *, unsigned int, void *,
unsigned int);
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index d6e02c15ef97..7d84cfb9051a 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -353,8 +353,7 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
*/
fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
FW_REG_READ | FW_REG_WRITE);
- if (ring_id == RCS && (IS_SKYLAKE(dev_priv) ||
- IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)))
+ if (ring_id == RCS && (INTEL_GEN(dev_priv) >= 9))
fw |= FORCEWAKE_RENDER;
intel_uncore_forcewake_get(dev_priv, fw);
@@ -391,7 +390,8 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
return;
- if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)) && ring_id == RCS)
+ if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)
+ || IS_COFFEELAKE(dev_priv)) && ring_id == RCS)
return;
if (!pre && !gen9_render_mocs.initialized)
@@ -457,9 +457,7 @@ static void switch_mmio(struct intel_vgpu *pre,
u32 old_v, new_v;
dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
- if (IS_SKYLAKE(dev_priv)
- || IS_KABYLAKE(dev_priv)
- || IS_BROXTON(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 9)
switch_mocs(pre, next, ring_id);
for (mmio = dev_priv->gvt->engine_mmio_list.mmio;
@@ -471,8 +469,8 @@ static void switch_mmio(struct intel_vgpu *pre,
* state image on kabylake, it's initialized by lri command and
* save or restore with context together.
*/
- if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv))
- && mmio->in_context)
+ if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)
+ || IS_COFFEELAKE(dev_priv)) && mmio->in_context)
continue;
// save
@@ -565,9 +563,7 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
{
struct engine_mmio *mmio;
- if (IS_SKYLAKE(gvt->dev_priv) ||
- IS_KABYLAKE(gvt->dev_priv) ||
- IS_BROXTON(gvt->dev_priv))
+ if (INTEL_GEN(gvt->dev_priv) >= 9)
gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
else
gvt->engine_mmio_list.mmio = gen8_engine_mmio_list;
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index 3ed34123d8d1..0f9440128123 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -50,11 +50,10 @@
* Zero on success, negative error code if failed
*/
static inline int intel_gvt_hypervisor_host_init(struct device *dev,
- void *gvt, const void *ops)
+ void *gvt, const void *ops)
{
- /* optional to provide */
if (!intel_gvt_host.mpt->host_init)
- return 0;
+ return -ENODEV;
return intel_gvt_host.mpt->host_init(dev, gvt, ops);
}
@@ -62,14 +61,13 @@ static inline int intel_gvt_hypervisor_host_init(struct device *dev,
/**
* intel_gvt_hypervisor_host_exit - exit GVT-g host side
*/
-static inline void intel_gvt_hypervisor_host_exit(struct device *dev,
- void *gvt)
+static inline void intel_gvt_hypervisor_host_exit(struct device *dev)
{
/* optional to provide */
if (!intel_gvt_host.mpt->host_exit)
return;
- intel_gvt_host.mpt->host_exit(dev, gvt);
+ intel_gvt_host.mpt->host_exit(dev);
}
/**
@@ -316,6 +314,23 @@ static inline int intel_gvt_hypervisor_set_opregion(struct intel_vgpu *vgpu)
}
/**
+ * intel_gvt_hypervisor_set_edid - Set EDID region for guest
+ * @vgpu: a vGPU
+ * @port_num: display port number
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_set_edid(struct intel_vgpu *vgpu,
+ int port_num)
+{
+ if (!intel_gvt_host.mpt->set_edid)
+ return 0;
+
+ return intel_gvt_host.mpt->set_edid(vgpu, port_num);
+}
+
+/**
* intel_gvt_hypervisor_get_vfio_device - increase vfio device ref count
* @vgpu: a vGPU
*
@@ -362,4 +377,7 @@ static inline bool intel_gvt_hypervisor_is_valid_gfn(
return intel_gvt_host.mpt->is_valid_gfn(vgpu->handle, gfn);
}
+int intel_gvt_register_hypervisor(struct intel_gvt_mpt *);
+void intel_gvt_unregister_hypervisor(void);
+
#endif /* _GVT_MPT_H_ */
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index c32e7d5e8629..1c763a27a412 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -94,7 +94,7 @@ static void gvt_balance_timeslice(struct gvt_sched_data *sched_data)
{
struct vgpu_sched_data *vgpu_data;
struct list_head *pos;
- static uint64_t stage_check;
+ static u64 stage_check;
int stage = stage_check++ % GVT_TS_BALANCE_STAGE_NUM;
/* The timeslice accumulation reset at stage 0, which is
@@ -474,6 +474,6 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
}
}
spin_unlock_bh(&scheduler->mmio_context_lock);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
mutex_unlock(&vgpu->gvt->sched_lock);
}
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 55bb7885e228..1bb8f936fdaa 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -299,7 +299,8 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
void *shadow_ring_buffer_va;
u32 *cs;
- if ((IS_KABYLAKE(req->i915) || IS_BROXTON(req->i915))
+ if ((IS_KABYLAKE(req->i915) || IS_BROXTON(req->i915)
+ || IS_COFFEELAKE(req->i915))
&& is_inhibit_context(req->hw_context))
intel_vgpu_restore_inhibit_context(vgpu, req);
@@ -957,9 +958,7 @@ static int workload_thread(void *priv)
struct intel_vgpu_workload *workload = NULL;
struct intel_vgpu *vgpu = NULL;
int ret;
- bool need_force_wake = IS_SKYLAKE(gvt->dev_priv)
- || IS_KABYLAKE(gvt->dev_priv)
- || IS_BROXTON(gvt->dev_priv);
+ bool need_force_wake = (INTEL_GEN(gvt->dev_priv) >= 9);
DEFINE_WAIT_FUNC(wait, woken_wake_function);
kfree(p);
@@ -1015,7 +1014,7 @@ complete:
intel_uncore_forcewake_put(gvt->dev_priv,
FORCEWAKE_ALL);
- intel_runtime_pm_put(gvt->dev_priv);
+ intel_runtime_pm_put_unchecked(gvt->dev_priv);
if (ret && (vgpu_is_vm_unhealthy(ret)))
enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
}
@@ -1472,7 +1471,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
mutex_lock(&dev_priv->drm.struct_mutex);
ret = intel_gvt_scan_and_shadow_workload(workload);
mutex_unlock(&dev_priv->drm.struct_mutex);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
}
if (ret && (vgpu_is_vm_unhealthy(ret))) {
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 2065cba59aab..0635b2c4bed7 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -61,7 +61,7 @@ struct shadow_indirect_ctx {
unsigned long guest_gma;
unsigned long shadow_gma;
void *shadow_va;
- uint32_t size;
+ u32 size;
};
#define PER_CTX_ADDR_MASK 0xfffff000
diff --git a/drivers/gpu/drm/i915/gvt/trace.h b/drivers/gpu/drm/i915/gvt/trace.h
index 1fd64202d74e..6d787750d279 100644
--- a/drivers/gpu/drm/i915/gvt/trace.h
+++ b/drivers/gpu/drm/i915/gvt/trace.h
@@ -228,7 +228,7 @@ TRACE_EVENT(oos_sync,
TRACE_EVENT(gvt_command,
TP_PROTO(u8 vgpu_id, u8 ring_id, u32 ip_gma, u32 *cmd_va,
u32 cmd_len, u32 buf_type, u32 buf_addr_type,
- void *workload, char *cmd_name),
+ void *workload, const char *cmd_name),
TP_ARGS(vgpu_id, ring_id, ip_gma, cmd_va, cmd_len, buf_type,
buf_addr_type, workload, cmd_name),
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index c628be05fbfe..720e2b10adaa 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -148,10 +148,10 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
high_avail / vgpu_types[i].high_mm);
- if (IS_GEN8(gvt->dev_priv))
+ if (IS_GEN(gvt->dev_priv, 8))
sprintf(gvt->types[i].name, "GVTg_V4_%s",
vgpu_types[i].name);
- else if (IS_GEN9(gvt->dev_priv))
+ else if (IS_GEN(gvt->dev_priv, 9))
sprintf(gvt->types[i].name, "GVTg_V5_%s",
vgpu_types[i].name);
@@ -428,6 +428,12 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret)
goto out_clean_sched_policy;
+ /* TODO: add support for more platforms */
+ if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv))
+ ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
+ if (ret)
+ goto out_clean_sched_policy;
+
return vgpu;
out_clean_sched_policy:
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
new file mode 100644
index 000000000000..215b6ff8aa73
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -0,0 +1,286 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_active.h"
+
+#define BKL(ref) (&(ref)->i915->drm.struct_mutex)
+
+/*
+ * Active refs memory management
+ *
+ * To be more economical with memory, we reap all the i915_active trees as
+ * they idle (when we know the active requests are inactive) and allocate the
+ * nodes from a local slab cache to hopefully reduce the fragmentation.
+ */
+static struct i915_global_active {
+ struct kmem_cache *slab_cache;
+} global;
+
+struct active_node {
+ struct i915_active_request base;
+ struct i915_active *ref;
+ struct rb_node node;
+ u64 timeline;
+};
+
+static void
+__active_park(struct i915_active *ref)
+{
+ struct active_node *it, *n;
+
+ rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
+ GEM_BUG_ON(i915_active_request_isset(&it->base));
+ kmem_cache_free(global.slab_cache, it);
+ }
+ ref->tree = RB_ROOT;
+}
+
+static void
+__active_retire(struct i915_active *ref)
+{
+ GEM_BUG_ON(!ref->count);
+ if (--ref->count)
+ return;
+
+ /* return the unused nodes to our slabcache */
+ __active_park(ref);
+
+ ref->retire(ref);
+}
+
+static void
+node_retire(struct i915_active_request *base, struct i915_request *rq)
+{
+ __active_retire(container_of(base, struct active_node, base)->ref);
+}
+
+static void
+last_retire(struct i915_active_request *base, struct i915_request *rq)
+{
+ __active_retire(container_of(base, struct i915_active, last));
+}
+
+static struct i915_active_request *
+active_instance(struct i915_active *ref, u64 idx)
+{
+ struct active_node *node;
+ struct rb_node **p, *parent;
+ struct i915_request *old;
+
+ /*
+ * We track the most recently used timeline to skip a rbtree search
+ * for the common case, under typical loads we never need the rbtree
+ * at all. We can reuse the last slot if it is empty, that is
+ * after the previous activity has been retired, or if it matches the
+ * current timeline.
+ *
+ * Note that we allow the timeline to be active simultaneously in
+ * the rbtree and the last cache. We do this to avoid having
+ * to search and replace the rbtree element for a new timeline, with
+ * the cost being that we must be aware that the ref may be retired
+ * twice for the same timeline (as the older rbtree element will be
+ * retired before the new request added to last).
+ */
+ old = i915_active_request_raw(&ref->last, BKL(ref));
+ if (!old || old->fence.context == idx)
+ goto out;
+
+ /* Move the currently active fence into the rbtree */
+ idx = old->fence.context;
+
+ parent = NULL;
+ p = &ref->tree.rb_node;
+ while (*p) {
+ parent = *p;
+
+ node = rb_entry(parent, struct active_node, node);
+ if (node->timeline == idx)
+ goto replace;
+
+ if (node->timeline < idx)
+ p = &parent->rb_right;
+ else
+ p = &parent->rb_left;
+ }
+
+ node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
+
+ /* kmalloc may retire the ref->last (thanks shrinker)! */
+ if (unlikely(!i915_active_request_raw(&ref->last, BKL(ref)))) {
+ kmem_cache_free(global.slab_cache, node);
+ goto out;
+ }
+
+ if (unlikely(!node))
+ return ERR_PTR(-ENOMEM);
+
+ i915_active_request_init(&node->base, NULL, node_retire);
+ node->ref = ref;
+ node->timeline = idx;
+
+ rb_link_node(&node->node, parent, p);
+ rb_insert_color(&node->node, &ref->tree);
+
+replace:
+ /*
+ * Overwrite the previous active slot in the rbtree with last,
+ * leaving last zeroed. If the previous slot is still active,
+ * we must be careful as we now only expect to receive one retire
+ * callback, not two, and so must undo the active counting for the
+ * overwritten slot.
+ */
+ if (i915_active_request_isset(&node->base)) {
+ /* Retire ourselves from the old rq->active_list */
+ __list_del_entry(&node->base.link);
+ ref->count--;
+ GEM_BUG_ON(!ref->count);
+ }
+ GEM_BUG_ON(list_empty(&ref->last.link));
+ list_replace_init(&ref->last.link, &node->base.link);
+ node->base.request = fetch_and_zero(&ref->last.request);
+
+out:
+ return &ref->last;
+}
+
+void i915_active_init(struct drm_i915_private *i915,
+ struct i915_active *ref,
+ void (*retire)(struct i915_active *ref))
+{
+ ref->i915 = i915;
+ ref->retire = retire;
+ ref->tree = RB_ROOT;
+ i915_active_request_init(&ref->last, NULL, last_retire);
+ ref->count = 0;
+}
+
+int i915_active_ref(struct i915_active *ref,
+ u64 timeline,
+ struct i915_request *rq)
+{
+ struct i915_active_request *active;
+
+ active = active_instance(ref, timeline);
+ if (IS_ERR(active))
+ return PTR_ERR(active);
+
+ if (!i915_active_request_isset(active))
+ ref->count++;
+ __i915_active_request_set(active, rq);
+
+ GEM_BUG_ON(!ref->count);
+ return 0;
+}
+
+bool i915_active_acquire(struct i915_active *ref)
+{
+ lockdep_assert_held(BKL(ref));
+ return !ref->count++;
+}
+
+void i915_active_release(struct i915_active *ref)
+{
+ lockdep_assert_held(BKL(ref));
+ __active_retire(ref);
+}
+
+int i915_active_wait(struct i915_active *ref)
+{
+ struct active_node *it, *n;
+ int ret = 0;
+
+ if (i915_active_acquire(ref))
+ goto out_release;
+
+ ret = i915_active_request_retire(&ref->last, BKL(ref));
+ if (ret)
+ goto out_release;
+
+ rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
+ ret = i915_active_request_retire(&it->base, BKL(ref));
+ if (ret)
+ break;
+ }
+
+out_release:
+ i915_active_release(ref);
+ return ret;
+}
+
+int i915_request_await_active_request(struct i915_request *rq,
+ struct i915_active_request *active)
+{
+ struct i915_request *barrier =
+ i915_active_request_raw(active, &rq->i915->drm.struct_mutex);
+
+ return barrier ? i915_request_await_dma_fence(rq, &barrier->fence) : 0;
+}
+
+int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
+{
+ struct active_node *it, *n;
+ int ret;
+
+ ret = i915_request_await_active_request(rq, &ref->last);
+ if (ret)
+ return ret;
+
+ rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
+ ret = i915_request_await_active_request(rq, &it->base);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+void i915_active_fini(struct i915_active *ref)
+{
+ GEM_BUG_ON(i915_active_request_isset(&ref->last));
+ GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree));
+ GEM_BUG_ON(ref->count);
+}
+#endif
+
+int i915_active_request_set(struct i915_active_request *active,
+ struct i915_request *rq)
+{
+ int err;
+
+ /* Must maintain ordering wrt previous active requests */
+ err = i915_request_await_active_request(rq, active);
+ if (err)
+ return err;
+
+ __i915_active_request_set(active, rq);
+ return 0;
+}
+
+void i915_active_retire_noop(struct i915_active_request *active,
+ struct i915_request *request)
+{
+ /* Space left intentionally blank */
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/i915_active.c"
+#endif
+
+int __init i915_global_active_init(void)
+{
+ global.slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
+ if (!global.slab_cache)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void __exit i915_global_active_exit(void)
+{
+ kmem_cache_destroy(global.slab_cache);
+}
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
new file mode 100644
index 000000000000..12b5c1d287d1
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -0,0 +1,425 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef _I915_ACTIVE_H_
+#define _I915_ACTIVE_H_
+
+#include <linux/lockdep.h>
+
+#include "i915_active_types.h"
+#include "i915_request.h"
+
+/*
+ * We treat requests as fences. These are not to be confused with our
+ * "fence registers"; they are pipeline synchronisation objects akin to
+ * GL_ARB_sync.
+ * We use the fences to synchronize access from the CPU with activity on the
+ * GPU; for example, we should not rewrite an object's PTEs whilst the GPU
+ * is reading them. We also track fences at a higher level to provide
+ * implicit synchronisation around GEM objects, e.g. set-domain will wait
+ * for outstanding GPU rendering before marking the object ready for CPU
+ * access, or a pageflip will wait until the GPU is complete before showing
+ * the frame on the scanout.
+ *
+ * In order to use a fence, the object must track the fence it needs to
+ * serialise with. For example, GEM objects want to track both read and
+ * write access so that we can perform concurrent read operations between
+ * the CPU and GPU engines, as well as waiting for all rendering to
+ * complete, or waiting for the last GPU user of a "fence register". The
+ * object then embeds a #i915_active_request to track the most recent (in
+ * retirement order) request relevant for the desired mode of access.
+ * The #i915_active_request is updated with i915_active_request_set() to
+ * track the most recent fence request, typically this is done as part of
+ * i915_vma_move_to_active().
+ *
+ * When the #i915_active_request completes (is retired), it will
+ * signal its completion to the owner through a callback as well as mark
+ * itself as idle (i915_active_request.request == NULL). The owner
+ * can then perform any action, such as delayed freeing of an active
+ * resource including itself.
+ */
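+
+/*
+ * A minimal usage sketch, for illustration only: struct hypothetical_tracker
+ * and the local variables (tracker, rq, i915, err) are assumed context and
+ * not part of this interface; only the i915_active_request_* calls are real.
+ *
+ *	struct hypothetical_tracker {
+ *		struct i915_active_request last;
+ *	};
+ *
+ *	// once, before first use
+ *	i915_active_request_init(&tracker->last, NULL, NULL);
+ *
+ *	// under struct_mutex, whenever new work touches the resource
+ *	err = i915_active_request_set(&tracker->last, rq);
+ *
+ *	// under struct_mutex, to wait for and retire the tracked work
+ *	err = i915_active_request_retire(&tracker->last,
+ *					 &i915->drm.struct_mutex);
+ */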
+
+void i915_active_retire_noop(struct i915_active_request *active,
+ struct i915_request *request);
+
+/**
+ * i915_active_request_init - prepares the activity tracker for use
+ * @active - the active tracker
+ * @rq - initial request to track, can be NULL
+ * @retire - a callback invoked when the tracker is retired (becomes idle),
+ * can be NULL
+ *
+ * i915_active_request_init() prepares the embedded @active struct for use as
+ * an activity tracker, that is for tracking the last known active request
+ * associated with it. When the last request becomes idle, that is when it is
+ * retired after completion, the optional callback @retire is invoked.
+ */
+static inline void
+i915_active_request_init(struct i915_active_request *active,
+ struct i915_request *rq,
+ i915_active_retire_fn retire)
+{
+ RCU_INIT_POINTER(active->request, rq);
+ INIT_LIST_HEAD(&active->link);
+ active->retire = retire ?: i915_active_retire_noop;
+}
+
+#define INIT_ACTIVE_REQUEST(name) i915_active_request_init((name), NULL, NULL)
+
+/**
+ * __i915_active_request_set - updates the tracker to watch the current request
+ * @active - the active tracker
+ * @request - the request to watch
+ *
+ * __i915_active_request_set() watches the given @request for completion. Whilst
+ * that @request is busy, the @active reports busy. When that @request is
+ * retired, the @active tracker is updated to report idle.
+ */
+static inline void
+__i915_active_request_set(struct i915_active_request *active,
+ struct i915_request *request)
+{
+ list_move(&active->link, &request->active_list);
+ rcu_assign_pointer(active->request, request);
+}
+
+int __must_check
+i915_active_request_set(struct i915_active_request *active,
+ struct i915_request *rq);
+
+/**
+ * i915_active_request_set_retire_fn - updates the retirement callback
+ * @active - the active tracker
+ * @fn - the routine called when the request is retired
+ * @mutex - struct_mutex used to guard retirements
+ *
+ * i915_active_request_set_retire_fn() updates the function pointer that
+ * is called when the final request associated with the @active tracker
+ * is retired.
+ */
+static inline void
+i915_active_request_set_retire_fn(struct i915_active_request *active,
+ i915_active_retire_fn fn,
+ struct mutex *mutex)
+{
+ lockdep_assert_held(mutex);
+ active->retire = fn ?: i915_active_retire_noop;
+}
+
+static inline struct i915_request *
+__i915_active_request_peek(const struct i915_active_request *active)
+{
+ /*
+ * Inside the error capture (running with the driver in an unknown
+ * state), we want to bend the rules slightly (a lot).
+ *
+ * Work is in progress to make it safer; in the meantime this keeps
+ * the known issue from spamming the logs.
+ */
+ return rcu_dereference_protected(active->request, 1);
+}
+
+/**
+ * i915_active_request_raw - return the active request
+ * @active - the active tracker
+ *
+ * i915_active_request_raw() returns the current request being tracked, or NULL.
+ * It does not obtain a reference on the request for the caller, so the caller
+ * must hold struct_mutex.
+ */
+static inline struct i915_request *
+i915_active_request_raw(const struct i915_active_request *active,
+ struct mutex *mutex)
+{
+ return rcu_dereference_protected(active->request,
+ lockdep_is_held(mutex));
+}
+
+/**
+ * i915_active_request_peek - report the active request being monitored
+ * @active - the active tracker
+ *
+ * i915_active_request_peek() returns the current request being tracked if
+ * still active, or NULL. It does not obtain a reference on the request
+ * for the caller, so the caller must hold struct_mutex.
+ */
+static inline struct i915_request *
+i915_active_request_peek(const struct i915_active_request *active,
+ struct mutex *mutex)
+{
+ struct i915_request *request;
+
+ request = i915_active_request_raw(active, mutex);
+ if (!request || i915_request_completed(request))
+ return NULL;
+
+ return request;
+}
+
+/**
+ * i915_active_request_get - return a reference to the active request
+ * @active - the active tracker
+ *
+ * i915_active_request_get() returns a reference to the active request, or NULL
+ * if the active tracker is idle. The caller must hold struct_mutex.
+ */
+static inline struct i915_request *
+i915_active_request_get(const struct i915_active_request *active,
+ struct mutex *mutex)
+{
+ return i915_request_get(i915_active_request_peek(active, mutex));
+}
+
+/**
+ * __i915_active_request_get_rcu - return a reference to the active request
+ * @active - the active tracker
+ *
+ * __i915_active_request_get_rcu() returns a reference to the active request,
+ * or NULL if the active tracker is idle. The caller must hold the RCU read
+ * lock, but the returned pointer is safe to use outside of RCU.
+ */
+static inline struct i915_request *
+__i915_active_request_get_rcu(const struct i915_active_request *active)
+{
+ /*
+ * Performing a lockless retrieval of the active request is super
+ * tricky. SLAB_TYPESAFE_BY_RCU merely guarantees that the backing
+ * slab of request objects will not be freed whilst we hold the
+ * RCU read lock. It does not guarantee that the request itself
+ * will not be freed and then *reused*. Viz,
+ *
+ * Thread A Thread B
+ *
+ * rq = active.request
+ * retire(rq) -> free(rq);
+ * (rq is now first on the slab freelist)
+ * active.request = NULL
+ *
+ * rq = new submission on a new object
+ * ref(rq)
+ *
+ * To prevent the request from being reused whilst the caller
+ * uses it, we take a reference like normal. Whilst acquiring
+ * the reference we check that it is not in a destroyed state
+ * (refcnt == 0). That prevents the request being reallocated
+ * whilst the caller holds on to it. To check that the request
+ * was not reallocated as we acquired the reference we have to
+ * check that our request remains the active request across
+ * the lookup, in the same manner as a seqlock. The visibility
+ * of the pointer versus the reference counting is controlled
+ * by using RCU barriers (rcu_dereference and rcu_assign_pointer).
+ *
+ * In the middle of all that, we inspect whether the request is
+ * complete. Retiring is lazy so the request may be completed long
+ * before the active tracker is updated. Querying whether the
+ * request is complete is far cheaper (as it involves no locked
+ * instructions setting cachelines to exclusive) than acquiring
+ * the reference, so we do it first. The RCU read lock ensures the
+ * pointer dereference is valid, but does not ensure that the
+ * seqno nor HWS is the right one! However, if the request was
+ * reallocated, that means the active tracker's request was complete.
+ * If the new request is also complete, then both are and we can
+ * just report the active tracker is idle. If the new request is
+ * incomplete, then we acquire a reference on it and check that
+ * it remained the active request.
+ *
+ * It is then imperative that we do not zero the request on
+ * reallocation, so that we can chase the dangling pointers!
+ * See i915_request_alloc().
+ */
+ do {
+ struct i915_request *request;
+
+ request = rcu_dereference(active->request);
+ if (!request || i915_request_completed(request))
+ return NULL;
+
+ /*
+ * An especially silly compiler could decide to recompute the
+ * result of i915_request_completed, more specifically
+ * re-emit the load for request->fence.seqno. A race would catch
+ * a later seqno value, which could flip the result from true to
+ * false. That would mean some of the instructions below are skipped
+ * while later ones still execute. Due to
+ * barriers within the refcounting the inconsistency can't reach
+ * past the call to i915_request_get_rcu, but not executing
+ * that while still executing i915_request_put() creates
+ * havoc enough. Prevent this with a compiler barrier.
+ */
+ barrier();
+
+ request = i915_request_get_rcu(request);
+
+ /*
+ * What stops the following rcu_access_pointer() from occurring
+ * before the above i915_request_get_rcu()? If we were
+ * to read the value before pausing to get the reference to
+ * the request, we may not notice a change in the active
+ * tracker.
+ *
+ * The rcu_access_pointer() is a mere compiler barrier, which
+ * means both the CPU and compiler are free to perform the
+ * memory read without constraint. The compiler only has to
+ * ensure that any operations after the rcu_access_pointer()
+ * occur afterwards in program order. This means the read may
+ * be performed earlier by an out-of-order CPU, or adventurous
+ * compiler.
+ *
+ * The atomic operation at the heart of
+ * i915_request_get_rcu(), see dma_fence_get_rcu(), is
+ * atomic_inc_not_zero() which is only a full memory barrier
+ * when successful. That is, if i915_request_get_rcu()
+ * returns the request (and so with the reference counted
+ * incremented) then the following read for rcu_access_pointer()
+ * must occur after the atomic operation and so confirm
+ * that this request is the one currently being tracked.
+ *
+ * The corresponding write barrier is part of
+ * rcu_assign_pointer().
+ */
+ if (!request || request == rcu_access_pointer(active->request))
+ return rcu_pointer_handoff(request);
+
+ i915_request_put(request);
+ } while (1);
+}
+
+/**
+ * i915_active_request_get_unlocked - return a reference to the active request
+ * @active - the active tracker
+ *
+ * i915_active_request_get_unlocked() returns a reference to the active request,
+ * or NULL if the active tracker is idle. The reference is obtained under RCU,
+ * so no locking is required by the caller.
+ *
+ * The reference should be freed with i915_request_put().
+ */
+static inline struct i915_request *
+i915_active_request_get_unlocked(const struct i915_active_request *active)
+{
+ struct i915_request *request;
+
+ rcu_read_lock();
+ request = __i915_active_request_get_rcu(active);
+ rcu_read_unlock();
+
+ return request;
+}
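+
+/*
+ * Illustrative caller sketch (hypothetical names, not part of this patch):
+ * no lock is needed around the lookup, but the returned reference must be
+ * dropped with i915_request_put().
+ *
+ *	struct i915_request *rq;
+ *
+ *	rq = i915_active_request_get_unlocked(&tracker->last);
+ *	if (rq) {
+ *		// inspect or wait on rq here
+ *		i915_request_put(rq);
+ *	}
+ */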
+
+/**
+ * i915_active_request_isset - report whether the active tracker is assigned
+ * @active - the active tracker
+ *
+ * i915_active_request_isset() returns true if the active tracker is currently
+ * assigned to a request. Due to the lazy retiring, that request may be idle
+ * and this may report stale information.
+ */
+static inline bool
+i915_active_request_isset(const struct i915_active_request *active)
+{
+ return rcu_access_pointer(active->request);
+}
+
+/**
+ * i915_active_request_retire - waits until the request is retired
+ * @active - the active request on which to wait
+ *
+ * i915_active_request_retire() waits until the request is completed,
+ * and then ensures that at least the retirement handler for this
+ * @active tracker is called before returning. If the @active
+ * tracker is idle, the function returns immediately.
+ */
+static inline int __must_check
+i915_active_request_retire(struct i915_active_request *active,
+ struct mutex *mutex)
+{
+ struct i915_request *request;
+ long ret;
+
+ request = i915_active_request_raw(active, mutex);
+ if (!request)
+ return 0;
+
+ ret = i915_request_wait(request,
+ I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret < 0)
+ return ret;
+
+ list_del_init(&active->link);
+ RCU_INIT_POINTER(active->request, NULL);
+
+ active->retire(active, request);
+
+ return 0;
+}
+
+/*
+ * GPU activity tracking
+ *
+ * Each set of commands submitted to the GPU constitutes a single request that
+ * signals a fence upon completion. struct i915_request combines the
+ * command submission, scheduling and fence signaling roles. If we want to see
+ * if a particular task is complete, we need to grab the fence (struct
+ * i915_request) for that task and check or wait for it to be signaled. More
+ * often though we want to track the status of a bunch of tasks, for example
+ * to wait for the GPU to finish accessing some memory across a variety of
+ * different command pipelines from different clients. We could choose to
+ * track every single request associated with the task, but knowing that
+ * each request belongs to an ordered timeline (later requests within a
+ * timeline must wait for earlier requests), we need only track the
+ * latest request in each timeline to determine the overall status of the
+ * task.
+ *
+ * struct i915_active provides this tracking across timelines. It builds a
+ * composite shared-fence, and is updated as new work is submitted to the task,
+ * forming a snapshot of the current status. It should be embedded into the
+ * different resources that need to track their associated GPU activity to
+ * provide a callback when that GPU activity has ceased, or otherwise to
+ * provide a serialisation point either for request submission or for CPU
+ * synchronisation.
+ */
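+
+/*
+ * A rough sketch of the intended embedding, for illustration only: the
+ * resource type, retire callback and local variables (res, rq, i915, err)
+ * are hypothetical; only the i915_active_* calls are part of this interface.
+ *
+ *	struct hypothetical_resource {
+ *		struct i915_active active;
+ *	};
+ *
+ *	static void resource_retire(struct i915_active *ref)
+ *	{
+ *		// all tracked requests have retired; safe to recycle
+ *	}
+ *
+ *	i915_active_init(i915, &res->active, resource_retire);
+ *
+ *	// record each request that uses the resource, keyed by its timeline
+ *	err = i915_active_ref(&res->active, rq->fence.context, rq);
+ *
+ *	// before teardown, wait for all outstanding work to retire
+ *	err = i915_active_wait(&res->active);
+ *	i915_active_fini(&res->active);
+ */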
+
+void i915_active_init(struct drm_i915_private *i915,
+ struct i915_active *ref,
+ void (*retire)(struct i915_active *ref));
+
+int i915_active_ref(struct i915_active *ref,
+ u64 timeline,
+ struct i915_request *rq);
+
+int i915_active_wait(struct i915_active *ref);
+
+int i915_request_await_active(struct i915_request *rq,
+ struct i915_active *ref);
+int i915_request_await_active_request(struct i915_request *rq,
+ struct i915_active_request *active);
+
+bool i915_active_acquire(struct i915_active *ref);
+
+static inline void i915_active_cancel(struct i915_active *ref)
+{
+ GEM_BUG_ON(ref->count != 1);
+ ref->count = 0;
+}
+
+void i915_active_release(struct i915_active *ref);
+
+static inline bool
+i915_active_is_idle(const struct i915_active *ref)
+{
+ return !ref->count;
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+void i915_active_fini(struct i915_active *ref);
+#else
+static inline void i915_active_fini(struct i915_active *ref) { }
+#endif
+
+int i915_global_active_init(void);
+void i915_global_active_exit(void);
+
+#endif /* _I915_ACTIVE_H_ */
diff --git a/drivers/gpu/drm/i915/i915_active_types.h b/drivers/gpu/drm/i915/i915_active_types.h
new file mode 100644
index 000000000000..b679253b53a5
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_active_types.h
@@ -0,0 +1,36 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef _I915_ACTIVE_TYPES_H_
+#define _I915_ACTIVE_TYPES_H_
+
+#include <linux/rbtree.h>
+#include <linux/rcupdate.h>
+
+struct drm_i915_private;
+struct i915_active_request;
+struct i915_request;
+
+typedef void (*i915_active_retire_fn)(struct i915_active_request *,
+ struct i915_request *);
+
+struct i915_active_request {
+ struct i915_request __rcu *request;
+ struct list_head link;
+ i915_active_retire_fn retire;
+};
+
+struct i915_active {
+ struct drm_i915_private *i915;
+
+ struct rb_root tree;
+ struct i915_active_request last;
+ unsigned int count;
+
+ void (*retire)(struct i915_active *ref);
+};
+
+#endif /* _I915_ACTIVE_TYPES_H_ */
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 95478db9998b..33e8eed64423 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -865,7 +865,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
int cmd_table_count;
int ret;
- if (!IS_GEN7(engine->i915))
+ if (!IS_GEN(engine->i915, 7))
return;
switch (engine->id) {
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 40a61ef9aac1..0bd890c04fe4 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -26,12 +26,15 @@
*
*/
-#include <linux/debugfs.h>
#include <linux/sort.h>
#include <linux/sched/mm.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_fourcc.h>
#include "intel_drv.h"
#include "intel_guc_submission.h"
+#include "i915_reset.h"
+
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
return to_i915(node->minor->dev);
@@ -48,7 +51,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
intel_device_info_dump_flags(info, &p);
- intel_device_info_dump_runtime(info, &p);
+ intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
intel_driver_caps_print(&dev_priv->caps, &p);
kernel_param_lock(THIS_MODULE);
@@ -157,14 +160,14 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
if (obj->base.name)
seq_printf(m, " (name: %d)", obj->base.name);
- list_for_each_entry(vma, &obj->vma_list, obj_link) {
+ list_for_each_entry(vma, &obj->vma.list, obj_link) {
if (i915_vma_is_pinned(vma))
pin_count++;
}
seq_printf(m, " (pinned x %d)", pin_count);
if (obj->pin_global)
seq_printf(m, " (global)");
- list_for_each_entry(vma, &obj->vma_list, obj_link) {
+ list_for_each_entry(vma, &obj->vma.list, obj_link) {
if (!drm_mm_node_allocated(&vma->node))
continue;
@@ -204,7 +207,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
if (vma->fence)
seq_printf(m, " , fence: %d%s",
vma->fence->id,
- i915_gem_active_isset(&vma->last_fence) ? "*" : "");
+ i915_active_request_isset(&vma->last_fence) ? "*" : "");
seq_puts(m, ")");
}
if (obj->stolen)
@@ -297,11 +300,12 @@ out:
}
struct file_stats {
- struct drm_i915_file_private *file_priv;
+ struct i915_address_space *vm;
unsigned long count;
u64 total, unbound;
u64 global, shared;
u64 active, inactive;
+ u64 closed;
};
static int per_file_stats(int id, void *ptr, void *data)
@@ -319,16 +323,14 @@ static int per_file_stats(int id, void *ptr, void *data)
if (obj->base.name || obj->base.dma_buf)
stats->shared += obj->base.size;
- list_for_each_entry(vma, &obj->vma_list, obj_link) {
+ list_for_each_entry(vma, &obj->vma.list, obj_link) {
if (!drm_mm_node_allocated(&vma->node))
continue;
if (i915_vma_is_ggtt(vma)) {
stats->global += vma->node.size;
} else {
- struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);
-
- if (ppgtt->vm.file != stats->file_priv)
+ if (vma->vm != stats->vm)
continue;
}
@@ -336,6 +338,9 @@ static int per_file_stats(int id, void *ptr, void *data)
stats->active += vma->node.size;
else
stats->inactive += vma->node.size;
+
+ if (i915_vma_is_closed(vma))
+ stats->closed += vma->node.size;
}
return 0;
@@ -343,7 +348,7 @@ static int per_file_stats(int id, void *ptr, void *data)
#define print_file_stats(m, name, stats) do { \
if (stats.count) \
- seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
+ seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound, %llu closed)\n", \
name, \
stats.count, \
stats.total, \
@@ -351,20 +356,19 @@ static int per_file_stats(int id, void *ptr, void *data)
stats.inactive, \
stats.global, \
stats.shared, \
- stats.unbound); \
+ stats.unbound, \
+ stats.closed); \
} while (0)
static void print_batch_pool_stats(struct seq_file *m,
struct drm_i915_private *dev_priv)
{
struct drm_i915_gem_object *obj;
- struct file_stats stats;
struct intel_engine_cs *engine;
+ struct file_stats stats = {};
enum intel_engine_id id;
int j;
- memset(&stats, 0, sizeof(stats));
-
for_each_engine(engine, dev_priv, id) {
for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
list_for_each_entry(obj,
@@ -377,44 +381,47 @@ static void print_batch_pool_stats(struct seq_file *m,
print_file_stats(m, "[k]batch pool", stats);
}
-static int per_file_ctx_stats(int idx, void *ptr, void *data)
+static void print_context_stats(struct seq_file *m,
+ struct drm_i915_private *i915)
{
- struct i915_gem_context *ctx = ptr;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
+ struct file_stats kstats = {};
+ struct i915_gem_context *ctx;
- for_each_engine(engine, ctx->i915, id) {
- struct intel_context *ce = to_intel_context(ctx, engine);
+ list_for_each_entry(ctx, &i915->contexts.list, link) {
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- if (ce->state)
- per_file_stats(0, ce->state->obj, data);
- if (ce->ring)
- per_file_stats(0, ce->ring->vma->obj, data);
- }
+ for_each_engine(engine, i915, id) {
+ struct intel_context *ce = to_intel_context(ctx, engine);
- return 0;
-}
+ if (ce->state)
+ per_file_stats(0, ce->state->obj, &kstats);
+ if (ce->ring)
+ per_file_stats(0, ce->ring->vma->obj, &kstats);
+ }
-static void print_context_stats(struct seq_file *m,
- struct drm_i915_private *dev_priv)
-{
- struct drm_device *dev = &dev_priv->drm;
- struct file_stats stats;
- struct drm_file *file;
+ if (!IS_ERR_OR_NULL(ctx->file_priv)) {
+ struct file_stats stats = { .vm = &ctx->ppgtt->vm, };
+ struct drm_file *file = ctx->file_priv->file;
+ struct task_struct *task;
+ char name[80];
- memset(&stats, 0, sizeof(stats));
+ spin_lock(&file->table_lock);
+ idr_for_each(&file->object_idr, per_file_stats, &stats);
+ spin_unlock(&file->table_lock);
- mutex_lock(&dev->struct_mutex);
- if (dev_priv->kernel_context)
- per_file_ctx_stats(0, dev_priv->kernel_context, &stats);
+ rcu_read_lock();
+ task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
+ snprintf(name, sizeof(name), "%s/%d",
+ task ? task->comm : "<unknown>",
+ ctx->user_handle);
+ rcu_read_unlock();
- list_for_each_entry(file, &dev->filelist, lhead) {
- struct drm_i915_file_private *fpriv = file->driver_priv;
- idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
+ print_file_stats(m, name, stats);
+ }
}
- mutex_unlock(&dev->struct_mutex);
- print_file_stats(m, "[k]contexts", stats);
+ print_file_stats(m, "[k]contexts", kstats);
}
static int i915_gem_object_info(struct seq_file *m, void *data)
@@ -426,14 +433,9 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
struct drm_i915_gem_object *obj;
unsigned int page_sizes = 0;
- struct drm_file *file;
char buf[80];
int ret;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
seq_printf(m, "%u objects, %llu bytes\n",
dev_priv->mm.object_count,
dev_priv->mm.object_memory);
@@ -514,43 +516,14 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
buf, sizeof(buf)));
seq_putc(m, '\n');
- print_batch_pool_stats(m, dev_priv);
- mutex_unlock(&dev->struct_mutex);
-
- mutex_lock(&dev->filelist_mutex);
- print_context_stats(m, dev_priv);
- list_for_each_entry_reverse(file, &dev->filelist, lhead) {
- struct file_stats stats;
- struct drm_i915_file_private *file_priv = file->driver_priv;
- struct i915_request *request;
- struct task_struct *task;
-
- mutex_lock(&dev->struct_mutex);
- memset(&stats, 0, sizeof(stats));
- stats.file_priv = file->driver_priv;
- spin_lock(&file->table_lock);
- idr_for_each(&file->object_idr, per_file_stats, &stats);
- spin_unlock(&file->table_lock);
- /*
- * Although we have a valid reference on file->pid, that does
- * not guarantee that the task_struct who called get_pid() is
- * still alive (e.g. get_pid(current) => fork() => exit()).
- * Therefore, we need to protect this ->comm access using RCU.
- */
- request = list_first_entry_or_null(&file_priv->mm.request_list,
- struct i915_request,
- client_link);
- rcu_read_lock();
- task = pid_task(request && request->gem_context->pid ?
- request->gem_context->pid : file->pid,
- PIDTYPE_PID);
- print_file_stats(m, task ? task->comm : "<unknown>", stats);
- rcu_read_unlock();
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
- mutex_unlock(&dev->struct_mutex);
- }
- mutex_unlock(&dev->filelist_mutex);
+ print_batch_pool_stats(m, dev_priv);
+ print_context_stats(m, dev_priv);
+ mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -656,10 +629,12 @@ static void gen8_display_interrupt_info(struct seq_file *m)
for_each_pipe(dev_priv, pipe) {
enum intel_display_power_domain power_domain;
+ intel_wakeref_t wakeref;
power_domain = POWER_DOMAIN_PIPE(pipe);
- if (!intel_display_power_get_if_enabled(dev_priv,
- power_domain)) {
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ power_domain);
+ if (!wakeref) {
seq_printf(m, "Pipe %c power disabled\n",
pipe_name(pipe));
continue;
@@ -674,7 +649,7 @@ static void gen8_display_interrupt_info(struct seq_file *m)
pipe_name(pipe),
I915_READ(GEN8_DE_PIPE_IER(pipe)));
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, power_domain, wakeref);
}
seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
@@ -704,11 +679,14 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
int i, pipe;
- intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(dev_priv);
if (IS_CHERRYVIEW(dev_priv)) {
+ intel_wakeref_t pref;
+
seq_printf(m, "Master Interrupt Control:\t%08x\n",
I915_READ(GEN8_MASTER_IRQ));
@@ -724,8 +702,9 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
enum intel_display_power_domain power_domain;
power_domain = POWER_DOMAIN_PIPE(pipe);
- if (!intel_display_power_get_if_enabled(dev_priv,
- power_domain)) {
+ pref = intel_display_power_get_if_enabled(dev_priv,
+ power_domain);
+ if (!pref) {
seq_printf(m, "Pipe %c power disabled\n",
pipe_name(pipe));
continue;
@@ -735,17 +714,17 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
pipe_name(pipe),
I915_READ(PIPESTAT(pipe)));
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, power_domain, pref);
}
- intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+ pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
seq_printf(m, "Port hotplug:\t%08x\n",
I915_READ(PORT_HOTPLUG_EN));
seq_printf(m, "DPFLIPSTAT:\t%08x\n",
I915_READ(VLV_DPFLIPSTAT));
seq_printf(m, "DPINVGTT:\t%08x\n",
I915_READ(DPINVGTT));
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);
for (i = 0; i < 4; i++) {
seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
@@ -808,10 +787,12 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
I915_READ(VLV_IMR));
for_each_pipe(dev_priv, pipe) {
enum intel_display_power_domain power_domain;
+ intel_wakeref_t pref;
power_domain = POWER_DOMAIN_PIPE(pipe);
- if (!intel_display_power_get_if_enabled(dev_priv,
- power_domain)) {
+ pref = intel_display_power_get_if_enabled(dev_priv,
+ power_domain);
+ if (!pref) {
seq_printf(m, "Pipe %c power disabled\n",
pipe_name(pipe));
continue;
@@ -820,7 +801,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
seq_printf(m, "Pipe %c stat:\t%08x\n",
pipe_name(pipe),
I915_READ(PIPESTAT(pipe)));
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, power_domain, pref);
}
seq_printf(m, "Master IER:\t%08x\n",
@@ -907,7 +888,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
}
}
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
return 0;
}
@@ -980,10 +961,11 @@ static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
struct drm_i915_private *i915 = inode->i_private;
struct i915_gpu_state *gpu;
+ intel_wakeref_t wakeref;
- intel_runtime_pm_get(i915);
- gpu = i915_capture_gpu_state(i915);
- intel_runtime_pm_put(i915);
+ gpu = NULL;
+ with_intel_runtime_pm(i915, wakeref)
+ gpu = i915_capture_gpu_state(i915);
if (IS_ERR(gpu))
return PTR_ERR(gpu);
@@ -1038,39 +1020,16 @@ static const struct file_operations i915_error_state_fops = {
};
#endif
-static int
-i915_next_seqno_set(void *data, u64 val)
-{
- struct drm_i915_private *dev_priv = data;
- struct drm_device *dev = &dev_priv->drm;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- intel_runtime_pm_get(dev_priv);
- ret = i915_gem_set_global_seqno(dev, val);
- intel_runtime_pm_put(dev_priv);
-
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
- NULL, i915_next_seqno_set,
- "0x%llx\n");
-
static int i915_frequency_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ intel_wakeref_t wakeref;
int ret = 0;
- intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(dev_priv);
- if (IS_GEN5(dev_priv)) {
+ if (IS_GEN(dev_priv, 5)) {
u16 rgvswctl = I915_READ16(MEMSWCTL);
u16 rgvstat = I915_READ16(MEMSTAT_ILK);
@@ -1280,7 +1239,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
return ret;
}
@@ -1319,14 +1278,13 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
u64 acthd[I915_NUM_ENGINES];
u32 seqno[I915_NUM_ENGINES];
struct intel_instdone instdone;
+ intel_wakeref_t wakeref;
enum intel_engine_id id;
if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
seq_puts(m, "Wedged\n");
if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
seq_puts(m, "Reset in progress: struct_mutex backoff\n");
- if (test_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags))
- seq_puts(m, "Reset in progress: reset handoff to waiter\n");
if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
seq_puts(m, "Waiter holding struct mutex\n");
if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
@@ -1337,17 +1295,15 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
return 0;
}
- intel_runtime_pm_get(dev_priv);
+ with_intel_runtime_pm(dev_priv, wakeref) {
+ for_each_engine(engine, dev_priv, id) {
+ acthd[id] = intel_engine_get_active_head(engine);
+ seqno[id] = intel_engine_get_seqno(engine);
+ }
- for_each_engine(engine, dev_priv, id) {
- acthd[id] = intel_engine_get_active_head(engine);
- seqno[id] = intel_engine_get_seqno(engine);
+ intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
}
- intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
-
- intel_runtime_pm_put(dev_priv);
-
if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
seq_printf(m, "Hangcheck active, timer fires in %dms\n",
jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
@@ -1360,37 +1316,16 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));
for_each_engine(engine, dev_priv, id) {
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
- struct rb_node *rb;
-
seq_printf(m, "%s:\n", engine->name);
- seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
+ seq_printf(m, "\tseqno = %x [current %x, last %x], %dms ago\n",
engine->hangcheck.seqno, seqno[id],
- intel_engine_last_submit(engine));
- seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s, wedged? %s\n",
- yesno(intel_engine_has_waiter(engine)),
- yesno(test_bit(engine->id,
- &dev_priv->gpu_error.missed_irq_rings)),
- yesno(engine->hangcheck.stalled),
- yesno(engine->hangcheck.wedged));
-
- spin_lock_irq(&b->rb_lock);
- for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
- struct intel_wait *w = rb_entry(rb, typeof(*w), node);
-
- seq_printf(m, "\t%s [%d] waiting for %x\n",
- w->tsk->comm, w->tsk->pid, w->seqno);
- }
- spin_unlock_irq(&b->rb_lock);
+ intel_engine_last_submit(engine),
+ jiffies_to_msecs(jiffies -
+ engine->hangcheck.action_timestamp));
seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
(long long)engine->hangcheck.acthd,
(long long)acthd[id]);
- seq_printf(m, "\taction = %s(%d) %d ms ago\n",
- hangcheck_action_to_str(engine->hangcheck.action),
- engine->hangcheck.action,
- jiffies_to_msecs(jiffies -
- engine->hangcheck.action_timestamp));
if (engine->id == RCS) {
seq_puts(m, "\tinstdone read =\n");
@@ -1622,18 +1557,17 @@ static int gen6_drpc_info(struct seq_file *m)
static int i915_drpc_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- int err;
-
- intel_runtime_pm_get(dev_priv);
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- err = vlv_drpc_info(m);
- else if (INTEL_GEN(dev_priv) >= 6)
- err = gen6_drpc_info(m);
- else
- err = ironlake_drpc_info(m);
-
- intel_runtime_pm_put(dev_priv);
+ intel_wakeref_t wakeref;
+ int err = -ENODEV;
+
+ with_intel_runtime_pm(dev_priv, wakeref) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ err = vlv_drpc_info(m);
+ else if (INTEL_GEN(dev_priv) >= 6)
+ err = gen6_drpc_info(m);
+ else
+ err = ironlake_drpc_info(m);
+ }
return err;
}
@@ -1655,11 +1589,12 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_fbc *fbc = &dev_priv->fbc;
+ intel_wakeref_t wakeref;
if (!HAS_FBC(dev_priv))
return -ENODEV;
- intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(dev_priv);
mutex_lock(&fbc->lock);
if (intel_fbc_is_active(dev_priv))
@@ -1686,7 +1621,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
}
mutex_unlock(&fbc->lock);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
return 0;
}
@@ -1731,11 +1666,12 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
static int i915_ips_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ intel_wakeref_t wakeref;
if (!HAS_IPS(dev_priv))
return -ENODEV;
- intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(dev_priv);
seq_printf(m, "Enabled by kernel parameter: %s\n",
yesno(i915_modparams.enable_ips));
@@ -1749,7 +1685,7 @@ static int i915_ips_status(struct seq_file *m, void *unused)
seq_puts(m, "Currently: disabled\n");
}
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
return 0;
}
@@ -1757,10 +1693,10 @@ static int i915_ips_status(struct seq_file *m, void *unused)
static int i915_sr_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ intel_wakeref_t wakeref;
bool sr_enabled = false;
- intel_runtime_pm_get(dev_priv);
- intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
if (INTEL_GEN(dev_priv) >= 9)
/* no global SR status; inspect per-plane WM */;
@@ -1776,8 +1712,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
- intel_runtime_pm_put(dev_priv);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
@@ -1786,31 +1721,24 @@ static int i915_sr_status(struct seq_file *m, void *unused)
static int i915_emon_status(struct seq_file *m, void *unused)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- unsigned long temp, chipset, gfx;
- int ret;
+ struct drm_i915_private *i915 = node_to_i915(m->private);
+ intel_wakeref_t wakeref;
- if (!IS_GEN5(dev_priv))
+ if (!IS_GEN(i915, 5))
return -ENODEV;
- intel_runtime_pm_get(dev_priv);
+ with_intel_runtime_pm(i915, wakeref) {
+ unsigned long temp, chipset, gfx;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- temp = i915_mch_val(dev_priv);
- chipset = i915_chipset_val(dev_priv);
- gfx = i915_gfx_val(dev_priv);
- mutex_unlock(&dev->struct_mutex);
+ temp = i915_mch_val(i915);
+ chipset = i915_chipset_val(i915);
+ gfx = i915_gfx_val(i915);
- seq_printf(m, "GMCH temp: %ld\n", temp);
- seq_printf(m, "Chipset power: %ld\n", chipset);
- seq_printf(m, "GFX power: %ld\n", gfx);
- seq_printf(m, "Total power: %ld\n", chipset + gfx);
-
- intel_runtime_pm_put(dev_priv);
+ seq_printf(m, "GMCH temp: %ld\n", temp);
+ seq_printf(m, "Chipset power: %ld\n", chipset);
+ seq_printf(m, "GFX power: %ld\n", gfx);
+ seq_printf(m, "Total power: %ld\n", chipset + gfx);
+ }
return 0;
}
@@ -1820,13 +1748,14 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_rps *rps = &dev_priv->gt_pm.rps;
unsigned int max_gpu_freq, min_gpu_freq;
+ intel_wakeref_t wakeref;
int gpu_freq, ia_freq;
int ret;
if (!HAS_LLC(dev_priv))
return -ENODEV;
- intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(dev_priv);
ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
if (ret)
@@ -1859,7 +1788,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
mutex_unlock(&dev_priv->pcu_lock);
out:
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
return ret;
}
@@ -2032,15 +1961,16 @@ static const char *swizzle_string(unsigned swizzle)
static int i915_swizzle_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ intel_wakeref_t wakeref;
- intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(dev_priv);
seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
swizzle_string(dev_priv->mm.bit_6_swizzle_x));
seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
swizzle_string(dev_priv->mm.bit_6_swizzle_y));
- if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
+ if (IS_GEN_RANGE(dev_priv, 3, 4)) {
seq_printf(m, "DDC = 0x%08x\n",
I915_READ(DCC));
seq_printf(m, "DDC2 = 0x%08x\n",
@@ -2071,141 +2001,11 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
seq_puts(m, "L-shaped memory detected\n");
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
return 0;
}
-static int per_file_ctx(int id, void *ptr, void *data)
-{
- struct i915_gem_context *ctx = ptr;
- struct seq_file *m = data;
- struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
-
- if (!ppgtt) {
- seq_printf(m, " no ppgtt for context %d\n",
- ctx->user_handle);
- return 0;
- }
-
- if (i915_gem_context_is_default(ctx))
- seq_puts(m, " default context:\n");
- else
- seq_printf(m, " context %d:\n", ctx->user_handle);
- ppgtt->debug_dump(ppgtt, m);
-
- return 0;
-}
-
-static void gen8_ppgtt_info(struct seq_file *m,
- struct drm_i915_private *dev_priv)
-{
- struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int i;
-
- if (!ppgtt)
- return;
-
- for_each_engine(engine, dev_priv, id) {
- seq_printf(m, "%s\n", engine->name);
- for (i = 0; i < 4; i++) {
- u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
- pdp <<= 32;
- pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
- seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
- }
- }
-}
-
-static void gen6_ppgtt_info(struct seq_file *m,
- struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- if (IS_GEN6(dev_priv))
- seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
-
- for_each_engine(engine, dev_priv, id) {
- seq_printf(m, "%s\n", engine->name);
- if (IS_GEN7(dev_priv))
- seq_printf(m, "GFX_MODE: 0x%08x\n",
- I915_READ(RING_MODE_GEN7(engine)));
- seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
- I915_READ(RING_PP_DIR_BASE(engine)));
- seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
- I915_READ(RING_PP_DIR_BASE_READ(engine)));
- seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
- I915_READ(RING_PP_DIR_DCLV(engine)));
- }
- if (dev_priv->mm.aliasing_ppgtt) {
- struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-
- seq_puts(m, "aliasing PPGTT:\n");
- seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);
-
- ppgtt->debug_dump(ppgtt, m);
- }
-
- seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
-}
-
-static int i915_ppgtt_info(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct drm_file *file;
- int ret;
-
- mutex_lock(&dev->filelist_mutex);
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- goto out_unlock;
-
- intel_runtime_pm_get(dev_priv);
-
- if (INTEL_GEN(dev_priv) >= 8)
- gen8_ppgtt_info(m, dev_priv);
- else if (INTEL_GEN(dev_priv) >= 6)
- gen6_ppgtt_info(m, dev_priv);
-
- list_for_each_entry_reverse(file, &dev->filelist, lhead) {
- struct drm_i915_file_private *file_priv = file->driver_priv;
- struct task_struct *task;
-
- task = get_pid_task(file->pid, PIDTYPE_PID);
- if (!task) {
- ret = -ESRCH;
- goto out_rpm;
- }
- seq_printf(m, "\nproc: %s\n", task->comm);
- put_task_struct(task);
- idr_for_each(&file_priv->context_idr, per_file_ctx,
- (void *)(unsigned long)m);
- }
-
-out_rpm:
- intel_runtime_pm_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
-out_unlock:
- mutex_unlock(&dev->filelist_mutex);
- return ret;
-}
-
-static int count_irq_waiters(struct drm_i915_private *i915)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int count = 0;
-
- for_each_engine(engine, i915, id)
- count += intel_engine_has_waiter(engine);
-
- return count;
-}
-
static const char *rps_power_to_str(unsigned int power)
{
static const char * const strings[] = {
@@ -2226,9 +2026,10 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
struct drm_device *dev = &dev_priv->drm;
struct intel_rps *rps = &dev_priv->gt_pm.rps;
u32 act_freq = rps->cur_freq;
+ intel_wakeref_t wakeref;
struct drm_file *file;
- if (intel_runtime_pm_get_if_in_use(dev_priv)) {
+ with_intel_runtime_pm_if_in_use(dev_priv, wakeref) {
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
mutex_lock(&dev_priv->pcu_lock);
act_freq = vlv_punit_read(dev_priv,
@@ -2239,13 +2040,11 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
act_freq = intel_get_cagf(dev_priv,
I915_READ(GEN6_RPSTAT1));
}
- intel_runtime_pm_put(dev_priv);
}
seq_printf(m, "RPS enabled? %d\n", rps->enabled);
seq_printf(m, "GPU busy? %s [%d requests]\n",
yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
- seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
seq_printf(m, "Boosts outstanding? %d\n",
atomic_read(&rps->num_waiters));
seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
@@ -2322,6 +2121,7 @@ static int i915_llc(struct seq_file *m, void *data)
static int i915_huc_load_status_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ intel_wakeref_t wakeref;
struct drm_printer p;
if (!HAS_HUC(dev_priv))
@@ -2330,9 +2130,8 @@ static int i915_huc_load_status_info(struct seq_file *m, void *data)
p = drm_seq_file_printer(m);
intel_uc_fw_dump(&dev_priv->huc.fw, &p);
- intel_runtime_pm_get(dev_priv);
- seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
- intel_runtime_pm_put(dev_priv);
+ with_intel_runtime_pm(dev_priv, wakeref)
+ seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
return 0;
}
@@ -2340,8 +2139,8 @@ static int i915_huc_load_status_info(struct seq_file *m, void *data)
static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ intel_wakeref_t wakeref;
struct drm_printer p;
- u32 tmp, i;
if (!HAS_GUC(dev_priv))
return -ENODEV;
@@ -2349,22 +2148,23 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data)
p = drm_seq_file_printer(m);
intel_uc_fw_dump(&dev_priv->guc.fw, &p);
- intel_runtime_pm_get(dev_priv);
-
- tmp = I915_READ(GUC_STATUS);
-
- seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
- seq_printf(m, "\tBootrom status = 0x%x\n",
- (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
- seq_printf(m, "\tuKernel status = 0x%x\n",
- (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
- seq_printf(m, "\tMIA Core status = 0x%x\n",
- (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
- seq_puts(m, "\nScratch registers:\n");
- for (i = 0; i < 16; i++)
- seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));
-
- intel_runtime_pm_put(dev_priv);
+ with_intel_runtime_pm(dev_priv, wakeref) {
+ u32 tmp = I915_READ(GUC_STATUS);
+ u32 i;
+
+ seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
+ seq_printf(m, "\tBootrom status = 0x%x\n",
+ (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
+ seq_printf(m, "\tuKernel status = 0x%x\n",
+ (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
+ seq_printf(m, "\tMIA Core status = 0x%x\n",
+ (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
+ seq_puts(m, "\nScratch registers:\n");
+ for (i = 0; i < 16; i++) {
+ seq_printf(m, "\t%2d: \t0x%x\n",
+ i, I915_READ(SOFT_SCRATCH(i)));
+ }
+ }
return 0;
}
@@ -2416,7 +2216,7 @@ static void i915_guc_client_info(struct seq_file *m,
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- uint64_t tot = 0;
+ u64 tot = 0;
seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
client->priority, client->stage_id, client->proc_desc_offset);
@@ -2671,7 +2471,8 @@ DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
static void
psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
{
- u32 val, psr_status;
+ u32 val, status_val;
+ const char *status = "unknown";
if (dev_priv->psr.psr2_enabled) {
static const char * const live_status[] = {
@@ -2687,14 +2488,11 @@ psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
"BUF_ON",
"TG_ON"
};
- psr_status = I915_READ(EDP_PSR2_STATUS);
- val = (psr_status & EDP_PSR2_STATUS_STATE_MASK) >>
- EDP_PSR2_STATUS_STATE_SHIFT;
- if (val < ARRAY_SIZE(live_status)) {
- seq_printf(m, "Source PSR status: 0x%x [%s]\n",
- psr_status, live_status[val]);
- return;
- }
+ val = I915_READ(EDP_PSR2_STATUS);
+ status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
+ EDP_PSR2_STATUS_STATE_SHIFT;
+ if (status_val < ARRAY_SIZE(live_status))
+ status = live_status[status_val];
} else {
static const char * const live_status[] = {
"IDLE",
@@ -2706,74 +2504,102 @@ psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
"SRDOFFACK",
"SRDENT_ON",
};
- psr_status = I915_READ(EDP_PSR_STATUS);
- val = (psr_status & EDP_PSR_STATUS_STATE_MASK) >>
- EDP_PSR_STATUS_STATE_SHIFT;
- if (val < ARRAY_SIZE(live_status)) {
- seq_printf(m, "Source PSR status: 0x%x [%s]\n",
- psr_status, live_status[val]);
- return;
- }
+ val = I915_READ(EDP_PSR_STATUS);
+ status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
+ EDP_PSR_STATUS_STATE_SHIFT;
+ if (status_val < ARRAY_SIZE(live_status))
+ status = live_status[status_val];
}
- seq_printf(m, "Source PSR status: 0x%x [%s]\n", psr_status, "unknown");
+ seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
}
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- u32 psrperf = 0;
- bool enabled = false;
- bool sink_support;
+ struct i915_psr *psr = &dev_priv->psr;
+ intel_wakeref_t wakeref;
+ const char *status;
+ bool enabled;
+ u32 val;
if (!HAS_PSR(dev_priv))
return -ENODEV;
- sink_support = dev_priv->psr.sink_support;
- seq_printf(m, "Sink_Support: %s\n", yesno(sink_support));
- if (!sink_support)
- return 0;
+ seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
+ if (psr->dp)
+ seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
+ seq_puts(m, "\n");
- intel_runtime_pm_get(dev_priv);
+ if (!psr->sink_support)
+ return 0;
- mutex_lock(&dev_priv->psr.lock);
- seq_printf(m, "PSR mode: %s\n",
- dev_priv->psr.psr2_enabled ? "PSR2" : "PSR1");
- seq_printf(m, "Enabled: %s\n", yesno(dev_priv->psr.enabled));
- seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
- dev_priv->psr.busy_frontbuffer_bits);
+ wakeref = intel_runtime_pm_get(dev_priv);
+ mutex_lock(&psr->lock);
- if (dev_priv->psr.psr2_enabled)
- enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
+ if (psr->enabled)
+ status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
else
- enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
+ status = "disabled";
+ seq_printf(m, "PSR mode: %s\n", status);
- seq_printf(m, "Main link in standby mode: %s\n",
- yesno(dev_priv->psr.link_standby));
+ if (!psr->enabled)
+ goto unlock;
- seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));
+ if (psr->psr2_enabled) {
+ val = I915_READ(EDP_PSR2_CTL);
+ enabled = val & EDP_PSR2_ENABLE;
+ } else {
+ val = I915_READ(EDP_PSR_CTL);
+ enabled = val & EDP_PSR_ENABLE;
+ }
+ seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
+ enableddisabled(enabled), val);
+ psr_source_status(dev_priv, m);
+ seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
+ psr->busy_frontbuffer_bits);
/*
* SKL+ Perf counter is reset to 0 every time DC state is entered
*/
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- psrperf = I915_READ(EDP_PSR_PERF_CNT) &
- EDP_PSR_PERF_CNT_MASK;
+ val = I915_READ(EDP_PSR_PERF_CNT) & EDP_PSR_PERF_CNT_MASK;
+ seq_printf(m, "Performance counter: %u\n", val);
+ }
- seq_printf(m, "Performance_Counter: %u\n", psrperf);
+ if (psr->debug & I915_PSR_DEBUG_IRQ) {
+ seq_printf(m, "Last attempted entry at: %lld\n",
+ psr->last_entry_attempt);
+ seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
}
- psr_source_status(dev_priv, m);
- mutex_unlock(&dev_priv->psr.lock);
+ if (psr->psr2_enabled) {
+ u32 su_frames_val[3];
+ int frame;
- if (READ_ONCE(dev_priv->psr.debug) & I915_PSR_DEBUG_IRQ) {
- seq_printf(m, "Last attempted entry at: %lld\n",
- dev_priv->psr.last_entry_attempt);
- seq_printf(m, "Last exit at: %lld\n",
- dev_priv->psr.last_exit);
+ /*
+ * Reading all 3 registers beforehand to minimize crossing a
+ * frame boundary between register reads
+ */
+ for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3)
+ su_frames_val[frame / 3] = I915_READ(PSR2_SU_STATUS(frame));
+
+ seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
+
+ for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
+ u32 su_blocks;
+
+ su_blocks = su_frames_val[frame / 3] &
+ PSR2_SU_STATUS_MASK(frame);
+ su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
+ seq_printf(m, "%d\t%d\n", frame, su_blocks);
+ }
}
- intel_runtime_pm_put(dev_priv);
+unlock:
+ mutex_unlock(&psr->lock);
+ intel_runtime_pm_put(dev_priv, wakeref);
+
return 0;
}
@@ -2782,6 +2608,7 @@ i915_edp_psr_debug_set(void *data, u64 val)
{
struct drm_i915_private *dev_priv = data;
struct drm_modeset_acquire_ctx ctx;
+ intel_wakeref_t wakeref;
int ret;
if (!CAN_PSR(dev_priv))
@@ -2789,7 +2616,7 @@ i915_edp_psr_debug_set(void *data, u64 val)
DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);
- intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(dev_priv);
drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
@@ -2804,7 +2631,7 @@ retry:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
return ret;
}
@@ -2829,24 +2656,20 @@ static int i915_energy_uJ(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
unsigned long long power;
+ intel_wakeref_t wakeref;
u32 units;
if (INTEL_GEN(dev_priv) < 6)
return -ENODEV;
- intel_runtime_pm_get(dev_priv);
-
- if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) {
- intel_runtime_pm_put(dev_priv);
+ if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
return -ENODEV;
- }
units = (power & 0x1f00) >> 8;
- power = I915_READ(MCH_SECP_NRG_STTS);
- power = (1000000 * power) >> units; /* convert to uJ */
-
- intel_runtime_pm_put(dev_priv);
+ with_intel_runtime_pm(dev_priv, wakeref)
+ power = I915_READ(MCH_SECP_NRG_STTS);
+ power = (1000000 * power) >> units; /* convert to uJ */
seq_printf(m, "%llu", power);
return 0;
@@ -2860,6 +2683,9 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
if (!HAS_RUNTIME_PM(dev_priv))
seq_puts(m, "Runtime power management not supported\n");
+ seq_printf(m, "Runtime power status: %s\n",
+ enableddisabled(!dev_priv->power_domains.wakeref));
+
seq_printf(m, "GPU idle: %s (epoch %u)\n",
yesno(!dev_priv->gt.awake), dev_priv->gt.epoch);
seq_printf(m, "IRQs disabled: %s\n",
@@ -2874,6 +2700,12 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
pci_power_name(pdev->current_state),
pdev->current_state);
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ print_intel_runtime_pm_wakeref(dev_priv, &p);
+ }
+
return 0;
}
@@ -2908,6 +2740,7 @@ static int i915_power_domain_info(struct seq_file *m, void *unused)
static int i915_dmc_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ intel_wakeref_t wakeref;
struct intel_csr *csr;
if (!HAS_CSR(dev_priv))
@@ -2915,7 +2748,7 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
csr = &dev_priv->csr;
- intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(dev_priv);
seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
seq_printf(m, "path: %s\n", csr->fw_path);
@@ -2941,7 +2774,7 @@ out:
seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
return 0;
}
@@ -2954,14 +2787,7 @@ static void intel_seq_print_mode(struct seq_file *m, int tabs,
for (i = 0; i < tabs; i++)
seq_putc(m, '\t');
- seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
- mode->base.id, mode->name,
- mode->vrefresh, mode->clock,
- mode->hdisplay, mode->hsync_start,
- mode->hsync_end, mode->htotal,
- mode->vdisplay, mode->vsync_start,
- mode->vsync_end, mode->vtotal,
- mode->type, mode->flags);
+ seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
}
static void intel_encoder_info(struct seq_file *m,
@@ -3133,14 +2959,13 @@ static const char *plane_type(enum drm_plane_type type)
return "unknown";
}
-static const char *plane_rotation(unsigned int rotation)
+static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
{
- static char buf[48];
/*
* According to doc only one DRM_MODE_ROTATE_ is allowed but this
* will print them all to visualize if the values are misused
*/
- snprintf(buf, sizeof(buf),
+ snprintf(buf, bufsize,
"%s%s%s%s%s%s(0x%08x)",
(rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
(rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
@@ -3149,8 +2974,6 @@ static const char *plane_rotation(unsigned int rotation)
(rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
(rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
rotation);
-
- return buf;
}
static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
@@ -3163,6 +2986,7 @@ static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
struct drm_plane_state *state;
struct drm_plane *plane = &intel_plane->base;
struct drm_format_name_buf format_name;
+ char rot_str[48];
if (!plane->state) {
seq_puts(m, "plane->state is NULL!\n");
@@ -3178,6 +3002,8 @@ static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
sprintf(format_name.str, "N/A");
}
+ plane_rotation(rot_str, sizeof(rot_str), state->rotation);
+
seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
plane->base.id,
plane_type(intel_plane->base.type),
@@ -3192,7 +3018,7 @@ static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
(state->src_h >> 16),
((state->src_h & 0xffff) * 15625) >> 10,
format_name.str,
- plane_rotation(state->rotation));
+ rot_str);
}
}
@@ -3231,8 +3057,10 @@ static int i915_display_info(struct seq_file *m, void *unused)
struct intel_crtc *crtc;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
+ intel_wakeref_t wakeref;
+
+ wakeref = intel_runtime_pm_get(dev_priv);
- intel_runtime_pm_get(dev_priv);
seq_printf(m, "CRTC info\n");
seq_printf(m, "---------\n");
for_each_intel_crtc(dev, crtc) {
@@ -3280,7 +3108,7 @@ static int i915_display_info(struct seq_file *m, void *unused)
drm_connector_list_iter_end(&conn_iter);
mutex_unlock(&dev->mode_config.mutex);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
return 0;
}
@@ -3289,23 +3117,24 @@ static int i915_engine_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_engine_cs *engine;
+ intel_wakeref_t wakeref;
enum intel_engine_id id;
struct drm_printer p;
- intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(dev_priv);
seq_printf(m, "GT awake? %s (epoch %u)\n",
yesno(dev_priv->gt.awake), dev_priv->gt.epoch);
seq_printf(m, "Global active requests: %d\n",
dev_priv->gt.active_requests);
seq_printf(m, "CS timestamp frequency: %u kHz\n",
- dev_priv->info.cs_timestamp_frequency_khz);
+ RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
p = drm_seq_file_printer(m);
for_each_engine(engine, dev_priv, id)
intel_engine_dump(engine, &p, "%s\n", engine->name);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
return 0;
}
@@ -3315,7 +3144,7 @@ static int i915_rcs_topology(struct seq_file *m, void *unused)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_printer p = drm_seq_file_printer(m);
- intel_device_info_dump_topology(&INTEL_INFO(dev_priv)->sseu, &p);
+ intel_device_info_dump_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
return 0;
}
@@ -3418,20 +3247,21 @@ static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
{
struct seq_file *m = file->private_data;
struct drm_i915_private *dev_priv = m->private;
- int ret;
+ intel_wakeref_t wakeref;
bool enable;
+ int ret;
ret = kstrtobool_from_user(ubuf, len, &enable);
if (ret < 0)
return ret;
- intel_runtime_pm_get(dev_priv);
- if (!dev_priv->ipc_enabled && enable)
- DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
- dev_priv->wm.distrust_bios_wm = true;
- dev_priv->ipc_enabled = enable;
- intel_enable_ipc(dev_priv);
- intel_runtime_pm_put(dev_priv);
+ with_intel_runtime_pm(dev_priv, wakeref) {
+ if (!dev_priv->ipc_enabled && enable)
+ DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
+ dev_priv->wm.distrust_bios_wm = true;
+ dev_priv->ipc_enabled = enable;
+ intel_enable_ipc(dev_priv);
+ }
return len;
}
@@ -3799,7 +3629,7 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data)
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
-static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
+static void wm_latency_show(struct seq_file *m, const u16 wm[8])
{
struct drm_i915_private *dev_priv = m->private;
struct drm_device *dev = &dev_priv->drm;
@@ -3842,7 +3672,7 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
static int pri_wm_latency_show(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = m->private;
- const uint16_t *latencies;
+ const u16 *latencies;
if (INTEL_GEN(dev_priv) >= 9)
latencies = dev_priv->wm.skl_latency;
@@ -3857,7 +3687,7 @@ static int pri_wm_latency_show(struct seq_file *m, void *data)
static int spr_wm_latency_show(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = m->private;
- const uint16_t *latencies;
+ const u16 *latencies;
if (INTEL_GEN(dev_priv) >= 9)
latencies = dev_priv->wm.skl_latency;
@@ -3872,7 +3702,7 @@ static int spr_wm_latency_show(struct seq_file *m, void *data)
static int cur_wm_latency_show(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = m->private;
- const uint16_t *latencies;
+ const u16 *latencies;
if (INTEL_GEN(dev_priv) >= 9)
latencies = dev_priv->wm.skl_latency;
@@ -3898,7 +3728,7 @@ static int spr_wm_latency_open(struct inode *inode, struct file *file)
{
struct drm_i915_private *dev_priv = inode->i_private;
- if (HAS_GMCH_DISPLAY(dev_priv))
+ if (HAS_GMCH(dev_priv))
return -ENODEV;
return single_open(file, spr_wm_latency_show, dev_priv);
@@ -3908,19 +3738,19 @@ static int cur_wm_latency_open(struct inode *inode, struct file *file)
{
struct drm_i915_private *dev_priv = inode->i_private;
- if (HAS_GMCH_DISPLAY(dev_priv))
+ if (HAS_GMCH(dev_priv))
return -ENODEV;
return single_open(file, cur_wm_latency_show, dev_priv);
}
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
- size_t len, loff_t *offp, uint16_t wm[8])
+ size_t len, loff_t *offp, u16 wm[8])
{
struct seq_file *m = file->private_data;
struct drm_i915_private *dev_priv = m->private;
struct drm_device *dev = &dev_priv->drm;
- uint16_t new[8] = { 0 };
+ u16 new[8] = { 0 };
int num_levels;
int level;
int ret;
@@ -3965,7 +3795,7 @@ static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
{
struct seq_file *m = file->private_data;
struct drm_i915_private *dev_priv = m->private;
- uint16_t *latencies;
+ u16 *latencies;
if (INTEL_GEN(dev_priv) >= 9)
latencies = dev_priv->wm.skl_latency;
@@ -3980,7 +3810,7 @@ static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
{
struct seq_file *m = file->private_data;
struct drm_i915_private *dev_priv = m->private;
- uint16_t *latencies;
+ u16 *latencies;
if (INTEL_GEN(dev_priv) >= 9)
latencies = dev_priv->wm.skl_latency;
@@ -3995,7 +3825,7 @@ static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
{
struct seq_file *m = file->private_data;
struct drm_i915_private *dev_priv = m->private;
- uint16_t *latencies;
+ u16 *latencies;
if (INTEL_GEN(dev_priv) >= 9)
latencies = dev_priv->wm.skl_latency;
@@ -4046,8 +3876,6 @@ static int
i915_wedged_set(void *data, u64 val)
{
struct drm_i915_private *i915 = data;
- struct intel_engine_cs *engine;
- unsigned int tmp;
/*
* There is no safeguard against this debugfs entry colliding
@@ -4060,18 +3888,8 @@ i915_wedged_set(void *data, u64 val)
if (i915_reset_backoff(&i915->gpu_error))
return -EAGAIN;
- for_each_engine_masked(engine, i915, val, tmp) {
- engine->hangcheck.seqno = intel_engine_get_seqno(engine);
- engine->hangcheck.stalled = true;
- }
-
i915_handle_error(i915, val, I915_ERROR_CAPTURE,
"Manually set wedged engine mask = %llx", val);
-
- wait_on_bit(&i915->gpu_error.flags,
- I915_RESET_HANDOFF,
- TASK_UNINTERRUPTIBLE);
-
return 0;
}
@@ -4079,94 +3897,6 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
i915_wedged_get, i915_wedged_set,
"%llu\n");
-static int
-fault_irq_set(struct drm_i915_private *i915,
- unsigned long *irq,
- unsigned long val)
-{
- int err;
-
- err = mutex_lock_interruptible(&i915->drm.struct_mutex);
- if (err)
- return err;
-
- err = i915_gem_wait_for_idle(i915,
- I915_WAIT_LOCKED |
- I915_WAIT_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT);
- if (err)
- goto err_unlock;
-
- *irq = val;
- mutex_unlock(&i915->drm.struct_mutex);
-
- /* Flush idle worker to disarm irq */
- drain_delayed_work(&i915->gt.idle_work);
-
- return 0;
-
-err_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
- return err;
-}
-
-static int
-i915_ring_missed_irq_get(void *data, u64 *val)
-{
- struct drm_i915_private *dev_priv = data;
-
- *val = dev_priv->gpu_error.missed_irq_rings;
- return 0;
-}
-
-static int
-i915_ring_missed_irq_set(void *data, u64 val)
-{
- struct drm_i915_private *i915 = data;
-
- return fault_irq_set(i915, &i915->gpu_error.missed_irq_rings, val);
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
- i915_ring_missed_irq_get, i915_ring_missed_irq_set,
- "0x%08llx\n");
-
-static int
-i915_ring_test_irq_get(void *data, u64 *val)
-{
- struct drm_i915_private *dev_priv = data;
-
- *val = dev_priv->gpu_error.test_irq_rings;
-
- return 0;
-}
-
-static int
-i915_ring_test_irq_set(void *data, u64 val)
-{
- struct drm_i915_private *i915 = data;
-
- /* GuC keeps the user interrupt permanently enabled for submission */
- if (USES_GUC_SUBMISSION(i915))
- return -ENODEV;
-
- /*
- * From icl, we can no longer individually mask interrupt generation
- * from each engine.
- */
- if (INTEL_GEN(i915) >= 11)
- return -ENODEV;
-
- val &= INTEL_INFO(i915)->ring_mask;
- DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
-
- return fault_irq_set(i915, &i915->gpu_error.test_irq_rings, val);
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
- i915_ring_test_irq_get, i915_ring_test_irq_set,
- "0x%08llx\n");
-
#define DROP_UNBOUND BIT(0)
#define DROP_BOUND BIT(1)
#define DROP_RETIRE BIT(2)
@@ -4197,13 +3927,15 @@ static int
i915_drop_caches_set(void *data, u64 val)
{
struct drm_i915_private *i915 = data;
+ intel_wakeref_t wakeref;
int ret = 0;
DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
val, val & DROP_ALL);
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
- if (val & DROP_RESET_ACTIVE && !intel_engines_are_idle(i915))
+ if (val & DROP_RESET_ACTIVE &&
+ wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT))
i915_gem_set_wedged(i915);
/* No need to check and wait for gpu resets, only libdrm auto-restarts
@@ -4219,22 +3951,14 @@ i915_drop_caches_set(void *data, u64 val)
I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
- if (ret == 0 && val & DROP_RESET_SEQNO)
- ret = i915_gem_set_global_seqno(&i915->drm, 1);
-
if (val & DROP_RETIRE)
i915_retire_requests(i915);
mutex_unlock(&i915->drm.struct_mutex);
}
- if (val & DROP_RESET_ACTIVE &&
- i915_terminally_wedged(&i915->gpu_error)) {
+ if (val & DROP_RESET_ACTIVE && i915_terminally_wedged(&i915->gpu_error))
i915_handle_error(i915, ALL_ENGINES, 0, NULL);
- wait_on_bit(&i915->gpu_error.flags,
- I915_RESET_HANDOFF,
- TASK_UNINTERRUPTIBLE);
- }
fs_reclaim_acquire(GFP_KERNEL);
if (val & DROP_BOUND)
@@ -4259,7 +3983,7 @@ i915_drop_caches_set(void *data, u64 val)
i915_gem_drain_freed_objects(i915);
out:
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
return ret;
}
@@ -4272,16 +3996,14 @@ static int
i915_cache_sharing_get(void *data, u64 *val)
{
struct drm_i915_private *dev_priv = data;
- u32 snpcr;
+ intel_wakeref_t wakeref;
+ u32 snpcr = 0;
- if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
+ if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
return -ENODEV;
- intel_runtime_pm_get(dev_priv);
-
- snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
-
- intel_runtime_pm_put(dev_priv);
+ with_intel_runtime_pm(dev_priv, wakeref)
+ snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
@@ -4292,24 +4014,25 @@ static int
i915_cache_sharing_set(void *data, u64 val)
{
struct drm_i915_private *dev_priv = data;
- u32 snpcr;
+ intel_wakeref_t wakeref;
- if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
+ if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
return -ENODEV;
if (val > 3)
return -EINVAL;
- intel_runtime_pm_get(dev_priv);
DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
+ with_intel_runtime_pm(dev_priv, wakeref) {
+ u32 snpcr;
+
+ /* Update the cache sharing policy here as well */
+ snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
+ snpcr &= ~GEN6_MBC_SNPCR_MASK;
+ snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
+ I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
+ }
- /* Update the cache sharing policy here as well */
- snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
- snpcr &= ~GEN6_MBC_SNPCR_MASK;
- snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
- I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
-
- intel_runtime_pm_put(dev_priv);
return 0;
}
@@ -4354,7 +4077,7 @@ static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
struct sseu_dev_info *sseu)
{
#define SS_MAX 6
- const struct intel_device_info *info = INTEL_INFO(dev_priv);
+ const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
int s, ss;
@@ -4410,7 +4133,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
struct sseu_dev_info *sseu)
{
#define SS_MAX 3
- const struct intel_device_info *info = INTEL_INFO(dev_priv);
+ const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
int s, ss;
@@ -4438,7 +4161,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
if (IS_GEN9_BC(dev_priv))
sseu->subslice_mask[s] =
- INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
+ RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
for (ss = 0; ss < info->sseu.max_subslices; ss++) {
unsigned int eu_cnt;
@@ -4472,10 +4195,10 @@ static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
if (sseu->slice_mask) {
sseu->eu_per_subslice =
- INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
+ RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice;
for (s = 0; s < fls(sseu->slice_mask); s++) {
sseu->subslice_mask[s] =
- INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
+ RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
}
sseu->eu_total = sseu->eu_per_subslice *
sseu_subslice_total(sseu);
@@ -4483,7 +4206,7 @@ static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
/* subtract fused off EU(s) from enabled slice(s) */
for (s = 0; s < fls(sseu->slice_mask); s++) {
u8 subslice_7eu =
- INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];
+ RUNTIME_INFO(dev_priv)->sseu.subslice_7eu[s];
sseu->eu_total -= hweight8(subslice_7eu);
}
@@ -4531,34 +4254,32 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct sseu_dev_info sseu;
+ intel_wakeref_t wakeref;
if (INTEL_GEN(dev_priv) < 8)
return -ENODEV;
seq_puts(m, "SSEU Device Info\n");
- i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);
+ i915_print_sseu_info(m, true, &RUNTIME_INFO(dev_priv)->sseu);
seq_puts(m, "SSEU Device Status\n");
memset(&sseu, 0, sizeof(sseu));
- sseu.max_slices = INTEL_INFO(dev_priv)->sseu.max_slices;
- sseu.max_subslices = INTEL_INFO(dev_priv)->sseu.max_subslices;
+ sseu.max_slices = RUNTIME_INFO(dev_priv)->sseu.max_slices;
+ sseu.max_subslices = RUNTIME_INFO(dev_priv)->sseu.max_subslices;
sseu.max_eus_per_subslice =
- INTEL_INFO(dev_priv)->sseu.max_eus_per_subslice;
-
- intel_runtime_pm_get(dev_priv);
-
- if (IS_CHERRYVIEW(dev_priv)) {
- cherryview_sseu_device_status(dev_priv, &sseu);
- } else if (IS_BROADWELL(dev_priv)) {
- broadwell_sseu_device_status(dev_priv, &sseu);
- } else if (IS_GEN9(dev_priv)) {
- gen9_sseu_device_status(dev_priv, &sseu);
- } else if (INTEL_GEN(dev_priv) >= 10) {
- gen10_sseu_device_status(dev_priv, &sseu);
+ RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice;
+
+ with_intel_runtime_pm(dev_priv, wakeref) {
+ if (IS_CHERRYVIEW(dev_priv))
+ cherryview_sseu_device_status(dev_priv, &sseu);
+ else if (IS_BROADWELL(dev_priv))
+ broadwell_sseu_device_status(dev_priv, &sseu);
+ else if (IS_GEN(dev_priv, 9))
+ gen9_sseu_device_status(dev_priv, &sseu);
+ else if (INTEL_GEN(dev_priv) >= 10)
+ gen10_sseu_device_status(dev_priv, &sseu);
}
- intel_runtime_pm_put(dev_priv);
-
i915_print_sseu_info(m, false, &sseu);
return 0;
@@ -4571,7 +4292,7 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
if (INTEL_GEN(i915) < 6)
return 0;
- intel_runtime_pm_get(i915);
+ file->private_data = (void *)(uintptr_t)intel_runtime_pm_get(i915);
intel_uncore_forcewake_user_get(i915);
return 0;
@@ -4585,7 +4306,8 @@ static int i915_forcewake_release(struct inode *inode, struct file *file)
return 0;
intel_uncore_forcewake_user_put(i915);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915,
+ (intel_wakeref_t)(uintptr_t)file->private_data);
return 0;
}
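Editor's note: i915_forcewake_open()/release() have no per-device slot for the cookie, so the wakeref is stashed in file->private_data. Since intel_wakeref_t is an integer handle (a depot_stack_handle_t, per the typedef added to i915_drv.h later in this patch), it round-trips through a pointer via uintptr_t casts. A self-contained sketch of that round trip, using hypothetical names purely for illustration, is:

	/*
	 * Hypothetical, standalone illustration of stashing an integer
	 * handle in a void * slot and recovering it later; mirrors the
	 * uintptr_t casts used in i915_forcewake_open()/release() above.
	 */
	#include <stdint.h>

	typedef uint32_t example_wakeref_t;	/* stand-in for intel_wakeref_t */

	static void stash(void **slot, example_wakeref_t wf)
	{
		*slot = (void *)(uintptr_t)wf;
	}

	static example_wakeref_t unstash(void **slot)
	{
		return (example_wakeref_t)(uintptr_t)*slot;
	}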
@@ -4912,7 +4634,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_context_status", i915_context_status, 0},
{"i915_forcewake_domains", i915_forcewake_domains, 0},
{"i915_swizzle_info", i915_swizzle_info, 0},
- {"i915_ppgtt_info", i915_ppgtt_info, 0},
{"i915_llc", i915_llc, 0},
{"i915_edp_psr_status", i915_edp_psr_status, 0},
{"i915_energy_uJ", i915_energy_uJ, 0},
@@ -4939,15 +4660,12 @@ static const struct i915_debugfs_files {
} i915_debugfs_files[] = {
{"i915_wedged", &i915_wedged_fops},
{"i915_cache_sharing", &i915_cache_sharing_fops},
- {"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
- {"i915_ring_test_irq", &i915_ring_test_irq_fops},
{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
{"i915_error_state", &i915_error_state_fops},
{"i915_gpu_info", &i915_gpu_info_fops},
#endif
{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
- {"i915_next_seqno", &i915_next_seqno_fops},
{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
@@ -5020,7 +4738,7 @@ static int i915_dpcd_show(struct seq_file *m, void *data)
struct drm_connector *connector = m->private;
struct intel_dp *intel_dp =
enc_to_intel_dp(&intel_attached_encoder(connector)->base);
- uint8_t buf[16];
+ u8 buf[16];
ssize_t err;
int i;
@@ -5094,6 +4812,105 @@ static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
}
DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
+static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ struct drm_device *dev = connector->dev;
+ struct drm_crtc *crtc;
+ struct intel_dp *intel_dp;
+ struct drm_modeset_acquire_ctx ctx;
+ struct intel_crtc_state *crtc_state = NULL;
+ int ret = 0;
+ bool try_again = false;
+
+ drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
+
+ do {
+ try_again = false;
+ ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
+ &ctx);
+ if (ret) {
+ ret = -EINTR;
+ break;
+ }
+ crtc = connector->state->crtc;
+ if (connector->status != connector_status_connected || !crtc) {
+ ret = -ENODEV;
+ break;
+ }
+ ret = drm_modeset_lock(&crtc->mutex, &ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret) {
+ try_again = true;
+ continue;
+ }
+ break;
+ } else if (ret) {
+ break;
+ }
+ intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+ crtc_state = to_intel_crtc_state(crtc->state);
+ seq_printf(m, "DSC_Enabled: %s\n",
+ yesno(crtc_state->dsc_params.compression_enable));
+ seq_printf(m, "DSC_Sink_Support: %s\n",
+ yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
+ if (!intel_dp_is_edp(intel_dp))
+ seq_printf(m, "FEC_Sink_Support: %s\n",
+ yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
+ } while (try_again);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ return ret;
+}
+
+static ssize_t i915_dsc_fec_support_write(struct file *file,
+ const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ bool dsc_enable = false;
+ int ret;
+ struct drm_connector *connector =
+ ((struct seq_file *)file->private_data)->private;
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+ if (len == 0)
+ return 0;
+
+ DRM_DEBUG_DRIVER("Copied %zu bytes from user to force DSC\n",
+ len);
+
+ ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
+ if (ret < 0)
+ return ret;
+
+ DRM_DEBUG_DRIVER("Got %s for DSC Enable\n",
+ (dsc_enable) ? "true" : "false");
+ intel_dp->force_dsc_en = dsc_enable;
+
+ *offp += len;
+ return len;
+}
+
+static int i915_dsc_fec_support_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, i915_dsc_fec_support_show,
+ inode->i_private);
+}
+
+static const struct file_operations i915_dsc_fec_support_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_dsc_fec_support_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = i915_dsc_fec_support_write
+};
+
/**
* i915_debugfs_connector_add - add i915 specific connector debugfs files
* @connector: pointer to a registered drm_connector
@@ -5106,6 +4923,7 @@ DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
int i915_debugfs_connector_add(struct drm_connector *connector)
{
struct dentry *root = connector->debugfs_entry;
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
/* The connector must have been registered beforehands. */
if (!root)
@@ -5130,5 +4948,11 @@ int i915_debugfs_connector_add(struct drm_connector *connector)
connector, &i915_hdcp_sink_capability_fops);
}
+ if (INTEL_GEN(dev_priv) >= 10 &&
+ (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP))
+ debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
+ connector, &i915_dsc_fec_support_fops);
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index b310a897a4ad..6630212f2faf 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -41,14 +41,16 @@
#include <linux/vt.h>
#include <acpi/video.h>
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_irq.h>
+#include <drm/drm_probe_helper.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "i915_pmu.h"
+#include "i915_reset.h"
#include "i915_query.h"
#include "i915_vgpu.h"
#include "intel_drv.h"
@@ -132,15 +134,15 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
switch (id) {
case INTEL_PCH_IBX_DEVICE_ID_TYPE:
DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
- WARN_ON(!IS_GEN5(dev_priv));
+ WARN_ON(!IS_GEN(dev_priv, 5));
return PCH_IBX;
case INTEL_PCH_CPT_DEVICE_ID_TYPE:
DRM_DEBUG_KMS("Found CougarPoint PCH\n");
- WARN_ON(!IS_GEN6(dev_priv) && !IS_IVYBRIDGE(dev_priv));
+ WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
return PCH_CPT;
case INTEL_PCH_PPT_DEVICE_ID_TYPE:
DRM_DEBUG_KMS("Found PantherPoint PCH\n");
- WARN_ON(!IS_GEN6(dev_priv) && !IS_IVYBRIDGE(dev_priv));
+ WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
/* PantherPoint is CPT compatible */
return PCH_CPT;
case INTEL_PCH_LPT_DEVICE_ID_TYPE:
@@ -217,9 +219,9 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
* make an educated guess as to which PCH is really there.
*/
- if (IS_GEN5(dev_priv))
+ if (IS_GEN(dev_priv, 5))
id = INTEL_PCH_IBX_DEVICE_ID_TYPE;
- else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
+ else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
id = INTEL_PCH_CPT_DEVICE_ID_TYPE;
else if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
@@ -349,7 +351,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = min_t(int, INTEL_PPGTT(dev_priv), I915_GEM_PPGTT_FULL);
break;
case I915_PARAM_HAS_SEMAPHORES:
- value = HAS_LEGACY_SEMAPHORES(dev_priv);
+ value = 0;
break;
case I915_PARAM_HAS_SECURE_BATCHES:
value = capable(CAP_SYS_ADMIN);
@@ -358,12 +360,12 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = i915_cmd_parser_get_version(dev_priv);
break;
case I915_PARAM_SUBSLICE_TOTAL:
- value = sseu_subslice_total(&INTEL_INFO(dev_priv)->sseu);
+ value = sseu_subslice_total(&RUNTIME_INFO(dev_priv)->sseu);
if (!value)
return -ENODEV;
break;
case I915_PARAM_EU_TOTAL:
- value = INTEL_INFO(dev_priv)->sseu.eu_total;
+ value = RUNTIME_INFO(dev_priv)->sseu.eu_total;
if (!value)
return -ENODEV;
break;
@@ -380,7 +382,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = HAS_POOLED_EU(dev_priv);
break;
case I915_PARAM_MIN_EU_IN_POOL:
- value = INTEL_INFO(dev_priv)->sseu.min_eu_in_pool;
+ value = RUNTIME_INFO(dev_priv)->sseu.min_eu_in_pool;
break;
case I915_PARAM_HUC_STATUS:
value = intel_huc_check_status(&dev_priv->huc);
@@ -430,17 +432,17 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = intel_engines_has_context_isolation(dev_priv);
break;
case I915_PARAM_SLICE_MASK:
- value = INTEL_INFO(dev_priv)->sseu.slice_mask;
+ value = RUNTIME_INFO(dev_priv)->sseu.slice_mask;
if (!value)
return -ENODEV;
break;
case I915_PARAM_SUBSLICE_MASK:
- value = INTEL_INFO(dev_priv)->sseu.subslice_mask[0];
+ value = RUNTIME_INFO(dev_priv)->sseu.subslice_mask[0];
if (!value)
return -ENODEV;
break;
case I915_PARAM_CS_TIMESTAMP_FREQUENCY:
- value = 1000 * INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz;
+ value = 1000 * RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz;
break;
case I915_PARAM_MMAP_GTT_COHERENT:
value = INTEL_INFO(dev_priv)->has_coherent_ggtt;
@@ -906,6 +908,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv)
mutex_init(&dev_priv->pps_mutex);
i915_memcpy_init_early(dev_priv);
+ intel_runtime_pm_init_early(dev_priv);
ret = i915_workqueues_init(dev_priv);
if (ret < 0)
@@ -966,7 +969,7 @@ static int i915_mmio_setup(struct drm_i915_private *dev_priv)
int mmio_bar;
int mmio_size;
- mmio_bar = IS_GEN2(dev_priv) ? 1 : 0;
+ mmio_bar = IS_GEN(dev_priv, 2) ? 1 : 0;
/*
* Before gen4, the registers and the GTT are behind different BARs.
* However, from gen4 onwards, the registers and the GTT are shared
@@ -1341,7 +1344,7 @@ intel_get_dram_info(struct drm_i915_private *dev_priv)
/* Need to calculate bandwidth only for Gen9 */
if (IS_BROXTON(dev_priv))
ret = bxt_get_dram_info(dev_priv);
- else if (IS_GEN9(dev_priv))
+ else if (IS_GEN(dev_priv, 9))
ret = skl_get_dram_info(dev_priv);
else
ret = skl_dram_get_channels_info(dev_priv);
@@ -1374,7 +1377,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
if (i915_inject_load_failure())
return -ENODEV;
- intel_device_info_runtime_init(mkwrite_device_info(dev_priv));
+ intel_device_info_runtime_init(dev_priv);
if (HAS_PPGTT(dev_priv)) {
if (intel_vgpu_active(dev_priv) &&
@@ -1436,7 +1439,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
pci_set_master(pdev);
/* overlay on gen2 is broken and can't address above 1G */
- if (IS_GEN2(dev_priv)) {
+ if (IS_GEN(dev_priv, 2)) {
ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
if (ret) {
DRM_ERROR("failed to set DMA mask\n");
@@ -1574,7 +1577,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
acpi_video_register();
}
- if (IS_GEN5(dev_priv))
+ if (IS_GEN(dev_priv, 5))
intel_gpu_ips_init(dev_priv);
intel_audio_init(dev_priv);
@@ -1636,8 +1639,14 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv)
if (drm_debug & DRM_UT_DRIVER) {
struct drm_printer p = drm_debug_printer("i915 device info:");
- intel_device_info_dump(&dev_priv->info, &p);
- intel_device_info_dump_runtime(&dev_priv->info, &p);
+ drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s gen=%i\n",
+ INTEL_DEVID(dev_priv),
+ INTEL_REVID(dev_priv),
+ intel_platform_name(INTEL_INFO(dev_priv)->platform),
+ INTEL_GEN(dev_priv));
+
+ intel_device_info_dump_flags(INTEL_INFO(dev_priv), &p);
+ intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
}
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
@@ -1674,7 +1683,7 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Setup the write-once "constant" device info */
device_info = mkwrite_device_info(i915);
memcpy(device_info, match_info, sizeof(*device_info));
- device_info->device_id = pdev->device;
+ RUNTIME_INFO(i915)->device_id = pdev->device;
BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
BITS_PER_TYPE(device_info->platform_mask));
@@ -1774,6 +1783,9 @@ void i915_driver_unload(struct drm_device *dev)
i915_driver_unregister(dev_priv);
+ /* Flush any external code that may still be under the RCU lock */
+ synchronize_rcu();
+
if (i915_gem_suspend(dev_priv))
DRM_ERROR("failed to idle hardware; continuing to unload!\n");
@@ -1802,8 +1814,7 @@ void i915_driver_unload(struct drm_device *dev)
i915_driver_cleanup_mmio(dev_priv);
enable_rpm_wakeref_asserts(dev_priv);
-
- WARN_ON(atomic_read(&dev_priv->runtime_pm.wakeref_count));
+ intel_runtime_pm_cleanup(dev_priv);
}
static void i915_driver_release(struct drm_device *dev)
@@ -2005,6 +2016,8 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
out:
enable_rpm_wakeref_asserts(dev_priv);
+ if (!dev_priv->uncore.user_forcewake.count)
+ intel_runtime_pm_cleanup(dev_priv);
return ret;
}
@@ -2174,7 +2187,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
intel_power_domains_resume(dev_priv);
- intel_engines_sanitize(dev_priv);
+ intel_engines_sanitize(dev_priv, true);
enable_rpm_wakeref_asserts(dev_priv);
@@ -2195,210 +2208,6 @@ static int i915_resume_switcheroo(struct drm_device *dev)
return i915_drm_resume(dev);
}
-/**
- * i915_reset - reset chip after a hang
- * @i915: #drm_i915_private to reset
- * @stalled_mask: mask of the stalled engines with the guilty requests
- * @reason: user error message for why we are resetting
- *
- * Reset the chip. Useful if a hang is detected. Marks the device as wedged
- * on failure.
- *
- * Caller must hold the struct_mutex.
- *
- * Procedure is fairly simple:
- * - reset the chip using the reset reg
- * - re-init context state
- * - re-init hardware status page
- * - re-init ring buffer
- * - re-init interrupt state
- * - re-init display
- */
-void i915_reset(struct drm_i915_private *i915,
- unsigned int stalled_mask,
- const char *reason)
-{
- struct i915_gpu_error *error = &i915->gpu_error;
- int ret;
- int i;
-
- GEM_TRACE("flags=%lx\n", error->flags);
-
- might_sleep();
- lockdep_assert_held(&i915->drm.struct_mutex);
- GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));
-
- if (!test_bit(I915_RESET_HANDOFF, &error->flags))
- return;
-
- /* Clear any previous failed attempts at recovery. Time to try again. */
- if (!i915_gem_unset_wedged(i915))
- goto wakeup;
-
- if (reason)
- dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason);
- error->reset_count++;
-
- ret = i915_gem_reset_prepare(i915);
- if (ret) {
- dev_err(i915->drm.dev, "GPU recovery failed\n");
- goto taint;
- }
-
- if (!intel_has_gpu_reset(i915)) {
- if (i915_modparams.reset)
- dev_err(i915->drm.dev, "GPU reset not supported\n");
- else
- DRM_DEBUG_DRIVER("GPU reset disabled\n");
- goto error;
- }
-
- for (i = 0; i < 3; i++) {
- ret = intel_gpu_reset(i915, ALL_ENGINES);
- if (ret == 0)
- break;
-
- msleep(100);
- }
- if (ret) {
- dev_err(i915->drm.dev, "Failed to reset chip\n");
- goto taint;
- }
-
- /* Ok, now get things going again... */
-
- /*
- * Everything depends on having the GTT running, so we need to start
- * there.
- */
- ret = i915_ggtt_enable_hw(i915);
- if (ret) {
- DRM_ERROR("Failed to re-enable GGTT following reset (%d)\n",
- ret);
- goto error;
- }
-
- i915_gem_reset(i915, stalled_mask);
- intel_overlay_reset(i915);
-
- /*
- * Next we need to restore the context, but we don't use those
- * yet either...
- *
- * Ring buffer needs to be re-initialized in the KMS case, or if X
- * was running at the time of the reset (i.e. we weren't VT
- * switched away).
- */
- ret = i915_gem_init_hw(i915);
- if (ret) {
- DRM_ERROR("Failed to initialise HW following reset (%d)\n",
- ret);
- goto error;
- }
-
- i915_queue_hangcheck(i915);
-
-finish:
- i915_gem_reset_finish(i915);
-wakeup:
- clear_bit(I915_RESET_HANDOFF, &error->flags);
- wake_up_bit(&error->flags, I915_RESET_HANDOFF);
- return;
-
-taint:
- /*
- * History tells us that if we cannot reset the GPU now, we
- * never will. This then impacts everything that is run
- * subsequently. On failing the reset, we mark the driver
- * as wedged, preventing further execution on the GPU.
- * We also want to go one step further and add a taint to the
- * kernel so that any subsequent faults can be traced back to
- * this failure. This is important for CI, where if the
- * GPU/driver fails we would like to reboot and restart testing
- * rather than continue on into oblivion. For everyone else,
- * the system should still plod along, but they have been warned!
- */
- add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
-error:
- i915_gem_set_wedged(i915);
- i915_retire_requests(i915);
- goto finish;
-}
-
-static inline int intel_gt_reset_engine(struct drm_i915_private *dev_priv,
- struct intel_engine_cs *engine)
-{
- return intel_gpu_reset(dev_priv, intel_engine_flag(engine));
-}
-
-/**
- * i915_reset_engine - reset GPU engine to recover from a hang
- * @engine: engine to reset
- * @msg: reason for GPU reset; or NULL for no dev_notice()
- *
- * Reset a specific GPU engine. Useful if a hang is detected.
- * Returns zero on successful reset or otherwise an error code.
- *
- * Procedure is:
- * - identifies the request that caused the hang and it is dropped
- * - reset engine (which will force the engine to idle)
- * - re-init/configure engine
- */
-int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
-{
- struct i915_gpu_error *error = &engine->i915->gpu_error;
- struct i915_request *active_request;
- int ret;
-
- GEM_TRACE("%s flags=%lx\n", engine->name, error->flags);
- GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
-
- active_request = i915_gem_reset_prepare_engine(engine);
- if (IS_ERR_OR_NULL(active_request)) {
- /* Either the previous reset failed, or we pardon the reset. */
- ret = PTR_ERR(active_request);
- goto out;
- }
-
- if (msg)
- dev_notice(engine->i915->drm.dev,
- "Resetting %s for %s\n", engine->name, msg);
- error->reset_engine_count[engine->id]++;
-
- if (!engine->i915->guc.execbuf_client)
- ret = intel_gt_reset_engine(engine->i915, engine);
- else
- ret = intel_guc_reset_engine(&engine->i915->guc, engine);
- if (ret) {
- /* If we fail here, we expect to fallback to a global reset */
- DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n",
- engine->i915->guc.execbuf_client ? "GuC " : "",
- engine->name, ret);
- goto out;
- }
-
- /*
- * The request that caused the hang is stuck on elsp, we know the
- * active request and can drop it, adjust head to skip the offending
- * request to resume executing remaining requests in the queue.
- */
- i915_gem_reset_engine(engine, active_request, true);
-
- /*
- * The engine and its registers (and workarounds in case of render)
- * have been reset to their default values. Follow the init_ring
- * process to program RING_MODE, HWSP and re-enable submission.
- */
- ret = engine->init_hw(engine);
- if (ret)
- goto out;
-
-out:
- intel_engine_cancel_stop_cs(engine);
- i915_gem_reset_finish_engine(engine);
- return ret;
-}
-
static int i915_pm_prepare(struct device *kdev)
{
struct pci_dev *pdev = to_pci_dev(kdev);
@@ -2736,6 +2545,10 @@ static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
static int vlv_wait_for_pw_status(struct drm_i915_private *dev_priv,
u32 mask, u32 val)
{
+ i915_reg_t reg = VLV_GTLC_PW_STATUS;
+ u32 reg_value;
+ int ret;
+
/* The HW does not like us polling for PW_STATUS frequently, so
* use the sleeping loop rather than risk the busy spin within
* intel_wait_for_register().
@@ -2743,8 +2556,12 @@ static int vlv_wait_for_pw_status(struct drm_i915_private *dev_priv,
* Transitioning between RC6 states should be at most 2ms (see
* valleyview_enable_rps) so use a 3ms timeout.
*/
- return wait_for((I915_READ_NOTRACE(VLV_GTLC_PW_STATUS) & mask) == val,
- 3);
+ ret = wait_for(((reg_value = I915_READ_NOTRACE(reg)) & mask) == val, 3);
+
+ /* just trace the final value */
+ trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);
+
+ return ret;
}
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
@@ -2959,7 +2776,7 @@ static int intel_runtime_suspend(struct device *kdev)
}
enable_rpm_wakeref_asserts(dev_priv);
- WARN_ON_ONCE(atomic_read(&dev_priv->runtime_pm.wakeref_count));
+ intel_runtime_pm_cleanup(dev_priv);
if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
DRM_ERROR("Unclaimed access detected prior to suspending\n");
@@ -3203,7 +3020,7 @@ static struct drm_driver driver = {
* deal with them for Intel hardware.
*/
.driver_features =
- DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
+ DRIVER_GEM | DRIVER_PRIME |
DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ,
.release = i915_driver_release,
.open = i915_driver_open,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b1c31967194b..9adc7bb9e69c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -45,8 +45,8 @@
#include <linux/pm_qos.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
+#include <linux/stackdepot.h>
-#include <drm/drmP.h>
#include <drm/intel-gtt.h>
#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
#include <drm/drm_gem.h>
@@ -54,6 +54,7 @@
#include <drm/drm_cache.h>
#include <drm/drm_util.h>
#include <drm/drm_dsc.h>
+#include <drm/drm_connector.h>
#include "i915_fixed.h"
#include "i915_params.h"
@@ -90,8 +91,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20181204"
-#define DRIVER_TIMESTAMP 1543944377
+#define DRIVER_DATE "20190207"
+#define DRIVER_TIMESTAMP 1549572331
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -130,6 +131,8 @@ bool i915_error_injected(void);
__i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
fmt, ##__VA_ARGS__)
+typedef depot_stack_handle_t intel_wakeref_t;
+
enum hpd_pin {
HPD_NONE = 0,
HPD_TV = HPD_NONE, /* TV is known to be unreliable */
@@ -281,16 +284,14 @@ struct drm_i915_display_funcs {
int (*get_fifo_size)(struct drm_i915_private *dev_priv,
enum i9xx_plane_id i9xx_plane);
int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
- int (*compute_intermediate_wm)(struct drm_device *dev,
- struct intel_crtc *intel_crtc,
- struct intel_crtc_state *newstate);
+ int (*compute_intermediate_wm)(struct intel_crtc_state *newstate);
void (*initial_watermarks)(struct intel_atomic_state *state,
struct intel_crtc_state *cstate);
void (*atomic_update_watermarks)(struct intel_atomic_state *state,
struct intel_crtc_state *cstate);
void (*optimize_watermarks)(struct intel_atomic_state *state,
struct intel_crtc_state *cstate);
- int (*compute_global_watermarks)(struct drm_atomic_state *state);
+ int (*compute_global_watermarks)(struct intel_atomic_state *state);
void (*update_wm)(struct intel_crtc *crtc);
int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
/* Returns the active state of the crtc, and if the crtc is active,
@@ -322,8 +323,20 @@ struct drm_i915_display_funcs {
/* display clock increase/decrease */
/* pll clock increase/decrease */
- void (*load_csc_matrix)(struct drm_crtc_state *crtc_state);
- void (*load_luts)(struct drm_crtc_state *crtc_state);
+ /*
+ * Program double buffered color management registers during
+ * vblank evasion. The registers should then latch during the
+ * next vblank start, alongside any other double buffered registers
+ * involved with the same commit.
+ */
+ void (*color_commit)(const struct intel_crtc_state *crtc_state);
+ /*
+ * Load LUTs (and other single buffered color management
+ * registers). Will (hopefully) be called during the vblank
+ * following the latching of any double buffered registers
+ * involved with the same commit.
+ */
+ void (*load_luts)(const struct intel_crtc_state *crtc_state);
};
#define CSR_VERSION(major, minor) ((major) << 16 | (minor))
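Editor's note: the new color_commit()/load_luts() split encodes an ordering requirement spelled out in the comments above: double buffered color registers are written under vblank evasion and latch at the next vblank start, while single buffered LUTs are loaded in the vblank following that latch. A hedged sketch of a caller honouring that ordering is shown below; the helper and its name are hypothetical, the real sequencing lives in the i915 atomic commit path.

	/*
	 * Hypothetical caller, for illustration only: the actual ordering
	 * is enforced by the i915 atomic commit code, not by a helper
	 * like this.
	 */
	static void example_commit_color(const struct drm_i915_display_funcs *funcs,
					 const struct intel_crtc_state *crtc_state)
	{
		/* Written under vblank evasion; latches at the next vblank start. */
		if (funcs->color_commit)
			funcs->color_commit(crtc_state);

		/* ... wait for the vblank that follows the latch ... */

		/* Single buffered registers, loaded once the latch has happened. */
		if (funcs->load_luts)
			funcs->load_luts(crtc_state);
	}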
@@ -333,16 +346,17 @@ struct drm_i915_display_funcs {
struct intel_csr {
struct work_struct work;
const char *fw_path;
- uint32_t required_version;
- uint32_t max_fw_size; /* bytes */
- uint32_t *dmc_payload;
- uint32_t dmc_fw_size; /* dwords */
- uint32_t version;
- uint32_t mmio_count;
+ u32 required_version;
+ u32 max_fw_size; /* bytes */
+ u32 *dmc_payload;
+ u32 dmc_fw_size; /* dwords */
+ u32 version;
+ u32 mmio_count;
i915_reg_t mmioaddr[8];
- uint32_t mmiodata[8];
- uint32_t dc_state;
- uint32_t allowed_dc_mask;
+ u32 mmiodata[8];
+ u32 dc_state;
+ u32 allowed_dc_mask;
+ intel_wakeref_t wakeref;
};
enum i915_cache_level {
@@ -398,7 +412,7 @@ struct intel_fbc {
struct {
unsigned int mode_flags;
- uint32_t hsw_bdw_pixel_rate;
+ u32 hsw_bdw_pixel_rate;
} crtc;
struct {
@@ -417,7 +431,7 @@ struct intel_fbc {
int y;
- uint16_t pixel_blend_mode;
+ u16 pixel_blend_mode;
} plane;
struct {
@@ -509,6 +523,7 @@ struct i915_psr {
ktime_t last_exit;
bool sink_not_reliable;
bool irq_aux_error;
+ u16 su_x_granularity;
};
enum intel_pch {
@@ -556,7 +571,7 @@ struct i915_suspend_saved_registers {
u32 saveSWF0[16];
u32 saveSWF1[16];
u32 saveSWF3[3];
- uint64_t saveFENCE[I915_MAX_NUM_FENCES];
+ u64 saveFENCE[I915_MAX_NUM_FENCES];
u32 savePCH_PORT_HOTPLUG;
u16 saveGCDGMBUS;
};
@@ -819,6 +834,8 @@ struct i915_power_domains {
bool display_core_suspended;
int power_well_count;
+ intel_wakeref_t wakeref;
+
struct mutex lock;
int domain_use_count[POWER_DOMAIN_NUM];
struct i915_power_well *power_wells;
@@ -901,9 +918,9 @@ struct i915_gem_mm {
atomic_t bsd_engine_dispatch_index;
/** Bit 6 swizzling required for X tiling */
- uint32_t bit_6_swizzle_x;
+ u32 bit_6_swizzle_x;
/** Bit 6 swizzling required for Y tiling */
- uint32_t bit_6_swizzle_y;
+ u32 bit_6_swizzle_y;
/* accounting, useful for userland debugging */
spinlock_t object_stat_lock;
@@ -930,18 +947,20 @@ struct ddi_vbt_port_info {
* populate this field.
*/
#define HDMI_LEVEL_SHIFT_UNKNOWN 0xff
- uint8_t hdmi_level_shift;
+ u8 hdmi_level_shift;
- uint8_t supports_dvi:1;
- uint8_t supports_hdmi:1;
- uint8_t supports_dp:1;
- uint8_t supports_edp:1;
+ u8 supports_dvi:1;
+ u8 supports_hdmi:1;
+ u8 supports_dp:1;
+ u8 supports_edp:1;
+ u8 supports_typec_usb:1;
+ u8 supports_tbt:1;
- uint8_t alternate_aux_channel;
- uint8_t alternate_ddc_pin;
+ u8 alternate_aux_channel;
+ u8 alternate_ddc_pin;
- uint8_t dp_boost_level;
- uint8_t hdmi_boost_level;
+ u8 dp_boost_level;
+ u8 hdmi_boost_level;
int dp_max_link_rate; /* 0 for not limited by VBT */
};
@@ -1032,41 +1051,41 @@ enum intel_ddb_partitioning {
struct intel_wm_level {
bool enable;
- uint32_t pri_val;
- uint32_t spr_val;
- uint32_t cur_val;
- uint32_t fbc_val;
+ u32 pri_val;
+ u32 spr_val;
+ u32 cur_val;
+ u32 fbc_val;
};
struct ilk_wm_values {
- uint32_t wm_pipe[3];
- uint32_t wm_lp[3];
- uint32_t wm_lp_spr[3];
- uint32_t wm_linetime[3];
+ u32 wm_pipe[3];
+ u32 wm_lp[3];
+ u32 wm_lp_spr[3];
+ u32 wm_linetime[3];
bool enable_fbc_wm;
enum intel_ddb_partitioning partitioning;
};
struct g4x_pipe_wm {
- uint16_t plane[I915_MAX_PLANES];
- uint16_t fbc;
+ u16 plane[I915_MAX_PLANES];
+ u16 fbc;
};
struct g4x_sr_wm {
- uint16_t plane;
- uint16_t cursor;
- uint16_t fbc;
+ u16 plane;
+ u16 cursor;
+ u16 fbc;
};
struct vlv_wm_ddl_values {
- uint8_t plane[I915_MAX_PLANES];
+ u8 plane[I915_MAX_PLANES];
};
struct vlv_wm_values {
struct g4x_pipe_wm pipe[3];
struct g4x_sr_wm sr;
struct vlv_wm_ddl_values ddl[3];
- uint8_t level;
+ u8 level;
bool cxsr;
};
@@ -1080,10 +1099,10 @@ struct g4x_wm_values {
};
struct skl_ddb_entry {
- uint16_t start, end; /* in number of blocks, 'end' is exclusive */
+ u16 start, end; /* in number of blocks, 'end' is exclusive */
};
-static inline uint16_t skl_ddb_entry_size(const struct skl_ddb_entry *entry)
+static inline u16 skl_ddb_entry_size(const struct skl_ddb_entry *entry)
{
return entry->end - entry->start;
}
@@ -1107,8 +1126,9 @@ struct skl_ddb_values {
};
struct skl_wm_level {
- uint16_t plane_res_b;
- uint8_t plane_res_l;
+ u16 min_ddb_alloc;
+ u16 plane_res_b;
+ u8 plane_res_l;
bool plane_en;
};
@@ -1117,15 +1137,15 @@ struct skl_wm_params {
bool x_tiled, y_tiled;
bool rc_surface;
bool is_planar;
- uint32_t width;
- uint8_t cpp;
- uint32_t plane_pixel_rate;
- uint32_t y_min_scanlines;
- uint32_t plane_bytes_per_line;
+ u32 width;
+ u8 cpp;
+ u32 plane_pixel_rate;
+ u32 y_min_scanlines;
+ u32 plane_bytes_per_line;
uint_fixed_16_16_t plane_blocks_per_line;
uint_fixed_16_16_t y_tile_minimum;
- uint32_t linetime_us;
- uint32_t dbuf_block_size;
+ u32 linetime_us;
+ u32 dbuf_block_size;
};
/*
@@ -1155,6 +1175,25 @@ struct i915_runtime_pm {
atomic_t wakeref_count;
bool suspended;
bool irqs_enabled;
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+ /*
+ * To aid detection of wakeref leaks and general misuse, we
+ * track all wakeref holders. With manual markup (i.e. returning
+ * a cookie to each rpm_get caller which they then supply to their
+ * paired rpm_put) we can remove the corresponding pairs and keep
+ * the array trimmed to active wakerefs.
+ */
+ struct intel_runtime_pm_debug {
+ spinlock_t lock;
+
+ depot_stack_handle_t last_acquire;
+ depot_stack_handle_t last_release;
+
+ depot_stack_handle_t *owners;
+ unsigned long count;
+ } debug;
+#endif
};
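
The debug block above records a stackdepot handle for every outstanding wakeref, which is why runtime PM acquisition now hands back an intel_wakeref_t cookie that the caller must return on release. A minimal sketch of the pairing, using a hypothetical caller name but the get/put signatures visible later in this patch:

static void fake_touch_hw(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(i915);	/* cookie identifies this holder */

	/* ... access the device while it is guaranteed awake ... */

	intel_runtime_pm_put(i915, wakeref);	/* retires the matching entry */
}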
enum intel_pipe_crc_source {
@@ -1311,6 +1350,12 @@ struct i915_perf_stream {
struct list_head link;
/**
+ * @wakeref: As we keep the device awake while the perf stream is
+ * active, we track our runtime pm reference for later release.
+ */
+ intel_wakeref_t wakeref;
+
+ /**
* @sample_flags: Flags representing the `DRM_I915_PERF_PROP_SAMPLE_*`
* properties given when opening a stream, representing the contents
* of a single sample as read() by userspace.
@@ -1430,7 +1475,8 @@ struct drm_i915_private {
struct kmem_cache *dependencies;
struct kmem_cache *priorities;
- const struct intel_device_info info;
+ const struct intel_device_info __info; /* Use INTEL_INFO() to access. */
+ struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
struct intel_driver_caps caps;
/**
@@ -1482,14 +1528,14 @@ struct drm_i915_private {
* Base address of where the gmbus and gpio blocks are located (either
* on PCH or on SoC for platforms without PCH).
*/
- uint32_t gpio_mmio_base;
+ u32 gpio_mmio_base;
/* MMIO base address for MIPI regs */
- uint32_t mipi_mmio_base;
+ u32 mipi_mmio_base;
- uint32_t psr_mmio_base;
+ u32 psr_mmio_base;
- uint32_t pps_mmio_base;
+ u32 pps_mmio_base;
wait_queue_head_t gmbus_wait_queue;
@@ -1744,17 +1790,17 @@ struct drm_i915_private {
* in 0.5us units for WM1+.
*/
/* primary */
- uint16_t pri_latency[5];
+ u16 pri_latency[5];
/* sprite */
- uint16_t spr_latency[5];
+ u16 spr_latency[5];
/* cursor */
- uint16_t cur_latency[5];
+ u16 cur_latency[5];
/*
* Raw watermark memory latency values
* for SKL for all 8 levels
* in 1us units.
*/
- uint16_t skl_latency[8];
+ u16 skl_latency[8];
/* current hardware state */
union {
@@ -1764,7 +1810,7 @@ struct drm_i915_private {
struct g4x_wm_values g4x;
};
- uint8_t max_level;
+ u8 max_level;
/*
* Should be held around atomic WM register writing; also
@@ -1942,12 +1988,18 @@ struct drm_i915_private {
void (*resume)(struct drm_i915_private *);
void (*cleanup_engine)(struct intel_engine_cs *engine);
- struct list_head timelines;
+ struct i915_gt_timelines {
+ struct mutex mutex; /* protects list, tainted by GPU */
+ struct list_head active_list;
+
+ /* Pack multiple timelines' seqnos into the same page */
+ spinlock_t hwsp_lock;
+ struct list_head hwsp_free_list;
+ } timelines;
struct list_head active_rings;
struct list_head closed_vma;
u32 active_requests;
- u32 request_serial;
/**
* Is the GPU currently considered idle, or busy executing
@@ -1956,7 +2008,7 @@ struct drm_i915_private {
* In order to reduce the effect on performance, there
* is a slight delay before we do so.
*/
- bool awake;
+ intel_wakeref_t awake;
/**
* The number of times we have woken up.
@@ -2191,17 +2243,12 @@ static inline unsigned int i915_sg_segment_size(void)
return size;
}
-static inline const struct intel_device_info *
-intel_info(const struct drm_i915_private *dev_priv)
-{
- return &dev_priv->info;
-}
-
-#define INTEL_INFO(dev_priv) intel_info((dev_priv))
+#define INTEL_INFO(dev_priv) (&(dev_priv)->__info)
+#define RUNTIME_INFO(dev_priv) (&(dev_priv)->__runtime)
#define DRIVER_CAPS(dev_priv) (&(dev_priv)->caps)
-#define INTEL_GEN(dev_priv) ((dev_priv)->info.gen)
-#define INTEL_DEVID(dev_priv) ((dev_priv)->info.device_id)
+#define INTEL_GEN(dev_priv) (INTEL_INFO(dev_priv)->gen)
+#define INTEL_DEVID(dev_priv) (RUNTIME_INFO(dev_priv)->device_id)
#define REVID_FOREVER 0xff
#define INTEL_REVID(dev_priv) ((dev_priv)->drm.pdev->revision)
@@ -2212,8 +2259,12 @@ intel_info(const struct drm_i915_private *dev_priv)
GENMASK((e) - 1, (s) - 1))
/* Returns true if Gen is in inclusive range [Start, End] */
-#define IS_GEN(dev_priv, s, e) \
- (!!((dev_priv)->info.gen_mask & INTEL_GEN_MASK((s), (e))))
+#define IS_GEN_RANGE(dev_priv, s, e) \
+ (!!(INTEL_INFO(dev_priv)->gen_mask & INTEL_GEN_MASK((s), (e))))
+
+#define IS_GEN(dev_priv, n) \
+ (BUILD_BUG_ON_ZERO(!__builtin_constant_p(n)) + \
+ INTEL_INFO(dev_priv)->gen == (n))
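
IS_GEN() now takes the generation as an argument and insists on a compile-time constant via BUILD_BUG_ON_ZERO(), while range checks move to IS_GEN_RANGE(). Illustrative use of the reworked predicates; the device id test is a made-up example:

static bool fake_needs_workaround(struct drm_i915_private *i915)
{
	if (IS_GEN(i915, 9))			/* exact generation, constant only */
		return true;

	if (IS_GEN_RANGE(i915, 5, 7))		/* inclusive range via gen_mask */
		return true;

	return INTEL_DEVID(i915) == 0x1234;	/* hypothetical device id */
}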
/*
* Return true if revision is in range [since,until] inclusive.
@@ -2223,7 +2274,7 @@ intel_info(const struct drm_i915_private *dev_priv)
#define IS_REVID(p, since, until) \
(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
-#define IS_PLATFORM(dev_priv, p) ((dev_priv)->info.platform_mask & BIT(p))
+#define IS_PLATFORM(dev_priv, p) (INTEL_INFO(dev_priv)->platform_mask & BIT(p))
#define IS_I830(dev_priv) IS_PLATFORM(dev_priv, INTEL_I830)
#define IS_I845G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I845G)
@@ -2245,7 +2296,7 @@ intel_info(const struct drm_i915_private *dev_priv)
#define IS_IRONLAKE_M(dev_priv) (INTEL_DEVID(dev_priv) == 0x0046)
#define IS_IVYBRIDGE(dev_priv) IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE)
#define IS_IVB_GT1(dev_priv) (IS_IVYBRIDGE(dev_priv) && \
- (dev_priv)->info.gt == 1)
+ INTEL_INFO(dev_priv)->gt == 1)
#define IS_VALLEYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW)
#define IS_CHERRYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW)
#define IS_HASWELL(dev_priv) IS_PLATFORM(dev_priv, INTEL_HASWELL)
@@ -2257,7 +2308,7 @@ intel_info(const struct drm_i915_private *dev_priv)
#define IS_COFFEELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
#define IS_CANNONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_CANNONLAKE)
#define IS_ICELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ICELAKE)
-#define IS_MOBILE(dev_priv) ((dev_priv)->info.is_mobile)
+#define IS_MOBILE(dev_priv) (INTEL_INFO(dev_priv)->is_mobile)
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev_priv) (IS_BROADWELL(dev_priv) && \
@@ -2268,11 +2319,13 @@ intel_info(const struct drm_i915_private *dev_priv)
#define IS_BDW_ULX(dev_priv) (IS_BROADWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xf) == 0xe)
#define IS_BDW_GT3(dev_priv) (IS_BROADWELL(dev_priv) && \
- (dev_priv)->info.gt == 3)
+ INTEL_INFO(dev_priv)->gt == 3)
#define IS_HSW_ULT(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0A00)
#define IS_HSW_GT3(dev_priv) (IS_HASWELL(dev_priv) && \
- (dev_priv)->info.gt == 3)
+ INTEL_INFO(dev_priv)->gt == 3)
+#define IS_HSW_GT1(dev_priv) (IS_HASWELL(dev_priv) && \
+ INTEL_INFO(dev_priv)->gt == 1)
/* ULX machines are also considered ULT. */
#define IS_HSW_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x0A0E || \
INTEL_DEVID(dev_priv) == 0x0A1E)
@@ -2295,23 +2348,25 @@ intel_info(const struct drm_i915_private *dev_priv)
#define IS_AML_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x591C || \
INTEL_DEVID(dev_priv) == 0x87C0)
#define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \
- (dev_priv)->info.gt == 2)
+ INTEL_INFO(dev_priv)->gt == 2)
#define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \
- (dev_priv)->info.gt == 3)
+ INTEL_INFO(dev_priv)->gt == 3)
#define IS_SKL_GT4(dev_priv) (IS_SKYLAKE(dev_priv) && \
- (dev_priv)->info.gt == 4)
+ INTEL_INFO(dev_priv)->gt == 4)
#define IS_KBL_GT2(dev_priv) (IS_KABYLAKE(dev_priv) && \
- (dev_priv)->info.gt == 2)
+ INTEL_INFO(dev_priv)->gt == 2)
#define IS_KBL_GT3(dev_priv) (IS_KABYLAKE(dev_priv) && \
- (dev_priv)->info.gt == 3)
+ INTEL_INFO(dev_priv)->gt == 3)
#define IS_CFL_ULT(dev_priv) (IS_COFFEELAKE(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0x00F0) == 0x00A0)
#define IS_CFL_GT2(dev_priv) (IS_COFFEELAKE(dev_priv) && \
- (dev_priv)->info.gt == 2)
+ INTEL_INFO(dev_priv)->gt == 2)
#define IS_CFL_GT3(dev_priv) (IS_COFFEELAKE(dev_priv) && \
- (dev_priv)->info.gt == 3)
+ INTEL_INFO(dev_priv)->gt == 3)
#define IS_CNL_WITH_PORT_F(dev_priv) (IS_CANNONLAKE(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0x0004) == 0x0004)
+#define IS_ICL_WITH_PORT_F(dev_priv) (IS_ICELAKE(dev_priv) && \
+ INTEL_DEVID(dev_priv) != 0x8A51)
#define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support)
@@ -2366,26 +2421,9 @@ intel_info(const struct drm_i915_private *dev_priv)
#define IS_ICL_REVID(p, since, until) \
(IS_ICELAKE(p) && IS_REVID(p, since, until))
-/*
- * The genX designation typically refers to the render engine, so render
- * capability related checks should use IS_GEN, while display and other checks
- * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
- * chips, etc.).
- */
-#define IS_GEN2(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(1)))
-#define IS_GEN3(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(2)))
-#define IS_GEN4(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(3)))
-#define IS_GEN5(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(4)))
-#define IS_GEN6(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(5)))
-#define IS_GEN7(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(6)))
-#define IS_GEN8(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(7)))
-#define IS_GEN9(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(8)))
-#define IS_GEN10(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(9)))
-#define IS_GEN11(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(10)))
-
#define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp)
-#define IS_GEN9_LP(dev_priv) (IS_GEN9(dev_priv) && IS_LP(dev_priv))
-#define IS_GEN9_BC(dev_priv) (IS_GEN9(dev_priv) && !IS_LP(dev_priv))
+#define IS_GEN9_LP(dev_priv) (IS_GEN(dev_priv, 9) && IS_LP(dev_priv))
+#define IS_GEN9_BC(dev_priv) (IS_GEN(dev_priv, 9) && !IS_LP(dev_priv))
#define ENGINE_MASK(id) BIT(id)
#define RENDER_RING ENGINE_MASK(RCS)
@@ -2399,29 +2437,27 @@ intel_info(const struct drm_i915_private *dev_priv)
#define ALL_ENGINES (~0)
#define HAS_ENGINE(dev_priv, id) \
- (!!((dev_priv)->info.ring_mask & ENGINE_MASK(id)))
+ (!!(INTEL_INFO(dev_priv)->ring_mask & ENGINE_MASK(id)))
#define HAS_BSD(dev_priv) HAS_ENGINE(dev_priv, VCS)
#define HAS_BSD2(dev_priv) HAS_ENGINE(dev_priv, VCS2)
#define HAS_BLT(dev_priv) HAS_ENGINE(dev_priv, BCS)
#define HAS_VEBOX(dev_priv) HAS_ENGINE(dev_priv, VECS)
-#define HAS_LEGACY_SEMAPHORES(dev_priv) IS_GEN7(dev_priv)
-
-#define HAS_LLC(dev_priv) ((dev_priv)->info.has_llc)
-#define HAS_SNOOP(dev_priv) ((dev_priv)->info.has_snoop)
+#define HAS_LLC(dev_priv) (INTEL_INFO(dev_priv)->has_llc)
+#define HAS_SNOOP(dev_priv) (INTEL_INFO(dev_priv)->has_snoop)
#define HAS_EDRAM(dev_priv) (!!((dev_priv)->edram_cap & EDRAM_ENABLED))
#define HAS_WT(dev_priv) ((IS_HASWELL(dev_priv) || \
IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))
-#define HWS_NEEDS_PHYSICAL(dev_priv) ((dev_priv)->info.hws_needs_physical)
+#define HWS_NEEDS_PHYSICAL(dev_priv) (INTEL_INFO(dev_priv)->hws_needs_physical)
#define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
- ((dev_priv)->info.has_logical_ring_contexts)
+ (INTEL_INFO(dev_priv)->has_logical_ring_contexts)
#define HAS_LOGICAL_RING_ELSQ(dev_priv) \
- ((dev_priv)->info.has_logical_ring_elsq)
+ (INTEL_INFO(dev_priv)->has_logical_ring_elsq)
#define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \
- ((dev_priv)->info.has_logical_ring_preemption)
+ (INTEL_INFO(dev_priv)->has_logical_ring_preemption)
#define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)
@@ -2435,12 +2471,12 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
GEM_BUG_ON((sizes) == 0); \
- ((sizes) & ~(dev_priv)->info.page_sizes) == 0; \
+ ((sizes) & ~INTEL_INFO(dev_priv)->page_sizes) == 0; \
})
-#define HAS_OVERLAY(dev_priv) ((dev_priv)->info.display.has_overlay)
+#define HAS_OVERLAY(dev_priv) (INTEL_INFO(dev_priv)->display.has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
- ((dev_priv)->info.display.overlay_needs_physical)
+ (INTEL_INFO(dev_priv)->display.overlay_needs_physical)
/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_I845G(dev_priv))
@@ -2458,42 +2494,42 @@ intel_info(const struct drm_i915_private *dev_priv)
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
*/
-#define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN2(dev_priv) && \
+#define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN(dev_priv, 2) && \
!(IS_I915G(dev_priv) || \
IS_I915GM(dev_priv)))
-#define SUPPORTS_TV(dev_priv) ((dev_priv)->info.display.supports_tv)
-#define I915_HAS_HOTPLUG(dev_priv) ((dev_priv)->info.display.has_hotplug)
+#define SUPPORTS_TV(dev_priv) (INTEL_INFO(dev_priv)->display.supports_tv)
+#define I915_HAS_HOTPLUG(dev_priv) (INTEL_INFO(dev_priv)->display.has_hotplug)
#define HAS_FW_BLC(dev_priv) (INTEL_GEN(dev_priv) > 2)
-#define HAS_FBC(dev_priv) ((dev_priv)->info.display.has_fbc)
-#define HAS_CUR_FBC(dev_priv) (!HAS_GMCH_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 7)
+#define HAS_FBC(dev_priv) (INTEL_INFO(dev_priv)->display.has_fbc)
+#define HAS_CUR_FBC(dev_priv) (!HAS_GMCH(dev_priv) && INTEL_GEN(dev_priv) >= 7)
#define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
-#define HAS_DP_MST(dev_priv) ((dev_priv)->info.display.has_dp_mst)
+#define HAS_DP_MST(dev_priv) (INTEL_INFO(dev_priv)->display.has_dp_mst)
-#define HAS_DDI(dev_priv) ((dev_priv)->info.display.has_ddi)
-#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) ((dev_priv)->info.has_fpga_dbg)
-#define HAS_PSR(dev_priv) ((dev_priv)->info.display.has_psr)
+#define HAS_DDI(dev_priv) (INTEL_INFO(dev_priv)->display.has_ddi)
+#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->has_fpga_dbg)
+#define HAS_PSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_psr)
-#define HAS_RC6(dev_priv) ((dev_priv)->info.has_rc6)
-#define HAS_RC6p(dev_priv) ((dev_priv)->info.has_rc6p)
+#define HAS_RC6(dev_priv) (INTEL_INFO(dev_priv)->has_rc6)
+#define HAS_RC6p(dev_priv) (INTEL_INFO(dev_priv)->has_rc6p)
#define HAS_RC6pp(dev_priv) (false) /* HW was never validated */
-#define HAS_CSR(dev_priv) ((dev_priv)->info.display.has_csr)
+#define HAS_CSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_csr)
-#define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm)
-#define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc)
+#define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm)
+#define HAS_64BIT_RELOC(dev_priv) (INTEL_INFO(dev_priv)->has_64bit_reloc)
-#define HAS_IPC(dev_priv) ((dev_priv)->info.display.has_ipc)
+#define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc)
/*
* For now, anything with a GuC requires uCode loading, and then supports
* command submission once loaded. But these are logically independent
* properties, so we have separate macros to test them.
*/
-#define HAS_GUC(dev_priv) ((dev_priv)->info.has_guc)
-#define HAS_GUC_CT(dev_priv) ((dev_priv)->info.has_guc_ct)
+#define HAS_GUC(dev_priv) (INTEL_INFO(dev_priv)->has_guc)
+#define HAS_GUC_CT(dev_priv) (INTEL_INFO(dev_priv)->has_guc_ct)
#define HAS_GUC_UCODE(dev_priv) (HAS_GUC(dev_priv))
#define HAS_GUC_SCHED(dev_priv) (HAS_GUC(dev_priv))
@@ -2502,11 +2538,11 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_HUC_UCODE(dev_priv) (HAS_GUC(dev_priv))
/* Having a GuC is not the same as using a GuC */
-#define USES_GUC(dev_priv) intel_uc_is_using_guc()
-#define USES_GUC_SUBMISSION(dev_priv) intel_uc_is_using_guc_submission()
-#define USES_HUC(dev_priv) intel_uc_is_using_huc()
+#define USES_GUC(dev_priv) intel_uc_is_using_guc(dev_priv)
+#define USES_GUC_SUBMISSION(dev_priv) intel_uc_is_using_guc_submission(dev_priv)
+#define USES_HUC(dev_priv) intel_uc_is_using_huc(dev_priv)
-#define HAS_POOLED_EU(dev_priv) ((dev_priv)->info.has_pooled_eu)
+#define HAS_POOLED_EU(dev_priv) (INTEL_INFO(dev_priv)->has_pooled_eu)
#define INTEL_PCH_DEVICE_ID_MASK 0xff80
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
@@ -2546,12 +2582,12 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
#define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE)
-#define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.display.has_gmch_display)
+#define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch)
#define HAS_LSPCON(dev_priv) (INTEL_GEN(dev_priv) >= 9)
/* DPF == dynamic parity feature */
-#define HAS_L3_DPF(dev_priv) ((dev_priv)->info.has_l3_dpf)
+#define HAS_L3_DPF(dev_priv) (INTEL_INFO(dev_priv)->has_l3_dpf)
#define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \
2 : HAS_L3_DPF(dev_priv))
@@ -2601,19 +2637,7 @@ extern const struct dev_pm_ops i915_pm_ops;
extern int i915_driver_load(struct pci_dev *pdev,
const struct pci_device_id *ent);
extern void i915_driver_unload(struct drm_device *dev);
-extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
-extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
-
-extern void i915_reset(struct drm_i915_private *i915,
- unsigned int stalled_mask,
- const char *reason);
-extern int i915_reset_engine(struct intel_engine_cs *engine,
- const char *reason);
-
-extern bool intel_has_reset_engine(struct drm_i915_private *dev_priv);
-extern int intel_reset_guc(struct drm_i915_private *dev_priv);
-extern int intel_guc_reset_engine(struct intel_guc *guc,
- struct intel_engine_cs *engine);
+
extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
extern void intel_hangcheck_init(struct drm_i915_private *dev_priv);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
@@ -2656,20 +2680,11 @@ static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
&dev_priv->gpu_error.hangcheck_work, delay);
}
-__printf(4, 5)
-void i915_handle_error(struct drm_i915_private *dev_priv,
- u32 engine_mask,
- unsigned long flags,
- const char *fmt, ...);
-#define I915_ERROR_CAPTURE BIT(0)
-
extern void intel_irq_init(struct drm_i915_private *dev_priv);
extern void intel_irq_fini(struct drm_i915_private *dev_priv);
int intel_irq_install(struct drm_i915_private *dev_priv);
void intel_irq_uninstall(struct drm_i915_private *dev_priv);
-void i915_clear_error_registers(struct drm_i915_private *dev_priv);
-
static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
{
return dev_priv->gvt;
@@ -2693,45 +2708,45 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
- uint32_t mask,
- uint32_t bits);
+ u32 mask,
+ u32 bits);
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
- uint32_t interrupt_mask,
- uint32_t enabled_irq_mask);
+ u32 interrupt_mask,
+ u32 enabled_irq_mask);
static inline void
-ilk_enable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits)
+ilk_enable_display_irq(struct drm_i915_private *dev_priv, u32 bits)
{
ilk_update_display_irq(dev_priv, bits, bits);
}
static inline void
-ilk_disable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits)
+ilk_disable_display_irq(struct drm_i915_private *dev_priv, u32 bits)
{
ilk_update_display_irq(dev_priv, bits, 0);
}
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
enum pipe pipe,
- uint32_t interrupt_mask,
- uint32_t enabled_irq_mask);
+ u32 interrupt_mask,
+ u32 enabled_irq_mask);
static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv,
- enum pipe pipe, uint32_t bits)
+ enum pipe pipe, u32 bits)
{
bdw_update_pipe_irq(dev_priv, pipe, bits, bits);
}
static inline void bdw_disable_pipe_irq(struct drm_i915_private *dev_priv,
- enum pipe pipe, uint32_t bits)
+ enum pipe pipe, u32 bits)
{
bdw_update_pipe_irq(dev_priv, pipe, bits, 0);
}
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
- uint32_t interrupt_mask,
- uint32_t enabled_irq_mask);
+ u32 interrupt_mask,
+ u32 enabled_irq_mask);
static inline void
-ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
+ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, u32 bits)
{
ibx_display_interrupt_update(dev_priv, bits, bits);
}
static inline void
-ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
+ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, u32 bits)
{
ibx_display_interrupt_update(dev_priv, bits, 0);
}
@@ -2916,13 +2931,13 @@ i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
__i915_gem_object_unpin_pages(obj);
}
-enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock */
+enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
I915_MM_NORMAL = 0,
- I915_MM_SHRINKER
+ I915_MM_SHRINKER /* called "recursively" from direct-reclaim-esque */
};
-void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
- enum i915_mm_subclass subclass);
+int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
+ enum i915_mm_subclass subclass);
void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj);
enum i915_map_type {
@@ -2991,7 +3006,7 @@ int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
- uint32_t handle, uint64_t *offset);
+ u32 handle, u64 *offset);
int i915_gem_mmap_gtt_version(void);
void i915_gem_track_fb(struct drm_i915_gem_object *old,
@@ -3008,11 +3023,6 @@ static inline bool i915_reset_backoff(struct i915_gpu_error *error)
return unlikely(test_bit(I915_RESET_BACKOFF, &error->flags));
}
-static inline bool i915_reset_handoff(struct i915_gpu_error *error)
-{
- return unlikely(test_bit(I915_RESET_HANDOFF, &error->flags));
-}
-
static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
{
return unlikely(test_bit(I915_WEDGED, &error->flags));
@@ -3034,18 +3044,8 @@ static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
return READ_ONCE(error->reset_engine_count[engine->id]);
}
-struct i915_request *
-i915_gem_reset_prepare_engine(struct intel_engine_cs *engine);
-int i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
-void i915_gem_reset(struct drm_i915_private *dev_priv,
- unsigned int stalled_mask);
-void i915_gem_reset_finish_engine(struct intel_engine_cs *engine);
-void i915_gem_reset_finish(struct drm_i915_private *dev_priv);
void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv);
-void i915_gem_reset_engine(struct intel_engine_cs *engine,
- struct i915_request *request,
- bool stalled);
void i915_gem_init_mmio(struct drm_i915_private *i915);
int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
@@ -3142,7 +3142,7 @@ int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
void i915_oa_init_reg_state(struct intel_engine_cs *engine,
struct i915_gem_context *ctx,
- uint32_t *reg_state);
+ u32 *reg_state);
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct i915_address_space *vm,
@@ -3204,7 +3204,8 @@ unsigned long i915_gem_shrink(struct drm_i915_private *i915,
unsigned long i915_gem_shrink_all(struct drm_i915_private *i915);
void i915_gem_shrinker_register(struct drm_i915_private *i915);
void i915_gem_shrinker_unregister(struct drm_i915_private *i915);
-void i915_gem_shrinker_taints_mutex(struct mutex *mutex);
+void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
+ struct mutex *mutex);
/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
@@ -3313,7 +3314,21 @@ static inline void intel_unregister_dsm_handler(void) { return; }
static inline struct intel_device_info *
mkwrite_device_info(struct drm_i915_private *dev_priv)
{
- return (struct intel_device_info *)&dev_priv->info;
+ return (struct intel_device_info *)INTEL_INFO(dev_priv);
+}
+
+static inline struct intel_sseu
+intel_device_default_sseu(struct drm_i915_private *i915)
+{
+ const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
+ struct intel_sseu value = {
+ .slice_mask = sseu->slice_mask,
+ .subslice_mask = sseu->subslice_mask[0],
+ .min_eus_per_subslice = sseu->max_eus_per_subslice,
+ .max_eus_per_subslice = sseu->max_eus_per_subslice,
+ };
+
+ return value;
}
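
intel_device_default_sseu() snapshots the runtime slice/subslice topology into an intel_sseu value that callers can then narrow. A hypothetical caller, using only the fields shown in the hunk above:

static struct intel_sseu fake_single_slice_sseu(struct drm_i915_private *i915)
{
	struct intel_sseu value = intel_device_default_sseu(i915);

	value.slice_mask &= BIT(0);	/* keep only slice 0, purely illustrative */

	return value;
}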
/* modesetting */
@@ -3393,10 +3408,10 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
enum dpio_phy phy);
bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
enum dpio_phy phy);
-uint8_t bxt_ddi_phy_calc_lane_lat_optim_mask(uint8_t lane_count);
+u8 bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count);
void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
- uint8_t lane_lat_optim_mask);
-uint8_t bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);
+ u8 lane_lat_optim_mask);
+u8 bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);
void chv_set_phy_signal_level(struct intel_encoder *encoder,
u32 deemph_reg_value, u32 margin_reg_value,
@@ -3599,90 +3614,6 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
}
}
-static inline bool
-__i915_request_irq_complete(const struct i915_request *rq)
-{
- struct intel_engine_cs *engine = rq->engine;
- u32 seqno;
-
- /* Note that the engine may have wrapped around the seqno, and
- * so our request->global_seqno will be ahead of the hardware,
- * even though it completed the request before wrapping. We catch
- * this by kicking all the waiters before resetting the seqno
- * in hardware, and also signal the fence.
- */
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
- return true;
-
- /* The request was dequeued before we were awoken. We check after
- * inspecting the hw to confirm that this was the same request
- * that generated the HWS update. The memory barriers within
- * the request execution are sufficient to ensure that a check
- * after reading the value from hw matches this request.
- */
- seqno = i915_request_global_seqno(rq);
- if (!seqno)
- return false;
-
- /* Before we do the heavier coherent read of the seqno,
- * check the value (hopefully) in the CPU cacheline.
- */
- if (__i915_request_completed(rq, seqno))
- return true;
-
- /* Ensure our read of the seqno is coherent so that we
- * do not "miss an interrupt" (i.e. if this is the last
- * request and the seqno write from the GPU is not visible
- * by the time the interrupt fires, we will see that the
- * request is incomplete and go back to sleep awaiting
- * another interrupt that will never come.)
- *
- * Strictly, we only need to do this once after an interrupt,
- * but it is easier and safer to do it every time the waiter
- * is woken.
- */
- if (engine->irq_seqno_barrier &&
- test_and_clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted)) {
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
-
- /* The ordering of irq_posted versus applying the barrier
- * is crucial. The clearing of the current irq_posted must
- * be visible before we perform the barrier operation,
- * such that if a subsequent interrupt arrives, irq_posted
- * is reasserted and our task rewoken (which causes us to
- * do another __i915_request_irq_complete() immediately
- * and reapply the barrier). Conversely, if the clear
- * occurs after the barrier, then an interrupt that arrived
- * whilst we waited on the barrier would not trigger a
- * barrier on the next pass, and the read may not see the
- * seqno update.
- */
- engine->irq_seqno_barrier(engine);
-
- /* If we consume the irq, but we are no longer the bottom-half,
- * the real bottom-half may not have serialised their own
- * seqno check with the irq-barrier (i.e. may have inspected
- * the seqno before we believe it coherent since they see
- * irq_posted == false but we are still running).
- */
- spin_lock_irq(&b->irq_lock);
- if (b->irq_wait && b->irq_wait->tsk != current)
- /* Note that if the bottom-half is changed as we
- * are sending the wake-up, the new bottom-half will
- * be woken by whomever made the change. We only have
- * to worry about when we steal the irq-posted for
- * ourself.
- */
- wake_up_process(b->irq_wait->tsk);
- spin_unlock_irq(&b->irq_lock);
-
- if (__i915_request_completed(rq, seqno))
- return true;
- }
-
- return false;
-}
-
void i915_memcpy_init_early(struct drm_i915_private *dev_priv);
bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c882ea94172c..6728ea5c71d4 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -25,18 +25,9 @@
*
*/
-#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
+#include <drm/drm_pci.h>
#include <drm/i915_drm.h>
-#include "i915_drv.h"
-#include "i915_gem_clflush.h"
-#include "i915_vgpu.h"
-#include "i915_trace.h"
-#include "intel_drv.h"
-#include "intel_frontbuffer.h"
-#include "intel_mocs.h"
-#include "intel_workarounds.h"
-#include "i915_gemfs.h"
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/reservation.h>
@@ -46,6 +37,19 @@
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
+#include <linux/mman.h>
+
+#include "i915_drv.h"
+#include "i915_gem_clflush.h"
+#include "i915_gemfs.h"
+#include "i915_reset.h"
+#include "i915_trace.h"
+#include "i915_vgpu.h"
+
+#include "intel_drv.h"
+#include "intel_frontbuffer.h"
+#include "intel_mocs.h"
+#include "intel_workarounds.h"
static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
@@ -139,6 +143,8 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
static u32 __i915_gem_park(struct drm_i915_private *i915)
{
+ intel_wakeref_t wakeref;
+
GEM_TRACE("\n");
lockdep_assert_held(&i915->drm.struct_mutex);
@@ -169,14 +175,13 @@ static u32 __i915_gem_park(struct drm_i915_private *i915)
i915_pmu_gt_parked(i915);
i915_vma_parked(i915);
- i915->gt.awake = false;
+ wakeref = fetch_and_zero(&i915->gt.awake);
+ GEM_BUG_ON(!wakeref);
if (INTEL_GEN(i915) >= 6)
gen6_rps_idle(i915);
- intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ);
-
- intel_runtime_pm_put(i915);
+ intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);
return i915->gt.epoch;
}
@@ -201,12 +206,11 @@ void i915_gem_unpark(struct drm_i915_private *i915)
lockdep_assert_held(&i915->drm.struct_mutex);
GEM_BUG_ON(!i915->gt.active_requests);
+ assert_rpm_wakelock_held(i915);
if (i915->gt.awake)
return;
- intel_runtime_pm_get_noresume(i915);
-
/*
* It seems that the DMC likes to transition between the DC states a lot
* when there are no connected displays (no active power domains) during
@@ -218,9 +222,9 @@ void i915_gem_unpark(struct drm_i915_private *i915)
* Work around it by grabbing a GT IRQ power domain whilst there is any
* GT activity, preventing any DC state transitions.
*/
- intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
+ i915->gt.awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
+ GEM_BUG_ON(!i915->gt.awake);
- i915->gt.awake = true;
if (unlikely(++i915->gt.epoch == 0)) /* keep 0 as invalid */
i915->gt.epoch = 1;
@@ -243,21 +247,19 @@ int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct i915_ggtt *ggtt = &to_i915(dev)->ggtt;
struct drm_i915_gem_get_aperture *args = data;
struct i915_vma *vma;
u64 pinned;
+ mutex_lock(&ggtt->vm.mutex);
+
pinned = ggtt->vm.reserved;
- mutex_lock(&dev->struct_mutex);
- list_for_each_entry(vma, &ggtt->vm.active_list, vm_link)
+ list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
if (i915_vma_is_pinned(vma))
pinned += vma->node.size;
- list_for_each_entry(vma, &ggtt->vm.inactive_list, vm_link)
- if (i915_vma_is_pinned(vma))
- pinned += vma->node.size;
- mutex_unlock(&dev->struct_mutex);
+
+ mutex_unlock(&ggtt->vm.mutex);
args->aper_size = ggtt->vm.total;
args->aper_available_size = args->aper_size - pinned;
@@ -437,15 +439,19 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
if (ret)
return ret;
- while ((vma = list_first_entry_or_null(&obj->vma_list,
- struct i915_vma,
- obj_link))) {
+ spin_lock(&obj->vma.lock);
+ while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
+ struct i915_vma,
+ obj_link))) {
list_move_tail(&vma->obj_link, &still_in_list);
+ spin_unlock(&obj->vma.lock);
+
ret = i915_vma_unbind(vma);
- if (ret)
- break;
+
+ spin_lock(&obj->vma.lock);
}
- list_splice(&still_in_list, &obj->vma_list);
+ list_splice(&still_in_list, &obj->vma.list);
+ spin_unlock(&obj->vma.lock);
return ret;
}
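
The rework above walks obj->vma.list under the new obj->vma.lock spinlock, parking each vma on a private list so the lock can be dropped around the sleeping i915_vma_unbind() call, then splices the survivors back. Reduced to a generic sketch of that pattern, with placeholder types and names:

struct fake_item {
	struct list_head link;
};

static int fake_drain(struct list_head *list, spinlock_t *lock,
		      int (*unbind)(struct fake_item *))
{
	LIST_HEAD(still_in_list);
	struct fake_item *it;
	int ret = 0;

	spin_lock(lock);
	while (!ret &&
	       (it = list_first_entry_or_null(list, struct fake_item, link))) {
		list_move_tail(&it->link, &still_in_list);
		spin_unlock(lock);		/* drop the lock for the blocking call */

		ret = unbind(it);

		spin_lock(lock);
	}
	list_splice(&still_in_list, list);
	spin_unlock(lock);

	return ret;
}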
@@ -655,11 +661,6 @@ i915_gem_object_wait(struct drm_i915_gem_object *obj,
struct intel_rps_client *rps_client)
{
might_sleep();
-#if IS_ENABLED(CONFIG_LOCKDEP)
- GEM_BUG_ON(debug_locks &&
- !!lockdep_is_held(&obj->base.dev->struct_mutex) !=
- !!(flags & I915_WAIT_LOCKED));
-#endif
GEM_BUG_ON(timeout < 0);
timeout = i915_gem_object_wait_reservation(obj->resv,
@@ -711,8 +712,8 @@ void i915_gem_object_free(struct drm_i915_gem_object *obj)
static int
i915_gem_create(struct drm_file *file,
struct drm_i915_private *dev_priv,
- uint64_t size,
- uint32_t *handle_p)
+ u64 size,
+ u32 *handle_p)
{
struct drm_i915_gem_object *obj;
int ret;
@@ -783,6 +784,8 @@ fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
{
+ intel_wakeref_t wakeref;
+
/*
* No actual flushing is required for the GTT write domain for reads
* from the GTT domain. Writes to it "immediately" go to main memory
@@ -809,13 +812,13 @@ void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
i915_gem_chipset_flush(dev_priv);
- intel_runtime_pm_get(dev_priv);
- spin_lock_irq(&dev_priv->uncore.lock);
+ with_intel_runtime_pm(dev_priv, wakeref) {
+ spin_lock_irq(&dev_priv->uncore.lock);
- POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE));
+ POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE));
- spin_unlock_irq(&dev_priv->uncore.lock);
- intel_runtime_pm_put(dev_priv);
+ spin_unlock_irq(&dev_priv->uncore.lock);
+ }
}
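
The conversion above is a direct use of the scoped form: with_intel_runtime_pm() wraps a block in a get/put pair so the wakeref cookie cannot leak out of the critical section. A minimal sketch with a hypothetical body:

static void fake_poke_registers(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	with_intel_runtime_pm(i915, wakeref) {
		/* the device is held awake for the duration of this block */
	}
}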
static void
@@ -859,58 +862,6 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
obj->write_domain = 0;
}
-static inline int
-__copy_to_user_swizzled(char __user *cpu_vaddr,
- const char *gpu_vaddr, int gpu_offset,
- int length)
-{
- int ret, cpu_offset = 0;
-
- while (length > 0) {
- int cacheline_end = ALIGN(gpu_offset + 1, 64);
- int this_length = min(cacheline_end - gpu_offset, length);
- int swizzled_gpu_offset = gpu_offset ^ 64;
-
- ret = __copy_to_user(cpu_vaddr + cpu_offset,
- gpu_vaddr + swizzled_gpu_offset,
- this_length);
- if (ret)
- return ret + length;
-
- cpu_offset += this_length;
- gpu_offset += this_length;
- length -= this_length;
- }
-
- return 0;
-}
-
-static inline int
-__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
- const char __user *cpu_vaddr,
- int length)
-{
- int ret, cpu_offset = 0;
-
- while (length > 0) {
- int cacheline_end = ALIGN(gpu_offset + 1, 64);
- int this_length = min(cacheline_end - gpu_offset, length);
- int swizzled_gpu_offset = gpu_offset ^ 64;
-
- ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
- cpu_vaddr + cpu_offset,
- this_length);
- if (ret)
- return ret + length;
-
- cpu_offset += this_length;
- gpu_offset += this_length;
- length -= this_length;
- }
-
- return 0;
-}
-
/*
* Pins the specified object's pages and synchronizes the object with
* GPU accesses. Sets needs_clflush to non-zero if the caller should
@@ -1030,72 +981,23 @@ err_unpin:
return ret;
}
-static void
-shmem_clflush_swizzled_range(char *addr, unsigned long length,
- bool swizzled)
-{
- if (unlikely(swizzled)) {
- unsigned long start = (unsigned long) addr;
- unsigned long end = (unsigned long) addr + length;
-
- /* For swizzling simply ensure that we always flush both
- * channels. Lame, but simple and it works. Swizzled
- * pwrite/pread is far from a hotpath - current userspace
- * doesn't use it at all. */
- start = round_down(start, 128);
- end = round_up(end, 128);
-
- drm_clflush_virt_range((void *)start, end - start);
- } else {
- drm_clflush_virt_range(addr, length);
- }
-
-}
-
-/* Only difference to the fast-path function is that this can handle bit17
- * and uses non-atomic copy and kmap functions. */
static int
-shmem_pread_slow(struct page *page, int offset, int length,
- char __user *user_data,
- bool page_do_bit17_swizzling, bool needs_clflush)
+shmem_pread(struct page *page, int offset, int len, char __user *user_data,
+ bool needs_clflush)
{
char *vaddr;
int ret;
vaddr = kmap(page);
- if (needs_clflush)
- shmem_clflush_swizzled_range(vaddr + offset, length,
- page_do_bit17_swizzling);
-
- if (page_do_bit17_swizzling)
- ret = __copy_to_user_swizzled(user_data, vaddr, offset, length);
- else
- ret = __copy_to_user(user_data, vaddr + offset, length);
- kunmap(page);
-
- return ret ? - EFAULT : 0;
-}
-static int
-shmem_pread(struct page *page, int offset, int length, char __user *user_data,
- bool page_do_bit17_swizzling, bool needs_clflush)
-{
- int ret;
+ if (needs_clflush)
+ drm_clflush_virt_range(vaddr + offset, len);
- ret = -ENODEV;
- if (!page_do_bit17_swizzling) {
- char *vaddr = kmap_atomic(page);
+ ret = __copy_to_user(user_data, vaddr + offset, len);
- if (needs_clflush)
- drm_clflush_virt_range(vaddr + offset, length);
- ret = __copy_to_user_inatomic(user_data, vaddr + offset, length);
- kunmap_atomic(vaddr);
- }
- if (ret == 0)
- return 0;
+ kunmap(page);
- return shmem_pread_slow(page, offset, length, user_data,
- page_do_bit17_swizzling, needs_clflush);
+ return ret ? -EFAULT : 0;
}
static int
@@ -1104,15 +1006,10 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
{
char __user *user_data;
u64 remain;
- unsigned int obj_do_bit17_swizzling;
unsigned int needs_clflush;
unsigned int idx, offset;
int ret;
- obj_do_bit17_swizzling = 0;
- if (i915_gem_object_needs_bit17_swizzle(obj))
- obj_do_bit17_swizzling = BIT(17);
-
ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
if (ret)
return ret;
@@ -1130,7 +1027,6 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
ret = shmem_pread(page, offset, length, user_data,
- page_to_phys(page) & obj_do_bit17_swizzling,
needs_clflush);
if (ret)
break;
@@ -1174,6 +1070,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_ggtt *ggtt = &i915->ggtt;
+ intel_wakeref_t wakeref;
struct drm_mm_node node;
struct i915_vma *vma;
void __user *user_data;
@@ -1184,7 +1081,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
if (ret)
return ret;
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
PIN_MAPPABLE |
PIN_NONFAULT |
@@ -1257,7 +1154,7 @@ out_unpin:
i915_vma_unpin(vma);
}
out_unlock:
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return ret;
@@ -1358,6 +1255,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_ggtt *ggtt = &i915->ggtt;
+ intel_wakeref_t wakeref;
struct drm_mm_node node;
struct i915_vma *vma;
u64 remain, offset;
@@ -1376,13 +1274,14 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
* This easily dwarfs any performance advantage from
* using the cache bypass of indirect GGTT access.
*/
- if (!intel_runtime_pm_get_if_in_use(i915)) {
+ wakeref = intel_runtime_pm_get_if_in_use(i915);
+ if (!wakeref) {
ret = -EFAULT;
goto out_unlock;
}
} else {
/* No backing pages, no fallback, we must force GGTT access */
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
}
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
@@ -1464,39 +1363,12 @@ out_unpin:
i915_vma_unpin(vma);
}
out_rpm:
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
out_unlock:
mutex_unlock(&i915->drm.struct_mutex);
return ret;
}
-static int
-shmem_pwrite_slow(struct page *page, int offset, int length,
- char __user *user_data,
- bool page_do_bit17_swizzling,
- bool needs_clflush_before,
- bool needs_clflush_after)
-{
- char *vaddr;
- int ret;
-
- vaddr = kmap(page);
- if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
- shmem_clflush_swizzled_range(vaddr + offset, length,
- page_do_bit17_swizzling);
- if (page_do_bit17_swizzling)
- ret = __copy_from_user_swizzled(vaddr, offset, user_data,
- length);
- else
- ret = __copy_from_user(vaddr + offset, user_data, length);
- if (needs_clflush_after)
- shmem_clflush_swizzled_range(vaddr + offset, length,
- page_do_bit17_swizzling);
- kunmap(page);
-
- return ret ? -EFAULT : 0;
-}
-
/* Per-page copy function for the shmem pwrite fastpath.
* Flushes invalid cachelines before writing to the target if
* needs_clflush_before is set and flushes out any written cachelines after
@@ -1504,31 +1376,24 @@ shmem_pwrite_slow(struct page *page, int offset, int length,
*/
static int
shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
- bool page_do_bit17_swizzling,
bool needs_clflush_before,
bool needs_clflush_after)
{
+ char *vaddr;
int ret;
- ret = -ENODEV;
- if (!page_do_bit17_swizzling) {
- char *vaddr = kmap_atomic(page);
+ vaddr = kmap(page);
- if (needs_clflush_before)
- drm_clflush_virt_range(vaddr + offset, len);
- ret = __copy_from_user_inatomic(vaddr + offset, user_data, len);
- if (needs_clflush_after)
- drm_clflush_virt_range(vaddr + offset, len);
+ if (needs_clflush_before)
+ drm_clflush_virt_range(vaddr + offset, len);
- kunmap_atomic(vaddr);
- }
- if (ret == 0)
- return ret;
+ ret = __copy_from_user(vaddr + offset, user_data, len);
+ if (!ret && needs_clflush_after)
+ drm_clflush_virt_range(vaddr + offset, len);
+
+ kunmap(page);
- return shmem_pwrite_slow(page, offset, len, user_data,
- page_do_bit17_swizzling,
- needs_clflush_before,
- needs_clflush_after);
+ return ret ? -EFAULT : 0;
}
static int
@@ -1538,7 +1403,6 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
struct drm_i915_private *i915 = to_i915(obj->base.dev);
void __user *user_data;
u64 remain;
- unsigned int obj_do_bit17_swizzling;
unsigned int partial_cacheline_write;
unsigned int needs_clflush;
unsigned int offset, idx;
@@ -1553,10 +1417,6 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
if (ret)
return ret;
- obj_do_bit17_swizzling = 0;
- if (i915_gem_object_needs_bit17_swizzle(obj))
- obj_do_bit17_swizzling = BIT(17);
-
/* If we don't overwrite a cacheline completely we need to be
* careful to have up-to-date data by first clflushing. Don't
* overcomplicate things and flush the entire patch.
@@ -1573,7 +1433,6 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
ret = shmem_pwrite(page, offset, length, user_data,
- page_to_phys(page) & obj_do_bit17_swizzling,
(offset | length) & partial_cacheline_write,
needs_clflush & CLFLUSH_AFTER);
if (ret)
@@ -1677,23 +1536,21 @@ err:
static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *i915;
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct list_head *list;
struct i915_vma *vma;
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+ mutex_lock(&i915->ggtt.vm.mutex);
for_each_ggtt_vma(vma, obj) {
- if (i915_vma_is_active(vma))
- continue;
-
if (!drm_mm_node_allocated(&vma->node))
continue;
- list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+ list_move_tail(&vma->vm_link, &vma->vm->bound_list);
}
+ mutex_unlock(&i915->ggtt.vm.mutex);
- i915 = to_i915(obj->base.dev);
spin_lock(&i915->mm.obj_lock);
list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
list_move_tail(&obj->mm.link, list);
@@ -1713,8 +1570,8 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_set_domain *args = data;
struct drm_i915_gem_object *obj;
- uint32_t read_domains = args->read_domains;
- uint32_t write_domain = args->write_domain;
+ u32 read_domains = args->read_domains;
+ u32 write_domain = args->write_domain;
int err;
/* Only handle setting domains to types used by the CPU. */
@@ -1883,6 +1740,9 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
addr = vm_mmap(obj->base.filp, 0, args->size,
PROT_READ | PROT_WRITE, MAP_SHARED,
args->offset);
+ if (IS_ERR_VALUE(addr))
+ goto err;
+
if (args->flags & I915_MMAP_WC) {
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
@@ -1898,17 +1758,22 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
else
addr = -ENOMEM;
up_write(&mm->mmap_sem);
+ if (IS_ERR_VALUE(addr))
+ goto err;
/* This may race, but that's ok, it only gets set */
WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
}
i915_gem_object_put(obj);
- if (IS_ERR((void *)addr))
- return addr;
- args->addr_ptr = (uint64_t) addr;
+ args->addr_ptr = (u64)addr;
return 0;
+
+err:
+ i915_gem_object_put(obj);
+
+ return addr;
}
static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
@@ -2019,6 +1884,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
bool write = area->vm_flags & VM_WRITE;
+ intel_wakeref_t wakeref;
struct i915_vma *vma;
pgoff_t page_offset;
int ret;
@@ -2048,7 +1914,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
if (ret)
goto err;
- intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(dev_priv);
ret = i915_mutex_lock_interruptible(dev);
if (ret)
@@ -2126,7 +1992,7 @@ err_unpin:
err_unlock:
mutex_unlock(&dev->struct_mutex);
err_rpm:
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
i915_gem_object_unpin_pages(obj);
err:
switch (ret) {
@@ -2199,6 +2065,7 @@ void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ intel_wakeref_t wakeref;
/* Serialisation between user GTT access and our code depends upon
* revoking the CPU's PTE whilst the mutex is held. The next user
@@ -2209,7 +2076,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
* wakeref.
*/
lockdep_assert_held(&i915->drm.struct_mutex);
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
if (!obj->userfault_count)
goto out;
@@ -2226,7 +2093,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
wmb();
out:
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
}
void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
@@ -2306,8 +2173,8 @@ static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
int
i915_gem_mmap_gtt(struct drm_file *file,
struct drm_device *dev,
- uint32_t handle,
- uint64_t *offset)
+ u32 handle,
+ u64 *offset)
{
struct drm_i915_gem_object *obj;
int ret;
@@ -2454,8 +2321,8 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
struct sg_table *pages;
pages = fetch_and_zero(&obj->mm.pages);
- if (!pages)
- return NULL;
+ if (IS_ERR_OR_NULL(pages))
+ return pages;
spin_lock(&i915->mm.obj_lock);
list_del(&obj->mm.link);
@@ -2479,22 +2346,23 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
return pages;
}
-void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
- enum i915_mm_subclass subclass)
+int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
+ enum i915_mm_subclass subclass)
{
struct sg_table *pages;
+ int ret;
if (i915_gem_object_has_pinned_pages(obj))
- return;
+ return -EBUSY;
GEM_BUG_ON(obj->bind_count);
- if (!i915_gem_object_has_pages(obj))
- return;
/* May be called by shrinker from within get_pages() (on another bo) */
mutex_lock_nested(&obj->mm.lock, subclass);
- if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
+ if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
+ ret = -EBUSY;
goto unlock;
+ }
/*
* ->put_pages might need to allocate memory for the bit17 swizzle
@@ -2502,11 +2370,24 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
* lists early.
*/
pages = __i915_gem_object_unset_pages(obj);
+
+ /*
+ * XXX Temporary hijinx to avoid updating all backends to handle
+ * NULL pages. In the future, when we have more asynchronous
+ * get_pages backends we should be better able to handle the
+ * cancellation of the async task in a more uniform manner.
+ */
+ if (!pages && !i915_gem_object_needs_async_cancel(obj))
+ pages = ERR_PTR(-EINVAL);
+
if (!IS_ERR(pages))
obj->ops->put_pages(obj, pages);
+ ret = 0;
unlock:
mutex_unlock(&obj->mm.lock);
+
+ return ret;
}
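
__i915_gem_object_put_pages() now reports failure instead of returning silently, so callers such as the shrinker can tell apart objects whose pages are still pinned. A hypothetical caller reacting to the new return value:

/* Hypothetical helper in a shrinker-like caller. */
static bool fake_try_discard(struct drm_i915_gem_object *obj)
{
	return __i915_gem_object_put_pages(obj, I915_MM_SHRINKER) == 0;
}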
bool i915_sg_trim(struct sg_table *orig_st)
@@ -3010,59 +2891,12 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
return 0;
}
-static void i915_gem_client_mark_guilty(struct drm_i915_file_private *file_priv,
- const struct i915_gem_context *ctx)
+static bool match_ring(struct i915_request *rq)
{
- unsigned int score;
- unsigned long prev_hang;
-
- if (i915_gem_context_is_banned(ctx))
- score = I915_CLIENT_SCORE_CONTEXT_BAN;
- else
- score = 0;
+ struct drm_i915_private *dev_priv = rq->i915;
+ u32 ring = I915_READ(RING_START(rq->engine->mmio_base));
- prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
- if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
- score += I915_CLIENT_SCORE_HANG_FAST;
-
- if (score) {
- atomic_add(score, &file_priv->ban_score);
-
- DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n",
- ctx->name, score,
- atomic_read(&file_priv->ban_score));
- }
-}
-
-static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
-{
- unsigned int score;
- bool banned, bannable;
-
- atomic_inc(&ctx->guilty_count);
-
- bannable = i915_gem_context_is_bannable(ctx);
- score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
- banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;
-
- /* Cool contexts don't accumulate client ban score */
- if (!bannable)
- return;
-
- if (banned) {
- DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, banned\n",
- ctx->name, atomic_read(&ctx->guilty_count),
- score);
- i915_gem_context_set_banned(ctx);
- }
-
- if (!IS_ERR_OR_NULL(ctx->file_priv))
- i915_gem_client_mark_guilty(ctx->file_priv, ctx);
-}
-
-static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx)
-{
- atomic_inc(&ctx->active_count);
+ return ring == i915_ggtt_offset(rq->ring->vma);
}
struct i915_request *
@@ -3084,9 +2918,16 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
*/
spin_lock_irqsave(&engine->timeline.lock, flags);
list_for_each_entry(request, &engine->timeline.requests, link) {
- if (__i915_request_completed(request, request->global_seqno))
+ if (i915_request_completed(request))
continue;
+ if (!i915_request_started(request))
+ break;
+
+ /* More than one preemptible request may match! */
+ if (!match_ring(request))
+ break;
+
active = request;
break;
}
@@ -3095,366 +2936,6 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
return active;
}
-/*
- * Ensure irq handler finishes, and not run again.
- * Also return the active request so that we only search for it once.
- */
-struct i915_request *
-i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
-{
- struct i915_request *request;
-
- /*
- * During the reset sequence, we must prevent the engine from
- * entering RC6. As the context state is undefined until we restart
- * the engine, if it does enter RC6 during the reset, the state
- * written to the powercontext is undefined and so we may lose
- * GPU state upon resume, i.e. fail to restart after a reset.
- */
- intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);
-
- request = engine->reset.prepare(engine);
- if (request && request->fence.error == -EIO)
- request = ERR_PTR(-EIO); /* Previous reset failed! */
-
- return request;
-}
-
-int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- struct i915_request *request;
- enum intel_engine_id id;
- int err = 0;
-
- for_each_engine(engine, dev_priv, id) {
- request = i915_gem_reset_prepare_engine(engine);
- if (IS_ERR(request)) {
- err = PTR_ERR(request);
- continue;
- }
-
- engine->hangcheck.active_request = request;
- }
-
- i915_gem_revoke_fences(dev_priv);
- intel_uc_sanitize(dev_priv);
-
- return err;
-}
-
-static void engine_skip_context(struct i915_request *request)
-{
- struct intel_engine_cs *engine = request->engine;
- struct i915_gem_context *hung_ctx = request->gem_context;
- struct i915_timeline *timeline = request->timeline;
- unsigned long flags;
-
- GEM_BUG_ON(timeline == &engine->timeline);
-
- spin_lock_irqsave(&engine->timeline.lock, flags);
- spin_lock(&timeline->lock);
-
- list_for_each_entry_continue(request, &engine->timeline.requests, link)
- if (request->gem_context == hung_ctx)
- i915_request_skip(request, -EIO);
-
- list_for_each_entry(request, &timeline->requests, link)
- i915_request_skip(request, -EIO);
-
- spin_unlock(&timeline->lock);
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
-}
-
-/* Returns the request if it was guilty of the hang */
-static struct i915_request *
-i915_gem_reset_request(struct intel_engine_cs *engine,
- struct i915_request *request,
- bool stalled)
-{
- /* The guilty request will get skipped on a hung engine.
- *
- * Users of client default contexts do not rely on logical
- * state preserved between batches so it is safe to execute
- * queued requests following the hang. Non default contexts
- * rely on preserved state, so skipping a batch loses the
- * evolution of the state and it needs to be considered corrupted.
- * Executing more queued batches on top of corrupted state is
- * risky. But we take the risk by trying to advance through
- * the queued requests in order to make the client behaviour
- * more predictable around resets, by not throwing away a random
- * amount of batches it has prepared for execution. Sophisticated
- * clients can use gem_reset_stats_ioctl and dma fence status
- * (exported via sync_file info ioctl on explicit fences) to observe
- * when it loses the context state and should rebuild accordingly.
- *
- * The context ban, and ultimately the client ban, mechanism are safety
- * valves if client submission ends up resulting in nothing more than
- * subsequent hangs.
- */
-
- if (i915_request_completed(request)) {
- GEM_TRACE("%s pardoned global=%d (fence %llx:%d), current %d\n",
- engine->name, request->global_seqno,
- request->fence.context, request->fence.seqno,
- intel_engine_get_seqno(engine));
- stalled = false;
- }
-
- if (stalled) {
- i915_gem_context_mark_guilty(request->gem_context);
- i915_request_skip(request, -EIO);
-
- /* If this context is now banned, skip all pending requests. */
- if (i915_gem_context_is_banned(request->gem_context))
- engine_skip_context(request);
- } else {
- /*
- * Since this is not the hung engine, it may have advanced
- * since the hang declaration. Double check by refinding
- * the active request at the time of the reset.
- */
- request = i915_gem_find_active_request(engine);
- if (request) {
- unsigned long flags;
-
- i915_gem_context_mark_innocent(request->gem_context);
- dma_fence_set_error(&request->fence, -EAGAIN);
-
- /* Rewind the engine to replay the incomplete rq */
- spin_lock_irqsave(&engine->timeline.lock, flags);
- request = list_prev_entry(request, link);
- if (&request->link == &engine->timeline.requests)
- request = NULL;
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
- }
- }
-
- return request;
-}
-
-void i915_gem_reset_engine(struct intel_engine_cs *engine,
- struct i915_request *request,
- bool stalled)
-{
- /*
- * Make sure this write is visible before we re-enable the interrupt
- * handlers on another CPU, as tasklet_enable() resolves to just
- * a compiler barrier which is insufficient for our purpose here.
- */
- smp_store_mb(engine->irq_posted, 0);
-
- if (request)
- request = i915_gem_reset_request(engine, request, stalled);
-
- /* Setup the CS to resume from the breadcrumb of the hung request */
- engine->reset.reset(engine, request);
-}
-
-void i915_gem_reset(struct drm_i915_private *dev_priv,
- unsigned int stalled_mask)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
- i915_retire_requests(dev_priv);
-
- for_each_engine(engine, dev_priv, id) {
- struct intel_context *ce;
-
- i915_gem_reset_engine(engine,
- engine->hangcheck.active_request,
- stalled_mask & ENGINE_MASK(id));
- ce = fetch_and_zero(&engine->last_retired_context);
- if (ce)
- intel_context_unpin(ce);
-
- /*
- * Ostensibly, we always want a context loaded for powersaving,
- * so if the engine is idle after the reset, send a request
- * to load our scratch kernel_context.
- *
- * More mysteriously, if we leave the engine idle after a reset,
- * the next userspace batch may hang, with what appears to be
- * an incoherent read by the CS (presumably stale TLB). An
- * empty request appears sufficient to paper over the glitch.
- */
- if (intel_engine_is_idle(engine)) {
- struct i915_request *rq;
-
- rq = i915_request_alloc(engine,
- dev_priv->kernel_context);
- if (!IS_ERR(rq))
- i915_request_add(rq);
- }
- }
-
- i915_gem_restore_fences(dev_priv);
-}
-
-void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
-{
- engine->reset.finish(engine);
-
- intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
-}
-
-void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
- for_each_engine(engine, dev_priv, id) {
- engine->hangcheck.active_request = NULL;
- i915_gem_reset_finish_engine(engine);
- }
-}
-
-static void nop_submit_request(struct i915_request *request)
-{
- unsigned long flags;
-
- GEM_TRACE("%s fence %llx:%d -> -EIO\n",
- request->engine->name,
- request->fence.context, request->fence.seqno);
- dma_fence_set_error(&request->fence, -EIO);
-
- spin_lock_irqsave(&request->engine->timeline.lock, flags);
- __i915_request_submit(request);
- intel_engine_init_global_seqno(request->engine, request->global_seqno);
- spin_unlock_irqrestore(&request->engine->timeline.lock, flags);
-}
-
-void i915_gem_set_wedged(struct drm_i915_private *i915)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- GEM_TRACE("start\n");
-
- if (GEM_SHOW_DEBUG()) {
- struct drm_printer p = drm_debug_printer(__func__);
-
- for_each_engine(engine, i915, id)
- intel_engine_dump(engine, &p, "%s\n", engine->name);
- }
-
- if (test_and_set_bit(I915_WEDGED, &i915->gpu_error.flags))
- goto out;
-
- /*
- * First, stop submission to hw, but do not yet complete requests by
- * rolling the global seqno forward (since this would complete requests
- * for which we haven't set the fence error to EIO yet).
- */
- for_each_engine(engine, i915, id)
- i915_gem_reset_prepare_engine(engine);
-
- /* Even if the GPU reset fails, it should still stop the engines */
- if (INTEL_GEN(i915) >= 5)
- intel_gpu_reset(i915, ALL_ENGINES);
-
- for_each_engine(engine, i915, id) {
- engine->submit_request = nop_submit_request;
- engine->schedule = NULL;
- }
- i915->caps.scheduler = 0;
-
- /*
- * Make sure no request can slip through without getting completed by
- * either this call here to intel_engine_init_global_seqno, or the one
- * in nop_submit_request.
- */
- synchronize_rcu();
-
- /* Mark all executing requests as skipped */
- for_each_engine(engine, i915, id)
- engine->cancel_requests(engine);
-
- for_each_engine(engine, i915, id) {
- i915_gem_reset_finish_engine(engine);
- intel_engine_wakeup(engine);
- }
-
-out:
- GEM_TRACE("end\n");
-
- wake_up_all(&i915->gpu_error.reset_queue);
-}
-
-bool i915_gem_unset_wedged(struct drm_i915_private *i915)
-{
- struct i915_timeline *tl;
-
- lockdep_assert_held(&i915->drm.struct_mutex);
- if (!test_bit(I915_WEDGED, &i915->gpu_error.flags))
- return true;
-
- GEM_TRACE("start\n");
-
- /*
- * Before unwedging, make sure that all pending operations
- * are flushed and errored out - we may have requests waiting upon
- * third party fences. We marked all inflight requests as EIO, and
- * every execbuf since returned EIO, for consistency we want all
- * the currently pending requests to also be marked as EIO, which
- * is done inside our nop_submit_request - and so we must wait.
- *
- * No more can be submitted until we reset the wedged bit.
- */
- list_for_each_entry(tl, &i915->gt.timelines, link) {
- struct i915_request *rq;
-
- rq = i915_gem_active_peek(&tl->last_request,
- &i915->drm.struct_mutex);
- if (!rq)
- continue;
-
- /*
- * We can't use our normal waiter as we want to
- * avoid recursively trying to handle the current
- * reset. The basic dma_fence_default_wait() installs
- * a callback for dma_fence_signal(), which is
- * triggered by our nop handler (indirectly, the
- * callback enables the signaler thread which is
- * woken by the nop_submit_request() advancing the seqno
- * and when the seqno passes the fence, the signaler
- * then signals the fence waking us up).
- */
- if (dma_fence_default_wait(&rq->fence, true,
- MAX_SCHEDULE_TIMEOUT) < 0)
- return false;
- }
- i915_retire_requests(i915);
- GEM_BUG_ON(i915->gt.active_requests);
-
- if (!intel_gpu_reset(i915, ALL_ENGINES))
- intel_engines_sanitize(i915);
-
- /*
- * Undo nop_submit_request. We prevent all new i915 requests from
- * being queued (by disallowing execbuf whilst wedged) so having
- * waited for all active requests above, we know the system is idle
- * and do not have to worry about a thread being inside
- * engine->submit_request() as we swap over. So unlike installing
- * the nop_submit_request on reset, we can do this from normal
- * context and do not require stop_machine().
- */
- intel_engines_reset_default_submission(i915);
- i915_gem_contexts_lost(i915);
-
- GEM_TRACE("end\n");
-
- smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
- clear_bit(I915_WEDGED, &i915->gpu_error.flags);
-
- return true;
-}
-
static void
i915_gem_retire_work_handler(struct work_struct *work)
{
@@ -3557,7 +3038,7 @@ static void assert_kernel_context_is_current(struct drm_i915_private *i915)
GEM_BUG_ON(i915->gt.active_requests);
for_each_engine(engine, i915, id) {
- GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline.last_request));
+ GEM_BUG_ON(__i915_active_request_peek(&engine->timeline.last_request));
GEM_BUG_ON(engine->last_retired_context !=
to_intel_context(i915->kernel_context, engine));
}
@@ -3776,33 +3257,6 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
return ret;
}
-static long wait_for_timeline(struct i915_timeline *tl,
- unsigned int flags, long timeout)
-{
- struct i915_request *rq;
-
- rq = i915_gem_active_get_unlocked(&tl->last_request);
- if (!rq)
- return timeout;
-
- /*
- * "Race-to-idle".
- *
- * Switching to the kernel context is often used as a synchronous
- * step prior to idling, e.g. in suspend for flushing all
- * current operations to memory before sleeping. These we
- * want to complete as quickly as possible to avoid prolonged
- * stalls, so allow the gpu to boost to maximum clocks.
- */
- if (flags & I915_WAIT_FOR_IDLE_BOOST)
- gen6_rps_boost(rq, NULL);
-
- timeout = i915_request_wait(rq, flags, timeout);
- i915_request_put(rq);
-
- return timeout;
-}
-
static int wait_for_engines(struct drm_i915_private *i915)
{
if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) {
@@ -3816,6 +3270,52 @@ static int wait_for_engines(struct drm_i915_private *i915)
return 0;
}
+static long
+wait_for_timelines(struct drm_i915_private *i915,
+ unsigned int flags, long timeout)
+{
+ struct i915_gt_timelines *gt = &i915->gt.timelines;
+ struct i915_timeline *tl;
+
+ if (!READ_ONCE(i915->gt.active_requests))
+ return timeout;
+
+ mutex_lock(&gt->mutex);
+ list_for_each_entry(tl, &gt->active_list, link) {
+ struct i915_request *rq;
+
+ rq = i915_active_request_get_unlocked(&tl->last_request);
+ if (!rq)
+ continue;
+
+ mutex_unlock(&gt->mutex);
+
+ /*
+ * "Race-to-idle".
+ *
+ * Switching to the kernel context is often used as a synchronous
+ * step prior to idling, e.g. in suspend for flushing all
+ * current operations to memory before sleeping. These we
+ * want to complete as quickly as possible to avoid prolonged
+ * stalls, so allow the gpu to boost to maximum clocks.
+ */
+ if (flags & I915_WAIT_FOR_IDLE_BOOST)
+ gen6_rps_boost(rq, NULL);
+
+ timeout = i915_request_wait(rq, flags, timeout);
+ i915_request_put(rq);
+ if (timeout < 0)
+ return timeout;
+
+ /* restart after reacquiring the lock */
+ mutex_lock(&gt->mutex);
+ tl = list_entry(&gt->active_list, typeof(*tl), link);
+ }
+ mutex_unlock(&gt->mutex);
+
+ return timeout;
+}
+
int i915_gem_wait_for_idle(struct drm_i915_private *i915,
unsigned int flags, long timeout)
{
@@ -3827,17 +3327,15 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915,
if (!READ_ONCE(i915->gt.awake))
return 0;
+ timeout = wait_for_timelines(i915, flags, timeout);
+ if (timeout < 0)
+ return timeout;
+
if (flags & I915_WAIT_LOCKED) {
- struct i915_timeline *tl;
int err;
lockdep_assert_held(&i915->drm.struct_mutex);
- list_for_each_entry(tl, &i915->gt.timelines, link) {
- timeout = wait_for_timeline(tl, flags, timeout);
- if (timeout < 0)
- return timeout;
- }
if (GEM_SHOW_DEBUG() && !timeout) {
/* Presume that timeout was non-zero to begin with! */
dev_warn(&i915->drm.pdev->dev,
@@ -3851,17 +3349,6 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915,
i915_retire_requests(i915);
GEM_BUG_ON(i915->gt.active_requests);
- } else {
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- for_each_engine(engine, i915, id) {
- struct i915_timeline *tl = &engine->timeline;
-
- timeout = wait_for_timeline(tl, flags, timeout);
- if (timeout < 0)
- return timeout;
- }
}
return 0;
@@ -4047,7 +3534,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
* reading an invalid PTE on older architectures.
*/
restart:
- list_for_each_entry(vma, &obj->vma_list, obj_link) {
+ list_for_each_entry(vma, &obj->vma.list, obj_link) {
if (!drm_mm_node_allocated(&vma->node))
continue;
@@ -4125,7 +3612,7 @@ restart:
*/
}
- list_for_each_entry(vma, &obj->vma_list, obj_link) {
+ list_for_each_entry(vma, &obj->vma.list, obj_link) {
if (!drm_mm_node_allocated(&vma->node))
continue;
@@ -4135,7 +3622,7 @@ restart:
}
}
- list_for_each_entry(vma, &obj->vma_list, obj_link)
+ list_for_each_entry(vma, &obj->vma.list, obj_link)
vma->node.color = cache_level;
i915_gem_object_set_cache_coherency(obj, cache_level);
obj->cache_dirty = true; /* Always invalidate stale cachelines */
@@ -4698,7 +4185,8 @@ out:
}
static void
-frontbuffer_retire(struct i915_gem_active *active, struct i915_request *request)
+frontbuffer_retire(struct i915_active_request *active,
+ struct i915_request *request)
{
struct drm_i915_gem_object *obj =
container_of(active, typeof(*obj), frontbuffer_write);
@@ -4711,7 +4199,9 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
{
mutex_init(&obj->mm.lock);
- INIT_LIST_HEAD(&obj->vma_list);
+ spin_lock_init(&obj->vma.lock);
+ INIT_LIST_HEAD(&obj->vma.list);
+
INIT_LIST_HEAD(&obj->lut_list);
INIT_LIST_HEAD(&obj->batch_pool_link);
@@ -4723,7 +4213,8 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
obj->resv = &obj->__builtin_resv;
obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
- init_request_active(&obj->frontbuffer_write, frontbuffer_retire);
+ i915_active_request_init(&obj->frontbuffer_write,
+ NULL, frontbuffer_retire);
obj->mm.madv = I915_MADV_WILLNEED;
INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
@@ -4866,8 +4357,9 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
struct llist_node *freed)
{
struct drm_i915_gem_object *obj, *on;
+ intel_wakeref_t wakeref;
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
llist_for_each_entry_safe(obj, on, freed, freed) {
struct i915_vma *vma, *vn;
@@ -4876,14 +4368,13 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
mutex_lock(&i915->drm.struct_mutex);
GEM_BUG_ON(i915_gem_object_is_active(obj));
- list_for_each_entry_safe(vma, vn,
- &obj->vma_list, obj_link) {
+ list_for_each_entry_safe(vma, vn, &obj->vma.list, obj_link) {
GEM_BUG_ON(i915_vma_is_active(vma));
vma->flags &= ~I915_VMA_PIN_MASK;
i915_vma_destroy(vma);
}
- GEM_BUG_ON(!list_empty(&obj->vma_list));
- GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree));
+ GEM_BUG_ON(!list_empty(&obj->vma.list));
+ GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma.tree));
/* This serializes freeing with the shrinker. Since the free
* is delayed, first by RCU then by the workqueue, we want the
@@ -4928,7 +4419,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
if (on)
cond_resched();
}
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
}
static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
@@ -5037,13 +4528,11 @@ void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
void i915_gem_sanitize(struct drm_i915_private *i915)
{
- int err;
+ intel_wakeref_t wakeref;
GEM_TRACE("\n");
- mutex_lock(&i915->drm.struct_mutex);
-
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
/*
@@ -5063,28 +4552,28 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
* it may impact the display and we are uncertain about the stability
* of the reset, so this could be applied to even earlier gen.
*/
- err = -ENODEV;
- if (INTEL_GEN(i915) >= 5 && intel_has_gpu_reset(i915))
- err = WARN_ON(intel_gpu_reset(i915, ALL_ENGINES));
- if (!err)
- intel_engines_sanitize(i915);
+ intel_engines_sanitize(i915, false);
intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
+ mutex_lock(&i915->drm.struct_mutex);
i915_gem_contexts_lost(i915);
mutex_unlock(&i915->drm.struct_mutex);
}
int i915_gem_suspend(struct drm_i915_private *i915)
{
+ intel_wakeref_t wakeref;
int ret;
GEM_TRACE("\n");
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
intel_suspend_gt_powersave(i915);
+ flush_workqueue(i915->wq);
+
mutex_lock(&i915->drm.struct_mutex);
/*
@@ -5114,11 +4603,9 @@ int i915_gem_suspend(struct drm_i915_private *i915)
i915_retire_requests(i915); /* ensure we flush after wedging */
mutex_unlock(&i915->drm.struct_mutex);
+ i915_reset_flush(i915);
- intel_uc_suspend(i915);
-
- cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work);
- cancel_delayed_work_sync(&i915->gt.retire_work);
+ drain_delayed_work(&i915->gt.retire_work);
/*
* As the idle_work is rearming if it detects a race, play safe and
@@ -5126,6 +4613,8 @@ int i915_gem_suspend(struct drm_i915_private *i915)
*/
drain_delayed_work(&i915->gt.idle_work);
+ intel_uc_suspend(i915);
+
/*
* Assert that we successfully flushed all the work and
* reset the GPU back to its idle, low power state.
@@ -5134,12 +4623,12 @@ int i915_gem_suspend(struct drm_i915_private *i915)
if (WARN_ON(!intel_engines_are_idle(i915)))
i915_gem_set_wedged(i915); /* no hope, discard everything */
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
return 0;
err_unlock:
mutex_unlock(&i915->drm.struct_mutex);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
return ret;
}
@@ -5233,15 +4722,15 @@ void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
DISP_TILE_SURFACE_SWIZZLING);
- if (IS_GEN5(dev_priv))
+ if (IS_GEN(dev_priv, 5))
return;
I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
- if (IS_GEN6(dev_priv))
+ if (IS_GEN(dev_priv, 6))
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
- else if (IS_GEN7(dev_priv))
+ else if (IS_GEN(dev_priv, 7))
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
- else if (IS_GEN8(dev_priv))
+ else if (IS_GEN(dev_priv, 8))
I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
else
BUG();
@@ -5263,10 +4752,10 @@ static void init_unused_rings(struct drm_i915_private *dev_priv)
init_unused_ring(dev_priv, SRB1_BASE);
init_unused_ring(dev_priv, SRB2_BASE);
init_unused_ring(dev_priv, SRB3_BASE);
- } else if (IS_GEN2(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 2)) {
init_unused_ring(dev_priv, SRB0_BASE);
init_unused_ring(dev_priv, SRB1_BASE);
- } else if (IS_GEN3(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 3)) {
init_unused_ring(dev_priv, PRB1_BASE);
init_unused_ring(dev_priv, PRB2_BASE);
}
@@ -5562,6 +5051,8 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
dev_priv->gt.cleanup_engine = intel_engine_cleanup;
}
+ i915_timelines_init(dev_priv);
+
ret = i915_gem_init_userptr(dev_priv);
if (ret)
return ret;
@@ -5590,7 +5081,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
}
ret = i915_gem_init_scratch(dev_priv,
- IS_GEN2(dev_priv) ? SZ_256K : PAGE_SIZE);
+ IS_GEN(dev_priv, 2) ? SZ_256K : PAGE_SIZE);
if (ret) {
GEM_BUG_ON(ret == -EIO);
goto err_ggtt;
@@ -5684,8 +5175,10 @@ err_unlock:
err_uc_misc:
intel_uc_fini_misc(dev_priv);
- if (ret != -EIO)
+ if (ret != -EIO) {
i915_gem_cleanup_userptr(dev_priv);
+ i915_timelines_fini(dev_priv);
+ }
if (ret == -EIO) {
mutex_lock(&dev_priv->drm.struct_mutex);
@@ -5736,6 +5229,7 @@ void i915_gem_fini(struct drm_i915_private *dev_priv)
intel_uc_fini_misc(dev_priv);
i915_gem_cleanup_userptr(dev_priv);
+ i915_timelines_fini(dev_priv);
i915_gem_drain_freed_objects(dev_priv);
@@ -5838,7 +5332,6 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv)
if (!dev_priv->priorities)
goto err_dependencies;
- INIT_LIST_HEAD(&dev_priv->gt.timelines);
INIT_LIST_HEAD(&dev_priv->gt.active_rings);
INIT_LIST_HEAD(&dev_priv->gt.closed_vma);
@@ -5850,6 +5343,7 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv)
i915_gem_idle_work_handler);
init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
+ mutex_init(&dev_priv->gpu_error.wedge_mutex);
atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
@@ -5881,7 +5375,6 @@ void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
WARN_ON(dev_priv->mm.object_count);
- WARN_ON(!list_empty(&dev_priv->gt.timelines));
kmem_cache_destroy(dev_priv->priorities);
kmem_cache_destroy(dev_priv->dependencies);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 371c07087095..280813a4bf82 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -86,10 +86,10 @@
*/
#include <linux/log2.h>
-#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
+#include "intel_lrc_reg.h"
#include "intel_workarounds.h"
#define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1
@@ -311,7 +311,7 @@ static u32 default_desc_template(const struct drm_i915_private *i915,
address_mode = INTEL_LEGACY_64B_CONTEXT;
desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;
- if (IS_GEN8(i915))
+ if (IS_GEN(i915, 8))
desc |= GEN8_CTX_L3LLC_COHERENT;
/* TODO: WaDisableLiteRestore when we start using semaphore
@@ -322,6 +322,32 @@ static u32 default_desc_template(const struct drm_i915_private *i915,
return desc;
}
+static void intel_context_retire(struct i915_active_request *active,
+ struct i915_request *rq)
+{
+ struct intel_context *ce =
+ container_of(active, typeof(*ce), active_tracker);
+
+ intel_context_unpin(ce);
+}
+
+void
+intel_context_init(struct intel_context *ce,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ ce->gem_context = ctx;
+
+ INIT_LIST_HEAD(&ce->signal_link);
+ INIT_LIST_HEAD(&ce->signals);
+
+ /* Use the whole device by default */
+ ce->sseu = intel_device_default_sseu(ctx->i915);
+
+ i915_active_request_init(&ce->active_tracker,
+ NULL, intel_context_retire);
+}
+
static struct i915_gem_context *
__create_hw_context(struct drm_i915_private *dev_priv,
struct drm_i915_file_private *file_priv)
@@ -339,11 +365,8 @@ __create_hw_context(struct drm_i915_private *dev_priv,
ctx->i915 = dev_priv;
ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
- for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
- struct intel_context *ce = &ctx->__engine[n];
-
- ce->gem_context = ctx;
- }
+ for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++)
+ intel_context_init(&ctx->__engine[n], ctx, dev_priv->engine[n]);
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
INIT_LIST_HEAD(&ctx->handles_list);
@@ -646,10 +669,10 @@ last_request_on_engine(struct i915_timeline *timeline,
GEM_BUG_ON(timeline == &engine->timeline);
- rq = i915_gem_active_raw(&timeline->last_request,
- &engine->i915->drm.struct_mutex);
+ rq = i915_active_request_raw(&timeline->last_request,
+ &engine->i915->drm.struct_mutex);
if (rq && rq->engine == engine) {
- GEM_TRACE("last request for %s on engine %s: %llx:%d\n",
+ GEM_TRACE("last request for %s on engine %s: %llx:%llu\n",
timeline->name, engine->name,
rq->fence.context, rq->fence.seqno);
GEM_BUG_ON(rq->timeline != timeline);
@@ -686,14 +709,14 @@ static bool engine_has_kernel_context_barrier(struct intel_engine_cs *engine)
* switch-to-kernel-context?
*/
if (!i915_timeline_sync_is_later(barrier, &rq->fence)) {
- GEM_TRACE("%s needs barrier for %llx:%d\n",
+ GEM_TRACE("%s needs barrier for %llx:%lld\n",
ring->timeline->name,
rq->fence.context,
rq->fence.seqno);
return false;
}
- GEM_TRACE("%s has barrier after %llx:%d\n",
+ GEM_TRACE("%s has barrier after %llx:%lld\n",
ring->timeline->name,
rq->fence.context,
rq->fence.seqno);
@@ -749,7 +772,7 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
if (prev->gem_context == i915->kernel_context)
continue;
- GEM_TRACE("add barrier on %s for %llx:%d\n",
+ GEM_TRACE("add barrier on %s for %llx:%lld\n",
engine->name,
prev->fence.context,
prev->fence.seqno);
@@ -840,6 +863,56 @@ out:
return 0;
}
+static int get_sseu(struct i915_gem_context *ctx,
+ struct drm_i915_gem_context_param *args)
+{
+ struct drm_i915_gem_context_param_sseu user_sseu;
+ struct intel_engine_cs *engine;
+ struct intel_context *ce;
+ int ret;
+
+ if (args->size == 0)
+ goto out;
+ else if (args->size < sizeof(user_sseu))
+ return -EINVAL;
+
+ if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
+ sizeof(user_sseu)))
+ return -EFAULT;
+
+ if (user_sseu.flags || user_sseu.rsvd)
+ return -EINVAL;
+
+ engine = intel_engine_lookup_user(ctx->i915,
+ user_sseu.engine_class,
+ user_sseu.engine_instance);
+ if (!engine)
+ return -EINVAL;
+
+ /* The only use for the mutex here is to serialize get_param and set_param. */
+ ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
+ if (ret)
+ return ret;
+
+ ce = to_intel_context(ctx, engine);
+
+ user_sseu.slice_mask = ce->sseu.slice_mask;
+ user_sseu.subslice_mask = ce->sseu.subslice_mask;
+ user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
+ user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;
+
+ mutex_unlock(&ctx->i915->drm.struct_mutex);
+
+ if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
+ sizeof(user_sseu)))
+ return -EFAULT;
+
+out:
+ args->size = sizeof(user_sseu);
+
+ return 0;
+}
+
int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
@@ -852,15 +925,17 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
if (!ctx)
return -ENOENT;
- args->size = 0;
switch (args->param) {
case I915_CONTEXT_PARAM_BAN_PERIOD:
ret = -EINVAL;
break;
case I915_CONTEXT_PARAM_NO_ZEROMAP:
+ args->size = 0;
args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
break;
case I915_CONTEXT_PARAM_GTT_SIZE:
+ args->size = 0;
+
if (ctx->ppgtt)
args->value = ctx->ppgtt->vm.total;
else if (to_i915(dev)->mm.aliasing_ppgtt)
@@ -869,14 +944,20 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
args->value = to_i915(dev)->ggtt.vm.total;
break;
case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
+ args->size = 0;
args->value = i915_gem_context_no_error_capture(ctx);
break;
case I915_CONTEXT_PARAM_BANNABLE:
+ args->size = 0;
args->value = i915_gem_context_is_bannable(ctx);
break;
case I915_CONTEXT_PARAM_PRIORITY:
+ args->size = 0;
args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
break;
+ case I915_CONTEXT_PARAM_SSEU:
+ ret = get_sseu(ctx, args);
+ break;
default:
ret = -EINVAL;
break;
@@ -886,6 +967,281 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
return ret;
}
+static int gen8_emit_rpcs_config(struct i915_request *rq,
+ struct intel_context *ce,
+ struct intel_sseu sseu)
+{
+ u64 offset;
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ offset = i915_ggtt_offset(ce->state) +
+ LRC_STATE_PN * PAGE_SIZE +
+ (CTX_R_PWR_CLK_STATE + 1) * 4;
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+ *cs++ = gen8_make_rpcs(rq->i915, &sseu);
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int
+gen8_modify_rpcs_gpu(struct intel_context *ce,
+ struct intel_engine_cs *engine,
+ struct intel_sseu sseu)
+{
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_request *rq, *prev;
+ intel_wakeref_t wakeref;
+ int ret;
+
+ GEM_BUG_ON(!ce->pin_count);
+
+ lockdep_assert_held(&i915->drm.struct_mutex);
+
+ /* Submitting requests etc needs the hw awake. */
+ wakeref = intel_runtime_pm_get(i915);
+
+ rq = i915_request_alloc(engine, i915->kernel_context);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ goto out_put;
+ }
+
+ /* Queue this switch after all other activity by this context. */
+ prev = i915_active_request_raw(&ce->ring->timeline->last_request,
+ &i915->drm.struct_mutex);
+ if (prev && !i915_request_completed(prev)) {
+ ret = i915_request_await_dma_fence(rq, &prev->fence);
+ if (ret < 0)
+ goto out_add;
+ }
+
+ /* Order all following requests to be after. */
+ ret = i915_timeline_set_barrier(ce->ring->timeline, rq);
+ if (ret)
+ goto out_add;
+
+ ret = gen8_emit_rpcs_config(rq, ce, sseu);
+ if (ret)
+ goto out_add;
+
+ /*
+ * Guarantee context image and the timeline remains pinned until the
+ * modifying request is retired by setting the ce activity tracker.
+ *
+ * But we only need to take one pin on account of it. In other
+ * words, transfer the pinned ce object to the tracked active request.
+ */
+ if (!i915_active_request_isset(&ce->active_tracker))
+ __intel_context_pin(ce);
+ __i915_active_request_set(&ce->active_tracker, rq);
+
+out_add:
+ i915_request_add(rq);
+out_put:
+ intel_runtime_pm_put(i915, wakeref);
+
+ return ret;
+}
+
+static int
+__i915_gem_context_reconfigure_sseu(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ struct intel_sseu sseu)
+{
+ struct intel_context *ce = to_intel_context(ctx, engine);
+ int ret = 0;
+
+ GEM_BUG_ON(INTEL_GEN(ctx->i915) < 8);
+ GEM_BUG_ON(engine->id != RCS);
+
+ /* Nothing to do if unmodified. */
+ if (!memcmp(&ce->sseu, &sseu, sizeof(sseu)))
+ return 0;
+
+ /*
+ * If the context is not idle, we have to submit an ordered request to modify
+ * its context image via the kernel context. Pristine and idle contexts
+ * will be configured on pinning.
+ */
+ if (ce->pin_count)
+ ret = gen8_modify_rpcs_gpu(ce, engine, sseu);
+
+ if (!ret)
+ ce->sseu = sseu;
+
+ return ret;
+}
+
+static int
+i915_gem_context_reconfigure_sseu(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ struct intel_sseu sseu)
+{
+ int ret;
+
+ ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
+ if (ret)
+ return ret;
+
+ ret = __i915_gem_context_reconfigure_sseu(ctx, engine, sseu);
+
+ mutex_unlock(&ctx->i915->drm.struct_mutex);
+
+ return ret;
+}
+
+static int
+user_to_context_sseu(struct drm_i915_private *i915,
+ const struct drm_i915_gem_context_param_sseu *user,
+ struct intel_sseu *context)
+{
+ const struct sseu_dev_info *device = &RUNTIME_INFO(i915)->sseu;
+
+ /* No zeros in any field. */
+ if (!user->slice_mask || !user->subslice_mask ||
+ !user->min_eus_per_subslice || !user->max_eus_per_subslice)
+ return -EINVAL;
+
+ /* Max > min. */
+ if (user->max_eus_per_subslice < user->min_eus_per_subslice)
+ return -EINVAL;
+
+ /*
+ * Some future proofing on the types since the uAPI is wider than the
+ * current internal implementation.
+ */
+ if (overflows_type(user->slice_mask, context->slice_mask) ||
+ overflows_type(user->subslice_mask, context->subslice_mask) ||
+ overflows_type(user->min_eus_per_subslice,
+ context->min_eus_per_subslice) ||
+ overflows_type(user->max_eus_per_subslice,
+ context->max_eus_per_subslice))
+ return -EINVAL;
+
+ /* Check validity against hardware. */
+ if (user->slice_mask & ~device->slice_mask)
+ return -EINVAL;
+
+ if (user->subslice_mask & ~device->subslice_mask[0])
+ return -EINVAL;
+
+ if (user->max_eus_per_subslice > device->max_eus_per_subslice)
+ return -EINVAL;
+
+ context->slice_mask = user->slice_mask;
+ context->subslice_mask = user->subslice_mask;
+ context->min_eus_per_subslice = user->min_eus_per_subslice;
+ context->max_eus_per_subslice = user->max_eus_per_subslice;
+
+ /* Part specific restrictions. */
+ if (IS_GEN(i915, 11)) {
+ unsigned int hw_s = hweight8(device->slice_mask);
+ unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]);
+ unsigned int req_s = hweight8(context->slice_mask);
+ unsigned int req_ss = hweight8(context->subslice_mask);
+
+ /*
+ * Only full subslice enablement is possible if more than one
+ * slice is turned on.
+ */
+ if (req_s > 1 && req_ss != hw_ss_per_s)
+ return -EINVAL;
+
+ /*
+ * If more than four (SScount bitfield limit) subslices are
+ * requested then the number has to be even.
+ */
+ if (req_ss > 4 && (req_ss & 1))
+ return -EINVAL;
+
+ /*
+ * If only one slice is enabled and subslice count is below the
+ * device full enablement, it must be at most half of all the
+ * available subslices.
+ */
+ if (req_s == 1 && req_ss < hw_ss_per_s &&
+ req_ss > (hw_ss_per_s / 2))
+ return -EINVAL;
+
+ /* ABI restriction - VME use case only. */
+
+ /* All slices or one slice only. */
+ if (req_s != 1 && req_s != hw_s)
+ return -EINVAL;
+
+ /*
+ * Half subslices or full enablement only when one slice is
+ * enabled.
+ */
+ if (req_s == 1 &&
+ (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2)))
+ return -EINVAL;
+
+ /* No EU configuration changes. */
+ if ((user->min_eus_per_subslice !=
+ device->max_eus_per_subslice) ||
+ (user->max_eus_per_subslice !=
+ device->max_eus_per_subslice))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int set_sseu(struct i915_gem_context *ctx,
+ struct drm_i915_gem_context_param *args)
+{
+ struct drm_i915_private *i915 = ctx->i915;
+ struct drm_i915_gem_context_param_sseu user_sseu;
+ struct intel_engine_cs *engine;
+ struct intel_sseu sseu;
+ int ret;
+
+ if (args->size < sizeof(user_sseu))
+ return -EINVAL;
+
+ if (!IS_GEN(i915, 11))
+ return -ENODEV;
+
+ if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
+ sizeof(user_sseu)))
+ return -EFAULT;
+
+ if (user_sseu.flags || user_sseu.rsvd)
+ return -EINVAL;
+
+ engine = intel_engine_lookup_user(i915,
+ user_sseu.engine_class,
+ user_sseu.engine_instance);
+ if (!engine)
+ return -EINVAL;
+
+ /* Only render engine supports RPCS configuration. */
+ if (engine->class != RENDER_CLASS)
+ return -ENODEV;
+
+ ret = user_to_context_sseu(i915, &user_sseu, &sseu);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_context_reconfigure_sseu(ctx, engine, sseu);
+ if (ret)
+ return ret;
+
+ args->size = sizeof(user_sseu);
+
+ return 0;
+}
+
int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
@@ -948,7 +1304,9 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
I915_USER_PRIORITY(priority);
}
break;
-
+ case I915_CONTEXT_PARAM_SSEU:
+ ret = set_sseu(ctx, args);
+ break;
default:
ret = -EINVAL;
break;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index f6d870b1f73e..ca150a764c24 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -31,6 +31,7 @@
#include "i915_gem.h"
#include "i915_scheduler.h"
+#include "intel_device_info.h"
struct pid;
@@ -53,6 +54,16 @@ struct intel_context_ops {
void (*destroy)(struct intel_context *ce);
};
+/*
+ * Powergating configuration for a particular (context,engine).
+ */
+struct intel_sseu {
+ u8 slice_mask;
+ u8 subslice_mask;
+ u8 min_eus_per_subslice;
+ u8 max_eus_per_subslice;
+};
+
/**
* struct i915_gem_context - client state
*
@@ -164,13 +175,24 @@ struct i915_gem_context {
struct intel_context {
struct i915_gem_context *gem_context;
struct intel_engine_cs *active;
+ struct list_head signal_link;
+ struct list_head signals;
struct i915_vma *state;
struct intel_ring *ring;
u32 *lrc_reg_state;
u64 lrc_desc;
int pin_count;
+ /**
+ * active_tracker: Active tracker for the external rq activity
+ * on this intel_context object.
+ */
+ struct i915_active_request active_tracker;
+
const struct intel_context_ops *ops;
+
+ /** sseu: Control eu/slice partitioning */
+ struct intel_sseu sseu;
} __engine[I915_NUM_ENGINES];
/** ring_size: size for allocating the per-engine ring buffer */
@@ -364,4 +386,8 @@ static inline void i915_gem_context_put(struct i915_gem_context *ctx)
kref_put(&ctx->ref, i915_gem_context_release);
}
+void intel_context_init(struct intel_context *ce,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine);
+
#endif /* !__I915_GEM_CONTEXT_H__ */
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 82e2ca17a441..02f7298bfe57 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -27,7 +27,6 @@
#include <linux/dma-buf.h>
#include <linux/reservation.h>
-#include <drm/drmP.h>
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 02b83a5ed96c..68d74c50ac39 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -26,7 +26,6 @@
*
*/
-#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
@@ -127,31 +126,25 @@ i915_gem_evict_something(struct i915_address_space *vm,
struct drm_i915_private *dev_priv = vm->i915;
struct drm_mm_scan scan;
struct list_head eviction_list;
- struct list_head *phases[] = {
- &vm->inactive_list,
- &vm->active_list,
- NULL,
- }, **phase;
struct i915_vma *vma, *next;
struct drm_mm_node *node;
enum drm_mm_insert_mode mode;
+ struct i915_vma *active;
int ret;
lockdep_assert_held(&vm->i915->drm.struct_mutex);
trace_i915_gem_evict(vm, min_size, alignment, flags);
/*
- * The goal is to evict objects and amalgamate space in LRU order.
- * The oldest idle objects reside on the inactive list, which is in
- * retirement order. The next objects to retire are those in flight,
- * on the active list, again in retirement order.
+ * The goal is to evict objects and amalgamate space in rough LRU order.
+ * Since both active and inactive objects reside on the same list,
+ * in a mix of creation and last scanned order, as we process the list
+ * we sort it into inactive/active, which keeps the active portion
+ * in a rough MRU order.
*
* The retirement sequence is thus:
- * 1. Inactive objects (already retired)
- * 2. Active objects (will stall on unbinding)
- *
- * On each list, the oldest objects lie at the HEAD with the freshest
- * object on the TAIL.
+ * 1. Inactive objects (already retired, random order)
+ * 2. Active objects (will stall on unbinding, oldest scanned first)
*/
mode = DRM_MM_INSERT_BEST;
if (flags & PIN_HIGH)
@@ -170,17 +163,46 @@ i915_gem_evict_something(struct i915_address_space *vm,
*/
if (!(flags & PIN_NONBLOCK))
i915_retire_requests(dev_priv);
- else
- phases[1] = NULL;
search_again:
+ active = NULL;
INIT_LIST_HEAD(&eviction_list);
- phase = phases;
- do {
- list_for_each_entry(vma, *phase, vm_link)
- if (mark_free(&scan, vma, flags, &eviction_list))
- goto found;
- } while (*++phase);
+ list_for_each_entry_safe(vma, next, &vm->bound_list, vm_link) {
+ /*
+ * We keep this list in a rough least-recently scanned order
+ * of active elements (inactive elements are cheap to reap).
+ * New entries are added to the end, and we move anything we
+ * scan to the end. The assumption is that the working set
+ * of applications is either steady state (and thanks to the
+ * userspace bo cache it almost always is) or volatile and
+ * frequently replaced after a frame, which are self-evicting!
+ * Given that assumption, the MRU order of the scan list is
+ * fairly static, and keeping it in least-recently scan order
+ * is suitable.
+ *
+ * To notice when we complete one full cycle, we record the
+ * first active element seen, before moving it to the tail.
+ */
+ if (i915_vma_is_active(vma)) {
+ if (vma == active) {
+ if (flags & PIN_NONBLOCK)
+ break;
+
+ active = ERR_PTR(-EAGAIN);
+ }
+
+ if (active != ERR_PTR(-EAGAIN)) {
+ if (!active)
+ active = vma;
+
+ list_move_tail(&vma->vm_link, &vm->bound_list);
+ continue;
+ }
+ }
+
+ if (mark_free(&scan, vma, flags, &eviction_list))
+ goto found;
+ }
/* Nothing found, clean up and bail out! */
list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
@@ -389,11 +411,6 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
*/
int i915_gem_evict_vm(struct i915_address_space *vm)
{
- struct list_head *phases[] = {
- &vm->inactive_list,
- &vm->active_list,
- NULL
- }, **phase;
struct list_head eviction_list;
struct i915_vma *vma, *next;
int ret;
@@ -413,16 +430,15 @@ int i915_gem_evict_vm(struct i915_address_space *vm)
}
INIT_LIST_HEAD(&eviction_list);
- phase = phases;
- do {
- list_for_each_entry(vma, *phase, vm_link) {
- if (i915_vma_is_pinned(vma))
- continue;
+ mutex_lock(&vm->mutex);
+ list_for_each_entry(vma, &vm->bound_list, vm_link) {
+ if (i915_vma_is_pinned(vma))
+ continue;
- __i915_vma_pin(vma);
- list_add(&vma->evict_link, &eviction_list);
- }
- } while (*++phase);
+ __i915_vma_pin(vma);
+ list_add(&vma->evict_link, &eviction_list);
+ }
+ mutex_unlock(&vm->mutex);
ret = 0;
list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 485b259127c3..02adcaf6ebea 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -31,7 +31,6 @@
#include <linux/sync_file.h>
#include <linux/uaccess.h>
-#include <drm/drmP.h>
#include <drm/drm_syncobj.h>
#include <drm/i915_drm.h>
@@ -754,6 +753,68 @@ static int eb_select_context(struct i915_execbuffer *eb)
return 0;
}
+static struct i915_request *__eb_wait_for_ring(struct intel_ring *ring)
+{
+ struct i915_request *rq;
+
+ /*
+ * Completely unscientific finger-in-the-air estimates for suitable
+ * maximum user request size (to avoid blocking) and then backoff.
+ */
+ if (intel_ring_update_space(ring) >= PAGE_SIZE)
+ return NULL;
+
+ /*
+ * Find a request such that, after waiting upon it, there will be at
+ * least half the ring available. The hysteresis allows us to compete for the
+ * shared ring and should mean that we sleep less often prior to
+ * claiming our resources, but not so long that the ring completely
+ * drains before we can submit our next request.
+ */
+ list_for_each_entry(rq, &ring->request_list, ring_link) {
+ if (__intel_ring_space(rq->postfix,
+ ring->emit, ring->size) > ring->size / 2)
+ break;
+ }
+ if (&rq->ring_link == &ring->request_list)
+ return NULL; /* weird, we will check again later for real */
+
+ return i915_request_get(rq);
+}
+
+static int eb_wait_for_ring(const struct i915_execbuffer *eb)
+{
+ const struct intel_context *ce;
+ struct i915_request *rq;
+ int ret = 0;
+
+ /*
+ * Apply a light amount of backpressure to prevent excessive hogs
+ * from blocking waiting for space whilst holding struct_mutex and
+ * keeping all of their resources pinned.
+ */
+
+ ce = to_intel_context(eb->ctx, eb->engine);
+ if (!ce->ring) /* first use, assume empty! */
+ return 0;
+
+ rq = __eb_wait_for_ring(ce->ring);
+ if (rq) {
+ mutex_unlock(&eb->i915->drm.struct_mutex);
+
+ if (i915_request_wait(rq,
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT) < 0)
+ ret = -EINTR;
+
+ i915_request_put(rq);
+
+ mutex_lock(&eb->i915->drm.struct_mutex);
+ }
+
+ return ret;
+}
+
static int eb_lookup_vmas(struct i915_execbuffer *eb)
{
struct radix_tree_root *handles_vma = &eb->ctx->handles_vma;
@@ -1380,7 +1441,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
* batchbuffers.
*/
if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
- IS_GEN6(eb->i915)) {
+ IS_GEN(eb->i915, 6)) {
err = i915_vma_bind(target, target->obj->cache_level,
PIN_GLOBAL);
if (WARN_ONCE(err,
@@ -1896,7 +1957,7 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
u32 *cs;
int i;
- if (!IS_GEN7(rq->i915) || rq->engine->id != RCS) {
+ if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS) {
DRM_DEBUG("sol reset is gen7/rcs only\n");
return -EINVAL;
}
@@ -1977,6 +2038,18 @@ static int eb_submit(struct i915_execbuffer *eb)
return err;
}
+ /*
+ * After we completed waiting for other engines (using HW semaphores)
+ * then we can signal that this request/batch is ready to run. This
+ * allows us to determine if the batch is still waiting on the GPU
+ * or actually running by checking the breadcrumb.
+ */
+ if (eb->engine->emit_init_breadcrumb) {
+ err = eb->engine->emit_init_breadcrumb(eb->request);
+ if (err)
+ return err;
+ }
+
err = eb->engine->emit_bb_start(eb->request,
eb->batch->node.start +
eb->batch_start_offset,
@@ -2203,6 +2276,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
struct i915_execbuffer eb;
struct dma_fence *in_fence = NULL;
struct sync_file *out_fence = NULL;
+ intel_wakeref_t wakeref;
int out_fence_fd = -1;
int err;
@@ -2273,12 +2347,16 @@ i915_gem_do_execbuffer(struct drm_device *dev,
* wakeref that we hold until the GPU has been idle for at least
* 100ms.
*/
- intel_runtime_pm_get(eb.i915);
+ wakeref = intel_runtime_pm_get(eb.i915);
err = i915_mutex_lock_interruptible(dev);
if (err)
goto err_rpm;
+ err = eb_wait_for_ring(&eb); /* may temporarily drop struct_mutex */
+ if (unlikely(err))
+ goto err_unlock;
+
err = eb_relocate(&eb);
if (err) {
/*
@@ -2423,9 +2501,10 @@ err_batch_unpin:
err_vma:
if (eb.exec)
eb_release_vmas(&eb);
+err_unlock:
mutex_unlock(&dev->struct_mutex);
err_rpm:
- intel_runtime_pm_put(eb.i915);
+ intel_runtime_pm_put(eb.i915, wakeref);
i915_gem_context_put(eb.ctx);
err_destroy:
eb_destroy(&eb);
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index d548ac05ccd7..e037e94792f3 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -21,7 +21,6 @@
* IN THE SOFTWARE.
*/
-#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
@@ -193,9 +192,9 @@ static void fence_write(struct drm_i915_fence_reg *fence,
* and explicitly managed for internal users.
*/
- if (IS_GEN2(fence->i915))
+ if (IS_GEN(fence->i915, 2))
i830_write_fence_reg(fence, vma);
- else if (IS_GEN3(fence->i915))
+ else if (IS_GEN(fence->i915, 3))
i915_write_fence_reg(fence, vma);
else
i965_write_fence_reg(fence, vma);
@@ -210,6 +209,7 @@ static void fence_write(struct drm_i915_fence_reg *fence,
static int fence_update(struct drm_i915_fence_reg *fence,
struct i915_vma *vma)
{
+ intel_wakeref_t wakeref;
int ret;
if (vma) {
@@ -223,7 +223,7 @@ static int fence_update(struct drm_i915_fence_reg *fence,
i915_gem_object_get_tiling(vma->obj)))
return -EINVAL;
- ret = i915_gem_active_retire(&vma->last_fence,
+ ret = i915_active_request_retire(&vma->last_fence,
&vma->obj->base.dev->struct_mutex);
if (ret)
return ret;
@@ -232,7 +232,7 @@ static int fence_update(struct drm_i915_fence_reg *fence,
if (fence->vma) {
struct i915_vma *old = fence->vma;
- ret = i915_gem_active_retire(&old->last_fence,
+ ret = i915_active_request_retire(&old->last_fence,
&old->obj->base.dev->struct_mutex);
if (ret)
return ret;
@@ -257,9 +257,10 @@ static int fence_update(struct drm_i915_fence_reg *fence,
* If the device is currently powered down, we will defer the write
* to the runtime resume, see i915_gem_restore_fences().
*/
- if (intel_runtime_pm_get_if_in_use(fence->i915)) {
+ wakeref = intel_runtime_pm_get_if_in_use(fence->i915);
+ if (wakeref) {
fence_write(fence, vma);
- intel_runtime_pm_put(fence->i915);
+ intel_runtime_pm_put(fence->i915, wakeref);
}
if (vma) {
@@ -554,8 +555,8 @@ void i915_gem_restore_fences(struct drm_i915_private *dev_priv)
void
i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
{
- uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
- uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+ u32 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+ u32 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
if (INTEL_GEN(dev_priv) >= 8 || IS_VALLEYVIEW(dev_priv)) {
/*
@@ -578,7 +579,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
}
} else {
- uint32_t dimm_c0, dimm_c1;
+ u32 dimm_c0, dimm_c1;
dimm_c0 = I915_READ(MAD_DIMM_C0);
dimm_c1 = I915_READ(MAD_DIMM_C1);
dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
@@ -596,13 +597,13 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
}
}
- } else if (IS_GEN5(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 5)) {
/* On Ironlake whatever DRAM config, GPU always do
* same swizzling setup.
*/
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
- } else if (IS_GEN2(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 2)) {
/* As far as we know, the 865 doesn't have these bit 6
* swizzling issues.
*/
@@ -610,7 +611,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
} else if (IS_MOBILE(dev_priv) ||
IS_I915G(dev_priv) || IS_I945G(dev_priv)) {
- uint32_t dcc;
+ u32 dcc;
/* On 9xx chipsets, channel interleave by the CPU is
* determined by DCC. For single-channel, neither the CPU
@@ -647,7 +648,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
}
/* check for L-shaped memory aka modified enhanced addressing */
- if (IS_GEN4(dev_priv) &&
+ if (IS_GEN(dev_priv, 4) &&
!(I915_READ(DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) {
swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.h b/drivers/gpu/drm/i915/i915_gem_fence_reg.h
index 99a31ded4dfd..09dcaf14121b 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.h
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.h
@@ -50,4 +50,3 @@ struct drm_i915_fence_reg {
};
#endif
-
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index bd17dd1f5da5..d646d37eec2f 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -33,11 +33,11 @@
#include <asm/set_memory.h>
-#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_vgpu.h"
+#include "i915_reset.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
@@ -474,8 +474,7 @@ static void vm_free_page(struct i915_address_space *vm, struct page *page)
spin_unlock(&vm->free_pages.lock);
}
-static void i915_address_space_init(struct i915_address_space *vm,
- struct drm_i915_private *dev_priv)
+static void i915_address_space_init(struct i915_address_space *vm, int subclass)
{
/*
* The vm->mutex must be reclaim safe (for use in the shrinker).
@@ -483,7 +482,8 @@ static void i915_address_space_init(struct i915_address_space *vm,
* attempt holding the lock is immediately reported by lockdep.
*/
mutex_init(&vm->mutex);
- i915_gem_shrinker_taints_mutex(&vm->mutex);
+ lockdep_set_subclass(&vm->mutex, subclass);
+ i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
GEM_BUG_ON(!vm->total);
drm_mm_init(&vm->mm, 0, vm->total);
@@ -491,9 +491,8 @@ static void i915_address_space_init(struct i915_address_space *vm,
stash_init(&vm->free_pages);
- INIT_LIST_HEAD(&vm->active_list);
- INIT_LIST_HEAD(&vm->inactive_list);
INIT_LIST_HEAD(&vm->unbound_list);
+ INIT_LIST_HEAD(&vm->bound_list);
}
static void i915_address_space_fini(struct i915_address_space *vm)
@@ -1423,8 +1422,6 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
gen8_initialize_pd(vm, pd);
gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
GEM_BUG_ON(pdp->used_pdpes > i915_pdpes_per_pdp(vm));
-
- mark_tlbs_dirty(i915_vm_to_ppgtt(vm));
}
ret = gen8_ppgtt_alloc_pd(vm, pd, start, length);
@@ -1490,84 +1487,6 @@ unwind:
return -ENOMEM;
}
-static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
- struct i915_page_directory_pointer *pdp,
- u64 start, u64 length,
- gen8_pte_t scratch_pte,
- struct seq_file *m)
-{
- struct i915_address_space *vm = &ppgtt->vm;
- struct i915_page_directory *pd;
- u32 pdpe;
-
- gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
- struct i915_page_table *pt;
- u64 pd_len = length;
- u64 pd_start = start;
- u32 pde;
-
- if (pdp->page_directory[pdpe] == ppgtt->vm.scratch_pd)
- continue;
-
- seq_printf(m, "\tPDPE #%d\n", pdpe);
- gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
- u32 pte;
- gen8_pte_t *pt_vaddr;
-
- if (pd->page_table[pde] == ppgtt->vm.scratch_pt)
- continue;
-
- pt_vaddr = kmap_atomic_px(pt);
- for (pte = 0; pte < GEN8_PTES; pte += 4) {
- u64 va = (pdpe << GEN8_PDPE_SHIFT |
- pde << GEN8_PDE_SHIFT |
- pte << GEN8_PTE_SHIFT);
- int i;
- bool found = false;
-
- for (i = 0; i < 4; i++)
- if (pt_vaddr[pte + i] != scratch_pte)
- found = true;
- if (!found)
- continue;
-
- seq_printf(m, "\t\t0x%llx [%03d,%03d,%04d]: =", va, pdpe, pde, pte);
- for (i = 0; i < 4; i++) {
- if (pt_vaddr[pte + i] != scratch_pte)
- seq_printf(m, " %llx", pt_vaddr[pte + i]);
- else
- seq_puts(m, " SCRATCH ");
- }
- seq_puts(m, "\n");
- }
- kunmap_atomic(pt_vaddr);
- }
- }
-}
-
-static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
-{
- struct i915_address_space *vm = &ppgtt->vm;
- const gen8_pte_t scratch_pte = vm->scratch_pte;
- u64 start = 0, length = ppgtt->vm.total;
-
- if (use_4lvl(vm)) {
- u64 pml4e;
- struct i915_pml4 *pml4 = &ppgtt->pml4;
- struct i915_page_directory_pointer *pdp;
-
- gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
- if (pml4->pdps[pml4e] == ppgtt->vm.scratch_pdp)
- continue;
-
- seq_printf(m, " PML4E #%llu\n", pml4e);
- gen8_dump_pdp(ppgtt, pdp, start, length, scratch_pte, m);
- }
- } else {
- gen8_dump_pdp(ppgtt, &ppgtt->pdp, start, length, scratch_pte, m);
- }
-}
-
static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
{
struct i915_address_space *vm = &ppgtt->vm;
@@ -1628,7 +1547,7 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
/* From bdw, there is support for read-only pages in the PPGTT. */
ppgtt->vm.has_read_only = true;
- i915_address_space_init(&ppgtt->vm, i915);
+ i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
/* There are only few exceptions for gen >=6. chv and bxt.
* And we are not sure about the latter so play safe for now.
@@ -1672,7 +1591,6 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
gen8_ppgtt_notify_vgt(ppgtt, true);
ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
- ppgtt->debug_dump = gen8_dump_ppgtt;
ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma;
ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
@@ -1688,60 +1606,6 @@ err_free:
return ERR_PTR(err);
}
-static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m)
-{
- struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
- const gen6_pte_t scratch_pte = base->vm.scratch_pte;
- struct i915_page_table *pt;
- u32 pte, pde;
-
- gen6_for_all_pdes(pt, &base->pd, pde) {
- gen6_pte_t *vaddr;
-
- if (pt == base->vm.scratch_pt)
- continue;
-
- if (i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND)) {
- u32 expected =
- GEN6_PDE_ADDR_ENCODE(px_dma(pt)) |
- GEN6_PDE_VALID;
- u32 pd_entry = readl(ppgtt->pd_addr + pde);
-
- if (pd_entry != expected)
- seq_printf(m,
- "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
- pde,
- pd_entry,
- expected);
-
- seq_printf(m, "\tPDE: %x\n", pd_entry);
- }
-
- vaddr = kmap_atomic_px(base->pd.page_table[pde]);
- for (pte = 0; pte < GEN6_PTES; pte += 4) {
- int i;
-
- for (i = 0; i < 4; i++)
- if (vaddr[pte + i] != scratch_pte)
- break;
- if (i == 4)
- continue;
-
- seq_printf(m, "\t\t(%03d, %04d) %08llx: ",
- pde, pte,
- (pde * GEN6_PTES + pte) * I915_GTT_PAGE_SIZE);
- for (i = 0; i < 4; i++) {
- if (vaddr[pte + i] != scratch_pte)
- seq_printf(m, " %08x", vaddr[pte + i]);
- else
- seq_puts(m, " SCRATCH");
- }
- seq_puts(m, "\n");
- }
- kunmap_atomic(vaddr);
- }
-}
-
/* Write pde (index) from the page directory @pd to the page table @pt */
static inline void gen6_write_pde(const struct gen6_hw_ppgtt *ppgtt,
const unsigned int pde,
@@ -2053,21 +1917,23 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
if (!vma)
return ERR_PTR(-ENOMEM);
- init_request_active(&vma->last_fence, NULL);
+ i915_active_init(i915, &vma->active, NULL);
+ INIT_ACTIVE_REQUEST(&vma->last_fence);
vma->vm = &ggtt->vm;
vma->ops = &pd_vma_ops;
vma->private = ppgtt;
- vma->active = RB_ROOT;
-
vma->size = size;
vma->fence_size = size;
vma->flags = I915_VMA_GGTT;
vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */
INIT_LIST_HEAD(&vma->obj_link);
+
+ mutex_lock(&vma->vm->mutex);
list_add(&vma->vm_link, &vma->vm->unbound_list);
+ mutex_unlock(&vma->vm->mutex);
return vma;
}
@@ -2132,13 +1998,12 @@ static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
ppgtt->base.vm.total = I915_PDES * GEN6_PTES * I915_GTT_PAGE_SIZE;
- i915_address_space_init(&ppgtt->base.vm, i915);
+ i915_address_space_init(&ppgtt->base.vm, VM_CLASS_PPGTT);
ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;
- ppgtt->base.debug_dump = gen6_dump_ppgtt;
ppgtt->base.vm.vma_ops.bind_vma = ppgtt_bind_vma;
ppgtt->base.vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
@@ -2204,9 +2069,9 @@ int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
{
gtt_write_workarounds(dev_priv);
- if (IS_GEN6(dev_priv))
+ if (IS_GEN(dev_priv, 6))
gen6_ppgtt_enable(dev_priv);
- else if (IS_GEN7(dev_priv))
+ else if (IS_GEN(dev_priv, 7))
gen7_ppgtt_enable(dev_priv);
return 0;
@@ -2247,8 +2112,7 @@ void i915_ppgtt_close(struct i915_address_space *vm)
static void ppgtt_destroy_vma(struct i915_address_space *vm)
{
struct list_head *phases[] = {
- &vm->active_list,
- &vm->inactive_list,
+ &vm->bound_list,
&vm->unbound_list,
NULL,
}, **phase;
@@ -2271,8 +2135,7 @@ void i915_ppgtt_release(struct kref *kref)
ppgtt_destroy_vma(&ppgtt->vm);
- GEM_BUG_ON(!list_empty(&ppgtt->vm.active_list));
- GEM_BUG_ON(!list_empty(&ppgtt->vm.inactive_list));
+ GEM_BUG_ON(!list_empty(&ppgtt->vm.bound_list));
GEM_BUG_ON(!list_empty(&ppgtt->vm.unbound_list));
ppgtt->vm.cleanup(&ppgtt->vm);
@@ -2288,7 +2151,7 @@ static bool needs_idle_maps(struct drm_i915_private *dev_priv)
/* Query intel_iommu to see if we need the workaround. Presumably that
* was loaded first.
*/
- return IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_vtd_active();
+ return IS_GEN(dev_priv, 5) && IS_MOBILE(dev_priv) && intel_vtd_active();
}
static void gen6_check_faults(struct drm_i915_private *dev_priv)
@@ -2381,7 +2244,8 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
DMA_ATTR_NO_WARN))
return 0;
- /* If the DMA remap fails, one cause can be that we have
+ /*
+ * If the DMA remap fails, one cause can be that we have
* too many objects pinned in a small remapping table,
* such as swiotlb. Incrementally purge all other objects and
* try again - if there are no more pages to remove from
@@ -2391,8 +2255,7 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
} while (i915_gem_shrink(to_i915(obj->base.dev),
obj->base.size >> PAGE_SHIFT, NULL,
I915_SHRINK_BOUND |
- I915_SHRINK_UNBOUND |
- I915_SHRINK_ACTIVE));
+ I915_SHRINK_UNBOUND));
return -ENOSPC;
}
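
The comment in this hunk describes a map-then-shrink retry loop: attempt the DMA mapping, and on failure ask i915_gem_shrink() to purge bound objects before trying again, giving up only once the shrinker has nothing left. Below is a minimal userspace sketch of that control flow; try_dma_map() and shrink_some() are hypothetical stand-ins for the real mapping call and shrinker, not i915 APIs.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins: a mapping that only succeeds once enough
 * "pages" have been purged, and a shrinker that frees a few at a time. */
static unsigned long pinned_pages = 10;

static bool try_dma_map(void)
{
	return pinned_pages <= 4;	/* pretend the remap table holds 4 */
}

static unsigned long shrink_some(void)
{
	unsigned long freed = pinned_pages ? 2 : 0;

	pinned_pages -= freed;
	return freed;
}

static int map_with_retry(void)
{
	do {
		if (try_dma_map())
			return 0;
		/* Mapping failed: purge other objects and retry until the
		 * shrinker reports nothing left to reclaim. */
	} while (shrink_some());

	return -ENOSPC;
}

int main(void)
{
	printf("map_with_retry() = %d\n", map_with_retry());
	return 0;
}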
@@ -2664,6 +2527,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
{
struct drm_i915_private *i915 = vma->vm->i915;
struct drm_i915_gem_object *obj = vma->obj;
+ intel_wakeref_t wakeref;
u32 pte_flags;
/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
@@ -2671,9 +2535,8 @@ static int ggtt_bind_vma(struct i915_vma *vma,
if (i915_gem_object_is_readonly(obj))
pte_flags |= PTE_READ_ONLY;
- intel_runtime_pm_get(i915);
- vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
- intel_runtime_pm_put(i915);
+ with_intel_runtime_pm(i915, wakeref)
+ vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
@@ -2690,10 +2553,10 @@ static int ggtt_bind_vma(struct i915_vma *vma,
static void ggtt_unbind_vma(struct i915_vma *vma)
{
struct drm_i915_private *i915 = vma->vm->i915;
+ intel_wakeref_t wakeref;
- intel_runtime_pm_get(i915);
- vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
- intel_runtime_pm_put(i915);
+ with_intel_runtime_pm(i915, wakeref)
+ vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
}
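
Several hunks here replace explicit intel_runtime_pm_get()/intel_runtime_pm_put() pairs with a scoped with_intel_runtime_pm(i915, wakeref) block, so the put can no longer be forgotten on an early exit. Such scoped helpers are commonly built as a one-iteration for loop; the sketch below is a userspace analogue under that assumption, not the i915 definition, and resource_get()/resource_put() are made-up names.

#include <stdio.h>

typedef int wakeref_t;

static wakeref_t resource_get(void)
{
	puts("get");
	return 1;
}

static void resource_put(wakeref_t wf)
{
	(void)wf;
	puts("put");
}

/* One-iteration for loop: acquire before the body, release after it.
 * A 'break' inside the body would skip the release, which is why such
 * macros are only used with simple bodies. */
#define with_resource(wf) \
	for ((wf) = resource_get(); (wf); resource_put(wf), (wf) = 0)

int main(void)
{
	wakeref_t wf;

	with_resource(wf)
		puts("body runs while the resource is held");

	return 0;
}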
static int aliasing_gtt_bind_vma(struct i915_vma *vma,
@@ -2725,9 +2588,12 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
}
if (flags & I915_VMA_GLOBAL_BIND) {
- intel_runtime_pm_get(i915);
- vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
- intel_runtime_pm_put(i915);
+ intel_wakeref_t wakeref;
+
+ with_intel_runtime_pm(i915, wakeref) {
+ vma->vm->insert_entries(vma->vm, vma,
+ cache_level, pte_flags);
+ }
}
return 0;
@@ -2738,9 +2604,11 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
struct drm_i915_private *i915 = vma->vm->i915;
if (vma->flags & I915_VMA_GLOBAL_BIND) {
- intel_runtime_pm_get(i915);
- vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
- intel_runtime_pm_put(i915);
+ struct i915_address_space *vm = vma->vm;
+ intel_wakeref_t wakeref;
+
+ with_intel_runtime_pm(i915, wakeref)
+ vm->clear_range(vm, vma->node.start, vma->size);
}
if (vma->flags & I915_VMA_LOCAL_BIND) {
@@ -2932,8 +2800,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
mutex_lock(&dev_priv->drm.struct_mutex);
i915_gem_fini_aliasing_ppgtt(dev_priv);
- GEM_BUG_ON(!list_empty(&ggtt->vm.active_list));
- list_for_each_entry_safe(vma, vn, &ggtt->vm.inactive_list, vm_link)
+ list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
WARN_ON(i915_vma_unbind(vma));
if (drm_mm_node_allocated(&ggtt->error_capture))
@@ -3364,7 +3231,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
/* Serialize GTT updates with aperture access on BXT if VT-d is on. */
- if (intel_ggtt_update_needs_vtd_wa(dev_priv)) {
+ if (intel_ggtt_update_needs_vtd_wa(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv) /* fails with concurrent use/update */) {
ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
if (ggtt->vm.clear_range != nop_clear_range)
@@ -3565,7 +3433,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
* and beyond the end of the GTT if we do not provide a guard.
*/
mutex_lock(&dev_priv->drm.struct_mutex);
- i915_address_space_init(&ggtt->vm, dev_priv);
+ i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
ggtt->vm.is_ggtt = true;
@@ -3638,32 +3506,39 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
i915_check_and_clear_faults(dev_priv);
+ mutex_lock(&ggtt->vm.mutex);
+
/* First fill our portion of the GTT with scratch pages */
ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
-
ggtt->vm.closed = true; /* skip rewriting PTE on VMA unbind */
/* clflush objects bound into the GGTT and rebind them. */
- GEM_BUG_ON(!list_empty(&ggtt->vm.active_list));
- list_for_each_entry_safe(vma, vn, &ggtt->vm.inactive_list, vm_link) {
+ list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
struct drm_i915_gem_object *obj = vma->obj;
if (!(vma->flags & I915_VMA_GLOBAL_BIND))
continue;
+ mutex_unlock(&ggtt->vm.mutex);
+
if (!i915_vma_unbind(vma))
- continue;
+ goto lock;
WARN_ON(i915_vma_bind(vma,
obj ? obj->cache_level : 0,
PIN_UPDATE));
if (obj)
WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
+
+lock:
+ mutex_lock(&ggtt->vm.mutex);
}
ggtt->vm.closed = false;
i915_ggtt_invalidate(dev_priv);
+ mutex_unlock(&ggtt->vm.mutex);
+
if (INTEL_GEN(dev_priv) >= 8) {
struct intel_ppat *ppat = &dev_priv->ppat;
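
The restore loop above takes ggtt->vm.mutex to walk the bound list, but drops it around i915_vma_unbind()/i915_vma_bind(), which may block, and retakes it before continuing. A small pthread sketch of that unlock-around-heavy-work shape, with hypothetical names; a real version must also re-validate its position in the list after re-locking, which this toy example glosses over.

#include <pthread.h>
#include <stdio.h>

struct node {
	struct node *next;
	int needs_work;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node c = { NULL, 1 };
static struct node b = { &c, 0 };
static struct node a = { &b, 1 };
static struct node *head = &a;

static void heavy_work(struct node *n)
{
	/* Would sleep or allocate, so it must not run under list_lock. */
	printf("working on node %p\n", (void *)n);
}

int main(void)
{
	struct node *n;

	pthread_mutex_lock(&list_lock);
	for (n = head; n; n = n->next) {
		if (!n->needs_work)
			continue;

		pthread_mutex_unlock(&list_lock);
		heavy_work(n);			/* may block */
		pthread_mutex_lock(&list_lock);
		/* After re-locking, another thread may have changed the
		 * list; a real implementation re-validates its position. */
	}
	pthread_mutex_unlock(&list_lock);

	return 0;
}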
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 4874da09a3c4..03ade71b8d9a 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -39,6 +39,7 @@
#include <linux/pagevec.h>
#include "i915_request.h"
+#include "i915_reset.h"
#include "i915_selftest.h"
#include "i915_timeline.h"
@@ -288,6 +289,8 @@ struct i915_address_space {
bool closed;
struct mutex mutex; /* protects vma and our lists */
+#define VM_CLASS_GGTT 0
+#define VM_CLASS_PPGTT 1
u64 scratch_pte;
struct i915_page_dma scratch_page;
@@ -296,32 +299,12 @@ struct i915_address_space {
struct i915_page_directory_pointer *scratch_pdp; /* GEN8+ & 48b PPGTT */
/**
- * List of objects currently involved in rendering.
- *
- * Includes buffers having the contents of their GPU caches
- * flushed, not necessarily primitives. last_read_req
- * represents when the rendering involved will be completed.
- *
- * A reference is held on the buffer while on this list.
+ * List of vma currently bound.
*/
- struct list_head active_list;
+ struct list_head bound_list;
/**
- * LRU list of objects which are not in the ringbuffer and
- * are ready to unbind, but are still in the GTT.
- *
- * last_read_req is NULL while an object is in this list.
- *
- * A reference is not held on the buffer while on this list,
- * as merely being GTT-bound shouldn't prevent its being
- * freed, and we'll pull it off the list in the free path.
- */
- struct list_head inactive_list;
-
- /**
- * List of vma that have been unbound.
- *
- * A reference is not held on the buffer while on this list.
+ * List of vma that are not bound.
*/
struct list_head unbound_list;
@@ -413,8 +396,6 @@ struct i915_hw_ppgtt {
struct i915_page_directory_pointer pdp; /* GEN8+ */
struct i915_page_directory pd; /* GEN6-7 */
};
-
- void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
};
struct gen6_hw_ppgtt {
@@ -661,19 +642,19 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
/* Flags used by pin/bind&friends. */
#define PIN_NONBLOCK BIT_ULL(0)
-#define PIN_MAPPABLE BIT_ULL(1)
-#define PIN_ZONE_4G BIT_ULL(2)
-#define PIN_NONFAULT BIT_ULL(3)
-#define PIN_NOEVICT BIT_ULL(4)
-
-#define PIN_MBZ BIT_ULL(5) /* I915_VMA_PIN_OVERFLOW */
-#define PIN_GLOBAL BIT_ULL(6) /* I915_VMA_GLOBAL_BIND */
-#define PIN_USER BIT_ULL(7) /* I915_VMA_LOCAL_BIND */
-#define PIN_UPDATE BIT_ULL(8)
-
-#define PIN_HIGH BIT_ULL(9)
-#define PIN_OFFSET_BIAS BIT_ULL(10)
-#define PIN_OFFSET_FIXED BIT_ULL(11)
+#define PIN_NONFAULT BIT_ULL(1)
+#define PIN_NOEVICT BIT_ULL(2)
+#define PIN_MAPPABLE BIT_ULL(3)
+#define PIN_ZONE_4G BIT_ULL(4)
+#define PIN_HIGH BIT_ULL(5)
+#define PIN_OFFSET_BIAS BIT_ULL(6)
+#define PIN_OFFSET_FIXED BIT_ULL(7)
+
+#define PIN_MBZ BIT_ULL(8) /* I915_VMA_PIN_OVERFLOW */
+#define PIN_GLOBAL BIT_ULL(9) /* I915_VMA_GLOBAL_BIND */
+#define PIN_USER BIT_ULL(10) /* I915_VMA_LOCAL_BIND */
+#define PIN_UPDATE BIT_ULL(11)
+
#define PIN_OFFSET_MASK (-I915_GTT_PAGE_SIZE)
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c
index 0d0144b2104c..fddde1033e74 100644
--- a/drivers/gpu/drm/i915/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/i915_gem_internal.c
@@ -22,7 +22,6 @@
*
*/
-#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index a6dd7c46de0d..fab040331cdb 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -29,7 +29,8 @@
#include <drm/drm_vma_manager.h>
#include <drm/drm_gem.h>
-#include <drm/drmP.h>
+#include <drm/drm_file.h>
+#include <drm/drm_device.h>
#include <drm/i915_drm.h>
@@ -56,6 +57,7 @@ struct drm_i915_gem_object_ops {
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)
#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1)
#define I915_GEM_OBJECT_IS_PROXY BIT(2)
+#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(3)
/* Interface between the GEM object and its backing storage.
* get_pages() is called once prior to the use of the associated set
@@ -85,24 +87,33 @@ struct drm_i915_gem_object {
const struct drm_i915_gem_object_ops *ops;
- /**
- * @vma_list: List of VMAs backed by this object
- *
- * The VMA on this list are ordered by type, all GGTT vma are placed
- * at the head and all ppGTT vma are placed at the tail. The different
- * types of GGTT vma are unordered between themselves, use the
- * @vma_tree (which has a defined order between all VMA) to find an
- * exact match.
- */
- struct list_head vma_list;
- /**
- * @vma_tree: Ordered tree of VMAs backed by this object
- *
- * All VMA created for this object are placed in the @vma_tree for
- * fast retrieval via a binary search in i915_vma_instance().
- * They are also added to @vma_list for easy iteration.
- */
- struct rb_root vma_tree;
+ struct {
+ /**
+ * @vma.lock: protect the list/tree of vmas
+ */
+ spinlock_t lock;
+
+ /**
+ * @vma.list: List of VMAs backed by this object
+ *
+ * The VMA on this list are ordered by type, all GGTT vma are
+ * placed at the head and all ppGTT vma are placed at the tail.
+ * The different types of GGTT vma are unordered between
+ * themselves, use the @vma.tree (which has a defined order
+ * between all VMA) to quickly find an exact match.
+ */
+ struct list_head list;
+
+ /**
+ * @vma.tree: Ordered tree of VMAs backed by this object
+ *
+ * All VMA created for this object are placed in the @vma.tree
+ * for fast retrieval via a binary search in
+ * i915_vma_instance(). They are also added to @vma.list for
+ * easy iteration.
+ */
+ struct rb_root tree;
+ } vma;
/**
* @lut_list: List of vma lookup entries in use for this object.
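
The new obj->vma grouping above keeps the per-object VMA list and rb-tree next to the spinlock that guards them, so every lookup or insertion goes through one lock. The sketch below is a deliberately simplified userspace analogue: it swaps the rb-tree for a linked list and skips the real driver's allocate-outside-the-lock optimisation, so treat vma_instance() and the other names as illustrative only.

#include <pthread.h>
#include <stdlib.h>

struct vma {
	struct vma *next;
	const void *vm;		/* which address space this instance is for */
};

struct object {
	struct {
		pthread_mutex_t lock;	/* guards list (and, in i915, the tree) */
		struct vma *list;
	} vma;
};

/* Find the VMA of @obj in @vm, creating one if it does not exist yet. */
static struct vma *vma_instance(struct object *obj, const void *vm)
{
	struct vma *v;

	pthread_mutex_lock(&obj->vma.lock);
	for (v = obj->vma.list; v; v = v->next)
		if (v->vm == vm)
			goto out;		/* reuse the existing instance */

	v = calloc(1, sizeof(*v));
	if (v) {
		v->vm = vm;
		v->next = obj->vma.list;	/* publish under the lock */
		obj->vma.list = v;
	}
out:
	pthread_mutex_unlock(&obj->vma.lock);
	return v;
}

int main(void)
{
	static struct object obj = {
		.vma = { .lock = PTHREAD_MUTEX_INITIALIZER },
	};

	return vma_instance(&obj, (const void *)0x1) ? 0 : 1;
}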
@@ -164,7 +175,7 @@ struct drm_i915_gem_object {
atomic_t frontbuffer_bits;
unsigned int frontbuffer_ggtt_origin; /* write once */
- struct i915_gem_active frontbuffer_write;
+ struct i915_active_request frontbuffer_write;
/** Current tiling stride for the object, if it's tiled. */
unsigned int tiling_and_stride;
@@ -387,6 +398,12 @@ i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
}
static inline bool
+i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
+{
+ return obj->ops->flags & I915_GEM_OBJECT_ASYNC_CANCEL;
+}
+
+static inline bool
i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
{
return obj->active_count;
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index ea90d3a0d511..6da795c7e62e 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -30,30 +30,27 @@
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>
-#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
-static bool shrinker_lock(struct drm_i915_private *i915, bool *unlock)
+static bool shrinker_lock(struct drm_i915_private *i915,
+ unsigned int flags,
+ bool *unlock)
{
- switch (mutex_trylock_recursive(&i915->drm.struct_mutex)) {
+ struct mutex *m = &i915->drm.struct_mutex;
+
+ switch (mutex_trylock_recursive(m)) {
case MUTEX_TRYLOCK_RECURSIVE:
*unlock = false;
return true;
case MUTEX_TRYLOCK_FAILED:
*unlock = false;
- preempt_disable();
- do {
- cpu_relax();
- if (mutex_trylock(&i915->drm.struct_mutex)) {
- *unlock = true;
- break;
- }
- } while (!need_resched());
- preempt_enable();
+ if (flags & I915_SHRINK_ACTIVE &&
+ mutex_lock_killable_nested(m, I915_MM_SHRINKER) == 0)
+ *unlock = true;
return *unlock;
case MUTEX_TRYLOCK_SUCCESS:
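
shrinker_lock() above distinguishes three outcomes: the struct_mutex is already ours (reclaim re-entered from under it, so run without taking or dropping it), it is contended (only an active shrink is now allowed to sleep for it), or the trylock succeeds and we own the unlock. A userspace approximation of the recursive and success cases, with hand-rolled owner tracking that is illustrative rather than race-free:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t big_lock_owner;
static bool big_lock_held;

/* Returns true if the caller may proceed; *unlock says whether it must
 * drop big_lock afterwards (false when the lock was already ours). */
static bool shrinker_lock(bool *unlock)
{
	if (big_lock_held && pthread_equal(big_lock_owner, pthread_self())) {
		*unlock = false;		/* recursive: already held */
		return true;
	}

	if (pthread_mutex_trylock(&big_lock) != 0) {
		*unlock = false;		/* contended: give up */
		return false;
	}

	big_lock_owner = pthread_self();
	big_lock_held = true;
	*unlock = true;
	return true;
}

int main(void)
{
	bool unlock;

	if (shrinker_lock(&unlock)) {
		/* ... reclaim work would go here ... */
		if (unlock) {
			big_lock_held = false;
			pthread_mutex_unlock(&big_lock);
		}
	}
	return 0;
}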
@@ -156,11 +153,12 @@ i915_gem_shrink(struct drm_i915_private *i915,
{ &i915->mm.bound_list, I915_SHRINK_BOUND },
{ NULL, 0 },
}, *phase;
+ intel_wakeref_t wakeref = 0;
unsigned long count = 0;
unsigned long scanned = 0;
bool unlock;
- if (!shrinker_lock(i915, &unlock))
+ if (!shrinker_lock(i915, flags, &unlock))
return 0;
/*
@@ -185,9 +183,11 @@ i915_gem_shrink(struct drm_i915_private *i915,
* device just to recover a little memory. If absolutely necessary,
* we will force the wake during oom-notifier.
*/
- if ((flags & I915_SHRINK_BOUND) &&
- !intel_runtime_pm_get_if_in_use(i915))
- flags &= ~I915_SHRINK_BOUND;
+ if (flags & I915_SHRINK_BOUND) {
+ wakeref = intel_runtime_pm_get_if_in_use(i915);
+ if (!wakeref)
+ flags &= ~I915_SHRINK_BOUND;
+ }
/*
* As we may completely rewrite the (un)bound list whilst unbinding
@@ -268,7 +268,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
}
if (flags & I915_SHRINK_BOUND)
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
i915_retire_requests(i915);
@@ -295,14 +295,15 @@ i915_gem_shrink(struct drm_i915_private *i915,
*/
unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
{
- unsigned long freed;
-
- intel_runtime_pm_get(i915);
- freed = i915_gem_shrink(i915, -1UL, NULL,
- I915_SHRINK_BOUND |
- I915_SHRINK_UNBOUND |
- I915_SHRINK_ACTIVE);
- intel_runtime_pm_put(i915);
+ intel_wakeref_t wakeref;
+ unsigned long freed = 0;
+
+ with_intel_runtime_pm(i915, wakeref) {
+ freed = i915_gem_shrink(i915, -1UL, NULL,
+ I915_SHRINK_BOUND |
+ I915_SHRINK_UNBOUND |
+ I915_SHRINK_ACTIVE);
+ }
return freed;
}
@@ -357,7 +358,7 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
sc->nr_scanned = 0;
- if (!shrinker_lock(i915, &unlock))
+ if (!shrinker_lock(i915, 0, &unlock))
return SHRINK_STOP;
freed = i915_gem_shrink(i915,
@@ -373,14 +374,16 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND);
if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) {
- intel_runtime_pm_get(i915);
- freed += i915_gem_shrink(i915,
- sc->nr_to_scan - sc->nr_scanned,
- &sc->nr_scanned,
- I915_SHRINK_ACTIVE |
- I915_SHRINK_BOUND |
- I915_SHRINK_UNBOUND);
- intel_runtime_pm_put(i915);
+ intel_wakeref_t wakeref;
+
+ with_intel_runtime_pm(i915, wakeref) {
+ freed += i915_gem_shrink(i915,
+ sc->nr_to_scan - sc->nr_scanned,
+ &sc->nr_scanned,
+ I915_SHRINK_ACTIVE |
+ I915_SHRINK_BOUND |
+ I915_SHRINK_UNBOUND);
+ }
}
shrinker_unlock(i915, unlock);
@@ -388,31 +391,6 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
return sc->nr_scanned ? freed : SHRINK_STOP;
}
-static bool
-shrinker_lock_uninterruptible(struct drm_i915_private *i915, bool *unlock,
- int timeout_ms)
-{
- unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms);
-
- do {
- if (i915_gem_wait_for_idle(i915,
- 0, MAX_SCHEDULE_TIMEOUT) == 0 &&
- shrinker_lock(i915, unlock))
- break;
-
- schedule_timeout_killable(1);
- if (fatal_signal_pending(current))
- return false;
-
- if (time_after(jiffies, timeout)) {
- pr_err("Unable to lock GPU to purge memory.\n");
- return false;
- }
- } while (1);
-
- return true;
-}
-
static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
@@ -420,8 +398,13 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
container_of(nb, struct drm_i915_private, mm.oom_notifier);
struct drm_i915_gem_object *obj;
unsigned long unevictable, bound, unbound, freed_pages;
+ intel_wakeref_t wakeref;
- freed_pages = i915_gem_shrink_all(i915);
+ freed_pages = 0;
+ with_intel_runtime_pm(i915, wakeref)
+ freed_pages += i915_gem_shrink(i915, -1UL, NULL,
+ I915_SHRINK_BOUND |
+ I915_SHRINK_UNBOUND);
/* Because we may be allocating inside our own driver, we cannot
* assert that there are no objects with pinned pages that are not
@@ -447,10 +430,6 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
pr_info("Purging GPU memory, %lu pages freed, "
"%lu pages still pinned.\n",
freed_pages, unevictable);
- if (unbound || bound)
- pr_err("%lu and %lu pages still available in the "
- "bound and unbound GPU page lists.\n",
- bound, unbound);
*(unsigned long *)ptr += freed_pages;
return NOTIFY_DONE;
@@ -463,34 +442,39 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
container_of(nb, struct drm_i915_private, mm.vmap_notifier);
struct i915_vma *vma, *next;
unsigned long freed_pages = 0;
+ intel_wakeref_t wakeref;
bool unlock;
- int ret;
- if (!shrinker_lock_uninterruptible(i915, &unlock, 5000))
+ if (!shrinker_lock(i915, 0, &unlock))
return NOTIFY_DONE;
/* Force everything onto the inactive lists */
- ret = i915_gem_wait_for_idle(i915,
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
- if (ret)
+ if (i915_gem_wait_for_idle(i915,
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT))
goto out;
- intel_runtime_pm_get(i915);
- freed_pages += i915_gem_shrink(i915, -1UL, NULL,
- I915_SHRINK_BOUND |
- I915_SHRINK_UNBOUND |
- I915_SHRINK_ACTIVE |
- I915_SHRINK_VMAPS);
- intel_runtime_pm_put(i915);
+ with_intel_runtime_pm(i915, wakeref)
+ freed_pages += i915_gem_shrink(i915, -1UL, NULL,
+ I915_SHRINK_BOUND |
+ I915_SHRINK_UNBOUND |
+ I915_SHRINK_VMAPS);
/* We also want to clear any cached iomaps as they wrap vmap */
+ mutex_lock(&i915->ggtt.vm.mutex);
list_for_each_entry_safe(vma, next,
- &i915->ggtt.vm.inactive_list, vm_link) {
+ &i915->ggtt.vm.bound_list, vm_link) {
unsigned long count = vma->node.size >> PAGE_SHIFT;
- if (vma->iomap && i915_vma_unbind(vma) == 0)
+
+ if (!vma->iomap || i915_vma_is_active(vma))
+ continue;
+
+ mutex_unlock(&i915->ggtt.vm.mutex);
+ if (i915_vma_unbind(vma) == 0)
freed_pages += count;
+ mutex_lock(&i915->ggtt.vm.mutex);
}
+ mutex_unlock(&i915->ggtt.vm.mutex);
out:
shrinker_unlock(i915, unlock);
@@ -533,13 +517,40 @@ void i915_gem_shrinker_unregister(struct drm_i915_private *i915)
unregister_shrinker(&i915->mm.shrinker);
}
-void i915_gem_shrinker_taints_mutex(struct mutex *mutex)
+void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
+ struct mutex *mutex)
{
+ bool unlock = false;
+
if (!IS_ENABLED(CONFIG_LOCKDEP))
return;
+ if (!lockdep_is_held_type(&i915->drm.struct_mutex, -1)) {
+ mutex_acquire(&i915->drm.struct_mutex.dep_map,
+ I915_MM_NORMAL, 0, _RET_IP_);
+ unlock = true;
+ }
+
fs_reclaim_acquire(GFP_KERNEL);
- mutex_lock(mutex);
- mutex_unlock(mutex);
+
+ /*
+ * As we invariably rely on the struct_mutex within the shrinker,
+ * but have a complicated recursion dance, taint all the mutexes used
+ * within the shrinker with the struct_mutex. For completeness, we
+ * taint with all subclasses of struct_mutex, even though we should
+ * only need tainting by I915_MM_NORMAL to catch possible ABBA
+ * deadlocks from using struct_mutex inside @mutex.
+ */
+ mutex_acquire(&i915->drm.struct_mutex.dep_map,
+ I915_MM_SHRINKER, 0, _RET_IP_);
+
+ mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_);
+ mutex_release(&mutex->dep_map, 0, _RET_IP_);
+
+ mutex_release(&i915->drm.struct_mutex.dep_map, 0, _RET_IP_);
+
fs_reclaim_release(GFP_KERNEL);
+
+ if (unlock)
+ mutex_release(&i915->drm.struct_mutex.dep_map, 0, _RET_IP_);
}
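
The fake mutex_acquire()/mutex_release() pairs above only teach lockdep an ordering: under reclaim, struct_mutex may be held and @mutex may be taken inside it. What that guards against is the classic ABBA pattern, where two threads nest the same pair of locks in opposite orders and each waits forever on the other. Userspace has no lockdep, so this sketch simply demonstrates the declared ordering; the B-then-A path is described in a comment rather than run, since executing it concurrently could genuinely deadlock.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t A = PTHREAD_MUTEX_INITIALIZER;	/* struct_mutex  */
static pthread_mutex_t B = PTHREAD_MUTEX_INITIALIZER;	/* tainted mutex */

static void good_path(void)
{
	/* A -> B is the declared order, analogous to the lockdep priming. */
	pthread_mutex_lock(&A);
	pthread_mutex_lock(&B);
	puts("nested A then B");
	pthread_mutex_unlock(&B);
	pthread_mutex_unlock(&A);
}

/* A second thread taking B then A while good_path() runs could leave
 * each thread holding one lock and waiting forever for the other -
 * exactly the inversion the priming above lets lockdep report early. */

int main(void)
{
	good_path();
	return 0;
}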
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index f29a7ff7c362..74a9661479ca 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -26,7 +26,6 @@
*
*/
-#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
@@ -102,7 +101,7 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
resource_size_t ggtt_start;
ggtt_start = I915_READ(PGTBL_CTL);
- if (IS_GEN4(dev_priv))
+ if (IS_GEN(dev_priv, 4))
ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
(ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
else
@@ -156,7 +155,7 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
* GEN3 firmware likes to smash pci bridges into the stolen
* range. Apparently this works.
*/
- if (r == NULL && !IS_GEN3(dev_priv)) {
+ if (r == NULL && !IS_GEN(dev_priv, 3)) {
DRM_ERROR("conflict detected with stolen region: %pR\n",
dsm);
@@ -194,7 +193,8 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
* Whether ILK really reuses the ELK register for this is unclear.
* Let's see if we catch anyone with this supposedly enabled on ILK.
*/
- WARN(IS_GEN5(dev_priv), "ILK stolen reserved found? 0x%08x\n", reg_val);
+ WARN(IS_GEN(dev_priv, 5), "ILK stolen reserved found? 0x%08x\n",
+ reg_val);
if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
return;
@@ -701,7 +701,10 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
vma->pages = obj->mm.pages;
vma->flags |= I915_VMA_GLOBAL_BIND;
__i915_vma_set_map_and_fenceable(vma);
- list_move_tail(&vma->vm_link, &ggtt->vm.inactive_list);
+
+ mutex_lock(&ggtt->vm.mutex);
+ list_move_tail(&vma->vm_link, &ggtt->vm.bound_list);
+ mutex_unlock(&ggtt->vm.mutex);
spin_lock(&dev_priv->mm.obj_lock);
list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index d9dc9df523b5..16cc9ddbce34 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -27,7 +27,6 @@
#include <linux/string.h>
#include <linux/bitops.h>
-#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
@@ -87,7 +86,7 @@ u32 i915_gem_fence_size(struct drm_i915_private *i915,
}
/* Previous chips need a power-of-two fence region when tiling */
- if (IS_GEN3(i915))
+ if (IS_GEN(i915, 3))
ggtt_size = 1024*1024;
else
ggtt_size = 512*1024;
@@ -162,7 +161,7 @@ i915_tiling_ok(struct drm_i915_gem_object *obj,
return false;
}
- if (IS_GEN2(i915) ||
+ if (IS_GEN(i915, 2) ||
(tiling == I915_TILING_Y && HAS_128_BYTE_Y_TILING(i915)))
tile_width = 128;
else
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 9558582c105e..1d3f9a31ad61 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -22,7 +22,6 @@
*
*/
-#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
@@ -50,77 +49,67 @@ struct i915_mmu_notifier {
struct hlist_node node;
struct mmu_notifier mn;
struct rb_root_cached objects;
- struct workqueue_struct *wq;
+ struct i915_mm_struct *mm;
};
struct i915_mmu_object {
struct i915_mmu_notifier *mn;
struct drm_i915_gem_object *obj;
struct interval_tree_node it;
- struct list_head link;
- struct work_struct work;
- bool attached;
};
-static void cancel_userptr(struct work_struct *work)
+static void add_object(struct i915_mmu_object *mo)
{
- struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
- struct drm_i915_gem_object *obj = mo->obj;
- struct work_struct *active;
-
- /* Cancel any active worker and force us to re-evaluate gup */
- mutex_lock(&obj->mm.lock);
- active = fetch_and_zero(&obj->userptr.work);
- mutex_unlock(&obj->mm.lock);
- if (active)
- goto out;
-
- i915_gem_object_wait(obj, I915_WAIT_ALL, MAX_SCHEDULE_TIMEOUT, NULL);
-
- mutex_lock(&obj->base.dev->struct_mutex);
-
- /* We are inside a kthread context and can't be interrupted */
- if (i915_gem_object_unbind(obj) == 0)
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
- WARN_ONCE(i915_gem_object_has_pages(obj),
- "Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_global=%d\n",
- obj->bind_count,
- atomic_read(&obj->mm.pages_pin_count),
- obj->pin_global);
-
- mutex_unlock(&obj->base.dev->struct_mutex);
-
-out:
- i915_gem_object_put(obj);
+ GEM_BUG_ON(!RB_EMPTY_NODE(&mo->it.rb));
+ interval_tree_insert(&mo->it, &mo->mn->objects);
}
-static void add_object(struct i915_mmu_object *mo)
+static void del_object(struct i915_mmu_object *mo)
{
- if (mo->attached)
+ if (RB_EMPTY_NODE(&mo->it.rb))
return;
- interval_tree_insert(&mo->it, &mo->mn->objects);
- mo->attached = true;
+ interval_tree_remove(&mo->it, &mo->mn->objects);
+ RB_CLEAR_NODE(&mo->it.rb);
}
-static void del_object(struct i915_mmu_object *mo)
+static void
+__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, bool value)
{
- if (!mo->attached)
+ struct i915_mmu_object *mo = obj->userptr.mmu_object;
+
+ /*
+ * During mm_invalidate_range we need to cancel any userptr that
+ * overlaps the range being invalidated. Doing so requires the
+ * struct_mutex, and that risks recursion. In order to cause
+ * recursion, the user must alias the userptr address space with
+ * a GTT mmapping (possible with a MAP_FIXED) - then when we have
+ * to invalidate that mmapping, mm_invalidate_range is called with
+ * the userptr address *and* the struct_mutex held. To prevent that
+ * we set a flag under the i915_mmu_notifier spinlock to indicate
+ * whether this object is valid.
+ */
+ if (!mo)
return;
- interval_tree_remove(&mo->it, &mo->mn->objects);
- mo->attached = false;
+ spin_lock(&mo->mn->lock);
+ if (value)
+ add_object(mo);
+ else
+ del_object(mo);
+ spin_unlock(&mo->mn->lock);
}
-static int i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
- const struct mmu_notifier_range *range)
+static int
+userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
+ const struct mmu_notifier_range *range)
{
struct i915_mmu_notifier *mn =
container_of(_mn, struct i915_mmu_notifier, mn);
- struct i915_mmu_object *mo;
struct interval_tree_node *it;
- LIST_HEAD(cancelled);
+ struct mutex *unlock = NULL;
unsigned long end;
+ int ret = 0;
if (RB_EMPTY_ROOT(&mn->objects.rb_root))
return 0;
@@ -131,11 +120,15 @@ static int i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
spin_lock(&mn->lock);
it = interval_tree_iter_first(&mn->objects, range->start, end);
while (it) {
+ struct drm_i915_gem_object *obj;
+
if (!range->blockable) {
- spin_unlock(&mn->lock);
- return -EAGAIN;
+ ret = -EAGAIN;
+ break;
}
- /* The mmu_object is released late when destroying the
+
+ /*
+ * The mmu_object is released late when destroying the
* GEM object so it is entirely possible to gain a
* reference on an object in the process of being freed
* since our serialisation is via the spinlock and not
@@ -144,29 +137,65 @@ static int i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
* use-after-free we only acquire a reference on the
* object if it is not in the process of being destroyed.
*/
- mo = container_of(it, struct i915_mmu_object, it);
- if (kref_get_unless_zero(&mo->obj->base.refcount))
- queue_work(mn->wq, &mo->work);
+ obj = container_of(it, struct i915_mmu_object, it)->obj;
+ if (!kref_get_unless_zero(&obj->base.refcount)) {
+ it = interval_tree_iter_next(it, range->start, end);
+ continue;
+ }
+ spin_unlock(&mn->lock);
+
+ if (!unlock) {
+ unlock = &mn->mm->i915->drm.struct_mutex;
+
+ switch (mutex_trylock_recursive(unlock)) {
+ default:
+ case MUTEX_TRYLOCK_FAILED:
+ if (mutex_lock_killable_nested(unlock, I915_MM_SHRINKER)) {
+ i915_gem_object_put(obj);
+ return -EINTR;
+ }
+ /* fall through */
+ case MUTEX_TRYLOCK_SUCCESS:
+ break;
+
+ case MUTEX_TRYLOCK_RECURSIVE:
+ unlock = ERR_PTR(-EEXIST);
+ break;
+ }
+ }
+
+ ret = i915_gem_object_unbind(obj);
+ if (ret == 0)
+ ret = __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
+ i915_gem_object_put(obj);
+ if (ret)
+ goto unlock;
- list_add(&mo->link, &cancelled);
- it = interval_tree_iter_next(it, range->start, end);
+ spin_lock(&mn->lock);
+
+ /*
+ * As we do not (yet) protect the mmu from concurrent insertion
+ * over this range, there is no guarantee that this search will
+ * terminate given a pathological workload.
+ */
+ it = interval_tree_iter_first(&mn->objects, range->start, end);
}
- list_for_each_entry(mo, &cancelled, link)
- del_object(mo);
spin_unlock(&mn->lock);
- if (!list_empty(&cancelled))
- flush_workqueue(mn->wq);
+unlock:
+ if (!IS_ERR_OR_NULL(unlock))
+ mutex_unlock(unlock);
+
+ return ret;
- return 0;
}
static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
- .invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
+ .invalidate_range_start = userptr_mn_invalidate_range_start,
};
static struct i915_mmu_notifier *
-i915_mmu_notifier_create(struct mm_struct *mm)
+i915_mmu_notifier_create(struct i915_mm_struct *mm)
{
struct i915_mmu_notifier *mn;
@@ -177,13 +206,7 @@ i915_mmu_notifier_create(struct mm_struct *mm)
spin_lock_init(&mn->lock);
mn->mn.ops = &i915_gem_userptr_notifier;
mn->objects = RB_ROOT_CACHED;
- mn->wq = alloc_workqueue("i915-userptr-release",
- WQ_UNBOUND | WQ_MEM_RECLAIM,
- 0);
- if (mn->wq == NULL) {
- kfree(mn);
- return ERR_PTR(-ENOMEM);
- }
+ mn->mm = mm;
return mn;
}
@@ -193,16 +216,14 @@ i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
struct i915_mmu_object *mo;
- mo = obj->userptr.mmu_object;
- if (mo == NULL)
+ mo = fetch_and_zero(&obj->userptr.mmu_object);
+ if (!mo)
return;
spin_lock(&mo->mn->lock);
del_object(mo);
spin_unlock(&mo->mn->lock);
kfree(mo);
-
- obj->userptr.mmu_object = NULL;
}
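
The fetch_and_zero() used in this hunk reads the mmu_object slot and clears it in one visible step, so "take ownership and mark the slot empty" cannot be split across the function. The definition below is an approximation of the i915 helper from memory (a GNU C statement expression), shown in a userspace harness; note it is not atomic, callers still rely on their own locking.

#include <stdio.h>

/* Approximation of i915's fetch_and_zero(): read *ptr, clear it, and
 * hand back the old value. Not atomic - just tidy ownership transfer. */
#define fetch_and_zero(ptr) ({			\
	typeof(*(ptr)) __val = *(ptr);		\
	*(ptr) = (typeof(*(ptr)))0;		\
	__val;					\
})

struct mmu_object {
	int id;
};

int main(void)
{
	struct mmu_object mo = { 42 };
	struct mmu_object *slot = &mo;
	struct mmu_object *mine = fetch_and_zero(&slot);

	printf("taken id=%d, slot now=%p\n", mine->id, (void *)slot);
	return 0;
}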
static struct i915_mmu_notifier *
@@ -215,7 +236,7 @@ i915_mmu_notifier_find(struct i915_mm_struct *mm)
if (mn)
return mn;
- mn = i915_mmu_notifier_create(mm->mm);
+ mn = i915_mmu_notifier_create(mm);
if (IS_ERR(mn))
err = PTR_ERR(mn);
@@ -238,10 +259,8 @@ i915_mmu_notifier_find(struct i915_mm_struct *mm)
mutex_unlock(&mm->i915->mm_lock);
up_write(&mm->mm->mmap_sem);
- if (mn && !IS_ERR(mn)) {
- destroy_workqueue(mn->wq);
+ if (mn && !IS_ERR(mn))
kfree(mn);
- }
return err ? ERR_PTR(err) : mm->mn;
}
@@ -264,14 +283,14 @@ i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
return PTR_ERR(mn);
mo = kzalloc(sizeof(*mo), GFP_KERNEL);
- if (mo == NULL)
+ if (!mo)
return -ENOMEM;
mo->mn = mn;
mo->obj = obj;
mo->it.start = obj->userptr.ptr;
mo->it.last = obj->userptr.ptr + obj->base.size - 1;
- INIT_WORK(&mo->work, cancel_userptr);
+ RB_CLEAR_NODE(&mo->it.rb);
obj->userptr.mmu_object = mo;
return 0;
@@ -285,13 +304,17 @@ i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
return;
mmu_notifier_unregister(&mn->mn, mm);
- destroy_workqueue(mn->wq);
kfree(mn);
}
#else
static void
+__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, bool value)
+{
+}
+
+static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
}
@@ -459,42 +482,6 @@ alloc_table:
return st;
}
-static int
-__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
- bool value)
-{
- int ret = 0;
-
- /* During mm_invalidate_range we need to cancel any userptr that
- * overlaps the range being invalidated. Doing so requires the
- * struct_mutex, and that risks recursion. In order to cause
- * recursion, the user must alias the userptr address space with
- * a GTT mmapping (possible with a MAP_FIXED) - then when we have
- * to invalidate that mmaping, mm_invalidate_range is called with
- * the userptr address *and* the struct_mutex held. To prevent that
- * we set a flag under the i915_mmu_notifier spinlock to indicate
- * whether this object is valid.
- */
-#if defined(CONFIG_MMU_NOTIFIER)
- if (obj->userptr.mmu_object == NULL)
- return 0;
-
- spin_lock(&obj->userptr.mmu_object->mn->lock);
- /* In order to serialise get_pages with an outstanding
- * cancel_userptr, we must drop the struct_mutex and try again.
- */
- if (!value)
- del_object(obj->userptr.mmu_object);
- else if (!work_pending(&obj->userptr.mmu_object->work))
- add_object(obj->userptr.mmu_object);
- else
- ret = -EAGAIN;
- spin_unlock(&obj->userptr.mmu_object->mn->lock);
-#endif
-
- return ret;
-}
-
static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
@@ -680,8 +667,11 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
struct sgt_iter sgt_iter;
struct page *page;
- BUG_ON(obj->userptr.work != NULL);
+ /* Cancel any inflight work and force them to restart their gup */
+ obj->userptr.work = NULL;
__i915_gem_userptr_set_active(obj, false);
+ if (!pages)
+ return;
if (obj->mm.madv != I915_MADV_WILLNEED)
obj->mm.dirty = false;
@@ -719,7 +709,8 @@ i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
- I915_GEM_OBJECT_IS_SHRINKABLE,
+ I915_GEM_OBJECT_IS_SHRINKABLE |
+ I915_GEM_OBJECT_ASYNC_CANCEL,
.get_pages = i915_gem_userptr_get_pages,
.put_pages = i915_gem_userptr_put_pages,
.dmabuf_export = i915_gem_userptr_dmabuf_export,
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 3f9ce403c755..9a65341fec09 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -447,9 +447,14 @@ static void error_print_request(struct drm_i915_error_state_buf *m,
if (!erq->seqno)
return;
- err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x, prio %d, emitted %dms, start %08x, head %08x, tail %08x\n",
+ err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x%s%s, prio %d, emitted %dms, start %08x, head %08x, tail %08x\n",
prefix, erq->pid, erq->ban_score,
- erq->context, erq->seqno, erq->sched_attr.priority,
+ erq->context, erq->seqno,
+ test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ &erq->flags) ? "!" : "",
+ test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+ &erq->flags) ? "+" : "",
+ erq->sched_attr.priority,
jiffies_to_msecs(erq->jiffies - epoch),
erq->start, erq->head, erq->tail);
}
@@ -530,13 +535,9 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
}
err_printf(m, " seqno: 0x%08x\n", ee->seqno);
err_printf(m, " last_seqno: 0x%08x\n", ee->last_seqno);
- err_printf(m, " waiting: %s\n", yesno(ee->waiting));
err_printf(m, " ring->head: 0x%08x\n", ee->cpu_ring_head);
err_printf(m, " ring->tail: 0x%08x\n", ee->cpu_ring_tail);
- err_printf(m, " hangcheck stall: %s\n", yesno(ee->hangcheck_stalled));
- err_printf(m, " hangcheck action: %s\n",
- hangcheck_action_to_str(ee->hangcheck_action));
- err_printf(m, " hangcheck action timestamp: %dms (%lu%s)\n",
+ err_printf(m, " hangcheck timestamp: %dms (%lu%s)\n",
jiffies_to_msecs(ee->hangcheck_timestamp - epoch),
ee->hangcheck_timestamp,
ee->hangcheck_timestamp == epoch ? "; epoch" : "");
@@ -594,13 +595,14 @@ static void print_error_obj(struct drm_i915_error_state_buf *m,
static void err_print_capabilities(struct drm_i915_error_state_buf *m,
const struct intel_device_info *info,
+ const struct intel_runtime_info *runtime,
const struct intel_driver_caps *caps)
{
struct drm_printer p = i915_error_printer(m);
intel_device_info_dump_flags(info, &p);
intel_driver_caps_print(caps, &p);
- intel_device_info_dump_topology(&info->sseu, &p);
+ intel_device_info_dump_topology(&runtime->sseu, &p);
}
static void err_print_params(struct drm_i915_error_state_buf *m,
@@ -664,7 +666,9 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
if (*error->error_msg)
err_printf(m, "%s\n", error->error_msg);
- err_printf(m, "Kernel: %s\n", init_utsname()->release);
+ err_printf(m, "Kernel: %s %s\n",
+ init_utsname()->release,
+ init_utsname()->machine);
ts = ktime_to_timespec64(error->time);
err_printf(m, "Time: %lld s %ld us\n",
(s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
@@ -681,15 +685,15 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
jiffies_to_msecs(error->capture - error->epoch));
for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
- if (error->engine[i].hangcheck_stalled &&
- error->engine[i].context.pid) {
- err_printf(m, "Active process (on ring %s): %s [%d], score %d%s\n",
- engine_name(m->i915, i),
- error->engine[i].context.comm,
- error->engine[i].context.pid,
- error->engine[i].context.ban_score,
- bannable(&error->engine[i].context));
- }
+ if (!error->engine[i].context.pid)
+ continue;
+
+ err_printf(m, "Active process (on ring %s): %s [%d], score %d%s\n",
+ engine_name(m->i915, i),
+ error->engine[i].context.comm,
+ error->engine[i].context.pid,
+ error->engine[i].context.ban_score,
+ bannable(&error->engine[i].context));
}
err_printf(m, "Reset count: %u\n", error->reset_count);
err_printf(m, "Suspend count: %u\n", error->suspend_count);
@@ -719,8 +723,6 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
err_printf(m, "CCID: 0x%08x\n", error->ccid);
- err_printf(m, "Missed interrupts: 0x%08lx\n",
- m->i915->gpu_error.missed_irq_rings);
for (i = 0; i < error->nfence; i++)
err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
@@ -735,7 +737,7 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
}
- if (IS_GEN7(m->i915))
+ if (IS_GEN(m->i915, 7))
err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
@@ -804,21 +806,6 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
error->epoch);
}
- if (IS_ERR(ee->waiters)) {
- err_printf(m, "%s --- ? waiters [unable to acquire spinlock]\n",
- m->i915->engine[i]->name);
- } else if (ee->num_waiters) {
- err_printf(m, "%s --- %d waiters\n",
- m->i915->engine[i]->name,
- ee->num_waiters);
- for (j = 0; j < ee->num_waiters; j++) {
- err_printf(m, " seqno 0x%08x for %s [%d]\n",
- ee->waiters[j].seqno,
- ee->waiters[j].comm,
- ee->waiters[j].pid);
- }
- }
-
print_error_obj(m, m->i915->engine[i],
"ringbuffer", ee->ringbuffer);
@@ -844,7 +831,8 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
if (error->display)
intel_display_print_error_state(m, error->display);
- err_print_capabilities(m, &error->device_info, &error->driver_caps);
+ err_print_capabilities(m, &error->device_info, &error->runtime_info,
+ &error->driver_caps);
err_print_params(m, &error->params);
err_print_uc(m, &error->uc);
}
@@ -963,17 +951,10 @@ static void i915_error_object_free(struct drm_i915_error_object *obj)
kfree(obj);
}
-static __always_inline void free_param(const char *type, void *x)
-{
- if (!__builtin_strcmp(type, "char *"))
- kfree(*(void **)x);
-}
static void cleanup_params(struct i915_gpu_state *error)
{
-#define FREE(T, x, ...) free_param(#T, &error->params.x);
- I915_PARAMS_FOR_EACH(FREE);
-#undef FREE
+ i915_params_free(&error->params);
}
static void cleanup_uc_state(struct i915_gpu_state *error)
@@ -1006,8 +987,6 @@ void __i915_gpu_state_free(struct kref *error_ref)
i915_error_object_free(ee->wa_ctx);
kfree(ee->requests);
- if (!IS_ERR_OR_NULL(ee->waiters))
- kfree(ee->waiters);
}
for (i = 0; i < ARRAY_SIZE(error->active_bo); i++)
@@ -1037,7 +1016,7 @@ i915_error_object_create(struct drm_i915_private *i915,
dma_addr_t dma;
int ret;
- if (!vma)
+ if (!vma || !vma->pages)
return NULL;
num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT;
@@ -1083,23 +1062,23 @@ i915_error_object_create(struct drm_i915_private *i915,
}
/* The error capture is special as it tries to run underneath the normal
- * locking rules - so we use the raw version of the i915_gem_active lookup.
+ * locking rules - so we use the raw version of the i915_active_request lookup.
*/
-static inline uint32_t
-__active_get_seqno(struct i915_gem_active *active)
+static inline u32
+__active_get_seqno(struct i915_active_request *active)
{
struct i915_request *request;
- request = __i915_gem_active_peek(active);
+ request = __i915_active_request_peek(active);
return request ? request->global_seqno : 0;
}
static inline int
-__active_get_engine_id(struct i915_gem_active *active)
+__active_get_engine_id(struct i915_active_request *active)
{
struct i915_request *request;
- request = __i915_gem_active_peek(active);
+ request = __i915_active_request_peek(active);
return request ? request->engine->id : -1;
}
@@ -1127,7 +1106,9 @@ static void capture_bo(struct drm_i915_error_buffer *err,
static u32 capture_error_bo(struct drm_i915_error_buffer *err,
int count, struct list_head *head,
- bool pinned_only)
+ unsigned int flags)
+#define ACTIVE_ONLY BIT(0)
+#define PINNED_ONLY BIT(1)
{
struct i915_vma *vma;
int i = 0;
@@ -1136,7 +1117,10 @@ static u32 capture_error_bo(struct drm_i915_error_buffer *err,
if (!vma->obj)
continue;
- if (pinned_only && !i915_vma_is_pinned(vma))
+ if (flags & ACTIVE_ONLY && !i915_vma_is_active(vma))
+ continue;
+
+ if (flags & PINNED_ONLY && !i915_vma_is_pinned(vma))
continue;
capture_bo(err++, vma);
@@ -1147,7 +1131,8 @@ static u32 capture_error_bo(struct drm_i915_error_buffer *err,
return i;
}
-/* Generate a semi-unique error code. The code is not meant to have meaning, The
+/*
+ * Generate a semi-unique error code. The code is not meant to have meaning; the
* code's only purpose is to try to prevent false duplicated bug reports by
* grossly estimating a GPU error state.
*
@@ -1156,29 +1141,23 @@ static u32 capture_error_bo(struct drm_i915_error_buffer *err,
*
* It's only a small step better than a random number in its current form.
*/
-static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
- struct i915_gpu_state *error,
- int *engine_id)
+static u32 i915_error_generate_code(struct i915_gpu_state *error,
+ unsigned long engine_mask)
{
- uint32_t error_code = 0;
- int i;
-
- /* IPEHR would be an ideal way to detect errors, as it's the gross
+ /*
+ * IPEHR would be an ideal way to detect errors, as it's the gross
* measure of "the command that hung". However, it has some very common
* synchronization commands which almost always appear in what is
* strictly a client bug. Use instdone to differentiate some of those.
*/
- for (i = 0; i < I915_NUM_ENGINES; i++) {
- if (error->engine[i].hangcheck_stalled) {
- if (engine_id)
- *engine_id = i;
+ if (engine_mask) {
+ struct drm_i915_error_engine *ee =
+ &error->engine[ffs(engine_mask)];
- return error->engine[i].ipehr ^
- error->engine[i].instdone.instdone;
- }
+ return ee->ipehr ^ ee->instdone.instdone;
}
- return error_code;
+ return 0;
}
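
The "ecode" is just the chosen engine's IPEHR XORed with its instdone word, keyed by the first bit set in the engine mask. A worked userspace example of the same idea follows; note that ffs() is one-based, so turning a mask bit into an array index normally takes a "- 1". The engine numbering and register values here are made up.

#include <stdio.h>
#include <strings.h>	/* ffs() */

struct engine_error {
	unsigned int ipehr;
	unsigned int instdone;
};

static unsigned int generate_code(const struct engine_error *engines,
				  unsigned long mask)
{
	const struct engine_error *ee;

	if (!mask)
		return 0;

	ee = &engines[ffs(mask) - 1];	/* first engine named in the mask */
	return ee->ipehr ^ ee->instdone;
}

int main(void)
{
	struct engine_error engines[4] = {
		{ 0x0b160001, 0xffffffff },	/* "rcs" */
	};

	printf("ecode 0x%08x\n", generate_code(engines, 0x1));
	return 0;
}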
static void gem_record_fences(struct i915_gpu_state *error)
@@ -1211,59 +1190,6 @@ static void gen6_record_semaphore_state(struct intel_engine_cs *engine,
I915_READ(RING_SYNC_2(engine->mmio_base));
}
-static void error_record_engine_waiters(struct intel_engine_cs *engine,
- struct drm_i915_error_engine *ee)
-{
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
- struct drm_i915_error_waiter *waiter;
- struct rb_node *rb;
- int count;
-
- ee->num_waiters = 0;
- ee->waiters = NULL;
-
- if (RB_EMPTY_ROOT(&b->waiters))
- return;
-
- if (!spin_trylock_irq(&b->rb_lock)) {
- ee->waiters = ERR_PTR(-EDEADLK);
- return;
- }
-
- count = 0;
- for (rb = rb_first(&b->waiters); rb != NULL; rb = rb_next(rb))
- count++;
- spin_unlock_irq(&b->rb_lock);
-
- waiter = NULL;
- if (count)
- waiter = kmalloc_array(count,
- sizeof(struct drm_i915_error_waiter),
- GFP_ATOMIC);
- if (!waiter)
- return;
-
- if (!spin_trylock_irq(&b->rb_lock)) {
- kfree(waiter);
- ee->waiters = ERR_PTR(-EDEADLK);
- return;
- }
-
- ee->waiters = waiter;
- for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
- struct intel_wait *w = rb_entry(rb, typeof(*w), node);
-
- strcpy(waiter->comm, w->tsk->comm);
- waiter->pid = w->tsk->pid;
- waiter->seqno = w->seqno;
- waiter++;
-
- if (++ee->num_waiters == count)
- break;
- }
- spin_unlock_irq(&b->rb_lock);
-}
-
static void error_record_engine_registers(struct i915_gpu_state *error,
struct intel_engine_cs *engine,
struct drm_i915_error_engine *ee)
@@ -1299,7 +1225,6 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
intel_engine_get_instdone(engine, &ee->instdone);
- ee->waiting = intel_engine_has_waiter(engine);
ee->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
ee->acthd = intel_engine_get_active_head(engine);
ee->seqno = intel_engine_get_seqno(engine);
@@ -1314,7 +1239,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
if (!HWS_NEEDS_PHYSICAL(dev_priv)) {
i915_reg_t mmio;
- if (IS_GEN7(dev_priv)) {
+ if (IS_GEN(dev_priv, 7)) {
switch (engine->id) {
default:
case RCS:
@@ -1330,7 +1255,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
mmio = VEBOX_HWS_PGA_GEN7;
break;
}
- } else if (IS_GEN6(engine->i915)) {
+ } else if (IS_GEN(engine->i915, 6)) {
mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
} else {
/* XXX: gen8 returns to sanity */
@@ -1341,9 +1266,8 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
}
ee->idle = intel_engine_is_idle(engine);
- ee->hangcheck_timestamp = engine->hangcheck.action_timestamp;
- ee->hangcheck_action = engine->hangcheck.action;
- ee->hangcheck_stalled = engine->hangcheck.stalled;
+ if (!ee->idle)
+ ee->hangcheck_timestamp = engine->hangcheck.action_timestamp;
ee->reset_count = i915_reset_engine_count(&dev_priv->gpu_error,
engine);
@@ -1352,10 +1276,10 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
ee->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
- if (IS_GEN6(dev_priv))
+ if (IS_GEN(dev_priv, 6))
ee->vm_info.pp_dir_base =
I915_READ(RING_PP_DIR_BASE_READ(engine));
- else if (IS_GEN7(dev_priv))
+ else if (IS_GEN(dev_priv, 7))
ee->vm_info.pp_dir_base =
I915_READ(RING_PP_DIR_BASE(engine));
else if (INTEL_GEN(dev_priv) >= 8)
@@ -1374,6 +1298,7 @@ static void record_request(struct i915_request *request,
{
struct i915_gem_context *ctx = request->gem_context;
+ erq->flags = request->fence.flags;
erq->context = ctx->hw_id;
erq->sched_attr = request->sched.attr;
erq->ban_score = atomic_read(&ctx->ban_score);
@@ -1549,7 +1474,6 @@ static void gem_record_rings(struct i915_gpu_state *error)
ee->engine_id = i;
error_record_engine_registers(error, engine, ee);
- error_record_engine_waiters(engine, ee);
error_record_engine_execlists(engine, ee);
request = i915_gem_find_active_request(engine);
@@ -1613,14 +1537,17 @@ static void gem_capture_vm(struct i915_gpu_state *error,
int count;
count = 0;
- list_for_each_entry(vma, &vm->active_list, vm_link)
- count++;
+ list_for_each_entry(vma, &vm->bound_list, vm_link)
+ if (i915_vma_is_active(vma))
+ count++;
active_bo = NULL;
if (count)
active_bo = kcalloc(count, sizeof(*active_bo), GFP_ATOMIC);
if (active_bo)
- count = capture_error_bo(active_bo, count, &vm->active_list, false);
+ count = capture_error_bo(active_bo,
+ count, &vm->bound_list,
+ ACTIVE_ONLY);
else
count = 0;
@@ -1658,28 +1585,20 @@ static void capture_pinned_buffers(struct i915_gpu_state *error)
struct i915_address_space *vm = &error->i915->ggtt.vm;
struct drm_i915_error_buffer *bo;
struct i915_vma *vma;
- int count_inactive, count_active;
-
- count_inactive = 0;
- list_for_each_entry(vma, &vm->inactive_list, vm_link)
- count_inactive++;
+ int count;
- count_active = 0;
- list_for_each_entry(vma, &vm->active_list, vm_link)
- count_active++;
+ count = 0;
+ list_for_each_entry(vma, &vm->bound_list, vm_link)
+ count++;
bo = NULL;
- if (count_inactive + count_active)
- bo = kcalloc(count_inactive + count_active,
- sizeof(*bo), GFP_ATOMIC);
+ if (count)
+ bo = kcalloc(count, sizeof(*bo), GFP_ATOMIC);
if (!bo)
return;
- count_inactive = capture_error_bo(bo, count_inactive,
- &vm->active_list, true);
- count_active = capture_error_bo(bo + count_inactive, count_active,
- &vm->inactive_list, true);
- error->pinned_bo_count = count_inactive + count_active;
+ error->pinned_bo_count =
+ capture_error_bo(bo, count, &vm->bound_list, PINNED_ONLY);
error->pinned_bo = bo;
}
@@ -1725,7 +1644,7 @@ static void capture_reg_state(struct i915_gpu_state *error)
error->forcewake = I915_READ_FW(FORCEWAKE_VLV);
}
- if (IS_GEN7(dev_priv))
+ if (IS_GEN(dev_priv, 7))
error->err_int = I915_READ(GEN7_ERR_INT);
if (INTEL_GEN(dev_priv) >= 8) {
@@ -1733,7 +1652,7 @@ static void capture_reg_state(struct i915_gpu_state *error)
error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
}
- if (IS_GEN6(dev_priv)) {
+ if (IS_GEN(dev_priv, 6)) {
error->forcewake = I915_READ_FW(FORCEWAKE);
error->gab_ctl = I915_READ(GAB_CTL);
error->gfx_mode = I915_READ(GFX_MODE);
@@ -1753,7 +1672,7 @@ static void capture_reg_state(struct i915_gpu_state *error)
error->ccid = I915_READ(CCID);
/* 3: Feature specific registers */
- if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
+ if (IS_GEN_RANGE(dev_priv, 6, 7)) {
error->gam_ecochk = I915_READ(GAM_ECOCHK);
error->gac_eco = I915_READ(GAC_ECO_BITS);
}
@@ -1777,7 +1696,7 @@ static void capture_reg_state(struct i915_gpu_state *error)
error->ier = I915_READ(DEIER);
error->gtier[0] = I915_READ(GTIER);
error->ngtier = 1;
- } else if (IS_GEN2(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 2)) {
error->ier = I915_READ16(IER);
} else if (!IS_VALLEYVIEW(dev_priv)) {
error->ier = I915_READ(IER);
@@ -1786,31 +1705,35 @@ static void capture_reg_state(struct i915_gpu_state *error)
error->pgtbl_er = I915_READ(PGTBL_ER);
}
-static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
- struct i915_gpu_state *error,
- u32 engine_mask,
- const char *error_msg)
+static const char *
+error_msg(struct i915_gpu_state *error, unsigned long engines, const char *msg)
{
- u32 ecode;
- int engine_id = -1, len;
+ int len;
+ int i;
- ecode = i915_error_generate_code(dev_priv, error, &engine_id);
+ for (i = 0; i < ARRAY_SIZE(error->engine); i++)
+ if (!error->engine[i].context.pid)
+ engines &= ~BIT(i);
len = scnprintf(error->error_msg, sizeof(error->error_msg),
- "GPU HANG: ecode %d:%d:0x%08x",
- INTEL_GEN(dev_priv), engine_id, ecode);
-
- if (engine_id != -1 && error->engine[engine_id].context.pid)
+ "GPU HANG: ecode %d:%lx:0x%08x",
+ INTEL_GEN(error->i915), engines,
+ i915_error_generate_code(error, engines));
+ if (engines) {
+ /* Just show the first executing process, more is confusing */
+ i = ffs(engines);
len += scnprintf(error->error_msg + len,
sizeof(error->error_msg) - len,
", in %s [%d]",
- error->engine[engine_id].context.comm,
- error->engine[engine_id].context.pid);
+ error->engine[i].context.comm,
+ error->engine[i].context.pid);
+ }
+ if (msg)
+ len += scnprintf(error->error_msg + len,
+ sizeof(error->error_msg) - len,
+ ", %s", msg);
- scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
- ", reason: %s, action: %s",
- error_msg,
- engine_mask ? "reset" : "continue");
+ return error->error_msg;
}
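
error_msg() builds its string by repeatedly doing len += scnprintf(buf + len, sizeof(buf) - len, ...); scnprintf() returns the number of characters actually stored, so len can never run past the buffer. Userspace snprintf() instead returns the length that would have been written, so a portable version of the same append pattern has to clamp, as in this sketch (append() is a made-up helper).

#include <stdio.h>

static size_t append(char *buf, size_t size, size_t len, const char *s)
{
	int n;

	if (len >= size)
		return len;

	n = snprintf(buf + len, size - len, "%s", s);
	if (n < 0)
		return len;
	if ((size_t)n > size - len - 1)
		n = (int)(size - len - 1);	/* clamp: output truncated */
	return len + (size_t)n;
}

int main(void)
{
	char msg[32];
	size_t len = 0;

	len = append(msg, sizeof(msg), len, "GPU HANG: ecode 9:1:0xdeadbeef");
	len = append(msg, sizeof(msg), len, ", in app [1234]");
	printf("%zu: %s\n", len, msg);
	return 0;
}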
static void capture_gen_state(struct i915_gpu_state *error)
@@ -1831,21 +1754,15 @@ static void capture_gen_state(struct i915_gpu_state *error)
memcpy(&error->device_info,
INTEL_INFO(i915),
sizeof(error->device_info));
+ memcpy(&error->runtime_info,
+ RUNTIME_INFO(i915),
+ sizeof(error->runtime_info));
error->driver_caps = i915->caps;
}
-static __always_inline void dup_param(const char *type, void *x)
-{
- if (!__builtin_strcmp(type, "char *"))
- *(void **)x = kstrdup(*(void **)x, GFP_ATOMIC);
-}
-
static void capture_params(struct i915_gpu_state *error)
{
- error->params = i915_modparams;
-#define DUP(T, x, ...) dup_param(#T, &error->params.x);
- I915_PARAMS_FOR_EACH(DUP);
-#undef DUP
+ i915_params_copy(&error->params, &i915_modparams);
}
static unsigned long capture_find_epoch(const struct i915_gpu_state *error)
@@ -1856,7 +1773,7 @@ static unsigned long capture_find_epoch(const struct i915_gpu_state *error)
for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
const struct drm_i915_error_engine *ee = &error->engine[i];
- if (ee->hangcheck_stalled &&
+ if (ee->hangcheck_timestamp &&
time_before(ee->hangcheck_timestamp, epoch))
epoch = ee->hangcheck_timestamp;
}
@@ -1930,7 +1847,7 @@ i915_capture_gpu_state(struct drm_i915_private *i915)
* i915_capture_error_state - capture an error record for later analysis
* @i915: i915 device
* @engine_mask: the mask of engines triggering the hang
- * @error_msg: a message to insert into the error capture header
+ * @msg: a message to insert into the error capture header
*
* Should be called when an error is detected (either a hang or an error
* interrupt) to capture error state from the time of the error. Fills
@@ -1938,8 +1855,8 @@ i915_capture_gpu_state(struct drm_i915_private *i915)
* to pick up.
*/
void i915_capture_error_state(struct drm_i915_private *i915,
- u32 engine_mask,
- const char *error_msg)
+ unsigned long engine_mask,
+ const char *msg)
{
static bool warned;
struct i915_gpu_state *error;
@@ -1955,8 +1872,7 @@ void i915_capture_error_state(struct drm_i915_private *i915,
if (IS_ERR(error))
return;
- i915_error_capture_msg(i915, error, engine_mask, error_msg);
- DRM_INFO("%s\n", error->error_msg);
+ dev_info(i915->drm.dev, "%s\n", error_msg(error, engine_mask, msg));
if (!error->simulated) {
spin_lock_irqsave(&i915->gpu_error.lock, flags);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index ff2652bbb0b0..53b1f22dd365 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -45,6 +45,7 @@ struct i915_gpu_state {
u32 reset_count;
u32 suspend_count;
struct intel_device_info device_info;
+ struct intel_runtime_info runtime_info;
struct intel_driver_caps driver_caps;
struct i915_params params;
@@ -81,11 +82,7 @@ struct i915_gpu_state {
int engine_id;
/* Software tracked state */
bool idle;
- bool waiting;
- int num_waiters;
unsigned long hangcheck_timestamp;
- bool hangcheck_stalled;
- enum intel_engine_hangcheck_action hangcheck_action;
struct i915_address_space *vm;
int num_requests;
u32 reset_count;
@@ -148,6 +145,7 @@ struct i915_gpu_state {
struct drm_i915_error_object *default_state;
struct drm_i915_error_request {
+ unsigned long flags;
long jiffies;
pid_t pid;
u32 context;
@@ -160,12 +158,6 @@ struct i915_gpu_state {
} *requests, execlist[EXECLIST_MAX_PORTS];
unsigned int num_ports;
- struct drm_i915_error_waiter {
- char comm[TASK_COMM_LEN];
- pid_t pid;
- u32 seqno;
- } *waiters;
-
struct {
u32 gfx_mode;
union {
@@ -196,6 +188,8 @@ struct i915_gpu_state {
struct scatterlist *sgl, *fit;
};
+struct i915_gpu_restart;
+
struct i915_gpu_error {
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
@@ -210,8 +204,6 @@ struct i915_gpu_error {
atomic_t pending_fb_pin;
- unsigned long missed_irq_rings;
-
/**
* State variable controlling the reset flow and count
*
@@ -246,15 +238,6 @@ struct i915_gpu_error {
* i915_mutex_lock_interruptible()?). I915_RESET_BACKOFF serves a
* secondary role in preventing two concurrent global reset attempts.
*
- * #I915_RESET_HANDOFF - To perform the actual GPU reset, we need the
- * struct_mutex. We try to acquire the struct_mutex in the reset worker,
- * but it may be held by some long running waiter (that we cannot
- * interrupt without causing trouble). Once we are ready to do the GPU
- * reset, we set the I915_RESET_HANDOFF bit and wakeup any waiters. If
- * they already hold the struct_mutex and want to participate they can
- * inspect the bit and do the reset directly, otherwise the worker
- * waits for the struct_mutex.
- *
* #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to
* acquire the struct_mutex to reset an engine, we need an explicit
* flag to prevent two concurrent reset attempts in the same engine.
@@ -268,19 +251,14 @@ struct i915_gpu_error {
*/
unsigned long flags;
#define I915_RESET_BACKOFF 0
-#define I915_RESET_HANDOFF 1
-#define I915_RESET_MODESET 2
+#define I915_RESET_MODESET 1
+#define I915_RESET_ENGINE 2
#define I915_WEDGED (BITS_PER_LONG - 1)
-#define I915_RESET_ENGINE (I915_WEDGED - I915_NUM_ENGINES)
/** Number of times an engine has been reset */
u32 reset_engine_count[I915_NUM_ENGINES];
- /** Set of stalled engines with guilty requests, in the current reset */
- u32 stalled_mask;
-
- /** Reason for the current *global* reset */
- const char *reason;
+ struct mutex wedge_mutex; /* serialises wedging/unwedging */
/**
* Waitqueue to signal when a hang is detected. Used for waiters
@@ -294,8 +272,7 @@ struct i915_gpu_error {
*/
wait_queue_head_t reset_queue;
- /* For missed irq/seqno simulation. */
- unsigned long test_irq_rings;
+ struct i915_gpu_restart *restart;
};
struct drm_i915_error_state_buf {
@@ -317,7 +294,7 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915);
void i915_capture_error_state(struct drm_i915_private *dev_priv,
- u32 engine_mask,
+ unsigned long engine_mask,
const char *error_msg);
static inline struct i915_gpu_state *
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index e869daf9c8a9..c1007245f46d 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -28,8 +28,8 @@
*/
#include <linux/compat.h>
-#include <drm/drmP.h>
#include <drm/i915_drm.h>
+#include <drm/drm_ioctl.h>
#include "i915_drv.h"
struct drm_i915_getparam32 {
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index d447d7d508f4..441d2674b272 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -31,7 +31,8 @@
#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
-#include <drm/drmP.h>
+#include <drm/drm_irq.h>
+#include <drm/drm_drv.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
@@ -224,10 +225,10 @@ static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
- uint32_t mask,
- uint32_t bits)
+ u32 mask,
+ u32 bits)
{
- uint32_t val;
+ u32 val;
lockdep_assert_held(&dev_priv->irq_lock);
WARN_ON(bits & ~mask);
@@ -251,8 +252,8 @@ i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
* version is also available.
*/
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
- uint32_t mask,
- uint32_t bits)
+ u32 mask,
+ u32 bits)
{
spin_lock_irq(&dev_priv->irq_lock);
i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
@@ -301,10 +302,10 @@ static bool gen11_reset_one_iir(struct drm_i915_private * const i915,
* @enabled_irq_mask: mask of interrupt bits to enable
*/
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
- uint32_t interrupt_mask,
- uint32_t enabled_irq_mask)
+ u32 interrupt_mask,
+ u32 enabled_irq_mask)
{
- uint32_t new_val;
+ u32 new_val;
lockdep_assert_held(&dev_priv->irq_lock);
@@ -331,8 +332,8 @@ void ilk_update_display_irq(struct drm_i915_private *dev_priv,
* @enabled_irq_mask: mask of interrupt bits to enable
*/
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
- uint32_t interrupt_mask,
- uint32_t enabled_irq_mask)
+ u32 interrupt_mask,
+ u32 enabled_irq_mask)
{
lockdep_assert_held(&dev_priv->irq_lock);
@@ -346,13 +347,13 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
}
-void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
{
ilk_update_gt_irq(dev_priv, mask, mask);
POSTING_READ_FW(GTIMR);
}
-void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
{
ilk_update_gt_irq(dev_priv, mask, 0);
}
@@ -391,10 +392,10 @@ static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
* @enabled_irq_mask: mask of interrupt bits to enable
*/
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
- uint32_t interrupt_mask,
- uint32_t enabled_irq_mask)
+ u32 interrupt_mask,
+ u32 enabled_irq_mask)
{
- uint32_t new_val;
+ u32 new_val;
WARN_ON(enabled_irq_mask & ~interrupt_mask);
@@ -578,11 +579,11 @@ void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
* @enabled_irq_mask: mask of interrupt bits to enable
*/
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
- uint32_t interrupt_mask,
- uint32_t enabled_irq_mask)
+ u32 interrupt_mask,
+ u32 enabled_irq_mask)
{
- uint32_t new_val;
- uint32_t old_val;
+ u32 new_val;
+ u32 old_val;
lockdep_assert_held(&dev_priv->irq_lock);
@@ -612,10 +613,10 @@ static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
*/
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
enum pipe pipe,
- uint32_t interrupt_mask,
- uint32_t enabled_irq_mask)
+ u32 interrupt_mask,
+ u32 enabled_irq_mask)
{
- uint32_t new_val;
+ u32 new_val;
lockdep_assert_held(&dev_priv->irq_lock);
@@ -642,10 +643,10 @@ void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
* @enabled_irq_mask: mask of interrupt bits to enable
*/
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
- uint32_t interrupt_mask,
- uint32_t enabled_irq_mask)
+ u32 interrupt_mask,
+ u32 enabled_irq_mask)
{
- uint32_t sdeimr = I915_READ(SDEIMR);
+ u32 sdeimr = I915_READ(SDEIMR);
sdeimr &= ~interrupt_mask;
sdeimr |= (~enabled_irq_mask & interrupt_mask);
@@ -822,11 +823,26 @@ static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ const struct drm_display_mode *mode = &vblank->hwmode;
i915_reg_t high_frame, low_frame;
u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
- const struct drm_display_mode *mode = &dev->vblank[pipe].hwmode;
unsigned long irqflags;
+ /*
+ * On i965gm TV output the frame counter only works up to
+ * the point when we enable the TV encoder. After that the
+ * frame counter ceases to work and reads zero. We need a
+ * vblank wait before enabling the TV encoder and so we
+ * have to enable vblank interrupts while the frame counter
+ * is still in a working state. However the core vblank code
+ * does not like us returning non-zero frame counter values
+ * when we've told it that we don't have a working frame
+ * counter. Thus we must stop non-zero values leaking out.
+ */
+ if (!vblank->max_vblank_count)
+ return 0;
+
htotal = mode->crtc_htotal;
hsync_start = mode->crtc_hsync_start;
vbl_start = mode->crtc_vblank_start;
@@ -950,7 +966,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
vtotal /= 2;
- if (IS_GEN2(dev_priv))
+ if (IS_GEN(dev_priv, 2))
position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
else
position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
@@ -998,6 +1014,9 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
int position;
int vbl_start, vbl_end, hsync_start, htotal, vtotal;
unsigned long irqflags;
+ bool use_scanline_counter = INTEL_GEN(dev_priv) >= 5 ||
+ IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) ||
+ mode->private_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;
if (WARN_ON(!mode->crtc_clock)) {
DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
@@ -1030,7 +1049,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
if (stime)
*stime = ktime_get();
- if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
+ if (use_scanline_counter) {
/* No obvious pixelcount register. Only query vertical
* scanout position from Display scan line register.
*/
@@ -1090,7 +1109,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
else
position += vtotal - vbl_end;
- if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
+ if (use_scanline_counter) {
*vpos = position;
*hpos = 0;
} else {
@@ -1152,76 +1171,6 @@ static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
return;
}
-static void notify_ring(struct intel_engine_cs *engine)
-{
- const u32 seqno = intel_engine_get_seqno(engine);
- struct i915_request *rq = NULL;
- struct task_struct *tsk = NULL;
- struct intel_wait *wait;
-
- if (unlikely(!engine->breadcrumbs.irq_armed))
- return;
-
- rcu_read_lock();
-
- spin_lock(&engine->breadcrumbs.irq_lock);
- wait = engine->breadcrumbs.irq_wait;
- if (wait) {
- /*
- * We use a callback from the dma-fence to submit
- * requests after waiting on our own requests. To
- * ensure minimum delay in queuing the next request to
- * hardware, signal the fence now rather than wait for
- * the signaler to be woken up. We still wake up the
- * waiter in order to handle the irq-seqno coherency
- * issues (we may receive the interrupt before the
- * seqno is written, see __i915_request_irq_complete())
- * and to handle coalescing of multiple seqno updates
- * and many waiters.
- */
- if (i915_seqno_passed(seqno, wait->seqno)) {
- struct i915_request *waiter = wait->request;
-
- if (waiter &&
- !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
- &waiter->fence.flags) &&
- intel_wait_check_request(wait, waiter))
- rq = i915_request_get(waiter);
-
- tsk = wait->tsk;
- } else {
- if (engine->irq_seqno_barrier &&
- i915_seqno_passed(seqno, wait->seqno - 1)) {
- set_bit(ENGINE_IRQ_BREADCRUMB,
- &engine->irq_posted);
- tsk = wait->tsk;
- }
- }
-
- engine->breadcrumbs.irq_count++;
- } else {
- if (engine->breadcrumbs.irq_armed)
- __intel_engine_disarm_breadcrumbs(engine);
- }
- spin_unlock(&engine->breadcrumbs.irq_lock);
-
- if (rq) {
- spin_lock(&rq->lock);
- dma_fence_signal_locked(&rq->fence);
- GEM_BUG_ON(!i915_request_completed(rq));
- spin_unlock(&rq->lock);
-
- i915_request_put(rq);
- }
-
- if (tsk && tsk->state & TASK_NORMAL)
- wake_up_process(tsk);
-
- rcu_read_unlock();
-
- trace_intel_engine_notify(engine, wait);
-}
-
static void vlv_c0_read(struct drm_i915_private *dev_priv,
struct intel_rps_ei *ei)
{
@@ -1376,8 +1325,8 @@ static void ivybridge_parity_work(struct work_struct *work)
container_of(work, typeof(*dev_priv), l3_parity.error_work);
u32 error_status, row, bank, subbank;
char *parity_event[6];
- uint32_t misccpctl;
- uint8_t slice = 0;
+ u32 misccpctl;
+ u8 slice = 0;
/* We must turn off DOP level clock gating to access the L3 registers.
* In order to prevent a get/put style interface, acquire struct mutex
@@ -1466,20 +1415,20 @@ static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 gt_iir)
{
if (gt_iir & GT_RENDER_USER_INTERRUPT)
- notify_ring(dev_priv->engine[RCS]);
+ intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);
if (gt_iir & ILK_BSD_USER_INTERRUPT)
- notify_ring(dev_priv->engine[VCS]);
+ intel_engine_breadcrumbs_irq(dev_priv->engine[VCS]);
}
static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 gt_iir)
{
if (gt_iir & GT_RENDER_USER_INTERRUPT)
- notify_ring(dev_priv->engine[RCS]);
+ intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);
if (gt_iir & GT_BSD_USER_INTERRUPT)
- notify_ring(dev_priv->engine[VCS]);
+ intel_engine_breadcrumbs_irq(dev_priv->engine[VCS]);
if (gt_iir & GT_BLT_USER_INTERRUPT)
- notify_ring(dev_priv->engine[BCS]);
+ intel_engine_breadcrumbs_irq(dev_priv->engine[BCS]);
if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
GT_BSD_CS_ERROR_INTERRUPT |
@@ -1499,7 +1448,7 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
tasklet = true;
if (iir & GT_RENDER_USER_INTERRUPT) {
- notify_ring(engine);
+ intel_engine_breadcrumbs_irq(engine);
tasklet |= USES_GUC_SUBMISSION(engine->i915);
}
@@ -1738,13 +1687,13 @@ static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
enum pipe pipe,
- uint32_t crc0, uint32_t crc1,
- uint32_t crc2, uint32_t crc3,
- uint32_t crc4)
+ u32 crc0, u32 crc1,
+ u32 crc2, u32 crc3,
+ u32 crc4)
{
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- uint32_t crcs[5];
+ u32 crcs[5];
spin_lock(&pipe_crc->lock);
/*
@@ -1776,9 +1725,9 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
enum pipe pipe,
- uint32_t crc0, uint32_t crc1,
- uint32_t crc2, uint32_t crc3,
- uint32_t crc4) {}
+ u32 crc0, u32 crc1,
+ u32 crc2, u32 crc3,
+ u32 crc4) {}
#endif
@@ -1804,7 +1753,7 @@ static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- uint32_t res1, res2;
+ u32 res1, res2;
if (INTEL_GEN(dev_priv) >= 3)
res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
@@ -1845,7 +1794,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
if (HAS_VEBOX(dev_priv)) {
if (pm_iir & PM_VEBOX_USER_INTERRUPT)
- notify_ring(dev_priv->engine[VECS]);
+ intel_engine_breadcrumbs_irq(dev_priv->engine[VECS]);
if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
@@ -2547,7 +2496,7 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
I915_WRITE(SDEIIR, pch_iir);
}
- if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT)
+ if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
ironlake_rps_change_irq_handler(dev_priv);
}
@@ -2938,46 +2887,6 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-struct wedge_me {
- struct delayed_work work;
- struct drm_i915_private *i915;
- const char *name;
-};
-
-static void wedge_me(struct work_struct *work)
-{
- struct wedge_me *w = container_of(work, typeof(*w), work.work);
-
- dev_err(w->i915->drm.dev,
- "%s timed out, cancelling all in-flight rendering.\n",
- w->name);
- i915_gem_set_wedged(w->i915);
-}
-
-static void __init_wedge(struct wedge_me *w,
- struct drm_i915_private *i915,
- long timeout,
- const char *name)
-{
- w->i915 = i915;
- w->name = name;
-
- INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me);
- schedule_delayed_work(&w->work, timeout);
-}
-
-static void __fini_wedge(struct wedge_me *w)
-{
- cancel_delayed_work_sync(&w->work);
- destroy_delayed_work_on_stack(&w->work);
- w->i915 = NULL;
-}
-
-#define i915_wedge_on_timeout(W, DEV, TIMEOUT) \
- for (__init_wedge((W), (DEV), (TIMEOUT), __func__); \
- (W)->i915; \
- __fini_wedge((W)))
-
static u32
gen11_gt_engine_identity(struct drm_i915_private * const i915,
const unsigned int bank, const unsigned int bit)
@@ -3188,203 +3097,6 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-static void i915_reset_device(struct drm_i915_private *dev_priv,
- u32 engine_mask,
- const char *reason)
-{
- struct i915_gpu_error *error = &dev_priv->gpu_error;
- struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
- char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
- char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
- char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
- struct wedge_me w;
-
- kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
-
- DRM_DEBUG_DRIVER("resetting chip\n");
- kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
-
- /* Use a watchdog to ensure that our reset completes */
- i915_wedge_on_timeout(&w, dev_priv, 5*HZ) {
- intel_prepare_reset(dev_priv);
-
- error->reason = reason;
- error->stalled_mask = engine_mask;
-
- /* Signal that locked waiters should reset the GPU */
- smp_mb__before_atomic();
- set_bit(I915_RESET_HANDOFF, &error->flags);
- wake_up_all(&error->wait_queue);
-
- /* Wait for anyone holding the lock to wakeup, without
- * blocking indefinitely on struct_mutex.
- */
- do {
- if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
- i915_reset(dev_priv, engine_mask, reason);
- mutex_unlock(&dev_priv->drm.struct_mutex);
- }
- } while (wait_on_bit_timeout(&error->flags,
- I915_RESET_HANDOFF,
- TASK_UNINTERRUPTIBLE,
- 1));
-
- error->stalled_mask = 0;
- error->reason = NULL;
-
- intel_finish_reset(dev_priv);
- }
-
- if (!test_bit(I915_WEDGED, &error->flags))
- kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
-}
-
-void i915_clear_error_registers(struct drm_i915_private *dev_priv)
-{
- u32 eir;
-
- if (!IS_GEN2(dev_priv))
- I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER));
-
- if (INTEL_GEN(dev_priv) < 4)
- I915_WRITE(IPEIR, I915_READ(IPEIR));
- else
- I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965));
-
- I915_WRITE(EIR, I915_READ(EIR));
- eir = I915_READ(EIR);
- if (eir) {
- /*
- * some errors might have become stuck,
- * mask them.
- */
- DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
- I915_WRITE(EMR, I915_READ(EMR) | eir);
- I915_WRITE(IIR, I915_MASTER_ERROR_INTERRUPT);
- }
-
- if (INTEL_GEN(dev_priv) >= 8) {
- I915_WRITE(GEN8_RING_FAULT_REG,
- I915_READ(GEN8_RING_FAULT_REG) & ~RING_FAULT_VALID);
- POSTING_READ(GEN8_RING_FAULT_REG);
- } else if (INTEL_GEN(dev_priv) >= 6) {
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- for_each_engine(engine, dev_priv, id) {
- I915_WRITE(RING_FAULT_REG(engine),
- I915_READ(RING_FAULT_REG(engine)) &
- ~RING_FAULT_VALID);
- }
- POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
- }
-}
-
-/**
- * i915_handle_error - handle a gpu error
- * @dev_priv: i915 device private
- * @engine_mask: mask representing engines that are hung
- * @flags: control flags
- * @fmt: Error message format string
- *
- * Do some basic checking of register state at error time and
- * dump it to the syslog. Also call i915_capture_error_state() to make
- * sure we get a record and make it available in debugfs. Fire a uevent
- * so userspace knows something bad happened (should trigger collection
- * of a ring dump etc.).
- */
-void i915_handle_error(struct drm_i915_private *dev_priv,
- u32 engine_mask,
- unsigned long flags,
- const char *fmt, ...)
-{
- struct intel_engine_cs *engine;
- unsigned int tmp;
- char error_msg[80];
- char *msg = NULL;
-
- if (fmt) {
- va_list args;
-
- va_start(args, fmt);
- vscnprintf(error_msg, sizeof(error_msg), fmt, args);
- va_end(args);
-
- msg = error_msg;
- }
-
- /*
- * In most cases it's guaranteed that we get here with an RPM
- * reference held, for example because there is a pending GPU
- * request that won't finish until the reset is done. This
- * isn't the case at least when we get here by doing a
- * simulated reset via debugfs, so get an RPM reference.
- */
- intel_runtime_pm_get(dev_priv);
-
- engine_mask &= INTEL_INFO(dev_priv)->ring_mask;
-
- if (flags & I915_ERROR_CAPTURE) {
- i915_capture_error_state(dev_priv, engine_mask, msg);
- i915_clear_error_registers(dev_priv);
- }
-
- /*
- * Try engine reset when available. We fall back to full reset if
- * single reset fails.
- */
- if (intel_has_reset_engine(dev_priv) &&
- !i915_terminally_wedged(&dev_priv->gpu_error)) {
- for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
- BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
- if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
- &dev_priv->gpu_error.flags))
- continue;
-
- if (i915_reset_engine(engine, msg) == 0)
- engine_mask &= ~intel_engine_flag(engine);
-
- clear_bit(I915_RESET_ENGINE + engine->id,
- &dev_priv->gpu_error.flags);
- wake_up_bit(&dev_priv->gpu_error.flags,
- I915_RESET_ENGINE + engine->id);
- }
- }
-
- if (!engine_mask)
- goto out;
-
- /* Full reset needs the mutex, stop any other user trying to do so. */
- if (test_and_set_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) {
- wait_event(dev_priv->gpu_error.reset_queue,
- !test_bit(I915_RESET_BACKOFF,
- &dev_priv->gpu_error.flags));
- goto out;
- }
-
- /* Prevent any other reset-engine attempt. */
- for_each_engine(engine, dev_priv, tmp) {
- while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
- &dev_priv->gpu_error.flags))
- wait_on_bit(&dev_priv->gpu_error.flags,
- I915_RESET_ENGINE + engine->id,
- TASK_UNINTERRUPTIBLE);
- }
-
- i915_reset_device(dev_priv, engine_mask, msg);
-
- for_each_engine(engine, dev_priv, tmp) {
- clear_bit(I915_RESET_ENGINE + engine->id,
- &dev_priv->gpu_error.flags);
- }
-
- clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags);
- wake_up_all(&dev_priv->gpu_error.reset_queue);
-
-out:
- intel_runtime_pm_put(dev_priv);
-}
-
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
@@ -3417,7 +3129,7 @@ static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
- uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
+ u32 bit = INTEL_GEN(dev_priv) >= 7 ?
DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -3479,7 +3191,7 @@ static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
- uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
+ u32 bit = INTEL_GEN(dev_priv) >= 7 ?
DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -3586,11 +3298,8 @@ static void ironlake_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- if (IS_GEN5(dev_priv))
- I915_WRITE(HWSTAM, 0xffffffff);
-
GEN3_IRQ_RESET(DE);
- if (IS_GEN7(dev_priv))
+ if (IS_GEN(dev_priv, 7))
I915_WRITE(GEN7_ERR_INT, 0xffffffff);
if (IS_HASWELL(dev_priv)) {
@@ -3700,7 +3409,7 @@ static void gen11_irq_reset(struct drm_device *dev)
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
u8 pipe_mask)
{
- uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
+ u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
enum pipe pipe;
spin_lock_irq(&dev_priv->irq_lock);
@@ -4045,7 +3754,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
}
gt_irqs |= GT_RENDER_USER_INTERRUPT;
- if (IS_GEN5(dev_priv)) {
+ if (IS_GEN(dev_priv, 5)) {
gt_irqs |= ILK_BSD_USER_INTERRUPT;
} else {
gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
@@ -4169,7 +3878,7 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
/* These are interrupts we'll toggle with the ring mask register */
- uint32_t gt_interrupts[] = {
+ u32 gt_interrupts[] = {
GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
@@ -4183,9 +3892,6 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
};
- if (HAS_L3_DPF(dev_priv))
- gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
-
dev_priv->pm_ier = 0x0;
dev_priv->pm_imr = ~dev_priv->pm_ier;
GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
@@ -4200,8 +3906,8 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
- uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
- uint32_t de_pipe_enables;
+ u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
+ u32 de_pipe_enables;
u32 de_port_masked = GEN8_AUX_CHANNEL_A;
u32 de_port_enables;
u32 de_misc_masked = GEN8_DE_EDP_PSR;
@@ -4341,6 +4047,7 @@ static int gen11_irq_postinstall(struct drm_device *dev)
I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
gen11_master_intr_enable(dev_priv->regs);
+ POSTING_READ(GEN11_GFX_MSTR_IRQ);
return 0;
}
@@ -4368,8 +4075,6 @@ static void i8xx_irq_reset(struct drm_device *dev)
i9xx_pipestat_irq_reset(dev_priv);
- I915_WRITE16(HWSTAM, 0xffff);
-
GEN2_IRQ_RESET();
}
@@ -4513,7 +4218,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
I915_WRITE16(IIR, iir);
if (iir & I915_USER_INTERRUPT)
- notify_ring(dev_priv->engine[RCS]);
+ intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -4537,8 +4242,6 @@ static void i915_irq_reset(struct drm_device *dev)
i9xx_pipestat_irq_reset(dev_priv);
- I915_WRITE(HWSTAM, 0xffffffff);
-
GEN3_IRQ_RESET();
}
@@ -4623,7 +4326,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
I915_WRITE(IIR, iir);
if (iir & I915_USER_INTERRUPT)
- notify_ring(dev_priv->engine[RCS]);
+ intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -4648,8 +4351,6 @@ static void i965_irq_reset(struct drm_device *dev)
i9xx_pipestat_irq_reset(dev_priv);
- I915_WRITE(HWSTAM, 0xffffffff);
-
GEN3_IRQ_RESET();
}
@@ -4770,10 +4471,10 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
I915_WRITE(IIR, iir);
if (iir & I915_USER_INTERRUPT)
- notify_ring(dev_priv->engine[RCS]);
+ intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);
if (iir & I915_BSD_USER_INTERRUPT)
- notify_ring(dev_priv->engine[VCS]);
+ intel_engine_breadcrumbs_irq(dev_priv->engine[VCS]);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -4836,23 +4537,17 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
if (INTEL_GEN(dev_priv) >= 8)
rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
- if (IS_GEN2(dev_priv)) {
- /* Gen2 doesn't have a hardware frame counter */
- dev->max_vblank_count = 0;
- } else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
- dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
+ if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
dev->driver->get_vblank_counter = g4x_get_vblank_counter;
- } else {
+ else if (INTEL_GEN(dev_priv) >= 3)
dev->driver->get_vblank_counter = i915_get_vblank_counter;
- dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
- }
/*
* Opt out of the vblank disable timer on everything except gen2.
* Gen2 doesn't have a hardware frame counter and so depends on
* vblank interrupts to produce sane vblank sequence numbers.
*/
- if (!IS_GEN2(dev_priv))
+ if (!IS_GEN(dev_priv, 2))
dev->vblank_disable_immediate = true;
/* Most platforms treat the display irq block as an always-on
@@ -4924,14 +4619,14 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->disable_vblank = ironlake_disable_vblank;
dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
} else {
- if (IS_GEN2(dev_priv)) {
+ if (IS_GEN(dev_priv, 2)) {
dev->driver->irq_preinstall = i8xx_irq_reset;
dev->driver->irq_postinstall = i8xx_irq_postinstall;
dev->driver->irq_handler = i8xx_irq_handler;
dev->driver->irq_uninstall = i8xx_irq_reset;
dev->driver->enable_vblank = i8xx_enable_vblank;
dev->driver->disable_vblank = i8xx_disable_vblank;
- } else if (IS_GEN3(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 3)) {
dev->driver->irq_preinstall = i915_irq_reset;
dev->driver->irq_postinstall = i915_irq_postinstall;
dev->driver->irq_uninstall = i915_irq_reset;
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 2e0356561839..b5be0abbba35 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -77,7 +77,7 @@ i915_param_named(error_capture, bool, 0600,
"triaging and debugging hangs.");
#endif
-i915_param_named_unsafe(enable_hangcheck, bool, 0644,
+i915_param_named_unsafe(enable_hangcheck, bool, 0600,
"Periodically check GPU activity for detecting hangs. "
"WARNING: Disabling this can cause system wide hangs. "
"(default: true)");
@@ -97,8 +97,10 @@ i915_param_named_unsafe(disable_power_well, int, 0400,
i915_param_named_unsafe(enable_ips, int, 0600, "Enable IPS (default: true)");
-i915_param_named(fastboot, bool, 0600,
- "Try to skip unnecessary mode sets at boot time (default: false)");
+i915_param_named(fastboot, int, 0600,
+ "Try to skip unnecessary mode sets at boot time "
+ "(0=disabled, 1=enabled) "
+ "Default: -1 (use per-chip default)");
i915_param_named_unsafe(prefault_disable, bool, 0600,
"Disable page prefaulting for pread/pwrite/reloc (default:false). "
@@ -203,3 +205,33 @@ void i915_params_dump(const struct i915_params *params, struct drm_printer *p)
I915_PARAMS_FOR_EACH(PRINT);
#undef PRINT
}
+
+static __always_inline void dup_param(const char *type, void *x)
+{
+ if (!__builtin_strcmp(type, "char *"))
+ *(void **)x = kstrdup(*(void **)x, GFP_ATOMIC);
+}
+
+void i915_params_copy(struct i915_params *dest, const struct i915_params *src)
+{
+ *dest = *src;
+#define DUP(T, x, ...) dup_param(#T, &dest->x);
+ I915_PARAMS_FOR_EACH(DUP);
+#undef DUP
+}
+
+static __always_inline void free_param(const char *type, void *x)
+{
+ if (!__builtin_strcmp(type, "char *")) {
+ kfree(*(void **)x);
+ *(void **)x = NULL;
+ }
+}
+
+/* free the allocated members, *not* the passed in params itself */
+void i915_params_free(struct i915_params *params)
+{
+#define FREE(T, x, ...) free_param(#T, &params->x);
+ I915_PARAMS_FOR_EACH(FREE);
+#undef FREE
+}
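A brief illustration of the X-macro pattern the new copy/free helpers above rely on: I915_PARAMS_FOR_EACH() expands a caller-supplied macro once per parameter with (type, name, default), so the struct members, the deep copy and the free path are all generated from one list. The sketch below is a standalone userspace analogue, not part of the patch; the names are hypothetical and strdup()/free() stand in for kstrdup()/kfree(), with strcmp() in place of the compile-time __builtin_strcmp() used above.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical parameter list in the same X-macro style as I915_PARAMS_FOR_EACH. */
#define EXAMPLE_PARAMS_FOR_EACH(param) \
	param(char *, vbt_firmware, NULL) \
	param(int, modeset, -1) \
	param(bool, enable_hangcheck, true)

struct example_params {
#define MEMBER(T, x, ...) T x;
	EXAMPLE_PARAMS_FOR_EACH(MEMBER)
#undef MEMBER
};

/* Deep-copies only the "char *" members, mirroring dup_param() above. */
static void dup_string_param(const char *type, void *x)
{
	if (!strcmp(type, "char *"))
		*(char **)x = *(char **)x ? strdup(*(char **)x) : NULL;
}

static void example_params_copy(struct example_params *dst,
				const struct example_params *src)
{
	*dst = *src;			/* shallow copy of every member */
#define DUP(T, x, ...) dup_string_param(#T, &dst->x);
	EXAMPLE_PARAMS_FOR_EACH(DUP)	/* then duplicate the string members */
#undef DUP
}

int main(void)
{
	struct example_params a = { .vbt_firmware = "fw.bin", .modeset = 1,
				    .enable_hangcheck = true };
	struct example_params b;

	example_params_copy(&b, &a);
	printf("%s %d %d\n", b.vbt_firmware, b.modeset, b.enable_hangcheck);
	free(b.vbt_firmware);
	return 0;
}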
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 7e56c516c815..3f14e9881a0d 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -33,6 +33,15 @@ struct drm_printer;
#define ENABLE_GUC_SUBMISSION BIT(0)
#define ENABLE_GUC_LOAD_HUC BIT(1)
+/*
+ * Invoke param, a function-like macro, for each i915 param, with arguments:
+ *
+ * param(type, name, value)
+ *
+ * type: parameter type, one of {bool, int, unsigned int, char *}
+ * name: name of the parameter
+ * value: initial/default value of the parameter
+ */
#define I915_PARAMS_FOR_EACH(param) \
param(char *, vbt_firmware, NULL) \
param(int, modeset, -1) \
@@ -54,10 +63,10 @@ struct drm_printer;
param(int, edp_vswing, 0) \
param(int, reset, 2) \
param(unsigned int, inject_load_failure, 0) \
+ param(int, fastboot, -1) \
/* leave bools at the end to not create holes */ \
param(bool, alpha_support, IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT)) \
param(bool, enable_hangcheck, true) \
- param(bool, fastboot, false) \
param(bool, prefault_disable, false) \
param(bool, load_detect_test, false) \
param(bool, force_reset_modeset_test, false) \
@@ -78,6 +87,8 @@ struct i915_params {
extern struct i915_params i915_modparams __read_mostly;
void i915_params_dump(const struct i915_params *params, struct drm_printer *p);
+void i915_params_copy(struct i915_params *dest, const struct i915_params *src);
+void i915_params_free(struct i915_params *params);
#endif
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 6350db5503cd..66f82f3f050f 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -26,6 +26,9 @@
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
+#include <drm/drm_drv.h>
+
+#include "i915_active.h"
#include "i915_drv.h"
#include "i915_selftest.h"
@@ -67,9 +70,15 @@
#define BDW_COLORS \
.color = { .degamma_lut_size = 512, .gamma_lut_size = 512 }
#define CHV_COLORS \
- .color = { .degamma_lut_size = 65, .gamma_lut_size = 257 }
+ .color = { .degamma_lut_size = 65, .gamma_lut_size = 257, \
+ .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
+ .gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
+ }
#define GLK_COLORS \
- .color = { .degamma_lut_size = 0, .gamma_lut_size = 1024 }
+ .color = { .degamma_lut_size = 0, .gamma_lut_size = 1024, \
+ .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \
+ DRM_COLOR_LUT_EQUAL_CHANNELS, \
+ }
/* Keep in gen based order, and chronological order within a gen */
@@ -81,7 +90,8 @@
.num_pipes = 1, \
.display.has_overlay = 1, \
.display.overlay_needs_physical = 1, \
- .display.has_gmch_display = 1, \
+ .display.has_gmch = 1, \
+ .gpu_reset_clobbers_display = true, \
.hws_needs_physical = 1, \
.unfenced_needs_alignment = 1, \
.ring_mask = RENDER_RING, \
@@ -121,7 +131,8 @@ static const struct intel_device_info intel_i865g_info = {
#define GEN3_FEATURES \
GEN(3), \
.num_pipes = 2, \
- .display.has_gmch_display = 1, \
+ .display.has_gmch = 1, \
+ .gpu_reset_clobbers_display = true, \
.ring_mask = RENDER_RING, \
.has_snoop = true, \
.has_coherent_ggtt = true, \
@@ -197,7 +208,8 @@ static const struct intel_device_info intel_pineview_info = {
GEN(4), \
.num_pipes = 2, \
.display.has_hotplug = 1, \
- .display.has_gmch_display = 1, \
+ .display.has_gmch = 1, \
+ .gpu_reset_clobbers_display = true, \
.ring_mask = RENDER_RING, \
.has_snoop = true, \
.has_coherent_ggtt = true, \
@@ -228,6 +240,7 @@ static const struct intel_device_info intel_g45_info = {
GEN4_FEATURES,
PLATFORM(INTEL_G45),
.ring_mask = RENDER_RING | BSD_RING,
+ .gpu_reset_clobbers_display = false,
};
static const struct intel_device_info intel_gm45_info = {
@@ -237,6 +250,7 @@ static const struct intel_device_info intel_gm45_info = {
.display.has_fbc = 1,
.display.supports_tv = 1,
.ring_mask = RENDER_RING | BSD_RING,
+ .gpu_reset_clobbers_display = false,
};
#define GEN5_FEATURES \
@@ -370,7 +384,7 @@ static const struct intel_device_info intel_valleyview_info = {
.num_pipes = 2,
.has_runtime_pm = 1,
.has_rc6 = 1,
- .display.has_gmch_display = 1,
+ .display.has_gmch = 1,
.display.has_hotplug = 1,
.ppgtt = INTEL_PPGTT_FULL,
.has_snoop = true,
@@ -462,7 +476,7 @@ static const struct intel_device_info intel_cherryview_info = {
.has_runtime_pm = 1,
.has_rc6 = 1,
.has_logical_ring_contexts = 1,
- .display.has_gmch_display = 1,
+ .display.has_gmch = 1,
.ppgtt = INTEL_PPGTT_FULL,
.has_reset_engine = 1,
.has_snoop = true,
@@ -532,7 +546,6 @@ static const struct intel_device_info intel_skylake_gt4_info = {
.display.has_fbc = 1, \
.display.has_psr = 1, \
.has_runtime_pm = 1, \
- .has_pooled_eu = 0, \
.display.has_csr = 1, \
.has_rc6 = 1, \
.display.has_dp_mst = 1, \
@@ -701,6 +714,7 @@ static const struct pci_device_id pciidlist[] = {
INTEL_AML_KBL_GT2_IDS(&intel_kabylake_gt2_info),
INTEL_CFL_S_GT1_IDS(&intel_coffeelake_gt1_info),
INTEL_CFL_S_GT2_IDS(&intel_coffeelake_gt2_info),
+ INTEL_CFL_H_GT1_IDS(&intel_coffeelake_gt1_info),
INTEL_CFL_H_GT2_IDS(&intel_coffeelake_gt2_info),
INTEL_CFL_U_GT2_IDS(&intel_coffeelake_gt2_info),
INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info),
@@ -787,6 +801,8 @@ static int __init i915_init(void)
bool use_kms = true;
int err;
+ i915_global_active_init();
+
err = i915_mock_selftests();
if (err)
return err > 0 ? 0 : err;
@@ -818,6 +834,7 @@ static void __exit i915_exit(void)
return;
pci_unregister_driver(&i915_pci_driver);
+ i915_global_active_exit();
}
module_init(i915_init);
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 2b2eb57ca71f..9ebf99f3d8d3 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1365,7 +1365,7 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
free_oa_buffer(dev_priv);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, stream->wakeref);
if (stream->ctx)
oa_put_render_ctx_id(stream);
@@ -1677,6 +1677,11 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
CTX_REG(reg_state, state_offset, flex_regs[i], value);
}
+
+ CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
+ gen8_make_rpcs(dev_priv,
+ &to_intel_context(ctx,
+ dev_priv->engine[RCS])->sseu));
}
/*
@@ -1796,7 +1801,7 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream)
* be read back from automatically triggered reports, as part of the
* RPT_ID field.
*/
- if (IS_GEN(dev_priv, 9, 11)) {
+ if (IS_GEN_RANGE(dev_priv, 9, 11)) {
I915_WRITE(GEN8_OA_DEBUG,
_MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
@@ -2087,7 +2092,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
* In our case we are expecting that taking pm + FORCEWAKE
* references will effectively disable RC6.
*/
- intel_runtime_pm_get(dev_priv);
+ stream->wakeref = intel_runtime_pm_get(dev_priv);
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
ret = alloc_oa_buffer(dev_priv);
@@ -2098,21 +2103,21 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
if (ret)
goto err_lock;
+ stream->ops = &i915_oa_stream_ops;
+ dev_priv->perf.oa.exclusive_stream = stream;
+
ret = dev_priv->perf.oa.ops.enable_metric_set(stream);
if (ret) {
DRM_DEBUG("Unable to enable metric set\n");
goto err_enable;
}
- stream->ops = &i915_oa_stream_ops;
-
- dev_priv->perf.oa.exclusive_stream = stream;
-
mutex_unlock(&dev_priv->drm.struct_mutex);
return 0;
err_enable:
+ dev_priv->perf.oa.exclusive_stream = NULL;
dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -2123,7 +2128,7 @@ err_oa_buf_alloc:
put_oa_config(dev_priv, stream->oa_config);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, stream->wakeref);
err_config:
if (stream->ctx)
@@ -2646,7 +2651,7 @@ err:
static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent)
{
return div64_u64(1000000000ULL * (2ULL << exponent),
- 1000ULL * INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz);
+ 1000ULL * RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
}
/**
@@ -3021,7 +3026,7 @@ static bool chv_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
(addr >= 0x182300 && addr <= 0x1823A4);
}
-static uint32_t mask_reg_value(u32 reg, u32 val)
+static u32 mask_reg_value(u32 reg, u32 val)
{
/* HALF_SLICE_CHICKEN2 is programmed with the
* WaDisableSTUnitPowerOptimization workaround. Make sure the value
@@ -3415,7 +3420,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
dev_priv->perf.oa.ops.read = gen8_oa_read;
dev_priv->perf.oa.ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
- if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv)) {
+ if (IS_GEN_RANGE(dev_priv, 8, 9)) {
dev_priv->perf.oa.ops.is_valid_b_counter_reg =
gen7_is_valid_b_counter_addr;
dev_priv->perf.oa.ops.is_valid_mux_reg =
@@ -3431,7 +3436,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
dev_priv->perf.oa.ops.disable_metric_set = gen8_disable_metric_set;
- if (IS_GEN8(dev_priv)) {
+ if (IS_GEN(dev_priv, 8)) {
dev_priv->perf.oa.ctx_oactxctrl_offset = 0x120;
dev_priv->perf.oa.ctx_flexeu0_offset = 0x2ce;
@@ -3442,7 +3447,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);
}
- } else if (IS_GEN(dev_priv, 10, 11)) {
+ } else if (IS_GEN_RANGE(dev_priv, 10, 11)) {
dev_priv->perf.oa.ops.is_valid_b_counter_reg =
gen7_is_valid_b_counter_addr;
dev_priv->perf.oa.ops.is_valid_mux_reg =
@@ -3471,7 +3476,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
spin_lock_init(&dev_priv->perf.oa.oa_buffer.ptr_lock);
oa_sample_rate_hard_limit = 1000 *
- (INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz / 2);
+ (RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz / 2);
dev_priv->perf.sysctl_header = register_sysctl_table(dev_root);
mutex_init(&dev_priv->perf.metrics_lock);
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index cf7c66bb3ed9..b745c49a5af6 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -168,6 +168,7 @@ engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
bool fw = false;
if ((dev_priv->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
@@ -176,7 +177,8 @@ engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
if (!dev_priv->gt.awake)
return;
- if (!intel_runtime_pm_get_if_in_use(dev_priv))
+ wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
+ if (!wakeref)
return;
for_each_engine(engine, dev_priv, id) {
@@ -211,7 +213,7 @@ engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
if (fw)
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
}
static void
@@ -228,11 +230,12 @@ frequency_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
u32 val;
val = dev_priv->gt_pm.rps.cur_freq;
- if (dev_priv->gt.awake &&
- intel_runtime_pm_get_if_in_use(dev_priv)) {
- val = intel_get_cagf(dev_priv,
- I915_READ_NOTRACE(GEN6_RPSTAT1));
- intel_runtime_pm_put(dev_priv);
+ if (dev_priv->gt.awake) {
+ intel_wakeref_t wakeref;
+
+ with_intel_runtime_pm_if_in_use(dev_priv, wakeref)
+ val = intel_get_cagf(dev_priv,
+ I915_READ_NOTRACE(GEN6_RPSTAT1));
}
add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
@@ -444,12 +447,14 @@ static u64 __get_rc6(struct drm_i915_private *i915)
static u64 get_rc6(struct drm_i915_private *i915)
{
#if IS_ENABLED(CONFIG_PM)
+ intel_wakeref_t wakeref;
unsigned long flags;
u64 val;
- if (intel_runtime_pm_get_if_in_use(i915)) {
+ wakeref = intel_runtime_pm_get_if_in_use(i915);
+ if (wakeref) {
val = __get_rc6(i915);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
/*
* If we are coming back from being runtime suspended we must
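The perf and pmu hunks above convert bare intel_runtime_pm_get*()/put() calls to a cookie-based scheme: the get returns an intel_wakeref_t that the matching put consumes, and with_intel_runtime_pm_if_in_use() scopes the pair around a block. The toy below is only a sketch of that pattern under invented names, not the i915 implementation; it shows why a non-zero cookie doubles as the "reference was taken" test.

#include <stdio.h>

typedef unsigned long wakeref_t;

struct device_pm {
	int usecount;
	unsigned long next_cookie;
};

static wakeref_t pm_get_if_in_use(struct device_pm *pm)
{
	if (!pm->usecount)
		return 0;			/* 0 means no reference taken */
	pm->usecount++;
	return ++pm->next_cookie;		/* non-zero cookie identifies this get */
}

static void pm_put(struct device_pm *pm, wakeref_t wf)
{
	if (wf)
		pm->usecount--;
}

/* Runs the body once with a reference held, skipping it when none is taken. */
#define with_pm_if_in_use(pm, wf) \
	for ((wf) = pm_get_if_in_use(pm); (wf); pm_put((pm), (wf)), (wf) = 0)

int main(void)
{
	struct device_pm pm = { .usecount = 1 };
	wakeref_t wf;

	with_pm_if_in_use(&pm, wf)
		printf("sampling with wakeref cookie %lu held\n", wf);

	return 0;
}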
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index fe56465cdfd6..cbcb957b7141 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -13,7 +13,7 @@
static int query_topology_info(struct drm_i915_private *dev_priv,
struct drm_i915_query_item *query_item)
{
- const struct sseu_dev_info *sseu = &INTEL_INFO(dev_priv)->sseu;
+ const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
struct drm_i915_query_topology_info topo;
u32 slice_length, subslice_length, eu_length, total_length;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 067054cf4a86..638a586469f9 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -117,14 +117,14 @@
*/
typedef struct {
- uint32_t reg;
+ u32 reg;
} i915_reg_t;
#define _MMIO(r) ((const i915_reg_t){ .reg = (r) })
#define INVALID_MMIO_REG _MMIO(0)
-static inline uint32_t i915_mmio_reg_offset(i915_reg_t reg)
+static inline u32 i915_mmio_reg_offset(i915_reg_t reg)
{
return reg.reg;
}
@@ -139,6 +139,12 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
return !i915_mmio_reg_equal(reg, INVALID_MMIO_REG);
}
+#define VLV_DISPLAY_BASE 0x180000
+#define VLV_MIPI_BASE VLV_DISPLAY_BASE
+#define BXT_MIPI_BASE 0x60000
+
+#define DISPLAY_MMIO_BASE(dev_priv) (INTEL_INFO(dev_priv)->display_mmio_offset)
+
/*
* Given the first two numbers __a and __b of arbitrarily many evenly spaced
* numbers, pick the 0-based __index'th value.
@@ -179,15 +185,15 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
* Device info offset array based helpers for groups of registers with unevenly
* spaced base offsets.
*/
-#define _MMIO_PIPE2(pipe, reg) _MMIO(dev_priv->info.pipe_offsets[pipe] - \
- dev_priv->info.pipe_offsets[PIPE_A] + (reg) + \
- dev_priv->info.display_mmio_offset)
-#define _MMIO_TRANS2(pipe, reg) _MMIO(dev_priv->info.trans_offsets[(pipe)] - \
- dev_priv->info.trans_offsets[TRANSCODER_A] + (reg) + \
- dev_priv->info.display_mmio_offset)
-#define _CURSOR2(pipe, reg) _MMIO(dev_priv->info.cursor_offsets[(pipe)] - \
- dev_priv->info.cursor_offsets[PIPE_A] + (reg) + \
- dev_priv->info.display_mmio_offset)
+#define _MMIO_PIPE2(pipe, reg) _MMIO(INTEL_INFO(dev_priv)->pipe_offsets[pipe] - \
+ INTEL_INFO(dev_priv)->pipe_offsets[PIPE_A] + (reg) + \
+ DISPLAY_MMIO_BASE(dev_priv))
+#define _MMIO_TRANS2(pipe, reg) _MMIO(INTEL_INFO(dev_priv)->trans_offsets[(pipe)] - \
+ INTEL_INFO(dev_priv)->trans_offsets[TRANSCODER_A] + (reg) + \
+ DISPLAY_MMIO_BASE(dev_priv))
+#define _CURSOR2(pipe, reg) _MMIO(INTEL_INFO(dev_priv)->cursor_offsets[(pipe)] - \
+ INTEL_INFO(dev_priv)->cursor_offsets[PIPE_A] + (reg) + \
+ DISPLAY_MMIO_BASE(dev_priv))
#define __MASKED_FIELD(mask, value) ((mask) << 16 | (value))
#define _MASKED_FIELD(mask, value) ({ \
@@ -347,6 +353,24 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN11_GRDOM_MEDIA4 (1 << 8)
#define GEN11_GRDOM_VECS (1 << 13)
#define GEN11_GRDOM_VECS2 (1 << 14)
+#define GEN11_GRDOM_SFC0 (1 << 17)
+#define GEN11_GRDOM_SFC1 (1 << 18)
+
+#define GEN11_VCS_SFC_RESET_BIT(instance) (GEN11_GRDOM_SFC0 << ((instance) >> 1))
+#define GEN11_VECS_SFC_RESET_BIT(instance) (GEN11_GRDOM_SFC0 << (instance))
+
+#define GEN11_VCS_SFC_FORCED_LOCK(engine) _MMIO((engine)->mmio_base + 0x88C)
+#define GEN11_VCS_SFC_FORCED_LOCK_BIT (1 << 0)
+#define GEN11_VCS_SFC_LOCK_STATUS(engine) _MMIO((engine)->mmio_base + 0x890)
+#define GEN11_VCS_SFC_USAGE_BIT (1 << 0)
+#define GEN11_VCS_SFC_LOCK_ACK_BIT (1 << 1)
+
+#define GEN11_VECS_SFC_FORCED_LOCK(engine) _MMIO((engine)->mmio_base + 0x201C)
+#define GEN11_VECS_SFC_FORCED_LOCK_BIT (1 << 0)
+#define GEN11_VECS_SFC_LOCK_ACK(engine) _MMIO((engine)->mmio_base + 0x2018)
+#define GEN11_VECS_SFC_LOCK_ACK_BIT (1 << 0)
+#define GEN11_VECS_SFC_USAGE(engine) _MMIO((engine)->mmio_base + 0x2014)
+#define GEN11_VECS_SFC_USAGE_BIT (1 << 0)
#define RING_PP_DIR_BASE(engine) _MMIO((engine)->mmio_base + 0x228)
#define RING_PP_DIR_BASE_READ(engine) _MMIO((engine)->mmio_base + 0x518)
@@ -2596,10 +2620,6 @@ enum i915_power_well_id {
#define GEN11_GFX_DISABLE_LEGACY_MODE (1 << 3)
-#define VLV_DISPLAY_BASE 0x180000
-#define VLV_MIPI_BASE VLV_DISPLAY_BASE
-#define BXT_MIPI_BASE 0x60000
-
#define VLV_GU_CTL0 _MMIO(VLV_DISPLAY_BASE + 0x2030)
#define VLV_GU_CTL1 _MMIO(VLV_DISPLAY_BASE + 0x2034)
#define SCPD0 _MMIO(0x209c) /* 915+ only */
@@ -2781,6 +2801,9 @@ enum i915_power_well_id {
#define GEN6_RCS_PWR_FSM _MMIO(0x22ac)
#define GEN9_RCS_FE_FSM2 _MMIO(0x22a4)
+#define GEN10_CACHE_MODE_SS _MMIO(0xe420)
+#define FLOAT_BLEND_OPTIMIZATION_ENABLE (1 << 4)
+
/* Fuse readout registers for GT */
#define HSW_PAVP_FUSE1 _MMIO(0x911C)
#define HSW_F1_EU_DIS_SHIFT 16
@@ -3156,9 +3179,9 @@ enum i915_power_well_id {
/*
* Clock control & power management
*/
-#define _DPLL_A (dev_priv->info.display_mmio_offset + 0x6014)
-#define _DPLL_B (dev_priv->info.display_mmio_offset + 0x6018)
-#define _CHV_DPLL_C (dev_priv->info.display_mmio_offset + 0x6030)
+#define _DPLL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x6014)
+#define _DPLL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x6018)
+#define _CHV_DPLL_C (DISPLAY_MMIO_BASE(dev_priv) + 0x6030)
#define DPLL(pipe) _MMIO_PIPE3((pipe), _DPLL_A, _DPLL_B, _CHV_DPLL_C)
#define VGA0 _MMIO(0x6000)
@@ -3255,9 +3278,9 @@ enum i915_power_well_id {
#define SDVO_MULTIPLIER_SHIFT_HIRES 4
#define SDVO_MULTIPLIER_SHIFT_VGA 0
-#define _DPLL_A_MD (dev_priv->info.display_mmio_offset + 0x601c)
-#define _DPLL_B_MD (dev_priv->info.display_mmio_offset + 0x6020)
-#define _CHV_DPLL_C_MD (dev_priv->info.display_mmio_offset + 0x603c)
+#define _DPLL_A_MD (DISPLAY_MMIO_BASE(dev_priv) + 0x601c)
+#define _DPLL_B_MD (DISPLAY_MMIO_BASE(dev_priv) + 0x6020)
+#define _CHV_DPLL_C_MD (DISPLAY_MMIO_BASE(dev_priv) + 0x603c)
#define DPLL_MD(pipe) _MMIO_PIPE3((pipe), _DPLL_A_MD, _DPLL_B_MD, _CHV_DPLL_C_MD)
/*
@@ -3329,7 +3352,7 @@ enum i915_power_well_id {
#define DSTATE_PLL_D3_OFF (1 << 3)
#define DSTATE_GFX_CLOCK_GATING (1 << 1)
#define DSTATE_DOT_CLOCK_GATING (1 << 0)
-#define DSPCLK_GATE_D _MMIO(dev_priv->info.display_mmio_offset + 0x6200)
+#define DSPCLK_GATE_D _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x6200)
# define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */
# define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */
# define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* 965 */
@@ -3469,7 +3492,7 @@ enum i915_power_well_id {
#define _PALETTE_A 0xa000
#define _PALETTE_B 0xa800
#define _CHV_PALETTE_C 0xc000
-#define PALETTE(pipe, i) _MMIO(dev_priv->info.display_mmio_offset + \
+#define PALETTE(pipe, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + \
_PICK((pipe), _PALETTE_A, \
_PALETTE_B, _CHV_PALETTE_C) + \
(i) * 4)
@@ -4252,6 +4275,15 @@ enum {
#define EDP_PSR2_STATUS_STATE_MASK (0xf << 28)
#define EDP_PSR2_STATUS_STATE_SHIFT 28
+#define _PSR2_SU_STATUS_0 0x6F914
+#define _PSR2_SU_STATUS_1 0x6F918
+#define _PSR2_SU_STATUS_2 0x6F91C
+#define _PSR2_SU_STATUS(index) _MMIO(_PICK_EVEN((index), _PSR2_SU_STATUS_0, _PSR2_SU_STATUS_1))
+#define PSR2_SU_STATUS(frame) (_PSR2_SU_STATUS((frame) / 3))
+#define PSR2_SU_STATUS_SHIFT(frame) (((frame) % 3) * 10)
+#define PSR2_SU_STATUS_MASK(frame) (0x3ff << PSR2_SU_STATUS_SHIFT(frame))
+#define PSR2_SU_STATUS_FRAMES 8
+
/* VGA port control */
#define ADPA _MMIO(0x61100)
#define PCH_ADPA _MMIO(0xe1100)
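A worked example of the PSR2_SU_STATUS packing added in the hunk above: eight selective-update status fields, 10 bits each, packed three per 32-bit register starting at 0x6F914. The standalone snippet below uses plain arithmetic in place of the _MMIO()/_PICK_EVEN() kernel macros and only illustrates the bit layout.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	for (unsigned int frame = 0; frame < 8; frame++) {
		uint32_t reg = 0x6F914 + (frame / 3) * 4;	/* _PSR2_SU_STATUS(frame / 3) */
		unsigned int shift = (frame % 3) * 10;		/* PSR2_SU_STATUS_SHIFT(frame) */
		uint32_t mask = 0x3ffu << shift;		/* PSR2_SU_STATUS_MASK(frame) */

		printf("frame %u -> reg 0x%05X, bits %2u:%2u, mask 0x%08X\n",
		       frame, reg, shift + 9, shift, mask);
	}
	return 0;
}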
@@ -4302,7 +4334,7 @@ enum {
/* Hotplug control (945+ only) */
-#define PORT_HOTPLUG_EN _MMIO(dev_priv->info.display_mmio_offset + 0x61110)
+#define PORT_HOTPLUG_EN _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61110)
#define PORTB_HOTPLUG_INT_EN (1 << 29)
#define PORTC_HOTPLUG_INT_EN (1 << 28)
#define PORTD_HOTPLUG_INT_EN (1 << 27)
@@ -4332,7 +4364,7 @@ enum {
#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2)
#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
-#define PORT_HOTPLUG_STAT _MMIO(dev_priv->info.display_mmio_offset + 0x61114)
+#define PORT_HOTPLUG_STAT _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61114)
/*
* HDMI/DP bits are g4x+
*
@@ -4414,7 +4446,7 @@ enum {
#define PORT_DFT_I9XX _MMIO(0x61150)
#define DC_BALANCE_RESET (1 << 25)
-#define PORT_DFT2_G4X _MMIO(dev_priv->info.display_mmio_offset + 0x61154)
+#define PORT_DFT2_G4X _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61154)
#define DC_BALANCE_RESET_VLV (1 << 31)
#define PIPE_SCRAMBLE_RESET_MASK ((1 << 14) | (0x3 << 0))
#define PIPE_C_SCRAMBLE_RESET (1 << 14) /* chv */
@@ -4667,7 +4699,6 @@ enum {
#define EDP_FORCE_VDD (1 << 3)
#define EDP_BLC_ENABLE (1 << 2)
#define PANEL_POWER_RESET (1 << 1)
-#define PANEL_POWER_OFF (0 << 0)
#define PANEL_POWER_ON (1 << 0)
#define _PP_ON_DELAYS 0x61208
@@ -4699,7 +4730,7 @@ enum {
#define PANEL_POWER_CYCLE_DELAY_SHIFT 0
/* Panel fitting */
-#define PFIT_CONTROL _MMIO(dev_priv->info.display_mmio_offset + 0x61230)
+#define PFIT_CONTROL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61230)
#define PFIT_ENABLE (1 << 31)
#define PFIT_PIPE_MASK (3 << 29)
#define PFIT_PIPE_SHIFT 29
@@ -4717,7 +4748,7 @@ enum {
#define PFIT_SCALING_PROGRAMMED (1 << 26)
#define PFIT_SCALING_PILLAR (2 << 26)
#define PFIT_SCALING_LETTER (3 << 26)
-#define PFIT_PGM_RATIOS _MMIO(dev_priv->info.display_mmio_offset + 0x61234)
+#define PFIT_PGM_RATIOS _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61234)
/* Pre-965 */
#define PFIT_VERT_SCALE_SHIFT 20
#define PFIT_VERT_SCALE_MASK 0xfff00000
@@ -4729,25 +4760,25 @@ enum {
#define PFIT_HORIZ_SCALE_SHIFT_965 0
#define PFIT_HORIZ_SCALE_MASK_965 0x00001fff
-#define PFIT_AUTO_RATIOS _MMIO(dev_priv->info.display_mmio_offset + 0x61238)
+#define PFIT_AUTO_RATIOS _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61238)
-#define _VLV_BLC_PWM_CTL2_A (dev_priv->info.display_mmio_offset + 0x61250)
-#define _VLV_BLC_PWM_CTL2_B (dev_priv->info.display_mmio_offset + 0x61350)
+#define _VLV_BLC_PWM_CTL2_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61250)
+#define _VLV_BLC_PWM_CTL2_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61350)
#define VLV_BLC_PWM_CTL2(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \
_VLV_BLC_PWM_CTL2_B)
-#define _VLV_BLC_PWM_CTL_A (dev_priv->info.display_mmio_offset + 0x61254)
-#define _VLV_BLC_PWM_CTL_B (dev_priv->info.display_mmio_offset + 0x61354)
+#define _VLV_BLC_PWM_CTL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61254)
+#define _VLV_BLC_PWM_CTL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61354)
#define VLV_BLC_PWM_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL_A, \
_VLV_BLC_PWM_CTL_B)
-#define _VLV_BLC_HIST_CTL_A (dev_priv->info.display_mmio_offset + 0x61260)
-#define _VLV_BLC_HIST_CTL_B (dev_priv->info.display_mmio_offset + 0x61360)
+#define _VLV_BLC_HIST_CTL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61260)
+#define _VLV_BLC_HIST_CTL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61360)
#define VLV_BLC_HIST_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_HIST_CTL_A, \
_VLV_BLC_HIST_CTL_B)
/* Backlight control */
-#define BLC_PWM_CTL2 _MMIO(dev_priv->info.display_mmio_offset + 0x61250) /* 965+ only */
+#define BLC_PWM_CTL2 _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61250) /* 965+ only */
#define BLM_PWM_ENABLE (1 << 31)
#define BLM_COMBINATION_MODE (1 << 30) /* gen4 only */
#define BLM_PIPE_SELECT (1 << 29)
@@ -4770,7 +4801,7 @@ enum {
#define BLM_PHASE_IN_COUNT_MASK (0xff << 8)
#define BLM_PHASE_IN_INCR_SHIFT (0)
#define BLM_PHASE_IN_INCR_MASK (0xff << 0)
-#define BLC_PWM_CTL _MMIO(dev_priv->info.display_mmio_offset + 0x61254)
+#define BLC_PWM_CTL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61254)
/*
* This is the most significant 15 bits of the number of backlight cycles in a
* complete cycle of the modulated backlight control.
@@ -4792,7 +4823,7 @@ enum {
#define BACKLIGHT_DUTY_CYCLE_MASK_PNV (0xfffe)
#define BLM_POLARITY_PNV (1 << 0) /* pnv only */
-#define BLC_HIST_CTL _MMIO(dev_priv->info.display_mmio_offset + 0x61260)
+#define BLC_HIST_CTL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61260)
#define BLM_HISTOGRAM_ENABLE (1 << 31)
/* New registers for PCH-split platforms. Safe where new bits show up, the
@@ -4867,6 +4898,7 @@ enum {
# define TV_OVERSAMPLE_NONE (2 << 18)
/* Selects 8x oversampling */
# define TV_OVERSAMPLE_8X (3 << 18)
+# define TV_OVERSAMPLE_MASK (3 << 18)
/* Selects progressive mode rather than interlaced */
# define TV_PROGRESSIVE (1 << 17)
/* Sets the colorburst to PAL mode. Required for non-M PAL modes. */
@@ -5416,47 +5448,47 @@ enum {
* is 20 bytes in each direction, hence the 5 fixed
* data registers
*/
-#define _DPA_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64010)
-#define _DPA_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64014)
-#define _DPA_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64018)
-#define _DPA_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6401c)
-#define _DPA_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64020)
-#define _DPA_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64024)
-
-#define _DPB_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64110)
-#define _DPB_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64114)
-#define _DPB_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64118)
-#define _DPB_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6411c)
-#define _DPB_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64120)
-#define _DPB_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64124)
-
-#define _DPC_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64210)
-#define _DPC_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64214)
-#define _DPC_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64218)
-#define _DPC_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6421c)
-#define _DPC_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64220)
-#define _DPC_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64224)
-
-#define _DPD_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64310)
-#define _DPD_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64314)
-#define _DPD_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64318)
-#define _DPD_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6431c)
-#define _DPD_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64320)
-#define _DPD_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64324)
-
-#define _DPE_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64410)
-#define _DPE_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64414)
-#define _DPE_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64418)
-#define _DPE_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6441c)
-#define _DPE_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64420)
-#define _DPE_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64424)
-
-#define _DPF_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64510)
-#define _DPF_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64514)
-#define _DPF_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64518)
-#define _DPF_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6451c)
-#define _DPF_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64520)
-#define _DPF_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64524)
+#define _DPA_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64010)
+#define _DPA_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64014)
+#define _DPA_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64018)
+#define _DPA_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6401c)
+#define _DPA_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64020)
+#define _DPA_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64024)
+
+#define _DPB_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64110)
+#define _DPB_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64114)
+#define _DPB_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64118)
+#define _DPB_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6411c)
+#define _DPB_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64120)
+#define _DPB_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64124)
+
+#define _DPC_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64210)
+#define _DPC_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64214)
+#define _DPC_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64218)
+#define _DPC_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6421c)
+#define _DPC_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64220)
+#define _DPC_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64224)
+
+#define _DPD_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64310)
+#define _DPD_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64314)
+#define _DPD_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64318)
+#define _DPD_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6431c)
+#define _DPD_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64320)
+#define _DPD_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64324)
+
+#define _DPE_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64410)
+#define _DPE_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64414)
+#define _DPE_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64418)
+#define _DPE_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6441c)
+#define _DPE_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64420)
+#define _DPE_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64424)
+
+#define _DPF_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64510)
+#define _DPF_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64514)
+#define _DPF_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64518)
+#define _DPF_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6451c)
+#define _DPF_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64520)
+#define _DPF_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64524)
#define DP_AUX_CH_CTL(aux_ch) _MMIO_PORT(aux_ch, _DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL)
#define DP_AUX_CH_DATA(aux_ch, i) _MMIO(_PORT(aux_ch, _DPA_AUX_CH_DATA1, _DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
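As an illustrative aside, the AUX control registers step by 0x100 per channel from the AUX A base at 0x64010, which is what lets the single DP_AUX_CH_CTL(aux_ch) macro cover channels A through F. A standalone sketch of that arithmetic (the helper name and the host-side loop are hypothetical, not driver code):

#include <stdio.h>

/* Mirror of the offsets above: AUX A control at 0x64010, each later
 * channel 0x100 further on (B = 0x64110, ..., F = 0x64510). */
static unsigned int aux_ch_ctl_offset(unsigned int aux_ch)
{
        return 0x64010 + aux_ch * 0x100;
}

int main(void)
{
        for (unsigned int ch = 0; ch < 6; ch++)
                printf("AUX %c CTL offset: 0x%05x\n",
                       (int)('A' + ch), aux_ch_ctl_offset(ch));
        return 0;
}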
@@ -5681,6 +5713,12 @@ enum {
#define PIPEMISC_DITHER_TYPE_SP (0 << 2)
#define PIPEMISC(pipe) _MMIO_PIPE2(pipe, _PIPE_MISC_A)
+/* Skylake+ pipe bottom (background) color */
+#define _SKL_BOTTOM_COLOR_A 0x70034
+#define SKL_BOTTOM_COLOR_GAMMA_ENABLE (1 << 31)
+#define SKL_BOTTOM_COLOR_CSC_ENABLE (1 << 30)
+#define SKL_BOTTOM_COLOR(pipe) _MMIO_PIPE2(pipe, _SKL_BOTTOM_COLOR_A)
+
#define VLV_DPFLIPSTAT _MMIO(VLV_DISPLAY_BASE + 0x70028)
#define PIPEB_LINE_COMPARE_INT_EN (1 << 29)
#define PIPEB_HLINE_INT_EN (1 << 28)
@@ -5732,7 +5770,7 @@ enum {
#define DPINVGTT_STATUS_MASK 0xff
#define DPINVGTT_STATUS_MASK_CHV 0xfff
-#define DSPARB _MMIO(dev_priv->info.display_mmio_offset + 0x70030)
+#define DSPARB _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70030)
#define DSPARB_CSTART_MASK (0x7f << 7)
#define DSPARB_CSTART_SHIFT 7
#define DSPARB_BSTART_MASK (0x7f)
@@ -5767,7 +5805,7 @@ enum {
#define DSPARB_SPRITEF_MASK_VLV (0xff << 8)
/* pnv/gen4/g4x/vlv/chv */
-#define DSPFW1 _MMIO(dev_priv->info.display_mmio_offset + 0x70034)
+#define DSPFW1 _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70034)
#define DSPFW_SR_SHIFT 23
#define DSPFW_SR_MASK (0x1ff << 23)
#define DSPFW_CURSORB_SHIFT 16
@@ -5778,7 +5816,7 @@ enum {
#define DSPFW_PLANEA_SHIFT 0
#define DSPFW_PLANEA_MASK (0x7f << 0)
#define DSPFW_PLANEA_MASK_VLV (0xff << 0) /* vlv/chv */
-#define DSPFW2 _MMIO(dev_priv->info.display_mmio_offset + 0x70038)
+#define DSPFW2 _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70038)
#define DSPFW_FBC_SR_EN (1 << 31) /* g4x */
#define DSPFW_FBC_SR_SHIFT 28
#define DSPFW_FBC_SR_MASK (0x7 << 28) /* g4x */
@@ -5794,7 +5832,7 @@ enum {
#define DSPFW_SPRITEA_SHIFT 0
#define DSPFW_SPRITEA_MASK (0x7f << 0) /* g4x */
#define DSPFW_SPRITEA_MASK_VLV (0xff << 0) /* vlv/chv */
-#define DSPFW3 _MMIO(dev_priv->info.display_mmio_offset + 0x7003c)
+#define DSPFW3 _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x7003c)
#define DSPFW_HPLL_SR_EN (1 << 31)
#define PINEVIEW_SELF_REFRESH_EN (1 << 30)
#define DSPFW_CURSOR_SR_SHIFT 24
@@ -5962,7 +6000,7 @@ enum {
#define PLANE_WM_EN (1 << 31)
#define PLANE_WM_LINES_SHIFT 14
#define PLANE_WM_LINES_MASK 0x1f
-#define PLANE_WM_BLOCKS_MASK 0x3ff
+#define PLANE_WM_BLOCKS_MASK 0x7ff /* skl+: 10 bits, icl+: 11 bits */
#define _CUR_WM_0(pipe) _PIPE(pipe, _CUR_WM_A_0, _CUR_WM_B_0)
#define CUR_WM(pipe, level) _MMIO(_CUR_WM_0(pipe) + ((4) * (level)))
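As an aside, with the widened mask a PLANE_WM value decodes as: enable in bit 31, lines in bits 18:14, blocks in bits 10:0. A minimal standalone decode sketch, assuming only the field layout in the defines above (decode_plane_wm is a hypothetical name):

#include <stdint.h>
#include <stdio.h>

static void decode_plane_wm(uint32_t val)
{
        unsigned int en     = val >> 31;            /* PLANE_WM_EN */
        unsigned int lines  = (val >> 14) & 0x1f;   /* PLANE_WM_LINES_* */
        unsigned int blocks = val & 0x7ff;          /* 11 bits on icl+ */

        printf("en=%u lines=%u blocks=%u\n", en, lines, blocks);
}

int main(void)
{
        decode_plane_wm((1u << 31) | (4u << 14) | 0x123); /* en=1 lines=4 blocks=291 */
        return 0;
}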
@@ -6210,35 +6248,35 @@ enum {
* [10:1f] all
* [30:32] all
*/
-#define SWF0(i) _MMIO(dev_priv->info.display_mmio_offset + 0x70410 + (i) * 4)
-#define SWF1(i) _MMIO(dev_priv->info.display_mmio_offset + 0x71410 + (i) * 4)
-#define SWF3(i) _MMIO(dev_priv->info.display_mmio_offset + 0x72414 + (i) * 4)
+#define SWF0(i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70410 + (i) * 4)
+#define SWF1(i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x71410 + (i) * 4)
+#define SWF3(i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x72414 + (i) * 4)
#define SWF_ILK(i) _MMIO(0x4F000 + (i) * 4)
/* Pipe B */
-#define _PIPEBDSL (dev_priv->info.display_mmio_offset + 0x71000)
-#define _PIPEBCONF (dev_priv->info.display_mmio_offset + 0x71008)
-#define _PIPEBSTAT (dev_priv->info.display_mmio_offset + 0x71024)
+#define _PIPEBDSL (DISPLAY_MMIO_BASE(dev_priv) + 0x71000)
+#define _PIPEBCONF (DISPLAY_MMIO_BASE(dev_priv) + 0x71008)
+#define _PIPEBSTAT (DISPLAY_MMIO_BASE(dev_priv) + 0x71024)
#define _PIPEBFRAMEHIGH 0x71040
#define _PIPEBFRAMEPIXEL 0x71044
-#define _PIPEB_FRMCOUNT_G4X (dev_priv->info.display_mmio_offset + 0x71040)
-#define _PIPEB_FLIPCOUNT_G4X (dev_priv->info.display_mmio_offset + 0x71044)
+#define _PIPEB_FRMCOUNT_G4X (DISPLAY_MMIO_BASE(dev_priv) + 0x71040)
+#define _PIPEB_FLIPCOUNT_G4X (DISPLAY_MMIO_BASE(dev_priv) + 0x71044)
/* Display B control */
-#define _DSPBCNTR (dev_priv->info.display_mmio_offset + 0x71180)
+#define _DSPBCNTR (DISPLAY_MMIO_BASE(dev_priv) + 0x71180)
#define DISPPLANE_ALPHA_TRANS_ENABLE (1 << 15)
#define DISPPLANE_ALPHA_TRANS_DISABLE 0
#define DISPPLANE_SPRITE_ABOVE_DISPLAY 0
#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
-#define _DSPBADDR (dev_priv->info.display_mmio_offset + 0x71184)
-#define _DSPBSTRIDE (dev_priv->info.display_mmio_offset + 0x71188)
-#define _DSPBPOS (dev_priv->info.display_mmio_offset + 0x7118C)
-#define _DSPBSIZE (dev_priv->info.display_mmio_offset + 0x71190)
-#define _DSPBSURF (dev_priv->info.display_mmio_offset + 0x7119C)
-#define _DSPBTILEOFF (dev_priv->info.display_mmio_offset + 0x711A4)
-#define _DSPBOFFSET (dev_priv->info.display_mmio_offset + 0x711A4)
-#define _DSPBSURFLIVE (dev_priv->info.display_mmio_offset + 0x711AC)
+#define _DSPBADDR (DISPLAY_MMIO_BASE(dev_priv) + 0x71184)
+#define _DSPBSTRIDE (DISPLAY_MMIO_BASE(dev_priv) + 0x71188)
+#define _DSPBPOS (DISPLAY_MMIO_BASE(dev_priv) + 0x7118C)
+#define _DSPBSIZE (DISPLAY_MMIO_BASE(dev_priv) + 0x71190)
+#define _DSPBSURF (DISPLAY_MMIO_BASE(dev_priv) + 0x7119C)
+#define _DSPBTILEOFF (DISPLAY_MMIO_BASE(dev_priv) + 0x711A4)
+#define _DSPBOFFSET (DISPLAY_MMIO_BASE(dev_priv) + 0x711A4)
+#define _DSPBSURFLIVE (DISPLAY_MMIO_BASE(dev_priv) + 0x711AC)
/* ICL DSI 0 and 1 */
#define _PIPEDSI0CONF 0x7b008
@@ -6746,8 +6784,7 @@ enum {
#define _PLANE_BUF_CFG_1_B 0x7127c
#define _PLANE_BUF_CFG_2_B 0x7137c
-#define SKL_DDB_ENTRY_MASK 0x3FF
-#define ICL_DDB_ENTRY_MASK 0x7FF
+#define DDB_ENTRY_MASK 0x7FF /* skl+: 10 bits, icl+: 11 bits */
#define DDB_ENTRY_END_SHIFT 16
#define _PLANE_BUF_CFG_1(pipe) \
_PIPE(pipe, _PLANE_BUF_CFG_1_A, _PLANE_BUF_CFG_1_B)
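For illustration, a plane's DDB allocation register packs the start block in the low bits and the end block above DDB_ENTRY_END_SHIFT, both now read through the unified 11-bit DDB_ENTRY_MASK. A standalone sketch only (decode_ddb_entry is a hypothetical name):

#include <stdint.h>
#include <stdio.h>

static void decode_ddb_entry(uint32_t val)
{
        unsigned int start = val & 0x7ff;           /* DDB_ENTRY_MASK */
        unsigned int end   = (val >> 16) & 0x7ff;   /* DDB_ENTRY_END_SHIFT = 16 */

        printf("ddb entry: start=%u end=%u (%u blocks)\n", start, end, end - start);
}

int main(void)
{
        decode_ddb_entry((160u << 16) | 32u);       /* start=32 end=160 */
        return 0;
}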
@@ -7580,6 +7617,7 @@ enum {
#define _PIPEB_CHICKEN 0x71038
#define _PIPEC_CHICKEN 0x72038
#define PER_PIXEL_ALPHA_BYPASS_EN (1 << 7)
+#define PM_FILL_MAINTAIN_DBUF_FULLNESS (1 << 0)
#define PIPE_CHICKEN(pipe) _MMIO_PIPE(pipe, _PIPEA_CHICKEN,\
_PIPEB_CHICKEN)
@@ -8790,7 +8828,7 @@ enum {
#define GEN9_ENABLE_GPGPU_PREEMPTION (1 << 2)
/* Audio */
-#define G4X_AUD_VID_DID _MMIO(dev_priv->info.display_mmio_offset + 0x62020)
+#define G4X_AUD_VID_DID _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x62020)
#define INTEL_AUDIO_DEVCL 0x808629FB
#define INTEL_AUDIO_DEVBLC 0x80862801
#define INTEL_AUDIO_DEVCTG 0x80862802
@@ -9525,7 +9563,7 @@ enum skl_power_gate {
#define _MG_PLL3_ENABLE 0x46038
#define _MG_PLL4_ENABLE 0x4603C
/* Bits are the same as DPLL0_ENABLE */
-#define MG_PLL_ENABLE(port) _MMIO_PORT((port) - PORT_C, _MG_PLL1_ENABLE, \
+#define MG_PLL_ENABLE(tc_port) _MMIO_PORT((tc_port), _MG_PLL1_ENABLE, \
_MG_PLL2_ENABLE)
#define _MG_REFCLKIN_CTL_PORT1 0x16892C
@@ -9534,9 +9572,9 @@ enum skl_power_gate {
#define _MG_REFCLKIN_CTL_PORT4 0x16B92C
#define MG_REFCLKIN_CTL_OD_2_MUX(x) ((x) << 8)
#define MG_REFCLKIN_CTL_OD_2_MUX_MASK (0x7 << 8)
-#define MG_REFCLKIN_CTL(port) _MMIO_PORT((port) - PORT_C, \
- _MG_REFCLKIN_CTL_PORT1, \
- _MG_REFCLKIN_CTL_PORT2)
+#define MG_REFCLKIN_CTL(tc_port) _MMIO_PORT((tc_port), \
+ _MG_REFCLKIN_CTL_PORT1, \
+ _MG_REFCLKIN_CTL_PORT2)
#define _MG_CLKTOP2_CORECLKCTL1_PORT1 0x1688D8
#define _MG_CLKTOP2_CORECLKCTL1_PORT2 0x1698D8
@@ -9546,9 +9584,9 @@ enum skl_power_gate {
#define MG_CLKTOP2_CORECLKCTL1_B_DIVRATIO_MASK (0xff << 16)
#define MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(x) ((x) << 8)
#define MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK (0xff << 8)
-#define MG_CLKTOP2_CORECLKCTL1(port) _MMIO_PORT((port) - PORT_C, \
- _MG_CLKTOP2_CORECLKCTL1_PORT1, \
- _MG_CLKTOP2_CORECLKCTL1_PORT2)
+#define MG_CLKTOP2_CORECLKCTL1(tc_port) _MMIO_PORT((tc_port), \
+ _MG_CLKTOP2_CORECLKCTL1_PORT1, \
+ _MG_CLKTOP2_CORECLKCTL1_PORT2)
#define _MG_CLKTOP2_HSCLKCTL_PORT1 0x1688D4
#define _MG_CLKTOP2_HSCLKCTL_PORT2 0x1698D4
@@ -9566,9 +9604,9 @@ enum skl_power_gate {
#define MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(x) ((x) << 8)
#define MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT 8
#define MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK (0xf << 8)
-#define MG_CLKTOP2_HSCLKCTL(port) _MMIO_PORT((port) - PORT_C, \
- _MG_CLKTOP2_HSCLKCTL_PORT1, \
- _MG_CLKTOP2_HSCLKCTL_PORT2)
+#define MG_CLKTOP2_HSCLKCTL(tc_port) _MMIO_PORT((tc_port), \
+ _MG_CLKTOP2_HSCLKCTL_PORT1, \
+ _MG_CLKTOP2_HSCLKCTL_PORT2)
#define _MG_PLL_DIV0_PORT1 0x168A00
#define _MG_PLL_DIV0_PORT2 0x169A00
@@ -9580,8 +9618,8 @@ enum skl_power_gate {
#define MG_PLL_DIV0_FBDIV_FRAC(x) ((x) << 8)
#define MG_PLL_DIV0_FBDIV_INT_MASK (0xff << 0)
#define MG_PLL_DIV0_FBDIV_INT(x) ((x) << 0)
-#define MG_PLL_DIV0(port) _MMIO_PORT((port) - PORT_C, _MG_PLL_DIV0_PORT1, \
- _MG_PLL_DIV0_PORT2)
+#define MG_PLL_DIV0(tc_port) _MMIO_PORT((tc_port), _MG_PLL_DIV0_PORT1, \
+ _MG_PLL_DIV0_PORT2)
#define _MG_PLL_DIV1_PORT1 0x168A04
#define _MG_PLL_DIV1_PORT2 0x169A04
@@ -9595,8 +9633,8 @@ enum skl_power_gate {
#define MG_PLL_DIV1_NDIVRATIO(x) ((x) << 4)
#define MG_PLL_DIV1_FBPREDIV_MASK (0xf << 0)
#define MG_PLL_DIV1_FBPREDIV(x) ((x) << 0)
-#define MG_PLL_DIV1(port) _MMIO_PORT((port) - PORT_C, _MG_PLL_DIV1_PORT1, \
- _MG_PLL_DIV1_PORT2)
+#define MG_PLL_DIV1(tc_port) _MMIO_PORT((tc_port), _MG_PLL_DIV1_PORT1, \
+ _MG_PLL_DIV1_PORT2)
#define _MG_PLL_LF_PORT1 0x168A08
#define _MG_PLL_LF_PORT2 0x169A08
@@ -9608,8 +9646,8 @@ enum skl_power_gate {
#define MG_PLL_LF_GAINCTRL(x) ((x) << 16)
#define MG_PLL_LF_INT_COEFF(x) ((x) << 8)
#define MG_PLL_LF_PROP_COEFF(x) ((x) << 0)
-#define MG_PLL_LF(port) _MMIO_PORT((port) - PORT_C, _MG_PLL_LF_PORT1, \
- _MG_PLL_LF_PORT2)
+#define MG_PLL_LF(tc_port) _MMIO_PORT((tc_port), _MG_PLL_LF_PORT1, \
+ _MG_PLL_LF_PORT2)
#define _MG_PLL_FRAC_LOCK_PORT1 0x168A0C
#define _MG_PLL_FRAC_LOCK_PORT2 0x169A0C
@@ -9621,9 +9659,9 @@ enum skl_power_gate {
#define MG_PLL_FRAC_LOCK_DCODITHEREN (1 << 10)
#define MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN (1 << 8)
#define MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(x) ((x) << 0)
-#define MG_PLL_FRAC_LOCK(port) _MMIO_PORT((port) - PORT_C, \
- _MG_PLL_FRAC_LOCK_PORT1, \
- _MG_PLL_FRAC_LOCK_PORT2)
+#define MG_PLL_FRAC_LOCK(tc_port) _MMIO_PORT((tc_port), \
+ _MG_PLL_FRAC_LOCK_PORT1, \
+ _MG_PLL_FRAC_LOCK_PORT2)
#define _MG_PLL_SSC_PORT1 0x168A10
#define _MG_PLL_SSC_PORT2 0x169A10
@@ -9635,8 +9673,8 @@ enum skl_power_gate {
#define MG_PLL_SSC_STEPNUM(x) ((x) << 10)
#define MG_PLL_SSC_FLLEN (1 << 9)
#define MG_PLL_SSC_STEPSIZE(x) ((x) << 0)
-#define MG_PLL_SSC(port) _MMIO_PORT((port) - PORT_C, _MG_PLL_SSC_PORT1, \
- _MG_PLL_SSC_PORT2)
+#define MG_PLL_SSC(tc_port) _MMIO_PORT((tc_port), _MG_PLL_SSC_PORT1, \
+ _MG_PLL_SSC_PORT2)
#define _MG_PLL_BIAS_PORT1 0x168A14
#define _MG_PLL_BIAS_PORT2 0x169A14
@@ -9655,8 +9693,8 @@ enum skl_power_gate {
#define MG_PLL_BIAS_VREF_RDAC_MASK (0x7 << 5)
#define MG_PLL_BIAS_IREFTRIM(x) ((x) << 0)
#define MG_PLL_BIAS_IREFTRIM_MASK (0x1f << 0)
-#define MG_PLL_BIAS(port) _MMIO_PORT((port) - PORT_C, _MG_PLL_BIAS_PORT1, \
- _MG_PLL_BIAS_PORT2)
+#define MG_PLL_BIAS(tc_port) _MMIO_PORT((tc_port), _MG_PLL_BIAS_PORT1, \
+ _MG_PLL_BIAS_PORT2)
#define _MG_PLL_TDC_COLDST_BIAS_PORT1 0x168A18
#define _MG_PLL_TDC_COLDST_BIAS_PORT2 0x169A18
@@ -9667,9 +9705,9 @@ enum skl_power_gate {
#define MG_PLL_TDC_COLDST_COLDSTART (1 << 16)
#define MG_PLL_TDC_TDCOVCCORR_EN (1 << 2)
#define MG_PLL_TDC_TDCSEL(x) ((x) << 0)
-#define MG_PLL_TDC_COLDST_BIAS(port) _MMIO_PORT((port) - PORT_C, \
- _MG_PLL_TDC_COLDST_BIAS_PORT1, \
- _MG_PLL_TDC_COLDST_BIAS_PORT2)
+#define MG_PLL_TDC_COLDST_BIAS(tc_port) _MMIO_PORT((tc_port), \
+ _MG_PLL_TDC_COLDST_BIAS_PORT1, \
+ _MG_PLL_TDC_COLDST_BIAS_PORT2)
#define _CNL_DPLL0_CFGCR0 0x6C000
#define _CNL_DPLL1_CFGCR0 0x6C080
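The MG PLL macros now take a Type-C port index directly rather than subtracting PORT_C at every call site. The _MMIO_PORT()-style selection extrapolates linearly between two bases, so successive TC ports land 0x1000 apart; a hedged standalone sketch of that selection, where pick_even stands in for the driver's internal picker:

#include <stdint.h>
#include <stdio.h>

/* idx 0 -> a, idx 1 -> b, later indices continue at the same stride. */
static uint32_t pick_even(unsigned int idx, uint32_t a, uint32_t b)
{
        return a + idx * (b - a);
}

int main(void)
{
        const uint32_t mg_pll_div0_port1 = 0x168A00;
        const uint32_t mg_pll_div0_port2 = 0x169A00;

        for (unsigned int tc_port = 0; tc_port < 4; tc_port++)
                printf("MG_PLL_DIV0(tc_port %u) = 0x%06x\n", tc_port,
                       (unsigned int)pick_even(tc_port, mg_pll_div0_port1,
                                               mg_pll_div0_port2));
        return 0;
}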
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index ca95ab2f4cfa..c2a5c48c7541 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -29,6 +29,8 @@
#include <linux/sched/signal.h>
#include "i915_drv.h"
+#include "i915_active.h"
+#include "i915_reset.h"
static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
@@ -59,7 +61,7 @@ static bool i915_fence_signaled(struct dma_fence *fence)
static bool i915_fence_enable_signaling(struct dma_fence *fence)
{
- return intel_engine_enable_signaling(to_request(fence), true);
+ return i915_request_enable_breadcrumb(to_request(fence));
}
static signed long i915_fence_wait(struct dma_fence *fence,
@@ -111,99 +113,10 @@ i915_request_remove_from_client(struct i915_request *request)
spin_unlock(&file_priv->mm.lock);
}
-static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
+static void reserve_gt(struct drm_i915_private *i915)
{
- struct intel_engine_cs *engine;
- struct i915_timeline *timeline;
- enum intel_engine_id id;
- int ret;
-
- /* Carefully retire all requests without writing to the rings */
- ret = i915_gem_wait_for_idle(i915,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
- if (ret)
- return ret;
-
- GEM_BUG_ON(i915->gt.active_requests);
-
- /* If the seqno wraps around, we need to clear the breadcrumb rbtree */
- for_each_engine(engine, i915, id) {
- GEM_TRACE("%s seqno %d (current %d) -> %d\n",
- engine->name,
- engine->timeline.seqno,
- intel_engine_get_seqno(engine),
- seqno);
-
- if (seqno == engine->timeline.seqno)
- continue;
-
- kthread_park(engine->breadcrumbs.signaler);
-
- if (!i915_seqno_passed(seqno, engine->timeline.seqno)) {
- /* Flush any waiters before we reuse the seqno */
- intel_engine_disarm_breadcrumbs(engine);
- intel_engine_init_hangcheck(engine);
- GEM_BUG_ON(!list_empty(&engine->breadcrumbs.signals));
- }
-
- /* Check we are idle before we fiddle with hw state! */
- GEM_BUG_ON(!intel_engine_is_idle(engine));
- GEM_BUG_ON(i915_gem_active_isset(&engine->timeline.last_request));
-
- /* Finally reset hw state */
- intel_engine_init_global_seqno(engine, seqno);
- engine->timeline.seqno = seqno;
-
- kthread_unpark(engine->breadcrumbs.signaler);
- }
-
- list_for_each_entry(timeline, &i915->gt.timelines, link)
- memset(timeline->global_sync, 0, sizeof(timeline->global_sync));
-
- i915->gt.request_serial = seqno;
-
- return 0;
-}
-
-int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
-{
- struct drm_i915_private *i915 = to_i915(dev);
-
- lockdep_assert_held(&i915->drm.struct_mutex);
-
- if (seqno == 0)
- return -EINVAL;
-
- /* HWS page needs to be set less than what we will inject to ring */
- return reset_all_global_seqno(i915, seqno - 1);
-}
-
-static int reserve_gt(struct drm_i915_private *i915)
-{
- int ret;
-
- /*
- * Reservation is fine until we may need to wrap around
- *
- * By incrementing the serial for every request, we know that no
- * individual engine may exceed that serial (as each is reset to 0
- * on any wrap). This protects even the most pessimistic of migrations
- * of every request from all engines onto just one.
- */
- while (unlikely(++i915->gt.request_serial == 0)) {
- ret = reset_all_global_seqno(i915, 0);
- if (ret) {
- i915->gt.request_serial--;
- return ret;
- }
- }
-
if (!i915->gt.active_requests++)
i915_gem_unpark(i915);
-
- return 0;
}
static void unreserve_gt(struct drm_i915_private *i915)
@@ -213,12 +126,6 @@ static void unreserve_gt(struct drm_i915_private *i915)
i915_gem_park(i915);
}
-void i915_gem_retire_noop(struct i915_gem_active *active,
- struct i915_request *request)
-{
- /* Space left intentionally blank */
-}
-
static void advance_ring(struct i915_request *request)
{
struct intel_ring *ring = request->ring;
@@ -270,10 +177,11 @@ static void free_capture_list(struct i915_request *request)
static void __retire_engine_request(struct intel_engine_cs *engine,
struct i915_request *rq)
{
- GEM_TRACE("%s(%s) fence %llx:%d, global=%d, current %d\n",
+ GEM_TRACE("%s(%s) fence %llx:%lld, global=%d, current %d:%d\n",
__func__, engine->name,
rq->fence.context, rq->fence.seqno,
rq->global_seqno,
+ hwsp_seqno(rq),
intel_engine_get_seqno(engine));
GEM_BUG_ON(!i915_request_completed(rq));
@@ -286,10 +194,11 @@ static void __retire_engine_request(struct intel_engine_cs *engine,
spin_unlock(&engine->timeline.lock);
spin_lock(&rq->lock);
- if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
+ i915_request_mark_complete(rq);
+ if (!i915_request_signaled(rq))
dma_fence_signal_locked(&rq->fence);
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
- intel_engine_cancel_signaling(rq);
+ i915_request_cancel_breadcrumb(rq);
if (rq->waitboost) {
GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters));
atomic_dec(&rq->i915->gt_pm.rps.num_waiters);
@@ -330,12 +239,13 @@ static void __retire_engine_upto(struct intel_engine_cs *engine,
static void i915_request_retire(struct i915_request *request)
{
- struct i915_gem_active *active, *next;
+ struct i915_active_request *active, *next;
- GEM_TRACE("%s fence %llx:%d, global=%d, current %d\n",
+ GEM_TRACE("%s fence %llx:%lld, global=%d, current %d:%d\n",
request->engine->name,
request->fence.context, request->fence.seqno,
request->global_seqno,
+ hwsp_seqno(request),
intel_engine_get_seqno(request->engine));
lockdep_assert_held(&request->i915->drm.struct_mutex);
@@ -363,10 +273,10 @@ static void i915_request_retire(struct i915_request *request)
* we may spend an inordinate amount of time simply handling
* the retirement of requests and processing their callbacks.
* Of which, this loop itself is particularly hot due to the
- * cache misses when jumping around the list of i915_gem_active.
- * So we try to keep this loop as streamlined as possible and
- * also prefetch the next i915_gem_active to try and hide
- * the likely cache miss.
+ * cache misses when jumping around the list of
+ * i915_active_request. So we try to keep this loop as
+ * streamlined as possible and also prefetch the next
+ * i915_active_request to try and hide the likely cache miss.
*/
prefetchw(next);
@@ -395,10 +305,11 @@ void i915_request_retire_upto(struct i915_request *rq)
struct intel_ring *ring = rq->ring;
struct i915_request *tmp;
- GEM_TRACE("%s fence %llx:%d, global=%d, current %d\n",
+ GEM_TRACE("%s fence %llx:%lld, global=%d, current %d:%d\n",
rq->engine->name,
rq->fence.context, rq->fence.seqno,
rq->global_seqno,
+ hwsp_seqno(rq),
intel_engine_get_seqno(rq->engine));
lockdep_assert_held(&rq->i915->drm.struct_mutex);
@@ -417,7 +328,7 @@ void i915_request_retire_upto(struct i915_request *rq)
static u32 timeline_get_seqno(struct i915_timeline *tl)
{
- return ++tl->seqno;
+ return tl->seqno += 1 + tl->has_initial_breadcrumb;
}
static void move_to_timeline(struct i915_request *request,
@@ -431,15 +342,23 @@ static void move_to_timeline(struct i915_request *request,
spin_unlock(&request->timeline->lock);
}
+static u32 next_global_seqno(struct i915_timeline *tl)
+{
+ if (!++tl->seqno)
+ ++tl->seqno;
+ return tl->seqno;
+}
+
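A small aside on next_global_seqno() above: it never returns 0, since a zero global_seqno means the request has not been submitted, so on a 32-bit wrap the counter is bumped a second time. A standalone sketch of that wrap behaviour:

#include <stdint.h>
#include <stdio.h>

static uint32_t next_seqno(uint32_t *seqno)
{
        if (!++*seqno)          /* wrapped to 0: skip it */
                ++*seqno;
        return *seqno;
}

int main(void)
{
        uint32_t s = 0xfffffffe;

        printf("%u\n", (unsigned int)next_seqno(&s)); /* 4294967295 */
        printf("%u\n", (unsigned int)next_seqno(&s)); /* 1, never 0 */
        return 0;
}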
void __i915_request_submit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
u32 seqno;
- GEM_TRACE("%s fence %llx:%d -> global=%d, current %d\n",
+ GEM_TRACE("%s fence %llx:%lld -> global=%d, current %d:%d\n",
engine->name,
request->fence.context, request->fence.seqno,
engine->timeline.seqno + 1,
+ hwsp_seqno(request),
intel_engine_get_seqno(engine));
GEM_BUG_ON(!irqs_disabled());
@@ -447,26 +366,27 @@ void __i915_request_submit(struct i915_request *request)
GEM_BUG_ON(request->global_seqno);
- seqno = timeline_get_seqno(&engine->timeline);
+ seqno = next_global_seqno(&engine->timeline);
GEM_BUG_ON(!seqno);
GEM_BUG_ON(intel_engine_signaled(engine, seqno));
/* We may be recursing from the signal callback of another i915 fence */
spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
+ GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
+ set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
request->global_seqno = seqno;
- if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
- intel_engine_enable_signaling(request, false);
+ if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
+ !i915_request_enable_breadcrumb(request))
+ intel_engine_queue_breadcrumbs(engine);
spin_unlock(&request->lock);
- engine->emit_breadcrumb(request,
- request->ring->vaddr + request->postfix);
+ engine->emit_fini_breadcrumb(request,
+ request->ring->vaddr + request->postfix);
/* Transfer from per-context onto the global per-engine timeline */
move_to_timeline(request, &engine->timeline);
trace_i915_request_execute(request);
-
- wake_up_all(&request->execute);
}
void i915_request_submit(struct i915_request *request)
@@ -486,10 +406,11 @@ void __i915_request_unsubmit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
- GEM_TRACE("%s fence %llx:%d <- global=%d, current %d\n",
+ GEM_TRACE("%s fence %llx:%lld <- global=%d, current %d:%d\n",
engine->name,
request->fence.context, request->fence.seqno,
request->global_seqno,
+ hwsp_seqno(request),
intel_engine_get_seqno(engine));
GEM_BUG_ON(!irqs_disabled());
@@ -508,7 +429,9 @@ void __i915_request_unsubmit(struct i915_request *request)
spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
request->global_seqno = 0;
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
- intel_engine_cancel_signaling(request);
+ i915_request_cancel_breadcrumb(request);
+ GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
+ clear_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
spin_unlock(&request->lock);
/* Transfer back from the global per-engine timeline to per-context */
@@ -566,6 +489,43 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
return NOTIFY_DONE;
}
+static void ring_retire_requests(struct intel_ring *ring)
+{
+ struct i915_request *rq, *rn;
+
+ list_for_each_entry_safe(rq, rn, &ring->request_list, ring_link) {
+ if (!i915_request_completed(rq))
+ break;
+
+ i915_request_retire(rq);
+ }
+}
+
+static noinline struct i915_request *
+i915_request_alloc_slow(struct intel_context *ce)
+{
+ struct intel_ring *ring = ce->ring;
+ struct i915_request *rq;
+
+ if (list_empty(&ring->request_list))
+ goto out;
+
+ /* Ratelimit ourselves to prevent oom from malicious clients */
+ rq = list_last_entry(&ring->request_list, typeof(*rq), ring_link);
+ cond_synchronize_rcu(rq->rcustate);
+
+ /* Retire our old requests in the hope that we free some */
+ ring_retire_requests(ring);
+
+out:
+ return kmem_cache_alloc(ce->gem_context->i915->requests, GFP_KERNEL);
+}
+
+static int add_timeline_barrier(struct i915_request *rq)
+{
+ return i915_request_await_active_request(rq, &rq->timeline->barrier);
+}
+
/**
* i915_request_alloc - allocate a request structure
*
@@ -608,13 +568,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
if (IS_ERR(ce))
return ERR_CAST(ce);
- ret = reserve_gt(i915);
- if (ret)
- goto err_unpin;
-
- ret = intel_ring_wait_for_space(ce->ring, MIN_SPACE_FOR_ADD_REQUEST);
- if (ret)
- goto err_unreserve;
+ reserve_gt(i915);
/* Move our oldest request to the slab-cache (if not in use!) */
rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
@@ -628,7 +582,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
* We use RCU to look up requests in flight. The lookups may
* race with the request being allocated from the slab freelist.
 * That is, the request we are writing to here may be in the process
- * of being read by __i915_gem_active_get_rcu(). As such,
+ * of being read by __i915_active_request_get_rcu(). As such,
* we have to be very careful when overwriting the contents. During
 * the RCU lookup, we chase the request->engine pointer,
* read the request->global_seqno and increment the reference count.
@@ -654,15 +608,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
rq = kmem_cache_alloc(i915->requests,
GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (unlikely(!rq)) {
- i915_retire_requests(i915);
-
- /* Ratelimit ourselves to prevent oom from malicious clients */
- rq = i915_gem_active_raw(&ce->ring->timeline->last_request,
- &i915->drm.struct_mutex);
- if (rq)
- cond_synchronize_rcu(rq->rcustate);
-
- rq = kmem_cache_alloc(i915->requests, GFP_KERNEL);
+ rq = i915_request_alloc_slow(ce);
if (!rq) {
ret = -ENOMEM;
goto err_unreserve;
@@ -679,6 +625,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
rq->ring = ce->ring;
rq->timeline = ce->ring->timeline;
GEM_BUG_ON(rq->timeline == &engine->timeline);
+ rq->hwsp_seqno = rq->timeline->hwsp_seqno;
spin_lock_init(&rq->lock);
dma_fence_init(&rq->fence,
@@ -689,13 +636,11 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
/* We bump the ref for the fence chain */
i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify);
- init_waitqueue_head(&rq->execute);
i915_sched_node_init(&rq->sched);
/* No zalloc, must clear what we need by hand */
rq->global_seqno = 0;
- rq->signaling.wait.seqno = 0;
rq->file_priv = NULL;
rq->batch = NULL;
rq->capture_list = NULL;
@@ -707,9 +652,13 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
* i915_request_add() call can't fail. Note that the reserve may need
* to be redone if the request is not actually submitted straight
* away, e.g. because a GPU scheduler has deferred it.
+ *
+ * Note that due to how we add reserved_space to intel_ring_begin()
+ * we need to double our reservation to ensure that if we need to wrap
+ * around inside i915_request_add() there is sufficient space at
+ * the beginning of the ring as well.
*/
- rq->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
- GEM_BUG_ON(rq->reserved_space < engine->emit_breadcrumb_sz);
+ rq->reserved_space = 2 * engine->emit_fini_breadcrumb_dw * sizeof(u32);
/*
* Record the position of the start of the request so that
@@ -719,8 +668,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
*/
rq->head = rq->ring->emit;
- /* Unconditionally invalidate GPU caches and TLBs. */
- ret = engine->emit_flush(rq, EMIT_INVALIDATE);
+ ret = add_timeline_barrier(rq);
if (ret)
goto err_unwind;
@@ -748,7 +696,6 @@ err_unwind:
kmem_cache_free(i915->requests, rq);
err_unreserve:
unreserve_gt(i915);
-err_unpin:
intel_context_unpin(ce);
return ERR_PTR(ret);
}
@@ -776,34 +723,12 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
&from->submit,
I915_FENCE_GFP);
- return ret < 0 ? ret : 0;
- }
-
- if (to->engine->semaphore.sync_to) {
- u32 seqno;
-
- GEM_BUG_ON(!from->engine->semaphore.signal);
-
- seqno = i915_request_global_seqno(from);
- if (!seqno)
- goto await_dma_fence;
-
- if (seqno <= to->timeline->global_sync[from->engine->id])
- return 0;
-
- trace_i915_gem_ring_sync_to(to, from);
- ret = to->engine->semaphore.sync_to(to, from);
- if (ret)
- return ret;
-
- to->timeline->global_sync[from->engine->id] = seqno;
- return 0;
+ } else {
+ ret = i915_sw_fence_await_dma_fence(&to->submit,
+ &from->fence, 0,
+ I915_FENCE_GFP);
}
-await_dma_fence:
- ret = i915_sw_fence_await_dma_fence(&to->submit,
- &from->fence, 0,
- I915_FENCE_GFP);
return ret < 0 ? ret : 0;
}
@@ -961,7 +886,7 @@ void i915_request_add(struct i915_request *request)
struct i915_request *prev;
u32 *cs;
- GEM_TRACE("%s fence %llx:%d\n",
+ GEM_TRACE("%s fence %llx:%lld\n",
engine->name, request->fence.context, request->fence.seqno);
lockdep_assert_held(&request->i915->drm.struct_mutex);
@@ -979,8 +904,8 @@ void i915_request_add(struct i915_request *request)
* should already have been reserved in the ring buffer. Let the ring
* know that it is time to use that space up.
*/
+ GEM_BUG_ON(request->reserved_space > request->ring->space);
request->reserved_space = 0;
- engine->emit_flush(request, EMIT_FLUSH);
/*
* Record the position of the start of the breadcrumb so that
@@ -988,7 +913,7 @@ void i915_request_add(struct i915_request *request)
* GPU processing the request, we never over-estimate the
* position of the ring's HEAD.
*/
- cs = intel_ring_begin(request, engine->emit_breadcrumb_sz);
+ cs = intel_ring_begin(request, engine->emit_fini_breadcrumb_dw);
GEM_BUG_ON(IS_ERR(cs));
request->postfix = intel_ring_offset(request, cs);
@@ -999,8 +924,8 @@ void i915_request_add(struct i915_request *request)
* see a more recent value in the hws than we are tracking.
*/
- prev = i915_gem_active_raw(&timeline->last_request,
- &request->i915->drm.struct_mutex);
+ prev = i915_active_request_raw(&timeline->last_request,
+ &request->i915->drm.struct_mutex);
if (prev && !i915_request_completed(prev)) {
i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
&request->submitq);
@@ -1016,7 +941,7 @@ void i915_request_add(struct i915_request *request)
spin_unlock_irq(&timeline->lock);
GEM_BUG_ON(timeline->seqno != request->fence.seqno);
- i915_gem_active_set(&timeline->last_request, request);
+ __i915_active_request_set(&timeline->last_request, request);
list_add_tail(&request->ring_link, &ring->request_list);
if (list_is_first(&request->ring_link, &ring->request_list)) {
@@ -1047,7 +972,7 @@ void i915_request_add(struct i915_request *request)
* Allow interactive/synchronous clients to jump ahead of
* the bulk clients. (FQ_CODEL)
*/
- if (!prev || i915_request_completed(prev))
+ if (list_empty(&request->sched.signalers_list))
attr.priority |= I915_PRIORITY_NEWCLIENT;
engine->schedule(request, &attr);
@@ -1110,13 +1035,10 @@ static bool busywait_stop(unsigned long timeout, unsigned int cpu)
return this_cpu != cpu;
}
-static bool __i915_spin_request(const struct i915_request *rq,
- u32 seqno, int state, unsigned long timeout_us)
+static bool __i915_spin_request(const struct i915_request * const rq,
+ int state, unsigned long timeout_us)
{
- struct intel_engine_cs *engine = rq->engine;
- unsigned int irq, cpu;
-
- GEM_BUG_ON(!seqno);
+ unsigned int cpu;
/*
* Only wait for the request if we know it is likely to complete.
@@ -1124,12 +1046,12 @@ static bool __i915_spin_request(const struct i915_request *rq,
* We don't track the timestamps around requests, nor the average
* request length, so we do not have a good indicator that this
* request will complete within the timeout. What we do know is the
- * order in which requests are executed by the engine and so we can
- * tell if the request has started. If the request hasn't started yet,
- * it is a fair assumption that it will not complete within our
- * relatively short timeout.
+ * order in which requests are executed by the context and so we can
+ * tell if the request has been started. If the request is not even
+ * running yet, it is a fair assumption that it will not complete
+ * within our relatively short timeout.
*/
- if (!intel_engine_has_started(engine, seqno))
+ if (!i915_request_is_running(rq))
return false;
/*
@@ -1143,20 +1065,10 @@ static bool __i915_spin_request(const struct i915_request *rq,
* takes to sleep on a request, on the order of a microsecond.
*/
- irq = READ_ONCE(engine->breadcrumbs.irq_count);
timeout_us += local_clock_us(&cpu);
do {
- if (intel_engine_has_completed(engine, seqno))
- return seqno == i915_request_global_seqno(rq);
-
- /*
- * Seqno are meant to be ordered *before* the interrupt. If
- * we see an interrupt without a corresponding seqno advance,
- * assume we won't see one in the near future but require
- * the engine->seqno_barrier() to fixup coherency.
- */
- if (READ_ONCE(engine->breadcrumbs.irq_count) != irq)
- break;
+ if (i915_request_completed(rq))
+ return true;
if (signal_pending_state(state, current))
break;
@@ -1170,16 +1082,16 @@ static bool __i915_spin_request(const struct i915_request *rq,
return false;
}
-static bool __i915_wait_request_check_and_reset(struct i915_request *request)
-{
- struct i915_gpu_error *error = &request->i915->gpu_error;
+struct request_wait {
+ struct dma_fence_cb cb;
+ struct task_struct *tsk;
+};
- if (likely(!i915_reset_handoff(error)))
- return false;
+static void request_wait_wake(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+ struct request_wait *wait = container_of(cb, typeof(*wait), cb);
- __set_current_state(TASK_RUNNING);
- i915_reset(request->i915, error->stalled_mask, error->reason);
- return true;
+ wake_up_process(wait->tsk);
}
/**
@@ -1207,17 +1119,9 @@ long i915_request_wait(struct i915_request *rq,
{
const int state = flags & I915_WAIT_INTERRUPTIBLE ?
TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
- wait_queue_head_t *errq = &rq->i915->gpu_error.wait_queue;
- DEFINE_WAIT_FUNC(reset, default_wake_function);
- DEFINE_WAIT_FUNC(exec, default_wake_function);
- struct intel_wait wait;
+ struct request_wait wait;
might_sleep();
-#if IS_ENABLED(CONFIG_LOCKDEP)
- GEM_BUG_ON(debug_locks &&
- !!lockdep_is_held(&rq->i915->drm.struct_mutex) !=
- !!(flags & I915_WAIT_LOCKED));
-#endif
GEM_BUG_ON(timeout < 0);
if (i915_request_completed(rq))
@@ -1228,57 +1132,23 @@ long i915_request_wait(struct i915_request *rq,
trace_i915_request_wait_begin(rq, flags);
- add_wait_queue(&rq->execute, &exec);
- if (flags & I915_WAIT_LOCKED)
- add_wait_queue(errq, &reset);
+ /* Optimistic short spin before touching IRQs */
+ if (__i915_spin_request(rq, state, 5))
+ goto out;
- intel_wait_init(&wait);
if (flags & I915_WAIT_PRIORITY)
i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
-restart:
- do {
- set_current_state(state);
- if (intel_wait_update_request(&wait, rq))
- break;
-
- if (flags & I915_WAIT_LOCKED &&
- __i915_wait_request_check_and_reset(rq))
- continue;
-
- if (signal_pending_state(state, current)) {
- timeout = -ERESTARTSYS;
- goto complete;
- }
-
- if (!timeout) {
- timeout = -ETIME;
- goto complete;
- }
-
- timeout = io_schedule_timeout(timeout);
- } while (1);
-
- GEM_BUG_ON(!intel_wait_has_seqno(&wait));
- GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
+ wait.tsk = current;
+ if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake))
+ goto out;
- /* Optimistic short spin before touching IRQs */
- if (__i915_spin_request(rq, wait.seqno, state, 5))
- goto complete;
-
- set_current_state(state);
- if (intel_engine_add_wait(rq->engine, &wait))
- /*
- * In order to check that we haven't missed the interrupt
- * as we enabled it, we need to kick ourselves to do a
- * coherent check on the seqno before we sleep.
- */
- goto wakeup;
+ for (;;) {
+ set_current_state(state);
- if (flags & I915_WAIT_LOCKED)
- __i915_wait_request_check_and_reset(rq);
+ if (i915_request_completed(rq))
+ break;
- for (;;) {
if (signal_pending_state(state, current)) {
timeout = -ERESTARTSYS;
break;
@@ -1290,70 +1160,14 @@ restart:
}
timeout = io_schedule_timeout(timeout);
-
- if (intel_wait_complete(&wait) &&
- intel_wait_check_request(&wait, rq))
- break;
-
- set_current_state(state);
-
-wakeup:
- /*
- * Carefully check if the request is complete, giving time
- * for the seqno to be visible following the interrupt.
- * We also have to check in case we are kicked by the GPU
- * reset in order to drop the struct_mutex.
- */
- if (__i915_request_irq_complete(rq))
- break;
-
- /*
- * If the GPU is hung, and we hold the lock, reset the GPU
- * and then check for completion. On a full reset, the engine's
- * HW seqno will be advanced passed us and we are complete.
- * If we do a partial reset, we have to wait for the GPU to
- * resume and update the breadcrumb.
- *
- * If we don't hold the mutex, we can just wait for the worker
- * to come along and update the breadcrumb (either directly
- * itself, or indirectly by recovering the GPU).
- */
- if (flags & I915_WAIT_LOCKED &&
- __i915_wait_request_check_and_reset(rq))
- continue;
-
- /* Only spin if we know the GPU is processing this request */
- if (__i915_spin_request(rq, wait.seqno, state, 2))
- break;
-
- if (!intel_wait_check_request(&wait, rq)) {
- intel_engine_remove_wait(rq->engine, &wait);
- goto restart;
- }
}
-
- intel_engine_remove_wait(rq->engine, &wait);
-complete:
__set_current_state(TASK_RUNNING);
- if (flags & I915_WAIT_LOCKED)
- remove_wait_queue(errq, &reset);
- remove_wait_queue(&rq->execute, &exec);
- trace_i915_request_wait_end(rq);
-
- return timeout;
-}
-static void ring_retire_requests(struct intel_ring *ring)
-{
- struct i915_request *request, *next;
+ dma_fence_remove_callback(&rq->fence, &wait.cb);
- list_for_each_entry_safe(request, next,
- &ring->request_list, ring_link) {
- if (!i915_request_completed(request))
- break;
-
- i915_request_retire(request);
- }
+out:
+ trace_i915_request_wait_end(rq);
+ return timeout;
}
void i915_retire_requests(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 90e9d170a0cd..40f3e8dcbdd5 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -30,7 +30,6 @@
#include "i915_gem.h"
#include "i915_scheduler.h"
#include "i915_sw_fence.h"
-#include "i915_scheduler.h"
#include <uapi/drm/i915_drm.h>
@@ -39,23 +38,34 @@ struct drm_i915_gem_object;
struct i915_request;
struct i915_timeline;
-struct intel_wait {
- struct rb_node node;
- struct task_struct *tsk;
- struct i915_request *request;
- u32 seqno;
-};
-
-struct intel_signal_node {
- struct intel_wait wait;
- struct list_head link;
-};
-
struct i915_capture_list {
struct i915_capture_list *next;
struct i915_vma *vma;
};
+enum {
+ /*
+ * I915_FENCE_FLAG_ACTIVE - this request is currently submitted to HW.
+ *
+ * Set by __i915_request_submit() on handing over to HW, and cleared
+ * by __i915_request_unsubmit() if we preempt this request.
+ *
+ * Finally cleared for consistency on retiring the request, when
+ * we know the HW is no longer running this request.
+ *
+ * See i915_request_is_active()
+ */
+ I915_FENCE_FLAG_ACTIVE = DMA_FENCE_FLAG_USER_BITS,
+
+ /*
+ * I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
+ *
+ * Internal bookkeeping used by the breadcrumb code to track when
+ * a request is on the various signal_list.
+ */
+ I915_FENCE_FLAG_SIGNAL,
+};
+
/**
* Request queue structure.
*
@@ -98,7 +108,7 @@ struct i915_request {
struct intel_context *hw_context;
struct intel_ring *ring;
struct i915_timeline *timeline;
- struct intel_signal_node signaling;
+ struct list_head signal_link;
/*
* The rcu epoch of when this request was allocated. Used to judiciously
@@ -117,7 +127,6 @@ struct i915_request {
*/
struct i915_sw_fence submit;
wait_queue_entry_t submitq;
- wait_queue_head_t execute;
/*
* A list of everyone we wait upon, and everyone who waits upon us.
@@ -131,6 +140,13 @@ struct i915_request {
struct i915_sched_node sched;
struct i915_dependency dep;
+ /*
+ * A convenience pointer to the current breadcrumb value stored in
+ * the HW status page (or our timeline's local equivalent). The full
+ * path would be rq->hw_context->ring->timeline->hwsp_seqno.
+ */
+ const u32 *hwsp_seqno;
+
/**
* GEM sequence number associated with this request on the
* global execution timeline. It is zero when the request is not
@@ -249,7 +265,7 @@ i915_request_put(struct i915_request *rq)
* that it has passed the global seqno and the global seqno is unchanged
* after the read, it is indeed complete).
*/
-static u32
+static inline u32
i915_request_global_seqno(const struct i915_request *request)
{
return READ_ONCE(request->global_seqno);
@@ -271,6 +287,10 @@ void i915_request_skip(struct i915_request *request, int error);
void __i915_request_unsubmit(struct i915_request *request);
void i915_request_unsubmit(struct i915_request *request);
+/* Note: part of the intel_breadcrumbs family */
+bool i915_request_enable_breadcrumb(struct i915_request *request);
+void i915_request_cancel_breadcrumb(struct i915_request *request);
+
long i915_request_wait(struct i915_request *rq,
unsigned int flags,
long timeout)
@@ -281,441 +301,106 @@ long i915_request_wait(struct i915_request *rq,
#define I915_WAIT_ALL BIT(3) /* used by i915_gem_object_wait() */
#define I915_WAIT_FOR_IDLE_BOOST BIT(4)
-static inline bool intel_engine_has_started(struct intel_engine_cs *engine,
- u32 seqno);
-static inline bool intel_engine_has_completed(struct intel_engine_cs *engine,
- u32 seqno);
-
-/**
- * Returns true if seq1 is later than seq2.
- */
-static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
-{
- return (s32)(seq1 - seq2) >= 0;
-}
-
-/**
- * i915_request_started - check if the request has begun being executed
- * @rq: the request
- *
- * Returns true if the request has been submitted to hardware, and the hardware
- * has advanced passed the end of the previous request and so should be either
- * currently processing the request (though it may be preempted and so
- * not necessarily the next request to complete) or have completed the request.
- */
-static inline bool i915_request_started(const struct i915_request *rq)
-{
- u32 seqno;
-
- seqno = i915_request_global_seqno(rq);
- if (!seqno) /* not yet submitted to HW */
- return false;
-
- return intel_engine_has_started(rq->engine, seqno);
-}
-
-static inline bool
-__i915_request_completed(const struct i915_request *rq, u32 seqno)
+static inline bool i915_request_signaled(const struct i915_request *rq)
{
- GEM_BUG_ON(!seqno);
- return intel_engine_has_completed(rq->engine, seqno) &&
- seqno == i915_request_global_seqno(rq);
+ /* The request may live longer than its HWSP, so check flags first! */
+ return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags);
}
-static inline bool i915_request_completed(const struct i915_request *rq)
+static inline bool i915_request_is_active(const struct i915_request *rq)
{
- u32 seqno;
-
- seqno = i915_request_global_seqno(rq);
- if (!seqno)
- return false;
-
- return __i915_request_completed(rq, seqno);
+ return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
}
-void i915_retire_requests(struct drm_i915_private *i915);
-
-/*
- * We treat requests as fences. This is not be to confused with our
- * "fence registers" but pipeline synchronisation objects ala GL_ARB_sync.
- * We use the fences to synchronize access from the CPU with activity on the
- * GPU, for example, we should not rewrite an object's PTE whilst the GPU
- * is reading them. We also track fences at a higher level to provide
- * implicit synchronisation around GEM objects, e.g. set-domain will wait
- * for outstanding GPU rendering before marking the object ready for CPU
- * access, or a pageflip will wait until the GPU is complete before showing
- * the frame on the scanout.
- *
- * In order to use a fence, the object must track the fence it needs to
- * serialise with. For example, GEM objects want to track both read and
- * write access so that we can perform concurrent read operations between
- * the CPU and GPU engines, as well as waiting for all rendering to
- * complete, or waiting for the last GPU user of a "fence register". The
- * object then embeds a #i915_gem_active to track the most recent (in
- * retirement order) request relevant for the desired mode of access.
- * The #i915_gem_active is updated with i915_gem_active_set() to track the
- * most recent fence request, typically this is done as part of
- * i915_vma_move_to_active().
- *
- * When the #i915_gem_active completes (is retired), it will
- * signal its completion to the owner through a callback as well as mark
- * itself as idle (i915_gem_active.request == NULL). The owner
- * can then perform any action, such as delayed freeing of an active
- * resource including itself.
- */
-struct i915_gem_active;
-
-typedef void (*i915_gem_retire_fn)(struct i915_gem_active *,
- struct i915_request *);
-
-struct i915_gem_active {
- struct i915_request __rcu *request;
- struct list_head link;
- i915_gem_retire_fn retire;
-};
-
-void i915_gem_retire_noop(struct i915_gem_active *,
- struct i915_request *request);
-
/**
- * init_request_active - prepares the activity tracker for use
- * @active - the active tracker
- * @func - a callback when then the tracker is retired (becomes idle),
- * can be NULL
- *
- * init_request_active() prepares the embedded @active struct for use as
- * an activity tracker, that is for tracking the last known active request
- * associated with it. When the last request becomes idle, when it is retired
- * after completion, the optional callback @func is invoked.
- */
-static inline void
-init_request_active(struct i915_gem_active *active,
- i915_gem_retire_fn retire)
-{
- RCU_INIT_POINTER(active->request, NULL);
- INIT_LIST_HEAD(&active->link);
- active->retire = retire ?: i915_gem_retire_noop;
-}
-
-/**
- * i915_gem_active_set - updates the tracker to watch the current request
- * @active - the active tracker
- * @request - the request to watch
- *
- * i915_gem_active_set() watches the given @request for completion. Whilst
- * that @request is busy, the @active reports busy. When that @request is
- * retired, the @active tracker is updated to report idle.
- */
-static inline void
-i915_gem_active_set(struct i915_gem_active *active,
- struct i915_request *request)
-{
- list_move(&active->link, &request->active_list);
- rcu_assign_pointer(active->request, request);
-}
-
-/**
- * i915_gem_active_set_retire_fn - updates the retirement callback
- * @active - the active tracker
- * @fn - the routine called when the request is retired
- * @mutex - struct_mutex used to guard retirements
- *
- * i915_gem_active_set_retire_fn() updates the function pointer that
- * is called when the final request associated with the @active tracker
- * is retired.
+ * Returns true if seq1 is later than seq2.
*/
-static inline void
-i915_gem_active_set_retire_fn(struct i915_gem_active *active,
- i915_gem_retire_fn fn,
- struct mutex *mutex)
+static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
{
- lockdep_assert_held(mutex);
- active->retire = fn ?: i915_gem_retire_noop;
+ return (s32)(seq1 - seq2) >= 0;
}
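The signed-difference trick in i915_seqno_passed() keeps the ordering correct across a 32-bit wrap, which is what the hwsp_seqno()/fence.seqno comparisons below rely on. A standalone illustration, not driver code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
        printf("%d\n", seqno_passed(5, 3));            /* 1 */
        printf("%d\n", seqno_passed(2, 0xfffffffeu));  /* 1: 2 is "after" the wrap */
        printf("%d\n", seqno_passed(0xfffffffeu, 2));  /* 0 */
        return 0;
}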
-static inline struct i915_request *
-__i915_gem_active_peek(const struct i915_gem_active *active)
+static inline u32 __hwsp_seqno(const struct i915_request *rq)
{
- /*
- * Inside the error capture (running with the driver in an unknown
- * state), we want to bend the rules slightly (a lot).
- *
- * Work is in progress to make it safer, in the meantime this keeps
- * the known issue from spamming the logs.
- */
- return rcu_dereference_protected(active->request, 1);
+ return READ_ONCE(*rq->hwsp_seqno);
}
/**
- * i915_gem_active_raw - return the active request
- * @active - the active tracker
+ * hwsp_seqno - the current breadcrumb value in the HW status page
+ * @rq: the request, to chase the relevant HW status page
*
- * i915_gem_active_raw() returns the current request being tracked, or NULL.
- * It does not obtain a reference on the request for the caller, so the caller
- * must hold struct_mutex.
- */
-static inline struct i915_request *
-i915_gem_active_raw(const struct i915_gem_active *active, struct mutex *mutex)
-{
- return rcu_dereference_protected(active->request,
- lockdep_is_held(mutex));
-}
-
-/**
- * i915_gem_active_peek - report the active request being monitored
- * @active - the active tracker
+ * The emphasis in naming here is that hwsp_seqno() is not a property of the
+ * request, but an indication of the current HW state (associated with this
+ * request). Its value will change as the GPU executes more requests.
*
- * i915_gem_active_peek() returns the current request being tracked if
- * still active, or NULL. It does not obtain a reference on the request
- * for the caller, so the caller must hold struct_mutex.
+ * Returns the current breadcrumb value in the associated HW status page (or
+ * the local timeline's equivalent) for this request. The request itself
+ * has the associated breadcrumb value of rq->fence.seqno; when the HW
+ * status page has that breadcrumb or later, this request is complete.
*/
-static inline struct i915_request *
-i915_gem_active_peek(const struct i915_gem_active *active, struct mutex *mutex)
+static inline u32 hwsp_seqno(const struct i915_request *rq)
{
- struct i915_request *request;
+ u32 seqno;
- request = i915_gem_active_raw(active, mutex);
- if (!request || i915_request_completed(request))
- return NULL;
+ rcu_read_lock(); /* the HWSP may be freed at runtime */
+ seqno = __hwsp_seqno(rq);
+ rcu_read_unlock();
- return request;
+ return seqno;
}
-/**
- * i915_gem_active_get - return a reference to the active request
- * @active - the active tracker
- *
- * i915_gem_active_get() returns a reference to the active request, or NULL
- * if the active tracker is idle. The caller must hold struct_mutex.
- */
-static inline struct i915_request *
-i915_gem_active_get(const struct i915_gem_active *active, struct mutex *mutex)
+static inline bool __i915_request_has_started(const struct i915_request *rq)
{
- return i915_request_get(i915_gem_active_peek(active, mutex));
+ return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno - 1);
}
/**
- * __i915_gem_active_get_rcu - return a reference to the active request
- * @active - the active tracker
- *
- * __i915_gem_active_get() returns a reference to the active request, or NULL
- * if the active tracker is idle. The caller must hold the RCU read lock, but
- * the returned pointer is safe to use outside of RCU.
- */
-static inline struct i915_request *
-__i915_gem_active_get_rcu(const struct i915_gem_active *active)
-{
- /*
- * Performing a lockless retrieval of the active request is super
- * tricky. SLAB_TYPESAFE_BY_RCU merely guarantees that the backing
- * slab of request objects will not be freed whilst we hold the
- * RCU read lock. It does not guarantee that the request itself
- * will not be freed and then *reused*. Viz,
- *
- * Thread A Thread B
- *
- * rq = active.request
- * retire(rq) -> free(rq);
- * (rq is now first on the slab freelist)
- * active.request = NULL
- *
- * rq = new submission on a new object
- * ref(rq)
- *
- * To prevent the request from being reused whilst the caller
- * uses it, we take a reference like normal. Whilst acquiring
- * the reference we check that it is not in a destroyed state
- * (refcnt == 0). That prevents the request being reallocated
- * whilst the caller holds on to it. To check that the request
- * was not reallocated as we acquired the reference we have to
- * check that our request remains the active request across
- * the lookup, in the same manner as a seqlock. The visibility
- * of the pointer versus the reference counting is controlled
- * by using RCU barriers (rcu_dereference and rcu_assign_pointer).
- *
- * In the middle of all that, we inspect whether the request is
- * complete. Retiring is lazy so the request may be completed long
- * before the active tracker is updated. Querying whether the
- * request is complete is far cheaper (as it involves no locked
- * instructions setting cachelines to exclusive) than acquiring
- * the reference, so we do it first. The RCU read lock ensures the
- * pointer dereference is valid, but does not ensure that the
- * seqno nor HWS is the right one! However, if the request was
- * reallocated, that means the active tracker's request was complete.
- * If the new request is also complete, then both are and we can
- * just report the active tracker is idle. If the new request is
- * incomplete, then we acquire a reference on it and check that
- * it remained the active request.
- *
- * It is then imperative that we do not zero the request on
- * reallocation, so that we can chase the dangling pointers!
- * See i915_request_alloc().
- */
- do {
- struct i915_request *request;
-
- request = rcu_dereference(active->request);
- if (!request || i915_request_completed(request))
- return NULL;
-
- /*
- * An especially silly compiler could decide to recompute the
- * result of i915_request_completed, more specifically
- * re-emit the load for request->fence.seqno. A race would catch
- * a later seqno value, which could flip the result from true to
- * false. Which means part of the instructions below might not
- * be executed, while later on instructions are executed. Due to
- * barriers within the refcounting the inconsistency can't reach
- * past the call to i915_request_get_rcu, but not executing
- * that while still executing i915_request_put() creates
- * havoc enough. Prevent this with a compiler barrier.
- */
- barrier();
-
- request = i915_request_get_rcu(request);
-
- /*
- * What stops the following rcu_access_pointer() from occurring
- * before the above i915_request_get_rcu()? If we were
- * to read the value before pausing to get the reference to
- * the request, we may not notice a change in the active
- * tracker.
- *
- * The rcu_access_pointer() is a mere compiler barrier, which
- * means both the CPU and compiler are free to perform the
- * memory read without constraint. The compiler only has to
- * ensure that any operations after the rcu_access_pointer()
- * occur afterwards in program order. This means the read may
- * be performed earlier by an out-of-order CPU, or adventurous
- * compiler.
- *
- * The atomic operation at the heart of
- * i915_request_get_rcu(), see dma_fence_get_rcu(), is
- * atomic_inc_not_zero() which is only a full memory barrier
- * when successful. That is, if i915_request_get_rcu()
- * returns the request (and so with the reference counted
- * incremented) then the following read for rcu_access_pointer()
- * must occur after the atomic operation and so confirm
- * that this request is the one currently being tracked.
- *
- * The corresponding write barrier is part of
- * rcu_assign_pointer().
- */
- if (!request || request == rcu_access_pointer(active->request))
- return rcu_pointer_handoff(request);
-
- i915_request_put(request);
- } while (1);
-}
-
-/**
- * i915_gem_active_get_unlocked - return a reference to the active request
- * @active - the active tracker
- *
- * i915_gem_active_get_unlocked() returns a reference to the active request,
- * or NULL if the active tracker is idle. The reference is obtained under RCU,
- * so no locking is required by the caller.
+ * i915_request_started - check if the request has begun being executed
+ * @rq: the request
*
- * The reference should be freed with i915_request_put().
+ * Returns true if the request has been submitted to hardware, and the hardware
+ * has advanced past the end of the previous request and so should be either
+ * currently processing the request (though it may be preempted and so
+ * not necessarily the next request to complete) or have completed the request.
*/
-static inline struct i915_request *
-i915_gem_active_get_unlocked(const struct i915_gem_active *active)
+static inline bool i915_request_started(const struct i915_request *rq)
{
- struct i915_request *request;
+ if (i915_request_signaled(rq))
+ return true;
- rcu_read_lock();
- request = __i915_gem_active_get_rcu(active);
- rcu_read_unlock();
-
- return request;
+ /* Remember: started but may have since been preempted! */
+ return __i915_request_has_started(rq);
}
/**
- * i915_gem_active_isset - report whether the active tracker is assigned
- * @active - the active tracker
+ * i915_request_is_running - check if the request may actually be executing
+ * @rq: the request
*
- * i915_gem_active_isset() returns true if the active tracker is currently
- * assigned to a request. Due to the lazy retiring, that request may be idle
- * and this may report stale information.
+ * Returns true if the request is currently submitted to hardware, has passed
+ * its start point (i.e. the context is set up and not busywaiting). Note that
+ * it may no longer be running by the time the function returns!
*/
-static inline bool
-i915_gem_active_isset(const struct i915_gem_active *active)
+static inline bool i915_request_is_running(const struct i915_request *rq)
{
- return rcu_access_pointer(active->request);
+ if (!i915_request_is_active(rq))
+ return false;
+
+ return __i915_request_has_started(rq);
}
-/**
- * i915_gem_active_wait - waits until the request is completed
- * @active - the active request on which to wait
- * @flags - how to wait
- * @timeout - how long to wait at most
- * @rps - userspace client to charge for a waitboost
- *
- * i915_gem_active_wait() waits until the request is completed before
- * returning, without requiring any locks to be held. Note that it does not
- * retire any requests before returning.
- *
- * This function relies on RCU in order to acquire the reference to the active
- * request without holding any locks. See __i915_gem_active_get_rcu() for the
- * glory details on how that is managed. Once the reference is acquired, we
- * can then wait upon the request, and afterwards release our reference,
- * free of any locking.
- *
- * This function wraps i915_request_wait(), see it for the full details on
- * the arguments.
- *
- * Returns 0 if successful, or a negative error code.
- */
-static inline int
-i915_gem_active_wait(const struct i915_gem_active *active, unsigned int flags)
+static inline bool i915_request_completed(const struct i915_request *rq)
{
- struct i915_request *request;
- long ret = 0;
-
- request = i915_gem_active_get_unlocked(active);
- if (request) {
- ret = i915_request_wait(request, flags, MAX_SCHEDULE_TIMEOUT);
- i915_request_put(request);
- }
+ if (i915_request_signaled(rq))
+ return true;
- return ret < 0 ? ret : 0;
+ return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno);
}
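
For orientation, the three predicates above compose into a simple polling pattern. The sketch below is illustrative only and is not part of this patch; it assumes the declarations from i915_request.h above are visible and that busy-waiting is acceptable for the caller.

/* Hypothetical sketch: poll a request until it reports completion. */
static void example_poll_request(const struct i915_request *rq)
{
	while (!i915_request_completed(rq)) {
		if (i915_request_is_running(rq))
			cpu_relax();    /* on the hardware right now */
		else
			cond_resched(); /* queued, preempted or not yet submitted */
	}
}
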
-/**
- * i915_gem_active_retire - waits until the request is retired
- * @active - the active request on which to wait
- *
- * i915_gem_active_retire() waits until the request is completed,
- * and then ensures that at least the retirement handler for this
- * @active tracker is called before returning. If the @active
- * tracker is idle, the function returns immediately.
- */
-static inline int __must_check
-i915_gem_active_retire(struct i915_gem_active *active,
- struct mutex *mutex)
+static inline void i915_request_mark_complete(struct i915_request *rq)
{
- struct i915_request *request;
- long ret;
-
- request = i915_gem_active_raw(active, mutex);
- if (!request)
- return 0;
-
- ret = i915_request_wait(request,
- I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
- if (ret < 0)
- return ret;
-
- list_del_init(&active->link);
- RCU_INIT_POINTER(active->request, NULL);
-
- active->retire(active, request);
-
- return 0;
+ rq->hwsp_seqno = (u32 *)&rq->fence.seqno; /* decouple from HWSP */
}
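
A hedged aside, not taken from the patch: once the tracked seqno pointer has been redirected at the fence's own seqno, the completion check above is satisfied unconditionally. This mirrors how nop_submit_request() uses it in i915_reset.c below.

/* Sketch: marking a request complete decouples it from the HWSP. */
static void example_force_complete(struct i915_request *rq)
{
	i915_request_mark_complete(rq);

	/* hwsp_seqno(rq) now reads rq->fence.seqno, so this always holds */
	GEM_BUG_ON(!i915_request_completed(rq));
}
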
-#define for_each_active(mask, idx) \
- for (; mask ? idx = ffs(mask) - 1, 1 : 0; mask &= ~BIT(idx))
+void i915_retire_requests(struct drm_i915_private *i915);
#endif /* I915_REQUEST_H */
diff --git a/drivers/gpu/drm/i915/i915_reset.c b/drivers/gpu/drm/i915/i915_reset.c
new file mode 100644
index 000000000000..0e0ddf2e6815
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_reset.c
@@ -0,0 +1,1349 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2008-2018 Intel Corporation
+ */
+
+#include <linux/sched/mm.h>
+#include <linux/stop_machine.h>
+
+#include "i915_drv.h"
+#include "i915_gpu_error.h"
+#include "i915_reset.h"
+
+#include "intel_guc.h"
+
+#define RESET_MAX_RETRIES 3
+
+/* XXX How to handle concurrent GGTT updates using tiling registers? */
+#define RESET_UNDER_STOP_MACHINE 0
+
+static void engine_skip_context(struct i915_request *rq)
+{
+ struct intel_engine_cs *engine = rq->engine;
+ struct i915_gem_context *hung_ctx = rq->gem_context;
+ struct i915_timeline *timeline = rq->timeline;
+
+ lockdep_assert_held(&engine->timeline.lock);
+ GEM_BUG_ON(timeline == &engine->timeline);
+
+ spin_lock(&timeline->lock);
+
+ if (i915_request_is_active(rq)) {
+ list_for_each_entry_continue(rq,
+ &engine->timeline.requests, link)
+ if (rq->gem_context == hung_ctx)
+ i915_request_skip(rq, -EIO);
+ }
+
+ list_for_each_entry(rq, &timeline->requests, link)
+ i915_request_skip(rq, -EIO);
+
+ spin_unlock(&timeline->lock);
+}
+
+static void client_mark_guilty(struct drm_i915_file_private *file_priv,
+ const struct i915_gem_context *ctx)
+{
+ unsigned int score;
+ unsigned long prev_hang;
+
+ if (i915_gem_context_is_banned(ctx))
+ score = I915_CLIENT_SCORE_CONTEXT_BAN;
+ else
+ score = 0;
+
+ prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
+ if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
+ score += I915_CLIENT_SCORE_HANG_FAST;
+
+ if (score) {
+ atomic_add(score, &file_priv->ban_score);
+
+ DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n",
+ ctx->name, score,
+ atomic_read(&file_priv->ban_score));
+ }
+}
+
+static bool context_mark_guilty(struct i915_gem_context *ctx)
+{
+ unsigned int score;
+ bool banned, bannable;
+
+ atomic_inc(&ctx->guilty_count);
+
+ bannable = i915_gem_context_is_bannable(ctx);
+ score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
+ banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;
+
+ /* Cool contexts don't accumulate client ban score */
+ if (!bannable)
+ return false;
+
+ if (banned) {
+ DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, banned\n",
+ ctx->name, atomic_read(&ctx->guilty_count),
+ score);
+ i915_gem_context_set_banned(ctx);
+ }
+
+ if (!IS_ERR_OR_NULL(ctx->file_priv))
+ client_mark_guilty(ctx->file_priv, ctx);
+
+ return banned;
+}
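
A worked illustration of the scoring above; the two constants are defined elsewhere in the driver and the values used here (10 and 40) are assumptions for the example, not taken from this hunk.

/*
 * Assuming CONTEXT_SCORE_GUILTY == 10 and CONTEXT_SCORE_BAN_THRESHOLD == 40:
 * a bannable context survives its first three guilty hangs
 * (ban_score 10, 20, 30) and is banned on the fourth (40 >= 40).
 * A non-bannable context still accumulates ban_score but returns early,
 * so it is never banned and its client is never charged.
 */
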
+
+static void context_mark_innocent(struct i915_gem_context *ctx)
+{
+ atomic_inc(&ctx->active_count);
+}
+
+void i915_reset_request(struct i915_request *rq, bool guilty)
+{
+ lockdep_assert_held(&rq->engine->timeline.lock);
+ GEM_BUG_ON(i915_request_completed(rq));
+
+ if (guilty) {
+ i915_request_skip(rq, -EIO);
+ if (context_mark_guilty(rq->gem_context))
+ engine_skip_context(rq);
+ } else {
+ dma_fence_set_error(&rq->fence, -EAGAIN);
+ context_mark_innocent(rq->gem_context);
+ }
+}
+
+static void gen3_stop_engine(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ const u32 base = engine->mmio_base;
+
+ if (intel_engine_stop_cs(engine))
+ DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n", engine->name);
+
+ I915_WRITE_FW(RING_HEAD(base), I915_READ_FW(RING_TAIL(base)));
+ POSTING_READ_FW(RING_HEAD(base)); /* paranoia */
+
+ I915_WRITE_FW(RING_HEAD(base), 0);
+ I915_WRITE_FW(RING_TAIL(base), 0);
+ POSTING_READ_FW(RING_TAIL(base));
+
+ /* The ring must be empty before it is disabled */
+ I915_WRITE_FW(RING_CTL(base), 0);
+
+ /* Check acts as a post */
+ if (I915_READ_FW(RING_HEAD(base)) != 0)
+ DRM_DEBUG_DRIVER("%s: ring head not parked\n",
+ engine->name);
+}
+
+static void i915_stop_engines(struct drm_i915_private *i915,
+ unsigned int engine_mask)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ if (INTEL_GEN(i915) < 3)
+ return;
+
+ for_each_engine_masked(engine, i915, engine_mask, id)
+ gen3_stop_engine(engine);
+}
+
+static bool i915_in_reset(struct pci_dev *pdev)
+{
+ u8 gdrst;
+
+ pci_read_config_byte(pdev, I915_GDRST, &gdrst);
+ return gdrst & GRDOM_RESET_STATUS;
+}
+
+static int i915_do_reset(struct drm_i915_private *i915,
+ unsigned int engine_mask,
+ unsigned int retry)
+{
+ struct pci_dev *pdev = i915->drm.pdev;
+ int err;
+
+ /* Assert reset for at least 20 usec, and wait for acknowledgement. */
+ pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
+ udelay(50);
+ err = wait_for_atomic(i915_in_reset(pdev), 50);
+
+ /* Clear the reset request. */
+ pci_write_config_byte(pdev, I915_GDRST, 0);
+ udelay(50);
+ if (!err)
+ err = wait_for_atomic(!i915_in_reset(pdev), 50);
+
+ return err;
+}
+
+static bool g4x_reset_complete(struct pci_dev *pdev)
+{
+ u8 gdrst;
+
+ pci_read_config_byte(pdev, I915_GDRST, &gdrst);
+ return (gdrst & GRDOM_RESET_ENABLE) == 0;
+}
+
+static int g33_do_reset(struct drm_i915_private *i915,
+ unsigned int engine_mask,
+ unsigned int retry)
+{
+ struct pci_dev *pdev = i915->drm.pdev;
+
+ pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
+ return wait_for_atomic(g4x_reset_complete(pdev), 50);
+}
+
+static int g4x_do_reset(struct drm_i915_private *dev_priv,
+ unsigned int engine_mask,
+ unsigned int retry)
+{
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ int ret;
+
+ /* WaVcpClkGateDisableForMediaReset:ctg,elk */
+ I915_WRITE_FW(VDECCLK_GATE_D,
+ I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
+ POSTING_READ_FW(VDECCLK_GATE_D);
+
+ pci_write_config_byte(pdev, I915_GDRST,
+ GRDOM_MEDIA | GRDOM_RESET_ENABLE);
+ ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Wait for media reset failed\n");
+ goto out;
+ }
+
+ pci_write_config_byte(pdev, I915_GDRST,
+ GRDOM_RENDER | GRDOM_RESET_ENABLE);
+ ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Wait for render reset failed\n");
+ goto out;
+ }
+
+out:
+ pci_write_config_byte(pdev, I915_GDRST, 0);
+
+ I915_WRITE_FW(VDECCLK_GATE_D,
+ I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
+ POSTING_READ_FW(VDECCLK_GATE_D);
+
+ return ret;
+}
+
+static int ironlake_do_reset(struct drm_i915_private *dev_priv,
+ unsigned int engine_mask,
+ unsigned int retry)
+{
+ int ret;
+
+ I915_WRITE_FW(ILK_GDSR, ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
+ ret = __intel_wait_for_register_fw(dev_priv, ILK_GDSR,
+ ILK_GRDOM_RESET_ENABLE, 0,
+ 5000, 0,
+ NULL);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Wait for render reset failed\n");
+ goto out;
+ }
+
+ I915_WRITE_FW(ILK_GDSR, ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
+ ret = __intel_wait_for_register_fw(dev_priv, ILK_GDSR,
+ ILK_GRDOM_RESET_ENABLE, 0,
+ 5000, 0,
+ NULL);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Wait for media reset failed\n");
+ goto out;
+ }
+
+out:
+ I915_WRITE_FW(ILK_GDSR, 0);
+ POSTING_READ_FW(ILK_GDSR);
+ return ret;
+}
+
+/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
+static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
+ u32 hw_domain_mask)
+{
+ int err;
+
+ /*
+ * GEN6_GDRST is not in the gt power well, no need to check
+ * for fifo space for the write or forcewake the chip for
+ * the read
+ */
+ I915_WRITE_FW(GEN6_GDRST, hw_domain_mask);
+
+ /* Wait for the device to ack the reset requests */
+ err = __intel_wait_for_register_fw(dev_priv,
+ GEN6_GDRST, hw_domain_mask, 0,
+ 500, 0,
+ NULL);
+ if (err)
+ DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n",
+ hw_domain_mask);
+
+ return err;
+}
+
+static int gen6_reset_engines(struct drm_i915_private *i915,
+ unsigned int engine_mask,
+ unsigned int retry)
+{
+ struct intel_engine_cs *engine;
+ const u32 hw_engine_mask[I915_NUM_ENGINES] = {
+ [RCS] = GEN6_GRDOM_RENDER,
+ [BCS] = GEN6_GRDOM_BLT,
+ [VCS] = GEN6_GRDOM_MEDIA,
+ [VCS2] = GEN8_GRDOM_MEDIA2,
+ [VECS] = GEN6_GRDOM_VECS,
+ };
+ u32 hw_mask;
+
+ if (engine_mask == ALL_ENGINES) {
+ hw_mask = GEN6_GRDOM_FULL;
+ } else {
+ unsigned int tmp;
+
+ hw_mask = 0;
+ for_each_engine_masked(engine, i915, engine_mask, tmp)
+ hw_mask |= hw_engine_mask[engine->id];
+ }
+
+ return gen6_hw_domain_reset(i915, hw_mask);
+}
+
+static u32 gen11_lock_sfc(struct drm_i915_private *dev_priv,
+ struct intel_engine_cs *engine)
+{
+ u8 vdbox_sfc_access = RUNTIME_INFO(dev_priv)->vdbox_sfc_access;
+ i915_reg_t sfc_forced_lock, sfc_forced_lock_ack;
+ u32 sfc_forced_lock_bit, sfc_forced_lock_ack_bit;
+ i915_reg_t sfc_usage;
+ u32 sfc_usage_bit;
+ u32 sfc_reset_bit;
+
+ switch (engine->class) {
+ case VIDEO_DECODE_CLASS:
+ if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
+ return 0;
+
+ sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
+ sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
+
+ sfc_forced_lock_ack = GEN11_VCS_SFC_LOCK_STATUS(engine);
+ sfc_forced_lock_ack_bit = GEN11_VCS_SFC_LOCK_ACK_BIT;
+
+ sfc_usage = GEN11_VCS_SFC_LOCK_STATUS(engine);
+ sfc_usage_bit = GEN11_VCS_SFC_USAGE_BIT;
+ sfc_reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance);
+ break;
+
+ case VIDEO_ENHANCEMENT_CLASS:
+ sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
+ sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
+
+ sfc_forced_lock_ack = GEN11_VECS_SFC_LOCK_ACK(engine);
+ sfc_forced_lock_ack_bit = GEN11_VECS_SFC_LOCK_ACK_BIT;
+
+ sfc_usage = GEN11_VECS_SFC_USAGE(engine);
+ sfc_usage_bit = GEN11_VECS_SFC_USAGE_BIT;
+ sfc_reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance);
+ break;
+
+ default:
+ return 0;
+ }
+
+ /*
+ * Tell the engine that a software reset is going to happen. The engine
+ * will then try to force lock the SFC (if currently locked, it will
+ * remain so until we tell the engine it is safe to unlock; if currently
+ * unlocked, it will ignore this and all new lock requests). If SFC
+ * ends up being locked to the engine we want to reset, we have to reset
+ * it as well (we will unlock it once the reset sequence is completed).
+ */
+ I915_WRITE_FW(sfc_forced_lock,
+ I915_READ_FW(sfc_forced_lock) | sfc_forced_lock_bit);
+
+ if (__intel_wait_for_register_fw(dev_priv,
+ sfc_forced_lock_ack,
+ sfc_forced_lock_ack_bit,
+ sfc_forced_lock_ack_bit,
+ 1000, 0, NULL)) {
+ DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n");
+ return 0;
+ }
+
+ if (I915_READ_FW(sfc_usage) & sfc_usage_bit)
+ return sfc_reset_bit;
+
+ return 0;
+}
+
+static void gen11_unlock_sfc(struct drm_i915_private *dev_priv,
+ struct intel_engine_cs *engine)
+{
+ u8 vdbox_sfc_access = RUNTIME_INFO(dev_priv)->vdbox_sfc_access;
+ i915_reg_t sfc_forced_lock;
+ u32 sfc_forced_lock_bit;
+
+ switch (engine->class) {
+ case VIDEO_DECODE_CLASS:
+ if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
+ return;
+
+ sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
+ sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
+ break;
+
+ case VIDEO_ENHANCEMENT_CLASS:
+ sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
+ sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
+ break;
+
+ default:
+ return;
+ }
+
+ I915_WRITE_FW(sfc_forced_lock,
+ I915_READ_FW(sfc_forced_lock) & ~sfc_forced_lock_bit);
+}
+
+static int gen11_reset_engines(struct drm_i915_private *i915,
+ unsigned int engine_mask,
+ unsigned int retry)
+{
+ const u32 hw_engine_mask[I915_NUM_ENGINES] = {
+ [RCS] = GEN11_GRDOM_RENDER,
+ [BCS] = GEN11_GRDOM_BLT,
+ [VCS] = GEN11_GRDOM_MEDIA,
+ [VCS2] = GEN11_GRDOM_MEDIA2,
+ [VCS3] = GEN11_GRDOM_MEDIA3,
+ [VCS4] = GEN11_GRDOM_MEDIA4,
+ [VECS] = GEN11_GRDOM_VECS,
+ [VECS2] = GEN11_GRDOM_VECS2,
+ };
+ struct intel_engine_cs *engine;
+ unsigned int tmp;
+ u32 hw_mask;
+ int ret;
+
+ BUILD_BUG_ON(VECS2 + 1 != I915_NUM_ENGINES);
+
+ if (engine_mask == ALL_ENGINES) {
+ hw_mask = GEN11_GRDOM_FULL;
+ } else {
+ hw_mask = 0;
+ for_each_engine_masked(engine, i915, engine_mask, tmp) {
+ hw_mask |= hw_engine_mask[engine->id];
+ hw_mask |= gen11_lock_sfc(i915, engine);
+ }
+ }
+
+ ret = gen6_hw_domain_reset(i915, hw_mask);
+
+ if (engine_mask != ALL_ENGINES)
+ for_each_engine_masked(engine, i915, engine_mask, tmp)
+ gen11_unlock_sfc(i915, engine);
+
+ return ret;
+}
+
+static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ int ret;
+
+ I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
+ _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
+
+ ret = __intel_wait_for_register_fw(dev_priv,
+ RING_RESET_CTL(engine->mmio_base),
+ RESET_CTL_READY_TO_RESET,
+ RESET_CTL_READY_TO_RESET,
+ 700, 0,
+ NULL);
+ if (ret)
+ DRM_ERROR("%s: reset request timeout\n", engine->name);
+
+ return ret;
+}
+
+static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+
+ I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
+ _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
+}
+
+static int gen8_reset_engines(struct drm_i915_private *i915,
+ unsigned int engine_mask,
+ unsigned int retry)
+{
+ struct intel_engine_cs *engine;
+ const bool reset_non_ready = retry >= 1;
+ unsigned int tmp;
+ int ret;
+
+ for_each_engine_masked(engine, i915, engine_mask, tmp) {
+ ret = gen8_engine_reset_prepare(engine);
+ if (ret && !reset_non_ready)
+ goto skip_reset;
+
+ /*
+ * If this is not the first failed attempt to prepare,
+ * we decide to proceed anyway.
+ *
+ * By doing so we risk context corruption and with
+ * some gens (kbl), possible system hang if reset
+ * happens during active bb execution.
+ *
+ * We would rather risk context corruption than a failed
+ * reset with a wedged driver/gpu. And the active bb
+ * execution case should be covered by the call to
+ * i915_stop_engines we make before the reset.
+ */
+ }
+
+ if (INTEL_GEN(i915) >= 11)
+ ret = gen11_reset_engines(i915, engine_mask, retry);
+ else
+ ret = gen6_reset_engines(i915, engine_mask, retry);
+
+skip_reset:
+ for_each_engine_masked(engine, i915, engine_mask, tmp)
+ gen8_engine_reset_cancel(engine);
+
+ return ret;
+}
+
+typedef int (*reset_func)(struct drm_i915_private *,
+ unsigned int engine_mask,
+ unsigned int retry);
+
+static reset_func intel_get_gpu_reset(struct drm_i915_private *i915)
+{
+ if (!i915_modparams.reset)
+ return NULL;
+
+ if (INTEL_GEN(i915) >= 8)
+ return gen8_reset_engines;
+ else if (INTEL_GEN(i915) >= 6)
+ return gen6_reset_engines;
+ else if (INTEL_GEN(i915) >= 5)
+ return ironlake_do_reset;
+ else if (IS_G4X(i915))
+ return g4x_do_reset;
+ else if (IS_G33(i915) || IS_PINEVIEW(i915))
+ return g33_do_reset;
+ else if (INTEL_GEN(i915) >= 3)
+ return i915_do_reset;
+ else
+ return NULL;
+}
+
+int intel_gpu_reset(struct drm_i915_private *i915, unsigned int engine_mask)
+{
+ const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
+ reset_func reset;
+ int ret = -ETIMEDOUT;
+ int retry;
+
+ reset = intel_get_gpu_reset(i915);
+ if (!reset)
+ return -ENODEV;
+
+ /*
+ * If the power well sleeps during the reset, the reset
+ * request may be dropped and never completes (causing -EIO).
+ */
+ intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
+ for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
+ /*
+ * We stop the engines, otherwise we might get a failed reset and a
+ * dead gpu (on elk). Also, a gpu as modern as kbl can suffer
+ * a system hang if a batchbuffer is in progress when
+ * the reset is issued, regardless of the READY_TO_RESET ack.
+ * Thus we assume it is best to stop the engines on all gens
+ * where we have a gpu reset.
+ *
+ * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
+ *
+ * WaMediaResetMainRingCleanup:ctg,elk (presumably)
+ *
+ * FIXME: Wa for more modern gens needs to be validated
+ */
+ i915_stop_engines(i915, engine_mask);
+
+ GEM_TRACE("engine_mask=%x\n", engine_mask);
+ preempt_disable();
+ ret = reset(i915, engine_mask, retry);
+ preempt_enable();
+ }
+ intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
+
+ return ret;
+}
+
+bool intel_has_gpu_reset(struct drm_i915_private *i915)
+{
+ if (USES_GUC(i915))
+ return false;
+
+ return intel_get_gpu_reset(i915);
+}
+
+bool intel_has_reset_engine(struct drm_i915_private *i915)
+{
+ return INTEL_INFO(i915)->has_reset_engine && i915_modparams.reset >= 2;
+}
+
+int intel_reset_guc(struct drm_i915_private *i915)
+{
+ u32 guc_domain =
+ INTEL_GEN(i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
+ int ret;
+
+ GEM_BUG_ON(!HAS_GUC(i915));
+
+ intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
+ ret = gen6_hw_domain_reset(i915, guc_domain);
+ intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
+
+ return ret;
+}
+
+/*
+ * Ensure the irq handler finishes, and will not run again.
+ * Also return the active request so that we only search for it once.
+ */
+static void reset_prepare_engine(struct intel_engine_cs *engine)
+{
+ /*
+ * During the reset sequence, we must prevent the engine from
+ * entering RC6. As the context state is undefined until we restart
+ * the engine, if it does enter RC6 during the reset, the state
+ * written to the powercontext is undefined and so we may lose
+ * GPU state upon resume, i.e. fail to restart after a reset.
+ */
+ intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);
+ engine->reset.prepare(engine);
+}
+
+static void reset_prepare(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, i915, id)
+ reset_prepare_engine(engine);
+
+ intel_uc_sanitize(i915);
+}
+
+static int gt_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err;
+
+ /*
+ * Everything depends on having the GTT running, so we need to start
+ * there.
+ */
+ err = i915_ggtt_enable_hw(i915);
+ if (err)
+ return err;
+
+ for_each_engine(engine, i915, id)
+ intel_engine_reset(engine, stalled_mask & ENGINE_MASK(id));
+
+ i915_gem_restore_fences(i915);
+
+ return err;
+}
+
+static void reset_finish_engine(struct intel_engine_cs *engine)
+{
+ engine->reset.finish(engine);
+ intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
+}
+
+struct i915_gpu_restart {
+ struct work_struct work;
+ struct drm_i915_private *i915;
+};
+
+static void restart_work(struct work_struct *work)
+{
+ struct i915_gpu_restart *arg = container_of(work, typeof(*arg), work);
+ struct drm_i915_private *i915 = arg->i915;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ intel_wakeref_t wakeref;
+
+ wakeref = intel_runtime_pm_get(i915);
+ mutex_lock(&i915->drm.struct_mutex);
+ WRITE_ONCE(i915->gpu_error.restart, NULL);
+
+ for_each_engine(engine, i915, id) {
+ struct i915_request *rq;
+
+ /*
+ * Ostensibly, we always want a context loaded for powersaving,
+ * so if the engine is idle after the reset, send a request
+ * to load our scratch kernel_context.
+ */
+ if (!intel_engine_is_idle(engine))
+ continue;
+
+ rq = i915_request_alloc(engine, i915->kernel_context);
+ if (!IS_ERR(rq))
+ i915_request_add(rq);
+ }
+
+ mutex_unlock(&i915->drm.struct_mutex);
+ intel_runtime_pm_put(i915, wakeref);
+
+ kfree(arg);
+}
+
+static void reset_finish(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, i915, id)
+ reset_finish_engine(engine);
+}
+
+static void reset_restart(struct drm_i915_private *i915)
+{
+ struct i915_gpu_restart *arg;
+
+ /*
+ * Following the reset, ensure that we always reload a context for
+ * powersaving, and correct engine->last_retired_context. Since
+ * this requires us to submit a request, queue a worker to do that
+ * task for us to evade any locking here.
+ */
+ if (READ_ONCE(i915->gpu_error.restart))
+ return;
+
+ arg = kmalloc(sizeof(*arg), GFP_KERNEL);
+ if (arg) {
+ arg->i915 = i915;
+ INIT_WORK(&arg->work, restart_work);
+
+ WRITE_ONCE(i915->gpu_error.restart, arg);
+ queue_work(i915->wq, &arg->work);
+ }
+}
+
+static void nop_submit_request(struct i915_request *request)
+{
+ struct intel_engine_cs *engine = request->engine;
+ unsigned long flags;
+
+ GEM_TRACE("%s fence %llx:%lld -> -EIO\n",
+ engine->name, request->fence.context, request->fence.seqno);
+ dma_fence_set_error(&request->fence, -EIO);
+
+ spin_lock_irqsave(&engine->timeline.lock, flags);
+ __i915_request_submit(request);
+ i915_request_mark_complete(request);
+ intel_engine_write_global_seqno(engine, request->global_seqno);
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
+
+ intel_engine_queue_breadcrumbs(engine);
+}
+
+void i915_gem_set_wedged(struct drm_i915_private *i915)
+{
+ struct i915_gpu_error *error = &i915->gpu_error;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ mutex_lock(&error->wedge_mutex);
+ if (test_bit(I915_WEDGED, &error->flags)) {
+ mutex_unlock(&error->wedge_mutex);
+ return;
+ }
+
+ if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(i915)) {
+ struct drm_printer p = drm_debug_printer(__func__);
+
+ for_each_engine(engine, i915, id)
+ intel_engine_dump(engine, &p, "%s\n", engine->name);
+ }
+
+ GEM_TRACE("start\n");
+
+ /*
+ * First, stop submission to hw, but do not yet complete requests by
+ * rolling the global seqno forward (since this would complete requests
+ * for which we haven't set the fence error to EIO yet).
+ */
+ for_each_engine(engine, i915, id)
+ reset_prepare_engine(engine);
+
+ /* Even if the GPU reset fails, it should still stop the engines */
+ if (INTEL_GEN(i915) >= 5)
+ intel_gpu_reset(i915, ALL_ENGINES);
+
+ for_each_engine(engine, i915, id) {
+ engine->submit_request = nop_submit_request;
+ engine->schedule = NULL;
+ }
+ i915->caps.scheduler = 0;
+
+ /*
+ * Make sure no request can slip through without getting completed by
+ * either this call here to intel_engine_write_global_seqno, or the one
+ * in nop_submit_request.
+ */
+ synchronize_rcu();
+
+ /* Mark all executing requests as skipped */
+ for_each_engine(engine, i915, id)
+ engine->cancel_requests(engine);
+
+ for_each_engine(engine, i915, id) {
+ reset_finish_engine(engine);
+ intel_engine_signal_breadcrumbs(engine);
+ }
+
+ smp_mb__before_atomic();
+ set_bit(I915_WEDGED, &error->flags);
+
+ GEM_TRACE("end\n");
+ mutex_unlock(&error->wedge_mutex);
+
+ wake_up_all(&error->reset_queue);
+}
+
+bool i915_gem_unset_wedged(struct drm_i915_private *i915)
+{
+ struct i915_gpu_error *error = &i915->gpu_error;
+ struct i915_timeline *tl;
+ bool ret = false;
+
+ if (!test_bit(I915_WEDGED, &error->flags))
+ return true;
+
+ if (!i915->gt.scratch) /* Never fully initialised, recovery impossible */
+ return false;
+
+ mutex_lock(&error->wedge_mutex);
+
+ GEM_TRACE("start\n");
+
+ /*
+ * Before unwedging, make sure that all pending operations
+ * are flushed and errored out - we may have requests waiting upon
+ * third party fences. We marked all inflight requests as EIO, and
+ * every execbuf since has returned EIO; for consistency we want all
+ * the currently pending requests to also be marked as EIO, which
+ * is done inside our nop_submit_request - and so we must wait.
+ *
+ * No more can be submitted until we reset the wedged bit.
+ */
+ mutex_lock(&i915->gt.timelines.mutex);
+ list_for_each_entry(tl, &i915->gt.timelines.active_list, link) {
+ struct i915_request *rq;
+ long timeout;
+
+ rq = i915_active_request_get_unlocked(&tl->last_request);
+ if (!rq)
+ continue;
+
+ /*
+ * We can't use our normal waiter as we want to
+ * avoid recursively trying to handle the current
+ * reset. The basic dma_fence_default_wait() installs
+ * a callback for dma_fence_signal(), which is
+ * triggered by our nop handler (indirectly, the
+ * callback enables the signaler thread which is
+ * woken by the nop_submit_request() advancing the seqno
+ * and when the seqno passes the fence, the signaler
+ * then signals the fence waking us up).
+ */
+ timeout = dma_fence_default_wait(&rq->fence, true,
+ MAX_SCHEDULE_TIMEOUT);
+ i915_request_put(rq);
+ if (timeout < 0) {
+ mutex_unlock(&i915->gt.timelines.mutex);
+ goto unlock;
+ }
+ }
+ mutex_unlock(&i915->gt.timelines.mutex);
+
+ intel_engines_sanitize(i915, false);
+
+ /*
+ * Undo nop_submit_request. We prevent all new i915 requests from
+ * being queued (by disallowing execbuf whilst wedged) so having
+ * waited for all active requests above, we know the system is idle
+ * and do not have to worry about a thread being inside
+ * engine->submit_request() as we swap over. So unlike installing
+ * the nop_submit_request on reset, we can do this from normal
+ * context and do not require stop_machine().
+ */
+ intel_engines_reset_default_submission(i915);
+
+ GEM_TRACE("end\n");
+
+ smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
+ clear_bit(I915_WEDGED, &i915->gpu_error.flags);
+ ret = true;
+unlock:
+ mutex_unlock(&i915->gpu_error.wedge_mutex);
+
+ return ret;
+}
+
+struct __i915_reset {
+ struct drm_i915_private *i915;
+ unsigned int stalled_mask;
+};
+
+static int __i915_reset__BKL(void *data)
+{
+ struct __i915_reset *arg = data;
+ int err;
+
+ err = intel_gpu_reset(arg->i915, ALL_ENGINES);
+ if (err)
+ return err;
+
+ return gt_reset(arg->i915, arg->stalled_mask);
+}
+
+#if RESET_UNDER_STOP_MACHINE
+/*
+ * XXX An alternative to using stop_machine would be to park only the
+ * processes that have a GGTT mmap. By remote parking the threads (SIGSTOP)
+ * we should be able to prevent their memory accesses via the lost fence
+ * registers over the course of the reset without the potential recursive
+ * locking of mutexes between the pagefault handler and reset.
+ *
+ * See igt/gem_mmap_gtt/hang
+ */
+#define __do_reset(fn, arg) stop_machine(fn, arg, NULL)
+#else
+#define __do_reset(fn, arg) fn(arg)
+#endif
+
+static int do_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
+{
+ struct __i915_reset arg = { i915, stalled_mask };
+ int err, i;
+
+ err = __do_reset(__i915_reset__BKL, &arg);
+ for (i = 0; err && i < RESET_MAX_RETRIES; i++) {
+ msleep(100);
+ err = __do_reset(__i915_reset__BKL, &arg);
+ }
+
+ return err;
+}
+
+/**
+ * i915_reset - reset chip after a hang
+ * @i915: #drm_i915_private to reset
+ * @stalled_mask: mask of the stalled engines with the guilty requests
+ * @reason: user error message for why we are resetting
+ *
+ * Reset the chip. Useful if a hang is detected. Marks the device as wedged
+ * on failure.
+ *
+ * Caller must hold the struct_mutex.
+ *
+ * Procedure is fairly simple:
+ * - reset the chip using the reset reg
+ * - re-init context state
+ * - re-init hardware status page
+ * - re-init ring buffer
+ * - re-init interrupt state
+ * - re-init display
+ */
+void i915_reset(struct drm_i915_private *i915,
+ unsigned int stalled_mask,
+ const char *reason)
+{
+ struct i915_gpu_error *error = &i915->gpu_error;
+ int ret;
+
+ GEM_TRACE("flags=%lx\n", error->flags);
+
+ might_sleep();
+ assert_rpm_wakelock_held(i915);
+ GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));
+
+ /* Clear any previous failed attempts at recovery. Time to try again. */
+ if (!i915_gem_unset_wedged(i915))
+ return;
+
+ if (reason)
+ dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason);
+ error->reset_count++;
+
+ reset_prepare(i915);
+
+ if (!intel_has_gpu_reset(i915)) {
+ if (i915_modparams.reset)
+ dev_err(i915->drm.dev, "GPU reset not supported\n");
+ else
+ DRM_DEBUG_DRIVER("GPU reset disabled\n");
+ goto error;
+ }
+
+ if (do_reset(i915, stalled_mask)) {
+ dev_err(i915->drm.dev, "Failed to reset chip\n");
+ goto taint;
+ }
+
+ intel_overlay_reset(i915);
+
+ /*
+ * Next we need to restore the context, but we don't use those
+ * yet either...
+ *
+ * Ring buffer needs to be re-initialized in the KMS case, or if X
+ * was running at the time of the reset (i.e. we weren't VT
+ * switched away).
+ */
+ ret = i915_gem_init_hw(i915);
+ if (ret) {
+ DRM_ERROR("Failed to initialise HW following reset (%d)\n",
+ ret);
+ goto error;
+ }
+
+ i915_queue_hangcheck(i915);
+
+finish:
+ reset_finish(i915);
+ if (!i915_terminally_wedged(error))
+ reset_restart(i915);
+ return;
+
+taint:
+ /*
+ * History tells us that if we cannot reset the GPU now, we
+ * never will. This then impacts everything that is run
+ * subsequently. On failing the reset, we mark the driver
+ * as wedged, preventing further execution on the GPU.
+ * We also want to go one step further and add a taint to the
+ * kernel so that any subsequent faults can be traced back to
+ * this failure. This is important for CI, where if the
+ * GPU/driver fails we would like to reboot and restart testing
+ * rather than continue on into oblivion. For everyone else,
+ * the system should still plod along, but they have been warned!
+ */
+ add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
+error:
+ i915_gem_set_wedged(i915);
+ goto finish;
+}
+
+static inline int intel_gt_reset_engine(struct drm_i915_private *i915,
+ struct intel_engine_cs *engine)
+{
+ return intel_gpu_reset(i915, intel_engine_flag(engine));
+}
+
+/**
+ * i915_reset_engine - reset GPU engine to recover from a hang
+ * @engine: engine to reset
+ * @msg: reason for GPU reset; or NULL for no dev_notice()
+ *
+ * Reset a specific GPU engine. Useful if a hang is detected.
+ * Returns zero on successful reset or otherwise an error code.
+ *
+ * Procedure is:
+ * - identifies the request that caused the hang and it is dropped
+ * - reset engine (which will force the engine to idle)
+ * - re-init/configure engine
+ */
+int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
+{
+ struct i915_gpu_error *error = &engine->i915->gpu_error;
+ int ret;
+
+ GEM_TRACE("%s flags=%lx\n", engine->name, error->flags);
+ GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
+
+ reset_prepare_engine(engine);
+
+ if (msg)
+ dev_notice(engine->i915->drm.dev,
+ "Resetting %s for %s\n", engine->name, msg);
+ error->reset_engine_count[engine->id]++;
+
+ if (!engine->i915->guc.execbuf_client)
+ ret = intel_gt_reset_engine(engine->i915, engine);
+ else
+ ret = intel_guc_reset_engine(&engine->i915->guc, engine);
+ if (ret) {
+ /* If we fail here, we expect to fallback to a global reset */
+ DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n",
+ engine->i915->guc.execbuf_client ? "GuC " : "",
+ engine->name, ret);
+ goto out;
+ }
+
+ /*
+ * The request that caused the hang is stuck on elsp, we know the
+ * active request and can drop it, adjust head to skip the offending
+ * request to resume executing remaining requests in the queue.
+ */
+ intel_engine_reset(engine, true);
+
+ /*
+ * The engine and its registers (and workarounds in case of render)
+ * have been reset to their default values. Follow the init_ring
+ * process to program RING_MODE, HWSP and re-enable submission.
+ */
+ ret = engine->init_hw(engine);
+ if (ret)
+ goto out;
+
+out:
+ intel_engine_cancel_stop_cs(engine);
+ reset_finish_engine(engine);
+ return ret;
+}
+
+static void i915_reset_device(struct drm_i915_private *i915,
+ u32 engine_mask,
+ const char *reason)
+{
+ struct i915_gpu_error *error = &i915->gpu_error;
+ struct kobject *kobj = &i915->drm.primary->kdev->kobj;
+ char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
+ char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
+ char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
+ struct i915_wedge_me w;
+
+ kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
+
+ DRM_DEBUG_DRIVER("resetting chip\n");
+ kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
+
+ /* Use a watchdog to ensure that our reset completes */
+ i915_wedge_on_timeout(&w, i915, 5 * HZ) {
+ intel_prepare_reset(i915);
+
+ i915_reset(i915, engine_mask, reason);
+
+ intel_finish_reset(i915);
+ }
+
+ if (!test_bit(I915_WEDGED, &error->flags))
+ kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
+}
+
+void i915_clear_error_registers(struct drm_i915_private *dev_priv)
+{
+ u32 eir;
+
+ if (!IS_GEN(dev_priv, 2))
+ I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER));
+
+ if (INTEL_GEN(dev_priv) < 4)
+ I915_WRITE(IPEIR, I915_READ(IPEIR));
+ else
+ I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965));
+
+ I915_WRITE(EIR, I915_READ(EIR));
+ eir = I915_READ(EIR);
+ if (eir) {
+ /*
+ * some errors might have become stuck,
+ * mask them.
+ */
+ DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
+ I915_WRITE(EMR, I915_READ(EMR) | eir);
+ I915_WRITE(IIR, I915_MASTER_ERROR_INTERRUPT);
+ }
+
+ if (INTEL_GEN(dev_priv) >= 8) {
+ I915_WRITE(GEN8_RING_FAULT_REG,
+ I915_READ(GEN8_RING_FAULT_REG) & ~RING_FAULT_VALID);
+ POSTING_READ(GEN8_RING_FAULT_REG);
+ } else if (INTEL_GEN(dev_priv) >= 6) {
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, dev_priv, id) {
+ I915_WRITE(RING_FAULT_REG(engine),
+ I915_READ(RING_FAULT_REG(engine)) &
+ ~RING_FAULT_VALID);
+ }
+ POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
+ }
+}
+
+/**
+ * i915_handle_error - handle a gpu error
+ * @i915: i915 device private
+ * @engine_mask: mask representing engines that are hung
+ * @flags: control flags
+ * @fmt: Error message format string
+ *
+ * Do some basic checking of register state at error time and
+ * dump it to the syslog. Also call i915_capture_error_state() to make
+ * sure we get a record and make it available in debugfs. Fire a uevent
+ * so userspace knows something bad happened (should trigger collection
+ * of a ring dump etc.).
+ */
+void i915_handle_error(struct drm_i915_private *i915,
+ u32 engine_mask,
+ unsigned long flags,
+ const char *fmt, ...)
+{
+ struct intel_engine_cs *engine;
+ intel_wakeref_t wakeref;
+ unsigned int tmp;
+ char error_msg[80];
+ char *msg = NULL;
+
+ if (fmt) {
+ va_list args;
+
+ va_start(args, fmt);
+ vscnprintf(error_msg, sizeof(error_msg), fmt, args);
+ va_end(args);
+
+ msg = error_msg;
+ }
+
+ /*
+ * In most cases it's guaranteed that we get here with an RPM
+ * reference held, for example because there is a pending GPU
+ * request that won't finish until the reset is done. This
+ * isn't the case at least when we get here by doing a
+ * simulated reset via debugfs, so get an RPM reference.
+ */
+ wakeref = intel_runtime_pm_get(i915);
+
+ engine_mask &= INTEL_INFO(i915)->ring_mask;
+
+ if (flags & I915_ERROR_CAPTURE) {
+ i915_capture_error_state(i915, engine_mask, msg);
+ i915_clear_error_registers(i915);
+ }
+
+ /*
+ * Try engine reset when available. We fall back to full reset if
+ * single reset fails.
+ */
+ if (intel_has_reset_engine(i915) &&
+ !i915_terminally_wedged(&i915->gpu_error)) {
+ for_each_engine_masked(engine, i915, engine_mask, tmp) {
+ BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
+ if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
+ &i915->gpu_error.flags))
+ continue;
+
+ if (i915_reset_engine(engine, msg) == 0)
+ engine_mask &= ~intel_engine_flag(engine);
+
+ clear_bit(I915_RESET_ENGINE + engine->id,
+ &i915->gpu_error.flags);
+ wake_up_bit(&i915->gpu_error.flags,
+ I915_RESET_ENGINE + engine->id);
+ }
+ }
+
+ if (!engine_mask)
+ goto out;
+
+ /* Full reset needs the mutex, stop any other user trying to do so. */
+ if (test_and_set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags)) {
+ wait_event(i915->gpu_error.reset_queue,
+ !test_bit(I915_RESET_BACKOFF,
+ &i915->gpu_error.flags));
+ goto out;
+ }
+
+ /* Prevent any other reset-engine attempt. */
+ for_each_engine(engine, i915, tmp) {
+ while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
+ &i915->gpu_error.flags))
+ wait_on_bit(&i915->gpu_error.flags,
+ I915_RESET_ENGINE + engine->id,
+ TASK_UNINTERRUPTIBLE);
+ }
+
+ i915_reset_device(i915, engine_mask, msg);
+
+ for_each_engine(engine, i915, tmp) {
+ clear_bit(I915_RESET_ENGINE + engine->id,
+ &i915->gpu_error.flags);
+ }
+
+ clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
+ wake_up_all(&i915->gpu_error.reset_queue);
+
+out:
+ intel_runtime_pm_put(i915, wakeref);
+}
+
+bool i915_reset_flush(struct drm_i915_private *i915)
+{
+ int err;
+
+ cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work);
+
+ flush_workqueue(i915->wq);
+ GEM_BUG_ON(READ_ONCE(i915->gpu_error.restart));
+
+ mutex_lock(&i915->drm.struct_mutex);
+ err = i915_gem_wait_for_idle(i915,
+ I915_WAIT_LOCKED |
+ I915_WAIT_FOR_IDLE_BOOST,
+ MAX_SCHEDULE_TIMEOUT);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ return !err;
+}
+
+static void i915_wedge_me(struct work_struct *work)
+{
+ struct i915_wedge_me *w = container_of(work, typeof(*w), work.work);
+
+ dev_err(w->i915->drm.dev,
+ "%s timed out, cancelling all in-flight rendering.\n",
+ w->name);
+ i915_gem_set_wedged(w->i915);
+}
+
+void __i915_init_wedge(struct i915_wedge_me *w,
+ struct drm_i915_private *i915,
+ long timeout,
+ const char *name)
+{
+ w->i915 = i915;
+ w->name = name;
+
+ INIT_DELAYED_WORK_ONSTACK(&w->work, i915_wedge_me);
+ schedule_delayed_work(&w->work, timeout);
+}
+
+void __i915_fini_wedge(struct i915_wedge_me *w)
+{
+ cancel_delayed_work_sync(&w->work);
+ destroy_delayed_work_on_stack(&w->work);
+ w->i915 = NULL;
+}
diff --git a/drivers/gpu/drm/i915/i915_reset.h b/drivers/gpu/drm/i915/i915_reset.h
new file mode 100644
index 000000000000..f2d347f319df
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_reset.h
@@ -0,0 +1,59 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2008-2018 Intel Corporation
+ */
+
+#ifndef I915_RESET_H
+#define I915_RESET_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct intel_engine_cs;
+struct intel_guc;
+
+__printf(4, 5)
+void i915_handle_error(struct drm_i915_private *i915,
+ u32 engine_mask,
+ unsigned long flags,
+ const char *fmt, ...);
+#define I915_ERROR_CAPTURE BIT(0)
+
+void i915_clear_error_registers(struct drm_i915_private *i915);
+
+void i915_reset(struct drm_i915_private *i915,
+ unsigned int stalled_mask,
+ const char *reason);
+int i915_reset_engine(struct intel_engine_cs *engine,
+ const char *reason);
+
+void i915_reset_request(struct i915_request *rq, bool guilty);
+bool i915_reset_flush(struct drm_i915_private *i915);
+
+bool intel_has_gpu_reset(struct drm_i915_private *i915);
+bool intel_has_reset_engine(struct drm_i915_private *i915);
+
+int intel_gpu_reset(struct drm_i915_private *i915, u32 engine_mask);
+
+int intel_reset_guc(struct drm_i915_private *i915);
+
+struct i915_wedge_me {
+ struct delayed_work work;
+ struct drm_i915_private *i915;
+ const char *name;
+};
+
+void __i915_init_wedge(struct i915_wedge_me *w,
+ struct drm_i915_private *i915,
+ long timeout,
+ const char *name);
+void __i915_fini_wedge(struct i915_wedge_me *w);
+
+#define i915_wedge_on_timeout(W, DEV, TIMEOUT) \
+ for (__i915_init_wedge((W), (DEV), (TIMEOUT), __func__); \
+ (W)->i915; \
+ __i915_fini_wedge((W)))
+
+#endif /* I915_RESET_H */
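
A minimal usage sketch of the wedge watchdog declared above, following the same pattern i915_reset_device() uses in i915_reset.c; the 5 second timeout is an illustrative assumption, not part of the patch.

/* Sketch: wedge the driver if the guarded block does not finish in time. */
static void example_guarded_reset(struct drm_i915_private *i915)
{
	struct i915_wedge_me w;

	i915_wedge_on_timeout(&w, i915, 5 * HZ) {
		/* work that must complete before the watchdog fires */
		intel_gpu_reset(i915, ALL_ENGINES);
	}
}
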
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 340faea6c08a..d01683167c77 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -127,8 +127,7 @@ static inline struct i915_priolist *to_priolist(struct rb_node *rb)
return rb_entry(rb, struct i915_priolist, node);
}
-static void assert_priolists(struct intel_engine_execlists * const execlists,
- long queue_priority)
+static void assert_priolists(struct intel_engine_execlists * const execlists)
{
struct rb_node *rb;
long last_prio, i;
@@ -139,7 +138,7 @@ static void assert_priolists(struct intel_engine_execlists * const execlists,
GEM_BUG_ON(rb_first_cached(&execlists->queue) !=
rb_first(&execlists->queue.rb_root));
- last_prio = (queue_priority >> I915_USER_PRIORITY_SHIFT) + 1;
+ last_prio = (INT_MAX >> I915_USER_PRIORITY_SHIFT) + 1;
for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
const struct i915_priolist *p = to_priolist(rb);
@@ -166,7 +165,7 @@ i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
int idx, i;
lockdep_assert_held(&engine->timeline.lock);
- assert_priolists(execlists, INT_MAX);
+ assert_priolists(execlists);
/* buckets sorted from highest [in slot 0] to lowest priority */
idx = I915_PRIORITY_COUNT - (prio & I915_PRIORITY_MASK) - 1;
@@ -239,6 +238,18 @@ sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
return engine;
}
+static bool inflight(const struct i915_request *rq,
+ const struct intel_engine_cs *engine)
+{
+ const struct i915_request *active;
+
+ if (!i915_request_is_active(rq))
+ return false;
+
+ active = port_request(engine->execlists.port);
+ return active->hw_context == rq->hw_context;
+}
+
static void __i915_schedule(struct i915_request *rq,
const struct i915_sched_attr *attr)
{
@@ -328,6 +339,7 @@ static void __i915_schedule(struct i915_request *rq,
INIT_LIST_HEAD(&dep->dfs_link);
engine = sched_lock_engine(node, engine);
+ lockdep_assert_held(&engine->timeline.lock);
/* Recheck after acquiring the engine->timeline.lock */
if (prio <= node->attr.priority || node_signaled(node))
@@ -353,20 +365,19 @@ static void __i915_schedule(struct i915_request *rq,
continue;
}
- if (prio <= engine->execlists.queue_priority)
+ if (prio <= engine->execlists.queue_priority_hint)
continue;
+ engine->execlists.queue_priority_hint = prio;
+
/*
* If we are already the currently executing context, don't
* bother evaluating if we should preempt ourselves.
*/
- if (node_to_request(node)->global_seqno &&
- i915_seqno_passed(port_request(engine->execlists.port)->global_seqno,
- node_to_request(node)->global_seqno))
+ if (inflight(node_to_request(node), engine))
continue;
/* Defer (tasklet) submission until after all of our updates. */
- engine->execlists.queue_priority = prio;
tasklet_hi_schedule(&engine->execlists.tasklet);
}
diff --git a/drivers/gpu/drm/i915/i915_selftest.h b/drivers/gpu/drm/i915/i915_selftest.h
index a73472dd12fd..207e21b478f2 100644
--- a/drivers/gpu/drm/i915/i915_selftest.h
+++ b/drivers/gpu/drm/i915/i915_selftest.h
@@ -31,6 +31,7 @@ struct i915_selftest {
unsigned long timeout_jiffies;
unsigned int timeout_ms;
unsigned int random_seed;
+ char *filter;
int mock;
int live;
};
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 8f3aa4dc0c98..d2f2a9c2fabd 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -24,7 +24,6 @@
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "intel_drv.h"
#include "i915_reg.h"
@@ -65,7 +64,7 @@ int i915_save_state(struct drm_i915_private *dev_priv)
i915_save_display(dev_priv);
- if (IS_GEN4(dev_priv))
+ if (IS_GEN(dev_priv, 4))
pci_read_config_word(pdev, GCDGMBUS,
&dev_priv->regfile.saveGCDGMBUS);
@@ -77,17 +76,17 @@ int i915_save_state(struct drm_i915_private *dev_priv)
dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
/* Scratch space */
- if (IS_GEN2(dev_priv) && IS_MOBILE(dev_priv)) {
+ if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) {
for (i = 0; i < 7; i++) {
dev_priv->regfile.saveSWF0[i] = I915_READ(SWF0(i));
dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
}
for (i = 0; i < 3; i++)
dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i));
- } else if (IS_GEN2(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 2)) {
for (i = 0; i < 7; i++)
dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
- } else if (HAS_GMCH_DISPLAY(dev_priv)) {
+ } else if (HAS_GMCH(dev_priv)) {
for (i = 0; i < 16; i++) {
dev_priv->regfile.saveSWF0[i] = I915_READ(SWF0(i));
dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
@@ -108,7 +107,7 @@ int i915_restore_state(struct drm_i915_private *dev_priv)
mutex_lock(&dev_priv->drm.struct_mutex);
- if (IS_GEN4(dev_priv))
+ if (IS_GEN(dev_priv, 4))
pci_write_config_word(pdev, GCDGMBUS,
dev_priv->regfile.saveGCDGMBUS);
i915_restore_display(dev_priv);
@@ -122,17 +121,17 @@ int i915_restore_state(struct drm_i915_private *dev_priv)
I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000);
/* Scratch space */
- if (IS_GEN2(dev_priv) && IS_MOBILE(dev_priv)) {
+ if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) {
for (i = 0; i < 7; i++) {
I915_WRITE(SWF0(i), dev_priv->regfile.saveSWF0[i]);
I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
}
for (i = 0; i < 3; i++)
I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]);
- } else if (IS_GEN2(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 2)) {
for (i = 0; i < 7; i++)
I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
- } else if (HAS_GMCH_DISPLAY(dev_priv)) {
+ } else if (HAS_GMCH(dev_priv)) {
for (i = 0; i < 16; i++) {
I915_WRITE(SWF0(i), dev_priv->regfile.saveSWF0[i]);
I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index fc2eeab823b7..7c58b049ecb5 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -390,7 +390,7 @@ static void timer_i915_sw_fence_wake(struct timer_list *t)
if (!fence)
return;
- pr_notice("Asynchronous wait on fence %s:%s:%x timed out (hint:%pS)\n",
+ pr_notice("Asynchronous wait on fence %s:%s:%llx timed out (hint:%pS)\n",
cb->dma->ops->get_driver_name(cb->dma),
cb->dma->ops->get_timeline_name(cb->dma),
cb->dma->seqno,
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index c0cfe7ae2ba5..41313005af42 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -42,11 +42,11 @@ static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev)
static u32 calc_residency(struct drm_i915_private *dev_priv,
i915_reg_t reg)
{
- u64 res;
+ intel_wakeref_t wakeref;
+ u64 res = 0;
- intel_runtime_pm_get(dev_priv);
- res = intel_rc6_residency_us(dev_priv, reg);
- intel_runtime_pm_put(dev_priv);
+ with_intel_runtime_pm(dev_priv, wakeref)
+ res = intel_rc6_residency_us(dev_priv, reg);
return DIV_ROUND_CLOSEST_ULL(res, 1000);
}
@@ -258,9 +258,10 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
struct device_attribute *attr, char *buf)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+ intel_wakeref_t wakeref;
int ret;
- intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(dev_priv);
mutex_lock(&dev_priv->pcu_lock);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
@@ -274,7 +275,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
}
mutex_unlock(&dev_priv->pcu_lock);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}
@@ -354,6 +355,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ intel_wakeref_t wakeref;
u32 val;
ssize_t ret;
@@ -361,7 +363,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
if (ret)
return ret;
- intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(dev_priv);
mutex_lock(&dev_priv->pcu_lock);
@@ -371,7 +373,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
val > rps->max_freq ||
val < rps->min_freq_softlimit) {
mutex_unlock(&dev_priv->pcu_lock);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
return -EINVAL;
}
@@ -392,7 +394,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
mutex_unlock(&dev_priv->pcu_lock);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
return ret ?: count;
}
@@ -412,6 +414,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ intel_wakeref_t wakeref;
u32 val;
ssize_t ret;
@@ -419,7 +422,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
if (ret)
return ret;
- intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(dev_priv);
mutex_lock(&dev_priv->pcu_lock);
@@ -429,7 +432,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
val > rps->max_freq ||
val > rps->max_freq_softlimit) {
mutex_unlock(&dev_priv->pcu_lock);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
return -EINVAL;
}
@@ -446,7 +449,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
mutex_unlock(&dev_priv->pcu_lock);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
return ret ?: count;
}
diff --git a/drivers/gpu/drm/i915/i915_timeline.c b/drivers/gpu/drm/i915/i915_timeline.c
index 4667cc08c416..b2202d2e58a2 100644
--- a/drivers/gpu/drm/i915/i915_timeline.c
+++ b/drivers/gpu/drm/i915/i915_timeline.c
@@ -9,34 +9,199 @@
#include "i915_timeline.h"
#include "i915_syncmap.h"
-void i915_timeline_init(struct drm_i915_private *i915,
- struct i915_timeline *timeline,
- const char *name)
+struct i915_timeline_hwsp {
+ struct i915_vma *vma;
+ struct list_head free_link;
+ u64 free_bitmap;
+};
+
+static inline struct i915_timeline_hwsp *
+i915_timeline_hwsp(const struct i915_timeline *tl)
+{
+ return tl->hwsp_ggtt->private;
+}
+
+static struct i915_vma *__hwsp_alloc(struct drm_i915_private *i915)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
+
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ if (IS_ERR(vma))
+ i915_gem_object_put(obj);
+
+ return vma;
+}
+
+static struct i915_vma *
+hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline)
{
- lockdep_assert_held(&i915->drm.struct_mutex);
+ struct drm_i915_private *i915 = timeline->i915;
+ struct i915_gt_timelines *gt = &i915->gt.timelines;
+ struct i915_timeline_hwsp *hwsp;
+
+ BUILD_BUG_ON(BITS_PER_TYPE(u64) * CACHELINE_BYTES > PAGE_SIZE);
+
+ spin_lock(&gt->hwsp_lock);
+
+ /* hwsp_free_list only contains HWSP that have available cachelines */
+ hwsp = list_first_entry_or_null(&gt->hwsp_free_list,
+ typeof(*hwsp), free_link);
+ if (!hwsp) {
+ struct i915_vma *vma;
+
+ spin_unlock(&gt->hwsp_lock);
+
+ hwsp = kmalloc(sizeof(*hwsp), GFP_KERNEL);
+ if (!hwsp)
+ return ERR_PTR(-ENOMEM);
+
+ vma = __hwsp_alloc(i915);
+ if (IS_ERR(vma)) {
+ kfree(hwsp);
+ return vma;
+ }
+
+ vma->private = hwsp;
+ hwsp->vma = vma;
+ hwsp->free_bitmap = ~0ull;
+
+ spin_lock(&gt->hwsp_lock);
+ list_add(&hwsp->free_link, &gt->hwsp_free_list);
+ }
+
+ GEM_BUG_ON(!hwsp->free_bitmap);
+ *cacheline = __ffs64(hwsp->free_bitmap);
+ hwsp->free_bitmap &= ~BIT_ULL(*cacheline);
+ if (!hwsp->free_bitmap)
+ list_del(&hwsp->free_link);
+
+ spin_unlock(&gt->hwsp_lock);
+
+ GEM_BUG_ON(hwsp->vma->private != hwsp);
+ return hwsp->vma;
+}
+
+static void hwsp_free(struct i915_timeline *timeline)
+{
+ struct i915_gt_timelines *gt = &timeline->i915->gt.timelines;
+ struct i915_timeline_hwsp *hwsp;
+
+ hwsp = i915_timeline_hwsp(timeline);
+ if (!hwsp) /* leave global HWSP alone! */
+ return;
+
+ spin_lock(&gt->hwsp_lock);
+
+ /* As a cacheline becomes available, publish the HWSP on the freelist */
+ if (!hwsp->free_bitmap)
+ list_add_tail(&hwsp->free_link, &gt->hwsp_free_list);
+
+ hwsp->free_bitmap |= BIT_ULL(timeline->hwsp_offset / CACHELINE_BYTES);
+
+ /* And if no one is left using it, give the page back to the system */
+ if (hwsp->free_bitmap == ~0ull) {
+ i915_vma_put(hwsp->vma);
+ list_del(&hwsp->free_link);
+ kfree(hwsp);
+ }
+
+ spin_unlock(&gt->hwsp_lock);
+}
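
To make the bitmap bookkeeping above concrete, here is a hedged sketch of the same arithmetic in isolation. It assumes 64-byte cachelines and a 4096-byte page, which is the constraint the BUILD_BUG_ON in hwsp_alloc above enforces so that a single u64 can describe every cacheline in the page.

/* Illustration of the per-page cacheline allocator used by hwsp_alloc/free. */
static unsigned int example_claim_cacheline(u64 *free_bitmap)
{
	unsigned int cacheline = __ffs64(*free_bitmap); /* lowest free slot */

	*free_bitmap &= ~BIT_ULL(cacheline);
	return cacheline * 64; /* byte offset of the cacheline in the page */
}

static void example_release_cacheline(u64 *free_bitmap, unsigned int offset)
{
	*free_bitmap |= BIT_ULL(offset / 64); /* 64 == CACHELINE_BYTES here */
}
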
+
+int i915_timeline_init(struct drm_i915_private *i915,
+ struct i915_timeline *timeline,
+ const char *name,
+ struct i915_vma *hwsp)
+{
+ void *vaddr;
/*
* Ideally we want a set of engines on a single leaf as we expect
* to mostly be tracking synchronisation between engines. It is not
* a huge issue if this is not the case, but we may want to mitigate
* any page crossing penalties if they become an issue.
+ *
+ * Called during early_init before we know how many engines there are.
*/
BUILD_BUG_ON(KSYNCMAP < I915_NUM_ENGINES);
+ timeline->i915 = i915;
timeline->name = name;
+ timeline->pin_count = 0;
+ timeline->has_initial_breadcrumb = !hwsp;
- list_add(&timeline->link, &i915->gt.timelines);
+ timeline->hwsp_offset = I915_GEM_HWS_SEQNO_ADDR;
+ if (!hwsp) {
+ unsigned int cacheline;
+
+ hwsp = hwsp_alloc(timeline, &cacheline);
+ if (IS_ERR(hwsp))
+ return PTR_ERR(hwsp);
+
+ timeline->hwsp_offset = cacheline * CACHELINE_BYTES;
+ }
+ timeline->hwsp_ggtt = i915_vma_get(hwsp);
+
+ vaddr = i915_gem_object_pin_map(hwsp->obj, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ hwsp_free(timeline);
+ i915_vma_put(hwsp);
+ return PTR_ERR(vaddr);
+ }
- /* Called during early_init before we know how many engines there are */
+ timeline->hwsp_seqno =
+ memset(vaddr + timeline->hwsp_offset, 0, CACHELINE_BYTES);
timeline->fence_context = dma_fence_context_alloc(1);
spin_lock_init(&timeline->lock);
- init_request_active(&timeline->last_request, NULL);
+ INIT_ACTIVE_REQUEST(&timeline->barrier);
+ INIT_ACTIVE_REQUEST(&timeline->last_request);
INIT_LIST_HEAD(&timeline->requests);
i915_syncmap_init(&timeline->sync);
+
+ return 0;
+}
+
+void i915_timelines_init(struct drm_i915_private *i915)
+{
+ struct i915_gt_timelines *gt = &i915->gt.timelines;
+
+ mutex_init(&gt->mutex);
+ INIT_LIST_HEAD(&gt->active_list);
+
+ spin_lock_init(&gt->hwsp_lock);
+ INIT_LIST_HEAD(&gt->hwsp_free_list);
+
+ /* via i915_gem_wait_for_idle() */
+ i915_gem_shrinker_taints_mutex(i915, &gt->mutex);
+}
+
+static void timeline_add_to_active(struct i915_timeline *tl)
+{
+ struct i915_gt_timelines *gt = &tl->i915->gt.timelines;
+
+ mutex_lock(&gt->mutex);
+ list_add(&tl->link, &gt->active_list);
+ mutex_unlock(&gt->mutex);
+}
+
+static void timeline_remove_from_active(struct i915_timeline *tl)
+{
+ struct i915_gt_timelines *gt = &tl->i915->gt.timelines;
+
+ mutex_lock(&gt->mutex);
+ list_del(&tl->link);
+ mutex_unlock(&gt->mutex);
}
/**
@@ -51,11 +216,11 @@ void i915_timeline_init(struct drm_i915_private *i915,
*/
void i915_timelines_park(struct drm_i915_private *i915)
{
+ struct i915_gt_timelines *gt = &i915->gt.timelines;
struct i915_timeline *timeline;
- lockdep_assert_held(&i915->drm.struct_mutex);
-
- list_for_each_entry(timeline, &i915->gt.timelines, link) {
+ mutex_lock(&gt->mutex);
+ list_for_each_entry(timeline, &gt->active_list, link) {
/*
* All known fences are completed so we can scrap
* the current sync point tracking and start afresh,
@@ -64,32 +229,88 @@ void i915_timelines_park(struct drm_i915_private *i915)
*/
i915_syncmap_free(&timeline->sync);
}
+ mutex_unlock(&gt->mutex);
}
void i915_timeline_fini(struct i915_timeline *timeline)
{
+ GEM_BUG_ON(timeline->pin_count);
GEM_BUG_ON(!list_empty(&timeline->requests));
+ GEM_BUG_ON(i915_active_request_isset(&timeline->barrier));
i915_syncmap_free(&timeline->sync);
+ hwsp_free(timeline);
- list_del(&timeline->link);
+ i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj);
+ i915_vma_put(timeline->hwsp_ggtt);
}
struct i915_timeline *
-i915_timeline_create(struct drm_i915_private *i915, const char *name)
+i915_timeline_create(struct drm_i915_private *i915,
+ const char *name,
+ struct i915_vma *global_hwsp)
{
struct i915_timeline *timeline;
+ int err;
timeline = kzalloc(sizeof(*timeline), GFP_KERNEL);
if (!timeline)
return ERR_PTR(-ENOMEM);
- i915_timeline_init(i915, timeline, name);
+ err = i915_timeline_init(i915, timeline, name, global_hwsp);
+ if (err) {
+ kfree(timeline);
+ return ERR_PTR(err);
+ }
+
kref_init(&timeline->kref);
return timeline;
}
+int i915_timeline_pin(struct i915_timeline *tl)
+{
+ int err;
+
+ if (tl->pin_count++)
+ return 0;
+ GEM_BUG_ON(!tl->pin_count);
+
+ err = i915_vma_pin(tl->hwsp_ggtt, 0, 0, PIN_GLOBAL | PIN_HIGH);
+ if (err)
+ goto unpin;
+
+ tl->hwsp_offset =
+ i915_ggtt_offset(tl->hwsp_ggtt) +
+ offset_in_page(tl->hwsp_offset);
+
+ timeline_add_to_active(tl);
+
+ return 0;
+
+unpin:
+ tl->pin_count = 0;
+ return err;
+}
+
+void i915_timeline_unpin(struct i915_timeline *tl)
+{
+ GEM_BUG_ON(!tl->pin_count);
+ if (--tl->pin_count)
+ return;
+
+ timeline_remove_from_active(tl);
+
+ /*
+ * Since this timeline is idle, all barriers upon which we were waiting
+ * must also be complete and so we can discard the last used barriers
+ * without loss of information.
+ */
+ i915_syncmap_free(&tl->sync);
+
+ __i915_vma_unpin(tl->hwsp_ggtt);
+}
+
void __i915_timeline_free(struct kref *kref)
{
struct i915_timeline *timeline =
@@ -99,6 +320,16 @@ void __i915_timeline_free(struct kref *kref)
kfree(timeline);
}
+void i915_timelines_fini(struct drm_i915_private *i915)
+{
+ struct i915_gt_timelines *gt = &i915->gt.timelines;
+
+ GEM_BUG_ON(!list_empty(&gt->active_list));
+ GEM_BUG_ON(!list_empty(&gt->hwsp_free_list));
+
+ mutex_destroy(&gt->mutex);
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_timeline.c"
#include "selftests/i915_timeline.c"
diff --git a/drivers/gpu/drm/i915/i915_timeline.h b/drivers/gpu/drm/i915/i915_timeline.h
index ebd71b487220..7bec7d2e45bf 100644
--- a/drivers/gpu/drm/i915/i915_timeline.h
+++ b/drivers/gpu/drm/i915/i915_timeline.h
@@ -28,10 +28,14 @@
#include <linux/list.h>
#include <linux/kref.h>
+#include "i915_active.h"
#include "i915_request.h"
#include "i915_syncmap.h"
#include "i915_utils.h"
+struct i915_vma;
+struct i915_timeline_hwsp;
+
struct i915_timeline {
u64 fence_context;
u32 seqno;
@@ -40,6 +44,13 @@ struct i915_timeline {
#define TIMELINE_CLIENT 0 /* default subclass */
#define TIMELINE_ENGINE 1
+ unsigned int pin_count;
+ const u32 *hwsp_seqno;
+ struct i915_vma *hwsp_ggtt;
+ u32 hwsp_offset;
+
+ bool has_initial_breadcrumb;
+
/**
* List of breadcrumbs associated with GPU requests currently
* outstanding.
@@ -48,10 +59,10 @@ struct i915_timeline {
/* Contains an RCU guarded pointer to the last request. No reference is
* held to the request, users must carefully acquire a reference to
- * the request using i915_gem_active_get_request_rcu(), or hold the
+ * the request using i915_active_request_get_request_rcu(), or hold the
* struct_mutex.
*/
- struct i915_gem_active last_request;
+ struct i915_active_request last_request;
/**
* We track the most recent seqno that we wait on in every context so
@@ -63,24 +74,28 @@ struct i915_timeline {
* redundant and we can discard it without loss of generality.
*/
struct i915_syncmap *sync;
+
/**
- * Separately to the inter-context seqno map above, we track the last
- * barrier (e.g. semaphore wait) to the global engine timelines. Note
- * that this tracks global_seqno rather than the context.seqno, and
- * so it is subject to the limitations of hw wraparound and that we
- * may need to revoke global_seqno (on pre-emption).
+ * Barrier provides the ability to serialize ordering between different
+ * timelines.
+ *
+ * Users can call i915_timeline_set_barrier which will make all
+ * subsequent submissions to this timeline be executed only after the
+ * barrier has been completed.
*/
- u32 global_sync[I915_NUM_ENGINES];
+ struct i915_active_request barrier;
struct list_head link;
const char *name;
+ struct drm_i915_private *i915;
struct kref kref;
};
-void i915_timeline_init(struct drm_i915_private *i915,
- struct i915_timeline *tl,
- const char *name);
+int i915_timeline_init(struct drm_i915_private *i915,
+ struct i915_timeline *tl,
+ const char *name,
+ struct i915_vma *hwsp);
void i915_timeline_fini(struct i915_timeline *tl);
static inline void
@@ -103,7 +118,9 @@ i915_timeline_set_subclass(struct i915_timeline *timeline,
}
struct i915_timeline *
-i915_timeline_create(struct drm_i915_private *i915, const char *name);
+i915_timeline_create(struct drm_i915_private *i915,
+ const char *name,
+ struct i915_vma *global_hwsp);
static inline struct i915_timeline *
i915_timeline_get(struct i915_timeline *timeline)
@@ -142,6 +159,26 @@ static inline bool i915_timeline_sync_is_later(struct i915_timeline *tl,
return __i915_timeline_sync_is_later(tl, fence->context, fence->seqno);
}
+int i915_timeline_pin(struct i915_timeline *tl);
+void i915_timeline_unpin(struct i915_timeline *tl);
+
+void i915_timelines_init(struct drm_i915_private *i915);
void i915_timelines_park(struct drm_i915_private *i915);
+void i915_timelines_fini(struct drm_i915_private *i915);
+
+/**
+ * i915_timeline_set_barrier - orders submission between different timelines
+ * @timeline: timeline to set the barrier on
+ * @rq: request after which new submissions can proceed
+ *
+ * Sets the passed in request as the serialization point for all subsequent
+ * submissions on @timeline. Subsequent requests will not be submitted to the
+ * GPU until the barrier has been completed.
+ */
+static inline int
+i915_timeline_set_barrier(struct i915_timeline *tl, struct i915_request *rq)
+{
+ return i915_active_request_set(&tl->barrier, rq);
+}
#endif
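
Note: a rough caller-side sketch (not from the patch) of the reworked timeline API declared above. Error handling is abbreviated, and i915_timeline_put() is assumed here to be the kref release counterpart of i915_timeline_get(), backed by __i915_timeline_free().

static int example_timeline_use(struct drm_i915_private *i915,
				struct i915_request *barrier_rq)
{
	struct i915_timeline *tl;
	int err;

	/* NULL hwsp => the timeline suballocates its own HWSP cacheline */
	tl = i915_timeline_create(i915, "example", NULL);
	if (IS_ERR(tl))
		return PTR_ERR(tl);

	err = i915_timeline_pin(tl);
	if (err)
		goto out_put;

	/* order all later submissions on this timeline after barrier_rq */
	err = i915_timeline_set_barrier(tl, barrier_rq);

	i915_timeline_unpin(tl);
out_put:
	i915_timeline_put(tl); /* assumed kref_put() helper */
	return err;
}
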
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index b50c6b829715..eab313c3163c 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -6,7 +6,8 @@
#include <linux/types.h>
#include <linux/tracepoint.h>
-#include <drm/drmP.h>
+#include <drm/drm_drv.h>
+
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_ringbuffer.h"
@@ -585,35 +586,6 @@ TRACE_EVENT(i915_gem_evict_vm,
TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm)
);
-TRACE_EVENT(i915_gem_ring_sync_to,
- TP_PROTO(struct i915_request *to, struct i915_request *from),
- TP_ARGS(to, from),
-
- TP_STRUCT__entry(
- __field(u32, dev)
- __field(u32, from_class)
- __field(u32, from_instance)
- __field(u32, to_class)
- __field(u32, to_instance)
- __field(u32, seqno)
- ),
-
- TP_fast_assign(
- __entry->dev = from->i915->drm.primary->index;
- __entry->from_class = from->engine->uabi_class;
- __entry->from_instance = from->engine->instance;
- __entry->to_class = to->engine->uabi_class;
- __entry->to_instance = to->engine->instance;
- __entry->seqno = from->global_seqno;
- ),
-
- TP_printk("dev=%u, sync-from=%u:%u, sync-to=%u:%u, seqno=%u",
- __entry->dev,
- __entry->from_class, __entry->from_instance,
- __entry->to_class, __entry->to_instance,
- __entry->seqno)
-);
-
TRACE_EVENT(i915_request_queue,
TP_PROTO(struct i915_request *rq, u32 flags),
TP_ARGS(rq, flags),
@@ -780,31 +752,6 @@ trace_i915_request_out(struct i915_request *rq)
#endif
#endif
-TRACE_EVENT(intel_engine_notify,
- TP_PROTO(struct intel_engine_cs *engine, bool waiters),
- TP_ARGS(engine, waiters),
-
- TP_STRUCT__entry(
- __field(u32, dev)
- __field(u16, class)
- __field(u16, instance)
- __field(u32, seqno)
- __field(bool, waiters)
- ),
-
- TP_fast_assign(
- __entry->dev = engine->i915->drm.primary->index;
- __entry->class = engine->uabi_class;
- __entry->instance = engine->instance;
- __entry->seqno = intel_engine_get_seqno(engine);
- __entry->waiters = waiters;
- ),
-
- TP_printk("dev=%u, engine=%u:%u, seqno=%u, waiters=%u",
- __entry->dev, __entry->class, __entry->instance,
- __entry->seqno, __entry->waiters)
-);
-
DEFINE_EVENT(i915_request, i915_request_retire,
TP_PROTO(struct i915_request *rq),
TP_ARGS(rq)
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 5b4d78cdb4ca..b713bed20c38 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -63,24 +63,22 @@ static void vma_print_allocator(struct i915_vma *vma, const char *reason)
#endif
-struct i915_vma_active {
- struct i915_gem_active base;
- struct i915_vma *vma;
- struct rb_node node;
- u64 timeline;
-};
-
-static void
-__i915_vma_retire(struct i915_vma *vma, struct i915_request *rq)
+static void obj_bump_mru(struct drm_i915_gem_object *obj)
{
- struct drm_i915_gem_object *obj = vma->obj;
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
- GEM_BUG_ON(!i915_vma_is_active(vma));
- if (--vma->active_count)
- return;
+ spin_lock(&i915->mm.obj_lock);
+ if (obj->bind_count)
+ list_move_tail(&obj->mm.link, &i915->mm.bound_list);
+ spin_unlock(&i915->mm.obj_lock);
- GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+ obj->mm.dirty = true; /* be paranoid */
+}
+
+static void __i915_vma_retire(struct i915_active *ref)
+{
+ struct i915_vma *vma = container_of(ref, typeof(*vma), active);
+ struct drm_i915_gem_object *obj = vma->obj;
GEM_BUG_ON(!i915_gem_object_is_active(obj));
if (--obj->active_count)
@@ -93,16 +91,12 @@ __i915_vma_retire(struct i915_vma *vma, struct i915_request *rq)
reservation_object_unlock(obj->resv);
}
- /* Bump our place on the bound list to keep it roughly in LRU order
+ /*
+ * Bump our place on the bound list to keep it roughly in LRU order
* so that we don't steal from recently used but inactive objects
* (unless we are forced to ofc!)
*/
- spin_lock(&rq->i915->mm.obj_lock);
- if (obj->bind_count)
- list_move_tail(&obj->mm.link, &rq->i915->mm.bound_list);
- spin_unlock(&rq->i915->mm.obj_lock);
-
- obj->mm.dirty = true; /* be paranoid */
+ obj_bump_mru(obj);
if (i915_gem_object_has_active_reference(obj)) {
i915_gem_object_clear_active_reference(obj);
@@ -110,21 +104,6 @@ __i915_vma_retire(struct i915_vma *vma, struct i915_request *rq)
}
}
-static void
-i915_vma_retire(struct i915_gem_active *base, struct i915_request *rq)
-{
- struct i915_vma_active *active =
- container_of(base, typeof(*active), base);
-
- __i915_vma_retire(active->vma, rq);
-}
-
-static void
-i915_vma_last_retire(struct i915_gem_active *base, struct i915_request *rq)
-{
- __i915_vma_retire(container_of(base, struct i915_vma, last_active), rq);
-}
-
static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
@@ -140,10 +119,9 @@ vma_create(struct drm_i915_gem_object *obj,
if (vma == NULL)
return ERR_PTR(-ENOMEM);
- vma->active = RB_ROOT;
+ i915_active_init(vm->i915, &vma->active, __i915_vma_retire);
+ INIT_ACTIVE_REQUEST(&vma->last_fence);
- init_request_active(&vma->last_active, i915_vma_last_retire);
- init_request_active(&vma->last_fence, NULL);
vma->vm = vm;
vma->ops = &vm->vma_ops;
vma->obj = obj;
@@ -190,33 +168,56 @@ vma_create(struct drm_i915_gem_object *obj,
i915_gem_object_get_stride(obj));
GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));
- /*
- * We put the GGTT vma at the start of the vma-list, followed
- * by the ppGGTT vma. This allows us to break early when
- * iterating over only the GGTT vma for an object, see
- * for_each_ggtt_vma()
- */
vma->flags |= I915_VMA_GGTT;
- list_add(&vma->obj_link, &obj->vma_list);
- } else {
- list_add_tail(&vma->obj_link, &obj->vma_list);
}
+ spin_lock(&obj->vma.lock);
+
rb = NULL;
- p = &obj->vma_tree.rb_node;
+ p = &obj->vma.tree.rb_node;
while (*p) {
struct i915_vma *pos;
+ long cmp;
rb = *p;
pos = rb_entry(rb, struct i915_vma, obj_node);
- if (i915_vma_compare(pos, vm, view) < 0)
+
+ /*
+ * If the view already exists in the tree, another thread
+ * already created a matching vma, so return the older instance
+ * and dispose of ours.
+ */
+ cmp = i915_vma_compare(pos, vm, view);
+ if (cmp == 0) {
+ spin_unlock(&obj->vma.lock);
+ kmem_cache_free(vm->i915->vmas, vma);
+ return pos;
+ }
+
+ if (cmp < 0)
p = &rb->rb_right;
else
p = &rb->rb_left;
}
rb_link_node(&vma->obj_node, rb, p);
- rb_insert_color(&vma->obj_node, &obj->vma_tree);
+ rb_insert_color(&vma->obj_node, &obj->vma.tree);
+
+ if (i915_vma_is_ggtt(vma))
+ /*
+ * We put the GGTT vma at the start of the vma-list, followed
+ * by the ppGGTT vma. This allows us to break early when
+ * iterating over only the GGTT vma for an object, see
+ * for_each_ggtt_vma()
+ */
+ list_add(&vma->obj_link, &obj->vma.list);
+ else
+ list_add_tail(&vma->obj_link, &obj->vma.list);
+
+ spin_unlock(&obj->vma.lock);
+
+ mutex_lock(&vm->mutex);
list_add(&vma->vm_link, &vm->unbound_list);
+ mutex_unlock(&vm->mutex);
return vma;
@@ -232,7 +233,7 @@ vma_lookup(struct drm_i915_gem_object *obj,
{
struct rb_node *rb;
- rb = obj->vma_tree.rb_node;
+ rb = obj->vma.tree.rb_node;
while (rb) {
struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
long cmp;
@@ -272,16 +273,18 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
{
struct i915_vma *vma;
- lockdep_assert_held(&obj->base.dev->struct_mutex);
GEM_BUG_ON(view && !i915_is_ggtt(vm));
GEM_BUG_ON(vm->closed);
+ spin_lock(&obj->vma.lock);
vma = vma_lookup(obj, vm, view);
- if (!vma)
+ spin_unlock(&obj->vma.lock);
+
+ /* vma_create() will resolve the race if another creates the vma */
+ if (unlikely(!vma))
vma = vma_create(obj, vm, view);
GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
- GEM_BUG_ON(!IS_ERR(vma) && vma_lookup(obj, vm, view) != vma);
return vma;
}
@@ -659,7 +662,9 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level));
- list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+ mutex_lock(&vma->vm->mutex);
+ list_move_tail(&vma->vm_link, &vma->vm->bound_list);
+ mutex_unlock(&vma->vm->mutex);
if (vma->obj) {
struct drm_i915_gem_object *obj = vma->obj;
@@ -692,8 +697,10 @@ i915_vma_remove(struct i915_vma *vma)
vma->ops->clear_pages(vma);
+ mutex_lock(&vma->vm->mutex);
drm_mm_remove_node(&vma->node);
list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
+ mutex_unlock(&vma->vm->mutex);
/*
* Since the unbound list is global, only move to that list if
@@ -797,23 +804,27 @@ void i915_vma_reopen(struct i915_vma *vma)
static void __i915_vma_destroy(struct i915_vma *vma)
{
struct drm_i915_private *i915 = vma->vm->i915;
- struct i915_vma_active *iter, *n;
GEM_BUG_ON(vma->node.allocated);
GEM_BUG_ON(vma->fence);
- GEM_BUG_ON(i915_gem_active_isset(&vma->last_fence));
+ GEM_BUG_ON(i915_active_request_isset(&vma->last_fence));
- list_del(&vma->obj_link);
+ mutex_lock(&vma->vm->mutex);
list_del(&vma->vm_link);
- if (vma->obj)
- rb_erase(&vma->obj_node, &vma->obj->vma_tree);
+ mutex_unlock(&vma->vm->mutex);
+
+ if (vma->obj) {
+ struct drm_i915_gem_object *obj = vma->obj;
- rbtree_postorder_for_each_entry_safe(iter, n, &vma->active, node) {
- GEM_BUG_ON(i915_gem_active_isset(&iter->base));
- kfree(iter);
+ spin_lock(&obj->vma.lock);
+ list_del(&vma->obj_link);
+ rb_erase(&vma->obj_node, &vma->obj->vma.tree);
+ spin_unlock(&obj->vma.lock);
}
+ i915_active_fini(&vma->active);
+
kmem_cache_free(i915->vmas, vma);
}
@@ -897,104 +908,15 @@ static void export_fence(struct i915_vma *vma,
reservation_object_unlock(resv);
}
-static struct i915_gem_active *active_instance(struct i915_vma *vma, u64 idx)
-{
- struct i915_vma_active *active;
- struct rb_node **p, *parent;
- struct i915_request *old;
-
- /*
- * We track the most recently used timeline to skip a rbtree search
- * for the common case, under typical loads we never need the rbtree
- * at all. We can reuse the last_active slot if it is empty, that is
- * after the previous activity has been retired, or if the active
- * matches the current timeline.
- *
- * Note that we allow the timeline to be active simultaneously in
- * the rbtree and the last_active cache. We do this to avoid having
- * to search and replace the rbtree element for a new timeline, with
- * the cost being that we must be aware that the vma may be retired
- * twice for the same timeline (as the older rbtree element will be
- * retired before the new request added to last_active).
- */
- old = i915_gem_active_raw(&vma->last_active,
- &vma->vm->i915->drm.struct_mutex);
- if (!old || old->fence.context == idx)
- goto out;
-
- /* Move the currently active fence into the rbtree */
- idx = old->fence.context;
-
- parent = NULL;
- p = &vma->active.rb_node;
- while (*p) {
- parent = *p;
-
- active = rb_entry(parent, struct i915_vma_active, node);
- if (active->timeline == idx)
- goto replace;
-
- if (active->timeline < idx)
- p = &parent->rb_right;
- else
- p = &parent->rb_left;
- }
-
- active = kmalloc(sizeof(*active), GFP_KERNEL);
-
- /* kmalloc may retire the vma->last_active request (thanks shrinker)! */
- if (unlikely(!i915_gem_active_raw(&vma->last_active,
- &vma->vm->i915->drm.struct_mutex))) {
- kfree(active);
- goto out;
- }
-
- if (unlikely(!active))
- return ERR_PTR(-ENOMEM);
-
- init_request_active(&active->base, i915_vma_retire);
- active->vma = vma;
- active->timeline = idx;
-
- rb_link_node(&active->node, parent, p);
- rb_insert_color(&active->node, &vma->active);
-
-replace:
- /*
- * Overwrite the previous active slot in the rbtree with last_active,
- * leaving last_active zeroed. If the previous slot is still active,
- * we must be careful as we now only expect to receive one retire
- * callback not two, and so much undo the active counting for the
- * overwritten slot.
- */
- if (i915_gem_active_isset(&active->base)) {
- /* Retire ourselves from the old rq->active_list */
- __list_del_entry(&active->base.link);
- vma->active_count--;
- GEM_BUG_ON(!vma->active_count);
- }
- GEM_BUG_ON(list_empty(&vma->last_active.link));
- list_replace_init(&vma->last_active.link, &active->base.link);
- active->base.request = fetch_and_zero(&vma->last_active.request);
-
-out:
- return &vma->last_active;
-}
-
int i915_vma_move_to_active(struct i915_vma *vma,
struct i915_request *rq,
unsigned int flags)
{
struct drm_i915_gem_object *obj = vma->obj;
- struct i915_gem_active *active;
lockdep_assert_held(&rq->i915->drm.struct_mutex);
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- active = active_instance(vma, rq->fence.context);
- if (IS_ERR(active))
- return PTR_ERR(active);
-
/*
* Add a reference if we're newly entering the active list.
* The order in which we add operations to the retirement queue is
@@ -1003,11 +925,15 @@ int i915_vma_move_to_active(struct i915_vma *vma,
* add the active reference first and queue for it to be dropped
* *last*.
*/
- if (!i915_gem_active_isset(active) && !vma->active_count++) {
- list_move_tail(&vma->vm_link, &vma->vm->active_list);
+ if (!vma->active.count)
obj->active_count++;
+
+ if (unlikely(i915_active_ref(&vma->active, rq->fence.context, rq))) {
+ if (!vma->active.count)
+ obj->active_count--;
+ return -ENOMEM;
}
- i915_gem_active_set(active, rq);
+
GEM_BUG_ON(!i915_vma_is_active(vma));
GEM_BUG_ON(!obj->active_count);
@@ -1016,14 +942,14 @@ int i915_vma_move_to_active(struct i915_vma *vma,
obj->write_domain = I915_GEM_DOMAIN_RENDER;
if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
- i915_gem_active_set(&obj->frontbuffer_write, rq);
+ __i915_active_request_set(&obj->frontbuffer_write, rq);
obj->read_domains = 0;
}
obj->read_domains |= I915_GEM_GPU_DOMAINS;
if (flags & EXEC_OBJECT_NEEDS_FENCE)
- i915_gem_active_set(&vma->last_fence, rq);
+ __i915_active_request_set(&vma->last_fence, rq);
export_fence(vma, rq, flags);
return 0;
@@ -1041,8 +967,6 @@ int i915_vma_unbind(struct i915_vma *vma)
*/
might_sleep();
if (i915_vma_is_active(vma)) {
- struct i915_vma_active *active, *n;
-
/*
* When a closed VMA is retired, it is unbound - eek.
* In order to prevent it from being recursively closed,
@@ -1058,21 +982,12 @@ int i915_vma_unbind(struct i915_vma *vma)
*/
__i915_vma_pin(vma);
- ret = i915_gem_active_retire(&vma->last_active,
- &vma->vm->i915->drm.struct_mutex);
+ ret = i915_active_wait(&vma->active);
if (ret)
goto unpin;
- rbtree_postorder_for_each_entry_safe(active, n,
- &vma->active, node) {
- ret = i915_gem_active_retire(&active->base,
- &vma->vm->i915->drm.struct_mutex);
- if (ret)
- goto unpin;
- }
-
- ret = i915_gem_active_retire(&vma->last_fence,
- &vma->vm->i915->drm.struct_mutex);
+ ret = i915_active_request_retire(&vma->last_fence,
+ &vma->vm->i915->drm.struct_mutex);
unpin:
__i915_vma_unpin(vma);
if (ret)
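
Note: a generic sketch (not driver code) of the lookup-then-recheck pattern vma_create() adopts above to resolve the race between two threads instantiating the same view: build the new node outside the lock, re-search the rbtree under the lock, and keep whichever instance won. All names are hypothetical.

#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/err.h>

struct example_node {
	struct rb_node rb;
	unsigned long key;
};

static struct example_node *
example_get_or_create(struct rb_root *root, spinlock_t *lock, unsigned long key)
{
	struct example_node *fresh = kzalloc(sizeof(*fresh), GFP_KERNEL);
	struct rb_node **p, *parent = NULL;

	if (!fresh)
		return ERR_PTR(-ENOMEM);
	fresh->key = key;

	spin_lock(lock);
	p = &root->rb_node;
	while (*p) {
		struct example_node *pos =
			rb_entry(*p, struct example_node, rb);

		parent = *p;
		if (pos->key == key) {	/* another thread beat us to it */
			spin_unlock(lock);
			kfree(fresh);
			return pos;
		}
		p = pos->key < key ? &parent->rb_right : &parent->rb_left;
	}
	rb_link_node(&fresh->rb, parent, p);
	rb_insert_color(&fresh->rb, root);
	spin_unlock(lock);

	return fresh;
}
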
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 4f7c1c7599f4..7c742027f866 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -34,6 +34,7 @@
#include "i915_gem_fence_reg.h"
#include "i915_gem_object.h"
+#include "i915_active.h"
#include "i915_request.h"
enum i915_cache_level;
@@ -71,34 +72,45 @@ struct i915_vma {
unsigned int open_count;
unsigned long flags;
/**
- * How many users have pinned this object in GTT space. The following
- * users can each hold at most one reference: pwrite/pread, execbuffer
- * (objects are not allowed multiple times for the same batchbuffer),
- * and the framebuffer code. When switching/pageflipping, the
- * framebuffer code has at most two buffers pinned per crtc.
+ * How many users have pinned this object in GTT space.
*
- * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
- * bits with absolutely no headroom. So use 4 bits.
+ * This is a tightly bound, fairly small number of users, so we
+ * stuff it inside the flags field so that we can both check for overflow
+ * and detect a no-op i915_vma_pin() in a single check, while also
+ * pinning the vma.
+ *
+ * The worst case display setup would have the same vma pinned for
+ * use on each plane on each crtc, while also building the next atomic
+ * state and holding a pin for the length of the cleanup queue. In the
+ * future, the flip queue may be increased from 1.
+ * Estimated worst case: 3 [qlen] * 4 [max crtcs] * 7 [max planes] = 84
+ *
+ * For GEM, the number of concurrent users for pwrite/pread is
+ * unbounded. For execbuffer, it is currently one but will in future
+ * be extended to allow multiple clients to pin vma concurrently.
+ *
+ * We also use suballocated pages, with each suballocation claiming
+ * its own pin on the shared vma. At present, this is limited to
+ * exclusive cachelines of a single page, so a maximum of 64 possible
+ * users.
*/
-#define I915_VMA_PIN_MASK 0xf
-#define I915_VMA_PIN_OVERFLOW BIT(5)
+#define I915_VMA_PIN_MASK 0xff
+#define I915_VMA_PIN_OVERFLOW BIT(8)
/** Flags and address space this VMA is bound to */
-#define I915_VMA_GLOBAL_BIND BIT(6)
-#define I915_VMA_LOCAL_BIND BIT(7)
+#define I915_VMA_GLOBAL_BIND BIT(9)
+#define I915_VMA_LOCAL_BIND BIT(10)
#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND | I915_VMA_PIN_OVERFLOW)
-#define I915_VMA_GGTT BIT(8)
-#define I915_VMA_CAN_FENCE BIT(9)
-#define I915_VMA_CLOSED BIT(10)
-#define I915_VMA_USERFAULT_BIT 11
+#define I915_VMA_GGTT BIT(11)
+#define I915_VMA_CAN_FENCE BIT(12)
+#define I915_VMA_CLOSED BIT(13)
+#define I915_VMA_USERFAULT_BIT 14
#define I915_VMA_USERFAULT BIT(I915_VMA_USERFAULT_BIT)
-#define I915_VMA_GGTT_WRITE BIT(12)
+#define I915_VMA_GGTT_WRITE BIT(15)
- unsigned int active_count;
- struct rb_root active;
- struct i915_gem_active last_active;
- struct i915_gem_active last_fence;
+ struct i915_active active;
+ struct i915_active_request last_fence;
/**
* Support different GGTT views into the same object.
@@ -141,9 +153,9 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags);
#define I915_VMA_RELEASE_MAP BIT(0)
-static inline bool i915_vma_is_active(struct i915_vma *vma)
+static inline bool i915_vma_is_active(const struct i915_vma *vma)
{
- return vma->active_count;
+ return !i915_active_is_idle(&vma->active);
}
int __must_check i915_vma_move_to_active(struct i915_vma *vma,
@@ -425,7 +437,7 @@ void i915_vma_parked(struct drm_i915_private *i915);
* or the list is empty ofc.
*/
#define for_each_ggtt_vma(V, OBJ) \
- list_for_each_entry(V, &(OBJ)->vma_list, obj_link) \
+ list_for_each_entry(V, &(OBJ)->vma.list, obj_link) \
for_each_until(!i915_vma_is_ggtt(V))
#endif
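
Note: a minimal sketch (not the driver's actual pin helper) of why the pin count lives in the low bits of the flags word above: a single increment plus one mask test detects both an overflowing pin count and a no-op re-pin in the already-bound case. The macros and return codes below are illustrative only.

#include <errno.h>

#define EX_PIN_MASK	0xff		/* count in bits 0..7 */
#define EX_PIN_OVERFLOW	(1u << 8)	/* carry out of the count */
#define EX_GLOBAL_BIND	(1u << 9)

static int example_pin(unsigned int *flags, unsigned int want_bind)
{
	unsigned int old = (*flags)++;	/* bump the pin count */

	if (*flags & EX_PIN_OVERFLOW)	/* more than 255 concurrent pins */
		return -EBUSY;

	if (old & want_bind)		/* already bound as requested: no-op */
		return 0;

	/* ... otherwise perform the binding and record it ... */
	*flags |= want_bind;
	return 0;
}

/* e.g. example_pin(&vma_flags, EX_GLOBAL_BIND) from two concurrent users */
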
diff --git a/drivers/gpu/drm/i915/icl_dsi.c b/drivers/gpu/drm/i915/icl_dsi.c
index 4dd793b78996..73a7bee24a66 100644
--- a/drivers/gpu/drm/i915/icl_dsi.c
+++ b/drivers/gpu/drm/i915/icl_dsi.c
@@ -337,9 +337,11 @@ static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
}
for_each_dsi_port(port, intel_dsi->ports) {
- intel_display_power_get(dev_priv, port == PORT_A ?
- POWER_DOMAIN_PORT_DDI_A_IO :
- POWER_DOMAIN_PORT_DDI_B_IO);
+ intel_dsi->io_wakeref[port] =
+ intel_display_power_get(dev_priv,
+ port == PORT_A ?
+ POWER_DOMAIN_PORT_DDI_A_IO :
+ POWER_DOMAIN_PORT_DDI_B_IO);
}
}
@@ -1125,10 +1127,18 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
enum port port;
u32 tmp;
- intel_display_power_put(dev_priv, POWER_DOMAIN_PORT_DDI_A_IO);
-
- if (intel_dsi->dual_link)
- intel_display_power_put(dev_priv, POWER_DOMAIN_PORT_DDI_B_IO);
+ for_each_dsi_port(port, intel_dsi->ports) {
+ intel_wakeref_t wakeref;
+
+ wakeref = fetch_and_zero(&intel_dsi->io_wakeref[port]);
+ if (wakeref) {
+ intel_display_power_put(dev_priv,
+ port == PORT_A ?
+ POWER_DOMAIN_PORT_DDI_A_IO :
+ POWER_DOMAIN_PORT_DDI_B_IO,
+ wakeref);
+ }
+ }
/* set mode to DDI */
for_each_dsi_port(port, intel_dsi->ports) {
@@ -1178,9 +1188,9 @@ static void gen11_dsi_get_config(struct intel_encoder *encoder,
pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
}
-static bool gen11_dsi_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+static int gen11_dsi_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
base);
@@ -1205,7 +1215,7 @@ static bool gen11_dsi_compute_config(struct intel_encoder *encoder,
pipe_config->clock_set = true;
pipe_config->port_clock = intel_dsi_bitrate(intel_dsi) / 5;
- return true;
+ return 0;
}
static u64 gen11_dsi_get_power_domains(struct intel_encoder *encoder,
@@ -1229,13 +1239,15 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
- u32 tmp;
- enum port port;
enum transcoder dsi_trans;
+ intel_wakeref_t wakeref;
+ enum port port;
bool ret = false;
+ u32 tmp;
- if (!intel_display_power_get_if_enabled(dev_priv,
- encoder->power_domain))
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ encoder->power_domain);
+ if (!wakeref)
return false;
for_each_dsi_port(port, intel_dsi->ports) {
@@ -1260,7 +1272,7 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
ret = tmp & PIPECONF_ENABLE;
}
out:
- intel_display_power_put(dev_priv, encoder->power_domain);
+ intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
return ret;
}
@@ -1378,6 +1390,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
encoder->disable = gen11_dsi_disable;
encoder->port = port;
encoder->get_config = gen11_dsi_get_config;
+ encoder->update_pipe = intel_panel_update_backlight;
encoder->compute_config = gen11_dsi_compute_config;
encoder->get_hw_state = gen11_dsi_get_hw_state;
encoder->type = INTEL_OUTPUT_DSI;
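
Note: an illustrative sketch (not driver code) of the wakeref-cookie pattern the DSI hunks above switch to: the power-get call now returns a cookie that each user stores and hands back on put, and fetch_and_zero() makes a stale or double put easy to detect. The stand-in helpers below are hypothetical.

typedef unsigned long example_wakeref_t;

/* stand-ins for the power get/put calls; cookie 0 means "none held" */
static example_wakeref_t example_power_get(void) { return 1; }
static void example_power_put(example_wakeref_t w) { (void)w; }

#define fetch_and_zero(ptr) ({			\
	typeof(*(ptr)) __val = *(ptr);		\
	*(ptr) = (typeof(*(ptr)))0;		\
	__val;					\
})

struct example_port {
	example_wakeref_t io_wakeref;
};

static void example_enable(struct example_port *port)
{
	port->io_wakeref = example_power_get();
}

static void example_disable(struct example_port *port)
{
	example_wakeref_t wakeref = fetch_and_zero(&port->io_wakeref);

	if (wakeref)
		example_power_put(wakeref);
}
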
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
index 6ba478e57b9b..9d142d038a7d 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -6,7 +6,6 @@
*/
#include <linux/pci.h>
#include <linux/acpi.h>
-#include <drm/drmP.h>
#include "i915_drv.h"
#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index 8cb02f28d30c..7cf9290ea34a 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -29,10 +29,11 @@
* See intel_atomic_plane.c for the plane-specific atomic functionality.
*/
-#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
+
#include "intel_drv.h"
/**
@@ -47,7 +48,7 @@
int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
const struct drm_connector_state *state,
struct drm_property *property,
- uint64_t *val)
+ u64 *val)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -79,7 +80,7 @@ int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
struct drm_connector_state *state,
struct drm_property *property,
- uint64_t val)
+ u64 val)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -233,7 +234,7 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_sta
if (plane_state && plane_state->base.fb &&
plane_state->base.fb->format->is_yuv &&
plane_state->base.fb->format->num_planes > 1) {
- if (IS_GEN9(dev_priv) &&
+ if (IS_GEN(dev_priv, 9) &&
!IS_GEMINILAKE(dev_priv)) {
mode = SKL_PS_SCALER_MODE_NV12;
} else if (icl_is_hdr_plane(to_intel_plane(plane_state->base.plane))) {
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index 0a73e6e65c20..db0965904439 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -31,9 +31,10 @@
* prepare/check/commit/cleanup steps.
*/
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
+
#include "intel_drv.h"
struct intel_plane *intel_plane_alloc(void)
@@ -111,41 +112,39 @@ intel_plane_destroy_state(struct drm_plane *plane,
}
int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *crtc_state,
+ struct intel_crtc_state *new_crtc_state,
const struct intel_plane_state *old_plane_state,
- struct intel_plane_state *intel_state)
+ struct intel_plane_state *new_plane_state)
{
- struct drm_plane *plane = intel_state->base.plane;
- struct drm_plane_state *state = &intel_state->base;
- struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct intel_plane *plane = to_intel_plane(new_plane_state->base.plane);
int ret;
- crtc_state->active_planes &= ~BIT(intel_plane->id);
- crtc_state->nv12_planes &= ~BIT(intel_plane->id);
- intel_state->base.visible = false;
+ new_crtc_state->active_planes &= ~BIT(plane->id);
+ new_crtc_state->nv12_planes &= ~BIT(plane->id);
+ new_plane_state->base.visible = false;
- /* If this is a cursor plane, no further checks are needed. */
- if (!intel_state->base.crtc && !old_plane_state->base.crtc)
+ if (!new_plane_state->base.crtc && !old_plane_state->base.crtc)
return 0;
- ret = intel_plane->check_plane(crtc_state, intel_state);
+ ret = plane->check_plane(new_crtc_state, new_plane_state);
if (ret)
return ret;
/* FIXME pre-g4x don't work like this */
- if (state->visible)
- crtc_state->active_planes |= BIT(intel_plane->id);
+ if (new_plane_state->base.visible)
+ new_crtc_state->active_planes |= BIT(plane->id);
- if (state->visible && state->fb->format->format == DRM_FORMAT_NV12)
- crtc_state->nv12_planes |= BIT(intel_plane->id);
+ if (new_plane_state->base.visible &&
+ new_plane_state->base.fb->format->format == DRM_FORMAT_NV12)
+ new_crtc_state->nv12_planes |= BIT(plane->id);
- if (state->visible || old_plane_state->base.visible)
- crtc_state->update_planes |= BIT(intel_plane->id);
+ if (new_plane_state->base.visible || old_plane_state->base.visible)
+ new_crtc_state->update_planes |= BIT(plane->id);
return intel_plane_atomic_calc_changes(old_crtc_state,
- &crtc_state->base,
+ &new_crtc_state->base,
old_plane_state,
- state);
+ &new_plane_state->base);
}
static int intel_plane_atomic_check(struct drm_plane *plane,
@@ -312,7 +311,7 @@ int
intel_plane_atomic_get_property(struct drm_plane *plane,
const struct drm_plane_state *state,
struct drm_property *property,
- uint64_t *val)
+ u64 *val)
{
DRM_DEBUG_KMS("Unknown property [PROP:%d:%s]\n",
property->base.id, property->name);
@@ -335,7 +334,7 @@ int
intel_plane_atomic_set_property(struct drm_plane *plane,
struct drm_plane_state *state,
struct drm_property *property,
- uint64_t val)
+ u64 val)
{
DRM_DEBUG_KMS("Unknown property [PROP:%d:%s]\n",
property->base.id, property->name);
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index b32681632f30..5104c6bbd66f 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -27,7 +27,6 @@
#include <drm/intel_lpe_audio.h>
#include "intel_drv.h"
-#include <drm/drmP.h>
#include <drm/drm_edid.h>
#include "i915_drv.h"
@@ -749,7 +748,8 @@ static void i915_audio_component_get_power(struct device *kdev)
static void i915_audio_component_put_power(struct device *kdev)
{
- intel_display_power_put(kdev_to_i915(kdev), POWER_DOMAIN_AUDIO);
+ intel_display_power_put_unchecked(kdev_to_i915(kdev),
+ POWER_DOMAIN_AUDIO);
}
static void i915_audio_component_codec_wake_override(struct device *kdev,
@@ -758,7 +758,7 @@ static void i915_audio_component_codec_wake_override(struct device *kdev,
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
u32 tmp;
- if (!IS_GEN9(dev_priv))
+ if (!IS_GEN(dev_priv, 9))
return;
i915_audio_component_get_power(kdev);
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 6d3e0260d49c..b508d8a735e0 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -26,7 +26,6 @@
*/
#include <drm/drm_dp_helper.h>
-#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
@@ -453,7 +452,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version)
* Only parse SDVO mappings on gens that could have SDVO. This isn't
* accurate and doesn't have to be, as long as it's not too strict.
*/
- if (!IS_GEN(dev_priv, 3, 7)) {
+ if (!IS_GEN_RANGE(dev_priv, 3, 7)) {
DRM_DEBUG_KMS("Skipping SDVO device mapping\n");
return;
}
@@ -1386,8 +1385,15 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
info->supports_dp = is_dp;
info->supports_edp = is_edp;
- DRM_DEBUG_KMS("Port %c VBT info: DP:%d HDMI:%d DVI:%d EDP:%d CRT:%d\n",
- port_name(port), is_dp, is_hdmi, is_dvi, is_edp, is_crt);
+ if (bdb_version >= 195)
+ info->supports_typec_usb = child->dp_usb_type_c;
+
+ if (bdb_version >= 209)
+ info->supports_tbt = child->tbt;
+
+ DRM_DEBUG_KMS("Port %c VBT info: DP:%d HDMI:%d DVI:%d EDP:%d CRT:%d TCUSB:%d TBT:%d\n",
+ port_name(port), is_dp, is_hdmi, is_dvi, is_edp, is_crt,
+ info->supports_typec_usb, info->supports_tbt);
if (is_edp && is_dvi)
DRM_DEBUG_KMS("Internal DP port %c is TMDS compatible\n",
@@ -1657,6 +1663,13 @@ init_vbt_missing_defaults(struct drm_i915_private *dev_priv)
struct ddi_vbt_port_info *info =
&dev_priv->vbt.ddi_port_info[port];
+ /*
+ * VBT has the TypeC mode (native, TBT/USB) and we don't want
+ * to detect it.
+ */
+ if (intel_port_is_tc(dev_priv, port))
+ continue;
+
info->supports_dvi = (port != PORT_A && port != PORT_E);
info->supports_hdmi = info->supports_dvi;
info->supports_dp = (port != PORT_E);
@@ -1940,6 +1953,15 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por
};
int i;
+ if (HAS_DDI(dev_priv)) {
+ const struct ddi_vbt_port_info *port_info =
+ &dev_priv->vbt.ddi_port_info[port];
+
+ return port_info->supports_dp ||
+ port_info->supports_dvi ||
+ port_info->supports_hdmi;
+ }
+
/* FIXME maybe deal with port A as well? */
if (WARN_ON(port == PORT_A) || port >= ARRAY_SIZE(port_mapping))
return false;
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 447c5256f63a..cacaa1d04d17 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -29,180 +29,146 @@
#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL && !(tsk)->on_rq)
-static unsigned int __intel_breadcrumbs_wakeup(struct intel_breadcrumbs *b)
+static void irq_enable(struct intel_engine_cs *engine)
{
- struct intel_wait *wait;
- unsigned int result = 0;
-
- lockdep_assert_held(&b->irq_lock);
-
- wait = b->irq_wait;
- if (wait) {
- /*
- * N.B. Since task_asleep() and ttwu are not atomic, the
- * waiter may actually go to sleep after the check, causing
- * us to suppress a valid wakeup. We prefer to reduce the
- * number of false positive missed_breadcrumb() warnings
- * at the expense of a few false negatives, as it it easy
- * to trigger a false positive under heavy load. Enough
- * signal should remain from genuine missed_breadcrumb()
- * for us to detect in CI.
- */
- bool was_asleep = task_asleep(wait->tsk);
-
- result = ENGINE_WAKEUP_WAITER;
- if (wake_up_process(wait->tsk) && was_asleep)
- result |= ENGINE_WAKEUP_ASLEEP;
- }
+ if (!engine->irq_enable)
+ return;
- return result;
+ /* Caller disables interrupts */
+ spin_lock(&engine->i915->irq_lock);
+ engine->irq_enable(engine);
+ spin_unlock(&engine->i915->irq_lock);
}
-unsigned int intel_engine_wakeup(struct intel_engine_cs *engine)
+static void irq_disable(struct intel_engine_cs *engine)
{
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
- unsigned long flags;
- unsigned int result;
-
- spin_lock_irqsave(&b->irq_lock, flags);
- result = __intel_breadcrumbs_wakeup(b);
- spin_unlock_irqrestore(&b->irq_lock, flags);
-
- return result;
-}
+ if (!engine->irq_disable)
+ return;
-static unsigned long wait_timeout(void)
-{
- return round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES);
+ /* Caller disables interrupts */
+ spin_lock(&engine->i915->irq_lock);
+ engine->irq_disable(engine);
+ spin_unlock(&engine->i915->irq_lock);
}
-static noinline void missed_breadcrumb(struct intel_engine_cs *engine)
+static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
{
- if (GEM_SHOW_DEBUG()) {
- struct drm_printer p = drm_debug_printer(__func__);
+ lockdep_assert_held(&b->irq_lock);
- intel_engine_dump(engine, &p,
- "%s missed breadcrumb at %pS\n",
- engine->name, __builtin_return_address(0));
- }
+ GEM_BUG_ON(!b->irq_enabled);
+ if (!--b->irq_enabled)
+ irq_disable(container_of(b,
+ struct intel_engine_cs,
+ breadcrumbs));
- set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
+ b->irq_armed = false;
}
-static void intel_breadcrumbs_hangcheck(struct timer_list *t)
+void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
{
- struct intel_engine_cs *engine =
- from_timer(engine, t, breadcrumbs.hangcheck);
struct intel_breadcrumbs *b = &engine->breadcrumbs;
- unsigned int irq_count;
if (!b->irq_armed)
return;
- irq_count = READ_ONCE(b->irq_count);
- if (b->hangcheck_interrupts != irq_count) {
- b->hangcheck_interrupts = irq_count;
- mod_timer(&b->hangcheck, wait_timeout());
- return;
- }
+ spin_lock_irq(&b->irq_lock);
+ if (b->irq_armed)
+ __intel_breadcrumbs_disarm_irq(b);
+ spin_unlock_irq(&b->irq_lock);
+}
- /* We keep the hangcheck timer alive until we disarm the irq, even
- * if there are no waiters at present.
- *
- * If the waiter was currently running, assume it hasn't had a chance
- * to process the pending interrupt (e.g, low priority task on a loaded
- * system) and wait until it sleeps before declaring a missed interrupt.
- *
- * If the waiter was asleep (and not even pending a wakeup), then we
- * must have missed an interrupt as the GPU has stopped advancing
- * but we still have a waiter. Assuming all batches complete within
- * DRM_I915_HANGCHECK_JIFFIES [1.5s]!
- */
- if (intel_engine_wakeup(engine) & ENGINE_WAKEUP_ASLEEP) {
- missed_breadcrumb(engine);
- mod_timer(&b->fake_irq, jiffies + 1);
- } else {
- mod_timer(&b->hangcheck, wait_timeout());
- }
+static inline bool __request_completed(const struct i915_request *rq)
+{
+ return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
}
-static void intel_breadcrumbs_fake_irq(struct timer_list *t)
+bool intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
{
- struct intel_engine_cs *engine =
- from_timer(engine, t, breadcrumbs.fake_irq);
struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ struct intel_context *ce, *cn;
+ struct list_head *pos, *next;
+ LIST_HEAD(signal);
- /*
- * The timer persists in case we cannot enable interrupts,
- * or if we have previously seen seqno/interrupt incoherency
- * ("missed interrupt" syndrome, better known as a "missed breadcrumb").
- * Here the worker will wake up every jiffie in order to kick the
- * oldest waiter to do the coherent seqno check.
- */
+ spin_lock(&b->irq_lock);
- spin_lock_irq(&b->irq_lock);
- if (b->irq_armed && !__intel_breadcrumbs_wakeup(b))
- __intel_engine_disarm_breadcrumbs(engine);
- spin_unlock_irq(&b->irq_lock);
- if (!b->irq_armed)
- return;
+ if (b->irq_armed && list_empty(&b->signalers))
+ __intel_breadcrumbs_disarm_irq(b);
- /* If the user has disabled the fake-irq, restore the hangchecking */
- if (!test_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings)) {
- mod_timer(&b->hangcheck, wait_timeout());
- return;
- }
+ list_for_each_entry_safe(ce, cn, &b->signalers, signal_link) {
+ GEM_BUG_ON(list_empty(&ce->signals));
- mod_timer(&b->fake_irq, jiffies + 1);
-}
+ list_for_each_safe(pos, next, &ce->signals) {
+ struct i915_request *rq =
+ list_entry(pos, typeof(*rq), signal_link);
-static void irq_enable(struct intel_engine_cs *engine)
-{
- /*
- * FIXME: Ideally we want this on the API boundary, but for the
- * sake of testing with mock breadcrumbs (no HW so unable to
- * enable irqs) we place it deep within the bowels, at the point
- * of no return.
- */
- GEM_BUG_ON(!intel_irqs_enabled(engine->i915));
+ if (!__request_completed(rq))
+ break;
- /* Enabling the IRQ may miss the generation of the interrupt, but
- * we still need to force the barrier before reading the seqno,
- * just in case.
- */
- set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
+ GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_SIGNAL,
+ &rq->fence.flags));
+ clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
- /* Caller disables interrupts */
- if (engine->irq_enable) {
- spin_lock(&engine->i915->irq_lock);
- engine->irq_enable(engine);
- spin_unlock(&engine->i915->irq_lock);
+ /*
+ * We may race with direct invocation of
+ * dma_fence_signal(), e.g. i915_request_retire(),
+ * in which case we can skip processing it ourselves.
+ */
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ &rq->fence.flags))
+ continue;
+
+ /*
+ * Queue for execution after dropping the signaling
+ * spinlock as the callback chain may end up adding
+ * more signalers to the same context or engine.
+ */
+ i915_request_get(rq);
+ list_add_tail(&rq->signal_link, &signal);
+ }
+
+ /*
+ * We process the list deletion in bulk, only using a list_add
+ * (not list_move) above but keeping the status of
+ * rq->signal_link known with the I915_FENCE_FLAG_SIGNAL bit.
+ */
+ if (!list_is_first(pos, &ce->signals)) {
+ /* Advance the list to the first incomplete request */
+ __list_del_many(&ce->signals, pos);
+ if (&ce->signals == pos) /* now empty */
+ list_del_init(&ce->signal_link);
+ }
}
-}
-static void irq_disable(struct intel_engine_cs *engine)
-{
- /* Caller disables interrupts */
- if (engine->irq_disable) {
- spin_lock(&engine->i915->irq_lock);
- engine->irq_disable(engine);
- spin_unlock(&engine->i915->irq_lock);
+ spin_unlock(&b->irq_lock);
+
+ list_for_each_safe(pos, next, &signal) {
+ struct i915_request *rq =
+ list_entry(pos, typeof(*rq), signal_link);
+
+ dma_fence_signal(&rq->fence);
+ i915_request_put(rq);
}
+
+ return !list_empty(&signal);
}
-void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
+bool intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine)
{
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ bool result;
- lockdep_assert_held(&b->irq_lock);
- GEM_BUG_ON(b->irq_wait);
- GEM_BUG_ON(!b->irq_armed);
+ local_irq_disable();
+ result = intel_engine_breadcrumbs_irq(engine);
+ local_irq_enable();
- GEM_BUG_ON(!b->irq_enabled);
- if (!--b->irq_enabled)
- irq_disable(engine);
+ return result;
+}
- b->irq_armed = false;
+static void signal_irq_work(struct irq_work *work)
+{
+ struct intel_engine_cs *engine =
+ container_of(work, typeof(*engine), breadcrumbs.irq_work);
+
+ intel_engine_breadcrumbs_irq(engine);
}
void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine)
@@ -227,666 +193,155 @@ void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine)
spin_unlock_irq(&b->irq_lock);
}
-void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
-{
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
- struct intel_wait *wait, *n;
-
- if (!b->irq_armed)
- return;
-
- /*
- * We only disarm the irq when we are idle (all requests completed),
- * so if the bottom-half remains asleep, it missed the request
- * completion.
- */
- if (intel_engine_wakeup(engine) & ENGINE_WAKEUP_ASLEEP)
- missed_breadcrumb(engine);
-
- spin_lock_irq(&b->rb_lock);
-
- spin_lock(&b->irq_lock);
- b->irq_wait = NULL;
- if (b->irq_armed)
- __intel_engine_disarm_breadcrumbs(engine);
- spin_unlock(&b->irq_lock);
-
- rbtree_postorder_for_each_entry_safe(wait, n, &b->waiters, node) {
- GEM_BUG_ON(!intel_engine_signaled(engine, wait->seqno));
- RB_CLEAR_NODE(&wait->node);
- wake_up_process(wait->tsk);
- }
- b->waiters = RB_ROOT;
-
- spin_unlock_irq(&b->rb_lock);
-}
-
-static bool use_fake_irq(const struct intel_breadcrumbs *b)
-{
- const struct intel_engine_cs *engine =
- container_of(b, struct intel_engine_cs, breadcrumbs);
-
- if (!test_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings))
- return false;
-
- /*
- * Only start with the heavy weight fake irq timer if we have not
- * seen any interrupts since enabling it the first time. If the
- * interrupts are still arriving, it means we made a mistake in our
- * engine->seqno_barrier(), a timing error that should be transient
- * and unlikely to reoccur.
- */
- return READ_ONCE(b->irq_count) == b->hangcheck_interrupts;
-}
-
-static void enable_fake_irq(struct intel_breadcrumbs *b)
-{
- /* Ensure we never sleep indefinitely */
- if (!b->irq_enabled || use_fake_irq(b))
- mod_timer(&b->fake_irq, jiffies + 1);
- else
- mod_timer(&b->hangcheck, wait_timeout());
-}
-
-static bool __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
+static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
{
struct intel_engine_cs *engine =
container_of(b, struct intel_engine_cs, breadcrumbs);
- struct drm_i915_private *i915 = engine->i915;
- bool enabled;
lockdep_assert_held(&b->irq_lock);
if (b->irq_armed)
- return false;
+ return;
- /* The breadcrumb irq will be disarmed on the interrupt after the
+ /*
+ * The breadcrumb irq will be disarmed on the interrupt after the
* waiters are signaled. This gives us a single interrupt window in
* which we can add a new waiter and avoid the cost of re-enabling
* the irq.
*/
b->irq_armed = true;
- if (I915_SELFTEST_ONLY(b->mock)) {
- /* For our mock objects we want to avoid interaction
- * with the real hardware (which is not set up). So
- * we simply pretend we have enabled the powerwell
- * and the irq, and leave it up to the mock
- * implementation to call intel_engine_wakeup()
- * itself when it wants to simulate a user interrupt,
- */
- return true;
- }
-
- /* Since we are waiting on a request, the GPU should be busy
+ /*
+ * Since we are waiting on a request, the GPU should be busy
* and should have its own rpm reference. This is tracked
* by i915->gt.awake, we can forgo holding our own wakref
* for the interrupt as before i915->gt.awake is released (when
* the driver is idle) we disarm the breadcrumbs.
*/
- /* No interrupts? Kick the waiter every jiffie! */
- enabled = false;
- if (!b->irq_enabled++ &&
- !test_bit(engine->id, &i915->gpu_error.test_irq_rings)) {
+ if (!b->irq_enabled++)
irq_enable(engine);
- enabled = true;
- }
-
- enable_fake_irq(b);
- return enabled;
-}
-
-static inline struct intel_wait *to_wait(struct rb_node *node)
-{
- return rb_entry(node, struct intel_wait, node);
-}
-
-static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
- struct intel_wait *wait)
-{
- lockdep_assert_held(&b->rb_lock);
- GEM_BUG_ON(b->irq_wait == wait);
-
- /*
- * This request is completed, so remove it from the tree, mark it as
- * complete, and *then* wake up the associated task. N.B. when the
- * task wakes up, it will find the empty rb_node, discern that it
- * has already been removed from the tree and skip the serialisation
- * of the b->rb_lock and b->irq_lock. This means that the destruction
- * of the intel_wait is not serialised with the interrupt handler
- * by the waiter - it must instead be serialised by the caller.
- */
- rb_erase(&wait->node, &b->waiters);
- RB_CLEAR_NODE(&wait->node);
-
- if (wait->tsk->state != TASK_RUNNING)
- wake_up_process(wait->tsk); /* implicit smp_wmb() */
-}
-
-static inline void __intel_breadcrumbs_next(struct intel_engine_cs *engine,
- struct rb_node *next)
-{
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
-
- spin_lock(&b->irq_lock);
- GEM_BUG_ON(!b->irq_armed);
- GEM_BUG_ON(!b->irq_wait);
- b->irq_wait = to_wait(next);
- spin_unlock(&b->irq_lock);
-
- /* We always wake up the next waiter that takes over as the bottom-half
- * as we may delegate not only the irq-seqno barrier to the next waiter
- * but also the task of waking up concurrent waiters.
- */
- if (next)
- wake_up_process(to_wait(next)->tsk);
}
-static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
- struct intel_wait *wait)
+void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
- struct rb_node **p, *parent, *completed;
- bool first, armed;
- u32 seqno;
-
- GEM_BUG_ON(!wait->seqno);
-
- /* Insert the request into the retirement ordered list
- * of waiters by walking the rbtree. If we are the oldest
- * seqno in the tree (the first to be retired), then
- * set ourselves as the bottom-half.
- *
- * As we descend the tree, prune completed branches since we hold the
- * spinlock we know that the first_waiter must be delayed and can
- * reduce some of the sequential wake up latency if we take action
- * ourselves and wake up the completed tasks in parallel. Also, by
- * removing stale elements in the tree, we may be able to reduce the
- * ping-pong between the old bottom-half and ourselves as first-waiter.
- */
- armed = false;
- first = true;
- parent = NULL;
- completed = NULL;
- seqno = intel_engine_get_seqno(engine);
-
- /* If the request completed before we managed to grab the spinlock,
- * return now before adding ourselves to the rbtree. We let the
- * current bottom-half handle any pending wakeups and instead
- * try and get out of the way quickly.
- */
- if (i915_seqno_passed(seqno, wait->seqno)) {
- RB_CLEAR_NODE(&wait->node);
- return first;
- }
-
- p = &b->waiters.rb_node;
- while (*p) {
- parent = *p;
- if (wait->seqno == to_wait(parent)->seqno) {
- /* We have multiple waiters on the same seqno, select
- * the highest priority task (that with the smallest
- * task->prio) to serve as the bottom-half for this
- * group.
- */
- if (wait->tsk->prio > to_wait(parent)->tsk->prio) {
- p = &parent->rb_right;
- first = false;
- } else {
- p = &parent->rb_left;
- }
- } else if (i915_seqno_passed(wait->seqno,
- to_wait(parent)->seqno)) {
- p = &parent->rb_right;
- if (i915_seqno_passed(seqno, to_wait(parent)->seqno))
- completed = parent;
- else
- first = false;
- } else {
- p = &parent->rb_left;
- }
- }
- rb_link_node(&wait->node, parent, p);
- rb_insert_color(&wait->node, &b->waiters);
-
- if (first) {
- spin_lock(&b->irq_lock);
- b->irq_wait = wait;
- /* After assigning ourselves as the new bottom-half, we must
- * perform a cursory check to prevent a missed interrupt.
- * Either we miss the interrupt whilst programming the hardware,
- * or if there was a previous waiter (for a later seqno) they
- * may be woken instead of us (due to the inherent race
- * in the unlocked read of b->irq_seqno_bh in the irq handler)
- * and so we miss the wake up.
- */
- armed = __intel_breadcrumbs_enable_irq(b);
- spin_unlock(&b->irq_lock);
- }
- if (completed) {
- /* Advance the bottom-half (b->irq_wait) before we wake up
- * the waiters who may scribble over their intel_wait
- * just as the interrupt handler is dereferencing it via
- * b->irq_wait.
- */
- if (!first) {
- struct rb_node *next = rb_next(completed);
- GEM_BUG_ON(next == &wait->node);
- __intel_breadcrumbs_next(engine, next);
- }
-
- do {
- struct intel_wait *crumb = to_wait(completed);
- completed = rb_prev(completed);
- __intel_breadcrumbs_finish(b, crumb);
- } while (completed);
- }
-
- GEM_BUG_ON(!b->irq_wait);
- GEM_BUG_ON(!b->irq_armed);
- GEM_BUG_ON(rb_first(&b->waiters) != &b->irq_wait->node);
+ spin_lock_init(&b->irq_lock);
+ INIT_LIST_HEAD(&b->signalers);
- return armed;
+ init_irq_work(&b->irq_work, signal_irq_work);
}
-bool intel_engine_add_wait(struct intel_engine_cs *engine,
- struct intel_wait *wait)
+void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
- bool armed;
-
- spin_lock_irq(&b->rb_lock);
- armed = __intel_engine_add_wait(engine, wait);
- spin_unlock_irq(&b->rb_lock);
- if (armed)
- return armed;
-
- /* Make the caller recheck if its request has already started. */
- return intel_engine_has_started(engine, wait->seqno);
-}
+ unsigned long flags;
-static inline bool chain_wakeup(struct rb_node *rb, int priority)
-{
- return rb && to_wait(rb)->tsk->prio <= priority;
-}
+ spin_lock_irqsave(&b->irq_lock, flags);
-static inline int wakeup_priority(struct intel_breadcrumbs *b,
- struct task_struct *tsk)
-{
- if (tsk == b->signaler)
- return INT_MIN;
+ if (b->irq_enabled)
+ irq_enable(engine);
else
- return tsk->prio;
-}
-
-static void __intel_engine_remove_wait(struct intel_engine_cs *engine,
- struct intel_wait *wait)
-{
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
-
- lockdep_assert_held(&b->rb_lock);
-
- if (RB_EMPTY_NODE(&wait->node))
- goto out;
-
- if (b->irq_wait == wait) {
- const int priority = wakeup_priority(b, wait->tsk);
- struct rb_node *next;
-
- /* We are the current bottom-half. Find the next candidate,
- * the first waiter in the queue on the remaining oldest
- * request. As multiple seqnos may complete in the time it
- * takes us to wake up and find the next waiter, we have to
- * wake up that waiter for it to perform its own coherent
- * completion check.
- */
- next = rb_next(&wait->node);
- if (chain_wakeup(next, priority)) {
- /* If the next waiter is already complete,
- * wake it up and continue onto the next waiter. So
- * if have a small herd, they will wake up in parallel
- * rather than sequentially, which should reduce
- * the overall latency in waking all the completed
- * clients.
- *
- * However, waking up a chain adds extra latency to
- * the first_waiter. This is undesirable if that
- * waiter is a high priority task.
- */
- u32 seqno = intel_engine_get_seqno(engine);
-
- while (i915_seqno_passed(seqno, to_wait(next)->seqno)) {
- struct rb_node *n = rb_next(next);
-
- __intel_breadcrumbs_finish(b, to_wait(next));
- next = n;
- if (!chain_wakeup(next, priority))
- break;
- }
- }
-
- __intel_breadcrumbs_next(engine, next);
- } else {
- GEM_BUG_ON(rb_first(&b->waiters) == &wait->node);
- }
-
- GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
- rb_erase(&wait->node, &b->waiters);
- RB_CLEAR_NODE(&wait->node);
+ irq_disable(engine);
-out:
- GEM_BUG_ON(b->irq_wait == wait);
- GEM_BUG_ON(rb_first(&b->waiters) !=
- (b->irq_wait ? &b->irq_wait->node : NULL));
+ spin_unlock_irqrestore(&b->irq_lock, flags);
}
-void intel_engine_remove_wait(struct intel_engine_cs *engine,
- struct intel_wait *wait)
+void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
{
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
-
- /* Quick check to see if this waiter was already decoupled from
- * the tree by the bottom-half to avoid contention on the spinlock
- * by the herd.
- */
- if (RB_EMPTY_NODE(&wait->node)) {
- GEM_BUG_ON(READ_ONCE(b->irq_wait) == wait);
- return;
- }
-
- spin_lock_irq(&b->rb_lock);
- __intel_engine_remove_wait(engine, wait);
- spin_unlock_irq(&b->rb_lock);
}
-static void signaler_set_rtpriority(void)
+bool i915_request_enable_breadcrumb(struct i915_request *rq)
{
- struct sched_param param = { .sched_priority = 1 };
-
- sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
-}
+ struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
-static int intel_breadcrumbs_signaler(void *arg)
-{
- struct intel_engine_cs *engine = arg;
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
- struct i915_request *rq, *n;
+ GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
- /* Install ourselves with high priority to reduce signalling latency */
- signaler_set_rtpriority();
+ if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags))
+ return true;
- do {
- bool do_schedule = true;
- LIST_HEAD(list);
- u32 seqno;
+ spin_lock(&b->irq_lock);
+ if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags) &&
+ !__request_completed(rq)) {
+ struct intel_context *ce = rq->hw_context;
+ struct list_head *pos;
- set_current_state(TASK_INTERRUPTIBLE);
- if (list_empty(&b->signals))
- goto sleep;
+ __intel_breadcrumbs_arm_irq(b);
/*
- * We are either woken up by the interrupt bottom-half,
- * or by a client adding a new signaller. In both cases,
- * the GPU seqno may have advanced beyond our oldest signal.
- * If it has, propagate the signal, remove the waiter and
- * check again with the next oldest signal. Otherwise we
- * need to wait for a new interrupt from the GPU or for
- * a new client.
+ * We keep the seqno in retirement order, so we can break
+ * inside intel_engine_breadcrumbs_irq as soon as we've passed
+ * the last completed request (or seen a request that hasn't
+ * even started). We could iterate the timeline->requests list,
+ * but keeping a separate signalers_list has the advantage of
+ * hopefully being much smaller than the full list and so
+ * provides faster iteration and detection when there are no
+ * more interrupts required for this context.
+ *
+ * We typically expect to add new signalers in order, so we
+ * start looking for our insertion point from the tail of
+ * the list.
*/
- seqno = intel_engine_get_seqno(engine);
-
- spin_lock_irq(&b->rb_lock);
- list_for_each_entry_safe(rq, n, &b->signals, signaling.link) {
- u32 this = rq->signaling.wait.seqno;
-
- GEM_BUG_ON(!rq->signaling.wait.seqno);
-
- if (!i915_seqno_passed(seqno, this))
- break;
-
- if (likely(this == i915_request_global_seqno(rq))) {
- __intel_engine_remove_wait(engine,
- &rq->signaling.wait);
+ list_for_each_prev(pos, &ce->signals) {
+ struct i915_request *it =
+ list_entry(pos, typeof(*it), signal_link);
- rq->signaling.wait.seqno = 0;
- __list_del_entry(&rq->signaling.link);
-
- if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
- &rq->fence.flags)) {
- list_add_tail(&rq->signaling.link,
- &list);
- i915_request_get(rq);
- }
- }
- }
- spin_unlock_irq(&b->rb_lock);
-
- if (!list_empty(&list)) {
- local_bh_disable();
- list_for_each_entry_safe(rq, n, &list, signaling.link) {
- dma_fence_signal(&rq->fence);
- GEM_BUG_ON(!i915_request_completed(rq));
- i915_request_put(rq);
- }
- local_bh_enable(); /* kick start the tasklets */
-
- /*
- * If the engine is saturated we may be continually
- * processing completed requests. This angers the
- * NMI watchdog if we never let anything else
- * have access to the CPU. Let's pretend to be nice
- * and relinquish the CPU if we burn through the
- * entire RT timeslice!
- */
- do_schedule = need_resched();
- }
-
- if (unlikely(do_schedule)) {
- /* Before we sleep, check for a missed seqno */
- if (current->state & TASK_NORMAL &&
- !list_empty(&b->signals) &&
- engine->irq_seqno_barrier &&
- test_and_clear_bit(ENGINE_IRQ_BREADCRUMB,
- &engine->irq_posted)) {
- engine->irq_seqno_barrier(engine);
- intel_engine_wakeup(engine);
- }
-
-sleep:
- if (kthread_should_park())
- kthread_parkme();
-
- if (unlikely(kthread_should_stop()))
+ if (i915_seqno_passed(rq->fence.seqno, it->fence.seqno))
break;
-
- schedule();
}
- } while (1);
- __set_current_state(TASK_RUNNING);
+ list_add(&rq->signal_link, pos);
+ if (pos == &ce->signals) /* catch transitions from empty list */
+ list_move_tail(&ce->signal_link, &b->signalers);
- return 0;
-}
-
-static void insert_signal(struct intel_breadcrumbs *b,
- struct i915_request *request,
- const u32 seqno)
-{
- struct i915_request *iter;
-
- lockdep_assert_held(&b->rb_lock);
-
- /*
- * A reasonable assumption is that we are called to add signals
- * in sequence, as the requests are submitted for execution and
- * assigned a global_seqno. This will be the case for the majority
- * of internally generated signals (inter-engine signaling).
- *
- * Out of order waiters triggering random signaling enabling will
- * be more problematic, but hopefully rare enough and the list
- * small enough that the O(N) insertion sort is not an issue.
- */
-
- list_for_each_entry_reverse(iter, &b->signals, signaling.link)
- if (i915_seqno_passed(seqno, iter->signaling.wait.seqno))
- break;
-
- list_add(&request->signaling.link, &iter->signaling.link);
-}
-
-bool intel_engine_enable_signaling(struct i915_request *request, bool wakeup)
-{
- struct intel_engine_cs *engine = request->engine;
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
- struct intel_wait *wait = &request->signaling.wait;
- u32 seqno;
-
- /*
- * Note that we may be called from an interrupt handler on another
- * device (e.g. nouveau signaling a fence completion causing us
- * to submit a request, and so enable signaling). As such,
- * we need to make sure that all other users of b->rb_lock protect
- * against interrupts, i.e. use spin_lock_irqsave.
- */
-
- /* locked by dma_fence_enable_sw_signaling() (irqsafe fence->lock) */
- GEM_BUG_ON(!irqs_disabled());
- lockdep_assert_held(&request->lock);
-
- seqno = i915_request_global_seqno(request);
- if (!seqno) /* will be enabled later upon execution */
- return true;
-
- GEM_BUG_ON(wait->seqno);
- wait->tsk = b->signaler;
- wait->request = request;
- wait->seqno = seqno;
-
- /*
- * Add ourselves into the list of waiters, but registering our
- * bottom-half as the signaller thread. As per usual, only the oldest
- * waiter (not just signaller) is tasked as the bottom-half waking
- * up all completed waiters after the user interrupt.
- *
- * If we are the oldest waiter, enable the irq (after which we
- * must double check that the seqno did not complete).
- */
- spin_lock(&b->rb_lock);
- insert_signal(b, request, seqno);
- wakeup &= __intel_engine_add_wait(engine, wait);
- spin_unlock(&b->rb_lock);
-
- if (wakeup) {
- wake_up_process(b->signaler);
- return !intel_wait_complete(wait);
+ set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
}
+ spin_unlock(&b->irq_lock);
- return true;
+ return !__request_completed(rq);
}
-void intel_engine_cancel_signaling(struct i915_request *request)
+void i915_request_cancel_breadcrumb(struct i915_request *rq)
{
- struct intel_engine_cs *engine = request->engine;
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
- GEM_BUG_ON(!irqs_disabled());
- lockdep_assert_held(&request->lock);
-
- if (!READ_ONCE(request->signaling.wait.seqno))
+ if (!test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
return;
- spin_lock(&b->rb_lock);
- __intel_engine_remove_wait(engine, &request->signaling.wait);
- if (fetch_and_zero(&request->signaling.wait.seqno))
- __list_del_entry(&request->signaling.link);
- spin_unlock(&b->rb_lock);
-}
-
-int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
-{
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
- struct task_struct *tsk;
-
- spin_lock_init(&b->rb_lock);
- spin_lock_init(&b->irq_lock);
-
- timer_setup(&b->fake_irq, intel_breadcrumbs_fake_irq, 0);
- timer_setup(&b->hangcheck, intel_breadcrumbs_hangcheck, 0);
-
- INIT_LIST_HEAD(&b->signals);
-
- /* Spawn a thread to provide a common bottom-half for all signals.
- * As this is an asynchronous interface we cannot steal the current
- * task for handling the bottom-half to the user interrupt, therefore
- * we create a thread to do the coherent seqno dance after the
- * interrupt and then signal the waitqueue (via the dma-buf/fence).
- */
- tsk = kthread_run(intel_breadcrumbs_signaler, engine,
- "i915/signal:%d", engine->id);
- if (IS_ERR(tsk))
- return PTR_ERR(tsk);
-
- b->signaler = tsk;
-
- return 0;
-}
-
-static void cancel_fake_irq(struct intel_engine_cs *engine)
-{
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
-
- del_timer_sync(&b->fake_irq); /* may queue b->hangcheck */
- del_timer_sync(&b->hangcheck);
- clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
-}
-
-void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
-{
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
- unsigned long flags;
-
- spin_lock_irqsave(&b->irq_lock, flags);
-
- /*
- * Leave the fake_irq timer enabled (if it is running), but clear the
- * bit so that it turns itself off on its next wake up and goes back
- * to the long hangcheck interval if still required.
- */
- clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
-
- if (b->irq_enabled)
- irq_enable(engine);
- else
- irq_disable(engine);
+ spin_lock(&b->irq_lock);
+ if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) {
+ struct intel_context *ce = rq->hw_context;
- /*
- * We set the IRQ_BREADCRUMB bit when we enable the irq presuming the
- * GPU is active and may have already executed the MI_USER_INTERRUPT
- * before the CPU is ready to receive. However, the engine is currently
- * idle (we haven't started it yet), there is no possibility for a
- * missed interrupt as we enabled the irq and so we can clear the
- * immediate wakeup (until a real interrupt arrives for the waiter).
- */
- clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
+ list_del(&rq->signal_link);
+ if (list_empty(&ce->signals))
+ list_del_init(&ce->signal_link);
- spin_unlock_irqrestore(&b->irq_lock, flags);
+ clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
+ }
+ spin_unlock(&b->irq_lock);
}
-void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
+void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine,
+ struct drm_printer *p)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ struct intel_context *ce;
+ struct i915_request *rq;
- /* The engines should be idle and all requests accounted for! */
- WARN_ON(READ_ONCE(b->irq_wait));
- WARN_ON(!RB_EMPTY_ROOT(&b->waiters));
- WARN_ON(!list_empty(&b->signals));
+ if (list_empty(&b->signalers))
+ return;
- if (!IS_ERR_OR_NULL(b->signaler))
- kthread_stop(b->signaler);
+ drm_printf(p, "Signals:\n");
- cancel_fake_irq(engine);
+ spin_lock_irq(&b->irq_lock);
+ list_for_each_entry(ce, &b->signalers, signal_link) {
+ list_for_each_entry(rq, &ce->signals, signal_link) {
+ drm_printf(p, "\t[%llx:%llx%s] @ %dms\n",
+ rq->fence.context, rq->fence.seqno,
+ i915_request_completed(rq) ? "!" :
+ i915_request_started(rq) ? "*" :
+ "",
+ jiffies_to_msecs(jiffies - rq->emitted_jiffies));
+ }
+ }
+ spin_unlock_irq(&b->irq_lock);
}
-
-#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftests/intel_breadcrumbs.c"
-#endif
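The breadcrumbs rework above replaces the per-engine signaler kthread and the waiter rb-tree with per-context signal lists that are walked from irq_work when the user interrupt arrives. i915_request_enable_breadcrumb() keeps each context's ce->signals list in fence-seqno (retirement) order so the interrupt handler can stop at the first request that has not completed. Below is a minimal, self-contained userspace sketch of that ordered insert; struct node, seqno_passed() and insert_signal() are invented for illustration and are not the driver's API.

	/* build: cc -std=c11 -Wall sketch.c */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct node {
		struct node *prev, *next;
		uint32_t seqno;
	};

	/* wraparound-safe "a is at or after b", mirroring i915_seqno_passed() */
	static bool seqno_passed(uint32_t a, uint32_t b)
	{
		return (int32_t)(a - b) >= 0;
	}

	static void list_init(struct node *head)
	{
		head->prev = head->next = head;
	}

	/* link @n immediately after @pos */
	static void list_add(struct node *n, struct node *pos)
	{
		n->prev = pos;
		n->next = pos->next;
		pos->next->prev = n;
		pos->next = n;
	}

	/*
	 * New signalers usually arrive in submission order, so walk backwards
	 * from the tail and link the request after the first entry it has
	 * already passed; the list stays sorted in retirement order.
	 */
	static void insert_signal(struct node *head, struct node *rq)
	{
		struct node *pos;

		for (pos = head->prev; pos != head; pos = pos->prev)
			if (seqno_passed(rq->seqno, pos->seqno))
				break;

		list_add(rq, pos);
	}

	int main(void)
	{
		struct node head, a = { .seqno = 1 }, b = { .seqno = 3 }, c = { .seqno = 2 };
		struct node *it;

		list_init(&head);
		insert_signal(&head, &a);
		insert_signal(&head, &b);
		insert_signal(&head, &c);	/* out-of-order arrival */

		for (it = head.next; it != &head; it = it->next)
			printf("%u\n", it->seqno);	/* prints 1 2 3 */

		return 0;
	}

Walking from the tail keeps the expected in-order case O(1); out-of-order enabling degrades to a linear scan, matching the new comment's expectation that signalers are typically added in order.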
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index 25e3aba9cded..15ba950dee00 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -218,7 +218,7 @@ static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv)
};
const unsigned int *vco_table;
unsigned int vco;
- uint8_t tmp = 0;
+ u8 tmp = 0;
/* FIXME other chipsets? */
if (IS_GM45(dev_priv))
@@ -249,13 +249,13 @@ static void g33_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_state *cdclk_state)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
- static const uint8_t div_3200[] = { 12, 10, 8, 7, 5, 16 };
- static const uint8_t div_4000[] = { 14, 12, 10, 8, 6, 20 };
- static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
- static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 };
- const uint8_t *div_table;
+ static const u8 div_3200[] = { 12, 10, 8, 7, 5, 16 };
+ static const u8 div_4000[] = { 14, 12, 10, 8, 6, 20 };
+ static const u8 div_4800[] = { 20, 14, 12, 10, 8, 24 };
+ static const u8 div_5333[] = { 20, 16, 12, 12, 8, 28 };
+ const u8 *div_table;
unsigned int cdclk_sel;
- uint16_t tmp = 0;
+ u16 tmp = 0;
cdclk_state->vco = intel_hpll_vco(dev_priv);
@@ -330,12 +330,12 @@ static void i965gm_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_state *cdclk_state)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
- static const uint8_t div_3200[] = { 16, 10, 8 };
- static const uint8_t div_4000[] = { 20, 12, 10 };
- static const uint8_t div_5333[] = { 24, 16, 14 };
- const uint8_t *div_table;
+ static const u8 div_3200[] = { 16, 10, 8 };
+ static const u8 div_4000[] = { 20, 12, 10 };
+ static const u8 div_5333[] = { 24, 16, 14 };
+ const u8 *div_table;
unsigned int cdclk_sel;
- uint16_t tmp = 0;
+ u16 tmp = 0;
cdclk_state->vco = intel_hpll_vco(dev_priv);
@@ -375,7 +375,7 @@ static void gm45_get_cdclk(struct drm_i915_private *dev_priv,
{
struct pci_dev *pdev = dev_priv->drm.pdev;
unsigned int cdclk_sel;
- uint16_t tmp = 0;
+ u16 tmp = 0;
cdclk_state->vco = intel_hpll_vco(dev_priv);
@@ -403,8 +403,8 @@ static void gm45_get_cdclk(struct drm_i915_private *dev_priv,
static void hsw_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_state *cdclk_state)
{
- uint32_t lcpll = I915_READ(LCPLL_CTL);
- uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
+ u32 lcpll = I915_READ(LCPLL_CTL);
+ u32 freq = lcpll & LCPLL_CLK_FREQ_MASK;
if (lcpll & LCPLL_CD_SOURCE_FCLK)
cdclk_state->cdclk = 800000;
@@ -520,6 +520,7 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
{
int cdclk = cdclk_state->cdclk;
u32 val, cmd = cdclk_state->voltage_level;
+ intel_wakeref_t wakeref;
switch (cdclk) {
case 400000:
@@ -539,7 +540,7 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
* a system suspend. So grab the PIPE-A domain, which covers
* the HW blocks needed for the following programming.
*/
- intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
+ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
mutex_lock(&dev_priv->pcu_lock);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
@@ -593,7 +594,7 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
vlv_program_pfi_credits(dev_priv);
- intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
}
static void chv_set_cdclk(struct drm_i915_private *dev_priv,
@@ -601,6 +602,7 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
{
int cdclk = cdclk_state->cdclk;
u32 val, cmd = cdclk_state->voltage_level;
+ intel_wakeref_t wakeref;
switch (cdclk) {
case 333333:
@@ -619,7 +621,7 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
* a system suspend. So grab the PIPE-A domain, which covers
* the HW blocks needed for the following programming.
*/
- intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
+ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
mutex_lock(&dev_priv->pcu_lock);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
@@ -637,7 +639,7 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
vlv_program_pfi_credits(dev_priv);
- intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
}
static int bdw_calc_cdclk(int min_cdclk)
@@ -670,8 +672,8 @@ static u8 bdw_calc_voltage_level(int cdclk)
static void bdw_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_state *cdclk_state)
{
- uint32_t lcpll = I915_READ(LCPLL_CTL);
- uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
+ u32 lcpll = I915_READ(LCPLL_CTL);
+ u32 freq = lcpll & LCPLL_CLK_FREQ_MASK;
if (lcpll & LCPLL_CD_SOURCE_FCLK)
cdclk_state->cdclk = 800000;
@@ -698,7 +700,7 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_state *cdclk_state)
{
int cdclk = cdclk_state->cdclk;
- uint32_t val;
+ u32 val;
int ret;
if (WARN((I915_READ(LCPLL_CTL) &
@@ -1081,7 +1083,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
- uint32_t cdctl, expected;
+ u32 cdctl, expected;
/*
* check if the pre-os initialized the display
@@ -2140,7 +2142,7 @@ static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv,
{
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
return DIV_ROUND_UP(pixel_rate, 2);
- else if (IS_GEN9(dev_priv) ||
+ else if (IS_GEN(dev_priv, 9) ||
IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
return pixel_rate;
else if (IS_CHERRYVIEW(dev_priv))
@@ -2176,7 +2178,7 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) {
/* Display WA #1145: glk,cnl */
min_cdclk = max(316800, min_cdclk);
- } else if (IS_GEN9(dev_priv) || IS_BROADWELL(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 9) || IS_BROADWELL(dev_priv)) {
/* Display WA #1144: skl,bxt */
min_cdclk = max(432000, min_cdclk);
}
@@ -2537,7 +2539,7 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
return 2 * max_cdclk_freq;
- else if (IS_GEN9(dev_priv) ||
+ else if (IS_GEN(dev_priv, 9) ||
IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
return max_cdclk_freq;
else if (IS_CHERRYVIEW(dev_priv))
@@ -2688,7 +2690,7 @@ static int vlv_hrawclk(struct drm_i915_private *dev_priv)
static int g4x_hrawclk(struct drm_i915_private *dev_priv)
{
- uint32_t clkcfg;
+ u32 clkcfg;
/* hrawclock is 1/4 the FSB frequency */
clkcfg = I915_READ(CLKCFG);
@@ -2785,9 +2787,9 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.get_cdclk = hsw_get_cdclk;
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
dev_priv->display.get_cdclk = vlv_get_cdclk;
- else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
+ else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
dev_priv->display.get_cdclk = fixed_400mhz_get_cdclk;
- else if (IS_GEN5(dev_priv))
+ else if (IS_GEN(dev_priv, 5))
dev_priv->display.get_cdclk = fixed_450mhz_get_cdclk;
else if (IS_GM45(dev_priv))
dev_priv->display.get_cdclk = gm45_get_cdclk;
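The vlv/chv cdclk hunks above also adopt the new wakeref tracking: intel_display_power_get() now returns an intel_wakeref_t cookie and intel_display_power_put() takes it back, so every display-power acquisition is paired with its release. A rough self-contained sketch of the pattern follows; wakeref_t, power_get() and power_put() are invented stand-ins, not the i915 functions.

	#include <assert.h>
	#include <stdint.h>

	typedef uintptr_t wakeref_t;	/* opaque cookie, 0 means "not held" */

	static int power_refcount;

	static wakeref_t power_get(void)
	{
		power_refcount++;
		/* a real tracker would record the caller here and hand back its id */
		return (wakeref_t)&power_refcount;
	}

	static void power_put(wakeref_t wakeref)
	{
		assert(wakeref);	/* catches a put without a matching get */
		power_refcount--;
	}

	static void set_cdclk(void)
	{
		wakeref_t wakeref = power_get();

		/* ... program the hardware while the power domain is held ... */

		power_put(wakeref);	/* the cookie ties this put to its get */
	}

	int main(void)
	{
		set_cdclk();
		assert(power_refcount == 0);	/* no leaked references */
		return 0;
	}

The cookie makes an unbalanced put, or a leaked get, attributable to a specific call site instead of showing up only as an aggregate refcount mismatch.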
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index 5127da286a2b..71a1f12c6b2a 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -74,12 +74,17 @@
#define ILK_CSC_COEFF_1_0 \
((7 << 12) | ILK_CSC_COEFF_FP(CTM_COEFF_1_0, 8))
-static bool crtc_state_is_legacy_gamma(struct drm_crtc_state *state)
+static bool lut_is_legacy(const struct drm_property_blob *lut)
{
- return !state->degamma_lut &&
- !state->ctm &&
- state->gamma_lut &&
- drm_color_lut_size(state->gamma_lut) == LEGACY_LUT_LENGTH;
+ return drm_color_lut_size(lut) == LEGACY_LUT_LENGTH;
+}
+
+static bool crtc_state_is_legacy_gamma(const struct intel_crtc_state *crtc_state)
+{
+ return !crtc_state->base.degamma_lut &&
+ !crtc_state->base.ctm &&
+ crtc_state->base.gamma_lut &&
+ lut_is_legacy(crtc_state->base.gamma_lut);
}
/*
@@ -108,10 +113,10 @@ static u64 *ctm_mult_by_limited(u64 *result, const u64 *input)
return result;
}
-static void ilk_load_ycbcr_conversion_matrix(struct intel_crtc *intel_crtc)
+static void ilk_load_ycbcr_conversion_matrix(struct intel_crtc *crtc)
{
- int pipe = intel_crtc->pipe;
- struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
@@ -132,29 +137,28 @@ static void ilk_load_ycbcr_conversion_matrix(struct intel_crtc *intel_crtc)
I915_WRITE(PIPE_CSC_MODE(pipe), 0);
}
-static void ilk_load_csc_matrix(struct drm_crtc_state *crtc_state)
+static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state)
{
- struct drm_crtc *crtc = crtc_state->crtc;
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int i, pipe = intel_crtc->pipe;
- uint16_t coeffs[9] = { 0, };
- struct intel_crtc_state *intel_crtc_state = to_intel_crtc_state(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
bool limited_color_range = false;
+ enum pipe pipe = crtc->pipe;
+ u16 coeffs[9] = {};
+ int i;
/*
* FIXME if there's a gamma LUT after the CSC, we should
* do the range compression using the gamma LUT instead.
*/
if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv))
- limited_color_range = intel_crtc_state->limited_color_range;
+ limited_color_range = crtc_state->limited_color_range;
- if (intel_crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
- intel_crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) {
- ilk_load_ycbcr_conversion_matrix(intel_crtc);
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
+ crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) {
+ ilk_load_ycbcr_conversion_matrix(crtc);
return;
- } else if (crtc_state->ctm) {
- struct drm_color_ctm *ctm = crtc_state->ctm->data;
+ } else if (crtc_state->base.ctm) {
+ struct drm_color_ctm *ctm = crtc_state->base.ctm->data;
const u64 *input;
u64 temp[9];
@@ -168,7 +172,7 @@ static void ilk_load_csc_matrix(struct drm_crtc_state *crtc_state)
* hardware.
*/
for (i = 0; i < ARRAY_SIZE(coeffs); i++) {
- uint64_t abs_coeff = ((1ULL << 63) - 1) & input[i];
+ u64 abs_coeff = ((1ULL << 63) - 1) & input[i];
/*
* Clamp input value to min/max supported by
@@ -230,7 +234,7 @@ static void ilk_load_csc_matrix(struct drm_crtc_state *crtc_state)
I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
if (INTEL_GEN(dev_priv) > 6) {
- uint16_t postoff = 0;
+ u16 postoff = 0;
if (limited_color_range)
postoff = (16 * (1 << 12) / 255) & 0x1fff;
@@ -241,7 +245,7 @@ static void ilk_load_csc_matrix(struct drm_crtc_state *crtc_state)
I915_WRITE(PIPE_CSC_MODE(pipe), 0);
} else {
- uint32_t mode = CSC_MODE_YUV_TO_RGB;
+ u32 mode = CSC_MODE_YUV_TO_RGB;
if (limited_color_range)
mode |= CSC_BLACK_SCREEN_OFFSET;
@@ -253,21 +257,20 @@ static void ilk_load_csc_matrix(struct drm_crtc_state *crtc_state)
/*
* Set up the pipe CSC unit on CherryView.
*/
-static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
+static void cherryview_load_csc_matrix(const struct intel_crtc_state *crtc_state)
{
- struct drm_crtc *crtc = state->crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = to_intel_crtc(crtc)->pipe;
- uint32_t mode;
-
- if (state->ctm) {
- struct drm_color_ctm *ctm = state->ctm->data;
- uint16_t coeffs[9] = { 0, };
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ u32 mode;
+
+ if (crtc_state->base.ctm) {
+ const struct drm_color_ctm *ctm = crtc_state->base.ctm->data;
+ u16 coeffs[9] = {};
int i;
for (i = 0; i < ARRAY_SIZE(coeffs); i++) {
- uint64_t abs_coeff =
+ u64 abs_coeff =
((1ULL << 63) - 1) & ctm->matrix[i];
/* Round coefficient. */
@@ -293,35 +296,24 @@ static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
I915_WRITE(CGM_PIPE_CSC_COEFF8(pipe), coeffs[8]);
}
- mode = (state->ctm ? CGM_PIPE_MODE_CSC : 0);
- if (!crtc_state_is_legacy_gamma(state)) {
- mode |= (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
- (state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0);
+ mode = (crtc_state->base.ctm ? CGM_PIPE_MODE_CSC : 0);
+ if (!crtc_state_is_legacy_gamma(crtc_state)) {
+ mode |= (crtc_state->base.degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
+ (crtc_state->base.gamma_lut ? CGM_PIPE_MODE_GAMMA : 0);
}
I915_WRITE(CGM_PIPE_MODE(pipe), mode);
}
-void intel_color_set_csc(struct drm_crtc_state *crtc_state)
-{
- struct drm_device *dev = crtc_state->crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (dev_priv->display.load_csc_matrix)
- dev_priv->display.load_csc_matrix(crtc_state);
-}
-
/* Loads the legacy palette/gamma unit for the CRTC. */
-static void i9xx_load_luts_internal(struct drm_crtc *crtc,
- struct drm_property_blob *blob,
- struct intel_crtc_state *crtc_state)
+static void i9xx_load_luts_internal(const struct intel_crtc_state *crtc_state,
+ const struct drm_property_blob *blob)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
int i;
- if (HAS_GMCH_DISPLAY(dev_priv)) {
+ if (HAS_GMCH(dev_priv)) {
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
assert_dsi_pll_enabled(dev_priv);
else
@@ -329,23 +321,24 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
}
if (blob) {
- struct drm_color_lut *lut = blob->data;
+ const struct drm_color_lut *lut = blob->data;
+
for (i = 0; i < 256; i++) {
- uint32_t word =
+ u32 word =
(drm_color_lut_extract(lut[i].red, 8) << 16) |
(drm_color_lut_extract(lut[i].green, 8) << 8) |
drm_color_lut_extract(lut[i].blue, 8);
- if (HAS_GMCH_DISPLAY(dev_priv))
+ if (HAS_GMCH(dev_priv))
I915_WRITE(PALETTE(pipe, i), word);
else
I915_WRITE(LGC_PALETTE(pipe, i), word);
}
} else {
for (i = 0; i < 256; i++) {
- uint32_t word = (i << 16) | (i << 8) | i;
+ u32 word = (i << 16) | (i << 8) | i;
- if (HAS_GMCH_DISPLAY(dev_priv))
+ if (HAS_GMCH(dev_priv))
I915_WRITE(PALETTE(pipe, i), word);
else
I915_WRITE(LGC_PALETTE(pipe, i), word);
@@ -353,56 +346,37 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
}
}
-static void i9xx_load_luts(struct drm_crtc_state *crtc_state)
+static void i9xx_load_luts(const struct intel_crtc_state *crtc_state)
{
- i9xx_load_luts_internal(crtc_state->crtc, crtc_state->gamma_lut,
- to_intel_crtc_state(crtc_state));
+ i9xx_load_luts_internal(crtc_state, crtc_state->base.gamma_lut);
}
-/* Loads the legacy palette/gamma unit for the CRTC on Haswell. */
-static void haswell_load_luts(struct drm_crtc_state *crtc_state)
+static void hsw_color_commit(const struct intel_crtc_state *crtc_state)
{
- struct drm_crtc *crtc = crtc_state->crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_crtc_state *intel_crtc_state =
- to_intel_crtc_state(crtc_state);
- bool reenable_ips = false;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- /*
- * Workaround : Do not read or write the pipe palette/gamma data while
- * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
- */
- if (IS_HASWELL(dev_priv) && intel_crtc_state->ips_enabled &&
- (intel_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)) {
- hsw_disable_ips(intel_crtc_state);
- reenable_ips = true;
- }
+ I915_WRITE(GAMMA_MODE(crtc->pipe), crtc_state->gamma_mode);
- intel_crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT;
- I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
-
- i9xx_load_luts(crtc_state);
-
- if (reenable_ips)
- hsw_enable_ips(intel_crtc_state);
+ ilk_load_csc_matrix(crtc_state);
}
-static void bdw_load_degamma_lut(struct drm_crtc_state *state)
+static void bdw_load_degamma_lut(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(state->crtc->dev);
- enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
- uint32_t i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
+ u32 i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
+ enum pipe pipe = crtc->pipe;
I915_WRITE(PREC_PAL_INDEX(pipe),
PAL_PREC_SPLIT_MODE | PAL_PREC_AUTO_INCREMENT);
- if (state->degamma_lut) {
- struct drm_color_lut *lut = state->degamma_lut->data;
+ if (degamma_lut) {
+ const struct drm_color_lut *lut = degamma_lut->data;
for (i = 0; i < lut_size; i++) {
- uint32_t word =
+ u32 word =
drm_color_lut_extract(lut[i].red, 10) << 20 |
drm_color_lut_extract(lut[i].green, 10) << 10 |
drm_color_lut_extract(lut[i].blue, 10);
@@ -411,7 +385,7 @@ static void bdw_load_degamma_lut(struct drm_crtc_state *state)
}
} else {
for (i = 0; i < lut_size; i++) {
- uint32_t v = (i * ((1 << 10) - 1)) / (lut_size - 1);
+ u32 v = (i * ((1 << 10) - 1)) / (lut_size - 1);
I915_WRITE(PREC_PAL_DATA(pipe),
(v << 20) | (v << 10) | v);
@@ -419,11 +393,13 @@ static void bdw_load_degamma_lut(struct drm_crtc_state *state)
}
}
-static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset)
+static void bdw_load_gamma_lut(const struct intel_crtc_state *crtc_state, u32 offset)
{
- struct drm_i915_private *dev_priv = to_i915(state->crtc->dev);
- enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
- uint32_t i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
+ u32 i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+ enum pipe pipe = crtc->pipe;
WARN_ON(offset & ~PAL_PREC_INDEX_VALUE_MASK);
@@ -432,11 +408,11 @@ static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset)
PAL_PREC_AUTO_INCREMENT |
offset);
- if (state->gamma_lut) {
- struct drm_color_lut *lut = state->gamma_lut->data;
+ if (gamma_lut) {
+ const struct drm_color_lut *lut = gamma_lut->data;
for (i = 0; i < lut_size; i++) {
- uint32_t word =
+ u32 word =
(drm_color_lut_extract(lut[i].red, 10) << 20) |
(drm_color_lut_extract(lut[i].green, 10) << 10) |
drm_color_lut_extract(lut[i].blue, 10);
@@ -454,7 +430,7 @@ static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset)
drm_color_lut_extract(lut[i].blue, 16));
} else {
for (i = 0; i < lut_size; i++) {
- uint32_t v = (i * ((1 << 10) - 1)) / (lut_size - 1);
+ u32 v = (i * ((1 << 10) - 1)) / (lut_size - 1);
I915_WRITE(PREC_PAL_DATA(pipe),
(v << 20) | (v << 10) | v);
@@ -467,38 +443,34 @@ static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset)
}
/* Loads the palette/gamma unit for the CRTC on Broadwell+. */
-static void broadwell_load_luts(struct drm_crtc_state *state)
+static void broadwell_load_luts(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(state->crtc->dev);
- struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
- enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
-
- if (crtc_state_is_legacy_gamma(state)) {
- haswell_load_luts(state);
- return;
- }
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
- bdw_load_degamma_lut(state);
- bdw_load_gamma_lut(state,
- INTEL_INFO(dev_priv)->color.degamma_lut_size);
-
- intel_state->gamma_mode = GAMMA_MODE_MODE_SPLIT;
- I915_WRITE(GAMMA_MODE(pipe), GAMMA_MODE_MODE_SPLIT);
- POSTING_READ(GAMMA_MODE(pipe));
+ if (crtc_state_is_legacy_gamma(crtc_state)) {
+ i9xx_load_luts(crtc_state);
+ } else {
+ bdw_load_degamma_lut(crtc_state);
+ bdw_load_gamma_lut(crtc_state,
+ INTEL_INFO(dev_priv)->color.degamma_lut_size);
- /*
- * Reset the index, otherwise it prevents the legacy palette to be
- * written properly.
- */
- I915_WRITE(PREC_PAL_INDEX(pipe), 0);
+ /*
+ * Reset the index, otherwise it prevents the legacy palette from
+ * being written properly.
+ */
+ I915_WRITE(PREC_PAL_INDEX(pipe), 0);
+ }
}
-static void glk_load_degamma_lut(struct drm_crtc_state *state)
+static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(state->crtc->dev);
- enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
- const uint32_t lut_size = 33;
- uint32_t i;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ const u32 lut_size = 33;
+ u32 i;
/*
* When setting the auto-increment bit, the hardware seems to
@@ -513,7 +485,7 @@ static void glk_load_degamma_lut(struct drm_crtc_state *state)
* different values per channel, so this just loads a linear table.
*/
for (i = 0; i < lut_size; i++) {
- uint32_t v = (i * (1 << 16)) / (lut_size - 1);
+ u32 v = (i * (1 << 16)) / (lut_size - 1);
I915_WRITE(PRE_CSC_GAMC_DATA(pipe), v);
}
@@ -523,51 +495,49 @@ static void glk_load_degamma_lut(struct drm_crtc_state *state)
I915_WRITE(PRE_CSC_GAMC_DATA(pipe), (1 << 16));
}
-static void glk_load_luts(struct drm_crtc_state *state)
+static void glk_load_luts(const struct intel_crtc_state *crtc_state)
{
- struct drm_crtc *crtc = state->crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
- enum pipe pipe = to_intel_crtc(crtc)->pipe;
-
- glk_load_degamma_lut(state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
- if (crtc_state_is_legacy_gamma(state)) {
- haswell_load_luts(state);
- return;
- }
+ glk_load_degamma_lut(crtc_state);
- bdw_load_gamma_lut(state, 0);
+ if (crtc_state_is_legacy_gamma(crtc_state)) {
+ i9xx_load_luts(crtc_state);
+ } else {
+ bdw_load_gamma_lut(crtc_state, 0);
- intel_state->gamma_mode = GAMMA_MODE_MODE_10BIT;
- I915_WRITE(GAMMA_MODE(pipe), GAMMA_MODE_MODE_10BIT);
- POSTING_READ(GAMMA_MODE(pipe));
+ /*
+ * Reset the index, otherwise it prevents the legacy palette from
+ * being written properly.
+ */
+ I915_WRITE(PREC_PAL_INDEX(pipe), 0);
+ }
}
-/* Loads the palette/gamma unit for the CRTC on CherryView. */
-static void cherryview_load_luts(struct drm_crtc_state *state)
+static void cherryview_load_luts(const struct intel_crtc_state *crtc_state)
{
- struct drm_crtc *crtc = state->crtc;
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- enum pipe pipe = to_intel_crtc(crtc)->pipe;
- struct drm_color_lut *lut;
- uint32_t i, lut_size;
- uint32_t word0, word1;
-
- if (crtc_state_is_legacy_gamma(state)) {
- /* Turn off degamma/gamma on CGM block. */
- I915_WRITE(CGM_PIPE_MODE(pipe),
- (state->ctm ? CGM_PIPE_MODE_CSC : 0));
- i9xx_load_luts_internal(crtc, state->gamma_lut,
- to_intel_crtc_state(state));
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
+ const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
+ enum pipe pipe = crtc->pipe;
+
+ cherryview_load_csc_matrix(crtc_state);
+
+ if (crtc_state_is_legacy_gamma(crtc_state)) {
+ i9xx_load_luts_internal(crtc_state, gamma_lut);
return;
}
- if (state->degamma_lut) {
- lut = state->degamma_lut->data;
- lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
+ if (degamma_lut) {
+ const struct drm_color_lut *lut = degamma_lut->data;
+ int i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
+
for (i = 0; i < lut_size; i++) {
+ u32 word0, word1;
+
/* Write LUT in U0.14 format. */
word0 =
(drm_color_lut_extract(lut[i].green, 14) << 16) |
@@ -579,10 +549,13 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
}
}
- if (state->gamma_lut) {
- lut = state->gamma_lut->data;
- lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+ if (gamma_lut) {
+ const struct drm_color_lut *lut = gamma_lut->data;
+ int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+
for (i = 0; i < lut_size; i++) {
+ u32 word0, word1;
+
/* Write LUT in U0.10 format. */
word0 =
(drm_color_lut_extract(lut[i].green, 10) << 16) |
@@ -594,74 +567,100 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
}
}
- I915_WRITE(CGM_PIPE_MODE(pipe),
- (state->ctm ? CGM_PIPE_MODE_CSC : 0) |
- (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
- (state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0));
-
/*
* Also program a linear LUT in the legacy block (behind the
* CGM block).
*/
- i9xx_load_luts_internal(crtc, NULL, to_intel_crtc_state(state));
+ i9xx_load_luts_internal(crtc_state, NULL);
}
-void intel_color_load_luts(struct drm_crtc_state *crtc_state)
+void intel_color_load_luts(const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc_state->crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
dev_priv->display.load_luts(crtc_state);
}
-int intel_color_check(struct drm_crtc *crtc,
- struct drm_crtc_state *crtc_state)
+void intel_color_commit(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- size_t gamma_length, degamma_length;
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+
+ if (dev_priv->display.color_commit)
+ dev_priv->display.color_commit(crtc_state);
+}
+
+static int check_lut_size(const struct drm_property_blob *lut, int expected)
+{
+ int len;
+
+ if (!lut)
+ return 0;
+
+ len = drm_color_lut_size(lut);
+ if (len != expected) {
+ DRM_DEBUG_KMS("Invalid LUT size; got %d, expected %d\n",
+ len, expected);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int intel_color_check(struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
+ const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
+ int gamma_length, degamma_length;
+ u32 gamma_tests, degamma_tests;
degamma_length = INTEL_INFO(dev_priv)->color.degamma_lut_size;
gamma_length = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+ degamma_tests = INTEL_INFO(dev_priv)->color.degamma_lut_tests;
+ gamma_tests = INTEL_INFO(dev_priv)->color.gamma_lut_tests;
- /*
- * We allow both degamma & gamma luts at the right size or
- * NULL.
- */
- if ((!crtc_state->degamma_lut ||
- drm_color_lut_size(crtc_state->degamma_lut) == degamma_length) &&
- (!crtc_state->gamma_lut ||
- drm_color_lut_size(crtc_state->gamma_lut) == gamma_length))
+ /* Always allow legacy gamma LUT with no further checking. */
+ if (crtc_state_is_legacy_gamma(crtc_state)) {
+ crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT;
return 0;
+ }
- /*
- * We also allow no degamma lut/ctm and a gamma lut at the legacy
- * size (256 entries).
- */
- if (crtc_state_is_legacy_gamma(crtc_state))
- return 0;
+ if (check_lut_size(degamma_lut, degamma_length) ||
+ check_lut_size(gamma_lut, gamma_length))
+ return -EINVAL;
- return -EINVAL;
+ if (drm_color_lut_check(degamma_lut, degamma_tests) ||
+ drm_color_lut_check(gamma_lut, gamma_tests))
+ return -EINVAL;
+
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ crtc_state->gamma_mode = GAMMA_MODE_MODE_10BIT;
+ else if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
+ crtc_state->gamma_mode = GAMMA_MODE_MODE_SPLIT;
+ else
+ crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT;
+
+ return 0;
}
-void intel_color_init(struct drm_crtc *crtc)
+void intel_color_init(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- drm_mode_crtc_set_gamma_size(crtc, 256);
+ drm_mode_crtc_set_gamma_size(&crtc->base, 256);
if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->display.load_csc_matrix = cherryview_load_csc_matrix;
dev_priv->display.load_luts = cherryview_load_luts;
} else if (IS_HASWELL(dev_priv)) {
- dev_priv->display.load_csc_matrix = ilk_load_csc_matrix;
- dev_priv->display.load_luts = haswell_load_luts;
+ dev_priv->display.load_luts = i9xx_load_luts;
+ dev_priv->display.color_commit = hsw_color_commit;
} else if (IS_BROADWELL(dev_priv) || IS_GEN9_BC(dev_priv) ||
IS_BROXTON(dev_priv)) {
- dev_priv->display.load_csc_matrix = ilk_load_csc_matrix;
dev_priv->display.load_luts = broadwell_load_luts;
+ dev_priv->display.color_commit = hsw_color_commit;
} else if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
- dev_priv->display.load_csc_matrix = ilk_load_csc_matrix;
dev_priv->display.load_luts = glk_load_luts;
+ dev_priv->display.color_commit = hsw_color_commit;
} else {
dev_priv->display.load_luts = i9xx_load_luts;
}
@@ -669,7 +668,7 @@ void intel_color_init(struct drm_crtc *crtc)
/* Enable color management support when we have degamma & gamma LUTs. */
if (INTEL_INFO(dev_priv)->color.degamma_lut_size != 0 &&
INTEL_INFO(dev_priv)->color.gamma_lut_size != 0)
- drm_crtc_enable_color_mgmt(crtc,
+ drm_crtc_enable_color_mgmt(&crtc->base,
INTEL_INFO(dev_priv)->color.degamma_lut_size,
true,
INTEL_INFO(dev_priv)->color.gamma_lut_size);
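With the changes above, intel_color_check() settles crtc_state->gamma_mode at atomic-check time: the 256-entry legacy gamma table is always accepted as 8-bit mode, while anything else must match the platform's exact degamma/gamma LUT sizes (and pass drm_color_lut_check()) before a split or 10-bit mode is chosen. The sketch below is a simplified, self-contained rendering of that flow; struct lut, color_check() and the enum are illustrative only and omit the CTM and LUT-content tests.

	#include <stdio.h>

	#define LEGACY_LUT_LENGTH 256

	enum gamma_mode { GAMMA_8BIT, GAMMA_SPLIT, GAMMA_10BIT };

	struct lut { int size; };	/* stand-in for a DRM property blob */

	static int check_lut_size(const struct lut *lut, int expected)
	{
		if (!lut)
			return 0;	/* an absent LUT is allowed */
		if (lut->size != expected) {
			fprintf(stderr, "Invalid LUT size; got %d, expected %d\n",
				lut->size, expected);
			return -1;
		}
		return 0;
	}

	static int color_check(const struct lut *degamma, const struct lut *gamma,
			       int degamma_len, int gamma_len, int gen,
			       enum gamma_mode *mode)
	{
		/* legacy gamma: no degamma LUT and a 256-entry gamma table */
		if (!degamma && gamma && gamma->size == LEGACY_LUT_LENGTH) {
			*mode = GAMMA_8BIT;
			return 0;
		}

		if (check_lut_size(degamma, degamma_len) ||
		    check_lut_size(gamma, gamma_len))
			return -1;

		/* simplified platform split: glk/cnl+ take 10-bit, bdw/skl split */
		*mode = gen >= 10 ? GAMMA_10BIT : GAMMA_SPLIT;
		return 0;
	}

	int main(void)
	{
		struct lut gamma = { .size = LEGACY_LUT_LENGTH };
		enum gamma_mode mode;

		if (!color_check(NULL, &gamma, 33, 1024, 9, &mode))
			printf("gamma mode %d\n", mode);	/* 0 == GAMMA_8BIT */

		return 0;
	}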
diff --git a/drivers/gpu/drm/i915/intel_connector.c b/drivers/gpu/drm/i915/intel_connector.c
index 18e370f607bc..ee16758747c5 100644
--- a/drivers/gpu/drm/i915/intel_connector.c
+++ b/drivers/gpu/drm/i915/intel_connector.c
@@ -27,7 +27,6 @@
#include <linux/i2c.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
-#include <drm/drmP.h>
#include "intel_drv.h"
#include "i915_drv.h"
@@ -95,6 +94,10 @@ void intel_connector_destroy(struct drm_connector *connector)
intel_panel_fini(&intel_connector->panel);
drm_connector_cleanup(connector);
+
+ if (intel_connector->port)
+ drm_dp_mst_put_port_malloc(intel_connector->port);
+
kfree(connector);
}
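The intel_connector_destroy() hunk above releases the "malloc" reference the connector holds on its DP MST port, per the refactored MST refcounting in which a port's memory stays allocated as long as malloc references remain. The toy sketch below illustrates the idea; struct mst_port, port_get() and port_put() are invented names, not the drm_dp_mst_* helpers.

	#include <assert.h>
	#include <stdlib.h>

	/* toy stand-in for a DP MST port whose memory is pinned by malloc refs */
	struct mst_port {
		int malloc_refs;
	};

	static struct mst_port *port_get(struct mst_port *port)
	{
		port->malloc_refs++;
		return port;
	}

	static void port_put(struct mst_port *port)
	{
		assert(port->malloc_refs > 0);
		if (--port->malloc_refs == 0)
			free(port);	/* last reference frees the backing memory */
	}

	struct connector {
		struct mst_port *port;	/* holds one malloc reference, or NULL */
	};

	static struct connector *connector_create(struct mst_port *port)
	{
		struct connector *c = calloc(1, sizeof(*c));

		c->port = port_get(port);	/* keep the port's memory alive */
		return c;
	}

	static void connector_destroy(struct connector *c)
	{
		if (c->port)
			port_put(c->port);	/* mirrors drm_dp_mst_put_port_malloc() */
		free(c);
	}

	int main(void)
	{
		struct mst_port *port = calloc(1, sizeof(*port));
		struct connector *c;

		port->malloc_refs = 1;		/* reference held by the topology */
		c = connector_create(port);
		connector_destroy(c);
		port_put(port);			/* topology drops its reference */
		return 0;
	}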
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 68f2fb89ece3..3716b2ee362f 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -27,11 +27,10 @@
#include <linux/dmi.h>
#include <linux/i2c.h>
#include <linux/slab.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_probe_helper.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
@@ -84,15 +83,17 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crt *crt = intel_encoder_to_crt(encoder);
+ intel_wakeref_t wakeref;
bool ret;
- if (!intel_display_power_get_if_enabled(dev_priv,
- encoder->power_domain))
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ encoder->power_domain);
+ if (!wakeref)
return false;
ret = intel_crt_port_enabled(dev_priv, crt->adpa_reg, pipe);
- intel_display_power_put(dev_priv, encoder->power_domain);
+ intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
return ret;
}
@@ -322,7 +323,7 @@ intel_crt_mode_valid(struct drm_connector *connector,
* DAC limit supposedly 355 MHz.
*/
max_clock = 270000;
- else if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv))
+ else if (IS_GEN_RANGE(dev_priv, 3, 4))
max_clock = 400000;
else
max_clock = 350000;
@@ -344,51 +345,52 @@ intel_crt_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static bool intel_crt_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+static int intel_crt_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct drm_display_mode *adjusted_mode =
&pipe_config->base.adjusted_mode;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return false;
+ return -EINVAL;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
- return true;
+
+ return 0;
}
-static bool pch_crt_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+static int pch_crt_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct drm_display_mode *adjusted_mode =
&pipe_config->base.adjusted_mode;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return false;
+ return -EINVAL;
pipe_config->has_pch_encoder = true;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
- return true;
+ return 0;
}
-static bool hsw_crt_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+static int hsw_crt_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_display_mode *adjusted_mode =
&pipe_config->base.adjusted_mode;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return false;
+ return -EINVAL;
/* HSW/BDW FDI limited to 4k */
if (adjusted_mode->crtc_hdisplay > 4096 ||
adjusted_mode->crtc_hblank_start > 4096)
- return false;
+ return -EINVAL;
pipe_config->has_pch_encoder = true;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
@@ -397,7 +399,7 @@ static bool hsw_crt_compute_config(struct intel_encoder *encoder,
if (HAS_PCH_LPT(dev_priv)) {
if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) {
DRM_DEBUG_KMS("LPT only supports 24bpp\n");
- return false;
+ return -EINVAL;
}
pipe_config->pipe_bpp = 24;
@@ -406,7 +408,7 @@ static bool hsw_crt_compute_config(struct intel_encoder *encoder,
/* FDI must always be 2.7 GHz */
pipe_config->port_clock = 135000 * 2;
- return true;
+ return 0;
}
static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
@@ -629,19 +631,19 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
}
static enum drm_connector_status
-intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe)
+intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
{
struct drm_device *dev = crt->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- uint32_t save_bclrpat;
- uint32_t save_vtotal;
- uint32_t vtotal, vactive;
- uint32_t vsample;
- uint32_t vblank, vblank_start, vblank_end;
- uint32_t dsl;
+ u32 save_bclrpat;
+ u32 save_vtotal;
+ u32 vtotal, vactive;
+ u32 vsample;
+ u32 vblank, vblank_start, vblank_end;
+ u32 dsl;
i915_reg_t bclrpat_reg, vtotal_reg,
vblank_reg, vsync_reg, pipeconf_reg, pipe_dsl_reg;
- uint8_t st00;
+ u8 st00;
enum drm_connector_status status;
DRM_DEBUG_KMS("starting load-detect on CRT\n");
@@ -666,8 +668,8 @@ intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe)
/* Set the border color to purple. */
I915_WRITE(bclrpat_reg, 0x500050);
- if (!IS_GEN2(dev_priv)) {
- uint32_t pipeconf = I915_READ(pipeconf_reg);
+ if (!IS_GEN(dev_priv, 2)) {
+ u32 pipeconf = I915_READ(pipeconf_reg);
I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER);
POSTING_READ(pipeconf_reg);
/* Wait for next Vblank to substitute
@@ -688,8 +690,8 @@ intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe)
* Yes, this will flicker
*/
if (vblank_start <= vactive && vblank_end >= vtotal) {
- uint32_t vsync = I915_READ(vsync_reg);
- uint32_t vsync_start = (vsync & 0xffff) + 1;
+ u32 vsync = I915_READ(vsync_reg);
+ u32 vsync_start = (vsync & 0xffff) + 1;
vblank_start = vsync_start;
I915_WRITE(vblank_reg,
@@ -777,6 +779,7 @@ intel_crt_detect(struct drm_connector *connector,
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_crt *crt = intel_attached_crt(connector);
struct intel_encoder *intel_encoder = &crt->base;
+ intel_wakeref_t wakeref;
int status, ret;
struct intel_load_detect_pipe tmp;
@@ -785,7 +788,8 @@ intel_crt_detect(struct drm_connector *connector,
force);
if (i915_modparams.load_detect_test) {
- intel_display_power_get(dev_priv, intel_encoder->power_domain);
+ wakeref = intel_display_power_get(dev_priv,
+ intel_encoder->power_domain);
goto load_detect;
}
@@ -793,7 +797,8 @@ intel_crt_detect(struct drm_connector *connector,
if (dmi_check_system(intel_spurious_crt_detect))
return connector_status_disconnected;
- intel_display_power_get(dev_priv, intel_encoder->power_domain);
+ wakeref = intel_display_power_get(dev_priv,
+ intel_encoder->power_domain);
if (I915_HAS_HOTPLUG(dev_priv)) {
/* We can not rely on the HPD pin always being correctly wired
@@ -848,7 +853,7 @@ load_detect:
}
out:
- intel_display_power_put(dev_priv, intel_encoder->power_domain);
+ intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
return status;
}
@@ -858,10 +863,12 @@ static int intel_crt_get_modes(struct drm_connector *connector)
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crt *crt = intel_attached_crt(connector);
struct intel_encoder *intel_encoder = &crt->base;
- int ret;
+ intel_wakeref_t wakeref;
struct i2c_adapter *i2c;
+ int ret;
- intel_display_power_get(dev_priv, intel_encoder->power_domain);
+ wakeref = intel_display_power_get(dev_priv,
+ intel_encoder->power_domain);
i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin);
ret = intel_crt_ddc_get_modes(connector, i2c);
@@ -873,7 +880,7 @@ static int intel_crt_get_modes(struct drm_connector *connector)
ret = intel_crt_ddc_get_modes(connector, i2c);
out:
- intel_display_power_put(dev_priv, intel_encoder->power_domain);
+ intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
return ret;
}
@@ -981,7 +988,7 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
else
crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
- if (IS_GEN2(dev_priv))
+ if (IS_GEN(dev_priv, 2))
connector->interlace_allowed = 0;
else
connector->interlace_allowed = 1;
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index a516697bf57d..e8ac04c33e29 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -70,50 +70,50 @@ MODULE_FIRMWARE(BXT_CSR_PATH);
struct intel_css_header {
/* 0x09 for DMC */
- uint32_t module_type;
+ u32 module_type;
/* Includes the DMC specific header in dwords */
- uint32_t header_len;
+ u32 header_len;
/* value is always 0x10000 */
- uint32_t header_ver;
+ u32 header_ver;
/* Not used */
- uint32_t module_id;
+ u32 module_id;
/* Not used */
- uint32_t module_vendor;
+ u32 module_vendor;
/* in YYYYMMDD format */
- uint32_t date;
+ u32 date;
/* Size in dwords (CSS_Headerlen + PackageHeaderLen + dmc FWsLen)/4 */
- uint32_t size;
+ u32 size;
/* Not used */
- uint32_t key_size;
+ u32 key_size;
/* Not used */
- uint32_t modulus_size;
+ u32 modulus_size;
/* Not used */
- uint32_t exponent_size;
+ u32 exponent_size;
/* Not used */
- uint32_t reserved1[12];
+ u32 reserved1[12];
/* Major Minor */
- uint32_t version;
+ u32 version;
/* Not used */
- uint32_t reserved2[8];
+ u32 reserved2[8];
/* Not used */
- uint32_t kernel_header_info;
+ u32 kernel_header_info;
} __packed;
struct intel_fw_info {
- uint16_t reserved1;
+ u16 reserved1;
/* Stepping (A, B, C, ..., *). * is a wildcard */
char stepping;
@@ -121,8 +121,8 @@ struct intel_fw_info {
/* Sub-stepping (0, 1, ..., *). * is a wildcard */
char substepping;
- uint32_t offset;
- uint32_t reserved2;
+ u32 offset;
+ u32 reserved2;
} __packed;
struct intel_package_header {
@@ -135,14 +135,14 @@ struct intel_package_header {
unsigned char reserved[10];
/* Number of valid entries in the FWInfo array below */
- uint32_t num_entries;
+ u32 num_entries;
struct intel_fw_info fw_info[20];
} __packed;
struct intel_dmc_header {
/* value is always 0x40403E3E */
- uint32_t signature;
+ u32 signature;
/* DMC binary header length */
unsigned char header_len;
@@ -151,30 +151,30 @@ struct intel_dmc_header {
unsigned char header_ver;
/* Reserved */
- uint16_t dmcc_ver;
+ u16 dmcc_ver;
/* Major, Minor */
- uint32_t project;
+ u32 project;
/* Firmware program size (excluding header) in dwords */
- uint32_t fw_size;
+ u32 fw_size;
/* Major Minor version */
- uint32_t fw_version;
+ u32 fw_version;
/* Number of valid MMIO cycles present. */
- uint32_t mmio_count;
+ u32 mmio_count;
/* MMIO address */
- uint32_t mmioaddr[8];
+ u32 mmioaddr[8];
/* MMIO data */
- uint32_t mmiodata[8];
+ u32 mmiodata[8];
/* FW filename */
unsigned char dfile[32];
- uint32_t reserved1[2];
+ u32 reserved1[2];
} __packed;
struct stepping_info {
@@ -230,7 +230,7 @@ intel_get_stepping_info(struct drm_i915_private *dev_priv)
static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
{
- uint32_t val, mask;
+ u32 val, mask;
mask = DC_STATE_DEBUG_MASK_MEMORY_UP;
@@ -257,7 +257,7 @@ static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
void intel_csr_load_program(struct drm_i915_private *dev_priv)
{
u32 *payload = dev_priv->csr.dmc_payload;
- uint32_t i, fw_size;
+ u32 i, fw_size;
if (!HAS_CSR(dev_priv)) {
DRM_ERROR("No CSR support available for this platform\n");
@@ -289,17 +289,17 @@ void intel_csr_load_program(struct drm_i915_private *dev_priv)
gen9_set_dc_state_debugmask(dev_priv);
}
-static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
- const struct firmware *fw)
+static u32 *parse_csr_fw(struct drm_i915_private *dev_priv,
+ const struct firmware *fw)
{
struct intel_css_header *css_header;
struct intel_package_header *package_header;
struct intel_dmc_header *dmc_header;
struct intel_csr *csr = &dev_priv->csr;
const struct stepping_info *si = intel_get_stepping_info(dev_priv);
- uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
- uint32_t i;
- uint32_t *dmc_payload;
+ u32 dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
+ u32 i;
+ u32 *dmc_payload;
if (!fw)
return NULL;
@@ -409,6 +409,21 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
return memcpy(dmc_payload, &fw->data[readcount], nbytes);
}
+static void intel_csr_runtime_pm_get(struct drm_i915_private *dev_priv)
+{
+ WARN_ON(dev_priv->csr.wakeref);
+ dev_priv->csr.wakeref =
+ intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+}
+
+static void intel_csr_runtime_pm_put(struct drm_i915_private *dev_priv)
+{
+ intel_wakeref_t wakeref __maybe_unused =
+ fetch_and_zero(&dev_priv->csr.wakeref);
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
+}
+
static void csr_load_work_fn(struct work_struct *work)
{
struct drm_i915_private *dev_priv;
@@ -424,8 +439,7 @@ static void csr_load_work_fn(struct work_struct *work)
if (dev_priv->csr.dmc_payload) {
intel_csr_load_program(dev_priv);
-
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+ intel_csr_runtime_pm_put(dev_priv);
DRM_INFO("Finished loading DMC firmware %s (v%u.%u)\n",
dev_priv->csr.fw_path,
@@ -467,7 +481,7 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
* suspend as runtime suspend *requires* a working CSR for whatever
* reason.
*/
- intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+ intel_csr_runtime_pm_get(dev_priv);
if (INTEL_GEN(dev_priv) >= 12) {
/* Allow to load fw via parameter using the last known size */
@@ -538,7 +552,7 @@ void intel_csr_ucode_suspend(struct drm_i915_private *dev_priv)
/* Drop the reference held in case DMC isn't loaded. */
if (!dev_priv->csr.dmc_payload)
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+ intel_csr_runtime_pm_put(dev_priv);
}
/**
@@ -558,7 +572,7 @@ void intel_csr_ucode_resume(struct drm_i915_private *dev_priv)
* loaded.
*/
if (!dev_priv->csr.dmc_payload)
- intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+ intel_csr_runtime_pm_get(dev_priv);
}
/**
@@ -574,6 +588,7 @@ void intel_csr_ucode_fini(struct drm_i915_private *dev_priv)
return;
intel_csr_ucode_suspend(dev_priv);
+ WARN_ON(dev_priv->csr.wakeref);
kfree(dev_priv->csr.dmc_payload);
}
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 7edce1b7b348..ca705546a0ab 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -974,7 +974,7 @@ static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port));
}
-static uint32_t hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
+static u32 hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
{
switch (pll->info->id) {
case DPLL_ID_WRPLL1:
@@ -995,8 +995,8 @@ static uint32_t hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
}
}
-static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+static u32 icl_pll_to_ddi_clk_sel(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
int clock = crtc_state->port_clock;
@@ -1004,10 +1004,11 @@ static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder,
switch (id) {
default:
+ /*
+ * DPLL_ID_ICL_DPLL0 and DPLL_ID_ICL_DPLL1 should not be used
+ * here, so do warn if one gets passed in
+ */
MISSING_CASE(id);
- /* fall through */
- case DPLL_ID_ICL_DPLL0:
- case DPLL_ID_ICL_DPLL1:
return DDI_CLK_SEL_NONE;
case DPLL_ID_ICL_TBTPLL:
switch (clock) {
@@ -1243,8 +1244,8 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
enum intel_dpll_id pll_id)
{
i915_reg_t cfgcr1_reg, cfgcr2_reg;
- uint32_t cfgcr1_val, cfgcr2_val;
- uint32_t p0, p1, p2, dco_freq;
+ u32 cfgcr1_val, cfgcr2_val;
+ u32 p0, p1, p2, dco_freq;
cfgcr1_reg = DPLL_CFGCR1(pll_id);
cfgcr2_reg = DPLL_CFGCR2(pll_id);
@@ -1296,14 +1297,17 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
dco_freq += (((cfgcr1_val & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) * 24 *
1000) / 0x8000;
+ if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0))
+ return 0;
+
return dco_freq / (p0 * p1 * p2 * 5);
}
int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
enum intel_dpll_id pll_id)
{
- uint32_t cfgcr0, cfgcr1;
- uint32_t p0, p1, p2, dco_freq, ref_clock;
+ u32 cfgcr0, cfgcr1;
+ u32 p0, p1, p2, dco_freq, ref_clock;
if (INTEL_GEN(dev_priv) >= 11) {
cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(pll_id));
@@ -1388,16 +1392,17 @@ static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv,
static int icl_calc_mg_pll_link(struct drm_i915_private *dev_priv,
enum port port)
{
+ enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
u32 mg_pll_div0, mg_clktop_hsclkctl;
u32 m1, m2_int, m2_frac, div1, div2, refclk;
u64 tmp;
refclk = dev_priv->cdclk.hw.ref;
- mg_pll_div0 = I915_READ(MG_PLL_DIV0(port));
- mg_clktop_hsclkctl = I915_READ(MG_CLKTOP2_HSCLKCTL(port));
+ mg_pll_div0 = I915_READ(MG_PLL_DIV0(tc_port));
+ mg_clktop_hsclkctl = I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
- m1 = I915_READ(MG_PLL_DIV1(port)) & MG_PLL_DIV1_FBPREDIV_MASK;
+ m1 = I915_READ(MG_PLL_DIV1(tc_port)) & MG_PLL_DIV1_FBPREDIV_MASK;
m2_int = mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
m2_frac = (mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) ?
(mg_pll_div0 & MG_PLL_DIV0_FBDIV_FRAC_MASK) >>
@@ -1468,7 +1473,7 @@ static void icl_ddi_clock_get(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
int link_clock = 0;
- uint32_t pll_id;
+ u32 pll_id;
pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
if (intel_port_is_combophy(dev_priv, port)) {
@@ -1493,7 +1498,7 @@ static void cnl_ddi_clock_get(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int link_clock = 0;
- uint32_t cfgcr0;
+ u32 cfgcr0;
enum intel_dpll_id pll_id;
pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
@@ -1547,7 +1552,7 @@ static void skl_ddi_clock_get(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int link_clock = 0;
- uint32_t dpll_ctl1;
+ u32 dpll_ctl1;
enum intel_dpll_id pll_id;
pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
@@ -1736,7 +1741,7 @@ void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- uint32_t temp;
+ u32 temp;
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (state == true)
@@ -1754,7 +1759,7 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
enum pipe pipe = crtc->pipe;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum port port = encoder->port;
- uint32_t temp;
+ u32 temp;
/* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */
temp = TRANS_DDI_FUNC_ENABLE;
@@ -1815,7 +1820,7 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
temp |= TRANS_DDI_MODE_SELECT_DVI;
if (crtc_state->hdmi_scrambling)
- temp |= TRANS_DDI_HDMI_SCRAMBLING_MASK;
+ temp |= TRANS_DDI_HDMI_SCRAMBLING;
if (crtc_state->hdmi_high_tmds_clock_ratio)
temp |= TRANS_DDI_HIGH_TMDS_CHAR_RATE;
} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
@@ -1838,7 +1843,7 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
- uint32_t val = I915_READ(reg);
+ u32 val = I915_READ(reg);
val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
val |= TRANS_DDI_PORT_NONE;
@@ -1857,12 +1862,14 @@ int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
{
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
+ intel_wakeref_t wakeref;
enum pipe pipe = 0;
int ret = 0;
- uint32_t tmp;
+ u32 tmp;
- if (WARN_ON(!intel_display_power_get_if_enabled(dev_priv,
- intel_encoder->power_domain)))
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ intel_encoder->power_domain);
+ if (WARN_ON(!wakeref))
return -ENXIO;
if (WARN_ON(!intel_encoder->get_hw_state(intel_encoder, &pipe))) {
@@ -1877,7 +1884,7 @@ int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
tmp &= ~TRANS_DDI_HDCP_SIGNALLING;
I915_WRITE(TRANS_DDI_FUNC_CTL(pipe), tmp);
out:
- intel_display_power_put(dev_priv, intel_encoder->power_domain);
+ intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
return ret;
}
@@ -1888,13 +1895,15 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
struct intel_encoder *encoder = intel_connector->encoder;
int type = intel_connector->base.connector_type;
enum port port = encoder->port;
- enum pipe pipe = 0;
enum transcoder cpu_transcoder;
- uint32_t tmp;
+ intel_wakeref_t wakeref;
+ enum pipe pipe = 0;
+ u32 tmp;
bool ret;
- if (!intel_display_power_get_if_enabled(dev_priv,
- encoder->power_domain))
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ encoder->power_domain);
+ if (!wakeref)
return false;
if (!encoder->get_hw_state(encoder, &pipe)) {
@@ -1936,7 +1945,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
}
out:
- intel_display_power_put(dev_priv, encoder->power_domain);
+ intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
return ret;
}
@@ -1947,6 +1956,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = encoder->port;
+ intel_wakeref_t wakeref;
enum pipe p;
u32 tmp;
u8 mst_pipe_mask;
@@ -1954,8 +1964,9 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
*pipe_mask = 0;
*is_dp_mst = false;
- if (!intel_display_power_get_if_enabled(dev_priv,
- encoder->power_domain))
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ encoder->power_domain);
+ if (!wakeref)
return;
tmp = I915_READ(DDI_BUF_CTL(port));
@@ -2026,7 +2037,7 @@ out:
"(PHY_CTL %08x)\n", port_name(port), tmp);
}
- intel_display_power_put(dev_priv, encoder->power_domain);
+ intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
}
bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
@@ -2123,7 +2134,7 @@ void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state)
}
static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
- enum port port, uint8_t iboost)
+ enum port port, u8 iboost)
{
u32 tmp;
@@ -2142,7 +2153,7 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
- uint8_t iboost;
+ u8 iboost;
if (type == INTEL_OUTPUT_HDMI)
iboost = dev_priv->vbt.ddi_port_info[port].hdmi_boost_level;
@@ -2656,7 +2667,7 @@ static void icl_ddi_vswing_sequence(struct intel_encoder *encoder,
icl_mg_phy_ddi_vswing_sequence(encoder, link_clock, level);
}
-static uint32_t translate_signal_level(int signal_levels)
+static u32 translate_signal_level(int signal_levels)
{
int i;
@@ -2671,9 +2682,9 @@ static uint32_t translate_signal_level(int signal_levels)
return 0;
}
-static uint32_t intel_ddi_dp_level(struct intel_dp *intel_dp)
+static u32 intel_ddi_dp_level(struct intel_dp *intel_dp)
{
- uint8_t train_set = intel_dp->train_set[0];
+ u8 train_set = intel_dp->train_set[0];
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
@@ -2698,7 +2709,7 @@ u32 bxt_signal_levels(struct intel_dp *intel_dp)
return 0;
}
-uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
+u32 ddi_signal_levels(struct intel_dp *intel_dp)
{
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
@@ -2712,8 +2723,8 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
}
static inline
-uint32_t icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv,
- enum port port)
+u32 icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv,
+ enum port port)
{
if (intel_port_is_combophy(dev_priv, port)) {
return ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(port);
@@ -2848,7 +2859,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
- uint32_t val;
+ u32 val;
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
if (WARN_ON(!pll))
@@ -2859,7 +2870,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
if (IS_ICELAKE(dev_priv)) {
if (!intel_port_is_combophy(dev_priv, port))
I915_WRITE(DDI_CLK_SEL(port),
- icl_pll_to_ddi_pll_sel(encoder, crtc_state));
+ icl_pll_to_ddi_clk_sel(encoder, crtc_state));
} else if (IS_CANNONLAKE(dev_priv)) {
/* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */
val = I915_READ(DPCLKA_CFGCR0);
@@ -3283,7 +3294,8 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
intel_edp_panel_vdd_on(intel_dp);
intel_edp_panel_off(intel_dp);
- intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain);
+ intel_display_power_put_unchecked(dev_priv,
+ dig_port->ddi_io_power_domain);
intel_ddi_clk_disable(encoder);
}
@@ -3303,7 +3315,8 @@ static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
intel_disable_ddi_buf(encoder, old_crtc_state);
- intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain);
+ intel_display_power_put_unchecked(dev_priv,
+ dig_port->ddi_io_power_domain);
intel_ddi_clk_disable(encoder);
@@ -3345,7 +3358,7 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- uint32_t val;
+ u32 val;
/*
* Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
@@ -3537,6 +3550,26 @@ static void intel_disable_ddi(struct intel_encoder *encoder,
intel_disable_ddi_dp(encoder, old_crtc_state, old_conn_state);
}
+static void intel_ddi_update_pipe_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+ intel_psr_enable(intel_dp, crtc_state);
+ intel_edp_drrs_enable(intel_dp, crtc_state);
+
+ intel_panel_update_backlight(encoder, crtc_state, conn_state);
+}
+
+static void intel_ddi_update_pipe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ intel_ddi_update_pipe_dp(encoder, crtc_state, conn_state);
+}
+
static void intel_ddi_set_fia_lane_count(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
enum port port)
@@ -3605,8 +3638,8 @@ intel_ddi_post_pll_disable(struct intel_encoder *encoder,
if (intel_crtc_has_dp_encoder(crtc_state) ||
intel_port_is_tc(dev_priv, encoder->port))
- intel_display_power_put(dev_priv,
- intel_ddi_main_link_aux_domain(dig_port));
+ intel_display_power_put_unchecked(dev_priv,
+ intel_ddi_main_link_aux_domain(dig_port));
}
void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
@@ -3615,7 +3648,7 @@ void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv =
to_i915(intel_dig_port->base.base.dev);
enum port port = intel_dig_port->base.port;
- uint32_t val;
+ u32 val;
bool wait = false;
if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) {
@@ -3727,8 +3760,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
if (intel_dig_port->infoframe_enabled(encoder, pipe_config))
pipe_config->has_infoframe = true;
- if ((temp & TRANS_DDI_HDMI_SCRAMBLING_MASK) ==
- TRANS_DDI_HDMI_SCRAMBLING_MASK)
+ if (temp & TRANS_DDI_HDMI_SCRAMBLING)
pipe_config->hdmi_scrambling = true;
if (temp & TRANS_DDI_HIGH_TMDS_CHAR_RATE)
pipe_config->hdmi_high_tmds_clock_ratio = true;
@@ -3809,9 +3841,9 @@ intel_ddi_compute_output_type(struct intel_encoder *encoder,
}
}
-static bool intel_ddi_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+static int intel_ddi_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
@@ -3835,9 +3867,50 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
}
+static void intel_ddi_encoder_suspend(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ intel_dp_encoder_suspend(encoder);
+
+ /*
+ * TODO: disconnect also from USB DP alternate mode once we have a
+ * way to handle the modeset restore in that mode during resume
+ * even if the sink has disappeared while being suspended.
+ */
+ if (dig_port->tc_legacy_port)
+ icl_tc_phy_disconnect(i915, dig_port);
+}
+
+static void intel_ddi_encoder_reset(struct drm_encoder *drm_encoder)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(drm_encoder);
+ struct drm_i915_private *i915 = to_i915(drm_encoder->dev);
+
+ if (intel_port_is_tc(i915, dig_port->base.port))
+ intel_digital_port_connected(&dig_port->base);
+
+ intel_dp_encoder_reset(drm_encoder);
+}
+
+static void intel_ddi_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct drm_i915_private *i915 = to_i915(encoder->dev);
+
+ intel_dp_encoder_flush_work(encoder);
+
+ if (intel_port_is_tc(i915, dig_port->base.port))
+ icl_tc_phy_disconnect(i915, dig_port);
+
+ drm_encoder_cleanup(encoder);
+ kfree(dig_port);
+}
+
static const struct drm_encoder_funcs intel_ddi_funcs = {
- .reset = intel_dp_encoder_reset,
- .destroy = intel_dp_encoder_destroy,
+ .reset = intel_ddi_encoder_reset,
+ .destroy = intel_ddi_encoder_destroy,
};
static struct intel_connector *
@@ -4081,16 +4154,16 @@ intel_ddi_max_lanes(struct intel_digital_port *intel_dport)
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
{
+ struct ddi_vbt_port_info *port_info =
+ &dev_priv->vbt.ddi_port_info[port];
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
bool init_hdmi, init_dp, init_lspcon = false;
enum pipe pipe;
-
- init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
- dev_priv->vbt.ddi_port_info[port].supports_hdmi);
- init_dp = dev_priv->vbt.ddi_port_info[port].supports_dp;
+ init_hdmi = port_info->supports_dvi || port_info->supports_hdmi;
+ init_dp = port_info->supports_dp;
if (intel_bios_is_lspcon_present(dev_priv, port)) {
/*
@@ -4129,9 +4202,10 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_encoder->pre_enable = intel_ddi_pre_enable;
intel_encoder->disable = intel_disable_ddi;
intel_encoder->post_disable = intel_ddi_post_disable;
+ intel_encoder->update_pipe = intel_ddi_update_pipe;
intel_encoder->get_hw_state = intel_ddi_get_hw_state;
intel_encoder->get_config = intel_ddi_get_config;
- intel_encoder->suspend = intel_dp_encoder_suspend;
+ intel_encoder->suspend = intel_ddi_encoder_suspend;
intel_encoder->get_power_domains = intel_ddi_get_power_domains;
intel_encoder->type = INTEL_OUTPUT_DDI;
intel_encoder->power_domain = intel_port_to_power_domain(port);
@@ -4150,6 +4224,10 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_dig_port->max_lanes = intel_ddi_max_lanes(intel_dig_port);
intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
+ intel_dig_port->tc_legacy_port = intel_port_is_tc(dev_priv, port) &&
+ !port_info->supports_typec_usb &&
+ !port_info->supports_tbt;
+
switch (port) {
case PORT_A:
intel_dig_port->ddi_io_power_domain =
@@ -4208,6 +4286,10 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
}
intel_infoframe_init(intel_dig_port);
+
+ if (intel_port_is_tc(dev_priv, port))
+ intel_digital_port_connected(intel_encoder);
+
return;
err:
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 1e56319334f3..855a5074ad77 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -104,7 +104,7 @@ static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg));
}
-void intel_device_info_dump_runtime(const struct intel_device_info *info,
+void intel_device_info_dump_runtime(const struct intel_runtime_info *info,
struct drm_printer *p)
{
sseu_dump(&info->sseu, p);
@@ -113,21 +113,6 @@ void intel_device_info_dump_runtime(const struct intel_device_info *info,
info->cs_timestamp_frequency_khz);
}
-void intel_device_info_dump(const struct intel_device_info *info,
- struct drm_printer *p)
-{
- struct drm_i915_private *dev_priv =
- container_of(info, struct drm_i915_private, info);
-
- drm_printf(p, "pciid=0x%04x rev=0x%02x platform=%s gen=%i\n",
- INTEL_DEVID(dev_priv),
- INTEL_REVID(dev_priv),
- intel_platform_name(info->platform),
- info->gen);
-
- intel_device_info_dump_flags(info, p);
-}
-
void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
struct drm_printer *p)
{
@@ -164,7 +149,7 @@ static u16 compute_eu_total(const struct sseu_dev_info *sseu)
static void gen11_sseu_info_init(struct drm_i915_private *dev_priv)
{
- struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
+ struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
u8 s_en;
u32 ss_en, ss_en_mask;
u8 eu_en;
@@ -203,7 +188,7 @@ static void gen11_sseu_info_init(struct drm_i915_private *dev_priv)
static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
{
- struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
+ struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
const u32 fuse2 = I915_READ(GEN8_FUSE2);
int s, ss;
const int eu_mask = 0xff;
@@ -280,7 +265,7 @@ static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
{
- struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
+ struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
u32 fuse;
fuse = I915_READ(CHV_FUSE_GT);
@@ -334,7 +319,7 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
{
struct intel_device_info *info = mkwrite_device_info(dev_priv);
- struct sseu_dev_info *sseu = &info->sseu;
+ struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
int s, ss;
u32 fuse2, eu_disable, subslice_mask;
const u8 eu_mask = 0xff;
@@ -437,7 +422,7 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
{
- struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
+ struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
int s, ss;
u32 fuse2, subslice_mask, eu_disable[3]; /* s_max */
@@ -519,8 +504,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
static void haswell_sseu_info_init(struct drm_i915_private *dev_priv)
{
- struct intel_device_info *info = mkwrite_device_info(dev_priv);
- struct sseu_dev_info *sseu = &info->sseu;
+ struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
u32 fuse1;
int s, ss;
@@ -528,9 +512,9 @@ static void haswell_sseu_info_init(struct drm_i915_private *dev_priv)
* There isn't a register to tell us how many slices/subslices. We
* work off the PCI-ids here.
*/
- switch (info->gt) {
+ switch (INTEL_INFO(dev_priv)->gt) {
default:
- MISSING_CASE(info->gt);
+ MISSING_CASE(INTEL_INFO(dev_priv)->gt);
/* fall through */
case 1:
sseu->slice_mask = BIT(0);
@@ -725,7 +709,7 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
/**
* intel_device_info_runtime_init - initialize runtime info
- * @info: intel device info struct
+ * @dev_priv: the i915 device
*
* Determine various intel_device_info fields at runtime.
*
@@ -739,29 +723,29 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
* - after the PCH has been detected,
* - before the first usage of the fields it can tweak.
*/
-void intel_device_info_runtime_init(struct intel_device_info *info)
+void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv =
- container_of(info, struct drm_i915_private, info);
+ struct intel_device_info *info = mkwrite_device_info(dev_priv);
+ struct intel_runtime_info *runtime = RUNTIME_INFO(dev_priv);
enum pipe pipe;
if (INTEL_GEN(dev_priv) >= 10) {
for_each_pipe(dev_priv, pipe)
- info->num_scalers[pipe] = 2;
- } else if (IS_GEN9(dev_priv)) {
- info->num_scalers[PIPE_A] = 2;
- info->num_scalers[PIPE_B] = 2;
- info->num_scalers[PIPE_C] = 1;
+ runtime->num_scalers[pipe] = 2;
+ } else if (IS_GEN(dev_priv, 9)) {
+ runtime->num_scalers[PIPE_A] = 2;
+ runtime->num_scalers[PIPE_B] = 2;
+ runtime->num_scalers[PIPE_C] = 1;
}
BUILD_BUG_ON(I915_NUM_ENGINES > BITS_PER_TYPE(intel_ring_mask_t));
- if (IS_GEN11(dev_priv))
+ if (IS_GEN(dev_priv, 11))
for_each_pipe(dev_priv, pipe)
- info->num_sprites[pipe] = 6;
- else if (IS_GEN10(dev_priv) || IS_GEMINILAKE(dev_priv))
+ runtime->num_sprites[pipe] = 6;
+ else if (IS_GEN(dev_priv, 10) || IS_GEMINILAKE(dev_priv))
for_each_pipe(dev_priv, pipe)
- info->num_sprites[pipe] = 3;
+ runtime->num_sprites[pipe] = 3;
else if (IS_BROXTON(dev_priv)) {
/*
* Skylake and Broxton currently don't expose the topmost plane as its
@@ -772,22 +756,22 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
* down the line.
*/
- info->num_sprites[PIPE_A] = 2;
- info->num_sprites[PIPE_B] = 2;
- info->num_sprites[PIPE_C] = 1;
+ runtime->num_sprites[PIPE_A] = 2;
+ runtime->num_sprites[PIPE_B] = 2;
+ runtime->num_sprites[PIPE_C] = 1;
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
for_each_pipe(dev_priv, pipe)
- info->num_sprites[pipe] = 2;
+ runtime->num_sprites[pipe] = 2;
} else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
for_each_pipe(dev_priv, pipe)
- info->num_sprites[pipe] = 1;
+ runtime->num_sprites[pipe] = 1;
}
if (i915_modparams.disable_display) {
DRM_INFO("Display disabled (module parameter)\n");
info->num_pipes = 0;
} else if (HAS_DISPLAY(dev_priv) &&
- (IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) &&
+ (IS_GEN_RANGE(dev_priv, 7, 8)) &&
HAS_PCH_SPLIT(dev_priv)) {
u32 fuse_strap = I915_READ(FUSE_STRAP);
u32 sfuse_strap = I915_READ(SFUSE_STRAP);
@@ -811,7 +795,7 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
DRM_INFO("PipeC fused off\n");
info->num_pipes -= 1;
}
- } else if (HAS_DISPLAY(dev_priv) && IS_GEN9(dev_priv)) {
+ } else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) {
u32 dfsm = I915_READ(SKL_DFSM);
u8 disabled_mask = 0;
bool invalid;
@@ -851,20 +835,20 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
cherryview_sseu_info_init(dev_priv);
else if (IS_BROADWELL(dev_priv))
broadwell_sseu_info_init(dev_priv);
- else if (IS_GEN9(dev_priv))
+ else if (IS_GEN(dev_priv, 9))
gen9_sseu_info_init(dev_priv);
- else if (IS_GEN10(dev_priv))
+ else if (IS_GEN(dev_priv, 10))
gen10_sseu_info_init(dev_priv);
else if (INTEL_GEN(dev_priv) >= 11)
gen11_sseu_info_init(dev_priv);
- if (IS_GEN6(dev_priv) && intel_vtd_active()) {
+ if (IS_GEN(dev_priv, 6) && intel_vtd_active()) {
DRM_INFO("Disabling ppGTT for VT-d support\n");
info->ppgtt = INTEL_PPGTT_NONE;
}
/* Initialize command stream timestamp frequency */
- info->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);
+ runtime->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);
}
void intel_driver_caps_print(const struct intel_driver_caps *caps,
@@ -884,35 +868,44 @@ void intel_driver_caps_print(const struct intel_driver_caps *caps,
void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
{
struct intel_device_info *info = mkwrite_device_info(dev_priv);
- u32 media_fuse;
+ unsigned int logical_vdbox = 0;
unsigned int i;
+ u32 media_fuse;
if (INTEL_GEN(dev_priv) < 11)
return;
media_fuse = ~I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);
- info->vdbox_enable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
- info->vebox_enable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
- GEN11_GT_VEBOX_DISABLE_SHIFT;
+ RUNTIME_INFO(dev_priv)->vdbox_enable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
+ RUNTIME_INFO(dev_priv)->vebox_enable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
+ GEN11_GT_VEBOX_DISABLE_SHIFT;
- DRM_DEBUG_DRIVER("vdbox enable: %04x\n", info->vdbox_enable);
+ DRM_DEBUG_DRIVER("vdbox enable: %04x\n", RUNTIME_INFO(dev_priv)->vdbox_enable);
for (i = 0; i < I915_MAX_VCS; i++) {
if (!HAS_ENGINE(dev_priv, _VCS(i)))
continue;
- if (!(BIT(i) & info->vdbox_enable)) {
+ if (!(BIT(i) & RUNTIME_INFO(dev_priv)->vdbox_enable)) {
info->ring_mask &= ~ENGINE_MASK(_VCS(i));
DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
+ continue;
}
+
+ /*
+ * In Gen11, only even numbered logical VDBOXes are
+ * hooked up to an SFC (Scaler & Format Converter) unit.
+ */
+ if (logical_vdbox++ % 2 == 0)
+ RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i);
}
- DRM_DEBUG_DRIVER("vebox enable: %04x\n", info->vebox_enable);
+ DRM_DEBUG_DRIVER("vebox enable: %04x\n", RUNTIME_INFO(dev_priv)->vebox_enable);
for (i = 0; i < I915_MAX_VECS; i++) {
if (!HAS_ENGINE(dev_priv, _VECS(i)))
continue;
- if (!(BIT(i) & info->vebox_enable)) {
+ if (!(BIT(i) & RUNTIME_INFO(dev_priv)->vebox_enable)) {
info->ring_mask &= ~ENGINE_MASK(_VECS(i));
DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
}
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 1caf24e2cf0b..e8b8661df746 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -89,6 +89,7 @@ enum intel_ppgtt {
func(is_alpha_support); \
/* Keep has_* in alphabetical order */ \
func(has_64bit_reloc); \
+ func(gpu_reset_clobbers_display); \
func(has_reset_engine); \
func(has_fpga_dbg); \
func(has_guc); \
@@ -114,7 +115,7 @@ enum intel_ppgtt {
func(has_ddi); \
func(has_dp_mst); \
func(has_fbc); \
- func(has_gmch_display); \
+ func(has_gmch); \
func(has_hotplug); \
func(has_ipc); \
func(has_overlay); \
@@ -152,12 +153,10 @@ struct sseu_dev_info {
typedef u8 intel_ring_mask_t;
struct intel_device_info {
- u16 device_id;
u16 gen_mask;
u8 gen;
u8 gt; /* GT number, 0 if undefined */
- u8 num_rings;
intel_ring_mask_t ring_mask; /* Rings supported by the HW */
enum intel_platform platform;
@@ -169,8 +168,6 @@ struct intel_device_info {
u32 display_mmio_offset;
u8 num_pipes;
- u8 num_sprites[I915_MAX_PIPES];
- u8 num_scalers[I915_MAX_PIPES];
#define DEFINE_FLAG(name) u8 name:1
DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
@@ -189,6 +186,22 @@ struct intel_device_info {
int trans_offsets[I915_MAX_TRANSCODERS];
int cursor_offsets[I915_MAX_PIPES];
+ struct color_luts {
+ u16 degamma_lut_size;
+ u16 gamma_lut_size;
+ u32 degamma_lut_tests;
+ u32 gamma_lut_tests;
+ } color;
+};
+
+struct intel_runtime_info {
+ u16 device_id;
+
+ u8 num_sprites[I915_MAX_PIPES];
+ u8 num_scalers[I915_MAX_PIPES];
+
+ u8 num_rings;
+
/* Slice/subslice/EU info */
struct sseu_dev_info sseu;
@@ -198,10 +211,8 @@ struct intel_device_info {
u8 vdbox_enable;
u8 vebox_enable;
- struct color_luts {
- u16 degamma_lut_size;
- u16 gamma_lut_size;
- } color;
+ /* Media engine access to SFC per instance */
+ u8 vdbox_sfc_access;
};
struct intel_driver_caps {
@@ -258,12 +269,10 @@ static inline void sseu_set_eus(struct sseu_dev_info *sseu,
const char *intel_platform_name(enum intel_platform platform);
-void intel_device_info_runtime_init(struct intel_device_info *info);
-void intel_device_info_dump(const struct intel_device_info *info,
- struct drm_printer *p);
+void intel_device_info_runtime_init(struct drm_i915_private *dev_priv);
void intel_device_info_dump_flags(const struct intel_device_info *info,
struct drm_printer *p);
-void intel_device_info_dump_runtime(const struct intel_device_info *info,
+void intel_device_info_dump_runtime(const struct intel_runtime_info *info,
struct drm_printer *p);
void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
struct drm_printer *p);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 248128126422..ccb616351bba 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -24,33 +24,44 @@
* Eric Anholt <eric@anholt.net>
*/
-#include <linux/module.h>
-#include <linux/input.h>
#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/intel-iommu.h>
#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/reservation.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
-#include <drm/drmP.h>
-#include "intel_drv.h"
-#include "intel_frontbuffer.h"
+#include <drm/drm_fourcc.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_rect.h>
#include <drm/i915_drm.h>
+
#include "i915_drv.h"
#include "i915_gem_clflush.h"
+#include "i915_trace.h"
+#include "intel_drv.h"
+#include "intel_dsi.h"
+#include "intel_frontbuffer.h"
+
+#include "intel_drv.h"
#include "intel_dsi.h"
+#include "intel_frontbuffer.h"
+
+#include "i915_drv.h"
+#include "i915_gem_clflush.h"
+#include "i915_reset.h"
#include "i915_trace.h"
-#include <drm/drm_atomic.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_dp_helper.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_plane_helper.h>
-#include <drm/drm_rect.h>
-#include <drm/drm_atomic_uapi.h>
-#include <linux/intel-iommu.h>
-#include <linux/reservation.h>
/* Primary plane formats for gen <= 3 */
-static const uint32_t i8xx_primary_formats[] = {
+static const u32 i8xx_primary_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB1555,
@@ -58,7 +69,7 @@ static const uint32_t i8xx_primary_formats[] = {
};
/* Primary plane formats for gen >= 4 */
-static const uint32_t i965_primary_formats[] = {
+static const u32 i965_primary_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
@@ -67,18 +78,18 @@ static const uint32_t i965_primary_formats[] = {
DRM_FORMAT_XBGR2101010,
};
-static const uint64_t i9xx_format_modifiers[] = {
+static const u64 i9xx_format_modifiers[] = {
I915_FORMAT_MOD_X_TILED,
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
/* Cursor formats */
-static const uint32_t intel_cursor_formats[] = {
+static const u32 intel_cursor_formats[] = {
DRM_FORMAT_ARGB8888,
};
-static const uint64_t cursor_format_modifiers[] = {
+static const u64 cursor_format_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
@@ -494,7 +505,7 @@ static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
return clock->dot;
}
-static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
+static u32 i9xx_dpll_compute_m(struct dpll *dpll)
{
return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}
@@ -529,8 +540,8 @@ int chv_calc_dpll_params(int refclk, struct dpll *clock)
clock->p = clock->p1 * clock->p2;
if (WARN_ON(clock->n == 0 || clock->p == 0))
return 0;
- clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
- clock->n << 22);
+ clock->vco = DIV_ROUND_CLOSEST_ULL((u64)refclk * clock->m,
+ clock->n << 22);
clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
return clock->dot / 5;
@@ -892,7 +903,7 @@ chv_find_best_dpll(const struct intel_limit *limit,
struct drm_device *dev = crtc->base.dev;
unsigned int best_error_ppm;
struct dpll clock;
- uint64_t m2;
+ u64 m2;
int found = false;
memset(best_clock, 0, sizeof(*best_clock));
@@ -914,7 +925,7 @@ chv_find_best_dpll(const struct intel_limit *limit,
clock.p = clock.p1 * clock.p2;
- m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
+ m2 = DIV_ROUND_CLOSEST_ULL(((u64)target * clock.p *
clock.n) << 22, refclk * clock.m1);
if (m2 > INT_MAX/clock.m1)
@@ -984,7 +995,7 @@ static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
u32 line1, line2;
u32 line_mask;
- if (IS_GEN2(dev_priv))
+ if (IS_GEN(dev_priv, 2))
line_mask = DSL_LINEMASK_GEN2;
else
line_mask = DSL_LINEMASK_GEN3;
@@ -1110,7 +1121,7 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
u32 val;
/* ILK FDI PLL is always enabled */
- if (IS_GEN5(dev_priv))
+ if (IS_GEN(dev_priv, 5))
return;
/* On Haswell, DDI ports are responsible for the FDI PLL setup */
@@ -1198,17 +1209,19 @@ void assert_pipe(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
enum intel_display_power_domain power_domain;
+ intel_wakeref_t wakeref;
/* we keep both pipes enabled on 830 */
if (IS_I830(dev_priv))
state = true;
power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
- if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (wakeref) {
u32 val = I915_READ(PIPECONF(cpu_transcoder));
cur_state = !!(val & PIPECONF_ENABLE);
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, power_domain, wakeref);
} else {
cur_state = false;
}
@@ -1609,7 +1622,7 @@ static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_s
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
- uint32_t val, pipeconf_val;
+ u32 val, pipeconf_val;
/* Make sure PCH DPLL is enabled */
assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);
@@ -1697,7 +1710,7 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
i915_reg_t reg;
- uint32_t val;
+ u32 val;
/* FDI relies on the transcoder */
assert_fdi_tx_disabled(dev_priv, pipe);
@@ -1754,6 +1767,35 @@ enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
return crtc->pipe;
}
+static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+
+ /*
+ * On i965gm the hardware frame counter reads
+ * zero when the TV encoder is enabled :(
+ */
+ if (IS_I965GM(dev_priv) &&
+ (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
+ return 0;
+
+ if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+ return 0xffffffff; /* full 32 bit counter */
+ else if (INTEL_GEN(dev_priv) >= 3)
+ return 0xffffff; /* only 24 bits of frame count */
+ else
+ return 0; /* Gen2 doesn't have a hardware frame counter */
+}
+
+static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+
+ drm_crtc_set_max_vblank_count(&crtc->base,
+ intel_crtc_max_vblank_count(crtc_state));
+ drm_crtc_vblank_on(&crtc->base);
+}
+
static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
@@ -1772,7 +1814,7 @@ static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
* a plane. On ILK+ the pipe PLLs are integrated, so we don't
* need the check.
*/
- if (HAS_GMCH_DISPLAY(dev_priv)) {
+ if (HAS_GMCH(dev_priv)) {
if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
assert_dsi_pll_enabled(dev_priv);
else
@@ -1806,7 +1848,7 @@ static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
* when it's derived from the timestamps. So let's wait for the
* pipe to start properly before we call drm_crtc_vblank_on()
*/
- if (dev_priv->drm.max_vblank_count == 0)
+ if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
intel_wait_for_pipe_scanline_moving(crtc);
}
@@ -1850,7 +1892,7 @@ static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
- return IS_GEN2(dev_priv) ? 2048 : 4096;
+ return IS_GEN(dev_priv, 2) ? 2048 : 4096;
}
static unsigned int
@@ -1863,7 +1905,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
case DRM_FORMAT_MOD_LINEAR:
return cpp;
case I915_FORMAT_MOD_X_TILED:
- if (IS_GEN2(dev_priv))
+ if (IS_GEN(dev_priv, 2))
return 128;
else
return 512;
@@ -1872,7 +1914,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
return 128;
/* fall through */
case I915_FORMAT_MOD_Y_TILED:
- if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv))
+ if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
return 128;
else
return 512;
@@ -2024,6 +2066,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
struct drm_device *dev = fb->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ intel_wakeref_t wakeref;
struct i915_vma *vma;
unsigned int pinctl;
u32 alignment;
@@ -2047,7 +2090,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
* intel_runtime_pm_put(), so it is correct to wrap only the
* pin/unpin/fence and not more.
*/
- intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(dev_priv);
atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
@@ -2060,7 +2103,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
* complicated than this. For example, Cherryview appears quite
* happy to scanout from anywhere within its global aperture.
*/
- if (HAS_GMCH_DISPLAY(dev_priv))
+ if (HAS_GMCH(dev_priv))
pinctl |= PIN_MAPPABLE;
vma = i915_gem_object_pin_to_display_plane(obj,
@@ -2102,7 +2145,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
err:
atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
return vma;
}
@@ -2373,7 +2416,7 @@ static int intel_fb_offset_to_xy(int *x, int *y,
return 0;
}
-static unsigned int intel_fb_modifier_to_tiling(uint64_t fb_modifier)
+static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
{
switch (fb_modifier) {
case I915_FORMAT_MOD_X_TILED:
@@ -3161,7 +3204,7 @@ i9xx_plane_max_stride(struct intel_plane *plane,
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- if (!HAS_GMCH_DISPLAY(dev_priv)) {
+ if (!HAS_GMCH(dev_priv)) {
return 32*1024;
} else if (INTEL_GEN(dev_priv) >= 4) {
if (modifier == I915_FORMAT_MOD_X_TILED)
@@ -3181,28 +3224,38 @@ i9xx_plane_max_stride(struct intel_plane *plane,
}
}
+static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 dspcntr = 0;
+
+ dspcntr |= DISPPLANE_GAMMA_ENABLE;
+
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
+
+ if (INTEL_GEN(dev_priv) < 5)
+ dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
+
+ return dspcntr;
+}
+
static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
to_i915(plane_state->base.plane->dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
const struct drm_framebuffer *fb = plane_state->base.fb;
unsigned int rotation = plane_state->base.rotation;
u32 dspcntr;
- dspcntr = DISPLAY_PLANE_ENABLE | DISPPLANE_GAMMA_ENABLE;
+ dspcntr = DISPLAY_PLANE_ENABLE;
- if (IS_G4X(dev_priv) || IS_GEN5(dev_priv) ||
- IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
+ if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
+ IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
-
- if (INTEL_GEN(dev_priv) < 5)
- dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
-
switch (fb->format->format) {
case DRM_FORMAT_C8:
dspcntr |= DISPPLANE_8BPP;
@@ -3330,11 +3383,13 @@ static void i9xx_update_plane(struct intel_plane *plane,
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
u32 linear_offset;
- u32 dspcntr = plane_state->ctl;
int x = plane_state->color_plane[0].x;
int y = plane_state->color_plane[0].y;
unsigned long irqflags;
u32 dspaddr_offset;
+ u32 dspcntr;
+
+ dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
@@ -3394,10 +3449,23 @@ static void i9xx_disable_plane(struct intel_plane *plane,
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
unsigned long irqflags;
+ u32 dspcntr;
+
+ /*
+ * DSPCNTR pipe gamma enable on g4x+ and pipe csc
+ * enable on ilk+ affect the pipe bottom color as
+ * well, so we must configure them even if the plane
+ * is disabled.
+ *
+ * On pre-g4x there is no way to gamma correct the
+ * pipe bottom color but we'll keep on doing this
+ * anyway.
+ */
+ dspcntr = i9xx_plane_ctl_crtc(crtc_state);
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- I915_WRITE_FW(DSPCNTR(i9xx_plane), 0);
+ I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
if (INTEL_GEN(dev_priv) >= 4)
I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
else
@@ -3412,6 +3480,7 @@ static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+ intel_wakeref_t wakeref;
bool ret;
u32 val;
@@ -3421,7 +3490,8 @@ static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
* display power wells.
*/
power_domain = POWER_DOMAIN_PIPE(plane->pipe);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
return false;
val = I915_READ(DSPCNTR(i9xx_plane));
@@ -3434,7 +3504,7 @@ static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
*pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
DISPPLANE_SEL_PIPE_SHIFT;
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, power_domain, wakeref);
return ret;
}
@@ -3503,7 +3573,7 @@ u32 skl_plane_stride(const struct intel_plane_state *plane_state,
return stride / skl_plane_stride_mult(fb, color_plane, rotation);
}
-static u32 skl_plane_ctl_format(uint32_t pixel_format)
+static u32 skl_plane_ctl_format(u32 pixel_format)
{
switch (pixel_format) {
case DRM_FORMAT_C8:
@@ -3573,7 +3643,7 @@ static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state
}
}
-static u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
+static u32 skl_plane_ctl_tiling(u64 fb_modifier)
{
switch (fb_modifier) {
case DRM_FORMAT_MOD_LINEAR:
@@ -3632,6 +3702,20 @@ static u32 cnl_plane_ctl_flip(unsigned int reflect)
return 0;
}
+u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ u32 plane_ctl = 0;
+
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ return plane_ctl;
+
+ plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
+ plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
+
+ return plane_ctl;
+}
+
u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
@@ -3646,10 +3730,7 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
plane_ctl |= skl_plane_ctl_alpha(plane_state);
- plane_ctl |=
- PLANE_CTL_PIPE_GAMMA_ENABLE |
- PLANE_CTL_PIPE_CSC_ENABLE |
- PLANE_CTL_PLANE_GAMMA_DISABLE;
+ plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;
@@ -3674,19 +3755,27 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
return plane_ctl;
}
+u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ u32 plane_color_ctl = 0;
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ return plane_color_ctl;
+
+ plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
+ plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
+
+ return plane_color_ctl;
+}
+
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->base.plane->dev);
const struct drm_framebuffer *fb = plane_state->base.fb;
struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
u32 plane_color_ctl = 0;
- if (INTEL_GEN(dev_priv) < 11) {
- plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
- plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
- }
plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);
@@ -3735,7 +3824,7 @@ __intel_display_resume(struct drm_device *dev,
}
/* ignore any reset values/BIOS leftovers in the WM registers */
- if (!HAS_GMCH_DISPLAY(to_i915(dev)))
+ if (!HAS_GMCH(to_i915(dev)))
to_intel_atomic_state(state)->skip_intermediate_wm = true;
ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
@@ -3746,8 +3835,8 @@ __intel_display_resume(struct drm_device *dev,
static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
{
- return intel_has_gpu_reset(dev_priv) &&
- INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv);
+ return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
+ intel_has_gpu_reset(dev_priv));
}
void intel_prepare_reset(struct drm_i915_private *dev_priv)
@@ -3860,6 +3949,30 @@ unlock:
clear_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
}
+static void icl_set_pipe_chicken(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ u32 tmp;
+
+ tmp = I915_READ(PIPE_CHICKEN(pipe));
+
+ /*
+ * Display WA #1153: icl
+ * enable hardware to bypass the alpha math
+ * and rounding for per-pixel values 00 and 0xff
+ */
+ tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
+
+ /*
+ * W/A for underruns with linear/X-tiled with
+ * WM1+ disabled.
+ */
+ tmp |= PM_FILL_MAINTAIN_DBUF_FULLNESS;
+
+ I915_WRITE(PIPE_CHICKEN(pipe), tmp);
+}
+
static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state)
{
@@ -3894,6 +4007,19 @@ static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_sta
else if (old_crtc_state->pch_pfit.enabled)
ironlake_pfit_disable(old_crtc_state);
}
+
+ /*
+ * We don't (yet) allow userspace to control the pipe background color,
+ * so force it to black, but apply pipe gamma and CSC so that its
+ * handling will match how we program our planes.
+ */
+ if (INTEL_GEN(dev_priv) >= 9)
+ I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
+ SKL_BOTTOM_COLOR_GAMMA_ENABLE |
+ SKL_BOTTOM_COLOR_CSC_ENABLE);
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ icl_set_pipe_chicken(crtc);
}
static void intel_fdi_normal_train(struct intel_crtc *crtc)
@@ -4120,7 +4246,7 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_2;
- if (IS_GEN6(dev_priv)) {
+ if (IS_GEN(dev_priv, 6)) {
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
/* SNB-B */
temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
@@ -4593,7 +4719,7 @@ static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *c
static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
{
- uint32_t temp;
+ u32 temp;
temp = I915_READ(SOUTH_CHICKEN1);
if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
@@ -4919,10 +5045,10 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
/* range checks */
if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
- (IS_GEN11(dev_priv) &&
+ (IS_GEN(dev_priv, 11) &&
(src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
- (!IS_GEN11(dev_priv) &&
+ (!IS_GEN(dev_priv, 11) &&
(src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
@@ -5213,7 +5339,7 @@ intel_post_enable_primary(struct drm_crtc *crtc,
* FIXME: Need to fix the logic to work when we turn off all planes
* but leave the pipe running.
*/
- if (IS_GEN2(dev_priv))
+ if (IS_GEN(dev_priv, 2))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
/* Underruns don't always raise interrupts, so check manually. */
@@ -5234,7 +5360,7 @@ intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
* Gen2 reports pipe underruns whenever all planes are disabled.
* So disable underrun reporting before all the planes get disabled.
*/
- if (IS_GEN2(dev_priv))
+ if (IS_GEN(dev_priv, 2))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
hsw_disable_ips(to_intel_crtc_state(crtc->state));
@@ -5248,7 +5374,7 @@ intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
* event which is after the vblank start event, so we need to have a
* wait-for-vblank between disabling the plane and the pipe.
*/
- if (HAS_GMCH_DISPLAY(dev_priv) &&
+ if (HAS_GMCH(dev_priv) &&
intel_set_memory_cxsr(dev_priv, false))
intel_wait_for_vblank(dev_priv, pipe);
}
@@ -5256,18 +5382,36 @@ intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
if (!old_crtc_state->ips_enabled)
return false;
if (needs_modeset(&new_crtc_state->base))
return true;
+ /*
+ * Workaround : Do not read or write the pipe palette/gamma data while
+ * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
+ *
+ * Disable IPS before we program the LUT.
+ */
+ if (IS_HASWELL(dev_priv) &&
+ (new_crtc_state->base.color_mgmt_changed ||
+ new_crtc_state->update_pipe) &&
+ new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
+ return true;
+
return !new_crtc_state->ips_enabled;
}
static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
if (!new_crtc_state->ips_enabled)
return false;
@@ -5275,6 +5419,18 @@ static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_s
return true;
/*
+ * Workaround : Do not read or write the pipe palette/gamma data while
+ * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
+ *
+ * Re-enable IPS after the LUT has been programmed.
+ */
+ if (IS_HASWELL(dev_priv) &&
+ (new_crtc_state->base.color_mgmt_changed ||
+ new_crtc_state->update_pipe) &&
+ new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
+ return true;
+
+ /*
* We can't read out IPS on broadwell, assume the worst and
* forcibly enable IPS on the first fastset.
*/
@@ -5292,7 +5448,7 @@ static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
return false;
/* WA Display #0827: Gen9:all */
- if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv))
+ if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
return true;
return false;
@@ -5365,7 +5521,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
* Gen2 reports pipe underruns whenever all planes are disabled.
* So disable underrun reporting before all the planes get disabled.
*/
- if (IS_GEN2(dev_priv) && old_primary_state->visible &&
+ if (IS_GEN(dev_priv, 2) && old_primary_state->visible &&
(modeset || !new_primary_state->base.visible))
intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
}
@@ -5385,7 +5541,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
* event which is after the vblank start event, so we need to have a
* wait-for-vblank between disabling the plane and the pipe.
*/
- if (HAS_GMCH_DISPLAY(dev_priv) && old_crtc_state->base.active &&
+ if (HAS_GMCH(dev_priv) && old_crtc_state->base.active &&
pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
intel_wait_for_vblank(dev_priv, crtc->pipe);
@@ -5578,6 +5734,26 @@ static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
}
}
+static void intel_encoders_update_pipe(struct drm_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_connector_state *conn_state;
+ struct drm_connector *conn;
+ int i;
+
+ for_each_new_connector_in_state(old_state, conn, conn_state, i) {
+ struct intel_encoder *encoder =
+ to_intel_encoder(conn_state->best_encoder);
+
+ if (conn_state->crtc != crtc)
+ continue;
+
+ if (encoder->update_pipe)
+ encoder->update_pipe(encoder, crtc_state, conn_state);
+ }
+}
+
static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
struct drm_atomic_state *old_state)
{
@@ -5641,7 +5817,8 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
* On ILK+ LUT must be loaded before the pipe is running but with
* clocks enabled
*/
- intel_color_load_luts(&pipe_config->base);
+ intel_color_load_luts(pipe_config);
+ intel_color_commit(pipe_config);
if (dev_priv->display.initial_watermarks != NULL)
dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
@@ -5651,7 +5828,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
ironlake_pch_enable(old_intel_state, pipe_config);
assert_vblank_disabled(crtc);
- drm_crtc_vblank_on(crtc);
+ intel_crtc_vblank_on(pipe_config);
intel_encoders_enable(crtc, pipe_config, old_state);
@@ -5696,7 +5873,7 @@ static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- uint32_t val;
+ u32 val;
val = MBUS_DBOX_A_CREDIT(2);
val |= MBUS_DBOX_BW_CREDIT(1);
@@ -5716,7 +5893,6 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
struct intel_atomic_state *old_intel_state =
to_intel_atomic_state(old_state);
bool psl_clkgate_wa;
- u32 pipe_chicken;
if (WARN_ON(intel_crtc->active))
return;
@@ -5752,8 +5928,6 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
haswell_set_pipemisc(pipe_config);
- intel_color_set_csc(&pipe_config->base);
-
intel_crtc->active = true;
/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
@@ -5771,18 +5945,11 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
* On ILK+ LUT must be loaded before the pipe is running but with
* clocks enabled
*/
- intel_color_load_luts(&pipe_config->base);
+ intel_color_load_luts(pipe_config);
+ intel_color_commit(pipe_config);
- /*
- * Display WA #1153: enable hardware to bypass the alpha math
- * and rounding for per-pixel values 00 and 0xff
- */
- if (INTEL_GEN(dev_priv) >= 11) {
- pipe_chicken = I915_READ(PIPE_CHICKEN(pipe));
- if (!(pipe_chicken & PER_PIXEL_ALPHA_BYPASS_EN))
- I915_WRITE_FW(PIPE_CHICKEN(pipe),
- pipe_chicken | PER_PIXEL_ALPHA_BYPASS_EN);
- }
+ if (INTEL_GEN(dev_priv) >= 11)
+ icl_set_pipe_chicken(intel_crtc);
intel_ddi_set_pipe_settings(pipe_config);
if (!transcoder_is_dsi(cpu_transcoder))
@@ -5805,7 +5972,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
intel_ddi_set_vc_payload_alloc(pipe_config, true);
assert_vblank_disabled(crtc);
- drm_crtc_vblank_on(crtc);
+ intel_crtc_vblank_on(pipe_config);
intel_encoders_enable(crtc, pipe_config, old_state);
@@ -6087,7 +6254,7 @@ static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain;
for_each_power_domain(domain, domains)
- intel_display_power_put(dev_priv, domain);
+ intel_display_power_put_unchecked(dev_priv, domain);
}
static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
@@ -6117,8 +6284,6 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
i9xx_set_pipeconf(pipe_config);
- intel_color_set_csc(&pipe_config->base);
-
intel_crtc->active = true;
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
@@ -6137,14 +6302,15 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
i9xx_pfit_enable(pipe_config);
- intel_color_load_luts(&pipe_config->base);
+ intel_color_load_luts(pipe_config);
+ intel_color_commit(pipe_config);
dev_priv->display.initial_watermarks(old_intel_state,
pipe_config);
intel_enable_pipe(pipe_config);
assert_vblank_disabled(crtc);
- drm_crtc_vblank_on(crtc);
+ intel_crtc_vblank_on(pipe_config);
intel_encoders_enable(crtc, pipe_config, old_state);
}
@@ -6184,7 +6350,7 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
intel_crtc->active = true;
- if (!IS_GEN2(dev_priv))
+ if (!IS_GEN(dev_priv, 2))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
intel_encoders_pre_enable(crtc, pipe_config, old_state);
@@ -6193,7 +6359,8 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
i9xx_pfit_enable(pipe_config);
- intel_color_load_luts(&pipe_config->base);
+ intel_color_load_luts(pipe_config);
+ intel_color_commit(pipe_config);
if (dev_priv->display.initial_watermarks != NULL)
dev_priv->display.initial_watermarks(old_intel_state,
@@ -6203,7 +6370,7 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
intel_enable_pipe(pipe_config);
assert_vblank_disabled(crtc);
- drm_crtc_vblank_on(crtc);
+ intel_crtc_vblank_on(pipe_config);
intel_encoders_enable(crtc, pipe_config, old_state);
}
@@ -6236,7 +6403,7 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
* On gen2 planes are double buffered but the pipe isn't, so we must
* wait for planes to fully turn off before disabling the pipe.
*/
- if (IS_GEN2(dev_priv))
+ if (IS_GEN(dev_priv, 2))
intel_wait_for_vblank(dev_priv, pipe);
intel_encoders_disable(crtc, old_crtc_state, old_state);
@@ -6261,7 +6428,7 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
- if (!IS_GEN2(dev_priv))
+ if (!IS_GEN(dev_priv, 2))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
if (!dev_priv->display.initial_watermarks)
@@ -6334,7 +6501,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
domains = intel_crtc->enabled_power_domains;
for_each_power_domain(domain, domains)
- intel_display_power_put(dev_priv, domain);
+ intel_display_power_put_unchecked(dev_priv, domain);
intel_crtc->enabled_power_domains = 0;
dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
@@ -6600,9 +6767,9 @@ static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
}
-static uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
+static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
{
- uint32_t pixel_rate;
+ u32 pixel_rate;
pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
@@ -6612,8 +6779,8 @@ static uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
*/
if (pipe_config->pch_pfit.enabled) {
- uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
- uint32_t pfit_size = pipe_config->pch_pfit.size;
+ u64 pipe_w, pipe_h, pfit_w, pfit_h;
+ u32 pfit_size = pipe_config->pch_pfit.size;
pipe_w = pipe_config->pipe_src_w;
pipe_h = pipe_config->pipe_src_h;
@@ -6628,7 +6795,7 @@ static uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
if (WARN_ON(!pfit_w || !pfit_h))
return pixel_rate;
- pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
+ pixel_rate = div_u64((u64)pixel_rate * pipe_w * pipe_h,
pfit_w * pfit_h);
}
@@ -6639,7 +6806,7 @@ static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
- if (HAS_GMCH_DISPLAY(dev_priv))
+ if (HAS_GMCH(dev_priv))
/* FIXME calculate proper pipe pixel rate for GMCH pfit */
crtc_state->pixel_rate =
crtc_state->base.adjusted_mode.crtc_clock;
@@ -6724,7 +6891,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
}
static void
-intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
+intel_reduce_m_n_ratio(u32 *num, u32 *den)
{
while (*num > DATA_LINK_M_N_MASK ||
*den > DATA_LINK_M_N_MASK) {
@@ -6734,7 +6901,7 @@ intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
}
static void compute_m_n(unsigned int m, unsigned int n,
- uint32_t *ret_m, uint32_t *ret_n,
+ u32 *ret_m, u32 *ret_n,
bool constant_n)
{
/*
@@ -6749,7 +6916,7 @@ static void compute_m_n(unsigned int m, unsigned int n,
else
*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
- *ret_m = div_u64((uint64_t) m * *ret_n, n);
+ *ret_m = div_u64((u64)m * *ret_n, n);
intel_reduce_m_n_ratio(ret_m, ret_n);
}
@@ -6779,12 +6946,12 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}
-static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
+static u32 pnv_dpll_compute_fp(struct dpll *dpll)
{
return (1 << dpll->n) << 16 | dpll->m2;
}
-static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
+static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
{
return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
}
@@ -6868,7 +7035,7 @@ static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
* Strictly speaking some registers are available before
* gen7, but we only support DRRS on gen7+
*/
- return IS_GEN7(dev_priv) || IS_CHERRYVIEW(dev_priv);
+ return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
}
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
@@ -7340,7 +7507,7 @@ static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
enum pipe pipe = crtc->pipe;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
- uint32_t crtc_vtotal, crtc_vblank_end;
+ u32 crtc_vtotal, crtc_vblank_end;
int vsyncshift = 0;
/* We need to be careful not to change the adjusted mode, for otherwise
@@ -7415,7 +7582,7 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
- uint32_t tmp;
+ u32 tmp;
tmp = I915_READ(HTOTAL(cpu_transcoder));
pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
@@ -7486,7 +7653,7 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- uint32_t pipeconf;
+ u32 pipeconf;
pipeconf = 0;
@@ -7731,7 +7898,7 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- uint32_t tmp;
+ u32 tmp;
if (INTEL_GEN(dev_priv) <= 3 &&
(IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
@@ -7946,11 +8113,13 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum intel_display_power_domain power_domain;
- uint32_t tmp;
+ intel_wakeref_t wakeref;
+ u32 tmp;
bool ret;
power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
return false;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
@@ -8051,7 +8220,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
ret = true;
out:
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, power_domain, wakeref);
return ret;
}
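
A pattern repeated throughout this diff is that intel_display_power_get_if_enabled() now returns an intel_wakeref_t cookie that must be handed back to intel_display_power_put(); callers that cannot track the cookie use intel_display_power_put_unchecked() instead. A minimal illustrative sketch of the calling convention follows (example_read_pipe() is a hypothetical name, not part of the patch):

/*
 * Illustrative sketch only: the wakeref returned by
 * intel_display_power_get_if_enabled() is passed back on the put side so
 * the runtime-PM tracking can pair every get with its put.
 */
static bool example_read_pipe(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	bool active;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;	/* power well is off, nothing to read */

	active = I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;

	intel_display_power_put(dev_priv, power_domain, wakeref);

	return active;
}
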
@@ -8225,7 +8394,7 @@ static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
- uint32_t tmp;
+ u32 tmp;
tmp = I915_READ(SOUTH_CHICKEN2);
tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
@@ -8247,7 +8416,7 @@ static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
/* WaMPhyProgramming:hsw */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
- uint32_t tmp;
+ u32 tmp;
tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
tmp &= ~(0xFF << 24);
@@ -8328,7 +8497,7 @@ static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
bool with_spread, bool with_fdi)
{
- uint32_t reg, tmp;
+ u32 reg, tmp;
if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
with_spread = true;
@@ -8367,7 +8536,7 @@ static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
/* Sequence to disable CLKOUT_DP */
static void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
- uint32_t reg, tmp;
+ u32 reg, tmp;
mutex_lock(&dev_priv->sb_lock);
@@ -8392,7 +8561,7 @@ static void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
#define BEND_IDX(steps) ((50 + (steps)) / 5)
-static const uint16_t sscdivintphase[] = {
+static const u16 sscdivintphase[] = {
[BEND_IDX( 50)] = 0x3B23,
[BEND_IDX( 45)] = 0x3B23,
[BEND_IDX( 40)] = 0x3C23,
@@ -8424,7 +8593,7 @@ static const uint16_t sscdivintphase[] = {
*/
static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
{
- uint32_t tmp;
+ u32 tmp;
int idx = BEND_IDX(steps);
if (WARN_ON(steps % 5 != 0))
@@ -8490,7 +8659,7 @@ static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- uint32_t val;
+ u32 val;
val = 0;
@@ -8837,7 +9006,7 @@ static void skylake_get_pfit_config(struct intel_crtc *crtc,
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
- uint32_t ps_ctrl = 0;
+ u32 ps_ctrl = 0;
int id = -1;
int i;
@@ -8849,6 +9018,7 @@ static void skylake_get_pfit_config(struct intel_crtc *crtc,
pipe_config->pch_pfit.enabled = true;
pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
+ scaler_state->scalers[i].in_use = true;
break;
}
}
@@ -8993,7 +9163,7 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- uint32_t tmp;
+ u32 tmp;
tmp = I915_READ(PF_CTL(crtc->pipe));
@@ -9005,7 +9175,7 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc,
/* We currently do not free assignments of panel fitters on
* ivb/hsw (since we don't use the higher upscaling modes which
* differentiate them) so just WARN about this case for now. */
- if (IS_GEN7(dev_priv)) {
+ if (IS_GEN(dev_priv, 7)) {
WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
PF_PIPE_SEL_IVB(crtc->pipe));
}
@@ -9018,11 +9188,13 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
- uint32_t tmp;
+ intel_wakeref_t wakeref;
+ u32 tmp;
bool ret;
power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
return false;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
@@ -9105,7 +9277,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
ret = true;
out:
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, power_domain, wakeref);
return ret;
}
@@ -9145,7 +9317,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}
-static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
+static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
{
if (IS_HASWELL(dev_priv))
return I915_READ(D_COMP_HSW);
@@ -9153,7 +9325,7 @@ static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
return I915_READ(D_COMP_BDW);
}
-static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
+static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
{
if (IS_HASWELL(dev_priv)) {
mutex_lock(&dev_priv->pcu_lock);
@@ -9178,7 +9350,7 @@ static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
bool switch_to_fclk, bool allow_power_down)
{
- uint32_t val;
+ u32 val;
assert_can_disable_lcpll(dev_priv);
@@ -9225,7 +9397,7 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
*/
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
- uint32_t val;
+ u32 val;
val = I915_READ(LCPLL_CTL);
@@ -9300,7 +9472,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
*/
void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
- uint32_t val;
+ u32 val;
DRM_DEBUG_KMS("Enabling package C8+\n");
@@ -9316,7 +9488,7 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv)
void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
- uint32_t val;
+ u32 val;
DRM_DEBUG_KMS("Disabling package C8+\n");
@@ -9384,7 +9556,7 @@ static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
if (WARN_ON(!intel_dpll_is_combophy(id)))
return;
} else if (intel_port_is_tc(dev_priv, port)) {
- id = icl_port_to_mg_pll_id(port);
+ id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv, port));
} else {
WARN(1, "Invalid port %x\n", port);
return;
@@ -9438,7 +9610,7 @@ static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
struct intel_crtc_state *pipe_config)
{
enum intel_dpll_id id;
- uint32_t ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
+ u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
switch (ddi_pll_sel) {
case PORT_CLK_SEL_WRPLL1:
@@ -9495,7 +9667,9 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
* XXX: Do intel_display_power_get_if_enabled before reading this (for
* consistency and less surprising code; it's in always-on power).
*/
- for_each_set_bit(panel_transcoder, &panel_transcoder_mask, 32) {
+ for_each_set_bit(panel_transcoder,
+ &panel_transcoder_mask,
+ ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) {
enum pipe trans_pipe;
tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
@@ -9541,6 +9715,8 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
+
+ WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
*power_domain_mask |= BIT_ULL(power_domain);
tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
@@ -9568,6 +9744,8 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
continue;
+
+ WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
*power_domain_mask |= BIT_ULL(power_domain);
/*
@@ -9602,7 +9780,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll;
enum port port;
- uint32_t tmp;
+ u32 tmp;
tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
@@ -9684,7 +9862,9 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
+ WARN_ON(power_domain_mask & BIT_ULL(power_domain));
power_domain_mask |= BIT_ULL(power_domain);
+
if (INTEL_GEN(dev_priv) >= 9)
skylake_get_pfit_config(crtc, pipe_config);
else
@@ -9714,7 +9894,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
out:
for_each_power_domain(power_domain, power_domain_mask)
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put_unchecked(dev_priv, power_domain);
return active;
}
@@ -9735,7 +9915,7 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
base += plane_state->color_plane[0].offset;
/* ILK+ do this automagically */
- if (HAS_GMCH_DISPLAY(dev_priv) &&
+ if (HAS_GMCH(dev_priv) &&
plane_state->base.rotation & DRM_MODE_ROTATE_180)
base += (plane_state->base.crtc_h *
plane_state->base.crtc_w - 1) * fb->format->cpp[0];
@@ -9848,11 +10028,15 @@ i845_cursor_max_stride(struct intel_plane *plane,
return 2048;
}
+static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+ return CURSOR_GAMMA_ENABLE;
+}
+
static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
return CURSOR_ENABLE |
- CURSOR_GAMMA_ENABLE |
CURSOR_FORMAT_ARGB |
CURSOR_STRIDE(plane_state->color_plane[0].stride);
}
@@ -9922,7 +10106,9 @@ static void i845_update_cursor(struct intel_plane *plane,
unsigned int width = plane_state->base.crtc_w;
unsigned int height = plane_state->base.crtc_h;
- cntl = plane_state->ctl;
+ cntl = plane_state->ctl |
+ i845_cursor_ctl_crtc(crtc_state);
+
size = (height << 12) | width;
base = intel_cursor_base(plane_state);
@@ -9964,17 +10150,19 @@ static bool i845_cursor_get_hw_state(struct intel_plane *plane,
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
+ intel_wakeref_t wakeref;
bool ret;
power_domain = POWER_DOMAIN_PIPE(PIPE_A);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
return false;
ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
*pipe = PIPE_A;
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, power_domain, wakeref);
return ret;
}
@@ -9987,27 +10175,36 @@ i9xx_cursor_max_stride(struct intel_plane *plane,
return plane->base.dev->mode_config.cursor_width * 4;
}
-static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv =
- to_i915(plane_state->base.plane->dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 cntl = 0;
- if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
- cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
+ if (INTEL_GEN(dev_priv) >= 11)
+ return cntl;
- if (INTEL_GEN(dev_priv) <= 10) {
- cntl |= MCURSOR_GAMMA_ENABLE;
+ cntl |= MCURSOR_GAMMA_ENABLE;
- if (HAS_DDI(dev_priv))
- cntl |= MCURSOR_PIPE_CSC_ENABLE;
- }
+ if (HAS_DDI(dev_priv))
+ cntl |= MCURSOR_PIPE_CSC_ENABLE;
if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
+ return cntl;
+}
+
+static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->base.plane->dev);
+ u32 cntl = 0;
+
+ if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
+ cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
+
switch (plane_state->base.crtc_w) {
case 64:
cntl |= MCURSOR_MODE_64_ARGB_AX;
@@ -10132,7 +10329,8 @@ static void i9xx_update_cursor(struct intel_plane *plane,
unsigned long irqflags;
if (plane_state && plane_state->base.visible) {
- cntl = plane_state->ctl;
+ cntl = plane_state->ctl |
+ i9xx_cursor_ctl_crtc(crtc_state);
if (plane_state->base.crtc_h != plane_state->base.crtc_w)
fbc_ctl = CUR_FBC_CTL_EN | (plane_state->base.crtc_h - 1);
@@ -10197,6 +10395,7 @@ static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
+ intel_wakeref_t wakeref;
bool ret;
u32 val;
@@ -10206,7 +10405,8 @@ static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
* display power wells.
*/
power_domain = POWER_DOMAIN_PIPE(plane->pipe);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
return false;
val = I915_READ(CURCNTR(plane->pipe));
@@ -10219,7 +10419,7 @@ static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
*pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
MCURSOR_PIPE_SELECT_SHIFT;
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, power_domain, wakeref);
return ret;
}
@@ -10468,7 +10668,7 @@ static int i9xx_pll_refclk(struct drm_device *dev,
return dev_priv->vbt.lvds_ssc_freq;
else if (HAS_PCH_SPLIT(dev_priv))
return 120000;
- else if (!IS_GEN2(dev_priv))
+ else if (!IS_GEN(dev_priv, 2))
return 96000;
else
return 48000;
@@ -10501,7 +10701,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
}
- if (!IS_GEN2(dev_priv)) {
+ if (!IS_GEN(dev_priv, 2)) {
if (IS_PINEVIEW(dev_priv))
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
@@ -10653,20 +10853,17 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
/**
* intel_wm_need_update - Check whether watermarks need updating
- * @plane: drm plane
- * @state: new plane state
+ * @cur: current plane state
+ * @new: new plane state
*
* Check current plane state versus the new one to determine whether
* watermarks need to be recalculated.
*
* Returns true or false.
*/
-static bool intel_wm_need_update(struct drm_plane *plane,
- struct drm_plane_state *state)
+static bool intel_wm_need_update(struct intel_plane_state *cur,
+ struct intel_plane_state *new)
{
- struct intel_plane_state *new = to_intel_plane_state(state);
- struct intel_plane_state *cur = to_intel_plane_state(plane->state);
-
/* Update watermarks on tiling or size changes. */
if (new->base.visible != cur->base.visible)
return true;
@@ -10775,7 +10972,8 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
/* must disable cxsr around plane enable/disable */
if (plane->id != PLANE_CURSOR)
pipe_config->disable_cxsr = true;
- } else if (intel_wm_need_update(&plane->base, plane_state)) {
+ } else if (intel_wm_need_update(to_intel_plane_state(plane->base.state),
+ to_intel_plane_state(plane_state))) {
if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
/* FIXME bollocks */
pipe_config->update_wm_pre = true;
@@ -10815,9 +11013,12 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
* Despite the w/a only being listed for IVB we assume that
* the ILK/SNB note has similar ramifications, hence we apply
* the w/a on all three platforms.
+ *
+ * Experimental results suggest this is also needed for the primary
+ * plane, not only the sprite plane.
*/
- if (plane->id == PLANE_SPRITE0 &&
- (IS_GEN5(dev_priv) || IS_GEN6(dev_priv) ||
+ if (plane->id != PLANE_CURSOR &&
+ (IS_GEN_RANGE(dev_priv, 5, 6) ||
IS_IVYBRIDGE(dev_priv)) &&
(turn_on || (!needs_scaling(old_plane_state) &&
needs_scaling(to_intel_plane_state(plane_state)))))
@@ -10954,15 +11155,15 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
static int intel_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc_state);
int ret;
bool mode_changed = needs_modeset(crtc_state);
- if (mode_changed && !crtc_state->active)
+ if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
+ mode_changed && !crtc_state->active)
pipe_config->update_wm_post = true;
if (mode_changed && crtc_state->enable &&
@@ -10974,8 +11175,8 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
return ret;
}
- if (crtc_state->color_mgmt_changed) {
- ret = intel_color_check(crtc, crtc_state);
+ if (mode_changed || crtc_state->color_mgmt_changed) {
+ ret = intel_color_check(pipe_config);
if (ret)
return ret;
@@ -11004,9 +11205,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
* old state and the new state. We can program these
* immediately.
*/
- ret = dev_priv->display.compute_intermediate_wm(dev,
- intel_crtc,
- pipe_config);
+ ret = dev_priv->display.compute_intermediate_wm(pipe_config);
if (ret) {
DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
return ret;
@@ -11014,7 +11213,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
}
if (INTEL_GEN(dev_priv) >= 9) {
- if (mode_changed)
+ if (mode_changed || pipe_config->update_pipe)
ret = skl_update_scaler_crtc(pipe_config);
if (!ret)
@@ -11275,7 +11474,7 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
pipe_config->scaler_state.scaler_users,
pipe_config->scaler_state.scaler_id);
- if (HAS_GMCH_DISPLAY(dev_priv))
+ if (HAS_GMCH(dev_priv))
DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
pipe_config->gmch_pfit.control,
pipe_config->gmch_pfit.pgm_ratios,
@@ -11387,44 +11586,38 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
return ret;
}
-static void
+static int
clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv =
to_i915(crtc_state->base.crtc->dev);
- struct intel_crtc_scaler_state scaler_state;
- struct intel_dpll_hw_state dpll_hw_state;
- struct intel_shared_dpll *shared_dpll;
- struct intel_crtc_wm_state wm_state;
- bool force_thru, ips_force_disable;
+ struct intel_crtc_state *saved_state;
+
+ saved_state = kzalloc(sizeof(*saved_state), GFP_KERNEL);
+ if (!saved_state)
+ return -ENOMEM;
/* FIXME: before the switch to atomic started, a new pipe_config was
* kzalloc'd. Code that depends on any field being zero should be
* fixed, so that the crtc_state can be safely duplicated. For now,
* only fields that are known to not cause problems are preserved. */
- scaler_state = crtc_state->scaler_state;
- shared_dpll = crtc_state->shared_dpll;
- dpll_hw_state = crtc_state->dpll_hw_state;
- force_thru = crtc_state->pch_pfit.force_thru;
- ips_force_disable = crtc_state->ips_force_disable;
+ saved_state->scaler_state = crtc_state->scaler_state;
+ saved_state->shared_dpll = crtc_state->shared_dpll;
+ saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
+ saved_state->pch_pfit.force_thru = crtc_state->pch_pfit.force_thru;
+ saved_state->ips_force_disable = crtc_state->ips_force_disable;
if (IS_G4X(dev_priv) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- wm_state = crtc_state->wm;
+ saved_state->wm = crtc_state->wm;
/* Keep base drm_crtc_state intact, only clear our extended struct */
BUILD_BUG_ON(offsetof(struct intel_crtc_state, base));
- memset(&crtc_state->base + 1, 0,
+ memcpy(&crtc_state->base + 1, &saved_state->base + 1,
sizeof(*crtc_state) - sizeof(crtc_state->base));
- crtc_state->scaler_state = scaler_state;
- crtc_state->shared_dpll = shared_dpll;
- crtc_state->dpll_hw_state = dpll_hw_state;
- crtc_state->pch_pfit.force_thru = force_thru;
- crtc_state->ips_force_disable = ips_force_disable;
- if (IS_G4X(dev_priv) ||
- IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- crtc_state->wm = wm_state;
+ kfree(saved_state);
+ return 0;
}
static int
@@ -11439,7 +11632,9 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
int i;
bool retry = true;
- clear_intel_crtc_state(pipe_config);
+ ret = clear_intel_crtc_state(pipe_config);
+ if (ret)
+ return ret;
pipe_config->cpu_transcoder =
(enum transcoder) to_intel_crtc(crtc)->pipe;
@@ -11517,10 +11712,13 @@ encoder_retry:
continue;
encoder = to_intel_encoder(connector_state->best_encoder);
-
- if (!(encoder->compute_config(encoder, pipe_config, connector_state))) {
- DRM_DEBUG_KMS("Encoder config failure\n");
- return -EINVAL;
+ ret = encoder->compute_config(encoder, pipe_config,
+ connector_state);
+ if (ret < 0) {
+ if (ret != -EDEADLK)
+ DRM_DEBUG_KMS("Encoder config failure: %d\n",
+ ret);
+ return ret;
}
}
@@ -11645,6 +11843,23 @@ pipe_config_err(bool adjust, const char *name, const char *format, ...)
va_end(args);
}
+static bool fastboot_enabled(struct drm_i915_private *dev_priv)
+{
+ if (i915_modparams.fastboot != -1)
+ return i915_modparams.fastboot;
+
+ /* Enable fastboot by default on Skylake and newer */
+ if (INTEL_GEN(dev_priv) >= 9)
+ return true;
+
+ /* Enable fastboot by default on VLV and CHV */
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ return true;
+
+ /* Disabled by default on all others */
+ return false;
+}
+
static bool
intel_pipe_config_compare(struct drm_i915_private *dev_priv,
struct intel_crtc_state *current_config,
@@ -11656,6 +11871,11 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
(current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
!(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);
+ if (fixup_inherited && !fastboot_enabled(dev_priv)) {
+ DRM_DEBUG_KMS("initial modeset and fastboot not set\n");
+ ret = false;
+ }
+
#define PIPE_CONF_CHECK_X(name) do { \
if (current_config->name != pipe_config->name) { \
pipe_config_err(adjust, __stringify(name), \
@@ -11964,7 +12184,7 @@ static void verify_wm_state(struct drm_crtc *crtc,
if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
return;
- skl_pipe_wm_get_hw_state(crtc, &hw_wm);
+ skl_pipe_wm_get_hw_state(intel_crtc, &hw_wm);
sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;
skl_pipe_ddb_get_hw_state(intel_crtc, hw_ddb_y, hw_ddb_uv);
@@ -12378,7 +12598,7 @@ static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
* However if queried just before the start of vblank we'll get an
* answer that's slightly in the future.
*/
- if (IS_GEN2(dev_priv)) {
+ if (IS_GEN(dev_priv, 2)) {
const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
int vtotal;
@@ -12619,9 +12839,9 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
* phase. The code here should be run after the per-crtc and per-plane 'check'
* handlers to ensure that all derived state has been updated.
*/
-static int calc_watermark_data(struct drm_atomic_state *state)
+static int calc_watermark_data(struct intel_atomic_state *state)
{
- struct drm_device *dev = state->dev;
+ struct drm_device *dev = state->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
/* Is there platform-specific watermark information to calculate? */
@@ -12679,8 +12899,7 @@ static int intel_atomic_check(struct drm_device *dev,
return ret;
}
- if (i915_modparams.fastboot &&
- intel_pipe_config_compare(dev_priv,
+ if (intel_pipe_config_compare(dev_priv,
to_intel_crtc_state(old_crtc_state),
pipe_config, true)) {
crtc_state->mode_changed = false;
@@ -12695,6 +12914,10 @@ static int intel_atomic_check(struct drm_device *dev,
"[modeset]" : "[fastset]");
}
+ ret = drm_dp_mst_atomic_check(state);
+ if (ret)
+ return ret;
+
if (any_ms) {
ret = intel_modeset_checks(state);
@@ -12713,7 +12936,7 @@ static int intel_atomic_check(struct drm_device *dev,
return ret;
intel_fbc_choose_crtc(dev_priv, intel_state);
- return calc_watermark_data(state);
+ return calc_watermark_data(intel_state);
}
static int intel_atomic_prepare_commit(struct drm_device *dev,
@@ -12725,8 +12948,9 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
+ struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
- if (!dev->max_vblank_count)
+ if (!vblank->max_vblank_count)
return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
return dev->driver->get_vblank_counter(dev, crtc->pipe);
@@ -12755,9 +12979,14 @@ static void intel_update_crtc(struct drm_crtc *crtc,
} else {
intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
pipe_config);
+
+ if (pipe_config->update_pipe)
+ intel_encoders_update_pipe(crtc, pipe_config, state);
}
- if (new_plane_state)
+ if (pipe_config->update_pipe && !pipe_config->enable_fbc)
+ intel_fbc_disable(intel_crtc);
+ else if (new_plane_state)
intel_fbc_enable(intel_crtc, pipe_config, new_plane_state);
intel_begin_crtc_commit(crtc, old_crtc_state);
@@ -12930,6 +13159,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
struct drm_crtc *crtc;
struct intel_crtc *intel_crtc;
u64 put_domains[I915_MAX_PIPES] = {};
+ intel_wakeref_t wakeref = 0;
int i;
intel_atomic_commit_fence_wait(intel_state);
@@ -12937,7 +13167,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_wait_for_dependencies(state);
if (intel_state->modeset)
- intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
+ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
old_intel_crtc_state = to_intel_crtc_state(old_crtc_state);
@@ -12980,7 +13210,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
/* FIXME unify this for all platforms */
if (!new_crtc_state->active &&
- !HAS_GMCH_DISPLAY(dev_priv) &&
+ !HAS_GMCH(dev_priv) &&
dev_priv->display.initial_watermarks)
dev_priv->display.initial_watermarks(intel_state,
new_intel_crtc_state);
@@ -13034,6 +13264,16 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
*/
drm_atomic_helper_wait_for_flip_done(dev, state);
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
+
+ if (new_crtc_state->active &&
+ !needs_modeset(new_crtc_state) &&
+ (new_intel_crtc_state->base.color_mgmt_changed ||
+ new_intel_crtc_state->update_pipe))
+ intel_color_load_luts(new_intel_crtc_state);
+ }
+
/*
* Now that the vblank has passed, we can go ahead and program the
* optimal watermarks on platforms that need two-step watermark
@@ -13074,7 +13314,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
* the culprit.
*/
intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
- intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
}
/*
@@ -13549,19 +13789,16 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
bool modeset = needs_modeset(&intel_cstate->base);
- if (!modeset &&
- (intel_cstate->base.color_mgmt_changed ||
- intel_cstate->update_pipe)) {
- intel_color_set_csc(&intel_cstate->base);
- intel_color_load_luts(&intel_cstate->base);
- }
-
/* Perform vblank evasion around commit operation */
intel_pipe_update_start(intel_cstate);
if (modeset)
goto out;
+ if (intel_cstate->base.color_mgmt_changed ||
+ intel_cstate->update_pipe)
+ intel_color_commit(intel_cstate);
+
if (intel_cstate->update_pipe)
intel_update_pipe_config(old_intel_cstate, intel_cstate);
else if (INTEL_GEN(dev_priv) >= 9)
@@ -13578,7 +13815,7 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- if (!IS_GEN2(dev_priv))
+ if (!IS_GEN(dev_priv, 2))
intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
if (crtc_state->has_pch_encoder) {
@@ -13702,8 +13939,8 @@ intel_legacy_cursor_update(struct drm_plane *plane,
struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h,
+ u32 src_x, u32 src_y,
+ u32 src_w, u32 src_h,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
@@ -14040,7 +14277,7 @@ static void intel_crtc_init_scalers(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
int i;
- crtc->num_scalers = dev_priv->info.num_scalers[crtc->pipe];
+ crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[crtc->pipe];
if (!crtc->num_scalers)
return;
@@ -14126,7 +14363,7 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
- intel_color_init(&intel_crtc->base);
+ intel_color_init(intel_crtc);
WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
@@ -14177,7 +14414,7 @@ static int intel_encoder_clones(struct intel_encoder *encoder)
return index_mask;
}
-static bool has_edp_a(struct drm_i915_private *dev_priv)
+static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
{
if (!IS_MOBILE(dev_priv))
return false;
@@ -14185,13 +14422,13 @@ static bool has_edp_a(struct drm_i915_private *dev_priv)
if ((I915_READ(DP_A) & DP_DETECTED) == 0)
return false;
- if (IS_GEN5(dev_priv) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
+ if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
return false;
return true;
}
-static bool intel_crt_present(struct drm_i915_private *dev_priv)
+static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
{
if (INTEL_GEN(dev_priv) >= 9)
return false;
@@ -14199,15 +14436,12 @@ static bool intel_crt_present(struct drm_i915_private *dev_priv)
if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
return false;
- if (IS_CHERRYVIEW(dev_priv))
- return false;
-
if (HAS_PCH_LPT_H(dev_priv) &&
I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
return false;
/* DDI E can't be used if DDI A requires 4 lanes */
- if (HAS_DDI(dev_priv) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
+ if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
return false;
if (!dev_priv->vbt.int_crt_support)
@@ -14262,23 +14496,21 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
if (!HAS_DISPLAY(dev_priv))
return;
- /*
- * intel_edp_init_connector() depends on this completing first, to
- * prevent the registeration of both eDP and LVDS and the incorrect
- * sharing of the PPS.
- */
- intel_lvds_init(dev_priv);
-
- if (intel_crt_present(dev_priv))
- intel_crt_init(dev_priv);
-
if (IS_ICELAKE(dev_priv)) {
intel_ddi_init(dev_priv, PORT_A);
intel_ddi_init(dev_priv, PORT_B);
intel_ddi_init(dev_priv, PORT_C);
intel_ddi_init(dev_priv, PORT_D);
intel_ddi_init(dev_priv, PORT_E);
- intel_ddi_init(dev_priv, PORT_F);
+ /*
+ * On some ICL SKUs port F is not present. No strap bits for
+ * this, so rely on VBT.
+ * Work around broken VBTs on SKUs known to have no port F.
+ */
+ if (IS_ICL_WITH_PORT_F(dev_priv) &&
+ intel_bios_is_port_present(dev_priv, PORT_F))
+ intel_ddi_init(dev_priv, PORT_F);
+
icl_dsi_init(dev_priv);
} else if (IS_GEN9_LP(dev_priv)) {
/*
@@ -14294,6 +14526,9 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
} else if (HAS_DDI(dev_priv)) {
int found;
+ if (intel_ddi_crt_present(dev_priv))
+ intel_crt_init(dev_priv);
+
/*
* Haswell uses DDI functions to detect digital outputs.
* On SKL pre-D0 the strap isn't connected, so we assume
@@ -14320,16 +14555,23 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
* On SKL we don't have a way to detect DDI-E so we rely on VBT.
*/
if (IS_GEN9_BC(dev_priv) &&
- (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
- dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
- dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
+ intel_bios_is_port_present(dev_priv, PORT_E))
intel_ddi_init(dev_priv, PORT_E);
} else if (HAS_PCH_SPLIT(dev_priv)) {
int found;
+
+ /*
+ * intel_edp_init_connector() depends on this completing first,
+ * to prevent the registration of both eDP and LVDS and the
+ * incorrect sharing of the PPS.
+ */
+ intel_lvds_init(dev_priv);
+ intel_crt_init(dev_priv);
+
dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
- if (has_edp_a(dev_priv))
+ if (ilk_has_edp_a(dev_priv))
intel_dp_init(dev_priv, DP_A, PORT_A);
if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
@@ -14355,6 +14597,9 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
bool has_edp, has_port;
+ if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
+ intel_crt_init(dev_priv);
+
/*
* The DP_DETECTED bit is the latched state of the DDC
* SDA pin at boot. However since eDP doesn't require DDC
@@ -14397,9 +14642,17 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
}
vlv_dsi_init(dev_priv);
- } else if (!IS_GEN2(dev_priv) && !IS_PINEVIEW(dev_priv)) {
+ } else if (IS_PINEVIEW(dev_priv)) {
+ intel_lvds_init(dev_priv);
+ intel_crt_init(dev_priv);
+ } else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
bool found = false;
+ if (IS_MOBILE(dev_priv))
+ intel_lvds_init(dev_priv);
+
+ intel_crt_init(dev_priv);
+
if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
DRM_DEBUG_KMS("probing SDVOB\n");
found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
@@ -14431,11 +14684,16 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
intel_dp_init(dev_priv, DP_D, PORT_D);
- } else if (IS_GEN2(dev_priv))
- intel_dvo_init(dev_priv);
- if (SUPPORTS_TV(dev_priv))
- intel_tv_init(dev_priv);
+ if (SUPPORTS_TV(dev_priv))
+ intel_tv_init(dev_priv);
+ } else if (IS_GEN(dev_priv, 2)) {
+ if (IS_I85X(dev_priv))
+ intel_lvds_init(dev_priv);
+
+ intel_crt_init(dev_priv);
+ intel_dvo_init(dev_priv);
+ }
intel_psr_init(dev_priv);
@@ -14602,14 +14860,6 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
- if (fb->format->format == DRM_FORMAT_NV12 &&
- (fb->width < SKL_MIN_YUV_420_SRC_W ||
- fb->height < SKL_MIN_YUV_420_SRC_H ||
- (fb->width % 4) != 0 || (fb->height % 4) != 0)) {
- DRM_DEBUG_KMS("src dimensions not correct for NV12\n");
- goto err;
- }
-
for (i = 0; i < fb->format->num_planes; i++) {
u32 stride_alignment;
@@ -14629,7 +14879,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
* require the entire fb to accommodate that to avoid
* potential runtime errors at plane configuration time.
*/
- if (IS_GEN9(dev_priv) && i == 0 && fb->width > 3840 &&
+ if (IS_GEN(dev_priv, 9) && i == 0 && fb->width > 3840 &&
is_ccs_modifier(fb->modifier))
stride_alignment *= 4;
@@ -14834,7 +15084,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
dev_priv->display.crtc_enable = i9xx_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
- } else if (!IS_GEN2(dev_priv)) {
+ } else if (!IS_GEN(dev_priv, 2)) {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
dev_priv->display.get_initial_plane_config =
i9xx_get_initial_plane_config;
@@ -14850,9 +15100,9 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.crtc_disable = i9xx_crtc_disable;
}
- if (IS_GEN5(dev_priv)) {
+ if (IS_GEN(dev_priv, 5)) {
dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
- } else if (IS_GEN6(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 6)) {
dev_priv->display.fdi_link_train = gen6_fdi_link_train;
} else if (IS_IVYBRIDGE(dev_priv)) {
/* FIXME: detect B0+ stepping and use auto training */
@@ -14945,7 +15195,7 @@ retry:
* intermediate watermarks (since we don't trust the current
* watermarks).
*/
- if (!HAS_GMCH_DISPLAY(dev_priv))
+ if (!HAS_GMCH(dev_priv))
intel_state->skip_intermediate_wm = true;
ret = intel_atomic_check(dev, state);
@@ -14984,12 +15234,12 @@ fail:
static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
{
- if (IS_GEN5(dev_priv)) {
+ if (IS_GEN(dev_priv, 5)) {
u32 fdi_pll_clk =
I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
- } else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
dev_priv->fdi_pll_freq = 270000;
} else {
return;
@@ -15105,10 +15355,10 @@ int intel_modeset_init(struct drm_device *dev)
}
/* maximum framebuffer dimensions */
- if (IS_GEN2(dev_priv)) {
+ if (IS_GEN(dev_priv, 2)) {
dev->mode_config.max_width = 2048;
dev->mode_config.max_height = 2048;
- } else if (IS_GEN3(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 3)) {
dev->mode_config.max_width = 4096;
dev->mode_config.max_height = 4096;
} else {
@@ -15119,7 +15369,7 @@ int intel_modeset_init(struct drm_device *dev)
if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512;
dev->mode_config.cursor_height = 1023;
- } else if (IS_GEN2(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 2)) {
dev->mode_config.cursor_width = 64;
dev->mode_config.cursor_height = 64;
} else {
@@ -15186,7 +15436,7 @@ int intel_modeset_init(struct drm_device *dev)
* Note that we need to do this after reconstructing the BIOS fb's
* since the watermark calculation done here will use pstate->fb.
*/
- if (!HAS_GMCH_DISPLAY(dev_priv))
+ if (!HAS_GMCH(dev_priv))
sanitize_watermarks(dev);
/*
@@ -15379,6 +15629,15 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
plane->base.type != DRM_PLANE_TYPE_PRIMARY)
intel_plane_disable_noatomic(crtc, plane);
}
+
+ /*
+ * Disable any background color set by the BIOS, but enable the
+ * gamma and CSC to match how we program our planes.
+ */
+ if (INTEL_GEN(dev_priv) >= 9)
+ I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
+ SKL_BOTTOM_COLOR_GAMMA_ENABLE |
+ SKL_BOTTOM_COLOR_CSC_ENABLE);
}
/* Adjust the state of the output pipe according to whether we
@@ -15386,7 +15645,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
if (crtc_state->base.active && !intel_crtc_has_encoders(crtc))
intel_crtc_disable_noatomic(&crtc->base, ctx);
- if (crtc_state->base.active || HAS_GMCH_DISPLAY(dev_priv)) {
+ if (crtc_state->base.active || HAS_GMCH(dev_priv)) {
/*
* We start out with underrun reporting disabled to avoid races.
* For correct bookkeeping mark this on active crtcs.
@@ -15429,7 +15688,7 @@ static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
* without several WARNs, but for now let's take the easy
* road.
*/
- return IS_GEN6(dev_priv) &&
+ return IS_GEN(dev_priv, 6) &&
crtc_state->base.active &&
crtc_state->shared_dpll &&
crtc_state->port_clock == 0;
@@ -15514,19 +15773,25 @@ void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
void i915_redisable_vga(struct drm_i915_private *dev_priv)
{
- /* This function can be called both from intel_modeset_setup_hw_state or
+ intel_wakeref_t wakeref;
+
+ /*
+ * This function can be called either from intel_modeset_setup_hw_state or
* at a very early point in our resume sequence, where the power well
* structures are not yet restored. Since this function is at a very
* paranoid "someone might have enabled VGA while we were not looking"
* level, just check if the power well is enabled instead of trying to
* follow the "don't touch the power well if we don't need it" policy
- * the rest of the driver uses. */
- if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
+ * the rest of the driver uses.
+ */
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ POWER_DOMAIN_VGA);
+ if (!wakeref)
return;
i915_redisable_vga_power_on(dev_priv);
- intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_VGA, wakeref);
}
/* FIXME read out full plane state for all planes */
@@ -15826,12 +16091,13 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc;
struct intel_crtc_state *crtc_state;
struct intel_encoder *encoder;
+ struct intel_crtc *crtc;
+ intel_wakeref_t wakeref;
int i;
- intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
intel_early_display_was(dev_priv);
intel_modeset_readout_hw_state(dev);
@@ -15847,10 +16113,12 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
* waits, so we need vblank interrupts restored beforehand.
*/
for_each_intel_crtc(&dev_priv->drm, crtc) {
+ crtc_state = to_intel_crtc_state(crtc->base.state);
+
drm_crtc_vblank_reset(&crtc->base);
- if (crtc->base.state->active)
- drm_crtc_vblank_on(&crtc->base);
+ if (crtc_state->base.active)
+ intel_crtc_vblank_on(crtc_state);
}
intel_sanitize_plane_mapping(dev_priv);
@@ -15881,15 +16149,15 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
}
if (IS_G4X(dev_priv)) {
- g4x_wm_get_hw_state(dev);
+ g4x_wm_get_hw_state(dev_priv);
g4x_wm_sanitize(dev_priv);
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- vlv_wm_get_hw_state(dev);
+ vlv_wm_get_hw_state(dev_priv);
vlv_wm_sanitize(dev_priv);
} else if (INTEL_GEN(dev_priv) >= 9) {
- skl_wm_get_hw_state(dev);
+ skl_wm_get_hw_state(dev_priv);
} else if (HAS_PCH_SPLIT(dev_priv)) {
- ilk_wm_get_hw_state(dev);
+ ilk_wm_get_hw_state(dev_priv);
}
for_each_intel_crtc(dev, crtc) {
@@ -15901,7 +16169,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
modeset_put_power_domains(dev_priv, put_domains);
}
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
intel_fbc_init_pipe_state(dev_priv);
}
@@ -16124,7 +16392,7 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
error->pipe[i].source = I915_READ(PIPESRC(i));
- if (HAS_GMCH_DISPLAY(dev_priv))
+ if (HAS_GMCH(dev_priv))
error->pipe[i].stat = I915_READ(PIPESTAT(i));
}
diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h
index 79203666fc62..2220588e86ac 100644
--- a/drivers/gpu/drm/i915/intel_display.h
+++ b/drivers/gpu/drm/i915/intel_display.h
@@ -122,7 +122,7 @@ enum i9xx_plane_id {
};
#define plane_name(p) ((p) + 'A')
-#define sprite_name(p, s) ((p) * INTEL_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')
+#define sprite_name(p, s) ((p) * RUNTIME_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')
/*
* Per-pipe plane identifier.
@@ -297,12 +297,12 @@ struct intel_link_m_n {
#define for_each_universal_plane(__dev_priv, __pipe, __p) \
for ((__p) = 0; \
- (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
+ (__p) < RUNTIME_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
(__p)++)
#define for_each_sprite(__dev_priv, __p, __s) \
for ((__s) = 0; \
- (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)]; \
+ (__s) < RUNTIME_INFO(__dev_priv)->num_sprites[(__p)]; \
(__s)++)
#define for_each_port_masked(__port, __ports_mask) \
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 22a74608c6e4..cf709835fb9a 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -32,13 +32,12 @@
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <asm/byteorder.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_hdcp.h>
+#include <drm/drm_probe_helper.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
@@ -346,7 +345,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
if (INTEL_GEN(dev_priv) >= 10) {
source_rates = cnl_rates;
size = ARRAY_SIZE(cnl_rates);
- if (IS_GEN10(dev_priv))
+ if (IS_GEN(dev_priv, 10))
max_rate = cnl_max_source_rate(intel_dp);
else
max_rate = icl_max_source_rate(intel_dp);
@@ -430,7 +429,7 @@ static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
}
static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
- uint8_t lane_count)
+ u8 lane_count)
{
/*
* FIXME: we need to synchronize the current link parameters with
@@ -450,7 +449,7 @@ static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
int link_rate,
- uint8_t lane_count)
+ u8 lane_count)
{
const struct drm_display_mode *fixed_mode =
intel_dp->attached_connector->panel.fixed_mode;
@@ -465,7 +464,7 @@ static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
}
int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
- int link_rate, uint8_t lane_count)
+ int link_rate, u8 lane_count)
{
int index;
@@ -573,19 +572,19 @@ intel_dp_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
+u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
{
- int i;
- uint32_t v = 0;
+ int i;
+ u32 v = 0;
if (src_bytes > 4)
src_bytes = 4;
for (i = 0; i < src_bytes; i++)
- v |= ((uint32_t) src[i]) << ((3-i) * 8);
+ v |= ((u32)src[i]) << ((3 - i) * 8);
return v;
}
-static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
+static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
{
int i;
if (dst_bytes > 4)
@@ -602,30 +601,39 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
static void
intel_dp_pps_init(struct intel_dp *intel_dp);
-static void pps_lock(struct intel_dp *intel_dp)
+static intel_wakeref_t
+pps_lock(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ intel_wakeref_t wakeref;
/*
* See intel_power_sequencer_reset() for why we need
* a power domain reference here.
*/
- intel_display_power_get(dev_priv,
- intel_aux_power_domain(dp_to_dig_port(intel_dp)));
+ wakeref = intel_display_power_get(dev_priv,
+ intel_aux_power_domain(dp_to_dig_port(intel_dp)));
mutex_lock(&dev_priv->pps_mutex);
+
+ return wakeref;
}
-static void pps_unlock(struct intel_dp *intel_dp)
+static intel_wakeref_t
+pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
mutex_unlock(&dev_priv->pps_mutex);
-
intel_display_power_put(dev_priv,
- intel_aux_power_domain(dp_to_dig_port(intel_dp)));
+ intel_aux_power_domain(dp_to_dig_port(intel_dp)),
+ wakeref);
+ return 0;
}
+#define with_pps_lock(dp, wf) \
+ for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf)))
+
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
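
The new with_pps_lock() helper relies on pps_lock() returning a non-zero wakeref and pps_unlock() returning 0, so its for-loop body runs exactly once with dev_priv->pps_mutex and the AUX power reference held. A minimal illustrative usage sketch under those assumptions (example_with_pps() is a hypothetical name, not part of the patch):

/*
 * Illustrative sketch only: the for-loop in with_pps_lock() executes its
 * body once, taking the PPS mutex and an AUX power wakeref on entry and
 * releasing both on exit.
 */
static void example_with_pps(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;

	with_pps_lock(intel_dp, wakeref) {
		/* pps_mutex and the AUX power domain are held here */
		intel_dp_check_edp(intel_dp);
	}
}
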
@@ -635,7 +643,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
bool pll_enabled, release_cl_override = false;
enum dpio_phy phy = DPIO_PHY(pipe);
enum dpio_channel ch = vlv_pipe_to_channel(pipe);
- uint32_t DP;
+ u32 DP;
if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
"skipping pipe %c power sequencer kick due to port %c being active\n",
@@ -974,30 +982,29 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
edp_notifier);
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ intel_wakeref_t wakeref;
if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
return 0;
- pps_lock(intel_dp);
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
- i915_reg_t pp_ctrl_reg, pp_div_reg;
- u32 pp_div;
-
- pp_ctrl_reg = PP_CONTROL(pipe);
- pp_div_reg = PP_DIVISOR(pipe);
- pp_div = I915_READ(pp_div_reg);
- pp_div &= PP_REFERENCE_DIVIDER_MASK;
-
- /* 0x1F write to PP_DIV_REG sets max cycle delay */
- I915_WRITE(pp_div_reg, pp_div | 0x1F);
- I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
- msleep(intel_dp->panel_power_cycle_delay);
+ with_pps_lock(intel_dp, wakeref) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
+ i915_reg_t pp_ctrl_reg, pp_div_reg;
+ u32 pp_div;
+
+ pp_ctrl_reg = PP_CONTROL(pipe);
+ pp_div_reg = PP_DIVISOR(pipe);
+ pp_div = I915_READ(pp_div_reg);
+ pp_div &= PP_REFERENCE_DIVIDER_MASK;
+
+ /* 0x1F write to PP_DIV_REG sets max cycle delay */
+ I915_WRITE(pp_div_reg, pp_div | 0x1F);
+ I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS);
+ msleep(intel_dp->panel_power_cycle_delay);
+ }
}
- pps_unlock(intel_dp);
-
return 0;
}
@@ -1043,17 +1050,21 @@ intel_dp_check_edp(struct intel_dp *intel_dp)
}
}
-static uint32_t
+static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
- uint32_t status;
+ u32 status;
bool done;
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
msecs_to_jiffies_timeout(10));
+
+ /* just trace the final value */
+ trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
+
if (!done)
DRM_ERROR("dp aux hw did not signal timeout!\n");
#undef C
@@ -1061,7 +1072,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp)
return status;
}
-static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@@ -1075,7 +1086,7 @@ static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
}
-static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@@ -1094,7 +1105,7 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
}
-static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@@ -1111,7 +1122,7 @@ static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
return ilk_get_aux_clock_divider(intel_dp, index);
}
-static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
/*
* SKL doesn't need us to program the AUX clock divider (Hardware will
@@ -1121,16 +1132,16 @@ static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
return index ? 0 : 1;
}
-static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
- int send_bytes,
- uint32_t aux_clock_divider)
+static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
+ int send_bytes,
+ u32 aux_clock_divider)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv =
to_i915(intel_dig_port->base.base.dev);
- uint32_t precharge, timeout;
+ u32 precharge, timeout;
- if (IS_GEN6(dev_priv))
+ if (IS_GEN(dev_priv, 6))
precharge = 3;
else
precharge = 5;
@@ -1151,12 +1162,12 @@ static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}
-static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
- int send_bytes,
- uint32_t unused)
+static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
+ int send_bytes,
+ u32 unused)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- uint32_t ret;
+ u32 ret;
ret = DP_AUX_CH_CTL_SEND_BUSY |
DP_AUX_CH_CTL_DONE |
@@ -1176,25 +1187,26 @@ static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
static int
intel_dp_aux_xfer(struct intel_dp *intel_dp,
- const uint8_t *send, int send_bytes,
- uint8_t *recv, int recv_size,
+ const u8 *send, int send_bytes,
+ u8 *recv, int recv_size,
u32 aux_send_ctl_flags)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv =
to_i915(intel_dig_port->base.base.dev);
i915_reg_t ch_ctl, ch_data[5];
- uint32_t aux_clock_divider;
+ u32 aux_clock_divider;
+ intel_wakeref_t wakeref;
int i, ret, recv_bytes;
- uint32_t status;
int try, clock = 0;
+ u32 status;
bool vdd;
ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
for (i = 0; i < ARRAY_SIZE(ch_data); i++)
ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);
- pps_lock(intel_dp);
+ wakeref = pps_lock(intel_dp);
/*
* We will be called with VDD already enabled for dpcd/edid/oui reads.
@@ -1219,6 +1231,8 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
break;
msleep(1);
}
+ /* just trace the final value */
+ trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
if (try == 3) {
static u32 last_status = -1;
@@ -1338,7 +1352,7 @@ out:
if (vdd)
edp_panel_vdd_off(intel_dp, false);
- pps_unlock(intel_dp);
+ pps_unlock(intel_dp, wakeref);
return ret;
}
@@ -1360,7 +1374,7 @@ static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
- uint8_t txbuf[20], rxbuf[20];
+ u8 txbuf[20], rxbuf[20];
size_t txsize, rxsize;
int ret;
@@ -1693,7 +1707,7 @@ int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
}
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
- uint8_t *link_bw, uint8_t *rate_select)
+ u8 *link_bw, u8 *rate_select)
{
/* eDP 1.4 rate select method. */
if (intel_dp->use_rate_select) {
@@ -1810,7 +1824,7 @@ intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
}
/* Optimize link config in order: max bpp, min clock, min lanes */
-static bool
+static int
intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config,
const struct link_config_limits *limits)
@@ -1836,17 +1850,17 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
pipe_config->pipe_bpp = bpp;
pipe_config->port_clock = link_clock;
- return true;
+ return 0;
}
}
}
}
- return false;
+ return -EINVAL;
}
/* Optimize link config in order: max bpp, min lanes, min clock */
-static bool
+static int
intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config,
const struct link_config_limits *limits)
@@ -1872,13 +1886,13 @@ intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
pipe_config->pipe_bpp = bpp;
pipe_config->port_clock = link_clock;
- return true;
+ return 0;
}
}
}
}
- return false;
+ return -EINVAL;
}
static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
@@ -1896,19 +1910,20 @@ static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
return 0;
}
-static bool intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state,
- struct link_config_limits *limits)
+static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state,
+ struct link_config_limits *limits)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
u8 dsc_max_bpc;
int pipe_bpp;
+ int ret;
if (!intel_dp_supports_dsc(intel_dp, pipe_config))
- return false;
+ return -EINVAL;
dsc_max_bpc = min_t(u8, DP_DSC_MAX_SUPPORTED_BPC,
conn_state->max_requested_bpc);
@@ -1916,7 +1931,7 @@ static bool intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);
if (pipe_bpp < DP_DSC_MIN_SUPPORTED_BPC * 3) {
DRM_DEBUG_KMS("No DSC support for less than 8bpc\n");
- return false;
+ return -EINVAL;
}
/*
@@ -1950,7 +1965,7 @@ static bool intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
adjusted_mode->crtc_hdisplay);
if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
DRM_DEBUG_KMS("Compressed BPP/Slice Count not supported\n");
- return false;
+ return -EINVAL;
}
pipe_config->dsc_params.compressed_bpp = min_t(u16,
dsc_max_output_bpp >> 4,
@@ -1967,16 +1982,19 @@ static bool intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
pipe_config->dsc_params.dsc_split = true;
} else {
DRM_DEBUG_KMS("Cannot split stream to use 2 VDSC instances\n");
- return false;
+ return -EINVAL;
}
}
- if (intel_dp_compute_dsc_params(intel_dp, pipe_config) < 0) {
+
+ ret = intel_dp_compute_dsc_params(intel_dp, pipe_config);
+ if (ret < 0) {
DRM_DEBUG_KMS("Cannot compute valid DSC parameters for Input Bpp = %d "
"Compressed BPP = %d\n",
pipe_config->pipe_bpp,
pipe_config->dsc_params.compressed_bpp);
- return false;
+ return ret;
}
+
pipe_config->dsc_params.compression_enable = true;
DRM_DEBUG_KMS("DP DSC computed with Input Bpp = %d "
"Compressed Bpp = %d Slice Count = %d\n",
@@ -1984,10 +2002,10 @@ static bool intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
pipe_config->dsc_params.compressed_bpp,
pipe_config->dsc_params.slice_count);
- return true;
+ return 0;
}
-static bool
+static int
intel_dp_compute_link_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
@@ -1996,7 +2014,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct link_config_limits limits;
int common_len;
- bool ret;
+ int ret;
common_len = intel_dp_common_len_rate_limit(intel_dp,
intel_dp->max_link_rate);
@@ -2053,10 +2071,12 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
&limits);
/* enable compression if the mode doesn't fit available BW */
- if (!ret) {
- if (!intel_dp_dsc_compute_config(intel_dp, pipe_config,
- conn_state, &limits))
- return false;
+ DRM_DEBUG_KMS("Force DSC en = %d\n", intel_dp->force_dsc_en);
+ if (ret || intel_dp->force_dsc_en) {
+ ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
+ conn_state, &limits);
+ if (ret < 0)
+ return ret;
}
if (pipe_config->dsc_params.compression_enable) {
@@ -2081,10 +2101,10 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
intel_dp_max_data_rate(pipe_config->port_clock,
pipe_config->lane_count));
}
- return true;
+ return 0;
}
-bool
+int
intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
@@ -2100,6 +2120,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
to_intel_digital_connector_state(conn_state);
bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
DP_DPCD_QUIRK_CONSTANT_N);
+ int ret;
if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
pipe_config->has_pch_encoder = true;
@@ -2121,14 +2142,12 @@ intel_dp_compute_config(struct intel_encoder *encoder,
adjusted_mode);
if (INTEL_GEN(dev_priv) >= 9) {
- int ret;
-
ret = skl_update_scaler_crtc(pipe_config);
if (ret)
return ret;
}
- if (HAS_GMCH_DISPLAY(dev_priv))
+ if (HAS_GMCH(dev_priv))
intel_gmch_panel_fitting(intel_crtc, pipe_config,
conn_state->scaling_mode);
else
@@ -2137,20 +2156,21 @@ intel_dp_compute_config(struct intel_encoder *encoder,
}
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return false;
+ return -EINVAL;
- if (HAS_GMCH_DISPLAY(dev_priv) &&
+ if (HAS_GMCH(dev_priv) &&
adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
- return false;
+ return -EINVAL;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
- return false;
+ return -EINVAL;
pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
intel_dp_supports_fec(intel_dp, pipe_config);
- if (!intel_dp_compute_link_config(encoder, pipe_config, conn_state))
- return false;
+ ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
+ if (ret < 0)
+ return ret;
if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
/*
@@ -2198,11 +2218,11 @@ intel_dp_compute_config(struct intel_encoder *encoder,
intel_psr_compute_config(intel_dp, pipe_config);
- return true;
+ return 0;
}
void intel_dp_set_link_params(struct intel_dp *intel_dp,
- int link_rate, uint8_t lane_count,
+ int link_rate, u8 lane_count,
bool link_mst)
{
intel_dp->link_trained = false;
@@ -2464,15 +2484,15 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
*/
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
+ intel_wakeref_t wakeref;
bool vdd;
if (!intel_dp_is_edp(intel_dp))
return;
- pps_lock(intel_dp);
- vdd = edp_panel_vdd_on(intel_dp);
- pps_unlock(intel_dp);
-
+ vdd = false;
+ with_pps_lock(intel_dp, wakeref)
+ vdd = edp_panel_vdd_on(intel_dp);
I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
port_name(dp_to_dig_port(intel_dp)->base.port));
}
@@ -2511,19 +2531,21 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
if ((pp & PANEL_POWER_ON) == 0)
intel_dp->panel_power_off_time = ktime_get_boottime();
- intel_display_power_put(dev_priv,
- intel_aux_power_domain(intel_dig_port));
+ intel_display_power_put_unchecked(dev_priv,
+ intel_aux_power_domain(intel_dig_port));
}
static void edp_panel_vdd_work(struct work_struct *__work)
{
- struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
- struct intel_dp, panel_vdd_work);
+ struct intel_dp *intel_dp =
+ container_of(to_delayed_work(__work),
+ struct intel_dp, panel_vdd_work);
+ intel_wakeref_t wakeref;
- pps_lock(intel_dp);
- if (!intel_dp->want_panel_vdd)
- edp_panel_vdd_off_sync(intel_dp);
- pps_unlock(intel_dp);
+ with_pps_lock(intel_dp, wakeref) {
+ if (!intel_dp->want_panel_vdd)
+ edp_panel_vdd_off_sync(intel_dp);
+ }
}
static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
@@ -2587,7 +2609,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
pp = ironlake_get_pp_control(intel_dp);
- if (IS_GEN5(dev_priv)) {
+ if (IS_GEN(dev_priv, 5)) {
/* ILK workaround: disable reset around power sequence */
pp &= ~PANEL_POWER_RESET;
I915_WRITE(pp_ctrl_reg, pp);
@@ -2595,7 +2617,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
}
pp |= PANEL_POWER_ON;
- if (!IS_GEN5(dev_priv))
+ if (!IS_GEN(dev_priv, 5))
pp |= PANEL_POWER_RESET;
I915_WRITE(pp_ctrl_reg, pp);
@@ -2604,7 +2626,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
wait_panel_on(intel_dp);
intel_dp->last_power_on = jiffies;
- if (IS_GEN5(dev_priv)) {
+ if (IS_GEN(dev_priv, 5)) {
pp |= PANEL_POWER_RESET; /* restore panel reset bit */
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
@@ -2613,12 +2635,13 @@ static void edp_panel_on(struct intel_dp *intel_dp)
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
+ intel_wakeref_t wakeref;
+
if (!intel_dp_is_edp(intel_dp))
return;
- pps_lock(intel_dp);
- edp_panel_on(intel_dp);
- pps_unlock(intel_dp);
+ with_pps_lock(intel_dp, wakeref)
+ edp_panel_on(intel_dp);
}
@@ -2657,25 +2680,25 @@ static void edp_panel_off(struct intel_dp *intel_dp)
intel_dp->panel_power_off_time = ktime_get_boottime();
/* We got a reference when we enabled the VDD. */
- intel_display_power_put(dev_priv, intel_aux_power_domain(dig_port));
+ intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port));
}
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
+ intel_wakeref_t wakeref;
+
if (!intel_dp_is_edp(intel_dp))
return;
- pps_lock(intel_dp);
- edp_panel_off(intel_dp);
- pps_unlock(intel_dp);
+ with_pps_lock(intel_dp, wakeref)
+ edp_panel_off(intel_dp);
}
/* Enable backlight in the panel power control. */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 pp;
- i915_reg_t pp_ctrl_reg;
+ intel_wakeref_t wakeref;
/*
* If we enable the backlight right away following a panel power
@@ -2685,17 +2708,16 @@ static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
*/
wait_backlight_on(intel_dp);
- pps_lock(intel_dp);
+ with_pps_lock(intel_dp, wakeref) {
+ i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+ u32 pp;
- pp = ironlake_get_pp_control(intel_dp);
- pp |= EDP_BLC_ENABLE;
-
- pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
-
- I915_WRITE(pp_ctrl_reg, pp);
- POSTING_READ(pp_ctrl_reg);
+ pp = ironlake_get_pp_control(intel_dp);
+ pp |= EDP_BLC_ENABLE;
- pps_unlock(intel_dp);
+ I915_WRITE(pp_ctrl_reg, pp);
+ POSTING_READ(pp_ctrl_reg);
+ }
}
/* Enable backlight PWM and backlight PP control. */
@@ -2717,23 +2739,21 @@ void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 pp;
- i915_reg_t pp_ctrl_reg;
+ intel_wakeref_t wakeref;
if (!intel_dp_is_edp(intel_dp))
return;
- pps_lock(intel_dp);
+ with_pps_lock(intel_dp, wakeref) {
+ i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+ u32 pp;
- pp = ironlake_get_pp_control(intel_dp);
- pp &= ~EDP_BLC_ENABLE;
-
- pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
-
- I915_WRITE(pp_ctrl_reg, pp);
- POSTING_READ(pp_ctrl_reg);
+ pp = ironlake_get_pp_control(intel_dp);
+ pp &= ~EDP_BLC_ENABLE;
- pps_unlock(intel_dp);
+ I915_WRITE(pp_ctrl_reg, pp);
+ POSTING_READ(pp_ctrl_reg);
+ }
intel_dp->last_backlight_off = jiffies;
edp_wait_backlight_off(intel_dp);
@@ -2761,12 +2781,12 @@ static void intel_edp_backlight_power(struct intel_connector *connector,
bool enable)
{
struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
+ intel_wakeref_t wakeref;
bool is_enabled;
- pps_lock(intel_dp);
- is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
- pps_unlock(intel_dp);
-
+ is_enabled = false;
+ with_pps_lock(intel_dp, wakeref)
+ is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
if (is_enabled == enable)
return;
@@ -2833,7 +2853,7 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
* 1. Wait for the start of vertical blank on the enabled pipe going to FDI
* 2. Program DP PLL enable
*/
- if (IS_GEN5(dev_priv))
+ if (IS_GEN(dev_priv, 5))
intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);
intel_dp->DP |= DP_PLL_ENABLE;
@@ -2983,16 +3003,18 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ intel_wakeref_t wakeref;
bool ret;
- if (!intel_display_power_get_if_enabled(dev_priv,
- encoder->power_domain))
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ encoder->power_domain);
+ if (!wakeref)
return false;
ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
encoder->port, pipe);
- intel_display_power_put(dev_priv, encoder->power_domain);
+ intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
return ret;
}
@@ -3160,20 +3182,20 @@ static void chv_post_disable_dp(struct intel_encoder *encoder,
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
- uint32_t *DP,
- uint8_t dp_train_pat)
+ u32 *DP,
+ u8 dp_train_pat)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum port port = intel_dig_port->base.port;
- uint8_t train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
+ u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
if (dp_train_pat & train_pat_mask)
DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
dp_train_pat & train_pat_mask);
if (HAS_DDI(dev_priv)) {
- uint32_t temp = I915_READ(DP_TP_CTL(port));
+ u32 temp = I915_READ(DP_TP_CTL(port));
if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
@@ -3272,24 +3294,23 @@ static void intel_enable_dp(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
- uint32_t dp_reg = I915_READ(intel_dp->output_reg);
+ u32 dp_reg = I915_READ(intel_dp->output_reg);
enum pipe pipe = crtc->pipe;
+ intel_wakeref_t wakeref;
if (WARN_ON(dp_reg & DP_PORT_EN))
return;
- pps_lock(intel_dp);
+ with_pps_lock(intel_dp, wakeref) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ vlv_init_panel_power_sequencer(encoder, pipe_config);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- vlv_init_panel_power_sequencer(encoder, pipe_config);
+ intel_dp_enable_port(intel_dp, pipe_config);
- intel_dp_enable_port(intel_dp, pipe_config);
-
- edp_panel_vdd_on(intel_dp);
- edp_panel_on(intel_dp);
- edp_panel_vdd_off(intel_dp, true);
-
- pps_unlock(intel_dp);
+ edp_panel_vdd_on(intel_dp);
+ edp_panel_on(intel_dp);
+ edp_panel_vdd_off(intel_dp, true);
+ }
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
unsigned int lane_mask = 0x0;
@@ -3492,14 +3513,14 @@ static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
* link status information
*/
bool
-intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
+intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE])
{
return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
}
/* These are source-specific values. */
-uint8_t
+u8
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@@ -3518,8 +3539,8 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}
-uint8_t
-intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
+u8
+intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
@@ -3564,12 +3585,12 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
}
}
-static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
+static u32 vlv_signal_levels(struct intel_dp *intel_dp)
{
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
unsigned long demph_reg_value, preemph_reg_value,
uniqtranscale_reg_value;
- uint8_t train_set = intel_dp->train_set[0];
+ u8 train_set = intel_dp->train_set[0];
switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
case DP_TRAIN_PRE_EMPH_LEVEL_0:
@@ -3650,12 +3671,12 @@ static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
return 0;
}
-static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
+static u32 chv_signal_levels(struct intel_dp *intel_dp)
{
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
u32 deemph_reg_value, margin_reg_value;
bool uniq_trans_scale = false;
- uint8_t train_set = intel_dp->train_set[0];
+ u8 train_set = intel_dp->train_set[0];
switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
case DP_TRAIN_PRE_EMPH_LEVEL_0:
@@ -3733,10 +3754,10 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
return 0;
}
-static uint32_t
-g4x_signal_levels(uint8_t train_set)
+static u32
+g4x_signal_levels(u8 train_set)
{
- uint32_t signal_levels = 0;
+ u32 signal_levels = 0;
switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
@@ -3772,8 +3793,8 @@ g4x_signal_levels(uint8_t train_set)
}
/* SNB CPU eDP voltage swing and pre-emphasis control */
-static uint32_t
-snb_cpu_edp_signal_levels(uint8_t train_set)
+static u32
+snb_cpu_edp_signal_levels(u8 train_set)
{
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
@@ -3800,8 +3821,8 @@ snb_cpu_edp_signal_levels(uint8_t train_set)
}
/* IVB CPU eDP voltage swing and pre-emphasis control */
-static uint32_t
-ivb_cpu_edp_signal_levels(uint8_t train_set)
+static u32
+ivb_cpu_edp_signal_levels(u8 train_set)
{
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
@@ -3836,8 +3857,8 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum port port = intel_dig_port->base.port;
- uint32_t signal_levels, mask = 0;
- uint8_t train_set = intel_dp->train_set[0];
+ u32 signal_levels, mask = 0;
+ u8 train_set = intel_dp->train_set[0];
if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
signal_levels = bxt_signal_levels(intel_dp);
@@ -3851,7 +3872,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp)
} else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
signal_levels = ivb_cpu_edp_signal_levels(train_set);
mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
- } else if (IS_GEN6(dev_priv) && port == PORT_A) {
+ } else if (IS_GEN(dev_priv, 6) && port == PORT_A) {
signal_levels = snb_cpu_edp_signal_levels(train_set);
mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
} else {
@@ -3876,7 +3897,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp)
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
- uint8_t dp_train_pat)
+ u8 dp_train_pat)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv =
@@ -3893,7 +3914,7 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum port port = intel_dig_port->base.port;
- uint32_t val;
+ u32 val;
if (!HAS_DDI(dev_priv))
return;
@@ -3928,7 +3949,7 @@ intel_dp_link_down(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
enum port port = encoder->port;
- uint32_t DP = intel_dp->DP;
+ u32 DP = intel_dp->DP;
if (WARN_ON(HAS_DDI(dev_priv)))
return;
@@ -3987,12 +4008,49 @@ intel_dp_link_down(struct intel_encoder *encoder,
intel_dp->DP = DP;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- pps_lock(intel_dp);
- intel_dp->active_pipe = INVALID_PIPE;
- pps_unlock(intel_dp);
+ intel_wakeref_t wakeref;
+
+ with_pps_lock(intel_dp, wakeref)
+ intel_dp->active_pipe = INVALID_PIPE;
}
}
+static void
+intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp)
+{
+ u8 dpcd_ext[6];
+
+ /*
+ * Prior to DP1.3 the bit represented by
+ * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved.
+ * If it is set, DP_DPCD_REV at 0000h could be at a value less than
+ * the true capability of the panel. The only way to check is to
+ * then compare 0000h and 2200h.
+ */
+ if (!(intel_dp->dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
+ DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
+ return;
+
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_DP13_DPCD_REV,
+ &dpcd_ext, sizeof(dpcd_ext)) != sizeof(dpcd_ext)) {
+ DRM_ERROR("DPCD failed read at extended capabilities\n");
+ return;
+ }
+
+ if (intel_dp->dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
+ DRM_DEBUG_KMS("DPCD extended DPCD rev less than base DPCD rev\n");
+ return;
+ }
+
+ if (!memcmp(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext)))
+ return;
+
+ DRM_DEBUG_KMS("Base DPCD: %*ph\n",
+ (int)sizeof(intel_dp->dpcd), intel_dp->dpcd);
+
+ memcpy(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext));
+}
+
bool
intel_dp_read_dpcd(struct intel_dp *intel_dp)
{
@@ -4000,6 +4058,8 @@ intel_dp_read_dpcd(struct intel_dp *intel_dp)
sizeof(intel_dp->dpcd)) < 0)
return false; /* aux transfer failed */
+ intel_dp_extended_receiver_capabilities(intel_dp);
+
DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
return intel_dp->dpcd[DP_DPCD_REV] != 0;
@@ -4230,7 +4290,7 @@ intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
DP_DPRX_ESI_LEN;
}
-u16 intel_dp_dsc_get_output_bpp(int link_clock, uint8_t lane_count,
+u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count,
int mode_clock, int mode_hdisplay)
{
u16 bits_per_pixel, max_bpp_small_joiner_ram;
@@ -4297,7 +4357,7 @@ u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
return 0;
}
/* Also take into account max slice width */
- min_slice_count = min_t(uint8_t, min_slice_count,
+ min_slice_count = min_t(u8, min_slice_count,
DIV_ROUND_UP(mode_hdisplay,
max_slice_width));
@@ -4315,11 +4375,11 @@ u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
return 0;
}
-static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
+static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
int status = 0;
int test_link_rate;
- uint8_t test_lane_count, test_link_bw;
+ u8 test_lane_count, test_link_bw;
/* (DP CTS 1.2)
* 4.3.1.11
*/
@@ -4352,10 +4412,10 @@ static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
return DP_TEST_ACK;
}
-static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
+static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
{
- uint8_t test_pattern;
- uint8_t test_misc;
+ u8 test_pattern;
+ u8 test_misc;
__be16 h_width, v_height;
int status = 0;
@@ -4413,9 +4473,9 @@ static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
return DP_TEST_ACK;
}
-static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
+static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
{
- uint8_t test_result = DP_TEST_ACK;
+ u8 test_result = DP_TEST_ACK;
struct intel_connector *intel_connector = intel_dp->attached_connector;
struct drm_connector *connector = &intel_connector->base;
@@ -4457,16 +4517,16 @@ static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
return test_result;
}
-static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
+static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
{
- uint8_t test_result = DP_TEST_NAK;
+ u8 test_result = DP_TEST_NAK;
return test_result;
}
static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
- uint8_t response = DP_TEST_NAK;
- uint8_t request = 0;
+ u8 response = DP_TEST_NAK;
+ u8 request = 0;
int status;
status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
@@ -4554,12 +4614,10 @@ go_again:
return ret;
} else {
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
intel_dp->is_mst = false;
- drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
- /* send a hotplug event */
- drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
+ drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
+ intel_dp->is_mst);
}
}
return -EINVAL;
@@ -4792,8 +4850,8 @@ static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
- uint8_t *dpcd = intel_dp->dpcd;
- uint8_t type;
+ u8 *dpcd = intel_dp->dpcd;
+ u8 type;
if (lspcon->active)
lspcon_resume(lspcon);
@@ -5030,28 +5088,38 @@ static bool icl_combo_port_connected(struct drm_i915_private *dev_priv,
return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(port);
}
+static const char *tc_type_name(enum tc_port_type type)
+{
+ static const char * const names[] = {
+ [TC_PORT_UNKNOWN] = "unknown",
+ [TC_PORT_LEGACY] = "legacy",
+ [TC_PORT_TYPEC] = "typec",
+ [TC_PORT_TBT] = "tbt",
+ };
+
+ if (WARN_ON(type >= ARRAY_SIZE(names)))
+ type = TC_PORT_UNKNOWN;
+
+ return names[type];
+}
+
static void icl_update_tc_port_type(struct drm_i915_private *dev_priv,
struct intel_digital_port *intel_dig_port,
bool is_legacy, bool is_typec, bool is_tbt)
{
enum port port = intel_dig_port->base.port;
enum tc_port_type old_type = intel_dig_port->tc_type;
- const char *type_str;
WARN_ON(is_legacy + is_typec + is_tbt != 1);
- if (is_legacy) {
+ if (is_legacy)
intel_dig_port->tc_type = TC_PORT_LEGACY;
- type_str = "legacy";
- } else if (is_typec) {
+ else if (is_typec)
intel_dig_port->tc_type = TC_PORT_TYPEC;
- type_str = "typec";
- } else if (is_tbt) {
+ else if (is_tbt)
intel_dig_port->tc_type = TC_PORT_TBT;
- type_str = "tbt";
- } else {
+ else
return;
- }
/* Types are not supposed to be changed at runtime. */
WARN_ON(old_type != TC_PORT_UNKNOWN &&
@@ -5059,12 +5127,9 @@ static void icl_update_tc_port_type(struct drm_i915_private *dev_priv,
if (old_type != intel_dig_port->tc_type)
DRM_DEBUG_KMS("Port %c has TC type %s\n", port_name(port),
- type_str);
+ tc_type_name(intel_dig_port->tc_type));
}
-static void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
- struct intel_digital_port *dig_port);
-
/*
* This function implements the first part of the Connect Flow described by our
* specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
@@ -5099,6 +5164,7 @@ static bool icl_tc_phy_connect(struct drm_i915_private *dev_priv,
val = I915_READ(PORT_TX_DFLEXDPPMS);
if (!(val & DP_PHY_MODE_STATUS_COMPLETED(tc_port))) {
DRM_DEBUG_KMS("DP PHY for TC port %d not ready\n", tc_port);
+ WARN_ON(dig_port->tc_legacy_port);
return false;
}
@@ -5130,8 +5196,8 @@ static bool icl_tc_phy_connect(struct drm_i915_private *dev_priv,
* See the comment at the connect function. This implements the Disconnect
* Flow.
*/
-static void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
- struct intel_digital_port *dig_port)
+void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *dig_port)
{
enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
@@ -5151,6 +5217,10 @@ static void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
}
+ DRM_DEBUG_KMS("Port %c TC type %s disconnected\n",
+ port_name(dig_port->base.port),
+ tc_type_name(dig_port->tc_type));
+
dig_port->tc_type = TC_PORT_UNKNOWN;
}
@@ -5172,7 +5242,14 @@ static bool icl_tc_port_connected(struct drm_i915_private *dev_priv,
bool is_legacy, is_typec, is_tbt;
u32 dpsp;
- is_legacy = I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port);
+ /*
+ * WARN if we got a legacy port HPD, but VBT didn't mark the port as
+ * legacy. Treat the port as legacy from now on.
+ */
+ if (WARN_ON(!intel_dig_port->tc_legacy_port &&
+ I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port)))
+ intel_dig_port->tc_legacy_port = true;
+ is_legacy = intel_dig_port->tc_legacy_port;
/*
* The spec says we shouldn't be using the ISR bits for detecting
@@ -5184,6 +5261,7 @@ static bool icl_tc_port_connected(struct drm_i915_private *dev_priv,
if (!is_legacy && !is_typec && !is_tbt) {
icl_tc_phy_disconnect(dev_priv, intel_dig_port);
+
return false;
}
@@ -5226,7 +5304,7 @@ bool intel_digital_port_connected(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (HAS_GMCH_DISPLAY(dev_priv)) {
+ if (HAS_GMCH(dev_priv)) {
if (IS_GM45(dev_priv))
return gm45_digital_port_connected(encoder);
else
@@ -5235,17 +5313,17 @@ bool intel_digital_port_connected(struct intel_encoder *encoder)
if (INTEL_GEN(dev_priv) >= 11)
return icl_digital_port_connected(encoder);
- else if (IS_GEN10(dev_priv) || IS_GEN9_BC(dev_priv))
+ else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv))
return spt_digital_port_connected(encoder);
else if (IS_GEN9_LP(dev_priv))
return bxt_digital_port_connected(encoder);
- else if (IS_GEN8(dev_priv))
+ else if (IS_GEN(dev_priv, 8))
return bdw_digital_port_connected(encoder);
- else if (IS_GEN7(dev_priv))
+ else if (IS_GEN(dev_priv, 7))
return ivb_digital_port_connected(encoder);
- else if (IS_GEN6(dev_priv))
+ else if (IS_GEN(dev_priv, 6))
return snb_digital_port_connected(encoder);
- else if (IS_GEN5(dev_priv))
+ else if (IS_GEN(dev_priv, 5))
return ilk_digital_port_connected(encoder);
MISSING_CASE(INTEL_GEN(dev_priv));
@@ -5307,12 +5385,13 @@ intel_dp_detect(struct drm_connector *connector,
enum drm_connector_status status;
enum intel_display_power_domain aux_domain =
intel_aux_power_domain(dig_port);
+ intel_wakeref_t wakeref;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
- intel_display_power_get(dev_priv, aux_domain);
+ wakeref = intel_display_power_get(dev_priv, aux_domain);
/* Can't disconnect eDP */
if (intel_dp_is_edp(intel_dp))
@@ -5378,7 +5457,7 @@ intel_dp_detect(struct drm_connector *connector,
ret = intel_dp_retrain_link(encoder, ctx);
if (ret) {
- intel_display_power_put(dev_priv, aux_domain);
+ intel_display_power_put(dev_priv, aux_domain, wakeref);
return ret;
}
}
@@ -5402,7 +5481,7 @@ out:
if (status != connector_status_connected && !intel_dp->is_mst)
intel_dp_unset_edid(intel_dp);
- intel_display_power_put(dev_priv, aux_domain);
+ intel_display_power_put(dev_priv, aux_domain, wakeref);
return status;
}
@@ -5415,6 +5494,7 @@ intel_dp_force(struct drm_connector *connector)
struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
enum intel_display_power_domain aux_domain =
intel_aux_power_domain(dig_port);
+ intel_wakeref_t wakeref;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
@@ -5423,11 +5503,11 @@ intel_dp_force(struct drm_connector *connector)
if (connector->status != connector_status_connected)
return;
- intel_display_power_get(dev_priv, aux_domain);
+ wakeref = intel_display_power_get(dev_priv, aux_domain);
intel_dp_set_edid(intel_dp);
- intel_display_power_put(dev_priv, aux_domain);
+ intel_display_power_put(dev_priv, aux_domain, wakeref);
}
static int intel_dp_get_modes(struct drm_connector *connector)
@@ -5492,21 +5572,22 @@ intel_dp_connector_unregister(struct drm_connector *connector)
intel_connector_unregister(connector);
}
-void intel_dp_encoder_destroy(struct drm_encoder *encoder)
+void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
{
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_dp *intel_dp = &intel_dig_port->dp;
intel_dp_mst_encoder_cleanup(intel_dig_port);
if (intel_dp_is_edp(intel_dp)) {
+ intel_wakeref_t wakeref;
+
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
/*
* vdd might still be enabled due to the delayed vdd off.
* Make sure vdd is actually turned off here.
*/
- pps_lock(intel_dp);
- edp_panel_vdd_off_sync(intel_dp);
- pps_unlock(intel_dp);
+ with_pps_lock(intel_dp, wakeref)
+ edp_panel_vdd_off_sync(intel_dp);
if (intel_dp->edp_notifier.notifier_call) {
unregister_reboot_notifier(&intel_dp->edp_notifier);
@@ -5515,14 +5596,20 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
}
intel_dp_aux_fini(intel_dp);
+}
+
+static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
+{
+ intel_dp_encoder_flush_work(encoder);
drm_encoder_cleanup(encoder);
- kfree(intel_dig_port);
+ kfree(enc_to_dig_port(encoder));
}
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+ intel_wakeref_t wakeref;
if (!intel_dp_is_edp(intel_dp))
return;
@@ -5532,9 +5619,8 @@ void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
* Make sure vdd is actually turned off here.
*/
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
- pps_lock(intel_dp);
- edp_panel_vdd_off_sync(intel_dp);
- pps_unlock(intel_dp);
+ with_pps_lock(intel_dp, wakeref)
+ edp_panel_vdd_off_sync(intel_dp);
}
static
@@ -5547,7 +5633,7 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
.address = DP_AUX_HDCP_AKSV,
.size = DRM_HDCP_KSV_LEN,
};
- uint8_t txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0;
+ u8 txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0;
ssize_t dpcd_ret;
int ret;
@@ -5580,7 +5666,12 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
}
reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK;
- return reply == DP_AUX_NATIVE_REPLY_ACK ? 0 : -EIO;
+ if (reply != DP_AUX_NATIVE_REPLY_ACK) {
+ DRM_DEBUG_KMS("Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n",
+ reply);
+ return -EIO;
+ }
+ return 0;
}
static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
@@ -5810,6 +5901,7 @@ void intel_dp_encoder_reset(struct drm_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
+ intel_wakeref_t wakeref;
if (!HAS_DDI(dev_priv))
intel_dp->DP = I915_READ(intel_dp->output_reg);
@@ -5819,18 +5911,19 @@ void intel_dp_encoder_reset(struct drm_encoder *encoder)
intel_dp->reset_link_params = true;
- pps_lock(intel_dp);
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- intel_dp->active_pipe = vlv_active_pipe(intel_dp);
+ with_pps_lock(intel_dp, wakeref) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ intel_dp->active_pipe = vlv_active_pipe(intel_dp);
- if (intel_dp_is_edp(intel_dp)) {
- /* Reinit the power sequencer, in case BIOS did something with it. */
- intel_dp_pps_init(intel_dp);
- intel_edp_panel_vdd_sanitize(intel_dp);
+ if (intel_dp_is_edp(intel_dp)) {
+ /*
+ * Reinit the power sequencer, in case BIOS did
+ * something nasty with it.
+ */
+ intel_dp_pps_init(intel_dp);
+ intel_edp_panel_vdd_sanitize(intel_dp);
+ }
}
-
- pps_unlock(intel_dp);
}
static const struct drm_connector_funcs intel_dp_connector_funcs = {
@@ -5863,6 +5956,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
enum irqreturn ret = IRQ_NONE;
+ intel_wakeref_t wakeref;
if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
/*
@@ -5885,8 +5979,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
return IRQ_NONE;
}
- intel_display_power_get(dev_priv,
- intel_aux_power_domain(intel_dig_port));
+ wakeref = intel_display_power_get(dev_priv,
+ intel_aux_power_domain(intel_dig_port));
if (intel_dp->is_mst) {
if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
@@ -5916,7 +6010,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
put_power:
intel_display_power_put(dev_priv,
- intel_aux_power_domain(intel_dig_port));
+ intel_aux_power_domain(intel_dig_port),
+ wakeref);
return ret;
}
@@ -5947,7 +6042,7 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
- if (HAS_GMCH_DISPLAY(dev_priv))
+ if (HAS_GMCH(dev_priv))
drm_connector_attach_max_bpc_property(connector, 6, 10);
else if (INTEL_GEN(dev_priv) >= 5)
drm_connector_attach_max_bpc_property(connector, 6, 12);
@@ -5956,7 +6051,7 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
u32 allowed_scalers;
allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
- if (!HAS_GMCH_DISPLAY(dev_priv))
+ if (!HAS_GMCH(dev_priv))
allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);
drm_connector_attach_scaling_mode_property(connector, allowed_scalers);
@@ -6363,8 +6458,8 @@ void intel_edp_drrs_enable(struct intel_dp *intel_dp,
}
mutex_lock(&dev_priv->drrs.mutex);
- if (WARN_ON(dev_priv->drrs.dp)) {
- DRM_ERROR("DRRS already enabled\n");
+ if (dev_priv->drrs.dp) {
+ DRM_DEBUG_KMS("DRRS already enabled\n");
goto unlock;
}
@@ -6624,8 +6719,9 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct drm_display_mode *downclock_mode = NULL;
bool has_dpcd;
struct drm_display_mode *scan;
- struct edid *edid;
enum pipe pipe = INVALID_PIPE;
+ intel_wakeref_t wakeref;
+ struct edid *edid;
if (!intel_dp_is_edp(intel_dp))
return true;
@@ -6645,13 +6741,11 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
return false;
}
- pps_lock(intel_dp);
-
- intel_dp_init_panel_power_timestamps(intel_dp);
- intel_dp_pps_init(intel_dp);
- intel_edp_panel_vdd_sanitize(intel_dp);
-
- pps_unlock(intel_dp);
+ with_pps_lock(intel_dp, wakeref) {
+ intel_dp_init_panel_power_timestamps(intel_dp);
+ intel_dp_pps_init(intel_dp);
+ intel_edp_panel_vdd_sanitize(intel_dp);
+ }
/* Cache DPCD and EDID for edp. */
has_dpcd = intel_edp_init_dpcd(intel_dp);
@@ -6736,9 +6830,8 @@ out_vdd_off:
* vdd might still be enabled due to the delayed vdd off.
* Make sure vdd is actually turned off here.
*/
- pps_lock(intel_dp);
- edp_panel_vdd_off_sync(intel_dp);
- pps_unlock(intel_dp);
+ with_pps_lock(intel_dp, wakeref)
+ edp_panel_vdd_off_sync(intel_dp);
return false;
}
@@ -6830,7 +6923,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
- if (!HAS_GMCH_DISPLAY(dev_priv))
+ if (!HAS_GMCH(dev_priv))
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
@@ -6912,6 +7005,7 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
intel_encoder->compute_config = intel_dp_compute_config;
intel_encoder->get_hw_state = intel_dp_get_hw_state;
intel_encoder->get_config = intel_dp_get_config;
+ intel_encoder->update_pipe = intel_panel_update_backlight;
intel_encoder->suspend = intel_dp_encoder_suspend;
if (IS_CHERRYVIEW(dev_priv)) {
intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
@@ -7006,7 +7100,10 @@ void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
continue;
ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr);
- if (ret)
- intel_dp_check_mst_status(intel_dp);
+ if (ret) {
+ intel_dp->is_mst = false;
+ drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
+ false);
+ }
}
}
diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c
index 30be0e39bd5f..b59c87daa4f7 100644
--- a/drivers/gpu/drm/i915/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/intel_dp_link_training.c
@@ -24,7 +24,7 @@
#include "intel_drv.h"
static void
-intel_dp_dump_link_status(const uint8_t link_status[DP_LINK_STATUS_SIZE])
+intel_dp_dump_link_status(const u8 link_status[DP_LINK_STATUS_SIZE])
{
DRM_DEBUG_KMS("ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x",
@@ -34,17 +34,17 @@ intel_dp_dump_link_status(const uint8_t link_status[DP_LINK_STATUS_SIZE])
static void
intel_get_adjust_train(struct intel_dp *intel_dp,
- const uint8_t link_status[DP_LINK_STATUS_SIZE])
+ const u8 link_status[DP_LINK_STATUS_SIZE])
{
- uint8_t v = 0;
- uint8_t p = 0;
+ u8 v = 0;
+ u8 p = 0;
int lane;
- uint8_t voltage_max;
- uint8_t preemph_max;
+ u8 voltage_max;
+ u8 preemph_max;
for (lane = 0; lane < intel_dp->lane_count; lane++) {
- uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
- uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
+ u8 this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
+ u8 this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
if (this_v > v)
v = this_v;
@@ -66,9 +66,9 @@ intel_get_adjust_train(struct intel_dp *intel_dp,
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
- uint8_t dp_train_pat)
+ u8 dp_train_pat)
{
- uint8_t buf[sizeof(intel_dp->train_set) + 1];
+ u8 buf[sizeof(intel_dp->train_set) + 1];
int ret, len;
intel_dp_program_link_training_pattern(intel_dp, dp_train_pat);
@@ -92,7 +92,7 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp,
- uint8_t dp_train_pat)
+ u8 dp_train_pat)
{
memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
intel_dp_set_signal_levels(intel_dp);
@@ -128,11 +128,11 @@ static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp)
static bool
intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
{
- uint8_t voltage;
+ u8 voltage;
int voltage_tries, cr_tries, max_cr_tries;
bool max_vswing_reached = false;
- uint8_t link_config[2];
- uint8_t link_bw, rate_select;
+ u8 link_config[2];
+ u8 link_bw, rate_select;
if (intel_dp->prepare_link_retrain)
intel_dp->prepare_link_retrain(intel_dp);
@@ -186,7 +186,7 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
voltage_tries = 1;
for (cr_tries = 0; cr_tries < max_cr_tries; ++cr_tries) {
- uint8_t link_status[DP_LINK_STATUS_SIZE];
+ u8 link_status[DP_LINK_STATUS_SIZE];
drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
@@ -282,7 +282,7 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
{
int tries;
u32 training_pattern;
- uint8_t link_status[DP_LINK_STATUS_SIZE];
+ u8 link_status[DP_LINK_STATUS_SIZE];
bool channel_eq = false;
training_pattern = intel_dp_training_pattern(intel_dp);
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 4de247ddf05f..fb67cd931117 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -23,16 +23,15 @@
*
*/
-#include <drm/drmP.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_probe_helper.h>
-static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
@@ -41,15 +40,19 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct drm_connector *connector = conn_state->connector;
void *port = to_intel_connector(connector)->port;
struct drm_atomic_state *state = pipe_config->base.state;
+ struct drm_crtc *crtc = pipe_config->base.crtc;
+ struct drm_crtc_state *old_crtc_state =
+ drm_atomic_get_old_crtc_state(state, crtc);
int bpp;
- int lane_count, slots = 0;
+ int lane_count, slots =
+ to_intel_crtc_state(old_crtc_state)->dp_m_n.tu;
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
int mst_pbn;
bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
DP_DPCD_QUIRK_CONSTANT_N);
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return false;
+ return -EINVAL;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->has_pch_encoder = false;
@@ -77,17 +80,12 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp);
pipe_config->pbn = mst_pbn;
- /* Zombie connectors can't have VCPI slots */
- if (!drm_connector_is_unregistered(connector)) {
- slots = drm_dp_atomic_find_vcpi_slots(state,
- &intel_dp->mst_mgr,
- port,
- mst_pbn);
- if (slots < 0) {
- DRM_DEBUG_KMS("failed finding vcpi slots:%d\n",
- slots);
- return false;
- }
+ slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr, port,
+ mst_pbn);
+ if (slots < 0) {
+ DRM_DEBUG_KMS("failed finding vcpi slots:%d\n",
+ slots);
+ return slots;
}
intel_link_compute_m_n(bpp, lane_count,
@@ -104,38 +102,42 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
- return true;
+ return 0;
}
-static int intel_dp_mst_atomic_check(struct drm_connector *connector,
- struct drm_connector_state *new_conn_state)
+static int
+intel_dp_mst_atomic_check(struct drm_connector *connector,
+ struct drm_connector_state *new_conn_state)
{
struct drm_atomic_state *state = new_conn_state->state;
- struct drm_connector_state *old_conn_state;
- struct drm_crtc *old_crtc;
+ struct drm_connector_state *old_conn_state =
+ drm_atomic_get_old_connector_state(state, connector);
+ struct intel_connector *intel_connector =
+ to_intel_connector(connector);
+ struct drm_crtc *new_crtc = new_conn_state->crtc;
struct drm_crtc_state *crtc_state;
- int slots, ret = 0;
-
- old_conn_state = drm_atomic_get_old_connector_state(state, connector);
- old_crtc = old_conn_state->crtc;
- if (!old_crtc)
- return ret;
+ struct drm_dp_mst_topology_mgr *mgr;
+ int ret = 0;
- crtc_state = drm_atomic_get_new_crtc_state(state, old_crtc);
- slots = to_intel_crtc_state(crtc_state)->dp_m_n.tu;
- if (drm_atomic_crtc_needs_modeset(crtc_state) && slots > 0) {
- struct drm_dp_mst_topology_mgr *mgr;
- struct drm_encoder *old_encoder;
+ if (!old_conn_state->crtc)
+ return 0;
- old_encoder = old_conn_state->best_encoder;
- mgr = &enc_to_mst(old_encoder)->primary->dp.mst_mgr;
+ /* We only want to free VCPI if this state disables the CRTC on this
+ * connector
+ */
+ if (new_crtc) {
+ crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
- ret = drm_dp_atomic_release_vcpi_slots(state, mgr, slots);
- if (ret)
- DRM_DEBUG_KMS("failed releasing %d vcpi slots:%d\n", slots, ret);
- else
- to_intel_crtc_state(crtc_state)->dp_m_n.tu = 0;
+ if (!crtc_state ||
+ !drm_atomic_crtc_needs_modeset(crtc_state) ||
+ crtc_state->enable)
+ return 0;
}
+
+ mgr = &enc_to_mst(old_conn_state->best_encoder)->primary->dp.mst_mgr;
+ ret = drm_dp_atomic_release_vcpi_slots(state, mgr,
+ intel_connector->port);
+
return ret;
}
@@ -240,7 +242,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
int ret;
- uint32_t temp;
+ u32 temp;
/* MST encoders are bound to a crtc, not to a connector,
* force the mapping here for get_hw_state.
@@ -457,6 +459,7 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
intel_connector->mst_port = intel_dp;
intel_connector->port = port;
+ drm_dp_mst_get_port_malloc(port);
connector = &intel_connector->base;
ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs,
@@ -517,20 +520,10 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
drm_connector_put(connector);
}
-static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
-{
- struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
-
- drm_kms_helper_hotplug_event(dev);
-}
-
static const struct drm_dp_mst_topology_cbs mst_cbs = {
.add_connector = intel_dp_add_mst_connector,
.register_connector = intel_dp_register_mst_connector,
.destroy_connector = intel_dp_destroy_mst_connector,
- .hotplug = intel_dp_mst_hotplug,
};
static struct intel_dp_mst_encoder *
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c
index 3c7f10d17658..95cb8b154f87 100644
--- a/drivers/gpu/drm/i915/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/intel_dpio_phy.c
@@ -413,7 +413,7 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
}
if (phy_info->rcomp_phy != -1) {
- uint32_t grc_code;
+ u32 grc_code;
bxt_phy_wait_grc_done(dev_priv, phy_info->rcomp_phy);
@@ -445,7 +445,7 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
const struct bxt_ddi_phy_info *phy_info;
- uint32_t val;
+ u32 val;
phy_info = bxt_get_phy_info(dev_priv, phy);
@@ -515,7 +515,7 @@ bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
enum dpio_phy phy)
{
const struct bxt_ddi_phy_info *phy_info;
- uint32_t mask;
+ u32 mask;
bool ok;
phy_info = bxt_get_phy_info(dev_priv, phy);
@@ -567,8 +567,8 @@ bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
#undef _CHK
}
-uint8_t
-bxt_ddi_phy_calc_lane_lat_optim_mask(uint8_t lane_count)
+u8
+bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count)
{
switch (lane_count) {
case 1:
@@ -585,7 +585,7 @@ bxt_ddi_phy_calc_lane_lat_optim_mask(uint8_t lane_count)
}
void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
- uint8_t lane_lat_optim_mask)
+ u8 lane_lat_optim_mask)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
@@ -610,7 +610,7 @@ void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
}
}
-uint8_t
+u8
bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -618,7 +618,7 @@ bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
enum dpio_phy phy;
enum dpio_channel ch;
int lane;
- uint8_t mask;
+ u8 mask;
bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
@@ -739,7 +739,7 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
enum pipe pipe = crtc->pipe;
- uint32_t val;
+ u32 val;
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
if (reset)
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index d513ca875c67..0a42d11c4c33 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -247,7 +247,7 @@ intel_find_shared_dpll(struct intel_crtc *crtc,
enum intel_dpll_id range_max)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_shared_dpll *pll;
+ struct intel_shared_dpll *pll, *unused_pll = NULL;
struct intel_shared_dpll_state *shared_dpll;
enum intel_dpll_id i;
@@ -257,8 +257,11 @@ intel_find_shared_dpll(struct intel_crtc *crtc,
pll = &dev_priv->shared_dplls[i];
/* Only want to check enabled timings first */
- if (shared_dpll[i].crtc_mask == 0)
+ if (shared_dpll[i].crtc_mask == 0) {
+ if (!unused_pll)
+ unused_pll = pll;
continue;
+ }
if (memcmp(&crtc_state->dpll_hw_state,
&shared_dpll[i].hw_state,
@@ -273,14 +276,11 @@ intel_find_shared_dpll(struct intel_crtc *crtc,
}
/* Ok no matching timings, maybe there's a free one? */
- for (i = range_min; i <= range_max; i++) {
- pll = &dev_priv->shared_dplls[i];
- if (shared_dpll[i].crtc_mask == 0) {
- DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n",
- crtc->base.base.id, crtc->base.name,
- pll->info->name);
- return pll;
- }
+ if (unused_pll) {
+ DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n",
+ crtc->base.base.id, crtc->base.name,
+ unused_pll->info->name);
+ return unused_pll;
}
return NULL;
@@ -345,9 +345,12 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_dpll_hw_state *hw_state)
{
const enum intel_dpll_id id = pll->info->id;
- uint32_t val;
+ intel_wakeref_t wakeref;
+ u32 val;
- if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ POWER_DOMAIN_PLLS);
+ if (!wakeref)
return false;
val = I915_READ(PCH_DPLL(id));
@@ -355,7 +358,7 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
hw_state->fp0 = I915_READ(PCH_FP0(id));
hw_state->fp1 = I915_READ(PCH_FP1(id));
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
return val & DPLL_VCO_ENABLE;
}
@@ -487,7 +490,7 @@ static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
const enum intel_dpll_id id = pll->info->id;
- uint32_t val;
+ u32 val;
val = I915_READ(WRPLL_CTL(id));
I915_WRITE(WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
@@ -497,7 +500,7 @@ static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
- uint32_t val;
+ u32 val;
val = I915_READ(SPLL_CTL);
I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
@@ -509,15 +512,18 @@ static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_dpll_hw_state *hw_state)
{
const enum intel_dpll_id id = pll->info->id;
- uint32_t val;
+ intel_wakeref_t wakeref;
+ u32 val;
- if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ POWER_DOMAIN_PLLS);
+ if (!wakeref)
return false;
val = I915_READ(WRPLL_CTL(id));
hw_state->wrpll = val;
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
return val & WRPLL_PLL_ENABLE;
}
@@ -526,15 +532,18 @@ static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state)
{
- uint32_t val;
+ intel_wakeref_t wakeref;
+ u32 val;
- if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ POWER_DOMAIN_PLLS);
+ if (!wakeref)
return false;
val = I915_READ(SPLL_CTL);
hw_state->spll = val;
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
return val & SPLL_PLL_ENABLE;
}
@@ -630,11 +639,12 @@ static unsigned hsw_wrpll_get_budget_for_freq(int clock)
return budget;
}
-static void hsw_wrpll_update_rnp(uint64_t freq2k, unsigned budget,
- unsigned r2, unsigned n2, unsigned p,
+static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
+ unsigned int r2, unsigned int n2,
+ unsigned int p,
struct hsw_wrpll_rnp *best)
{
- uint64_t a, b, c, d, diff, diff_best;
+ u64 a, b, c, d, diff, diff_best;
/* No best (r,n,p) yet */
if (best->p == 0) {
@@ -693,7 +703,7 @@ static void
hsw_ddi_calculate_wrpll(int clock /* in Hz */,
unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
{
- uint64_t freq2k;
+ u64 freq2k;
unsigned p, n2, r2;
struct hsw_wrpll_rnp best = { 0, 0, 0 };
unsigned budget;
@@ -759,7 +769,7 @@ static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(int clock,
struct intel_crtc_state *crtc_state)
{
struct intel_shared_dpll *pll;
- uint32_t val;
+ u32 val;
unsigned int p, n2, r2;
hsw_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
@@ -921,7 +931,7 @@ static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
const enum intel_dpll_id id = pll->info->id;
- uint32_t val;
+ u32 val;
val = I915_READ(DPLL_CTRL1);
@@ -986,12 +996,15 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state)
{
- uint32_t val;
+ u32 val;
const struct skl_dpll_regs *regs = skl_dpll_regs;
const enum intel_dpll_id id = pll->info->id;
+ intel_wakeref_t wakeref;
bool ret;
- if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ POWER_DOMAIN_PLLS);
+ if (!wakeref)
return false;
ret = false;
@@ -1011,7 +1024,7 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
ret = true;
out:
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
return ret;
}
@@ -1020,12 +1033,15 @@ static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state)
{
- uint32_t val;
const struct skl_dpll_regs *regs = skl_dpll_regs;
const enum intel_dpll_id id = pll->info->id;
+ intel_wakeref_t wakeref;
+ u32 val;
bool ret;
- if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ POWER_DOMAIN_PLLS);
+ if (!wakeref)
return false;
ret = false;
@@ -1041,15 +1057,15 @@ static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
ret = true;
out:
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
return ret;
}
struct skl_wrpll_context {
- uint64_t min_deviation; /* current minimal deviation */
- uint64_t central_freq; /* chosen central freq */
- uint64_t dco_freq; /* chosen dco freq */
+ u64 min_deviation; /* current minimal deviation */
+ u64 central_freq; /* chosen central freq */
+ u64 dco_freq; /* chosen dco freq */
unsigned int p; /* chosen divider */
};
@@ -1065,11 +1081,11 @@ static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
#define SKL_DCO_MAX_NDEVIATION 600
static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
- uint64_t central_freq,
- uint64_t dco_freq,
+ u64 central_freq,
+ u64 dco_freq,
unsigned int divider)
{
- uint64_t deviation;
+ u64 deviation;
deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
central_freq);
@@ -1143,21 +1159,21 @@ static void skl_wrpll_get_multipliers(unsigned int p,
}
struct skl_wrpll_params {
- uint32_t dco_fraction;
- uint32_t dco_integer;
- uint32_t qdiv_ratio;
- uint32_t qdiv_mode;
- uint32_t kdiv;
- uint32_t pdiv;
- uint32_t central_freq;
+ u32 dco_fraction;
+ u32 dco_integer;
+ u32 qdiv_ratio;
+ u32 qdiv_mode;
+ u32 kdiv;
+ u32 pdiv;
+ u32 central_freq;
};
static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
- uint64_t afe_clock,
- uint64_t central_freq,
- uint32_t p0, uint32_t p1, uint32_t p2)
+ u64 afe_clock,
+ u64 central_freq,
+ u32 p0, u32 p1, u32 p2)
{
- uint64_t dco_freq;
+ u64 dco_freq;
switch (central_freq) {
case 9600000000ULL:
@@ -1223,10 +1239,10 @@ static bool
skl_ddi_calculate_wrpll(int clock /* in Hz */,
struct skl_wrpll_params *wrpll_params)
{
- uint64_t afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
- uint64_t dco_central_freq[3] = {8400000000ULL,
- 9000000000ULL,
- 9600000000ULL};
+ u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
+ u64 dco_central_freq[3] = { 8400000000ULL,
+ 9000000000ULL,
+ 9600000000ULL };
static const int even_dividers[] = { 4, 6, 8, 10, 12, 14, 16, 18, 20,
24, 28, 30, 32, 36, 40, 42, 44,
48, 52, 54, 56, 60, 64, 66, 68,
@@ -1250,7 +1266,7 @@ skl_ddi_calculate_wrpll(int clock /* in Hz */,
for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
for (i = 0; i < dividers[d].n_dividers; i++) {
unsigned int p = dividers[d].list[i];
- uint64_t dco_freq = p * afe_clock;
+ u64 dco_freq = p * afe_clock;
skl_wrpll_try_divider(&ctx,
dco_central_freq[dco],
@@ -1296,7 +1312,7 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
int clock)
{
- uint32_t ctrl1, cfgcr1, cfgcr2;
+ u32 ctrl1, cfgcr1, cfgcr2;
struct skl_wrpll_params wrpll_params = { 0, };
/*
@@ -1333,7 +1349,7 @@ static bool
skl_ddi_dp_set_dpll_hw_state(int clock,
struct intel_dpll_hw_state *dpll_hw_state)
{
- uint32_t ctrl1;
+ u32 ctrl1;
/*
* See comment in intel_dpll_hw_state to understand why we always use 0
@@ -1435,7 +1451,7 @@ static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
- uint32_t temp;
+ u32 temp;
enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
enum dpio_phy phy;
enum dpio_channel ch;
@@ -1556,7 +1572,7 @@ static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
- uint32_t temp;
+ u32 temp;
temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
temp &= ~PORT_PLL_ENABLE;
@@ -1579,14 +1595,17 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_dpll_hw_state *hw_state)
{
enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
- uint32_t val;
- bool ret;
+ intel_wakeref_t wakeref;
enum dpio_phy phy;
enum dpio_channel ch;
+ u32 val;
+ bool ret;
bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
- if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ POWER_DOMAIN_PLLS);
+ if (!wakeref)
return false;
ret = false;
@@ -1643,7 +1662,7 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
ret = true;
out:
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
return ret;
}
@@ -1651,12 +1670,12 @@ out:
/* bxt clock parameters */
struct bxt_clk_div {
int clock;
- uint32_t p1;
- uint32_t p2;
- uint32_t m2_int;
- uint32_t m2_frac;
+ u32 p1;
+ u32 p2;
+ u32 m2_int;
+ u32 m2_frac;
bool m2_frac_en;
- uint32_t n;
+ u32 n;
int vco;
};
@@ -1723,8 +1742,8 @@ static bool bxt_ddi_set_dpll_hw_state(int clock,
struct intel_dpll_hw_state *dpll_hw_state)
{
int vco = clk_div->vco;
- uint32_t prop_coef, int_coef, gain_ctl, targ_cnt;
- uint32_t lanestagger;
+ u32 prop_coef, int_coef, gain_ctl, targ_cnt;
+ u32 lanestagger;
if (vco >= 6200000 && vco <= 6700000) {
prop_coef = 4;
@@ -1873,7 +1892,7 @@ static void intel_ddi_pll_init(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
if (INTEL_GEN(dev_priv) < 9) {
- uint32_t val = I915_READ(LCPLL_CTL);
+ u32 val = I915_READ(LCPLL_CTL);
/*
* The LCPLL register should be turned on by the BIOS. For now
@@ -1959,7 +1978,7 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
const enum intel_dpll_id id = pll->info->id;
- uint32_t val;
+ u32 val;
/* 1. Enable DPLL power in DPLL_ENABLE. */
val = I915_READ(CNL_DPLL_ENABLE(id));
@@ -2034,7 +2053,7 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
const enum intel_dpll_id id = pll->info->id;
- uint32_t val;
+ u32 val;
/*
* 1. Configure DPCLKA_CFGCR0 to turn off the clock for the DDI.
@@ -2091,10 +2110,13 @@ static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_dpll_hw_state *hw_state)
{
const enum intel_dpll_id id = pll->info->id;
- uint32_t val;
+ intel_wakeref_t wakeref;
+ u32 val;
bool ret;
- if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ POWER_DOMAIN_PLLS);
+ if (!wakeref)
return false;
ret = false;
@@ -2113,7 +2135,7 @@ static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
ret = true;
out:
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
return ret;
}
@@ -2225,7 +2247,7 @@ cnl_ddi_calculate_wrpll(int clock,
struct skl_wrpll_params *wrpll_params)
{
u32 afe_clock = clock * 5;
- uint32_t ref_clock;
+ u32 ref_clock;
u32 dco_min = 7998000;
u32 dco_max = 10000000;
u32 dco_mid = (dco_min + dco_max) / 2;
@@ -2271,7 +2293,7 @@ static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
int clock)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- uint32_t cfgcr0, cfgcr1;
+ u32 cfgcr0, cfgcr1;
struct skl_wrpll_params wrpll_params = { 0, };
cfgcr0 = DPLL_CFGCR0_HDMI_MODE;
@@ -2300,7 +2322,7 @@ static bool
cnl_ddi_dp_set_dpll_hw_state(int clock,
struct intel_dpll_hw_state *dpll_hw_state)
{
- uint32_t cfgcr0;
+ u32 cfgcr0;
cfgcr0 = DPLL_CFGCR0_SSC_ENABLE;
@@ -2517,7 +2539,7 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
struct intel_dpll_hw_state *pll_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- uint32_t cfgcr0, cfgcr1;
+ u32 cfgcr0, cfgcr1;
struct skl_wrpll_params pll_params = { 0 };
bool ret;
@@ -2547,10 +2569,10 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
}
int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv,
- uint32_t pll_id)
+ u32 pll_id)
{
- uint32_t cfgcr0, cfgcr1;
- uint32_t pdiv, kdiv, qdiv_mode, qdiv_ratio, dco_integer, dco_fraction;
+ u32 cfgcr0, cfgcr1;
+ u32 pdiv, kdiv, qdiv_mode, qdiv_ratio, dco_integer, dco_fraction;
const struct skl_wrpll_params *params;
int index, n_entries, link_clock;
@@ -2617,14 +2639,14 @@ int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv,
return link_clock;
}
-static enum port icl_mg_pll_id_to_port(enum intel_dpll_id id)
+static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
{
- return id - DPLL_ID_ICL_MGPLL1 + PORT_C;
+ return id - DPLL_ID_ICL_MGPLL1;
}
-enum intel_dpll_id icl_port_to_mg_pll_id(enum port port)
+enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
{
- return port - PORT_C + DPLL_ID_ICL_MGPLL1;
+ return tc_port + DPLL_ID_ICL_MGPLL1;
}
bool intel_dpll_is_combophy(enum intel_dpll_id id)
@@ -2633,10 +2655,10 @@ bool intel_dpll_is_combophy(enum intel_dpll_id id)
}
static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
- uint32_t *target_dco_khz,
+ u32 *target_dco_khz,
struct intel_dpll_hw_state *state)
{
- uint32_t dco_min_freq, dco_max_freq;
+ u32 dco_min_freq, dco_max_freq;
int div1_vals[] = {7, 5, 3, 2};
unsigned int i;
int div2;
@@ -2712,12 +2734,12 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int refclk_khz = dev_priv->cdclk.hw.ref;
- uint32_t dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
- uint32_t iref_ndiv, iref_trim, iref_pulse_w;
- uint32_t prop_coeff, int_coeff;
- uint32_t tdc_targetcnt, feedfwgain;
- uint64_t ssc_stepsize, ssc_steplen, ssc_steplog;
- uint64_t tmp;
+ u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
+ u32 iref_ndiv, iref_trim, iref_pulse_w;
+ u32 prop_coeff, int_coeff;
+ u32 tdc_targetcnt, feedfwgain;
+ u64 ssc_stepsize, ssc_steplen, ssc_steplog;
+ u64 tmp;
bool use_ssc = false;
bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
@@ -2740,7 +2762,7 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
}
m2div_rem = dco_khz % (refclk_khz * m1div);
- tmp = (uint64_t)m2div_rem * (1 << 22);
+ tmp = (u64)m2div_rem * (1 << 22);
do_div(tmp, refclk_khz * m1div);
m2div_frac = tmp;
@@ -2799,11 +2821,11 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
}
if (use_ssc) {
- tmp = (uint64_t)dco_khz * 47 * 32;
+ tmp = (u64)dco_khz * 47 * 32;
do_div(tmp, refclk_khz * m1div * 10000);
ssc_stepsize = tmp;
- tmp = (uint64_t)dco_khz * 1000;
+ tmp = (u64)dco_khz * 1000;
ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
} else {
ssc_stepsize = 0;
@@ -2903,7 +2925,10 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
ret = icl_calc_dpll_state(crtc_state, encoder, clock,
&pll_state);
} else {
- min = icl_port_to_mg_pll_id(port);
+ enum tc_port tc_port;
+
+ tc_port = intel_port_to_tc(dev_priv, port);
+ min = icl_tc_port_to_pll_id(tc_port);
max = min;
ret = icl_calc_mg_pll_state(crtc_state, encoder, clock,
&pll_state);
@@ -2937,12 +2962,8 @@ static i915_reg_t icl_pll_id_to_enable_reg(enum intel_dpll_id id)
return CNL_DPLL_ENABLE(id);
else if (id == DPLL_ID_ICL_TBTPLL)
return TBT_PLL_ENABLE;
- else
- /*
- * TODO: Make MG_PLL macros use
- * tc port id instead of port id
- */
- return MG_PLL_ENABLE(icl_mg_pll_id_to_port(id));
+
+ return MG_PLL_ENABLE(icl_pll_id_to_tc_port(id));
}
static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
@@ -2950,11 +2971,13 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_dpll_hw_state *hw_state)
{
const enum intel_dpll_id id = pll->info->id;
- uint32_t val;
- enum port port;
+ intel_wakeref_t wakeref;
bool ret = false;
+ u32 val;
- if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ POWER_DOMAIN_PLLS);
+ if (!wakeref)
return false;
val = I915_READ(icl_pll_id_to_enable_reg(id));
@@ -2966,32 +2989,33 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
} else {
- port = icl_mg_pll_id_to_port(id);
- hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(port));
+ enum tc_port tc_port = icl_pll_id_to_tc_port(id);
+
+ hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(tc_port));
hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
hw_state->mg_clktop2_coreclkctl1 =
- I915_READ(MG_CLKTOP2_CORECLKCTL1(port));
+ I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
hw_state->mg_clktop2_coreclkctl1 &=
MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
hw_state->mg_clktop2_hsclkctl =
- I915_READ(MG_CLKTOP2_HSCLKCTL(port));
+ I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
hw_state->mg_clktop2_hsclkctl &=
MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
- hw_state->mg_pll_div0 = I915_READ(MG_PLL_DIV0(port));
- hw_state->mg_pll_div1 = I915_READ(MG_PLL_DIV1(port));
- hw_state->mg_pll_lf = I915_READ(MG_PLL_LF(port));
- hw_state->mg_pll_frac_lock = I915_READ(MG_PLL_FRAC_LOCK(port));
- hw_state->mg_pll_ssc = I915_READ(MG_PLL_SSC(port));
+ hw_state->mg_pll_div0 = I915_READ(MG_PLL_DIV0(tc_port));
+ hw_state->mg_pll_div1 = I915_READ(MG_PLL_DIV1(tc_port));
+ hw_state->mg_pll_lf = I915_READ(MG_PLL_LF(tc_port));
+ hw_state->mg_pll_frac_lock = I915_READ(MG_PLL_FRAC_LOCK(tc_port));
+ hw_state->mg_pll_ssc = I915_READ(MG_PLL_SSC(tc_port));
- hw_state->mg_pll_bias = I915_READ(MG_PLL_BIAS(port));
+ hw_state->mg_pll_bias = I915_READ(MG_PLL_BIAS(tc_port));
hw_state->mg_pll_tdc_coldst_bias =
- I915_READ(MG_PLL_TDC_COLDST_BIAS(port));
+ I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
if (dev_priv->cdclk.hw.ref == 38400) {
hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
@@ -3007,7 +3031,7 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
ret = true;
out:
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
return ret;
}
@@ -3026,7 +3050,7 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
- enum port port = icl_mg_pll_id_to_port(pll->info->id);
+ enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
u32 val;
/*
@@ -3035,41 +3059,41 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
* during the calc/readout phase if the mask depends on some other HW
* state like refclk, see icl_calc_mg_pll_state().
*/
- val = I915_READ(MG_REFCLKIN_CTL(port));
+ val = I915_READ(MG_REFCLKIN_CTL(tc_port));
val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
val |= hw_state->mg_refclkin_ctl;
- I915_WRITE(MG_REFCLKIN_CTL(port), val);
+ I915_WRITE(MG_REFCLKIN_CTL(tc_port), val);
- val = I915_READ(MG_CLKTOP2_CORECLKCTL1(port));
+ val = I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
val |= hw_state->mg_clktop2_coreclkctl1;
- I915_WRITE(MG_CLKTOP2_CORECLKCTL1(port), val);
+ I915_WRITE(MG_CLKTOP2_CORECLKCTL1(tc_port), val);
- val = I915_READ(MG_CLKTOP2_HSCLKCTL(port));
+ val = I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
val |= hw_state->mg_clktop2_hsclkctl;
- I915_WRITE(MG_CLKTOP2_HSCLKCTL(port), val);
+ I915_WRITE(MG_CLKTOP2_HSCLKCTL(tc_port), val);
- I915_WRITE(MG_PLL_DIV0(port), hw_state->mg_pll_div0);
- I915_WRITE(MG_PLL_DIV1(port), hw_state->mg_pll_div1);
- I915_WRITE(MG_PLL_LF(port), hw_state->mg_pll_lf);
- I915_WRITE(MG_PLL_FRAC_LOCK(port), hw_state->mg_pll_frac_lock);
- I915_WRITE(MG_PLL_SSC(port), hw_state->mg_pll_ssc);
+ I915_WRITE(MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
+ I915_WRITE(MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
+ I915_WRITE(MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
+ I915_WRITE(MG_PLL_FRAC_LOCK(tc_port), hw_state->mg_pll_frac_lock);
+ I915_WRITE(MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
- val = I915_READ(MG_PLL_BIAS(port));
+ val = I915_READ(MG_PLL_BIAS(tc_port));
val &= ~hw_state->mg_pll_bias_mask;
val |= hw_state->mg_pll_bias;
- I915_WRITE(MG_PLL_BIAS(port), val);
+ I915_WRITE(MG_PLL_BIAS(tc_port), val);
- val = I915_READ(MG_PLL_TDC_COLDST_BIAS(port));
+ val = I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
val |= hw_state->mg_pll_tdc_coldst_bias;
- I915_WRITE(MG_PLL_TDC_COLDST_BIAS(port), val);
+ I915_WRITE(MG_PLL_TDC_COLDST_BIAS(tc_port), val);
- POSTING_READ(MG_PLL_TDC_COLDST_BIAS(port));
+ POSTING_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
}
static void icl_pll_enable(struct drm_i915_private *dev_priv,
@@ -3077,7 +3101,7 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv,
{
const enum intel_dpll_id id = pll->info->id;
i915_reg_t enable_reg = icl_pll_id_to_enable_reg(id);
- uint32_t val;
+ u32 val;
val = I915_READ(enable_reg);
val |= PLL_POWER_ENABLE;
@@ -3118,7 +3142,7 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
{
const enum intel_dpll_id id = pll->info->id;
i915_reg_t enable_reg = icl_pll_id_to_enable_reg(id);
- uint32_t val;
+ u32 val;
/* The first steps are done by intel_ddi_post_disable(). */
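A recurring pattern in the intel_dpll_mgr.c hunks above: every *_get_hw_state() hook now keeps the intel_wakeref_t cookie returned by intel_display_power_get_if_enabled() and hands it back to intel_display_power_put(), so debug builds can pair each get with its put. A condensed sketch of the shape of that pattern; EXAMPLE_PLL_CTL and EXAMPLE_PLL_ENABLE are placeholders, not real registers:

static bool example_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	intel_wakeref_t wakeref;
	u32 val;

	/* Take a tracked reference; bail out if the power well is off. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_PLLS);
	if (!wakeref)
		return false;

	val = I915_READ(EXAMPLE_PLL_CTL);	/* placeholder register */
	hw_state->dpll = val;

	/* Hand the same cookie back so debug builds can match get/put. */
	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);

	return val & EXAMPLE_PLL_ENABLE;	/* placeholder enable bit */
}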
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h
index a033d8f06d4a..40e8391a92f2 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h
@@ -138,14 +138,14 @@ enum intel_dpll_id {
struct intel_dpll_hw_state {
/* i9xx, pch plls */
- uint32_t dpll;
- uint32_t dpll_md;
- uint32_t fp0;
- uint32_t fp1;
+ u32 dpll;
+ u32 dpll_md;
+ u32 fp0;
+ u32 fp1;
/* hsw, bdw */
- uint32_t wrpll;
- uint32_t spll;
+ u32 wrpll;
+ u32 spll;
/* skl */
/*
@@ -154,34 +154,33 @@ struct intel_dpll_hw_state {
* the register. This allows us to easily compare the state to share
* the DPLL.
*/
- uint32_t ctrl1;
+ u32 ctrl1;
/* HDMI only, 0 when used for DP */
- uint32_t cfgcr1, cfgcr2;
+ u32 cfgcr1, cfgcr2;
/* cnl */
- uint32_t cfgcr0;
+ u32 cfgcr0;
/* CNL also uses cfgcr1 */
/* bxt */
- uint32_t ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10,
- pcsdw12;
+ u32 ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10, pcsdw12;
/*
* ICL uses the following, already defined:
- * uint32_t cfgcr0, cfgcr1;
- */
- uint32_t mg_refclkin_ctl;
- uint32_t mg_clktop2_coreclkctl1;
- uint32_t mg_clktop2_hsclkctl;
- uint32_t mg_pll_div0;
- uint32_t mg_pll_div1;
- uint32_t mg_pll_lf;
- uint32_t mg_pll_frac_lock;
- uint32_t mg_pll_ssc;
- uint32_t mg_pll_bias;
- uint32_t mg_pll_tdc_coldst_bias;
- uint32_t mg_pll_bias_mask;
- uint32_t mg_pll_tdc_coldst_bias_mask;
+ * u32 cfgcr0, cfgcr1;
+ */
+ u32 mg_refclkin_ctl;
+ u32 mg_clktop2_coreclkctl1;
+ u32 mg_clktop2_hsclkctl;
+ u32 mg_pll_div0;
+ u32 mg_pll_div1;
+ u32 mg_pll_lf;
+ u32 mg_pll_frac_lock;
+ u32 mg_pll_ssc;
+ u32 mg_pll_bias;
+ u32 mg_pll_tdc_coldst_bias;
+ u32 mg_pll_bias_mask;
+ u32 mg_pll_tdc_coldst_bias_mask;
};
/**
@@ -280,7 +279,7 @@ struct dpll_info {
* Inform the state checker that the DPLL is kept enabled even if
* not in use by any CRTC.
*/
- uint32_t flags;
+ u32 flags;
};
/**
@@ -343,9 +342,9 @@ void intel_shared_dpll_init(struct drm_device *dev);
void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
struct intel_dpll_hw_state *hw_state);
int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv,
- uint32_t pll_id);
+ u32 pll_id);
int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv);
-enum intel_dpll_id icl_port_to_mg_pll_id(enum port port);
+enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port);
bool intel_dpll_is_combophy(enum intel_dpll_id id);
#endif /* _INTEL_DPLL_MGR_H_ */
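With the rename above, MG PLL ids map to Type-C ports rather than DDI ports, and the conversion is a plain offset from DPLL_ID_ICL_MGPLL1 in either direction. A standalone sketch of that mapping; the enum values and names here are illustrative, not the kernel's definitions:

enum tc_port { PORT_TC1, PORT_TC2, PORT_TC3, PORT_TC4 };

enum dpll_id {
	DPLL_ID_ICL_MGPLL1 = 3,		/* value illustrative */
	DPLL_ID_ICL_MGPLL2,
	DPLL_ID_ICL_MGPLL3,
	DPLL_ID_ICL_MGPLL4,
};

static inline enum tc_port pll_id_to_tc_port(enum dpll_id id)
{
	return (enum tc_port)(id - DPLL_ID_ICL_MGPLL1);
}

static inline enum dpll_id tc_port_to_pll_id(enum tc_port tc_port)
{
	return (enum dpll_id)(tc_port + DPLL_ID_ICL_MGPLL1);
}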
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index e9ddeaf05a14..15db41394b9e 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -29,18 +29,22 @@
#include <linux/i2c.h>
#include <linux/hdmi.h>
#include <linux/sched/clock.h>
+#include <linux/stackdepot.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_dp_dual_mode_helper.h>
#include <drm/drm_dp_mst_helper.h>
+#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
+#include <drm/drm_vblank.h>
#include <drm/drm_atomic.h>
#include <media/cec-notifier.h>
+struct drm_printer;
+
/**
* __wait_for - magic wait macro
*
@@ -232,9 +236,9 @@ struct intel_encoder {
enum intel_output_type (*compute_output_type)(struct intel_encoder *,
struct intel_crtc_state *,
struct drm_connector_state *);
- bool (*compute_config)(struct intel_encoder *,
- struct intel_crtc_state *,
- struct drm_connector_state *);
+ int (*compute_config)(struct intel_encoder *,
+ struct intel_crtc_state *,
+ struct drm_connector_state *);
void (*pre_pll_enable)(struct intel_encoder *,
const struct intel_crtc_state *,
const struct drm_connector_state *);
@@ -253,6 +257,9 @@ struct intel_encoder {
void (*post_pll_disable)(struct intel_encoder *,
const struct intel_crtc_state *,
const struct drm_connector_state *);
+ void (*update_pipe)(struct intel_encoder *,
+ const struct intel_crtc_state *,
+ const struct drm_connector_state *);
/* Read out the current hw state of this connector, returning true if
* the encoder is active. If the encoder is enabled it also set the pipe
* it is connected to in the pipe parameter. */
@@ -304,13 +311,12 @@ struct intel_panel {
/* Connector and platform specific backlight functions */
int (*setup)(struct intel_connector *connector, enum pipe pipe);
- uint32_t (*get)(struct intel_connector *connector);
- void (*set)(const struct drm_connector_state *conn_state, uint32_t level);
+ u32 (*get)(struct intel_connector *connector);
+ void (*set)(const struct drm_connector_state *conn_state, u32 level);
void (*disable)(const struct drm_connector_state *conn_state);
void (*enable)(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
- uint32_t (*hz_to_pwm)(struct intel_connector *connector,
- uint32_t hz);
+ u32 (*hz_to_pwm)(struct intel_connector *connector, u32 hz);
void (*power)(struct intel_connector *, bool enable);
} backlight;
};
@@ -602,7 +608,7 @@ struct intel_initial_plane_config {
struct intel_scaler {
int in_use;
- uint32_t mode;
+ u32 mode;
};
struct intel_crtc_scaler_state {
@@ -634,13 +640,15 @@ struct intel_crtc_scaler_state {
};
/* drm_mode->private_flags */
-#define I915_MODE_FLAG_INHERITED 1
+#define I915_MODE_FLAG_INHERITED (1<<0)
/* Flag to get scanline using frame time stamps */
#define I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP (1<<1)
+/* Flag to use the scanline counter instead of the pixel counter */
+#define I915_MODE_FLAG_USE_SCANLINE_COUNTER (1<<2)
struct intel_pipe_wm {
struct intel_wm_level wm[5];
- uint32_t linetime;
+ u32 linetime;
bool fbc_wm_enabled;
bool pipe_enabled;
bool sprites_enabled;
@@ -656,7 +664,7 @@ struct skl_plane_wm {
struct skl_pipe_wm {
struct skl_plane_wm planes[I915_MAX_PLANES];
- uint32_t linetime;
+ u32 linetime;
};
enum vlv_wm_level {
@@ -669,7 +677,7 @@ enum vlv_wm_level {
struct vlv_wm_state {
struct g4x_pipe_wm wm[NUM_VLV_WM_LEVELS];
struct g4x_sr_wm sr[NUM_VLV_WM_LEVELS];
- uint8_t num_levels;
+ u8 num_levels;
bool cxsr;
};
@@ -882,13 +890,13 @@ struct intel_crtc_state {
/* Used by SDVO (and if we ever fix it, HDMI). */
unsigned pixel_multiplier;
- uint8_t lane_count;
+ u8 lane_count;
/*
* Used by platforms having DP/HDMI PHY with programmable lane
* latency optimization.
*/
- uint8_t lane_lat_optim_mask;
+ u8 lane_lat_optim_mask;
/* minimum acceptable voltage level */
u8 min_voltage_level;
@@ -932,7 +940,7 @@ struct intel_crtc_state {
struct intel_crtc_wm_state wm;
/* Gamma mode programmed on the pipe */
- uint32_t gamma_mode;
+ u32 gamma_mode;
/* bitmask of visible planes (enum plane_id) */
u8 active_planes;
@@ -1018,7 +1026,7 @@ struct intel_plane {
enum pipe pipe;
bool has_fbc;
bool has_ccs;
- uint32_t frontbuffer_bit;
+ u32 frontbuffer_bit;
struct {
u32 base, cntl, size;
@@ -1084,7 +1092,6 @@ struct intel_hdmi {
} dp_dual_mode;
bool has_hdmi_sink;
bool has_audio;
- bool rgb_quant_range_selectable;
struct intel_connector *attached_connector;
struct cec_notifier *cec_notifier;
};
@@ -1114,9 +1121,9 @@ enum link_m_n_set {
struct intel_dp_compliance_data {
unsigned long edid;
- uint8_t video_pattern;
- uint16_t hdisplay, vdisplay;
- uint8_t bpc;
+ u8 video_pattern;
+ u16 hdisplay, vdisplay;
+ u8 bpc;
};
struct intel_dp_compliance {
@@ -1129,18 +1136,18 @@ struct intel_dp_compliance {
struct intel_dp {
i915_reg_t output_reg;
- uint32_t DP;
+ u32 DP;
int link_rate;
- uint8_t lane_count;
- uint8_t sink_count;
+ u8 lane_count;
+ u8 sink_count;
bool link_mst;
bool link_trained;
bool has_audio;
bool reset_link_params;
- uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
- uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
- uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
- uint8_t edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ u8 psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
+ u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
+ u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE];
u8 fec_capable;
/* source rates */
@@ -1160,7 +1167,7 @@ struct intel_dp {
/* sink or branch descriptor */
struct drm_dp_desc desc;
struct drm_dp_aux aux;
- uint8_t train_set[4];
+ u8 train_set[4];
int panel_power_up_delay;
int panel_power_down_delay;
int panel_power_cycle_delay;
@@ -1202,14 +1209,13 @@ struct intel_dp {
struct intel_dp_mst_encoder *mst_encoders[I915_MAX_PIPES];
struct drm_dp_mst_topology_mgr mst_mgr;
- uint32_t (*get_aux_clock_divider)(struct intel_dp *dp, int index);
+ u32 (*get_aux_clock_divider)(struct intel_dp *dp, int index);
/*
* This function returns the value we have to program the AUX_CTL
* register with to kick off an AUX transaction.
*/
- uint32_t (*get_aux_send_ctl)(struct intel_dp *dp,
- int send_bytes,
- uint32_t aux_clock_divider);
+ u32 (*get_aux_send_ctl)(struct intel_dp *dp, int send_bytes,
+ u32 aux_clock_divider);
i915_reg_t (*aux_ch_ctl_reg)(struct intel_dp *dp);
i915_reg_t (*aux_ch_data_reg)(struct intel_dp *dp, int index);
@@ -1219,6 +1225,9 @@ struct intel_dp {
/* Displayport compliance testing */
struct intel_dp_compliance compliance;
+
+ /* Display stream compression testing */
+ bool force_dsc_en;
};
enum lspcon_vendor {
@@ -1240,10 +1249,11 @@ struct intel_digital_port {
struct intel_lspcon lspcon;
enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool);
bool release_cl2_override;
- uint8_t max_lanes;
+ u8 max_lanes;
/* Used for DP and ICL+ TypeC/DP and TypeC/HDMI ports. */
enum aux_ch aux_ch;
enum intel_display_power_domain ddi_io_power_domain;
+ bool tc_legacy_port:1;
enum tc_port_type tc_type;
void (*write_infoframe)(struct intel_encoder *encoder,
@@ -1474,8 +1484,8 @@ void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv);
void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv);
/* i915_irq.c */
-void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask);
+void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask);
void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv);
@@ -1538,7 +1548,7 @@ void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
struct intel_crtc_state *crtc_state);
u32 bxt_signal_levels(struct intel_dp *intel_dp);
-uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
+u32 ddi_signal_levels(struct intel_dp *intel_dp);
u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder);
u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder,
u8 voltage_swing);
@@ -1678,11 +1688,11 @@ void intel_cleanup_plane_fb(struct drm_plane *plane,
int intel_plane_atomic_get_property(struct drm_plane *plane,
const struct drm_plane_state *state,
struct drm_property *property,
- uint64_t *val);
+ u64 *val);
int intel_plane_atomic_set_property(struct drm_plane *plane,
struct drm_plane_state *state,
struct drm_property *property,
- uint64_t val);
+ u64 val);
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
struct drm_crtc_state *crtc_state,
const struct intel_plane_state *old_plane_state,
@@ -1756,9 +1766,10 @@ static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state)
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
+u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state);
u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
-u32 glk_color_ctl(const struct intel_plane_state *plane_state);
+u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state);
u32 skl_plane_stride(const struct intel_plane_state *plane_state,
int plane);
int skl_check_plane_surface(struct intel_plane_state *plane_state);
@@ -1802,10 +1813,10 @@ bool intel_dp_init(struct drm_i915_private *dev_priv, i915_reg_t output_reg,
bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
void intel_dp_set_link_params(struct intel_dp *intel_dp,
- int link_rate, uint8_t lane_count,
+ int link_rate, u8 lane_count,
bool link_mst);
int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
- int link_rate, uint8_t lane_count);
+ int link_rate, u8 lane_count);
void intel_dp_start_link_train(struct intel_dp *intel_dp);
void intel_dp_stop_link_train(struct intel_dp *intel_dp);
int intel_dp_retrain_link(struct intel_encoder *encoder,
@@ -1816,10 +1827,10 @@ void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
bool enable);
void intel_dp_encoder_reset(struct drm_encoder *encoder);
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
-void intel_dp_encoder_destroy(struct drm_encoder *encoder);
-bool intel_dp_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state);
+void intel_dp_encoder_flush_work(struct drm_encoder *encoder);
+int intel_dp_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state);
bool intel_dp_is_edp(struct intel_dp *intel_dp);
bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
@@ -1837,7 +1848,7 @@ int intel_dp_max_lane_count(struct intel_dp *intel_dp);
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
void intel_power_sequencer_reset(struct drm_i915_private *dev_priv);
-uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
+u32 intel_dp_pack_aux(const u8 *src, int src_bytes);
void intel_plane_destroy(struct drm_plane *plane);
void intel_edp_drrs_enable(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
@@ -1850,24 +1861,24 @@ void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
- uint8_t dp_train_pat);
+ u8 dp_train_pat);
void
intel_dp_set_signal_levels(struct intel_dp *intel_dp);
void intel_dp_set_idle_link_train(struct intel_dp *intel_dp);
-uint8_t
+u8
intel_dp_voltage_max(struct intel_dp *intel_dp);
-uint8_t
-intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing);
+u8
+intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing);
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
- uint8_t *link_bw, uint8_t *rate_select);
+ u8 *link_bw, u8 *rate_select);
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp);
bool
-intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]);
-uint16_t intel_dp_dsc_get_output_bpp(int link_clock, uint8_t lane_count,
- int mode_clock, int mode_hdisplay);
-uint8_t intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock,
- int mode_hdisplay);
+intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE]);
+u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count,
+ int mode_clock, int mode_hdisplay);
+u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock,
+ int mode_hdisplay);
/* intel_vdsc.c */
int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
@@ -1884,6 +1895,8 @@ bool intel_dp_read_dpcd(struct intel_dp *intel_dp);
int intel_dp_link_required(int pixel_clock, int bpp);
int intel_dp_max_data_rate(int max_link_clock, int max_lanes);
bool intel_digital_port_connected(struct intel_encoder *encoder);
+void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *dig_port);
/* intel_dp_aux_backlight.c */
int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector);
@@ -1977,9 +1990,9 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg,
void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
-bool intel_hdmi_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state);
+int intel_hdmi_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state);
bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
struct drm_connector *connector,
bool high_tmds_clock_ratio,
@@ -2024,6 +2037,9 @@ int intel_panel_setup_backlight(struct drm_connector *connector,
enum pipe pipe);
void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
+void intel_panel_update_backlight(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state);
extern struct drm_display_mode *intel_find_panel_downclock(
struct drm_i915_private *dev_priv,
@@ -2085,6 +2101,7 @@ bool intel_psr_enabled(struct intel_dp *intel_dp);
void intel_init_quirks(struct drm_i915_private *dev_priv);
/* intel_runtime_pm.c */
+void intel_runtime_pm_init_early(struct drm_i915_private *dev_priv);
int intel_power_domains_init(struct drm_i915_private *);
void intel_power_domains_cleanup(struct drm_i915_private *dev_priv);
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
@@ -2107,6 +2124,7 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume);
void bxt_display_core_uninit(struct drm_i915_private *dev_priv);
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv);
void intel_runtime_pm_disable(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_cleanup(struct drm_i915_private *dev_priv);
const char *
intel_display_power_domain_str(enum intel_display_power_domain domain);
@@ -2114,33 +2132,42 @@ bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
-void intel_display_power_get(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain);
-bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
+intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
+intel_wakeref_t
+intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
+void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
void intel_display_power_put(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain);
+ enum intel_display_power_domain domain,
+ intel_wakeref_t wakeref);
+#else
+#define intel_display_power_put(i915, domain, wakeref) \
+ intel_display_power_put_unchecked(i915, domain)
+#endif
void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
u8 req_slices);
static inline void
-assert_rpm_device_not_suspended(struct drm_i915_private *dev_priv)
+assert_rpm_device_not_suspended(struct drm_i915_private *i915)
{
- WARN_ONCE(dev_priv->runtime_pm.suspended,
+ WARN_ONCE(i915->runtime_pm.suspended,
"Device suspended during HW access\n");
}
static inline void
-assert_rpm_wakelock_held(struct drm_i915_private *dev_priv)
+assert_rpm_wakelock_held(struct drm_i915_private *i915)
{
- assert_rpm_device_not_suspended(dev_priv);
- WARN_ONCE(!atomic_read(&dev_priv->runtime_pm.wakeref_count),
+ assert_rpm_device_not_suspended(i915);
+ WARN_ONCE(!atomic_read(&i915->runtime_pm.wakeref_count),
"RPM wakelock ref not held during HW access");
}
/**
* disable_rpm_wakeref_asserts - disable the RPM assert checks
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*
* This function disable asserts that check if we hold an RPM wakelock
* reference, while keeping the device-not-suspended checks still enabled.
@@ -2157,14 +2184,14 @@ assert_rpm_wakelock_held(struct drm_i915_private *dev_priv)
* enable_rpm_wakeref_asserts().
*/
static inline void
-disable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
+disable_rpm_wakeref_asserts(struct drm_i915_private *i915)
{
- atomic_inc(&dev_priv->runtime_pm.wakeref_count);
+ atomic_inc(&i915->runtime_pm.wakeref_count);
}
/**
* enable_rpm_wakeref_asserts - re-enable the RPM assert checks
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*
* This function re-enables the RPM assert checks after disabling them with
* disable_rpm_wakeref_asserts. It's meant to be used only in special
@@ -2174,15 +2201,39 @@ disable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
* disable_rpm_wakeref_asserts().
*/
static inline void
-enable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
+enable_rpm_wakeref_asserts(struct drm_i915_private *i915)
{
- atomic_dec(&dev_priv->runtime_pm.wakeref_count);
+ atomic_dec(&i915->runtime_pm.wakeref_count);
}
-void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
-bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv);
-void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
-void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
+intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915);
+intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915);
+intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915);
+
+#define with_intel_runtime_pm(i915, wf) \
+ for ((wf) = intel_runtime_pm_get(i915); (wf); \
+ intel_runtime_pm_put((i915), (wf)), (wf) = 0)
+
+#define with_intel_runtime_pm_if_in_use(i915, wf) \
+ for ((wf) = intel_runtime_pm_get_if_in_use(i915); (wf); \
+ intel_runtime_pm_put((i915), (wf)), (wf) = 0)
+
+void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915);
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref);
+#else
+#define intel_runtime_pm_put(i915, wref) intel_runtime_pm_put_unchecked(i915)
+#endif
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
+ struct drm_printer *p);
+#else
+static inline void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
+ struct drm_printer *p)
+{
+}
+#endif
void chv_phy_powergate_lanes(struct intel_encoder *encoder,
bool override, unsigned int mask);
@@ -2210,16 +2261,16 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv);
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
void gen6_rps_idle(struct drm_i915_private *dev_priv);
void gen6_rps_boost(struct i915_request *rq, struct intel_rps_client *rps);
-void g4x_wm_get_hw_state(struct drm_device *dev);
-void vlv_wm_get_hw_state(struct drm_device *dev);
-void ilk_wm_get_hw_state(struct drm_device *dev);
-void skl_wm_get_hw_state(struct drm_device *dev);
+void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv);
+void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv);
+void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv);
+void skl_wm_get_hw_state(struct drm_i915_private *dev_priv);
void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
struct skl_ddb_entry *ddb_y,
struct skl_ddb_entry *ddb_uv);
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */);
-void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
+void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
struct skl_pipe_wm *out);
void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
void vlv_wm_sanitize(struct drm_i915_private *dev_priv);
@@ -2288,11 +2339,11 @@ void intel_tv_init(struct drm_i915_private *dev_priv);
int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
const struct drm_connector_state *state,
struct drm_property *property,
- uint64_t *val);
+ u64 *val);
int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
struct drm_connector_state *state,
struct drm_property *property,
- uint64_t val);
+ u64 val);
int intel_digital_connector_atomic_check(struct drm_connector *conn,
struct drm_connector_state *new_state);
struct drm_connector_state *
@@ -2337,10 +2388,10 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
struct intel_plane_state *intel_state);
/* intel_color.c */
-void intel_color_init(struct drm_crtc *crtc);
-int intel_color_check(struct drm_crtc *crtc, struct drm_crtc_state *state);
-void intel_color_set_csc(struct drm_crtc_state *crtc_state);
-void intel_color_load_luts(struct drm_crtc_state *crtc_state);
+void intel_color_init(struct intel_crtc *crtc);
+int intel_color_check(struct intel_crtc_state *crtc_state);
+void intel_color_commit(const struct intel_crtc_state *crtc_state);
+void intel_color_load_luts(const struct intel_crtc_state *crtc_state);
/* intel_lspcon.c */
bool lspcon_init(struct intel_digital_port *intel_dig_port);
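intel_drv.h also grows scoped runtime-PM helpers: with_intel_runtime_pm() is a for-loop wrapper that takes a tracked wakeref, runs its body once, and releases the same wakeref on exit. A small usage sketch; the register read is a placeholder body, not taken from the patch:

static u32 read_reg_with_rpm(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;
	u32 val = 0;

	/* Expands to get/put around the statement, with the ref tracked. */
	with_intel_runtime_pm(i915, wakeref)
		val = I915_READ(EXAMPLE_REG);	/* placeholder register */

	return val;
}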
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index d968f1f13e09..a9a19778dc7f 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -24,7 +24,6 @@
#ifndef _INTEL_DSI_H
#define _INTEL_DSI_H
-#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_mipi_dsi.h>
#include "intel_drv.h"
@@ -40,6 +39,7 @@ struct intel_dsi {
struct intel_encoder base;
struct intel_dsi_host *dsi_hosts[I915_MAX_PORTS];
+ intel_wakeref_t io_wakeref[I915_MAX_PORTS];
/* GPIO Desc for CRC based Panel control */
struct gpio_desc *gpio_panel;
@@ -173,7 +173,7 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder,
void vlv_dsi_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *config);
void vlv_dsi_pll_disable(struct intel_encoder *encoder);
-u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
+u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
struct intel_crtc_state *config);
void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
@@ -183,7 +183,7 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
void bxt_dsi_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *config);
void bxt_dsi_pll_disable(struct intel_encoder *encoder);
-u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
+u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
struct intel_crtc_state *config);
void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
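struct intel_dsi gains a per-port io_wakeref[] array so the IO power references taken on the DSI enable path can be tracked and released individually. One plausible shape for storing and dropping such cookies, purely illustrative since the enable/disable paths are not part of this hunk, and with the power domain hard-coded to DDI A as a placeholder:

static void example_dsi_io_power(struct drm_i915_private *dev_priv,
				 struct intel_dsi *intel_dsi,
				 enum port port, bool enable)
{
	if (enable) {
		/* Remember the tracked ref for this port. */
		intel_dsi->io_wakeref[port] =
			intel_display_power_get(dev_priv,
						POWER_DOMAIN_PORT_DDI_A_IO);
	} else {
		intel_wakeref_t wakeref = intel_dsi->io_wakeref[port];

		intel_dsi->io_wakeref[port] = 0;
		if (wakeref)
			intel_display_power_put(dev_priv,
						POWER_DOMAIN_PORT_DDI_A_IO,
						wakeref);
	}
}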
diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c
index a1a8b3790e61..06a11c35a784 100644
--- a/drivers/gpu/drm/i915/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c
@@ -24,15 +24,15 @@
*
*/
-#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/i915_drm.h>
#include <linux/gpio/consumer.h>
+#include <linux/mfd/intel_soc_pmic.h>
#include <linux/slab.h>
#include <video/mipi_display.h>
#include <asm/intel-mid.h>
-#include <video/mipi_display.h>
+#include <asm/unaligned.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_dsi.h"
@@ -393,7 +393,25 @@ static const u8 *mipi_exec_spi(struct intel_dsi *intel_dsi, const u8 *data)
static const u8 *mipi_exec_pmic(struct intel_dsi *intel_dsi, const u8 *data)
{
- DRM_DEBUG_KMS("Skipping PMIC element execution\n");
+#ifdef CONFIG_PMIC_OPREGION
+ u32 value, mask, reg_address;
+ u16 i2c_address;
+ int ret;
+
+ /* byte 0 aka PMIC Flag is reserved */
+ i2c_address = get_unaligned_le16(data + 1);
+ reg_address = get_unaligned_le32(data + 3);
+ value = get_unaligned_le32(data + 7);
+ mask = get_unaligned_le32(data + 11);
+
+ ret = intel_soc_pmic_exec_mipi_pmic_seq_element(i2c_address,
+ reg_address,
+ value, mask);
+ if (ret)
+ DRM_ERROR("%s failed, error: %d\n", __func__, ret);
+#else
+ DRM_ERROR("Your hardware requires CONFIG_PMIC_OPREGION and it is not set\n");
+#endif
return data + 15;
}
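mipi_exec_pmic() above now actually decodes the 15-byte VBT PMIC sequence element: byte 0 is a reserved flag, bytes 1-2 carry the little-endian I2C address, and bytes 3-6, 7-10 and 11-14 carry the register address, value and mask. A standalone userspace sketch of that layout, assuming a little-endian host (the kernel uses get_unaligned_le16/32 rather than memcpy):

#include <stdint.h>
#include <string.h>

struct pmic_element {
	uint16_t i2c_address;	/* bytes 1-2, little endian */
	uint32_t reg_address;	/* bytes 3-6  */
	uint32_t value;		/* bytes 7-10 */
	uint32_t mask;		/* bytes 11-14 */
};

/* Decode one 15-byte PMIC sequence element; byte 0 is a reserved flag. */
static const uint8_t *decode_pmic_element(const uint8_t *data,
					  struct pmic_element *out)
{
	memcpy(&out->i2c_address, data + 1,  sizeof(out->i2c_address));
	memcpy(&out->reg_address, data + 3,  sizeof(out->reg_address));
	memcpy(&out->value,       data + 7,  sizeof(out->value));
	memcpy(&out->mask,        data + 11, sizeof(out->mask));
	return data + 15;	/* the next element follows immediately */
}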
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 0042a7f69387..a6c82482a841 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -26,7 +26,6 @@
*/
#include <linux/i2c.h>
#include <linux/slab.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include "intel_drv.h"
@@ -235,9 +234,9 @@ intel_dvo_mode_valid(struct drm_connector *connector,
return intel_dvo->dev.dev_ops->mode_valid(&intel_dvo->dev, mode);
}
-static bool intel_dvo_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+static int intel_dvo_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
const struct drm_display_mode *fixed_mode =
@@ -254,10 +253,11 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
intel_fixed_panel_mode(fixed_mode, adjusted_mode);
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return false;
+ return -EINVAL;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
- return true;
+
+ return 0;
}
static void intel_dvo_pre_enable(struct intel_encoder *encoder,
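intel_dvo_compute_config(), like the other ->compute_config() hooks converted in this series, now returns 0 on success and a negative errno (here -EINVAL for double-scan modes) instead of a bool, so callers can propagate a specific error. A minimal sketch of the new convention with the body abbreviated:

static int example_compute_config(struct intel_encoder *encoder,
				  struct intel_crtc_state *pipe_config,
				  struct drm_connector_state *conn_state)
{
	struct drm_display_mode *adjusted_mode =
		&pipe_config->base.adjusted_mode;

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return -EINVAL;		/* reject unsupported modes */

	/* ... fill in the rest of pipe_config here ... */

	return 0;
}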
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index af2873403009..49fa43ff02ba 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -25,6 +25,7 @@
#include <drm/drm_print.h>
#include "i915_drv.h"
+#include "i915_reset.h"
#include "intel_ringbuffer.h"
#include "intel_lrc.h"
@@ -261,6 +262,31 @@ static void __sprint_engine_name(char *name, const struct engine_info *info)
info->instance) >= INTEL_ENGINE_CS_MAX_NAME);
}
+void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ i915_reg_t hwstam;
+
+ /*
+ * Though they added more rings on g4x/ilk, they did not add
+ * per-engine HWSTAM until gen6.
+ */
+ if (INTEL_GEN(dev_priv) < 6 && engine->class != RENDER_CLASS)
+ return;
+
+ hwstam = RING_HWSTAM(engine->mmio_base);
+ if (INTEL_GEN(dev_priv) >= 3)
+ I915_WRITE(hwstam, mask);
+ else
+ I915_WRITE16(hwstam, mask);
+}
+
+static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
+{
+ /* Mask off all writes into the unknown HWSP */
+ intel_engine_set_hwsp_writemask(engine, ~0u);
+}
+
static int
intel_engine_setup(struct drm_i915_private *dev_priv,
enum intel_engine_id id)
@@ -312,6 +338,9 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
+ /* Scrub mmio state on takeover */
+ intel_engine_sanitize_mmio(engine);
+
dev_priv->engine_class[info->class][info->instance] = engine;
dev_priv->engine[id] = engine;
return 0;
@@ -365,7 +394,7 @@ int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
goto cleanup;
}
- device_info->num_rings = hweight32(mask);
+ RUNTIME_INFO(dev_priv)->num_rings = hweight32(mask);
i915_check_and_clear_faults(dev_priv);
@@ -426,33 +455,9 @@ cleanup:
return err;
}
-void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
+void intel_engine_write_global_seqno(struct intel_engine_cs *engine, u32 seqno)
{
- struct drm_i915_private *dev_priv = engine->i915;
-
- /* Our semaphore implementation is strictly monotonic (i.e. we proceed
- * so long as the semaphore value in the register/page is greater
- * than the sync value), so whenever we reset the seqno,
- * so long as we reset the tracking semaphore value to 0, it will
- * always be before the next request's seqno. If we don't reset
- * the semaphore value, then when the seqno moves backwards all
- * future waits will complete instantly (causing rendering corruption).
- */
- if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
- I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
- I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
- if (HAS_VEBOX(dev_priv))
- I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
- }
-
intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
- clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
-
- /* After manually advancing the seqno, fake the interrupt in case
- * there are any waiters for that seqno.
- */
- intel_engine_wakeup(engine);
-
GEM_BUG_ON(intel_engine_get_seqno(engine) != seqno);
}
@@ -469,50 +474,67 @@ static void intel_engine_init_execlist(struct intel_engine_cs *engine)
GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists)));
GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);
- execlists->queue_priority = INT_MIN;
+ execlists->queue_priority_hint = INT_MIN;
execlists->queue = RB_ROOT_CACHED;
}
-/**
- * intel_engines_setup_common - setup engine state not requiring hw access
- * @engine: Engine to setup.
- *
- * Initializes @engine@ structure members shared between legacy and execlists
- * submission modes which do not require hardware access.
- *
- * Typically done early in the submission mode specific engine setup stage.
- */
-void intel_engine_setup_common(struct intel_engine_cs *engine)
+static void cleanup_status_page(struct intel_engine_cs *engine)
{
- i915_timeline_init(engine->i915, &engine->timeline, engine->name);
- i915_timeline_set_subclass(&engine->timeline, TIMELINE_ENGINE);
+ struct i915_vma *vma;
- intel_engine_init_execlist(engine);
- intel_engine_init_hangcheck(engine);
- intel_engine_init_batch_pool(engine);
- intel_engine_init_cmd_parser(engine);
+ /* Prevent writes into HWSP after returning the page to the system */
+ intel_engine_set_hwsp_writemask(engine, ~0u);
+
+ vma = fetch_and_zero(&engine->status_page.vma);
+ if (!vma)
+ return;
+
+ if (!HWS_NEEDS_PHYSICAL(engine->i915))
+ i915_vma_unpin(vma);
+
+ i915_gem_object_unpin_map(vma->obj);
+ __i915_gem_object_release_unless_active(vma->obj);
}
-static void cleanup_status_page(struct intel_engine_cs *engine)
+static int pin_ggtt_status_page(struct intel_engine_cs *engine,
+ struct i915_vma *vma)
{
- if (HWS_NEEDS_PHYSICAL(engine->i915)) {
- void *addr = fetch_and_zero(&engine->status_page.page_addr);
+ unsigned int flags;
- __free_page(virt_to_page(addr));
- }
+ flags = PIN_GLOBAL;
+ if (!HAS_LLC(engine->i915))
+ /*
+ * On g33, we cannot place HWS above 256MiB, so
+ * restrict its pinning to the low mappable arena.
+ * Though this restriction is not documented for
+ * gen4, gen5, or byt, they also behave similarly
+ * and hang if the HWS is placed at the top of the
+ * GTT. To generalise, it appears that all !llc
+ * platforms have issues with us placing the HWS
+ * above the mappable region (even though we never
+ * actually map it).
+ */
+ flags |= PIN_MAPPABLE;
+ else
+ flags |= PIN_HIGH;
- i915_vma_unpin_and_release(&engine->status_page.vma,
- I915_VMA_RELEASE_MAP);
+ return i915_vma_pin(vma, 0, 0, flags);
}
static int init_status_page(struct intel_engine_cs *engine)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
- unsigned int flags;
void *vaddr;
int ret;
+ /*
+ * Though the HWS register does support 36bit addresses, historically
+ * we have had hangs and corruption reported due to wild writes if
+ * the HWS is placed above 4G. We only allow objects to be allocated
+ * in GFP_DMA32 for i965, and no earlier physical address users had
+ * access to more than 4G.
+ */
obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
if (IS_ERR(obj)) {
DRM_ERROR("Failed to allocate status page\n");
@@ -529,59 +551,67 @@ static int init_status_page(struct intel_engine_cs *engine)
goto err;
}
- flags = PIN_GLOBAL;
- if (!HAS_LLC(engine->i915))
- /* On g33, we cannot place HWS above 256MiB, so
- * restrict its pinning to the low mappable arena.
- * Though this restriction is not documented for
- * gen4, gen5, or byt, they also behave similarly
- * and hang if the HWS is placed at the top of the
- * GTT. To generalise, it appears that all !llc
- * platforms have issues with us placing the HWS
- * above the mappable region (even though we never
- * actually map it).
- */
- flags |= PIN_MAPPABLE;
- else
- flags |= PIN_HIGH;
- ret = i915_vma_pin(vma, 0, 0, flags);
- if (ret)
- goto err;
-
vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
if (IS_ERR(vaddr)) {
ret = PTR_ERR(vaddr);
- goto err_unpin;
+ goto err;
}
+ engine->status_page.addr = memset(vaddr, 0, PAGE_SIZE);
engine->status_page.vma = vma;
- engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
- engine->status_page.page_addr = memset(vaddr, 0, PAGE_SIZE);
+
+ if (!HWS_NEEDS_PHYSICAL(engine->i915)) {
+ ret = pin_ggtt_status_page(engine, vma);
+ if (ret)
+ goto err_unpin;
+ }
+
return 0;
err_unpin:
- i915_vma_unpin(vma);
+ i915_gem_object_unpin_map(obj);
err:
i915_gem_object_put(obj);
return ret;
}
-static int init_phys_status_page(struct intel_engine_cs *engine)
+/**
+ * intel_engine_setup_common - setup engine state not requiring hw access
+ * @engine: Engine to setup.
+ *
+ * Initializes @engine structure members shared between legacy and execlists
+ * submission modes which do not require hardware access.
+ *
+ * Typically done early in the submission mode specific engine setup stage.
+ */
+int intel_engine_setup_common(struct intel_engine_cs *engine)
{
- struct page *page;
+ int err;
- /*
- * Though the HWS register does support 36bit addresses, historically
- * we have had hangs and corruption reported due to wild writes if
- * the HWS is placed above 4G.
- */
- page = alloc_page(GFP_KERNEL | __GFP_DMA32 | __GFP_ZERO);
- if (!page)
- return -ENOMEM;
+ err = init_status_page(engine);
+ if (err)
+ return err;
+
+ err = i915_timeline_init(engine->i915,
+ &engine->timeline,
+ engine->name,
+ engine->status_page.vma);
+ if (err)
+ goto err_hwsp;
- engine->status_page.page_addr = page_address(page);
+ i915_timeline_set_subclass(&engine->timeline, TIMELINE_ENGINE);
+
+ intel_engine_init_breadcrumbs(engine);
+ intel_engine_init_execlist(engine);
+ intel_engine_init_hangcheck(engine);
+ intel_engine_init_batch_pool(engine);
+ intel_engine_init_cmd_parser(engine);
return 0;
+
+err_hwsp:
+ cleanup_status_page(engine);
+ return err;
}
static void __intel_context_unpin(struct i915_gem_context *ctx,
@@ -590,6 +620,56 @@ static void __intel_context_unpin(struct i915_gem_context *ctx,
intel_context_unpin(to_intel_context(ctx, engine));
}
+struct measure_breadcrumb {
+ struct i915_request rq;
+ struct i915_timeline timeline;
+ struct intel_ring ring;
+ u32 cs[1024];
+};
+
+static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
+{
+ struct measure_breadcrumb *frame;
+ int dw = -ENOMEM;
+
+ GEM_BUG_ON(!engine->i915->gt.scratch);
+
+ frame = kzalloc(sizeof(*frame), GFP_KERNEL);
+ if (!frame)
+ return -ENOMEM;
+
+ if (i915_timeline_init(engine->i915,
+ &frame->timeline, "measure",
+ engine->status_page.vma))
+ goto out_frame;
+
+ INIT_LIST_HEAD(&frame->ring.request_list);
+ frame->ring.timeline = &frame->timeline;
+ frame->ring.vaddr = frame->cs;
+ frame->ring.size = sizeof(frame->cs);
+ frame->ring.effective_size = frame->ring.size;
+ intel_ring_update_space(&frame->ring);
+
+ frame->rq.i915 = engine->i915;
+ frame->rq.engine = engine;
+ frame->rq.ring = &frame->ring;
+ frame->rq.timeline = &frame->timeline;
+
+ dw = i915_timeline_pin(&frame->timeline);
+ if (dw < 0)
+ goto out_timeline;
+
+ dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;
+
+ i915_timeline_unpin(&frame->timeline);
+
+out_timeline:
+ i915_timeline_fini(&frame->timeline);
+out_frame:
+ kfree(frame);
+ return dw;
+}
+
/**
* intel_engine_init_common - initialize engine state which might require hw access
* @engine: Engine to initialize.
@@ -632,21 +712,14 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
}
}
- ret = intel_engine_init_breadcrumbs(engine);
- if (ret)
+ ret = measure_breadcrumb_dw(engine);
+ if (ret < 0)
goto err_unpin_preempt;
- if (HWS_NEEDS_PHYSICAL(i915))
- ret = init_phys_status_page(engine);
- else
- ret = init_status_page(engine);
- if (ret)
- goto err_breadcrumbs;
+ engine->emit_fini_breadcrumb_dw = ret;
return 0;
-err_breadcrumbs:
- intel_engine_fini_breadcrumbs(engine);
err_unpin_preempt:
if (i915->preempt_context)
__intel_context_unpin(i915->preempt_context, engine);
@@ -769,12 +842,12 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv)
{
- const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu);
+ const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
u32 mcr_s_ss_select;
u32 slice = fls(sseu->slice_mask);
u32 subslice = fls(sseu->subslice_mask[slice]);
- if (IS_GEN10(dev_priv))
+ if (IS_GEN(dev_priv, 10))
mcr_s_ss_select = GEN8_MCR_SLICE(slice) |
GEN8_MCR_SUBSLICE(subslice);
else if (INTEL_GEN(dev_priv) >= 11)
@@ -786,15 +859,15 @@ u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv)
return mcr_s_ss_select;
}
-static inline uint32_t
+static inline u32
read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
int subslice, i915_reg_t reg)
{
- uint32_t mcr_slice_subslice_mask;
- uint32_t mcr_slice_subslice_select;
- uint32_t default_mcr_s_ss_select;
- uint32_t mcr;
- uint32_t ret;
+ u32 mcr_slice_subslice_mask;
+ u32 mcr_slice_subslice_select;
+ u32 default_mcr_s_ss_select;
+ u32 mcr;
+ u32 ret;
enum forcewake_domains fw_domains;
if (INTEL_GEN(dev_priv) >= 11) {
@@ -900,10 +973,15 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
static bool ring_is_idle(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
+ intel_wakeref_t wakeref;
bool idle = true;
+ if (I915_SELFTEST_ONLY(!engine->mmio_base))
+ return true;
+
/* If the whole device is asleep, the engine must be idle */
- if (!intel_runtime_pm_get_if_in_use(dev_priv))
+ wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
+ if (!wakeref)
return true;
/* First check that no commands are left in the ring */
@@ -915,7 +993,7 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
idle = false;
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
return idle;
}
@@ -939,9 +1017,6 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
if (!intel_engine_signaled(engine, intel_engine_last_submit(engine)))
return false;
- if (I915_SELFTEST_ONLY(engine->breadcrumbs.mock))
- return true;
-
/* Waiting to drain ELSP? */
if (READ_ONCE(engine->execlists.active)) {
struct tasklet_struct *t = &engine->execlists.tasklet;
@@ -967,10 +1042,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
return false;
/* Ring stopped? */
- if (!ring_is_idle(engine))
- return false;
-
- return true;
+ return ring_is_idle(engine);
}
bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
@@ -1014,7 +1086,7 @@ bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
* the last request that remains in the timeline. When idle, it is
* the last executed context as tracked by retirement.
*/
- rq = __i915_gem_active_peek(&engine->timeline.last_request);
+ rq = __i915_active_request_peek(&engine->timeline.last_request);
if (rq)
return rq->hw_context == kernel_context;
else
@@ -1030,26 +1102,36 @@ void intel_engines_reset_default_submission(struct drm_i915_private *i915)
engine->set_default_submission(engine);
}
+static bool reset_engines(struct drm_i915_private *i915)
+{
+ if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
+ return false;
+
+ return intel_gpu_reset(i915, ALL_ENGINES) == 0;
+}
+
/**
* intel_engines_sanitize: called after the GPU has lost power
* @i915: the i915 device
+ * @force: ignore a failed reset and sanitize engine state anyway
*
* Anytime we reset the GPU, either with an explicit GPU reset or through a
* PCI power cycle, the GPU loses state and we must reset our state tracking
* to match. Note that calling intel_engines_sanitize() if the GPU has not
* been reset results in much confusion!
*/
-void intel_engines_sanitize(struct drm_i915_private *i915)
+void intel_engines_sanitize(struct drm_i915_private *i915, bool force)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
GEM_TRACE("\n");
- for_each_engine(engine, i915, id) {
- if (engine->reset.reset)
- engine->reset.reset(engine, NULL);
- }
+ if (!reset_engines(i915) && !force)
+ return;
+
+ for_each_engine(engine, i915, id)
+ intel_engine_reset(engine, false);
}
/**
@@ -1085,7 +1167,7 @@ void intel_engines_park(struct drm_i915_private *i915)
}
/* Must be reset upon idling, or we may miss the busy wakeup. */
- GEM_BUG_ON(engine->execlists.queue_priority != INT_MIN);
+ GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN);
if (engine->park)
engine->park(engine);
@@ -1201,10 +1283,14 @@ static void print_request(struct drm_printer *m,
x = print_sched_attr(rq->i915, &rq->sched.attr, buf, x, sizeof(buf));
- drm_printf(m, "%s%x%s [%llx:%x]%s @ %dms: %s\n",
+ drm_printf(m, "%s%x%s%s [%llx:%llx]%s @ %dms: %s\n",
prefix,
rq->global_seqno,
- i915_request_completed(rq) ? "!" : "",
+ i915_request_completed(rq) ? "!" :
+ i915_request_started(rq) ? "*" :
+ "",
+ test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+ &rq->fence.flags) ? "+" : "",
rq->fence.context, rq->fence.seqno,
buf,
jiffies_to_msecs(jiffies - rq->emitted_jiffies),
@@ -1248,7 +1334,7 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
&engine->execlists;
u64 addr;
- if (engine->id == RCS && IS_GEN(dev_priv, 4, 7))
+ if (engine->id == RCS && IS_GEN_RANGE(dev_priv, 4, 7))
drm_printf(m, "\tCCID: 0x%08x\n", I915_READ(CCID));
drm_printf(m, "\tRING_START: 0x%08x\n",
I915_READ(RING_START(engine->mmio_base)));
@@ -1269,16 +1355,6 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
drm_printf(m, "\tRING_IMR: %08x\n", I915_READ_IMR(engine));
}
- if (HAS_LEGACY_SEMAPHORES(dev_priv)) {
- drm_printf(m, "\tSYNC_0: 0x%08x\n",
- I915_READ(RING_SYNC_0(engine->mmio_base)));
- drm_printf(m, "\tSYNC_1: 0x%08x\n",
- I915_READ(RING_SYNC_1(engine->mmio_base)));
- if (HAS_VEBOX(dev_priv))
- drm_printf(m, "\tSYNC_2: 0x%08x\n",
- I915_READ(RING_SYNC_2(engine->mmio_base)));
- }
-
addr = intel_engine_get_active_head(engine);
drm_printf(m, "\tACTHD: 0x%08x_%08x\n",
upper_32_bits(addr), lower_32_bits(addr));
@@ -1305,7 +1381,8 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
}
if (HAS_EXECLISTS(dev_priv)) {
- const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
+ const u32 *hws =
+ &engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
unsigned int idx;
u8 read, write;
@@ -1348,9 +1425,10 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
char hdr[80];
snprintf(hdr, sizeof(hdr),
- "\t\tELSP[%d] count=%d, ring->start=%08x, rq: ",
+ "\t\tELSP[%d] count=%d, ring:{start:%08x, hwsp:%08x}, rq: ",
idx, count,
- i915_ggtt_offset(rq->ring->vma));
+ i915_ggtt_offset(rq->ring->vma),
+ rq->timeline->hwsp_offset);
print_request(m, rq, hdr);
} else {
drm_printf(m, "\t\tELSP[%d] idle\n", idx);
@@ -1405,14 +1483,9 @@ void intel_engine_dump(struct intel_engine_cs *engine,
struct drm_printer *m,
const char *header, ...)
{
- const int MAX_REQUESTS_TO_SHOW = 8;
- struct intel_breadcrumbs * const b = &engine->breadcrumbs;
- const struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_gpu_error * const error = &engine->i915->gpu_error;
- struct i915_request *rq, *last;
- unsigned long flags;
- struct rb_node *rb;
- int count;
+ struct i915_request *rq;
+ intel_wakeref_t wakeref;
if (header) {
va_list ap;
@@ -1462,85 +1535,30 @@ void intel_engine_dump(struct intel_engine_cs *engine,
rq->ring->emit);
drm_printf(m, "\t\tring->space: 0x%08x\n",
rq->ring->space);
+ drm_printf(m, "\t\tring->hwsp: 0x%08x\n",
+ rq->timeline->hwsp_offset);
print_request_ring(m, rq);
}
rcu_read_unlock();
- if (intel_runtime_pm_get_if_in_use(engine->i915)) {
+ wakeref = intel_runtime_pm_get_if_in_use(engine->i915);
+ if (wakeref) {
intel_engine_print_registers(engine, m);
- intel_runtime_pm_put(engine->i915);
+ intel_runtime_pm_put(engine->i915, wakeref);
} else {
drm_printf(m, "\tDevice is asleep; skipping register dump\n");
}
- local_irq_save(flags);
- spin_lock(&engine->timeline.lock);
-
- last = NULL;
- count = 0;
- list_for_each_entry(rq, &engine->timeline.requests, link) {
- if (count++ < MAX_REQUESTS_TO_SHOW - 1)
- print_request(m, rq, "\t\tE ");
- else
- last = rq;
- }
- if (last) {
- if (count > MAX_REQUESTS_TO_SHOW) {
- drm_printf(m,
- "\t\t...skipping %d executing requests...\n",
- count - MAX_REQUESTS_TO_SHOW);
- }
- print_request(m, last, "\t\tE ");
- }
-
- last = NULL;
- count = 0;
- drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority);
- for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
- struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
- int i;
-
- priolist_for_each_request(rq, p, i) {
- if (count++ < MAX_REQUESTS_TO_SHOW - 1)
- print_request(m, rq, "\t\tQ ");
- else
- last = rq;
- }
- }
- if (last) {
- if (count > MAX_REQUESTS_TO_SHOW) {
- drm_printf(m,
- "\t\t...skipping %d queued requests...\n",
- count - MAX_REQUESTS_TO_SHOW);
- }
- print_request(m, last, "\t\tQ ");
- }
-
- spin_unlock(&engine->timeline.lock);
-
- spin_lock(&b->rb_lock);
- for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
- struct intel_wait *w = rb_entry(rb, typeof(*w), node);
-
- drm_printf(m, "\t%s [%d:%c] waiting for %x\n",
- w->tsk->comm, w->tsk->pid,
- task_state_to_char(w->tsk),
- w->seqno);
- }
- spin_unlock(&b->rb_lock);
- local_irq_restore(flags);
-
- drm_printf(m, "IRQ? 0x%lx (breadcrumbs? %s)\n",
- engine->irq_posted,
- yesno(test_bit(ENGINE_IRQ_BREADCRUMB,
- &engine->irq_posted)));
+ intel_execlists_show_requests(engine, m, print_request, 8);
drm_printf(m, "HWSP:\n");
- hexdump(m, engine->status_page.page_addr, PAGE_SIZE);
+ hexdump(m, engine->status_page.addr, PAGE_SIZE);
drm_printf(m, "Idle? %s\n", yesno(intel_engine_is_idle(engine)));
+
+ intel_engine_print_breadcrumbs(engine, m);
}
static u8 user_class_map[] = {
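
The ring_is_idle() and intel_engine_dump() hunks above replace bare
intel_runtime_pm_get_if_in_use()/intel_runtime_pm_put() pairs with an
intel_wakeref_t cookie that is returned by the get and handed back to the
put, so an unbalanced reference can be attributed to its acquirer. A minimal
userspace sketch of the cookie idea only; the helper names, the
return-address trick and the absence of locking are illustrative assumptions,
not the i915 implementation:

#include <stdint.h>
#include <stdio.h>

typedef uintptr_t wakeref_t;	/* opaque cookie, 0 means "not held" */

static int pm_refcount;

static wakeref_t pm_get(void)
{
	pm_refcount++;
	/* Use the caller's address as the cookie so a leak can be attributed. */
	return (uintptr_t)__builtin_return_address(0);
}

static void pm_put(wakeref_t wakeref)
{
	if (!wakeref)
		return;
	pm_refcount--;
}

int main(void)
{
	wakeref_t wakeref = pm_get();

	/* ... hardware access would happen while the reference is held ... */

	pm_put(wakeref);
	printf("refcount after balanced get/put: %d\n", pm_refcount);
	return 0;
}

The exact cookie value matters less than the fact that the put side now
requires one, which is what lets the driver track who acquired each
outstanding reference.
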
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index f23570c44323..656e684e7c9a 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -38,6 +38,8 @@
* forcibly disable it to allow proper screen updates.
*/
+#include <drm/drm_fourcc.h>
+
#include "intel_drv.h"
#include "i915_drv.h"
@@ -84,7 +86,7 @@ static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
int lines;
intel_fbc_get_plane_source_size(cache, NULL, &lines);
- if (IS_GEN7(dev_priv))
+ if (IS_GEN(dev_priv, 7))
lines = min(lines, 2048);
else if (INTEL_GEN(dev_priv) >= 8)
lines = min(lines, 2560);
@@ -127,7 +129,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
cfb_pitch = params->fb.stride;
/* FBC_CTL wants 32B or 64B units */
- if (IS_GEN2(dev_priv))
+ if (IS_GEN(dev_priv, 2))
cfb_pitch = (cfb_pitch / 32) - 1;
else
cfb_pitch = (cfb_pitch / 64) - 1;
@@ -136,7 +138,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
I915_WRITE(FBC_TAG(i), 0);
- if (IS_GEN4(dev_priv)) {
+ if (IS_GEN(dev_priv, 4)) {
u32 fbc_ctl2;
/* Set it up... */
@@ -233,9 +235,9 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
if (params->flags & PLANE_HAS_FENCE) {
dpfc_ctl |= DPFC_CTL_FENCE_EN;
- if (IS_GEN5(dev_priv))
+ if (IS_GEN(dev_priv, 5))
dpfc_ctl |= params->vma->fence->id;
- if (IS_GEN6(dev_priv)) {
+ if (IS_GEN(dev_priv, 6)) {
I915_WRITE(SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE |
params->vma->fence->id);
@@ -243,7 +245,7 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
params->crtc.fence_y_offset);
}
} else {
- if (IS_GEN6(dev_priv)) {
+ if (IS_GEN(dev_priv, 6)) {
I915_WRITE(SNB_DPFC_CTL_SA, 0);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
}
@@ -282,7 +284,7 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
int threshold = dev_priv->fbc.threshold;
/* Display WA #0529: skl, kbl, bxt. */
- if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv)) {
+ if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv)) {
u32 val = I915_READ(CHICKEN_MISC_4);
val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK);
@@ -581,10 +583,10 @@ static bool stride_is_valid(struct drm_i915_private *dev_priv,
if (stride < 512)
return false;
- if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
+ if (IS_GEN(dev_priv, 2) || IS_GEN(dev_priv, 3))
return stride == 4096 || stride == 8192;
- if (IS_GEN4(dev_priv) && !IS_G4X(dev_priv) && stride < 2048)
+ if (IS_GEN(dev_priv, 4) && !IS_G4X(dev_priv) && stride < 2048)
return false;
if (stride > 16384)
@@ -594,7 +596,7 @@ static bool stride_is_valid(struct drm_i915_private *dev_priv,
}
static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
- uint32_t pixel_format)
+ u32 pixel_format)
{
switch (pixel_format) {
case DRM_FORMAT_XRGB8888:
@@ -603,7 +605,7 @@ static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
case DRM_FORMAT_XRGB1555:
case DRM_FORMAT_RGB565:
/* 16bpp not supported on gen2 */
- if (IS_GEN2(dev_priv))
+ if (IS_GEN(dev_priv, 2))
return false;
/* WaFbcOnly1to1Ratio:ctg */
if (IS_G4X(dev_priv))
@@ -626,7 +628,10 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
struct intel_fbc *fbc = &dev_priv->fbc;
unsigned int effective_w, effective_h, max_w, max_h;
- if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
+ max_w = 5120;
+ max_h = 4096;
+ } else if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) {
max_w = 4096;
max_h = 4096;
} else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
@@ -784,7 +789,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
* having a Y offset that isn't divisible by 4 causes FIFO underrun
* and screen flicker.
*/
- if (IS_GEN(dev_priv, 9, 10) &&
+ if (IS_GEN_RANGE(dev_priv, 9, 10) &&
(fbc->state_cache.plane.adjusted_y & 3)) {
fbc->no_fbc_reason = "plane Y offset is misaligned";
return false;
@@ -839,7 +844,7 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);
- if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv))
+ if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
params->gen9_wa_cfb_stride = DIV_ROUND_UP(cache->plane.src_w,
32 * fbc->threshold) * 8;
}
@@ -1126,8 +1131,6 @@ void intel_fbc_disable(struct intel_crtc *crtc)
if (!fbc_supported(dev_priv))
return;
- WARN_ON(crtc->active);
-
mutex_lock(&fbc->lock);
if (fbc->crtc == crtc)
__intel_fbc_disable(dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 4ee16b264dbe..e8f694b57b8a 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -37,9 +37,10 @@
#include <linux/init.h>
#include <linux/vga_switcheroo.h>
-#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
+
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include <drm/i915_drm.h>
@@ -178,8 +179,9 @@ static int intelfb_create(struct drm_fb_helper *helper,
const struct i915_ggtt_view view = {
.type = I915_GGTT_VIEW_NORMAL,
};
- struct fb_info *info;
struct drm_framebuffer *fb;
+ intel_wakeref_t wakeref;
+ struct fb_info *info;
struct i915_vma *vma;
unsigned long flags = 0;
bool prealloc = false;
@@ -210,7 +212,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
}
mutex_lock(&dev->struct_mutex);
- intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(dev_priv);
/* Pin the GGTT vma for our access via info->screen_base.
* This also validates that any existing fb inherited from the
@@ -277,7 +279,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
ifbdev->vma = vma;
ifbdev->vma_flags = flags;
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
mutex_unlock(&dev->struct_mutex);
vga_switcheroo_client_fb_set(pdev, info);
return 0;
@@ -285,7 +287,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
out_unpin:
intel_unpin_fb_vma(vma, flags);
out_unlock:
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
mutex_unlock(&dev->struct_mutex);
return ret;
}
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
index 77c123cc8817..f33de4be4b89 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -127,8 +127,8 @@ static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
- DE_PIPEB_FIFO_UNDERRUN;
+ u32 bit = (pipe == PIPE_A) ?
+ DE_PIPEA_FIFO_UNDERRUN : DE_PIPEB_FIFO_UNDERRUN;
if (enable)
ilk_enable_display_irq(dev_priv, bit);
@@ -140,7 +140,7 @@ static void ivybridge_check_fifo_underruns(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- uint32_t err_int = I915_READ(GEN7_ERR_INT);
+ u32 err_int = I915_READ(GEN7_ERR_INT);
lockdep_assert_held(&dev_priv->irq_lock);
@@ -193,8 +193,8 @@ static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
bool enable)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- uint32_t bit = (pch_transcoder == PIPE_A) ?
- SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
+ u32 bit = (pch_transcoder == PIPE_A) ?
+ SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
if (enable)
ibx_enable_display_interrupt(dev_priv, bit);
@@ -206,7 +206,7 @@ static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pch_transcoder = crtc->pipe;
- uint32_t serr_int = I915_READ(SERR_INT);
+ u32 serr_int = I915_READ(SERR_INT);
lockdep_assert_held(&dev_priv->irq_lock);
@@ -258,11 +258,11 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
old = !crtc->cpu_fifo_underrun_disabled;
crtc->cpu_fifo_underrun_disabled = !enable;
- if (HAS_GMCH_DISPLAY(dev_priv))
+ if (HAS_GMCH(dev_priv))
i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
- else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv))
+ else if (IS_GEN_RANGE(dev_priv, 5, 6))
ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
- else if (IS_GEN7(dev_priv))
+ else if (IS_GEN(dev_priv, 7))
ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
else if (INTEL_GEN(dev_priv) >= 8)
broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
@@ -369,7 +369,7 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
return;
/* GMCH can't disable fifo underruns, filter them. */
- if (HAS_GMCH_DISPLAY(dev_priv) &&
+ if (HAS_GMCH(dev_priv) &&
crtc->cpu_fifo_underrun_disabled)
return;
@@ -421,9 +421,9 @@ void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv)
if (crtc->cpu_fifo_underrun_disabled)
continue;
- if (HAS_GMCH_DISPLAY(dev_priv))
+ if (HAS_GMCH(dev_priv))
i9xx_check_fifo_underruns(crtc);
- else if (IS_GEN7(dev_priv))
+ else if (IS_GEN(dev_priv, 7))
ivybridge_check_fifo_underruns(crtc);
}
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index c3379bde266f..16f253deaf8d 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -60,7 +60,6 @@
* functions is deprecated and should be avoided.
*/
-#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_frontbuffer.h"
diff --git a/drivers/gpu/drm/i915/intel_gpu_commands.h b/drivers/gpu/drm/i915/intel_gpu_commands.h
index 105e2a9e874a..b96a31bc1080 100644
--- a/drivers/gpu/drm/i915/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/intel_gpu_commands.h
@@ -112,7 +112,6 @@
#define MI_MEM_VIRTUAL (1 << 22) /* 945,g33,965 */
#define MI_USE_GGTT (1 << 22) /* g4x+ */
#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
-#define MI_STORE_DWORD_INDEX_SHIFT 2
/*
* Official intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM:
* - Always issue a MI_NOOP _before_ the MI_LOAD_REGISTER_IMM - otherwise hw
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 0f1c4f9ebfd8..744220296653 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -192,4 +192,7 @@ static inline void intel_guc_disable_msg(struct intel_guc *guc, u32 mask)
spin_unlock_irq(&guc->irq_lock);
}
+int intel_guc_reset_engine(struct intel_guc *guc,
+ struct intel_engine_cs *engine);
+
#endif
diff --git a/drivers/gpu/drm/i915/intel_guc_fw.c b/drivers/gpu/drm/i915/intel_guc_fw.c
index a67144ee5ceb..13ff7003c6be 100644
--- a/drivers/gpu/drm/i915/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/intel_guc_fw.c
@@ -77,10 +77,6 @@ static void guc_fw_select(struct intel_uc_fw *guc_fw)
guc_fw->path = I915_KBL_GUC_UCODE;
guc_fw->major_ver_wanted = KBL_FW_MAJOR;
guc_fw->minor_ver_wanted = KBL_FW_MINOR;
- } else {
- dev_info(dev_priv->drm.dev,
- "%s: No firmware known for this platform!\n",
- intel_uc_fw_type_repr(guc_fw->type));
}
}
@@ -115,7 +111,7 @@ static void guc_prepare_xfer(struct intel_guc *guc)
else
I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
- if (IS_GEN9(dev_priv)) {
+ if (IS_GEN(dev_priv, 9)) {
/* DOP Clock Gating Enable for GuC clocks */
I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
I915_READ(GEN7_MISCCPCTL)));
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index d3ebdbc0182e..806fdfd7c78a 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -140,6 +140,9 @@ static struct dentry *create_buf_file_callback(const char *filename,
buf_file = debugfs_create_file(filename, mode,
parent, buf, &relay_file_operations);
+ if (IS_ERR(buf_file))
+ return NULL;
+
return buf_file;
}
@@ -436,6 +439,7 @@ static void guc_log_capture_logs(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ intel_wakeref_t wakeref;
guc_read_update_log_buffer(log);
@@ -443,9 +447,8 @@ static void guc_log_capture_logs(struct intel_guc_log *log)
* Generally device is expected to be active only at this
* time, so get/put should be really quick.
*/
- intel_runtime_pm_get(dev_priv);
- guc_action_flush_log_complete(guc);
- intel_runtime_pm_put(dev_priv);
+ with_intel_runtime_pm(dev_priv, wakeref)
+ guc_action_flush_log_complete(guc);
}
int intel_guc_log_create(struct intel_guc_log *log)
@@ -505,7 +508,8 @@ int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
{
struct intel_guc *guc = log_to_guc(log);
struct drm_i915_private *dev_priv = guc_to_i915(guc);
- int ret;
+ intel_wakeref_t wakeref;
+ int ret = 0;
BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN != 0);
GEM_BUG_ON(!log->vma);
@@ -519,16 +523,14 @@ int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
mutex_lock(&dev_priv->drm.struct_mutex);
- if (log->level == level) {
- ret = 0;
+ if (log->level == level)
goto out_unlock;
- }
- intel_runtime_pm_get(dev_priv);
- ret = guc_action_control_log(guc, GUC_LOG_LEVEL_IS_VERBOSE(level),
- GUC_LOG_LEVEL_IS_ENABLED(level),
- GUC_LOG_LEVEL_TO_VERBOSITY(level));
- intel_runtime_pm_put(dev_priv);
+ with_intel_runtime_pm(dev_priv, wakeref)
+ ret = guc_action_control_log(guc,
+ GUC_LOG_LEVEL_IS_VERBOSE(level),
+ GUC_LOG_LEVEL_IS_ENABLED(level),
+ GUC_LOG_LEVEL_TO_VERBOSITY(level));
if (ret) {
DRM_DEBUG_DRIVER("guc_log_control action failed %d\n", ret);
goto out_unlock;
@@ -601,6 +603,7 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
struct drm_i915_private *i915 = guc_to_i915(guc);
+ intel_wakeref_t wakeref;
/*
* Before initiating the forceful flush, wait for any pending/ongoing
@@ -608,9 +611,8 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log)
*/
flush_work(&log->relay.flush_work);
- intel_runtime_pm_get(i915);
- guc_action_flush_log(guc);
- intel_runtime_pm_put(i915);
+ with_intel_runtime_pm(i915, wakeref)
+ guc_action_flush_log(guc);
/* GuC would have updated log buffer by now, so capture it */
guc_log_capture_logs(log);
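
guc_log_capture_logs(), intel_guc_log_set_level() and intel_guc_log_relay_flush()
above now wrap the GuC action in with_intel_runtime_pm(), which scopes the
get/put pair around the statement that follows it. A minimal sketch of how such
a for-loop based scoped-acquire macro can work, using hypothetical
resource_get()/resource_put() helpers rather than the real i915 ones:

#include <stdio.h>

static int active;

static int resource_get(void)
{
	active++;
	return 1;	/* a real implementation would return a cookie */
}

static void resource_put(int cookie)
{
	if (cookie)
		active--;
}

/* Run the following statement exactly once between get and put. */
#define with_resource(cookie) \
	for ((cookie) = resource_get(); (cookie); resource_put(cookie), (cookie) = 0)

int main(void)
{
	int wf;

	with_resource(wf)
		printf("held: active=%d\n", active);

	printf("released: active=%d\n", active);
	return 0;
}

The body runs once while the reference is held and the put happens in the
loop's increment expression, so the common straight-line case cannot forget
it (an early return from inside the body would still skip the put).
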
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 1570dcbe249c..8bc8aa54aa35 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -81,6 +81,12 @@
*
*/
+static inline u32 intel_hws_preempt_done_address(struct intel_engine_cs *engine)
+{
+ return (i915_ggtt_offset(engine->status_page.vma) +
+ I915_GEM_HWS_PREEMPT_ADDR);
+}
+
static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
return rb_entry(rb, struct i915_priolist, node);
@@ -572,7 +578,8 @@ static void inject_preempt_context(struct work_struct *work)
if (engine->id == RCS) {
cs = gen8_emit_ggtt_write_rcs(cs,
GUC_PREEMPT_FINISHED,
- addr);
+ addr,
+ PIPE_CONTROL_CS_STALL);
} else {
cs = gen8_emit_ggtt_write(cs,
GUC_PREEMPT_FINISHED,
@@ -622,6 +629,8 @@ static void inject_preempt_context(struct work_struct *work)
EXECLISTS_ACTIVE_PREEMPT);
tasklet_schedule(&engine->execlists.tasklet);
}
+
+ (void)I915_SELFTEST_ONLY(engine->execlists.preempt_hang.count++);
}
/*
@@ -665,7 +674,7 @@ static void complete_preempt_context(struct intel_engine_cs *engine)
execlists_unwind_incomplete_requests(execlists);
wait_for_guc_preempt_report(engine);
- intel_write_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX, 0);
+ intel_write_status_page(engine, I915_GEM_HWS_PREEMPT, 0);
}
/**
@@ -730,7 +739,7 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
if (intel_engine_has_preemption(engine)) {
struct guc_preempt_work *preempt_work =
&engine->i915->guc.preempt_work[engine->id];
- int prio = execlists->queue_priority;
+ int prio = execlists->queue_priority_hint;
if (__execlists_need_preempt(prio, port_prio(port))) {
execlists_set_active(execlists,
@@ -776,7 +785,8 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
kmem_cache_free(engine->i915->priorities, p);
}
done:
- execlists->queue_priority = rb ? to_priolist(rb)->priority : INT_MIN;
+ execlists->queue_priority_hint =
+ rb ? to_priolist(rb)->priority : INT_MIN;
if (submit)
port_assign(port, last);
if (last)
@@ -823,7 +833,7 @@ static void guc_submission_tasklet(unsigned long data)
}
if (execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT) &&
- intel_read_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX) ==
+ intel_read_status_page(engine, I915_GEM_HWS_PREEMPT) ==
GUC_PREEMPT_FINISHED)
complete_preempt_context(engine);
@@ -833,8 +843,7 @@ static void guc_submission_tasklet(unsigned long data)
spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
-static struct i915_request *
-guc_reset_prepare(struct intel_engine_cs *engine)
+static void guc_reset_prepare(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -860,8 +869,6 @@ guc_reset_prepare(struct intel_engine_cs *engine)
*/
if (engine->i915->guc.preempt_wq)
flush_workqueue(engine->i915->guc.preempt_wq);
-
- return i915_gem_find_active_request(engine);
}
/*
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index c22b3e18a0f5..1d7d26e4cf14 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -49,6 +49,9 @@ static bool is_supported_device(struct drm_i915_private *dev_priv)
return true;
if (IS_BROXTON(dev_priv))
return true;
+ if (IS_COFFEELAKE(dev_priv))
+ return true;
+
return false;
}
@@ -105,15 +108,6 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
return -EIO;
}
- /*
- * We're not in host or fail to find a MPT module, disable GVT-g
- */
- ret = intel_gvt_init_host();
- if (ret) {
- DRM_DEBUG_DRIVER("Not in host or MPT modules not found\n");
- goto bail;
- }
-
ret = intel_gvt_init_device(dev_priv);
if (ret) {
DRM_DEBUG_DRIVER("Fail to init GVT device\n");
diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c
index e26d05a46451..a219c796e56d 100644
--- a/drivers/gpu/drm/i915/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/intel_hangcheck.c
@@ -23,144 +23,18 @@
*/
#include "i915_drv.h"
+#include "i915_reset.h"
-static bool
-ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
-{
- ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
- return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
- MI_SEMAPHORE_REGISTER);
-}
-
-static struct intel_engine_cs *
-semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
- u64 offset)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
- struct intel_engine_cs *signaller;
- enum intel_engine_id id;
-
- for_each_engine(signaller, dev_priv, id) {
- if (engine == signaller)
- continue;
-
- if (sync_bits == signaller->semaphore.mbox.wait[engine->hw_id])
- return signaller;
- }
-
- DRM_DEBUG_DRIVER("No signaller ring found for %s, ipehr 0x%08x\n",
- engine->name, ipehr);
-
- return ERR_PTR(-ENODEV);
-}
-
-static struct intel_engine_cs *
-semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- void __iomem *vaddr;
- u32 cmd, ipehr, head;
- u64 offset = 0;
- int i, backwards;
-
- /*
- * This function does not support execlist mode - any attempt to
- * proceed further into this function will result in a kernel panic
- * when dereferencing ring->buffer, which is not set up in execlist
- * mode.
- *
- * The correct way of doing it would be to derive the currently
- * executing ring buffer from the current context, which is derived
- * from the currently running request. Unfortunately, to get the
- * current request we would have to grab the struct_mutex before doing
- * anything else, which would be ill-advised since some other thread
- * might have grabbed it already and managed to hang itself, causing
- * the hang checker to deadlock.
- *
- * Therefore, this function does not support execlist mode in its
- * current form. Just return NULL and move on.
- */
- if (engine->buffer == NULL)
- return NULL;
-
- ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
- if (!ipehr_is_semaphore_wait(engine, ipehr))
- return NULL;
-
- /*
- * HEAD is likely pointing to the dword after the actual command,
- * so scan backwards until we find the MBOX. But limit it to just 3
- * or 4 dwords depending on the semaphore wait command size.
- * Note that we don't care about ACTHD here since that might
- * point at at batch, and semaphores are always emitted into the
- * ringbuffer itself.
- */
- head = I915_READ_HEAD(engine) & HEAD_ADDR;
- backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
- vaddr = (void __iomem *)engine->buffer->vaddr;
-
- for (i = backwards; i; --i) {
- /*
- * Be paranoid and presume the hw has gone off into the wild -
- * our ring is smaller than what the hardware (and hence
- * HEAD_ADDR) allows. Also handles wrap-around.
- */
- head &= engine->buffer->size - 1;
-
- /* This here seems to blow up */
- cmd = ioread32(vaddr + head);
- if (cmd == ipehr)
- break;
-
- head -= 4;
- }
-
- if (!i)
- return NULL;
-
- *seqno = ioread32(vaddr + head + 4) + 1;
- return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
-}
-
-static int semaphore_passed(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- struct intel_engine_cs *signaller;
+struct hangcheck {
+ u64 acthd;
u32 seqno;
-
- engine->hangcheck.deadlock++;
-
- signaller = semaphore_waits_for(engine, &seqno);
- if (signaller == NULL)
- return -1;
-
- if (IS_ERR(signaller))
- return 0;
-
- /* Prevent pathological recursion due to driver bugs */
- if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
- return -1;
-
- if (intel_engine_signaled(signaller, seqno))
- return 1;
-
- /* cursory check for an unkickable deadlock */
- if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
- semaphore_passed(signaller) < 0)
- return -1;
-
- return 0;
-}
-
-static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- for_each_engine(engine, dev_priv, id)
- engine->hangcheck.deadlock = 0;
-}
+ enum intel_engine_hangcheck_action action;
+ unsigned long action_timestamp;
+ int deadlock;
+ struct intel_instdone instdone;
+ bool wedged:1;
+ bool stalled:1;
+};
static bool instdone_unchanged(u32 current_instdone, u32 *old_instdone)
{
@@ -236,7 +110,7 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
if (ha != ENGINE_DEAD)
return ha;
- if (IS_GEN2(dev_priv))
+ if (IS_GEN(dev_priv, 2))
return ENGINE_DEAD;
/* Is the chip hanging on a WAIT_FOR_EVENT?
@@ -252,54 +126,26 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
return ENGINE_WAIT_KICK;
}
- if (IS_GEN(dev_priv, 6, 7) && tmp & RING_WAIT_SEMAPHORE) {
- switch (semaphore_passed(engine)) {
- default:
- return ENGINE_DEAD;
- case 1:
- i915_handle_error(dev_priv, ALL_ENGINES, 0,
- "stuck semaphore on %s",
- engine->name);
- I915_WRITE_CTL(engine, tmp);
- return ENGINE_WAIT_KICK;
- case 0:
- return ENGINE_WAIT;
- }
- }
-
return ENGINE_DEAD;
}
static void hangcheck_load_sample(struct intel_engine_cs *engine,
- struct intel_engine_hangcheck *hc)
+ struct hangcheck *hc)
{
- /* We don't strictly need an irq-barrier here, as we are not
- * serving an interrupt request, be paranoid in case the
- * barrier has side-effects (such as preventing a broken
- * cacheline snoop) and so be sure that we can see the seqno
- * advance. If the seqno should stick, due to a stale
- * cacheline, we would erroneously declare the GPU hung.
- */
- if (engine->irq_seqno_barrier)
- engine->irq_seqno_barrier(engine);
-
hc->acthd = intel_engine_get_active_head(engine);
hc->seqno = intel_engine_get_seqno(engine);
}
static void hangcheck_store_sample(struct intel_engine_cs *engine,
- const struct intel_engine_hangcheck *hc)
+ const struct hangcheck *hc)
{
engine->hangcheck.acthd = hc->acthd;
engine->hangcheck.seqno = hc->seqno;
- engine->hangcheck.action = hc->action;
- engine->hangcheck.stalled = hc->stalled;
- engine->hangcheck.wedged = hc->wedged;
}
static enum intel_engine_hangcheck_action
hangcheck_get_action(struct intel_engine_cs *engine,
- const struct intel_engine_hangcheck *hc)
+ const struct hangcheck *hc)
{
if (engine->hangcheck.seqno != hc->seqno)
return ENGINE_ACTIVE_SEQNO;
@@ -311,7 +157,7 @@ hangcheck_get_action(struct intel_engine_cs *engine,
}
static void hangcheck_accumulate_sample(struct intel_engine_cs *engine,
- struct intel_engine_hangcheck *hc)
+ struct hangcheck *hc)
{
unsigned long timeout = I915_ENGINE_DEAD_TIMEOUT;
@@ -357,10 +203,6 @@ static void hangcheck_accumulate_sample(struct intel_engine_cs *engine,
break;
case ENGINE_DEAD:
- if (GEM_SHOW_DEBUG()) {
- struct drm_printer p = drm_debug_printer("hangcheck");
- intel_engine_dump(engine, &p, "%s\n", engine->name);
- }
break;
default:
@@ -431,24 +273,35 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
for_each_engine(engine, dev_priv, id) {
- struct intel_engine_hangcheck hc;
+ struct hangcheck hc;
- semaphore_clear_deadlocks(dev_priv);
+ intel_engine_signal_breadcrumbs(engine);
hangcheck_load_sample(engine, &hc);
hangcheck_accumulate_sample(engine, &hc);
hangcheck_store_sample(engine, &hc);
- if (engine->hangcheck.stalled) {
+ if (hc.stalled) {
hung |= intel_engine_flag(engine);
if (hc.action != ENGINE_DEAD)
stuck |= intel_engine_flag(engine);
}
- if (engine->hangcheck.wedged)
+ if (hc.wedged)
wedged |= intel_engine_flag(engine);
}
+ if (GEM_SHOW_DEBUG() && (hung | stuck)) {
+ struct drm_printer p = drm_debug_printer("hangcheck");
+
+ for_each_engine(engine, dev_priv, id) {
+ if (intel_engine_is_idle(engine))
+ continue;
+
+ intel_engine_dump(engine, &p, "%s\n", engine->name);
+ }
+ }
+
if (wedged) {
dev_err(dev_priv->drm.dev,
"GPU recovery timed out,"
diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/intel_hdcp.c
index 1bf487f94254..ce7ba3a9c000 100644
--- a/drivers/gpu/drm/i915/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/intel_hdcp.c
@@ -6,7 +6,6 @@
* Sean Paul <seanpaul@chromium.org>
*/
-#include <drm/drmP.h>
#include <drm/drm_hdcp.h>
#include <linux/i2c.h>
#include <linux/random.h>
@@ -15,6 +14,7 @@
#include "i915_reg.h"
#define KEY_LOAD_TRIES 5
+#define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS 50
static
bool intel_hdcp_is_ksv_valid(u8 *ksv)
@@ -157,10 +157,11 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
/*
* Initiate loading the HDCP key from fuses.
*
- * BXT+ platforms, HDCP key needs to be loaded by SW. Only SKL and KBL
- * differ in the key load trigger process from other platforms.
+ * On BXT+ platforms, the HDCP key needs to be loaded by SW. Only Gen9
+ * platforms other than BXT and GLK differ in the key load trigger
+ * process from other platforms, so GEN9_BC uses the GT Driver Mailbox i/f.
*/
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (IS_GEN9_BC(dev_priv)) {
mutex_lock(&dev_priv->pcu_lock);
ret = sandybridge_pcode_write(dev_priv,
SKL_PCODE_LOAD_HDCP_KEYS, 1);
@@ -636,7 +637,8 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
/* Wait for encryption confirmation */
if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port),
- HDCP_STATUS_ENC, HDCP_STATUS_ENC, 20)) {
+ HDCP_STATUS_ENC, HDCP_STATUS_ENC,
+ ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
DRM_ERROR("Timed out waiting for encryption\n");
return -ETIMEDOUT;
}
@@ -666,7 +668,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
I915_WRITE(PORT_HDCP_CONF(port), 0);
if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port), ~0, 0,
- 20)) {
+ ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
DRM_ERROR("Failed to disable HDCP, timeout clearing status\n");
return -ETIMEDOUT;
}
@@ -768,8 +770,7 @@ static void intel_hdcp_prop_work(struct work_struct *work)
bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
{
/* PORT E doesn't have HDCP, and PORT F is disabled */
- return ((INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) &&
- !IS_CHERRYVIEW(dev_priv) && port < PORT_E);
+ return INTEL_GEN(dev_priv) >= 9 && port < PORT_E;
}
int intel_hdcp_init(struct intel_connector *connector,
@@ -837,8 +838,8 @@ void intel_hdcp_atomic_check(struct drm_connector *connector,
struct drm_connector_state *old_state,
struct drm_connector_state *new_state)
{
- uint64_t old_cp = old_state->content_protection;
- uint64_t new_cp = new_state->content_protection;
+ u64 old_cp = old_state->content_protection;
+ u64 new_cp = new_state->content_protection;
struct drm_crtc_state *crtc_state;
if (!new_state->crtc) {
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 07e803a604bd..f125a62eba8c 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -30,7 +30,6 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/hdmi.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
@@ -479,18 +478,14 @@ static void intel_hdmi_set_avi_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
const struct drm_display_mode *adjusted_mode =
&crtc_state->base.adjusted_mode;
- struct drm_connector *connector = &intel_hdmi->attached_connector->base;
- bool is_hdmi2_sink = connector->display_info.hdmi.scdc.supported ||
- connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB420;
union hdmi_infoframe frame;
int ret;
ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
- adjusted_mode,
- is_hdmi2_sink);
+ conn_state->connector,
+ adjusted_mode);
if (ret < 0) {
DRM_ERROR("couldn't fill AVI infoframe\n");
return;
@@ -503,12 +498,12 @@ static void intel_hdmi_set_avi_infoframe(struct intel_encoder *encoder,
else
frame.avi.colorspace = HDMI_COLORSPACE_RGB;
- drm_hdmi_avi_infoframe_quant_range(&frame.avi, adjusted_mode,
+ drm_hdmi_avi_infoframe_quant_range(&frame.avi,
+ conn_state->connector,
+ adjusted_mode,
crtc_state->limited_color_range ?
HDMI_QUANTIZATION_RANGE_LIMITED :
- HDMI_QUANTIZATION_RANGE_FULL,
- intel_hdmi->rgb_quant_range_selectable,
- is_hdmi2_sink);
+ HDMI_QUANTIZATION_RANGE_FULL);
drm_hdmi_avi_infoframe_content_type(&frame.avi,
conn_state);
@@ -1191,15 +1186,17 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ intel_wakeref_t wakeref;
bool ret;
- if (!intel_display_power_get_if_enabled(dev_priv,
- encoder->power_domain))
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ encoder->power_domain);
+ if (!wakeref)
return false;
ret = intel_sdvo_port_enabled(dev_priv, intel_hdmi->hdmi_reg, pipe);
- intel_display_power_put(dev_priv, encoder->power_domain);
+ intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
return ret;
}
@@ -1591,7 +1588,7 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
if (hdmi->has_hdmi_sink && !force_dvi) {
/* if we can't do 8bpc we may still be able to do 12bpc */
- if (status != MODE_OK && !HAS_GMCH_DISPLAY(dev_priv))
+ if (status != MODE_OK && !HAS_GMCH(dev_priv))
status = hdmi_port_clock_valid(hdmi, clock * 3 / 2,
true, force_dvi);
@@ -1616,7 +1613,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
&crtc_state->base.adjusted_mode;
int i;
- if (HAS_GMCH_DISPLAY(dev_priv))
+ if (HAS_GMCH(dev_priv))
return false;
if (bpc == 10 && INTEL_GEN(dev_priv) < 11)
@@ -1707,9 +1704,9 @@ intel_hdmi_ycbcr420_config(struct drm_connector *connector,
return true;
}
-bool intel_hdmi_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+int intel_hdmi_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -1725,7 +1722,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
bool force_dvi = intel_conn_state->force_audio == HDMI_AUDIO_OFF_DVI;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return false;
+ return -EINVAL;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->has_hdmi_sink = !force_dvi && intel_hdmi->has_hdmi_sink;
@@ -1756,7 +1753,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
&clock_12bpc, &clock_10bpc,
&clock_8bpc)) {
DRM_ERROR("Can't support YCBCR420 output\n");
- return false;
+ return -EINVAL;
}
}
@@ -1806,7 +1803,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
if (hdmi_port_clock_valid(intel_hdmi, pipe_config->port_clock,
false, force_dvi) != MODE_OK) {
DRM_DEBUG_KMS("unsupported HDMI clock, rejecting mode\n");
- return false;
+ return -EINVAL;
}
/* Set user selected PAR to incoming mode's member */
@@ -1825,7 +1822,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
}
}
- return true;
+ return 0;
}
static void
@@ -1835,7 +1832,6 @@ intel_hdmi_unset_edid(struct drm_connector *connector)
intel_hdmi->has_hdmi_sink = false;
intel_hdmi->has_audio = false;
- intel_hdmi->rgb_quant_range_selectable = false;
intel_hdmi->dp_dual_mode.type = DRM_DP_DUAL_MODE_NONE;
intel_hdmi->dp_dual_mode.max_tmds_clock = 0;
@@ -1896,11 +1892,12 @@ intel_hdmi_set_edid(struct drm_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ intel_wakeref_t wakeref;
struct edid *edid;
bool connected = false;
struct i2c_adapter *i2c;
- intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
+ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
i2c = intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus);
@@ -1915,13 +1912,10 @@ intel_hdmi_set_edid(struct drm_connector *connector)
intel_hdmi_dp_dual_mode_detect(connector, edid != NULL);
- intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref);
to_intel_connector(connector)->detect_edid = edid;
if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
- intel_hdmi->rgb_quant_range_selectable =
- drm_rgb_quant_range_selectable(edid);
-
intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
@@ -1940,11 +1934,12 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct intel_encoder *encoder = &hdmi_to_dig_port(intel_hdmi)->base;
+ intel_wakeref_t wakeref;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
- intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
+ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
if (IS_ICELAKE(dev_priv) &&
!intel_digital_port_connected(encoder))
@@ -1956,7 +1951,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
status = connector_status_connected;
out:
- intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref);
if (status != connector_status_connected)
cec_notifier_phys_addr_invalidate(intel_hdmi->cec_notifier);
@@ -2155,7 +2150,7 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
drm_connector_attach_content_type_property(connector);
connector->state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
- if (!HAS_GMCH_DISPLAY(dev_priv))
+ if (!HAS_GMCH(dev_priv))
drm_connector_attach_max_bpc_property(connector, 8, 12);
}
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index e24174d08fed..b8937c788f03 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -23,7 +23,6 @@
#include <linux/kernel.h>
-#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
@@ -227,9 +226,10 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
container_of(work, typeof(*dev_priv),
hotplug.reenable_work.work);
struct drm_device *dev = &dev_priv->drm;
+ intel_wakeref_t wakeref;
enum hpd_pin pin;
- intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(dev_priv);
spin_lock_irq(&dev_priv->irq_lock);
for_each_hpd_pin(pin) {
@@ -262,7 +262,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
}
bool intel_encoder_hotplug(struct intel_encoder *encoder,
@@ -470,7 +470,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
* hotplug bits itself. So only WARN about unexpected
* interrupts on saner platforms.
*/
- WARN_ONCE(!HAS_GMCH_DISPLAY(dev_priv),
+ WARN_ONCE(!HAS_GMCH(dev_priv),
"Received HPD interrupt on pin %d although disabled\n", pin);
continue;
}
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
index bc27b691d824..9bd1c9002c2a 100644
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ b/drivers/gpu/drm/i915/intel_huc.c
@@ -115,14 +115,14 @@ fail:
int intel_huc_check_status(struct intel_huc *huc)
{
struct drm_i915_private *dev_priv = huc_to_i915(huc);
- bool status;
+ intel_wakeref_t wakeref;
+ bool status = false;
if (!HAS_HUC(dev_priv))
return -ENODEV;
- intel_runtime_pm_get(dev_priv);
- status = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED;
- intel_runtime_pm_put(dev_priv);
+ with_intel_runtime_pm(dev_priv, wakeref)
+ status = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED;
return status;
}
diff --git a/drivers/gpu/drm/i915/intel_huc_fw.c b/drivers/gpu/drm/i915/intel_huc_fw.c
index f93d2384d482..7d7bfc7f7ca7 100644
--- a/drivers/gpu/drm/i915/intel_huc_fw.c
+++ b/drivers/gpu/drm/i915/intel_huc_fw.c
@@ -23,8 +23,8 @@
*/
#define BXT_HUC_FW_MAJOR 01
-#define BXT_HUC_FW_MINOR 07
-#define BXT_BLD_NUM 1398
+#define BXT_HUC_FW_MINOR 8
+#define BXT_BLD_NUM 2893
#define SKL_HUC_FW_MAJOR 01
#define SKL_HUC_FW_MINOR 07
@@ -76,9 +76,6 @@ static void huc_fw_select(struct intel_uc_fw *huc_fw)
huc_fw->path = I915_KBL_HUC_UCODE;
huc_fw->major_ver_wanted = KBL_HUC_FW_MAJOR;
huc_fw->minor_ver_wanted = KBL_HUC_FW_MINOR;
- } else {
- DRM_WARN("%s: No firmware known for this platform!\n",
- intel_uc_fw_type_repr(huc_fw->type));
}
}
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 802d0394ccc4..5a733e711355 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -29,7 +29,6 @@
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/export.h>
-#include <drm/drmP.h>
#include <drm/drm_hdcp.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
@@ -698,12 +697,13 @@ out:
static int
gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
{
- struct intel_gmbus *bus = container_of(adapter, struct intel_gmbus,
- adapter);
+ struct intel_gmbus *bus =
+ container_of(adapter, struct intel_gmbus, adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
+ intel_wakeref_t wakeref;
int ret;
- intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
+ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
if (bus->force_bit) {
ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
@@ -715,17 +715,16 @@ gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
bus->force_bit |= GMBUS_FORCE_BIT_RETRY;
}
- intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref);
return ret;
}
int intel_gmbus_output_aksv(struct i2c_adapter *adapter)
{
- struct intel_gmbus *bus = container_of(adapter, struct intel_gmbus,
- adapter);
+ struct intel_gmbus *bus =
+ container_of(adapter, struct intel_gmbus, adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
- int ret;
u8 cmd = DRM_HDCP_DDC_AKSV;
u8 buf[DRM_HDCP_KSV_LEN] = { 0 };
struct i2c_msg msgs[] = {
@@ -742,8 +741,10 @@ int intel_gmbus_output_aksv(struct i2c_adapter *adapter)
.buf = buf,
}
};
+ intel_wakeref_t wakeref;
+ int ret;
- intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
+ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
mutex_lock(&dev_priv->gmbus_mutex);
/*
@@ -754,7 +755,7 @@ int intel_gmbus_output_aksv(struct i2c_adapter *adapter)
ret = do_gmbus_xfer(adapter, msgs, ARRAY_SIZE(msgs), GMBUS_AKSV_SELECT);
mutex_unlock(&dev_priv->gmbus_mutex);
- intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref);
return ret;
}
@@ -822,7 +823,7 @@ int intel_setup_gmbus(struct drm_i915_private *dev_priv)
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE;
- else if (!HAS_GMCH_DISPLAY(dev_priv))
+ else if (!HAS_GMCH(dev_priv))
/*
* Broxton uses the same PCH offsets for South Display Engine,
* even though it doesn't have a PCH.
diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c
index 5d5336fbe7b0..f8239bca3820 100644
--- a/drivers/gpu/drm/i915/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/intel_lpe_audio.c
@@ -65,6 +65,7 @@
#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
#include "i915_drv.h"
#include <linux/delay.h>
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index eab9341a5152..5e98fd79bd9d 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -133,10 +133,10 @@
*/
#include <linux/interrupt.h>
-#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem_render_state.h"
+#include "i915_reset.h"
#include "i915_vgpu.h"
#include "intel_lrc_reg.h"
#include "intel_mocs.h"
@@ -172,6 +172,12 @@ static void execlists_init_reg_state(u32 *reg_state,
struct intel_engine_cs *engine,
struct intel_ring *ring);
+static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
+{
+ return (i915_ggtt_offset(engine->status_page.vma) +
+ I915_GEM_HWS_INDEX_ADDR);
+}
+
static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
return rb_entry(rb, struct i915_priolist, node);
@@ -182,13 +188,90 @@ static inline int rq_prio(const struct i915_request *rq)
return rq->sched.attr.priority;
}
+static int queue_prio(const struct intel_engine_execlists *execlists)
+{
+ struct i915_priolist *p;
+ struct rb_node *rb;
+
+ rb = rb_first_cached(&execlists->queue);
+ if (!rb)
+ return INT_MIN;
+
+ /*
+ * As the priolist[] are inverted, with the highest priority in [0],
+ * we have to flip the index value to become priority.
+ */
+ p = to_priolist(rb);
+ return ((p->priority + 1) << I915_USER_PRIORITY_SHIFT) - ffs(p->used);
+}
+
static inline bool need_preempt(const struct intel_engine_cs *engine,
- const struct i915_request *last,
- int prio)
+ const struct i915_request *rq)
{
- return (intel_engine_has_preemption(engine) &&
- __execlists_need_preempt(prio, rq_prio(last)) &&
- !i915_request_completed(last));
+ const int last_prio = rq_prio(rq);
+
+ if (!intel_engine_has_preemption(engine))
+ return false;
+
+ if (i915_request_completed(rq))
+ return false;
+
+ /*
+ * Check if the current priority hint merits a preemption attempt.
+ *
+ * We record the highest value priority we saw during rescheduling
+ * prior to this dequeue, therefore we know that if it is strictly
+ * less than the current tail of ESLP[0], we do not need to force
+ * a preempt-to-idle cycle.
+ *
+ * However, the priority hint is a mere hint that we may need to
+ * preempt. If that hint is stale or we may be trying to preempt
+ * ourselves, ignore the request.
+ */
+ if (!__execlists_need_preempt(engine->execlists.queue_priority_hint,
+ last_prio))
+ return false;
+
+ /*
+ * Check against the first request in ELSP[1], it will, thanks to the
+ * power of PI, be the highest priority of that context.
+ */
+ if (!list_is_last(&rq->link, &engine->timeline.requests) &&
+ rq_prio(list_next_entry(rq, link)) > last_prio)
+ return true;
+
+ /*
+ * If the inflight context did not trigger the preemption, then maybe
+ * it was the set of queued requests? Pick the highest priority in
+ * the queue (the first active priolist) and see if it deserves to be
+ * running instead of ELSP[0].
+ *
+	 * The highest priority request in the queue cannot be either
+	 * ELSP[0] or ELSP[1] as, thanks again to PI, if it was the same
+	 * context, its priority would not exceed ELSP[0] aka last_prio.
+ */
+ return queue_prio(&engine->execlists) > last_prio;
+}
+
+__maybe_unused static inline bool
+assert_priority_queue(const struct intel_engine_execlists *execlists,
+ const struct i915_request *prev,
+ const struct i915_request *next)
+{
+ if (!prev)
+ return true;
+
+ /*
+ * Without preemption, the prev may refer to the still active element
+ * which we refuse to let go.
+ *
+ * Even with preemption, there are times when we think it is better not
+ * to preempt and leave an ostensibly lower priority request in flight.
+ */
+ if (port_request(execlists->port) == prev)
+ return true;
+
+ return rq_prio(prev) >= rq_prio(next);
}
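
queue_prio() above flips the inverted priolist index back into an effective priority via ((p->priority + 1) << I915_USER_PRIORITY_SHIFT) - ffs(p->used). A standalone sketch with a worked example follows; the struct layout and the shift value of 2 are assumptions for illustration, not the kernel's definitions.

	/* Sketch: recompute effective priority from a priolist bucket. */
	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	#define USER_PRIORITY_SHIFT 2	/* assumed width, for illustration only */

	struct priolist {
		int priority;		/* user priority of this bucket */
		unsigned int used;	/* bitmask of occupied internal sub-levels */
	};

	/* Highest pending priority: the inverted sub-level index flipped back. */
	static int queue_prio(const struct priolist *p)
	{
		return ((p->priority + 1) << USER_PRIORITY_SHIFT) - ffs(p->used);
	}

	int main(void)
	{
		struct priolist p = { .priority = 3, .used = 0x2 }; /* sub-level 1 occupied */

		/* (3 + 1) << 2 = 16, ffs(0x2) = 2, so the effective priority is 14. */
		printf("effective priority = %d\n", queue_prio(&p));
		return 0;
	}
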
/*
@@ -265,7 +348,8 @@ static void unwind_wa_tail(struct i915_request *rq)
assert_ring_tail_valid(rq->ring, rq->tail);
}
-static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
+static struct i915_request *
+__unwind_incomplete_requests(struct intel_engine_cs *engine)
{
struct i915_request *rq, *rn, *active = NULL;
struct list_head *uninitialized_var(pl);
@@ -307,6 +391,8 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
list_move_tail(&active->sched.link,
i915_sched_lookup_priolist(engine, prio));
}
+
+ return active;
}
void
@@ -364,31 +450,12 @@ execlists_context_schedule_out(struct i915_request *rq, unsigned long status)
trace_i915_request_out(rq);
}
-static void
-execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
-{
- ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
- ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
- ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
- ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
-}
-
static u64 execlists_update_context(struct i915_request *rq)
{
- struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt;
struct intel_context *ce = rq->hw_context;
- u32 *reg_state = ce->lrc_reg_state;
-
- reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail);
- /*
- * True 32b PPGTT with dynamic page allocation: update PDP
- * registers and point the unallocated PDPs to scratch page.
- * PML4 is allocated during ppgtt init, so this is not needed
- * in 48-bit mode.
- */
- if (!i915_vm_is_48bit(&ppgtt->vm))
- execlists_update_context_pdps(ppgtt, reg_state);
+ ce->lrc_reg_state[CTX_RING_TAIL + 1] =
+ intel_ring_set_tail(rq->ring, rq->tail);
/*
* Make sure the context image is complete before we submit it to HW.
@@ -456,11 +523,12 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
desc = execlists_update_context(rq);
GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc));
- GEM_TRACE("%s in[%d]: ctx=%d.%d, global=%d (fence %llx:%d) (current %d), prio=%d\n",
+ GEM_TRACE("%s in[%d]: ctx=%d.%d, global=%d (fence %llx:%lld) (current %d:%d), prio=%d\n",
engine->name, n,
port[n].context_id, count,
rq->global_seqno,
rq->fence.context, rq->fence.seqno,
+ hwsp_seqno(rq),
intel_engine_get_seqno(engine),
rq_prio(rq));
} else {
@@ -532,6 +600,8 @@ static void inject_preempt_context(struct intel_engine_cs *engine)
execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK);
execlists_set_active(execlists, EXECLISTS_ACTIVE_PREEMPT);
+
+ (void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
}
static void complete_preempt_context(struct intel_engine_execlists *execlists)
@@ -600,7 +670,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK))
return;
- if (need_preempt(engine, last, execlists->queue_priority)) {
+ if (need_preempt(engine, last)) {
inject_preempt_context(engine);
return;
}
@@ -633,7 +703,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* WaIdleLiteRestore:bdw,skl
* Apply the wa NOOPs to prevent
* ring:HEAD == rq:TAIL as we resubmit the
- * request. See gen8_emit_breadcrumb() for
+ * request. See gen8_emit_fini_breadcrumb() for
* where we prepare the padding after the
* end of the request.
*/
@@ -646,8 +716,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
int i;
priolist_for_each_request_consume(rq, rn, p, i) {
- GEM_BUG_ON(last &&
- need_preempt(engine, last, rq_prio(rq)));
+ GEM_BUG_ON(!assert_priority_queue(execlists, last, rq));
/*
* Can we combine this request with the current port?
@@ -708,20 +777,20 @@ done:
/*
* Here be a bit of magic! Or sleight-of-hand, whichever you prefer.
*
- * We choose queue_priority such that if we add a request of greater
+ * We choose the priority hint such that if we add a request of greater
* priority than this, we kick the submission tasklet to decide on
* the right order of submitting the requests to hardware. We must
* also be prepared to reorder requests as they are in-flight on the
- * HW. We derive the queue_priority then as the first "hole" in
+ * HW. We derive the priority hint then as the first "hole" in
* the HW submission ports and if there are no available slots,
* the priority of the lowest executing request, i.e. last.
*
* When we do receive a higher priority request ready to run from the
- * user, see queue_request(), the queue_priority is bumped to that
+ * user, see queue_request(), the priority hint is bumped to that
* request triggering preemption on the next dequeue (or subsequent
* interrupt for secondary ports).
*/
- execlists->queue_priority =
+ execlists->queue_priority_hint =
port != execlists->port ? rq_prio(last) : INT_MIN;
if (submit) {
@@ -752,11 +821,12 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
while (num_ports-- && port_isset(port)) {
struct i915_request *rq = port_request(port);
- GEM_TRACE("%s:port%u global=%d (fence %llx:%d), (current %d)\n",
+ GEM_TRACE("%s:port%u global=%d (fence %llx:%lld), (current %d:%d)\n",
rq->engine->name,
(unsigned int)(port - execlists->port),
rq->global_seqno,
rq->fence.context, rq->fence.seqno,
+ hwsp_seqno(rq),
intel_engine_get_seqno(rq->engine));
GEM_BUG_ON(!execlists->active);
@@ -774,6 +844,13 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
execlists_clear_all_active(execlists);
}
+static inline void
+invalidate_csb_entries(const u32 *first, const u32 *last)
+{
+ clflush((void *)first);
+ clflush((void *)last);
+}
+
static void reset_csb_pointers(struct intel_engine_execlists *execlists)
{
const unsigned int reset_value = GEN8_CSB_ENTRIES - 1;
@@ -789,6 +866,9 @@ static void reset_csb_pointers(struct intel_engine_execlists *execlists)
*/
execlists->csb_head = reset_value;
WRITE_ONCE(*execlists->csb_write, reset_value);
+
+ invalidate_csb_entries(&execlists->csb_status[0],
+ &execlists->csb_status[GEN8_CSB_ENTRIES - 1]);
}
static void nop_submission_tasklet(unsigned long data)
@@ -830,10 +910,10 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
list_for_each_entry(rq, &engine->timeline.requests, link) {
GEM_BUG_ON(!rq->global_seqno);
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
- continue;
+ if (!i915_request_signaled(rq))
+ dma_fence_set_error(&rq->fence, -EIO);
- dma_fence_set_error(&rq->fence, -EIO);
+ i915_request_mark_complete(rq);
}
/* Flush the queued requests to the timeline list (for retiring). */
@@ -843,9 +923,9 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
priolist_for_each_request_consume(rq, rn, p, i) {
list_del_init(&rq->sched.link);
-
- dma_fence_set_error(&rq->fence, -EIO);
__i915_request_submit(rq);
+ dma_fence_set_error(&rq->fence, -EIO);
+ i915_request_mark_complete(rq);
}
rb_erase_cached(&p->node, &execlists->queue);
@@ -859,7 +939,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
/* Remaining _unready_ requests will be nop'ed when submitted */
- execlists->queue_priority = INT_MIN;
+ execlists->queue_priority_hint = INT_MIN;
execlists->queue = RB_ROOT_CACHED;
GEM_BUG_ON(port_isset(execlists->port));
@@ -882,6 +962,8 @@ static void process_csb(struct intel_engine_cs *engine)
const u32 * const buf = execlists->csb_status;
u8 head, tail;
+ lockdep_assert_held(&engine->timeline.lock);
+
/*
* Note that csb_write, csb_status may be either in HWSP or mmio.
* When reading from the csb_write mmio register, we have to be
@@ -970,12 +1052,13 @@ static void process_csb(struct intel_engine_cs *engine)
EXECLISTS_ACTIVE_USER));
rq = port_unpack(port, &count);
- GEM_TRACE("%s out[0]: ctx=%d.%d, global=%d (fence %llx:%d) (current %d), prio=%d\n",
+ GEM_TRACE("%s out[0]: ctx=%d.%d, global=%d (fence %llx:%lld) (current %d:%d), prio=%d\n",
engine->name,
port->context_id, count,
rq ? rq->global_seqno : 0,
rq ? rq->fence.context : 0,
rq ? rq->fence.seqno : 0,
+ rq ? hwsp_seqno(rq) : 0,
intel_engine_get_seqno(engine),
rq ? rq_prio(rq) : 0);
@@ -1024,6 +1107,19 @@ static void process_csb(struct intel_engine_cs *engine)
} while (head != tail);
execlists->csb_head = head;
+
+ /*
+	 * Gen11 has proven to fail with respect to the global observation
+	 * point between entry write and tail update, breaking the ordering,
+	 * so we can observe a stale entry in the context status buffer.
+	 *
+	 * Forcibly evict stale entries before the next GPU CSB update,
+	 * to increase the odds that we read fresh entries on non-working
+	 * hardware. The cost of doing so comes out mostly in the wash, as
+	 * the hardware, working or not, will need to do the invalidation
+	 * anyway.
+ */
+ invalidate_csb_entries(&buf[0], &buf[GEN8_CSB_ENTRIES - 1]);
}
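
invalidate_csb_entries() above flushes only the first and last entries, presumably because the small status buffer spans at most a couple of cache lines, so those two clflush() calls evict the backing lines for the whole range. A userspace sketch of the same idea, using _mm_clflush() (x86 only) and a stand-in array rather than the HW status page:

	#include <immintrin.h>	/* _mm_clflush(), x86 only */
	#include <stdint.h>

	#define CSB_ENTRIES 6

	/* Evict the cache lines backing [first, last] so the next read hits memory. */
	static void invalidate_range(const uint32_t *first, const uint32_t *last)
	{
		_mm_clflush((const void *)first);
		_mm_clflush((const void *)last);
	}

	int main(void)
	{
		static uint32_t csb[CSB_ENTRIES];	/* stand-in for the status page */

		invalidate_range(&csb[0], &csb[CSB_ENTRIES - 1]);
		return 0;
	}
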
static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
@@ -1046,7 +1142,7 @@ static void execlists_submission_tasklet(unsigned long data)
GEM_TRACE("%s awake?=%d, active=%x\n",
engine->name,
- engine->i915->gt.awake,
+ !!engine->i915->gt.awake,
engine->execlists.active);
spin_lock_irqsave(&engine->timeline.lock, flags);
@@ -1076,8 +1172,8 @@ static void __submit_queue_imm(struct intel_engine_cs *engine)
static void submit_queue(struct intel_engine_cs *engine, int prio)
{
- if (prio > engine->execlists.queue_priority) {
- engine->execlists.queue_priority = prio;
+ if (prio > engine->execlists.queue_priority_hint) {
+ engine->execlists.queue_priority_hint = prio;
__submit_queue_imm(engine);
}
}
@@ -1170,6 +1266,23 @@ static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
return i915_vma_pin(vma, 0, 0, flags);
}
+static void
+__execlists_update_reg_state(struct intel_engine_cs *engine,
+ struct intel_context *ce)
+{
+ u32 *regs = ce->lrc_reg_state;
+ struct intel_ring *ring = ce->ring;
+
+ regs[CTX_RING_BUFFER_START + 1] = i915_ggtt_offset(ring->vma);
+ regs[CTX_RING_HEAD + 1] = ring->head;
+ regs[CTX_RING_TAIL + 1] = ring->tail;
+
+ /* RPCS */
+ if (engine->class == RENDER_CLASS)
+ regs[CTX_R_PWR_CLK_STATE + 1] = gen8_make_rpcs(engine->i915,
+ &ce->sseu);
+}
+
static struct intel_context *
__execlists_context_pin(struct intel_engine_cs *engine,
struct i915_gem_context *ctx,
@@ -1208,10 +1321,8 @@ __execlists_context_pin(struct intel_engine_cs *engine,
GEM_BUG_ON(!intel_ring_offset_valid(ce->ring, ce->ring->head));
ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
- ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
- i915_ggtt_offset(ce->ring->vma);
- ce->lrc_reg_state[CTX_RING_HEAD + 1] = ce->ring->head;
- ce->lrc_reg_state[CTX_RING_TAIL + 1] = ce->ring->tail;
+
+ __execlists_update_reg_state(engine, ce);
ce->state->obj->pin_global++;
i915_gem_context_get(ctx);
@@ -1251,29 +1362,116 @@ execlists_context_pin(struct intel_engine_cs *engine,
return __execlists_context_pin(engine, ctx, ce);
}
+static int gen8_emit_init_breadcrumb(struct i915_request *rq)
+{
+ u32 *cs;
+
+ GEM_BUG_ON(!rq->timeline->has_initial_breadcrumb);
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /*
+ * Check if we have been preempted before we even get started.
+ *
+ * After this point i915_request_started() reports true, even if
+ * we get preempted and so are no longer running.
+ */
+ *cs++ = MI_ARB_CHECK;
+ *cs++ = MI_NOOP;
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = rq->timeline->hwsp_offset;
+ *cs++ = 0;
+ *cs++ = rq->fence.seqno - 1;
+
+ intel_ring_advance(rq, cs);
+ return 0;
+}
+
+static int emit_pdps(struct i915_request *rq)
+{
+ const struct intel_engine_cs * const engine = rq->engine;
+ struct i915_hw_ppgtt * const ppgtt = rq->gem_context->ppgtt;
+ int err, i;
+ u32 *cs;
+
+ GEM_BUG_ON(intel_vgpu_active(rq->i915));
+
+ /*
+ * Beware ye of the dragons, this sequence is magic!
+ *
+ * Small changes to this sequence can cause anything from
+ * GPU hangs to forcewake errors and machine lockups!
+ */
+
+ /* Flush any residual operations from the context load */
+ err = engine->emit_flush(rq, EMIT_FLUSH);
+ if (err)
+ return err;
+
+ /* Magic required to prevent forcewake errors! */
+ err = engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ return err;
+
+ cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /* Ensure the LRI have landed before we invalidate & continue */
+ *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
+ for (i = GEN8_3LVL_PDPES; i--; ) {
+ const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
+
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, i));
+ *cs++ = upper_32_bits(pd_daddr);
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, i));
+ *cs++ = lower_32_bits(pd_daddr);
+ }
+ *cs++ = MI_NOOP;
+
+ intel_ring_advance(rq, cs);
+
+ /* Be doubly sure the LRI have landed before proceeding */
+ err = engine->emit_flush(rq, EMIT_FLUSH);
+ if (err)
+ return err;
+
+ /* Re-invalidate the TLB for luck */
+ return engine->emit_flush(rq, EMIT_INVALIDATE);
+}
+
static int execlists_request_alloc(struct i915_request *request)
{
int ret;
GEM_BUG_ON(!request->hw_context->pin_count);
- /* Flush enough space to reduce the likelihood of waiting after
+ /*
+ * Flush enough space to reduce the likelihood of waiting after
* we start building the request - in which case we will just
* have to repeat work.
*/
request->reserved_space += EXECLISTS_REQUEST_SIZE;
- ret = intel_ring_wait_for_space(request->ring, request->reserved_space);
- if (ret)
- return ret;
-
- /* Note that after this point, we have committed to using
+ /*
+ * Note that after this point, we have committed to using
* this request as it is being used to both track the
* state of engine initialisation and liveness of the
* golden renderstate above. Think twice before you try
* to cancel/unwind this request now.
*/
+ /* Unconditionally invalidate GPU caches and TLBs. */
+ if (i915_vm_is_48bit(&request->gem_context->ppgtt->vm))
+ ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
+ else
+ ret = emit_pdps(request);
+ if (ret)
+ return ret;
+
request->reserved_space -= EXECLISTS_REQUEST_SIZE;
return 0;
}
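
gen8_emit_init_breadcrumb() above writes rq->fence.seqno - 1 into the timeline's HWSP slot before the payload runs, so a "has this request started?" query can be answered from that slot alone. A hedged standalone sketch of such a check follows; the struct, helpers, and exact comparison are illustrative assumptions, not the i915 implementation.

	/* Sketch: a "started" check implied by the init breadcrumb above. */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct fake_request {
		uint32_t seqno;		/* this request's fence seqno */
		const uint32_t *hwsp;	/* per-timeline status page slot */
	};

	/* True if a has advanced at least as far as b, with wraparound handling. */
	static bool seqno_passed(uint32_t a, uint32_t b)
	{
		return (int32_t)(a - b) >= 0;
	}

	/* The CS wrote seqno - 1 before the payload, so this flips true at start. */
	static bool request_started(const struct fake_request *rq)
	{
		return seqno_passed(*rq->hwsp, rq->seqno - 1);
	}

	int main(void)
	{
		uint32_t hwsp = 41;			/* previous request completed */
		struct fake_request rq = { .seqno = 43, .hwsp = &hwsp };

		printf("started before breadcrumb: %d\n", request_started(&rq)); /* 0 */
		hwsp = rq.seqno - 1;			/* init breadcrumb lands */
		printf("started after breadcrumb:  %d\n", request_started(&rq)); /* 1 */
		return 0;
	}
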
@@ -1596,7 +1794,7 @@ static void enable_execlists(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
- I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
+ intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
/*
* Make sure we're not enabling the new 12-deep CSB
@@ -1617,7 +1815,7 @@ static void enable_execlists(struct intel_engine_cs *engine)
_MASKED_BIT_DISABLE(STOP_RING));
I915_WRITE(RING_HWS_PGA(engine->mmio_base),
- engine->status_page.ggtt_offset);
+ i915_ggtt_offset(engine->status_page.vma));
POSTING_READ(RING_HWS_PGA(engine->mmio_base));
}
@@ -1637,6 +1835,7 @@ static bool unexpected_starting_state(struct intel_engine_cs *engine)
static int gen8_init_common_ring(struct intel_engine_cs *engine)
{
intel_engine_apply_workarounds(engine);
+ intel_engine_apply_whitelist(engine);
intel_mocs_init_engine(engine);
@@ -1653,48 +1852,9 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
return 0;
}
-static int gen8_init_render_ring(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- int ret;
-
- ret = gen8_init_common_ring(engine);
- if (ret)
- return ret;
-
- intel_engine_apply_whitelist(engine);
-
- /* We need to disable the AsyncFlip performance optimisations in order
- * to use MI_WAIT_FOR_EVENT within the CS. It should already be
- * programmed to '1' on all products.
- *
- * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
- */
- I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
-
- I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
-
- return 0;
-}
-
-static int gen9_init_render_ring(struct intel_engine_cs *engine)
-{
- int ret;
-
- ret = gen8_init_common_ring(engine);
- if (ret)
- return ret;
-
- intel_engine_apply_whitelist(engine);
-
- return 0;
-}
-
-static struct i915_request *
-execlists_reset_prepare(struct intel_engine_cs *engine)
+static void execlists_reset_prepare(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
- struct i915_request *request, *active;
unsigned long flags;
GEM_TRACE("%s: depth<-%d\n", engine->name,
@@ -1710,59 +1870,21 @@ execlists_reset_prepare(struct intel_engine_cs *engine)
* prevents the race.
*/
__tasklet_disable_sync_once(&execlists->tasklet);
+ GEM_BUG_ON(!reset_in_progress(execlists));
+ /* And flush any current direct submission. */
spin_lock_irqsave(&engine->timeline.lock, flags);
-
- /*
- * We want to flush the pending context switches, having disabled
- * the tasklet above, we can assume exclusive access to the execlists.
- * For this allows us to catch up with an inflight preemption event,
- * and avoid blaming an innocent request if the stall was due to the
- * preemption itself.
- */
- process_csb(engine);
-
- /*
- * The last active request can then be no later than the last request
- * now in ELSP[0]. So search backwards from there, so that if the GPU
- * has advanced beyond the last CSB update, it will be pardoned.
- */
- active = NULL;
- request = port_request(execlists->port);
- if (request) {
- /*
- * Prevent the breadcrumb from advancing before we decide
- * which request is currently active.
- */
- intel_engine_stop_cs(engine);
-
- list_for_each_entry_from_reverse(request,
- &engine->timeline.requests,
- link) {
- if (__i915_request_completed(request,
- request->global_seqno))
- break;
-
- active = request;
- }
- }
-
+ process_csb(engine); /* drain preemption events */
spin_unlock_irqrestore(&engine->timeline.lock, flags);
-
- return active;
}
-static void execlists_reset(struct intel_engine_cs *engine,
- struct i915_request *request)
+static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_request *rq;
unsigned long flags;
u32 *regs;
- GEM_TRACE("%s request global=%d, current=%d\n",
- engine->name, request ? request->global_seqno : 0,
- intel_engine_get_seqno(engine));
-
spin_lock_irqsave(&engine->timeline.lock, flags);
/*
@@ -1777,12 +1899,18 @@ static void execlists_reset(struct intel_engine_cs *engine,
execlists_cancel_port_requests(execlists);
/* Push back any incomplete requests for replay after the reset. */
- __unwind_incomplete_requests(engine);
+ rq = __unwind_incomplete_requests(engine);
/* Following the reset, we need to reload the CSB read/write pointers */
reset_csb_pointers(&engine->execlists);
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
+ GEM_TRACE("%s seqno=%d, current=%d, stalled? %s\n",
+ engine->name,
+ rq ? rq->global_seqno : 0,
+ intel_engine_get_seqno(engine),
+ yesno(stalled));
+ if (!rq)
+ goto out_unlock;
/*
* If the request was innocent, we leave the request in the ELSP
@@ -1795,8 +1923,9 @@ static void execlists_reset(struct intel_engine_cs *engine,
* and have to at least restore the RING register in the context
* image back to the expected values to skip over the guilty request.
*/
- if (!request || request->fence.error != -EIO)
- return;
+ i915_reset_request(rq, stalled);
+ if (!stalled)
+ goto out_unlock;
/*
* We want a simple context + ring to execute the breadcrumb update.
@@ -1806,25 +1935,22 @@ static void execlists_reset(struct intel_engine_cs *engine,
* future request will be after userspace has had the opportunity
* to recreate its own state.
*/
- regs = request->hw_context->lrc_reg_state;
+ regs = rq->hw_context->lrc_reg_state;
if (engine->pinned_default_state) {
memcpy(regs, /* skip restoring the vanilla PPHWSP */
engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
engine->context_size - PAGE_SIZE);
}
- execlists_init_reg_state(regs,
- request->gem_context, engine, request->ring);
/* Move the RING_HEAD onto the breadcrumb, past the hanging batch */
- regs[CTX_RING_BUFFER_START + 1] = i915_ggtt_offset(request->ring->vma);
+ rq->ring->head = intel_ring_wrap(rq->ring, rq->postfix);
+ intel_ring_update_space(rq->ring);
- request->ring->head = intel_ring_wrap(request->ring, request->postfix);
- regs[CTX_RING_HEAD + 1] = request->ring->head;
+ execlists_init_reg_state(regs, rq->gem_context, engine, rq->ring);
+ __execlists_update_reg_state(engine, rq->hw_context);
- intel_ring_update_space(request->ring);
-
- /* Reset WaIdleLiteRestore:bdw,skl as well */
- unwind_wa_tail(request);
+out_unlock:
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
static void execlists_reset_finish(struct intel_engine_cs *engine)
@@ -1837,6 +1963,7 @@ static void execlists_reset_finish(struct intel_engine_cs *engine)
* to sleep before we restart and reload a context.
*
*/
+ GEM_BUG_ON(!reset_in_progress(execlists));
if (!RB_EMPTY_ROOT(&execlists->queue.rb_root))
execlists->tasklet.func(execlists->tasklet.data);
@@ -1845,56 +1972,11 @@ static void execlists_reset_finish(struct intel_engine_cs *engine)
atomic_read(&execlists->tasklet.count));
}
-static int intel_logical_ring_emit_pdps(struct i915_request *rq)
-{
- struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt;
- struct intel_engine_cs *engine = rq->engine;
- const int num_lri_cmds = GEN8_3LVL_PDPES * 2;
- u32 *cs;
- int i;
-
- cs = intel_ring_begin(rq, num_lri_cmds * 2 + 2);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_LOAD_REGISTER_IMM(num_lri_cmds);
- for (i = GEN8_3LVL_PDPES - 1; i >= 0; i--) {
- const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
-
- *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, i));
- *cs++ = upper_32_bits(pd_daddr);
- *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, i));
- *cs++ = lower_32_bits(pd_daddr);
- }
-
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
static int gen8_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
const unsigned int flags)
{
u32 *cs;
- int ret;
-
- /* Don't rely in hw updating PDPs, specially in lite-restore.
- * Ideally, we should set Force PD Restore in ctx descriptor,
- * but we can't. Force Restore would be a second option, but
- * it is unsafe in case of lite-restore (because the ctx is
- * not idle). PML4 is allocated during ppgtt init so this is
- * not needed in 48-bit.*/
- if ((intel_engine_flag(rq->engine) & rq->gem_context->ppgtt->pd_dirty_rings) &&
- !i915_vm_is_48bit(&rq->gem_context->ppgtt->vm) &&
- !intel_vgpu_active(rq->i915)) {
- ret = intel_logical_ring_emit_pdps(rq);
- if (ret)
- return ret;
-
- rq->gem_context->ppgtt->pd_dirty_rings &= ~intel_engine_flag(rq->engine);
- }
cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
@@ -1927,6 +2009,7 @@ static int gen8_emit_bb_start(struct i915_request *rq,
*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
*cs++ = MI_NOOP;
+
intel_ring_advance(rq, cs);
return 0;
@@ -2011,7 +2094,7 @@ static int gen8_emit_flush_render(struct i915_request *request,
* On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
* pipe control.
*/
- if (IS_GEN9(request->i915))
+ if (IS_GEN(request->i915, 9))
vf_flush_wa = true;
/* WaForGAMHang:kbl */
@@ -2053,45 +2136,62 @@ static int gen8_emit_flush_render(struct i915_request *request,
* used as a workaround for not being allowed to do lite
* restore with HEAD==TAIL (WaIdleLiteRestore).
*/
-static void gen8_emit_wa_tail(struct i915_request *request, u32 *cs)
+static u32 *gen8_emit_wa_tail(struct i915_request *request, u32 *cs)
{
/* Ensure there's always at least one preemption point per-request. */
*cs++ = MI_ARB_CHECK;
*cs++ = MI_NOOP;
request->wa_tail = intel_ring_offset(request, cs);
+
+ return cs;
}
-static void gen8_emit_breadcrumb(struct i915_request *request, u32 *cs)
+static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs)
{
/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
- cs = gen8_emit_ggtt_write(cs, request->global_seqno,
+ cs = gen8_emit_ggtt_write(cs,
+ request->fence.seqno,
+ request->timeline->hwsp_offset);
+
+ cs = gen8_emit_ggtt_write(cs,
+ request->global_seqno,
intel_hws_seqno_address(request->engine));
+
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+
request->tail = intel_ring_offset(request, cs);
assert_ring_tail_valid(request->ring, request->tail);
- gen8_emit_wa_tail(request, cs);
+ return gen8_emit_wa_tail(request, cs);
}
-static const int gen8_emit_breadcrumb_sz = 6 + WA_TAIL_DWORDS;
-static void gen8_emit_breadcrumb_rcs(struct i915_request *request, u32 *cs)
+static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
{
- /* We're using qword write, seqno should be aligned to 8 bytes. */
- BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
+ cs = gen8_emit_ggtt_write_rcs(cs,
+ request->fence.seqno,
+ request->timeline->hwsp_offset,
+ PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_DC_FLUSH_ENABLE |
+ PIPE_CONTROL_FLUSH_ENABLE |
+ PIPE_CONTROL_CS_STALL);
+
+ cs = gen8_emit_ggtt_write_rcs(cs,
+ request->global_seqno,
+ intel_hws_seqno_address(request->engine),
+ PIPE_CONTROL_CS_STALL);
- cs = gen8_emit_ggtt_write_rcs(cs, request->global_seqno,
- intel_hws_seqno_address(request->engine));
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+
request->tail = intel_ring_offset(request, cs);
assert_ring_tail_valid(request->ring, request->tail);
- gen8_emit_wa_tail(request, cs);
+ return gen8_emit_wa_tail(request, cs);
}
-static const int gen8_emit_breadcrumb_rcs_sz = 8 + WA_TAIL_DWORDS;
static int gen8_init_rcs_context(struct i915_request *rq)
{
@@ -2183,8 +2283,8 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
engine->request_alloc = execlists_request_alloc;
engine->emit_flush = gen8_emit_flush;
- engine->emit_breadcrumb = gen8_emit_breadcrumb;
- engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_sz;
+ engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
+ engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb;
engine->set_default_submission = intel_execlists_set_default_submission;
@@ -2223,10 +2323,14 @@ logical_ring_default_irqs(struct intel_engine_cs *engine)
engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
}
-static void
+static int
logical_ring_setup(struct intel_engine_cs *engine)
{
- intel_engine_setup_common(engine);
+ int err;
+
+ err = intel_engine_setup_common(engine);
+ if (err)
+ return err;
/* Intentionally left blank. */
engine->buffer = NULL;
@@ -2236,6 +2340,8 @@ logical_ring_setup(struct intel_engine_cs *engine)
logical_ring_default_vfuncs(engine);
logical_ring_default_irqs(engine);
+
+ return 0;
}
static int logical_ring_init(struct intel_engine_cs *engine)
@@ -2270,10 +2376,10 @@ static int logical_ring_init(struct intel_engine_cs *engine)
}
execlists->csb_status =
- &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
+ &engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
execlists->csb_write =
- &engine->status_page.page_addr[intel_hws_csb_write_index(i915)];
+ &engine->status_page.addr[intel_hws_csb_write_index(i915)];
reset_csb_pointers(execlists);
@@ -2282,23 +2388,16 @@ static int logical_ring_init(struct intel_engine_cs *engine)
int logical_render_ring_init(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
int ret;
- logical_ring_setup(engine);
-
- if (HAS_L3_DPF(dev_priv))
- engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+ ret = logical_ring_setup(engine);
+ if (ret)
+ return ret;
/* Override some for render ring. */
- if (INTEL_GEN(dev_priv) >= 9)
- engine->init_hw = gen9_init_render_ring;
- else
- engine->init_hw = gen8_init_render_ring;
engine->init_context = gen8_init_rcs_context;
engine->emit_flush = gen8_emit_flush_render;
- engine->emit_breadcrumb = gen8_emit_breadcrumb_rcs;
- engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_rcs_sz;
+ engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs;
ret = logical_ring_init(engine);
if (ret)
@@ -2322,27 +2421,59 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
int logical_xcs_ring_init(struct intel_engine_cs *engine)
{
- logical_ring_setup(engine);
+ int err;
+
+ err = logical_ring_setup(engine);
+ if (err)
+ return err;
return logical_ring_init(engine);
}
-static u32
-make_rpcs(struct drm_i915_private *dev_priv)
+u32 gen8_make_rpcs(struct drm_i915_private *i915, struct intel_sseu *req_sseu)
{
- bool subslice_pg = INTEL_INFO(dev_priv)->sseu.has_subslice_pg;
- u8 slices = hweight8(INTEL_INFO(dev_priv)->sseu.slice_mask);
- u8 subslices = hweight8(INTEL_INFO(dev_priv)->sseu.subslice_mask[0]);
+ const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
+ bool subslice_pg = sseu->has_subslice_pg;
+ struct intel_sseu ctx_sseu;
+ u8 slices, subslices;
u32 rpcs = 0;
/*
* No explicit RPCS request is needed to ensure full
* slice/subslice/EU enablement prior to Gen9.
*/
- if (INTEL_GEN(dev_priv) < 9)
+ if (INTEL_GEN(i915) < 9)
return 0;
/*
+ * If i915/perf is active, we want a stable powergating configuration
+ * on the system.
+ *
+	 * We could choose full enablement, but on ICL we know there are use
+	 * cases which disable slices for functional reasons, not just for
+	 * performance. So in this case we select a known stable subset.
+ */
+ if (!i915->perf.oa.exclusive_stream) {
+ ctx_sseu = *req_sseu;
+ } else {
+ ctx_sseu = intel_device_default_sseu(i915);
+
+ if (IS_GEN(i915, 11)) {
+ /*
+			 * We only need the subslice count, so it doesn't matter
+			 * which ones we select - just keep the lowest bits,
+			 * amounting to half of all available subslices per slice.
+ */
+ ctx_sseu.subslice_mask =
+ ~(~0 << (hweight8(ctx_sseu.subslice_mask) / 2));
+ ctx_sseu.slice_mask = 0x1;
+ }
+ }
+
+ slices = hweight8(ctx_sseu.slice_mask);
+ subslices = hweight8(ctx_sseu.subslice_mask);
+
+ /*
* Since the SScount bitfield in GEN8_R_PWR_CLK_STATE is only three bits
 * wide and Icelake has up to eight subslices, special programming is
* needed in order to correctly enable all subslices.
@@ -2367,7 +2498,9 @@ make_rpcs(struct drm_i915_private *dev_priv)
* subslices are enabled, or a count between one and four on the first
* slice.
*/
- if (IS_GEN11(dev_priv) && slices == 1 && subslices >= 4) {
+ if (IS_GEN(i915, 11) &&
+ slices == 1 &&
+ subslices > min_t(u8, 4, hweight8(sseu->subslice_mask[0]) / 2)) {
GEM_BUG_ON(subslices & 1);
subslice_pg = false;
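
The Gen11 branch of gen8_make_rpcs() above keeps a stable half of the subslices by building a mask of only the low bits: ~(~0 << (hweight8(mask) / 2)). A worked standalone example, assuming a hypothetical 8-subslice mask (0xff):

	/* Sketch: the subslice_mask halving used when i915/perf needs a stable config. */
	#include <stdio.h>

	/* popcount stands in for the kernel's hweight8() */
	static unsigned int hweight8(unsigned int v)
	{
		return (unsigned int)__builtin_popcount(v & 0xff);
	}

	int main(void)
	{
		unsigned int subslice_mask = 0xff;	/* assume 8 subslices present */
		unsigned int half = hweight8(subslice_mask) / 2;	/* 4 */
		unsigned int stable = ~(~0u << half) & 0xff;		/* 0x0f */

		printf("stable subslice_mask = 0x%02x (%u of %u subslices)\n",
		       stable, hweight8(stable), hweight8(subslice_mask));
		return 0;
	}
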
@@ -2380,10 +2513,10 @@ make_rpcs(struct drm_i915_private *dev_priv)
* must make an explicit request through RPCS for full
* enablement.
*/
- if (INTEL_INFO(dev_priv)->sseu.has_slice_pg) {
+ if (sseu->has_slice_pg) {
u32 mask, val = slices;
- if (INTEL_GEN(dev_priv) >= 11) {
+ if (INTEL_GEN(i915) >= 11) {
mask = GEN11_RPCS_S_CNT_MASK;
val <<= GEN11_RPCS_S_CNT_SHIFT;
} else {
@@ -2408,18 +2541,16 @@ make_rpcs(struct drm_i915_private *dev_priv)
rpcs |= GEN8_RPCS_ENABLE | GEN8_RPCS_SS_CNT_ENABLE | val;
}
- if (INTEL_INFO(dev_priv)->sseu.has_eu_pg) {
+ if (sseu->has_eu_pg) {
u32 val;
- val = INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
- GEN8_RPCS_EU_MIN_SHIFT;
+ val = ctx_sseu.min_eus_per_subslice << GEN8_RPCS_EU_MIN_SHIFT;
GEM_BUG_ON(val & ~GEN8_RPCS_EU_MIN_MASK);
val &= GEN8_RPCS_EU_MIN_MASK;
rpcs |= val;
- val = INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
- GEN8_RPCS_EU_MAX_SHIFT;
+ val = ctx_sseu.max_eus_per_subslice << GEN8_RPCS_EU_MAX_SHIFT;
GEM_BUG_ON(val & ~GEN8_RPCS_EU_MAX_MASK);
val &= GEN8_RPCS_EU_MAX_MASK;
@@ -2543,12 +2674,16 @@ static void execlists_init_reg_state(u32 *regs,
* other PDP Descriptors are ignored.
*/
ASSIGN_CTX_PML4(ctx->ppgtt, regs);
+ } else {
+ ASSIGN_CTX_PDP(ctx->ppgtt, regs, 3);
+ ASSIGN_CTX_PDP(ctx->ppgtt, regs, 2);
+ ASSIGN_CTX_PDP(ctx->ppgtt, regs, 1);
+ ASSIGN_CTX_PDP(ctx->ppgtt, regs, 0);
}
if (rcs) {
regs[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
- CTX_REG(regs, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
- make_rpcs(dev_priv));
+ CTX_REG(regs, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, 0);
i915_oa_init_reg_state(engine, ctx, regs);
}
@@ -2625,7 +2760,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
{
struct drm_i915_gem_object *ctx_obj;
struct i915_vma *vma;
- uint32_t context_size;
+ u32 context_size;
struct intel_ring *ring;
struct i915_timeline *timeline;
int ret;
@@ -2651,7 +2786,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
goto error_deref_obj;
}
- timeline = i915_timeline_create(ctx->i915, ctx->name);
+ timeline = i915_timeline_create(ctx->i915, ctx->name, NULL);
if (IS_ERR(timeline)) {
ret = PTR_ERR(timeline);
goto error_deref_obj;
@@ -2709,14 +2844,70 @@ void intel_lr_context_resume(struct drm_i915_private *i915)
intel_ring_reset(ce->ring, 0);
- if (ce->pin_count) { /* otherwise done in context_pin */
- u32 *regs = ce->lrc_reg_state;
+ if (ce->pin_count) /* otherwise done in context_pin */
+ __execlists_update_reg_state(engine, ce);
+ }
+ }
+}
+
+void intel_execlists_show_requests(struct intel_engine_cs *engine,
+ struct drm_printer *m,
+ void (*show_request)(struct drm_printer *m,
+ struct i915_request *rq,
+ const char *prefix),
+ unsigned int max)
+{
+ const struct intel_engine_execlists *execlists = &engine->execlists;
+ struct i915_request *rq, *last;
+ unsigned long flags;
+ unsigned int count;
+ struct rb_node *rb;
- regs[CTX_RING_HEAD + 1] = ce->ring->head;
- regs[CTX_RING_TAIL + 1] = ce->ring->tail;
- }
+ spin_lock_irqsave(&engine->timeline.lock, flags);
+
+ last = NULL;
+ count = 0;
+ list_for_each_entry(rq, &engine->timeline.requests, link) {
+ if (count++ < max - 1)
+ show_request(m, rq, "\t\tE ");
+ else
+ last = rq;
+ }
+ if (last) {
+ if (count > max) {
+ drm_printf(m,
+ "\t\t...skipping %d executing requests...\n",
+ count - max);
+ }
+ show_request(m, last, "\t\tE ");
+ }
+
+ last = NULL;
+ count = 0;
+ if (execlists->queue_priority_hint != INT_MIN)
+ drm_printf(m, "\t\tQueue priority hint: %d\n",
+ execlists->queue_priority_hint);
+ for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
+ struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
+ int i;
+
+ priolist_for_each_request(rq, p, i) {
+ if (count++ < max - 1)
+ show_request(m, rq, "\t\tQ ");
+ else
+ last = rq;
+ }
+ }
+ if (last) {
+ if (count > max) {
+ drm_printf(m,
+ "\t\t...skipping %d queued requests...\n",
+ count - max);
}
+ show_request(m, last, "\t\tQ ");
}
+
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index f5a5502ecf70..f1aec8a6986f 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -97,11 +97,21 @@ int logical_xcs_ring_init(struct intel_engine_cs *engine);
*/
#define LRC_HEADER_PAGES LRC_PPHWSP_PN
+struct drm_printer;
+
struct drm_i915_private;
struct i915_gem_context;
void intel_lr_context_resume(struct drm_i915_private *dev_priv);
-
void intel_execlists_set_default_submission(struct intel_engine_cs *engine);
+void intel_execlists_show_requests(struct intel_engine_cs *engine,
+ struct drm_printer *m,
+ void (*show_request)(struct drm_printer *m,
+ struct i915_request *rq,
+ const char *prefix),
+ unsigned int max);
+
+u32 gen8_make_rpcs(struct drm_i915_private *i915, struct intel_sseu *ctx_sseu);
+
#endif /* _INTEL_LRC_H_ */
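
intel_execlists_show_requests() above caps its output: it prints the first max - 1 entries, counts the remainder, then emits a "...skipping N..." line followed by the final entry. A standalone sketch of that idiom over a plain integer array (the data and helpers are illustrative stand-ins, not i915 requests):

	/* Sketch: the capped-listing idiom used by the request dumper above. */
	#include <stdio.h>

	static void show_item(int v, const char *prefix)
	{
		printf("%s%d\n", prefix, v);
	}

	static void show_capped(const int *items, unsigned int n, unsigned int max)
	{
		const int *last = NULL;
		unsigned int count = 0, i;

		for (i = 0; i < n; i++) {
			if (count++ < max - 1)
				show_item(items[i], "\tE ");
			else
				last = &items[i];	/* remember only the final one */
		}
		if (last) {
			if (count > max)
				printf("\t...skipping %u entries...\n", count - max);
			show_item(*last, "\tE ");
		}
	}

	int main(void)
	{
		int items[] = { 1, 2, 3, 4, 5, 6, 7, 8 };

		show_capped(items, 8, 4);	/* prints 1..3, a skip note, then 8 */
		return 0;
	}
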
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c
index 96a8d9524b0c..322bdddda164 100644
--- a/drivers/gpu/drm/i915/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/intel_lspcon.c
@@ -288,12 +288,12 @@ static bool lspcon_parade_fw_ready(struct drm_dp_aux *aux)
}
static bool _lspcon_parade_write_infoframe_blocks(struct drm_dp_aux *aux,
- uint8_t *avi_buf)
+ u8 *avi_buf)
{
u8 avi_if_ctrl;
u8 block_count = 0;
u8 *data;
- uint16_t reg;
+ u16 reg;
ssize_t ret;
while (block_count < 4) {
@@ -335,10 +335,10 @@ static bool _lspcon_parade_write_infoframe_blocks(struct drm_dp_aux *aux,
}
static bool _lspcon_write_avi_infoframe_parade(struct drm_dp_aux *aux,
- const uint8_t *frame,
+ const u8 *frame,
ssize_t len)
{
- uint8_t avi_if[LSPCON_PARADE_AVI_IF_DATA_SIZE] = {1, };
+ u8 avi_if[LSPCON_PARADE_AVI_IF_DATA_SIZE] = {1, };
/*
* Parade's frames contains 32 bytes of data, divided
@@ -367,13 +367,13 @@ static bool _lspcon_write_avi_infoframe_parade(struct drm_dp_aux *aux,
}
static bool _lspcon_write_avi_infoframe_mca(struct drm_dp_aux *aux,
- const uint8_t *buffer, ssize_t len)
+ const u8 *buffer, ssize_t len)
{
int ret;
- uint32_t val = 0;
- uint32_t retry;
- uint16_t reg;
- const uint8_t *data = buffer;
+ u32 val = 0;
+ u32 retry;
+ u16 reg;
+ const u8 *data = buffer;
reg = LSPCON_MCA_AVI_IF_WRITE_OFFSET;
while (val < len) {
@@ -459,13 +459,11 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
{
ssize_t ret;
union hdmi_infoframe frame;
- uint8_t buf[VIDEO_DIP_DATA_SIZE];
+ u8 buf[VIDEO_DIP_DATA_SIZE];
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
struct intel_lspcon *lspcon = &dig_port->lspcon;
- struct intel_dp *intel_dp = &dig_port->dp;
- struct drm_connector *connector = &intel_dp->attached_connector->base;
- const struct drm_display_mode *mode = &crtc_state->base.adjusted_mode;
- bool is_hdmi2_sink = connector->display_info.hdmi.scdc.supported;
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
if (!lspcon->active) {
DRM_ERROR("Writing infoframes while LSPCON disabled ?\n");
@@ -473,7 +471,8 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
}
ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
- mode, is_hdmi2_sink);
+ conn_state->connector,
+ adjusted_mode);
if (ret < 0) {
DRM_ERROR("couldn't fill AVI infoframe\n");
return;
@@ -488,11 +487,12 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
frame.avi.colorspace = HDMI_COLORSPACE_RGB;
}
- drm_hdmi_avi_infoframe_quant_range(&frame.avi, mode,
+ drm_hdmi_avi_infoframe_quant_range(&frame.avi,
+ conn_state->connector,
+ adjusted_mode,
crtc_state->limited_color_range ?
HDMI_QUANTIZATION_RANGE_LIMITED :
- HDMI_QUANTIZATION_RANGE_FULL,
- false, is_hdmi2_sink);
+ HDMI_QUANTIZATION_RANGE_FULL);
ret = hdmi_infoframe_pack(&frame, buf, sizeof(buf));
if (ret < 0) {
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index e6c5d985ea0a..b4aa49768e90 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -32,7 +32,6 @@
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/vga_switcheroo.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
@@ -95,15 +94,17 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+ intel_wakeref_t wakeref;
bool ret;
- if (!intel_display_power_get_if_enabled(dev_priv,
- encoder->power_domain))
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ encoder->power_domain);
+ if (!wakeref)
return false;
ret = intel_lvds_port_enabled(dev_priv, lvds_encoder->reg, pipe);
- intel_display_power_put(dev_priv, encoder->power_domain);
+ intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
return ret;
}
@@ -279,7 +280,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder,
* special lvds dither control bit on pch-split platforms, dithering is
* only controlled through the PIPECONF reg.
*/
- if (IS_GEN4(dev_priv)) {
+ if (IS_GEN(dev_priv, 4)) {
/*
* Bspec wording suggests that LVDS port dithering only exists
* for 18bpp panels.
@@ -379,9 +380,9 @@ intel_lvds_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
struct intel_lvds_encoder *lvds_encoder =
@@ -395,7 +396,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
/* Should never happen!! */
if (INTEL_GEN(dev_priv) < 4 && intel_crtc->pipe == 0) {
DRM_ERROR("Can't support LVDS on pipe A\n");
- return false;
+ return -EINVAL;
}
if (lvds_encoder->a3_power == LVDS_A3_POWER_UP)
@@ -421,7 +422,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
adjusted_mode);
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return false;
+ return -EINVAL;
if (HAS_PCH_SPLIT(dev_priv)) {
pipe_config->has_pch_encoder = true;
@@ -440,7 +441,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
* user's requested refresh rate.
*/
- return true;
+ return 0;
}
static enum drm_connector_status
@@ -797,26 +798,6 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
}
-static bool intel_lvds_supported(struct drm_i915_private *dev_priv)
-{
- /*
- * With the introduction of the PCH we gained a dedicated
- * LVDS presence pin, use it.
- */
- if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
- return true;
-
- /*
- * Otherwise LVDS was only attached to mobile products,
- * except for the inglorious 830gm
- */
- if (INTEL_GEN(dev_priv) <= 4 &&
- IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
- return true;
-
- return false;
-}
-
/**
* intel_lvds_init - setup LVDS connectors on this device
* @dev_priv: i915 device
@@ -841,9 +822,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
u8 pin;
u32 allowed_scalers;
- if (!intel_lvds_supported(dev_priv))
- return;
-
/* Skip init on machines we know falsely report LVDS */
if (dmi_check_system(intel_no_lvds)) {
WARN(!dev_priv->vbt.int_lvds_support,
@@ -909,6 +887,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
}
intel_encoder->get_hw_state = intel_lvds_get_hw_state;
intel_encoder->get_config = intel_lvds_get_config;
+ intel_encoder->update_pipe = intel_panel_update_backlight;
intel_connector->get_hw_state = intel_connector_get_hw_state;
intel_connector_attach_encoder(intel_connector, intel_encoder);
@@ -919,7 +898,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
intel_encoder->cloneable = 0;
if (HAS_PCH_SPLIT(dev_priv))
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
- else if (IS_GEN4(dev_priv))
+ else if (IS_GEN(dev_priv, 4))
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
else
intel_encoder->crtc_mask = (1 << 1);
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 77e9871a8c9a..331e7a678fb7 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -28,48 +28,60 @@
struct drm_i915_mocs_entry {
u32 control_value;
u16 l3cc_value;
+ u16 used;
};
struct drm_i915_mocs_table {
- u32 size;
+ unsigned int size;
+ unsigned int n_entries;
const struct drm_i915_mocs_entry *table;
};
/* Defines for the tables (XXX_MOCS_0 - XXX_MOCS_63) */
-#define LE_CACHEABILITY(value) ((value) << 0)
-#define LE_TGT_CACHE(value) ((value) << 2)
+#define _LE_CACHEABILITY(value) ((value) << 0)
+#define _LE_TGT_CACHE(value) ((value) << 2)
#define LE_LRUM(value) ((value) << 4)
#define LE_AOM(value) ((value) << 6)
#define LE_RSC(value) ((value) << 7)
#define LE_SCC(value) ((value) << 8)
#define LE_PFM(value) ((value) << 11)
#define LE_SCF(value) ((value) << 14)
+#define LE_COS(value) ((value) << 15)
+#define LE_SSE(value) ((value) << 17)
/* Defines for the tables (LNCFMOCS0 - LNCFMOCS31) - two entries per word */
#define L3_ESC(value) ((value) << 0)
#define L3_SCC(value) ((value) << 1)
-#define L3_CACHEABILITY(value) ((value) << 4)
+#define _L3_CACHEABILITY(value) ((value) << 4)
/* Helper defines */
#define GEN9_NUM_MOCS_ENTRIES 62 /* 62 out of 64 - 63 & 64 are reserved. */
+#define GEN11_NUM_MOCS_ENTRIES 64 /* 63-64 are reserved, but configured. */
/* (e)LLC caching options */
-#define LE_PAGETABLE 0
-#define LE_UC 1
-#define LE_WT 2
-#define LE_WB 3
-
-/* L3 caching options */
-#define L3_DIRECT 0
-#define L3_UC 1
-#define L3_RESERVED 2
-#define L3_WB 3
+#define LE_0_PAGETABLE _LE_CACHEABILITY(0)
+#define LE_1_UC _LE_CACHEABILITY(1)
+#define LE_2_WT _LE_CACHEABILITY(2)
+#define LE_3_WB _LE_CACHEABILITY(3)
/* Target cache */
-#define LE_TC_PAGETABLE 0
-#define LE_TC_LLC 1
-#define LE_TC_LLC_ELLC 2
-#define LE_TC_LLC_ELLC_ALT 3
+#define LE_TC_0_PAGETABLE _LE_TGT_CACHE(0)
+#define LE_TC_1_LLC _LE_TGT_CACHE(1)
+#define LE_TC_2_LLC_ELLC _LE_TGT_CACHE(2)
+#define LE_TC_3_LLC_ELLC_ALT _LE_TGT_CACHE(3)
+
+/* L3 caching options */
+#define L3_0_DIRECT _L3_CACHEABILITY(0)
+#define L3_1_UC _L3_CACHEABILITY(1)
+#define L3_2_RESERVED _L3_CACHEABILITY(2)
+#define L3_3_WB _L3_CACHEABILITY(3)
+
+#define MOCS_ENTRY(__idx, __control_value, __l3cc_value) \
+ [__idx] = { \
+ .control_value = __control_value, \
+ .l3cc_value = __l3cc_value, \
+ .used = 1, \
+ }
/*
* MOCS tables
@@ -80,85 +92,147 @@ struct drm_i915_mocs_table {
* LNCFCMOCS0 - LNCFCMOCS32 registers.
*
* These tables are intended to be kept reasonably consistent across
- * platforms. However some of the fields are not applicable to all of
- * them.
+ * HW platforms, and for ICL+, be identical across OSes. To achieve
+ * that, for Icelake and above, the list of entries is published as part
+ * of bspec.
*
* Entries not part of the following tables are undefined as far as
* userspace is concerned and shouldn't be relied upon. For the time
- * being they will be implicitly initialized to the strictest caching
- * configuration (uncached) to guarantee forwards compatibility with
- * userspace programs written against more recent kernels providing
- * additional MOCS entries.
+ * being they will be initialized to PTE.
*
- * NOTE: These tables MUST start with being uncached and the length
- * MUST be less than 63 as the last two registers are reserved
- * by the hardware. These tables are part of the kernel ABI and
- * may only be updated incrementally by adding entries at the
- * end.
+ * The last two entries are reserved by the hardware. For ICL+ they
+ * should be initialized according to bspec and never used; for older
+ * platforms they should never be written to.
+ *
+ * NOTE: These tables are part of bspec and defined as part of the hardware
+ * interface for ICL+. For older platforms, they are part of the kernel
+ * ABI. It is expected that, for a specific hardware platform, existing
+ * entries will remain constant and the table will only be updated by
+ * adding new entries, filling unused positions.
*/
+#define GEN9_MOCS_ENTRIES \
+ MOCS_ENTRY(I915_MOCS_UNCACHED, \
+ LE_1_UC | LE_TC_2_LLC_ELLC, \
+ L3_1_UC), \
+ MOCS_ENTRY(I915_MOCS_PTE, \
+ LE_0_PAGETABLE | LE_TC_2_LLC_ELLC | LE_LRUM(3), \
+ L3_3_WB)
+
static const struct drm_i915_mocs_entry skylake_mocs_table[] = {
- [I915_MOCS_UNCACHED] = {
- /* 0x00000009 */
- .control_value = LE_CACHEABILITY(LE_UC) |
- LE_TGT_CACHE(LE_TC_LLC_ELLC) |
- LE_LRUM(0) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
- LE_PFM(0) | LE_SCF(0),
-
- /* 0x0010 */
- .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC),
- },
- [I915_MOCS_PTE] = {
- /* 0x00000038 */
- .control_value = LE_CACHEABILITY(LE_PAGETABLE) |
- LE_TGT_CACHE(LE_TC_LLC_ELLC) |
- LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
- LE_PFM(0) | LE_SCF(0),
- /* 0x0030 */
- .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB),
- },
- [I915_MOCS_CACHED] = {
- /* 0x0000003b */
- .control_value = LE_CACHEABILITY(LE_WB) |
- LE_TGT_CACHE(LE_TC_LLC_ELLC) |
- LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
- LE_PFM(0) | LE_SCF(0),
- /* 0x0030 */
- .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB),
- },
+ GEN9_MOCS_ENTRIES,
+ MOCS_ENTRY(I915_MOCS_CACHED,
+ LE_3_WB | LE_TC_2_LLC_ELLC | LE_LRUM(3),
+ L3_3_WB)
};
/* NOTE: the LE_TGT_CACHE is not used on Broxton */
static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
- [I915_MOCS_UNCACHED] = {
- /* 0x00000009 */
- .control_value = LE_CACHEABILITY(LE_UC) |
- LE_TGT_CACHE(LE_TC_LLC_ELLC) |
- LE_LRUM(0) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
- LE_PFM(0) | LE_SCF(0),
-
- /* 0x0010 */
- .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC),
- },
- [I915_MOCS_PTE] = {
- /* 0x00000038 */
- .control_value = LE_CACHEABILITY(LE_PAGETABLE) |
- LE_TGT_CACHE(LE_TC_LLC_ELLC) |
- LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
- LE_PFM(0) | LE_SCF(0),
-
- /* 0x0030 */
- .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB),
- },
- [I915_MOCS_CACHED] = {
- /* 0x00000039 */
- .control_value = LE_CACHEABILITY(LE_UC) |
- LE_TGT_CACHE(LE_TC_LLC_ELLC) |
- LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
- LE_PFM(0) | LE_SCF(0),
-
- /* 0x0030 */
- .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB),
- },
+ GEN9_MOCS_ENTRIES,
+ MOCS_ENTRY(I915_MOCS_CACHED,
+ LE_1_UC | LE_TC_2_LLC_ELLC | LE_LRUM(3),
+ L3_3_WB)
+};
+
+#define GEN11_MOCS_ENTRIES \
+ /* Base - Uncached (Deprecated) */ \
+ MOCS_ENTRY(I915_MOCS_UNCACHED, \
+ LE_1_UC | LE_TC_1_LLC, \
+ L3_1_UC), \
+ /* Base - L3 + LeCC:PAT (Deprecated) */ \
+ MOCS_ENTRY(I915_MOCS_PTE, \
+ LE_0_PAGETABLE | LE_TC_1_LLC, \
+ L3_3_WB), \
+ /* Base - L3 + LLC */ \
+ MOCS_ENTRY(2, \
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), \
+ L3_3_WB), \
+ /* Base - Uncached */ \
+ MOCS_ENTRY(3, \
+ LE_1_UC | LE_TC_1_LLC, \
+ L3_1_UC), \
+ /* Base - L3 */ \
+ MOCS_ENTRY(4, \
+ LE_1_UC | LE_TC_1_LLC, \
+ L3_3_WB), \
+ /* Base - LLC */ \
+ MOCS_ENTRY(5, \
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), \
+ L3_1_UC), \
+ /* Age 0 - LLC */ \
+ MOCS_ENTRY(6, \
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(1), \
+ L3_1_UC), \
+ /* Age 0 - L3 + LLC */ \
+ MOCS_ENTRY(7, \
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(1), \
+ L3_3_WB), \
+ /* Age: Don't Chg. - LLC */ \
+ MOCS_ENTRY(8, \
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(2), \
+ L3_1_UC), \
+ /* Age: Don't Chg. - L3 + LLC */ \
+ MOCS_ENTRY(9, \
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(2), \
+ L3_3_WB), \
+ /* No AOM - LLC */ \
+ MOCS_ENTRY(10, \
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_AOM(1), \
+ L3_1_UC), \
+ /* No AOM - L3 + LLC */ \
+ MOCS_ENTRY(11, \
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_AOM(1), \
+ L3_3_WB), \
+ /* No AOM; Age 0 - LLC */ \
+ MOCS_ENTRY(12, \
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(1) | LE_AOM(1), \
+ L3_1_UC), \
+ /* No AOM; Age 0 - L3 + LLC */ \
+ MOCS_ENTRY(13, \
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(1) | LE_AOM(1), \
+ L3_3_WB), \
+ /* No AOM; Age:DC - LLC */ \
+ MOCS_ENTRY(14, \
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(2) | LE_AOM(1), \
+ L3_1_UC), \
+ /* No AOM; Age:DC - L3 + LLC */ \
+ MOCS_ENTRY(15, \
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(2) | LE_AOM(1), \
+ L3_3_WB), \
+ /* Self-Snoop - L3 + LLC */ \
+ MOCS_ENTRY(18, \
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SSE(3), \
+ L3_3_WB), \
+ /* Skip Caching - L3 + LLC(12.5%) */ \
+ MOCS_ENTRY(19, \
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SCC(7), \
+ L3_3_WB), \
+ /* Skip Caching - L3 + LLC(25%) */ \
+ MOCS_ENTRY(20, \
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SCC(3), \
+ L3_3_WB), \
+ /* Skip Caching - L3 + LLC(50%) */ \
+ MOCS_ENTRY(21, \
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SCC(1), \
+ L3_3_WB), \
+ /* Skip Caching - L3 + LLC(75%) */ \
+ MOCS_ENTRY(22, \
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_RSC(1) | LE_SCC(3), \
+ L3_3_WB), \
+ /* Skip Caching - L3 + LLC(87.5%) */ \
+ MOCS_ENTRY(23, \
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_RSC(1) | LE_SCC(7), \
+ L3_3_WB), \
+ /* HW Reserved - SW program but never use */ \
+ MOCS_ENTRY(62, \
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), \
+ L3_1_UC), \
+ /* HW Reserved - SW program but never use */ \
+ MOCS_ENTRY(63, \
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), \
+ L3_1_UC)
+
+static const struct drm_i915_mocs_entry icelake_mocs_table[] = {
+ GEN11_MOCS_ENTRIES
};
/**
@@ -178,13 +252,19 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv,
{
bool result = false;
- if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv) ||
- IS_ICELAKE(dev_priv)) {
+ if (IS_ICELAKE(dev_priv)) {
+ table->size = ARRAY_SIZE(icelake_mocs_table);
+ table->table = icelake_mocs_table;
+ table->n_entries = GEN11_NUM_MOCS_ENTRIES;
+ result = true;
+ } else if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
table->size = ARRAY_SIZE(skylake_mocs_table);
+ table->n_entries = GEN9_NUM_MOCS_ENTRIES;
table->table = skylake_mocs_table;
result = true;
} else if (IS_GEN9_LP(dev_priv)) {
table->size = ARRAY_SIZE(broxton_mocs_table);
+ table->n_entries = GEN9_NUM_MOCS_ENTRIES;
table->table = broxton_mocs_table;
result = true;
} else {
@@ -193,7 +273,7 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv,
}
/* WaDisableSkipCaching:skl,bxt,kbl,glk */
- if (IS_GEN9(dev_priv)) {
+ if (IS_GEN(dev_priv, 9)) {
int i;
for (i = 0; i < table->size; i++)
@@ -226,6 +306,19 @@ static i915_reg_t mocs_register(enum intel_engine_id engine_id, int index)
}
}
+/*
+ * Get control_value from the MOCS entry, taking into account when it's not
+ * used: I915_MOCS_PTE's value is returned in that case.
+ */
+static u32 get_entry_control(const struct drm_i915_mocs_table *table,
+ unsigned int index)
+{
+ if (table->table[index].used)
+ return table->table[index].control_value;
+
+ return table->table[I915_MOCS_PTE].control_value;
+}
+
/**
* intel_mocs_init_engine() - emit the mocs control table
* @engine: The engine for whom to emit the registers.
@@ -238,27 +331,23 @@ void intel_mocs_init_engine(struct intel_engine_cs *engine)
struct drm_i915_private *dev_priv = engine->i915;
struct drm_i915_mocs_table table;
unsigned int index;
+ u32 unused_value;
if (!get_mocs_settings(dev_priv, &table))
return;
- GEM_BUG_ON(table.size > GEN9_NUM_MOCS_ENTRIES);
-
- for (index = 0; index < table.size; index++)
- I915_WRITE(mocs_register(engine->id, index),
- table.table[index].control_value);
-
- /*
- * Ok, now set the unused entries to uncached. These entries
- * are officially undefined and no contract for the contents
- * and settings is given for these entries.
- *
- * Entry 0 in the table is uncached - so we are just writing
- * that value to all the used entries.
- */
- for (; index < GEN9_NUM_MOCS_ENTRIES; index++)
- I915_WRITE(mocs_register(engine->id, index),
- table.table[0].control_value);
+ /* Set unused values to PTE */
+ unused_value = table.table[I915_MOCS_PTE].control_value;
+
+ for (index = 0; index < table.size; index++) {
+ u32 value = get_entry_control(&table, index);
+
+ I915_WRITE(mocs_register(engine->id, index), value);
+ }
+
+ /* All remaining entries are also unused */
+ for (; index < table.n_entries; index++)
+ I915_WRITE(mocs_register(engine->id, index), unused_value);
}
/**
@@ -276,33 +365,32 @@ static int emit_mocs_control_table(struct i915_request *rq,
{
enum intel_engine_id engine = rq->engine->id;
unsigned int index;
+ u32 unused_value;
u32 *cs;
- if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
+ if (GEM_WARN_ON(table->size > table->n_entries))
return -ENODEV;
- cs = intel_ring_begin(rq, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
+ /* Set unused values to PTE */
+ unused_value = table->table[I915_MOCS_PTE].control_value;
+
+ cs = intel_ring_begin(rq, 2 + 2 * table->n_entries);
if (IS_ERR(cs))
return PTR_ERR(cs);
- *cs++ = MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES);
+ *cs++ = MI_LOAD_REGISTER_IMM(table->n_entries);
for (index = 0; index < table->size; index++) {
+ u32 value = get_entry_control(table, index);
+
*cs++ = i915_mmio_reg_offset(mocs_register(engine, index));
- *cs++ = table->table[index].control_value;
+ *cs++ = value;
}
- /*
- * Ok, now set the unused entries to uncached. These entries
- * are officially undefined and no contract for the contents
- * and settings is given for these entries.
- *
- * Entry 0 in the table is uncached - so we are just writing
- * that value to all the used entries.
- */
- for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
+ /* All remaining entries are also unused */
+ for (; index < table->n_entries; index++) {
*cs++ = i915_mmio_reg_offset(mocs_register(engine, index));
- *cs++ = table->table[0].control_value;
+ *cs++ = unused_value;
}
*cs++ = MI_NOOP;
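The ring space reserved above works out to 2 + 2 * n_entries dwords: one MI_LOAD_REGISTER_IMM header, an (offset, value) pair per MOCS register, and a trailing MI_NOOP so the stream stays an even number of dwords. A minimal sketch of that layout, assuming the driver's existing MI_LOAD_REGISTER_IMM()/MI_NOOP definitions (illustrative, not new driver code):

static u32 *emit_mocs_lri_sketch(u32 *cs, unsigned int n_entries,
                                 const u32 *offsets, const u32 *values)
{
        unsigned int i;

        *cs++ = MI_LOAD_REGISTER_IMM(n_entries); /* header covers n_entries pairs */
        for (i = 0; i < n_entries; i++) {
                *cs++ = offsets[i]; /* register offset */
                *cs++ = values[i];  /* programmed value (PTE value for unused slots) */
        }
        *cs++ = MI_NOOP; /* pad back to an even dword count */

        return cs;
}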
@@ -311,12 +399,24 @@ static int emit_mocs_control_table(struct i915_request *rq,
return 0;
}
+/*
+ * Get l3cc_value from the MOCS entry.  If the entry is not used, the
+ * I915_MOCS_PTE entry's l3cc_value is returned instead.
+ */
+static u16 get_entry_l3cc(const struct drm_i915_mocs_table *table,
+ unsigned int index)
+{
+ if (table->table[index].used)
+ return table->table[index].l3cc_value;
+
+ return table->table[I915_MOCS_PTE].l3cc_value;
+}
+
static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
u16 low,
u16 high)
{
- return table->table[low].l3cc_value |
- table->table[high].l3cc_value << 16;
+ return low | high << 16;
}
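Each GEN9_LNCFCMOCS register carries two L3 control words: entry 2*i in the low 16 bits and entry 2*i + 1 in the high 16 bits, which is all l3cc_combine() does now that the PTE fallback lives in get_entry_l3cc(). A standalone sketch with made-up values:

#include <stdint.h>
#include <stdio.h>

static uint32_t pack_l3cc(uint16_t low, uint16_t high)
{
        return (uint32_t)low | ((uint32_t)high << 16);
}

int main(void)
{
        /* hypothetical l3cc words for MOCS entries 0 and 1 */
        printf("LNCFCMOCS(0) = 0x%08x\n", pack_l3cc(0x0010, 0x0030));
        return 0;
}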
/**
@@ -333,38 +433,43 @@ static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
static int emit_mocs_l3cc_table(struct i915_request *rq,
const struct drm_i915_mocs_table *table)
{
+ u16 unused_value;
unsigned int i;
u32 *cs;
- if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
+ if (GEM_WARN_ON(table->size > table->n_entries))
return -ENODEV;
- cs = intel_ring_begin(rq, 2 + GEN9_NUM_MOCS_ENTRIES);
+ /* Set unused values to PTE */
+ unused_value = table->table[I915_MOCS_PTE].l3cc_value;
+
+ cs = intel_ring_begin(rq, 2 + table->n_entries);
if (IS_ERR(cs))
return PTR_ERR(cs);
- *cs++ = MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2);
+ *cs++ = MI_LOAD_REGISTER_IMM(table->n_entries / 2);
+
+ for (i = 0; i < table->size / 2; i++) {
+ u16 low = get_entry_l3cc(table, 2 * i);
+ u16 high = get_entry_l3cc(table, 2 * i + 1);
- for (i = 0; i < table->size/2; i++) {
*cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
- *cs++ = l3cc_combine(table, 2 * i, 2 * i + 1);
+ *cs++ = l3cc_combine(table, low, high);
}
+ /* Odd table size - 1 left over */
if (table->size & 0x01) {
- /* Odd table size - 1 left over */
+ u16 low = get_entry_l3cc(table, 2 * i);
+
*cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
- *cs++ = l3cc_combine(table, 2 * i, 0);
+ *cs++ = l3cc_combine(table, low, unused_value);
i++;
}
- /*
- * Now set the rest of the table to uncached - use entry 0 as
- * this will be uncached. Leave the last pair uninitialised as
- * they are reserved by the hardware.
- */
- for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
+ /* All remaining entries are also unused */
+ for (; i < table->n_entries / 2; i++) {
*cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
- *cs++ = l3cc_combine(table, 0, 0);
+ *cs++ = l3cc_combine(table, unused_value, unused_value);
}
*cs++ = MI_NOOP;
@@ -391,26 +496,35 @@ void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv)
{
struct drm_i915_mocs_table table;
unsigned int i;
+ u16 unused_value;
if (!get_mocs_settings(dev_priv, &table))
return;
- for (i = 0; i < table.size/2; i++)
- I915_WRITE(GEN9_LNCFCMOCS(i), l3cc_combine(&table, 2*i, 2*i+1));
+ /* Set unused values to PTE */
+ unused_value = table.table[I915_MOCS_PTE].l3cc_value;
+
+ for (i = 0; i < table.size / 2; i++) {
+ u16 low = get_entry_l3cc(&table, 2 * i);
+ u16 high = get_entry_l3cc(&table, 2 * i + 1);
+
+ I915_WRITE(GEN9_LNCFCMOCS(i),
+ l3cc_combine(&table, low, high));
+ }
/* Odd table size - 1 left over */
if (table.size & 0x01) {
- I915_WRITE(GEN9_LNCFCMOCS(i), l3cc_combine(&table, 2*i, 0));
+ u16 low = get_entry_l3cc(&table, 2 * i);
+
+ I915_WRITE(GEN9_LNCFCMOCS(i),
+ l3cc_combine(&table, low, unused_value));
i++;
}
- /*
- * Now set the rest of the table to uncached - use entry 0 as
- * this will be uncached. Leave the last pair as initialised as
- * they are reserved by the hardware.
- */
- for (; i < (GEN9_NUM_MOCS_ENTRIES / 2); i++)
- I915_WRITE(GEN9_LNCFCMOCS(i), l3cc_combine(&table, 0, 0));
+ /* All remaining entries are also unused */
+ for (; i < table.n_entries / 2; i++)
+ I915_WRITE(GEN9_LNCFCMOCS(i),
+ l3cc_combine(&table, unused_value, unused_value));
}
/**
diff --git a/drivers/gpu/drm/i915/intel_mocs.h b/drivers/gpu/drm/i915/intel_mocs.h
index d89080d75b80..3d99d1271b2b 100644
--- a/drivers/gpu/drm/i915/intel_mocs.h
+++ b/drivers/gpu/drm/i915/intel_mocs.h
@@ -49,7 +49,6 @@
* context handling keep the MOCS in step.
*/
-#include <drm/drmP.h>
#include "i915_drv.h"
int intel_rcs_context_init_mocs(struct i915_request *rq);
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 3ac20153705a..5e00ee9270b5 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -30,7 +30,6 @@
#include <linux/firmware.h>
#include <acpi/video.h>
-#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "intel_opregion.h"
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 20ea7c99d13a..c0df1dbb0069 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -25,8 +25,9 @@
*
* Derived from Xorg ddx, xf86-video-intel, src/i830_video.c
*/
-#include <drm/drmP.h>
#include <drm/i915_drm.h>
+#include <drm/drm_fourcc.h>
+
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_drv.h"
@@ -185,7 +186,7 @@ struct intel_overlay {
struct overlay_registers __iomem *regs;
u32 flip_addr;
/* flip handling */
- struct i915_gem_active last_flip;
+ struct i915_active_request last_flip;
};
static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
@@ -213,23 +214,23 @@ static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
static void intel_overlay_submit_request(struct intel_overlay *overlay,
struct i915_request *rq,
- i915_gem_retire_fn retire)
+ i915_active_retire_fn retire)
{
- GEM_BUG_ON(i915_gem_active_peek(&overlay->last_flip,
- &overlay->i915->drm.struct_mutex));
- i915_gem_active_set_retire_fn(&overlay->last_flip, retire,
- &overlay->i915->drm.struct_mutex);
- i915_gem_active_set(&overlay->last_flip, rq);
+ GEM_BUG_ON(i915_active_request_peek(&overlay->last_flip,
+ &overlay->i915->drm.struct_mutex));
+ i915_active_request_set_retire_fn(&overlay->last_flip, retire,
+ &overlay->i915->drm.struct_mutex);
+ __i915_active_request_set(&overlay->last_flip, rq);
i915_request_add(rq);
}
static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
struct i915_request *rq,
- i915_gem_retire_fn retire)
+ i915_active_retire_fn retire)
{
intel_overlay_submit_request(overlay, rq, retire);
- return i915_gem_active_retire(&overlay->last_flip,
- &overlay->i915->drm.struct_mutex);
+ return i915_active_request_retire(&overlay->last_flip,
+ &overlay->i915->drm.struct_mutex);
}
static struct i915_request *alloc_request(struct intel_overlay *overlay)
@@ -350,8 +351,9 @@ static void intel_overlay_release_old_vma(struct intel_overlay *overlay)
i915_vma_put(vma);
}
-static void intel_overlay_release_old_vid_tail(struct i915_gem_active *active,
- struct i915_request *rq)
+static void
+intel_overlay_release_old_vid_tail(struct i915_active_request *active,
+ struct i915_request *rq)
{
struct intel_overlay *overlay =
container_of(active, typeof(*overlay), last_flip);
@@ -359,7 +361,7 @@ static void intel_overlay_release_old_vid_tail(struct i915_gem_active *active,
intel_overlay_release_old_vma(overlay);
}
-static void intel_overlay_off_tail(struct i915_gem_active *active,
+static void intel_overlay_off_tail(struct i915_active_request *active,
struct i915_request *rq)
{
struct intel_overlay *overlay =
@@ -422,8 +424,8 @@ static int intel_overlay_off(struct intel_overlay *overlay)
* We have to be careful not to repeat work forever and make forward progress. */
static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
{
- return i915_gem_active_retire(&overlay->last_flip,
- &overlay->i915->drm.struct_mutex);
+ return i915_active_request_retire(&overlay->last_flip,
+ &overlay->i915->drm.struct_mutex);
}
/* Wait for pending overlay flip and release old frame.
@@ -479,8 +481,6 @@ void intel_overlay_reset(struct drm_i915_private *dev_priv)
if (!overlay)
return;
- intel_overlay_release_old_vid(overlay);
-
overlay->old_xscale = 0;
overlay->old_yscale = 0;
overlay->crtc = NULL;
@@ -541,7 +541,7 @@ static u32 calc_swidthsw(struct drm_i915_private *dev_priv, u32 offset, u32 widt
{
u32 sw;
- if (IS_GEN2(dev_priv))
+ if (IS_GEN(dev_priv, 2))
sw = ALIGN((offset & 31) + width, 32);
else
sw = ALIGN((offset & 63) + width, 64);
@@ -778,7 +778,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
u32 oconfig;
oconfig = OCONF_CC_OUT_8BIT;
- if (IS_GEN4(dev_priv))
+ if (IS_GEN(dev_priv, 4))
oconfig |= OCONF_CSC_MODE_BT709;
oconfig |= pipe == 0 ?
OCONF_PIPE_A : OCONF_PIPE_B;
@@ -1012,7 +1012,7 @@ static int check_overlay_src(struct drm_i915_private *dev_priv,
if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
return -EINVAL;
- if (IS_GEN4(dev_priv) && rec->stride_Y < 512)
+ if (IS_GEN(dev_priv, 4) && rec->stride_Y < 512)
return -EINVAL;
tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
@@ -1246,7 +1246,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
attrs->contrast = overlay->contrast;
attrs->saturation = overlay->saturation;
- if (!IS_GEN2(dev_priv)) {
+ if (!IS_GEN(dev_priv, 2)) {
attrs->gamma0 = I915_READ(OGAMC0);
attrs->gamma1 = I915_READ(OGAMC1);
attrs->gamma2 = I915_READ(OGAMC2);
@@ -1270,7 +1270,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
update_reg_attrs(overlay, overlay->regs);
if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
- if (IS_GEN2(dev_priv))
+ if (IS_GEN(dev_priv, 2))
goto out_unlock;
if (overlay->active) {
@@ -1358,7 +1358,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
overlay->contrast = 75;
overlay->saturation = 146;
- init_request_active(&overlay->last_flip, NULL);
+ INIT_ACTIVE_REQUEST(&overlay->last_flip);
mutex_lock(&dev_priv->drm.struct_mutex);
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index e6cd7b55c018..beca98d2b035 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -563,7 +563,7 @@ static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32
pci_write_config_byte(dev_priv->drm.pdev, LBPC, lbpc);
}
- if (IS_GEN4(dev_priv)) {
+ if (IS_GEN(dev_priv, 4)) {
mask = BACKLIGHT_DUTY_CYCLE_MASK;
} else {
level <<= 1;
@@ -929,7 +929,7 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
* 855gm only, but checking for gen2 is safe, as 855gm is the only gen2
* that has backlight.
*/
- if (IS_GEN2(dev_priv))
+ if (IS_GEN(dev_priv, 2))
I915_WRITE(BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE);
}
@@ -1087,20 +1087,11 @@ static void pwm_enable_backlight(const struct intel_crtc_state *crtc_state,
intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
}
-void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+static void __intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
-
- if (!panel->backlight.present)
- return;
-
- DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
-
- mutex_lock(&dev_priv->backlight_lock);
WARN_ON(panel->backlight.max == 0);
@@ -1117,6 +1108,24 @@ void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
panel->backlight.enabled = true;
if (panel->backlight.device)
panel->backlight.device->props.power = FB_BLANK_UNBLANK;
+}
+
+void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
+
+ if (!panel->backlight.present)
+ return;
+
+ DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
+
+ mutex_lock(&dev_priv->backlight_lock);
+
+ __intel_panel_enable_backlight(crtc_state, conn_state);
mutex_unlock(&dev_priv->backlight_lock);
}
@@ -1203,17 +1212,20 @@ static int intel_backlight_device_get_brightness(struct backlight_device *bd)
struct intel_connector *connector = bl_get_data(bd);
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- u32 hw_level;
- int ret;
+ intel_wakeref_t wakeref;
+ int ret = 0;
- intel_runtime_pm_get(dev_priv);
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ with_intel_runtime_pm(dev_priv, wakeref) {
+ u32 hw_level;
- hw_level = intel_panel_get_backlight(connector);
- ret = scale_hw_to_user(connector, hw_level, bd->props.max_brightness);
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
- intel_runtime_pm_put(dev_priv);
+ hw_level = intel_panel_get_backlight(connector);
+ ret = scale_hw_to_user(connector,
+ hw_level, bd->props.max_brightness);
+
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ }
return ret;
}
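This is part of the wakeref-tracking conversion running through the series: rather than a bare intel_runtime_pm_get()/intel_runtime_pm_put() pair, the wakeref is held only for the scope of the block and released automatically. A minimal sketch of the pattern, where do_hw_access() is a hypothetical stand-in for whatever needs the device awake:

intel_wakeref_t wakeref;

with_intel_runtime_pm(dev_priv, wakeref) {
        /* the device is guaranteed to be awake inside this block */
        do_hw_access(dev_priv);
}
/* the tracked wakeref is dropped automatically when the block exits */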
@@ -1484,8 +1496,8 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- u32 pch_ctl1, pch_ctl2, val;
- bool alt;
+ u32 cpu_ctl2, pch_ctl1, pch_ctl2, val;
+ bool alt, cpu_mode;
if (HAS_PCH_LPT(dev_priv))
alt = I915_READ(SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY;
@@ -1499,6 +1511,8 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
panel->backlight.max = pch_ctl2 >> 16;
+ cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
+
if (!panel->backlight.max)
panel->backlight.max = get_backlight_max_vbt(connector);
@@ -1507,12 +1521,28 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
panel->backlight.min = get_backlight_min_vbt(connector);
- val = lpt_get_backlight(connector);
+ panel->backlight.enabled = pch_ctl1 & BLM_PCH_PWM_ENABLE;
+
+ cpu_mode = panel->backlight.enabled && HAS_PCH_LPT(dev_priv) &&
+ !(pch_ctl1 & BLM_PCH_OVERRIDE_ENABLE) &&
+ (cpu_ctl2 & BLM_PWM_ENABLE);
+ if (cpu_mode)
+ val = pch_get_backlight(connector);
+ else
+ val = lpt_get_backlight(connector);
val = intel_panel_compute_brightness(connector, val);
panel->backlight.level = clamp(val, panel->backlight.min,
panel->backlight.max);
- panel->backlight.enabled = pch_ctl1 & BLM_PCH_PWM_ENABLE;
+ if (cpu_mode) {
+ DRM_DEBUG_KMS("CPU backlight register was enabled, switching to PCH override\n");
+
+ /* Write converted CPU PWM value to PCH override register */
+ lpt_set_backlight(connector->base.state, panel->backlight.level);
+ I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_OVERRIDE_ENABLE);
+
+ I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2 & ~BLM_PWM_ENABLE);
+ }
return 0;
}
@@ -1557,7 +1587,7 @@ static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unu
ctl = I915_READ(BLC_PWM_CTL);
- if (IS_GEN2(dev_priv) || IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
+ if (IS_GEN(dev_priv, 2) || IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE;
if (IS_PINEVIEW(dev_priv))
@@ -1773,6 +1803,24 @@ static int pwm_setup_backlight(struct intel_connector *connector,
return 0;
}
+void intel_panel_update_backlight(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+
+ if (!panel->backlight.present)
+ return;
+
+ mutex_lock(&dev_priv->backlight_lock);
+ if (!panel->backlight.enabled)
+ __intel_panel_enable_backlight(crtc_state, conn_state);
+
+ mutex_unlock(&dev_priv->backlight_lock);
+}
+
int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
@@ -1886,7 +1934,7 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
panel->backlight.get = vlv_get_backlight;
panel->backlight.hz_to_pwm = vlv_hz_to_pwm;
}
- } else if (IS_GEN4(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 4)) {
panel->backlight.setup = i965_setup_backlight;
panel->backlight.enable = i965_enable_backlight;
panel->backlight.disable = i965_disable_backlight;
diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.c b/drivers/gpu/drm/i915/intel_pipe_crc.c
index f3c9010e332a..a8554dc4f196 100644
--- a/drivers/gpu/drm/i915/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/intel_pipe_crc.c
@@ -44,7 +44,7 @@ static const char * const pipe_crc_sources[] = {
};
static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
- uint32_t *val)
+ u32 *val)
{
if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
*source = INTEL_PIPE_CRC_SOURCE_PIPE;
@@ -120,7 +120,7 @@ static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
enum pipe pipe,
enum intel_pipe_crc_source *source,
- uint32_t *val)
+ u32 *val)
{
bool need_stable_symbols = false;
@@ -165,7 +165,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
* - DisplayPort scrambling: used for EMI reduction
*/
if (need_stable_symbols) {
- uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+ u32 tmp = I915_READ(PORT_DFT2_G4X);
tmp |= DC_BALANCE_RESET_VLV;
switch (pipe) {
@@ -190,7 +190,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
enum pipe pipe,
enum intel_pipe_crc_source *source,
- uint32_t *val)
+ u32 *val)
{
bool need_stable_symbols = false;
@@ -244,7 +244,7 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
* - DisplayPort scrambling: used for EMI reduction
*/
if (need_stable_symbols) {
- uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+ u32 tmp = I915_READ(PORT_DFT2_G4X);
WARN_ON(!IS_G4X(dev_priv));
@@ -265,7 +265,7 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+ u32 tmp = I915_READ(PORT_DFT2_G4X);
switch (pipe) {
case PIPE_A:
@@ -289,7 +289,7 @@ static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
static void g4x_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+ u32 tmp = I915_READ(PORT_DFT2_G4X);
if (pipe == PIPE_A)
tmp &= ~PIPE_A_SCRAMBLE_RESET;
@@ -304,7 +304,7 @@ static void g4x_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
}
static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
- uint32_t *val)
+ u32 *val)
{
if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
*source = INTEL_PIPE_CRC_SOURCE_PIPE;
@@ -392,7 +392,7 @@ unlock:
static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
enum pipe pipe,
enum intel_pipe_crc_source *source,
- uint32_t *val,
+ u32 *val,
bool set_wa)
{
if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
@@ -427,13 +427,13 @@ static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv,
enum intel_pipe_crc_source *source, u32 *val,
bool set_wa)
{
- if (IS_GEN2(dev_priv))
+ if (IS_GEN(dev_priv, 2))
return i8xx_pipe_crc_ctl_reg(source, val);
else if (INTEL_GEN(dev_priv) < 5)
return i9xx_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return vlv_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
- else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv))
+ else if (IS_GEN_RANGE(dev_priv, 5, 6))
return ilk_pipe_crc_ctl_reg(source, val);
else
return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val, set_wa);
@@ -544,13 +544,13 @@ static int
intel_is_valid_crc_source(struct drm_i915_private *dev_priv,
const enum intel_pipe_crc_source source)
{
- if (IS_GEN2(dev_priv))
+ if (IS_GEN(dev_priv, 2))
return i8xx_crc_source_valid(dev_priv, source);
else if (INTEL_GEN(dev_priv) < 5)
return i9xx_crc_source_valid(dev_priv, source);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return vlv_crc_source_valid(dev_priv, source);
- else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv))
+ else if (IS_GEN_RANGE(dev_priv, 5, 6))
return ilk_crc_source_valid(dev_priv, source);
else
return ivb_crc_source_valid(dev_priv, source);
@@ -589,6 +589,7 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
enum intel_display_power_domain power_domain;
enum intel_pipe_crc_source source;
+ intel_wakeref_t wakeref;
u32 val = 0; /* shut up gcc */
int ret = 0;
@@ -598,7 +599,8 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
}
power_domain = POWER_DOMAIN_PIPE(crtc->index);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) {
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref) {
DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
return -EIO;
}
@@ -624,7 +626,7 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
pipe_crc->skipped = 0;
out:
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, power_domain, wakeref);
return ret;
}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index a26b4eddda25..54307f1df6cf 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -26,13 +26,16 @@
*/
#include <linux/cpufreq.h>
+#include <linux/module.h>
#include <linux/pm_runtime.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
+
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
-#include <linux/module.h>
-#include <drm/drm_atomic_helper.h>
/**
* DOC: RC6
@@ -480,7 +483,7 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
int sprite0_start, sprite1_start;
switch (pipe) {
- uint32_t dsparb, dsparb2, dsparb3;
+ u32 dsparb, dsparb2, dsparb3;
case PIPE_A:
dsparb = I915_READ(DSPARB);
dsparb2 = I915_READ(DSPARB2);
@@ -513,7 +516,7 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
enum i9xx_plane_id i9xx_plane)
{
- uint32_t dsparb = I915_READ(DSPARB);
+ u32 dsparb = I915_READ(DSPARB);
int size;
size = dsparb & 0x7f;
@@ -529,7 +532,7 @@ static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
enum i9xx_plane_id i9xx_plane)
{
- uint32_t dsparb = I915_READ(DSPARB);
+ u32 dsparb = I915_READ(DSPARB);
int size;
size = dsparb & 0x1ff;
@@ -546,7 +549,7 @@ static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
enum i9xx_plane_id i9xx_plane)
{
- uint32_t dsparb = I915_READ(DSPARB);
+ u32 dsparb = I915_READ(DSPARB);
int size;
size = dsparb & 0x7f;
@@ -667,9 +670,9 @@ static unsigned int intel_wm_method1(unsigned int pixel_rate,
unsigned int cpp,
unsigned int latency)
{
- uint64_t ret;
+ u64 ret;
- ret = (uint64_t) pixel_rate * cpp * latency;
+ ret = (u64)pixel_rate * cpp * latency;
ret = DIV_ROUND_UP_ULL(ret, 10000);
return ret;
@@ -1089,9 +1092,9 @@ static int g4x_fbc_fifo_size(int level)
}
}
-static uint16_t g4x_compute_wm(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- int level)
+static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ int level)
{
struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
@@ -1188,9 +1191,9 @@ static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
return dirty;
}
-static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate,
- uint32_t pri_val);
+static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
+ const struct intel_plane_state *pstate,
+ u32 pri_val);
static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
@@ -1399,10 +1402,9 @@ static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
return 0;
}
-static int g4x_compute_intermediate_wm(struct drm_device *dev,
- struct intel_crtc *crtc,
- struct intel_crtc_state *new_crtc_state)
+static int g4x_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate;
const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal;
struct intel_atomic_state *intel_state =
@@ -1599,9 +1601,9 @@ static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
}
}
-static uint16_t vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- int level)
+static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ int level)
{
struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
@@ -1969,7 +1971,7 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
spin_lock(&dev_priv->uncore.lock);
switch (crtc->pipe) {
- uint32_t dsparb, dsparb2, dsparb3;
+ u32 dsparb, dsparb2, dsparb3;
case PIPE_A:
dsparb = I915_READ_FW(DSPARB);
dsparb2 = I915_READ_FW(DSPARB2);
@@ -2032,10 +2034,9 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
#undef VLV_FIFO
-static int vlv_compute_intermediate_wm(struct drm_device *dev,
- struct intel_crtc *crtc,
- struct intel_crtc_state *new_crtc_state)
+static int vlv_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate;
const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal;
struct intel_atomic_state *intel_state =
@@ -2264,8 +2265,8 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
{
struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
const struct intel_watermark_params *wm_info;
- uint32_t fwater_lo;
- uint32_t fwater_hi;
+ u32 fwater_lo;
+ u32 fwater_hi;
int cwm, srwm = 1;
int fifo_size;
int planea_wm, planeb_wm;
@@ -2273,7 +2274,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
if (IS_I945GM(dev_priv))
wm_info = &i945_wm_info;
- else if (!IS_GEN2(dev_priv))
+ else if (!IS_GEN(dev_priv, 2))
wm_info = &i915_wm_info;
else
wm_info = &i830_a_wm_info;
@@ -2287,7 +2288,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
crtc->base.primary->state->fb;
int cpp;
- if (IS_GEN2(dev_priv))
+ if (IS_GEN(dev_priv, 2))
cpp = 4;
else
cpp = fb->format->cpp[0];
@@ -2302,7 +2303,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
planea_wm = wm_info->max_wm;
}
- if (IS_GEN2(dev_priv))
+ if (IS_GEN(dev_priv, 2))
wm_info = &i830_bc_wm_info;
fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_B);
@@ -2314,7 +2315,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
crtc->base.primary->state->fb;
int cpp;
- if (IS_GEN2(dev_priv))
+ if (IS_GEN(dev_priv, 2))
cpp = 4;
else
cpp = fb->format->cpp[0];
@@ -2408,7 +2409,7 @@ static void i845_update_wm(struct intel_crtc *unused_crtc)
struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
struct intel_crtc *crtc;
const struct drm_display_mode *adjusted_mode;
- uint32_t fwater_lo;
+ u32 fwater_lo;
int planea_wm;
crtc = single_enabled_crtc(dev_priv);
@@ -2457,8 +2458,7 @@ static unsigned int ilk_wm_method2(unsigned int pixel_rate,
return ret;
}
-static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
- uint8_t cpp)
+static u32 ilk_wm_fbc(u32 pri_val, u32 horiz_pixels, u8 cpp)
{
/*
* Neither of these should be possible since this function shouldn't be
@@ -2475,22 +2475,21 @@ static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
}
struct ilk_wm_maximums {
- uint16_t pri;
- uint16_t spr;
- uint16_t cur;
- uint16_t fbc;
+ u16 pri;
+ u16 spr;
+ u16 cur;
+ u16 fbc;
};
/*
* For both WM_PIPE and WM_LP.
* mem_value must be in 0.1us units.
*/
-static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate,
- uint32_t mem_value,
- bool is_lp)
+static u32 ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
+ const struct intel_plane_state *pstate,
+ u32 mem_value, bool is_lp)
{
- uint32_t method1, method2;
+ u32 method1, method2;
int cpp;
if (mem_value == 0)
@@ -2518,11 +2517,11 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
* For both WM_PIPE and WM_LP.
* mem_value must be in 0.1us units.
*/
-static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate,
- uint32_t mem_value)
+static u32 ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
+ const struct intel_plane_state *pstate,
+ u32 mem_value)
{
- uint32_t method1, method2;
+ u32 method1, method2;
int cpp;
if (mem_value == 0)
@@ -2545,9 +2544,9 @@ static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
* For both WM_PIPE and WM_LP.
* mem_value must be in 0.1us units.
*/
-static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate,
- uint32_t mem_value)
+static u32 ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
+ const struct intel_plane_state *pstate,
+ u32 mem_value)
{
int cpp;
@@ -2565,9 +2564,9 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
}
/* Only for WM_LP. */
-static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate,
- uint32_t pri_val)
+static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
+ const struct intel_plane_state *pstate,
+ u32 pri_val)
{
int cpp;
@@ -2626,13 +2625,12 @@ static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
}
/* Calculate the maximum primary/sprite plane watermark */
-static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
+static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
int level,
const struct intel_wm_config *config,
enum intel_ddb_partitioning ddb_partitioning,
bool is_sprite)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
unsigned int fifo_size = ilk_display_fifo_size(dev_priv);
/* if sprites aren't enabled, sprites get nothing */
@@ -2668,7 +2666,7 @@ static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
}
/* Calculate the maximum cursor plane watermark */
-static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
+static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv,
int level,
const struct intel_wm_config *config)
{
@@ -2677,19 +2675,19 @@ static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
return 64;
/* otherwise just report max that registers can hold */
- return ilk_cursor_wm_reg_max(to_i915(dev), level);
+ return ilk_cursor_wm_reg_max(dev_priv, level);
}
-static void ilk_compute_wm_maximums(const struct drm_device *dev,
+static void ilk_compute_wm_maximums(const struct drm_i915_private *dev_priv,
int level,
const struct intel_wm_config *config,
enum intel_ddb_partitioning ddb_partitioning,
struct ilk_wm_maximums *max)
{
- max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
- max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
- max->cur = ilk_cursor_wm_max(dev, level, config);
- max->fbc = ilk_fbc_wm_reg_max(to_i915(dev));
+ max->pri = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, false);
+ max->spr = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, true);
+ max->cur = ilk_cursor_wm_max(dev_priv, level, config);
+ max->fbc = ilk_fbc_wm_reg_max(dev_priv);
}
static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
@@ -2734,9 +2732,9 @@ static bool ilk_validate_wm_level(int level,
DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
level, result->cur_val, max->cur);
- result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
- result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
- result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
+ result->pri_val = min_t(u32, result->pri_val, max->pri);
+ result->spr_val = min_t(u32, result->spr_val, max->spr);
+ result->cur_val = min_t(u32, result->cur_val, max->cur);
result->enable = true;
}
@@ -2752,9 +2750,9 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
const struct intel_plane_state *curstate,
struct intel_wm_level *result)
{
- uint16_t pri_latency = dev_priv->wm.pri_latency[level];
- uint16_t spr_latency = dev_priv->wm.spr_latency[level];
- uint16_t cur_latency = dev_priv->wm.cur_latency[level];
+ u16 pri_latency = dev_priv->wm.pri_latency[level];
+ u16 spr_latency = dev_priv->wm.spr_latency[level];
+ u16 cur_latency = dev_priv->wm.cur_latency[level];
/* WM1+ latency values stored in 0.5us units */
if (level > 0) {
@@ -2778,7 +2776,7 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
result->enable = true;
}
-static uint32_t
+static u32
hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
{
const struct intel_atomic_state *intel_state =
@@ -2807,10 +2805,10 @@ hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
}
static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
- uint16_t wm[8])
+ u16 wm[8])
{
if (INTEL_GEN(dev_priv) >= 9) {
- uint32_t val;
+ u32 val;
int ret, i;
int level, max_level = ilk_wm_max_level(dev_priv);
@@ -2894,7 +2892,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
wm[0] += 1;
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- uint64_t sskpd = I915_READ64(MCH_SSKPD);
+ u64 sskpd = I915_READ64(MCH_SSKPD);
wm[0] = (sskpd >> 56) & 0xFF;
if (wm[0] == 0)
@@ -2904,14 +2902,14 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
wm[3] = (sskpd >> 20) & 0x1FF;
wm[4] = (sskpd >> 32) & 0x1FF;
} else if (INTEL_GEN(dev_priv) >= 6) {
- uint32_t sskpd = I915_READ(MCH_SSKPD);
+ u32 sskpd = I915_READ(MCH_SSKPD);
wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
} else if (INTEL_GEN(dev_priv) >= 5) {
- uint32_t mltr = I915_READ(MLTR_ILK);
+ u32 mltr = I915_READ(MLTR_ILK);
/* ILK primary LP0 latency is 700 ns */
wm[0] = 7;
@@ -2923,18 +2921,18 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
}
static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
- uint16_t wm[5])
+ u16 wm[5])
{
/* ILK sprite LP0 latency is 1300 ns */
- if (IS_GEN5(dev_priv))
+ if (IS_GEN(dev_priv, 5))
wm[0] = 13;
}
static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
- uint16_t wm[5])
+ u16 wm[5])
{
/* ILK cursor LP0 latency is 1300 ns */
- if (IS_GEN5(dev_priv))
+ if (IS_GEN(dev_priv, 5))
wm[0] = 13;
}
@@ -2953,7 +2951,7 @@ int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
const char *name,
- const uint16_t wm[8])
+ const u16 wm[8])
{
int level, max_level = ilk_wm_max_level(dev_priv);
@@ -2982,7 +2980,7 @@ static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
}
static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
- uint16_t wm[5], uint16_t min)
+ u16 wm[5], u16 min)
{
int level, max_level = ilk_wm_max_level(dev_priv);
@@ -2991,7 +2989,7 @@ static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
wm[0] = max(wm[0], min);
for (level = 1; level <= max_level; level++)
- wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));
+ wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5));
return true;
}
@@ -3061,7 +3059,7 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
- if (IS_GEN6(dev_priv)) {
+ if (IS_GEN(dev_priv, 6)) {
snb_wm_latency_quirk(dev_priv);
snb_wm_lp3_irq_quirk(dev_priv);
}
@@ -3073,7 +3071,7 @@ static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency);
}
-static bool ilk_validate_pipe_wm(struct drm_device *dev,
+static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
struct intel_pipe_wm *pipe_wm)
{
/* LP0 watermark maximums depend on this pipe alone */
@@ -3085,7 +3083,7 @@ static bool ilk_validate_pipe_wm(struct drm_device *dev,
struct ilk_wm_maximums max;
/* LP0 watermarks always use 1/2 DDB partitioning */
- ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
+ ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max);
/* At least LP0 must be valid */
if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
@@ -3150,7 +3148,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
- if (!ilk_validate_pipe_wm(dev, pipe_wm))
+ if (!ilk_validate_pipe_wm(dev_priv, pipe_wm))
return -EINVAL;
ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
@@ -3180,17 +3178,17 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
* state and the new state. These can be programmed to the hardware
* immediately.
*/
-static int ilk_compute_intermediate_wm(struct drm_device *dev,
- struct intel_crtc *intel_crtc,
- struct intel_crtc_state *newstate)
+static int ilk_compute_intermediate_wm(struct intel_crtc_state *newstate)
{
+ struct intel_crtc *intel_crtc = to_intel_crtc(newstate->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
struct intel_atomic_state *intel_state =
to_intel_atomic_state(newstate->base.state);
const struct intel_crtc_state *oldstate =
intel_atomic_get_old_crtc_state(intel_state, intel_crtc);
const struct intel_pipe_wm *b = &oldstate->wm.ilk.optimal;
- int level, max_level = ilk_wm_max_level(to_i915(dev));
+ int level, max_level = ilk_wm_max_level(dev_priv);
/*
* Start with the final, target watermarks, then combine with the
@@ -3223,7 +3221,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
* there's no safe way to transition from the old state to
* the new state, so we need to fail the atomic transaction.
*/
- if (!ilk_validate_pipe_wm(dev, a))
+ if (!ilk_validate_pipe_wm(dev_priv, a))
return -EINVAL;
/*
@@ -3239,7 +3237,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
/*
* Merge the watermarks from all active pipes for a specific level.
*/
-static void ilk_merge_wm_level(struct drm_device *dev,
+static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
int level,
struct intel_wm_level *ret_wm)
{
@@ -3247,7 +3245,7 @@ static void ilk_merge_wm_level(struct drm_device *dev,
ret_wm->enable = true;
- for_each_intel_crtc(dev, intel_crtc) {
+ for_each_intel_crtc(&dev_priv->drm, intel_crtc) {
const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk;
const struct intel_wm_level *wm = &active->wm[level];
@@ -3272,12 +3270,11 @@ static void ilk_merge_wm_level(struct drm_device *dev,
/*
* Merge all low power watermarks for all active pipes.
*/
-static void ilk_wm_merge(struct drm_device *dev,
+static void ilk_wm_merge(struct drm_i915_private *dev_priv,
const struct intel_wm_config *config,
const struct ilk_wm_maximums *max,
struct intel_pipe_wm *merged)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
int level, max_level = ilk_wm_max_level(dev_priv);
int last_enabled_level = max_level;
@@ -3293,7 +3290,7 @@ static void ilk_wm_merge(struct drm_device *dev,
for (level = 1; level <= max_level; level++) {
struct intel_wm_level *wm = &merged->wm[level];
- ilk_merge_wm_level(dev, level, wm);
+ ilk_merge_wm_level(dev_priv, level, wm);
if (level > last_enabled_level)
wm->enable = false;
@@ -3318,7 +3315,7 @@ static void ilk_wm_merge(struct drm_device *dev,
* What we should check here is whether FBC can be
* enabled sometime later.
*/
- if (IS_GEN5(dev_priv) && !merged->fbc_wm_enabled &&
+ if (IS_GEN(dev_priv, 5) && !merged->fbc_wm_enabled &&
intel_fbc_is_active(dev_priv)) {
for (level = 2; level <= max_level; level++) {
struct intel_wm_level *wm = &merged->wm[level];
@@ -3335,22 +3332,20 @@ static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
}
/* The value we need to program into the WM_LPx latency field */
-static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
+static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv,
+ int level)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
return 2 * level;
else
return dev_priv->wm.pri_latency[level];
}
-static void ilk_compute_wm_results(struct drm_device *dev,
+static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
const struct intel_pipe_wm *merged,
enum intel_ddb_partitioning partitioning,
struct ilk_wm_values *results)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc;
int level, wm_lp;
@@ -3370,7 +3365,7 @@ static void ilk_compute_wm_results(struct drm_device *dev,
* disabled. Doing otherwise could cause underruns.
*/
results->wm_lp[wm_lp - 1] =
- (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
+ (ilk_wm_lp_latency(dev_priv, level) << WM1_LP_LATENCY_SHIFT) |
(r->pri_val << WM1_LP_SR_SHIFT) |
r->cur_val;
@@ -3396,7 +3391,7 @@ static void ilk_compute_wm_results(struct drm_device *dev,
}
/* LP0 register values */
- for_each_intel_crtc(dev, intel_crtc) {
+ for_each_intel_crtc(&dev_priv->drm, intel_crtc) {
enum pipe pipe = intel_crtc->pipe;
const struct intel_wm_level *r =
&intel_crtc->wm.active.ilk.wm[0];
@@ -3415,11 +3410,12 @@ static void ilk_compute_wm_results(struct drm_device *dev,
/* Find the result with the highest level enabled. Check for enable_fbc_wm in
* case both are at the same level. Prefer r1 in case they're the same. */
-static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
- struct intel_pipe_wm *r1,
- struct intel_pipe_wm *r2)
+static struct intel_pipe_wm *
+ilk_find_best_result(struct drm_i915_private *dev_priv,
+ struct intel_pipe_wm *r1,
+ struct intel_pipe_wm *r2)
{
- int level, max_level = ilk_wm_max_level(to_i915(dev));
+ int level, max_level = ilk_wm_max_level(dev_priv);
int level1 = 0, level2 = 0;
for (level = 1; level <= max_level; level++) {
@@ -3540,7 +3536,7 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
{
struct ilk_wm_values *previous = &dev_priv->wm.hw;
unsigned int dirty;
- uint32_t val;
+ u32 val;
dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
if (!dirty)
@@ -3638,14 +3634,9 @@ static u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv)
* FIXME: We still don't have the proper code detect if we need to apply the WA,
* so assume we'll always need it in order to avoid underruns.
*/
-static bool skl_needs_memory_bw_wa(struct intel_atomic_state *state)
+static bool skl_needs_memory_bw_wa(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-
- if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv))
- return true;
-
- return false;
+ return IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv);
}
static bool
@@ -3677,25 +3668,25 @@ intel_enable_sagv(struct drm_i915_private *dev_priv)
if (dev_priv->sagv_status == I915_SAGV_ENABLED)
return 0;
- DRM_DEBUG_KMS("Enabling the SAGV\n");
+ DRM_DEBUG_KMS("Enabling SAGV\n");
mutex_lock(&dev_priv->pcu_lock);
ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
GEN9_SAGV_ENABLE);
- /* We don't need to wait for the SAGV when enabling */
+ /* We don't need to wait for SAGV when enabling */
mutex_unlock(&dev_priv->pcu_lock);
/*
* Some skl systems, pre-release machines in particular,
- * don't actually have an SAGV.
+ * don't actually have SAGV.
*/
if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
return 0;
} else if (ret < 0) {
- DRM_ERROR("Failed to enable the SAGV\n");
+ DRM_ERROR("Failed to enable SAGV\n");
return ret;
}
@@ -3714,7 +3705,7 @@ intel_disable_sagv(struct drm_i915_private *dev_priv)
if (dev_priv->sagv_status == I915_SAGV_DISABLED)
return 0;
- DRM_DEBUG_KMS("Disabling the SAGV\n");
+ DRM_DEBUG_KMS("Disabling SAGV\n");
mutex_lock(&dev_priv->pcu_lock);
/* bspec says to keep retrying for at least 1 ms */
@@ -3726,14 +3717,14 @@ intel_disable_sagv(struct drm_i915_private *dev_priv)
/*
* Some skl systems, pre-release machines in particular,
- * don't actually have an SAGV.
+ * don't actually have SAGV.
*/
if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
return 0;
} else if (ret < 0) {
- DRM_ERROR("Failed to disable the SAGV (%d)\n", ret);
+ DRM_ERROR("Failed to disable SAGV (%d)\n", ret);
return ret;
}
@@ -3756,15 +3747,15 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
if (!intel_has_sagv(dev_priv))
return false;
- if (IS_GEN9(dev_priv))
+ if (IS_GEN(dev_priv, 9))
sagv_block_time_us = 30;
- else if (IS_GEN10(dev_priv))
+ else if (IS_GEN(dev_priv, 10))
sagv_block_time_us = 20;
else
sagv_block_time_us = 10;
/*
- * SKL+ workaround: bspec recommends we disable the SAGV when we have
+ * SKL+ workaround: bspec recommends we disable SAGV when we have
* more than one pipe enabled
*
* If there are no active CRTCs, no additional checks need be performed
@@ -3797,7 +3788,7 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
latency = dev_priv->wm.skl_latency[level];
- if (skl_needs_memory_bw_wa(intel_state) &&
+ if (skl_needs_memory_bw_wa(dev_priv) &&
plane->base.state->fb->modifier ==
I915_FORMAT_MOD_X_TILED)
latency += 15;
@@ -3805,7 +3796,7 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
/*
* If any of the planes on this pipe don't enable wm levels that
* incur memory latencies higher than sagv_block_time_us we
- * can't enable the SAGV.
+ * can't enable SAGV.
*/
if (latency < sagv_block_time_us)
return false;
@@ -3834,8 +3825,13 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
/*
* 12GB/s is maximum BW supported by single DBuf slice.
+ *
+ * FIXME dbuf slice code is broken:
+ * - must wait for planes to stop using the slice before powering it off
+ * - plane straddling both slices is illegal in multi-pipe scenarios
+ * - should validate we stay within the hw bandwidth limits
*/
- if (num_active > 1 || total_data_bw >= GBps(12)) {
+ if (0 && (num_active > 1 || total_data_bw >= GBps(12))) {
ddb->enabled_slices = 2;
} else {
ddb->enabled_slices = 1;
@@ -3934,14 +3930,9 @@ static unsigned int skl_cursor_allocation(int num_active)
static void skl_ddb_entry_init_from_hw(struct drm_i915_private *dev_priv,
struct skl_ddb_entry *entry, u32 reg)
{
- u16 mask;
- if (INTEL_GEN(dev_priv) >= 11)
- mask = ICL_DDB_ENTRY_MASK;
- else
- mask = SKL_DDB_ENTRY_MASK;
- entry->start = reg & mask;
- entry->end = (reg >> DDB_ENTRY_END_SHIFT) & mask;
+ entry->start = reg & DDB_ENTRY_MASK;
+ entry->end = (reg >> DDB_ENTRY_END_SHIFT) & DDB_ENTRY_MASK;
if (entry->end)
entry->end += 1;
@@ -3994,10 +3985,12 @@ void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum intel_display_power_domain power_domain;
enum pipe pipe = crtc->pipe;
+ intel_wakeref_t wakeref;
enum plane_id plane_id;
power_domain = POWER_DOMAIN_PIPE(pipe);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
return;
for_each_plane_id_on_crtc(crtc, plane_id)
@@ -4006,7 +3999,7 @@ void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
&ddb_y[plane_id],
&ddb_uv[plane_id]);
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, power_domain, wakeref);
}
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
@@ -4036,7 +4029,7 @@ skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
const struct intel_plane_state *pstate)
{
struct intel_plane *plane = to_intel_plane(pstate->base.plane);
- uint32_t src_w, src_h, dst_w, dst_h;
+ u32 src_w, src_h, dst_w, dst_h;
uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
uint_fixed_16_16_t downscale_h, downscale_w;
@@ -4082,8 +4075,8 @@ skl_pipe_downscale_amount(const struct intel_crtc_state *crtc_state)
return pipe_downscale;
if (crtc_state->pch_pfit.enabled) {
- uint32_t src_w, src_h, dst_w, dst_h;
- uint32_t pfit_size = crtc_state->pch_pfit.size;
+ u32 src_w, src_h, dst_w, dst_h;
+ u32 pfit_size = crtc_state->pch_pfit.size;
uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
uint_fixed_16_16_t downscale_h, downscale_w;
@@ -4116,7 +4109,7 @@ int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
const struct drm_plane_state *pstate;
struct intel_plane_state *intel_pstate;
int crtc_clock, dotclk;
- uint32_t pipe_max_pixel_rate;
+ u32 pipe_max_pixel_rate;
uint_fixed_16_16_t pipe_downscale;
uint_fixed_16_16_t max_downscale = u32_to_fixed16(1);
@@ -4172,8 +4165,8 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
{
struct intel_plane *intel_plane =
to_intel_plane(intel_pstate->base.plane);
- uint32_t data_rate;
- uint32_t width = 0, height = 0;
+ u32 data_rate;
+ u32 width = 0, height = 0;
struct drm_framebuffer *fb;
u32 format;
uint_fixed_16_16_t down_scale_amount;
@@ -4306,102 +4299,6 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
return total_data_rate;
}
-static uint16_t
-skl_ddb_min_alloc(const struct drm_plane_state *pstate, const int plane)
-{
- struct drm_framebuffer *fb = pstate->fb;
- struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
- uint32_t src_w, src_h;
- uint32_t min_scanlines = 8;
- uint8_t plane_bpp;
-
- if (WARN_ON(!fb))
- return 0;
-
- /* For packed formats, and uv-plane, return 0 */
- if (plane == 1 && fb->format->format != DRM_FORMAT_NV12)
- return 0;
-
- /* For Non Y-tile return 8-blocks */
- if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
- fb->modifier != I915_FORMAT_MOD_Yf_TILED &&
- fb->modifier != I915_FORMAT_MOD_Y_TILED_CCS &&
- fb->modifier != I915_FORMAT_MOD_Yf_TILED_CCS)
- return 8;
-
- /*
- * Src coordinates are already rotated by 270 degrees for
- * the 90/270 degree plane rotation cases (to match the
- * GTT mapping), hence no need to account for rotation here.
- */
- src_w = drm_rect_width(&intel_pstate->base.src) >> 16;
- src_h = drm_rect_height(&intel_pstate->base.src) >> 16;
-
- /* Halve UV plane width and height for NV12 */
- if (plane == 1) {
- src_w /= 2;
- src_h /= 2;
- }
-
- plane_bpp = fb->format->cpp[plane];
-
- if (drm_rotation_90_or_270(pstate->rotation)) {
- switch (plane_bpp) {
- case 1:
- min_scanlines = 32;
- break;
- case 2:
- min_scanlines = 16;
- break;
- case 4:
- min_scanlines = 8;
- break;
- case 8:
- min_scanlines = 4;
- break;
- default:
- WARN(1, "Unsupported pixel depth %u for rotation",
- plane_bpp);
- min_scanlines = 32;
- }
- }
-
- return DIV_ROUND_UP((4 * src_w * plane_bpp), 512) * min_scanlines/4 + 3;
-}
-
-static void
-skl_ddb_calc_min(const struct intel_crtc_state *cstate, int num_active,
- uint16_t *minimum, uint16_t *uv_minimum)
-{
- const struct drm_plane_state *pstate;
- struct drm_plane *plane;
-
- drm_atomic_crtc_state_for_each_plane_state(plane, pstate, &cstate->base) {
- enum plane_id plane_id = to_intel_plane(plane)->id;
- struct intel_plane_state *plane_state = to_intel_plane_state(pstate);
-
- if (plane_id == PLANE_CURSOR)
- continue;
-
- /* slave plane must be invisible and calculated from master */
- if (!pstate->visible || WARN_ON(plane_state->slave))
- continue;
-
- if (!plane_state->linked_plane) {
- minimum[plane_id] = skl_ddb_min_alloc(pstate, 0);
- uv_minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
- } else {
- enum plane_id y_plane_id =
- plane_state->linked_plane->id;
-
- minimum[y_plane_id] = skl_ddb_min_alloc(pstate, 0);
- minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
- }
- }
-
- minimum[PLANE_CURSOR] = skl_cursor_allocation(num_active);
-}
-
static int
skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
struct skl_ddb_allocation *ddb /* out */)
@@ -4411,15 +4308,17 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
- uint16_t alloc_size, start;
- uint16_t minimum[I915_MAX_PLANES] = {};
- uint16_t uv_minimum[I915_MAX_PLANES] = {};
+ struct skl_plane_wm *wm;
+ u16 alloc_size, start = 0;
+ u16 total[I915_MAX_PLANES] = {};
+ u16 uv_total[I915_MAX_PLANES] = {};
u64 total_data_rate;
enum plane_id plane_id;
int num_active;
u64 plane_data_rate[I915_MAX_PLANES] = {};
u64 uv_plane_data_rate[I915_MAX_PLANES] = {};
- uint16_t total_min_blocks = 0;
+ u32 blocks;
+ int level;
/* Clear the partitioning for disabled planes. */
memset(cstate->wm.skl.plane_ddb_y, 0, sizeof(cstate->wm.skl.plane_ddb_y));
@@ -4449,81 +4348,135 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
if (alloc_size == 0)
return 0;
- skl_ddb_calc_min(cstate, num_active, minimum, uv_minimum);
+ /* Allocate fixed number of blocks for cursor. */
+ total[PLANE_CURSOR] = skl_cursor_allocation(num_active);
+ alloc_size -= total[PLANE_CURSOR];
+ cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].start =
+ alloc->end - total[PLANE_CURSOR];
+ cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;
+
+ if (total_data_rate == 0)
+ return 0;
/*
- * 1. Allocate the mininum required blocks for each active plane
- * and allocate the cursor, it doesn't require extra allocation
- * proportional to the data rate.
+ * Find the highest watermark level for which we can satisfy the block
+ * requirement of active planes.
*/
+ for (level = ilk_wm_max_level(dev_priv); level >= 0; level--) {
+ blocks = 0;
+ for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ if (plane_id == PLANE_CURSOR)
+ continue;
- for_each_plane_id_on_crtc(intel_crtc, plane_id) {
- total_min_blocks += minimum[plane_id];
- total_min_blocks += uv_minimum[plane_id];
+ wm = &cstate->wm.skl.optimal.planes[plane_id];
+ blocks += wm->wm[level].min_ddb_alloc;
+ blocks += wm->uv_wm[level].min_ddb_alloc;
+ }
+
+ if (blocks < alloc_size) {
+ alloc_size -= blocks;
+ break;
+ }
}
- if (total_min_blocks > alloc_size) {
+ if (level < 0) {
DRM_DEBUG_KMS("Requested display configuration exceeds system DDB limitations");
- DRM_DEBUG_KMS("minimum required %d/%d\n", total_min_blocks,
- alloc_size);
+ DRM_DEBUG_KMS("minimum required %d/%d\n", blocks,
+ alloc_size);
return -EINVAL;
}
- alloc_size -= total_min_blocks;
- cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR];
- cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;
-
/*
- * 2. Distribute the remaining space in proportion to the amount of
- * data each plane needs to fetch from memory.
- *
- * FIXME: we may not allocate every single block here.
+ * Grant each plane the blocks it requires at the highest achievable
+ * watermark level, plus an extra share of the leftover blocks
+ * proportional to its relative data rate.
*/
- if (total_data_rate == 0)
- return 0;
-
- start = alloc->start;
for_each_plane_id_on_crtc(intel_crtc, plane_id) {
- u64 data_rate, uv_data_rate;
- uint16_t plane_blocks, uv_plane_blocks;
+ u64 rate;
+ u16 extra;
if (plane_id == PLANE_CURSOR)
continue;
- data_rate = plane_data_rate[plane_id];
-
/*
- * allocation for (packed formats) or (uv-plane part of planar format):
- * promote the expression to 64 bits to avoid overflowing, the
- * result is < available as data_rate / total_data_rate < 1
+ * We've accounted for all active planes; remaining planes are
+ * all disabled.
*/
- plane_blocks = minimum[plane_id];
- plane_blocks += div64_u64(alloc_size * data_rate, total_data_rate);
+ if (total_data_rate == 0)
+ break;
- /* Leave disabled planes at (0,0) */
- if (data_rate) {
- cstate->wm.skl.plane_ddb_y[plane_id].start = start;
- cstate->wm.skl.plane_ddb_y[plane_id].end = start + plane_blocks;
- }
+ wm = &cstate->wm.skl.optimal.planes[plane_id];
+
+ rate = plane_data_rate[plane_id];
+ extra = min_t(u16, alloc_size,
+ DIV64_U64_ROUND_UP(alloc_size * rate,
+ total_data_rate));
+ total[plane_id] = wm->wm[level].min_ddb_alloc + extra;
+ alloc_size -= extra;
+ total_data_rate -= rate;
- start += plane_blocks;
+ if (total_data_rate == 0)
+ break;
- /* Allocate DDB for UV plane for planar format/NV12 */
- uv_data_rate = uv_plane_data_rate[plane_id];
+ rate = uv_plane_data_rate[plane_id];
+ extra = min_t(u16, alloc_size,
+ DIV64_U64_ROUND_UP(alloc_size * rate,
+ total_data_rate));
+ uv_total[plane_id] = wm->uv_wm[level].min_ddb_alloc + extra;
+ alloc_size -= extra;
+ total_data_rate -= rate;
+ }
+ WARN_ON(alloc_size != 0 || total_data_rate != 0);
- uv_plane_blocks = uv_minimum[plane_id];
- uv_plane_blocks += div64_u64(alloc_size * uv_data_rate, total_data_rate);
+ /* Set the actual DDB start/end points for each plane */
+ start = alloc->start;
+ for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ struct skl_ddb_entry *plane_alloc, *uv_plane_alloc;
+
+ if (plane_id == PLANE_CURSOR)
+ continue;
+
+ plane_alloc = &cstate->wm.skl.plane_ddb_y[plane_id];
+ uv_plane_alloc = &cstate->wm.skl.plane_ddb_uv[plane_id];
/* Gen11+ uses a separate plane for UV watermarks */
- WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_plane_blocks);
+ WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_total[plane_id]);
+
+ /* Leave disabled planes at (0,0) */
+ if (total[plane_id]) {
+ plane_alloc->start = start;
+ start += total[plane_id];
+ plane_alloc->end = start;
+ }
+
+ if (uv_total[plane_id]) {
+ uv_plane_alloc->start = start;
+ start += uv_total[plane_id];
+ uv_plane_alloc->end = start;
+ }
+ }
- if (uv_data_rate) {
- cstate->wm.skl.plane_ddb_uv[plane_id].start = start;
- cstate->wm.skl.plane_ddb_uv[plane_id].end =
- start + uv_plane_blocks;
+ /*
+ * When we calculated watermark values we didn't know how high
+ * of a level we'd actually be able to hit, so we just marked
+ * all levels as "enabled." Go back now and disable the ones
+ * that aren't actually possible.
+ */
+ for (level++; level <= ilk_wm_max_level(dev_priv); level++) {
+ for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ wm = &cstate->wm.skl.optimal.planes[plane_id];
+ memset(&wm->wm[level], 0, sizeof(wm->wm[level]));
}
+ }
- start += uv_plane_blocks;
+ /*
+ * Go back and disable the transition watermark if it turns out we
+ * don't have enough DDB blocks for it.
+ */
+ for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ wm = &cstate->wm.skl.optimal.planes[plane_id];
+ if (wm->trans_wm.plane_res_b >= total[plane_id])
+ memset(&wm->trans_wm, 0, sizeof(wm->trans_wm));
}
return 0;
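A minimal standalone sketch of the proportional split above (made-up plane
data rates, not part of the patch): each plane's extra share is rounded up
from its fraction of the remaining data rate while both the leftover block
pool and the rate total shrink, which is why the WARN_ON after the loop can
insist that both counters reach exactly zero.

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	uint64_t rate[] = { 300, 100, 50 };	/* hypothetical plane data rates */
	uint64_t total_rate = 450;
	unsigned int alloc_size = 100;		/* leftover blocks after the wm minimums */

	for (int i = 0; i < 3; i++) {
		unsigned int extra = DIV_ROUND_UP(alloc_size * rate[i], total_rate);

		if (extra > alloc_size)
			extra = alloc_size;
		alloc_size -= extra;
		total_rate -= rate[i];
		printf("plane %d: %u extra blocks\n", i, extra);	/* 67, 22, 11 */
	}
	return 0;
}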
@@ -4536,10 +4489,10 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
* 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
*/
static uint_fixed_16_16_t
-skl_wm_method1(const struct drm_i915_private *dev_priv, uint32_t pixel_rate,
- uint8_t cpp, uint32_t latency, uint32_t dbuf_block_size)
+skl_wm_method1(const struct drm_i915_private *dev_priv, u32 pixel_rate,
+ u8 cpp, u32 latency, u32 dbuf_block_size)
{
- uint32_t wm_intermediate_val;
+ u32 wm_intermediate_val;
uint_fixed_16_16_t ret;
if (latency == 0)
@@ -4554,12 +4507,11 @@ skl_wm_method1(const struct drm_i915_private *dev_priv, uint32_t pixel_rate,
return ret;
}
-static uint_fixed_16_16_t skl_wm_method2(uint32_t pixel_rate,
- uint32_t pipe_htotal,
- uint32_t latency,
- uint_fixed_16_16_t plane_blocks_per_line)
+static uint_fixed_16_16_t
+skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
+ uint_fixed_16_16_t plane_blocks_per_line)
{
- uint32_t wm_intermediate_val;
+ u32 wm_intermediate_val;
uint_fixed_16_16_t ret;
if (latency == 0)
@@ -4575,8 +4527,8 @@ static uint_fixed_16_16_t skl_wm_method2(uint32_t pixel_rate,
static uint_fixed_16_16_t
intel_get_linetime_us(const struct intel_crtc_state *cstate)
{
- uint32_t pixel_rate;
- uint32_t crtc_htotal;
+ u32 pixel_rate;
+ u32 crtc_htotal;
uint_fixed_16_16_t linetime_us;
if (!cstate->base.active)
@@ -4593,11 +4545,11 @@ intel_get_linetime_us(const struct intel_crtc_state *cstate)
return linetime_us;
}
-static uint32_t
+static u32
skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
const struct intel_plane_state *pstate)
{
- uint64_t adjusted_pixel_rate;
+ u64 adjusted_pixel_rate;
uint_fixed_16_16_t downscale_amount;
/* Shouldn't reach here on disabled planes... */
@@ -4624,10 +4576,7 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *cstate,
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_plane_state *pstate = &intel_pstate->base;
const struct drm_framebuffer *fb = pstate->fb;
- uint32_t interm_pbpl;
- struct intel_atomic_state *state =
- to_intel_atomic_state(cstate->base.state);
- bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
+ u32 interm_pbpl;
/* only NV12 format has two planes */
if (color_plane == 1 && fb->format->format != DRM_FORMAT_NV12) {
@@ -4663,7 +4612,7 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *cstate,
intel_pstate);
if (INTEL_GEN(dev_priv) >= 11 &&
- fb->modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 8)
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1)
wp->dbuf_block_size = 256;
else
wp->dbuf_block_size = 512;
@@ -4688,7 +4637,7 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *cstate,
wp->y_min_scanlines = 4;
}
- if (apply_memory_bw_wa)
+ if (skl_needs_memory_bw_wa(dev_priv))
wp->y_min_scanlines *= 2;
wp->plane_bytes_per_line = wp->width * wp->cpp;
@@ -4702,7 +4651,7 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *cstate,
wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
wp->y_min_scanlines);
- } else if (wp->x_tiled && IS_GEN9(dev_priv)) {
+ } else if (wp->x_tiled && IS_GEN(dev_priv, 9)) {
interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
wp->dbuf_block_size);
wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
@@ -4720,28 +4669,34 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *cstate,
return 0;
}
-static int skl_compute_plane_wm(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *intel_pstate,
- uint16_t ddb_allocation,
- int level,
- const struct skl_wm_params *wp,
- const struct skl_wm_level *result_prev,
- struct skl_wm_level *result /* out */)
+static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level)
+{
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ return true;
+
+ /* The number of lines is ignored for the level 0 watermark. */
+ return level > 0;
+}
+
+static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
+ const struct intel_plane_state *intel_pstate,
+ int level,
+ const struct skl_wm_params *wp,
+ const struct skl_wm_level *result_prev,
+ struct skl_wm_level *result /* out */)
{
struct drm_i915_private *dev_priv =
to_i915(intel_pstate->base.plane->dev);
- const struct drm_plane_state *pstate = &intel_pstate->base;
- uint32_t latency = dev_priv->wm.skl_latency[level];
+ u32 latency = dev_priv->wm.skl_latency[level];
uint_fixed_16_16_t method1, method2;
uint_fixed_16_16_t selected_result;
- uint32_t res_blocks, res_lines;
- struct intel_atomic_state *state =
- to_intel_atomic_state(cstate->base.state);
- bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
- uint32_t min_disp_buf_needed;
+ u32 res_blocks, res_lines, min_ddb_alloc = 0;
- if (latency == 0)
- return level == 0 ? -EINVAL : 0;
+ if (latency == 0) {
+ /* reject it */
+ result->min_ddb_alloc = U16_MAX;
+ return;
+ }
/* Display WA #1141: kbl,cfl */
if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) ||
@@ -4749,7 +4704,7 @@ static int skl_compute_plane_wm(const struct intel_crtc_state *cstate,
dev_priv->ipc_enabled)
latency += 4;
- if (apply_memory_bw_wa && wp->x_tiled)
+ if (skl_needs_memory_bw_wa(dev_priv) && wp->x_tiled)
latency += 15;
method1 = skl_wm_method1(dev_priv, wp->plane_pixel_rate,
@@ -4766,15 +4721,8 @@ static int skl_compute_plane_wm(const struct intel_crtc_state *cstate,
wp->dbuf_block_size < 1) &&
(wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
selected_result = method2;
- } else if (ddb_allocation >=
- fixed16_to_u32_round_up(wp->plane_blocks_per_line)) {
- if (IS_GEN9(dev_priv) &&
- !IS_GEMINILAKE(dev_priv))
- selected_result = min_fixed16(method1, method2);
- else
- selected_result = method2;
} else if (latency >= wp->linetime_us) {
- if (IS_GEN9(dev_priv) &&
+ if (IS_GEN(dev_priv, 9) &&
!IS_GEMINILAKE(dev_priv))
selected_result = min_fixed16(method1, method2);
else
@@ -4788,85 +4736,76 @@ static int skl_compute_plane_wm(const struct intel_crtc_state *cstate,
res_lines = div_round_up_fixed16(selected_result,
wp->plane_blocks_per_line);
- /* Display WA #1125: skl,bxt,kbl,glk */
- if (level == 0 && wp->rc_surface)
- res_blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
+ if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) {
+ /* Display WA #1125: skl,bxt,kbl */
+ if (level == 0 && wp->rc_surface)
+ res_blocks +=
+ fixed16_to_u32_round_up(wp->y_tile_minimum);
+
+ /* Display WA #1126: skl,bxt,kbl */
+ if (level >= 1 && level <= 7) {
+ if (wp->y_tiled) {
+ res_blocks +=
+ fixed16_to_u32_round_up(wp->y_tile_minimum);
+ res_lines += wp->y_min_scanlines;
+ } else {
+ res_blocks++;
+ }
- /* Display WA #1126: skl,bxt,kbl,glk */
- if (level >= 1 && level <= 7) {
- if (wp->y_tiled) {
- res_blocks += fixed16_to_u32_round_up(
- wp->y_tile_minimum);
- res_lines += wp->y_min_scanlines;
- } else {
- res_blocks++;
+ /*
+ * Make sure result blocks for higher latency levels are
+ * at least as high as the level below the current level.
+ * Assumption in DDB algorithm optimization for special
+ * cases. Also covers Display WA #1125 for RC.
+ */
+ if (result_prev->plane_res_b > res_blocks)
+ res_blocks = result_prev->plane_res_b;
}
-
- /*
- * Make sure result blocks for higher latency levels are atleast
- * as high as level below the current level.
- * Assumption in DDB algorithm optimization for special cases.
- * Also covers Display WA #1125 for RC.
- */
- if (result_prev->plane_res_b > res_blocks)
- res_blocks = result_prev->plane_res_b;
}
if (INTEL_GEN(dev_priv) >= 11) {
if (wp->y_tiled) {
- uint32_t extra_lines;
- uint_fixed_16_16_t fp_min_disp_buf_needed;
+ int extra_lines;
if (res_lines % wp->y_min_scanlines == 0)
extra_lines = wp->y_min_scanlines;
else
extra_lines = wp->y_min_scanlines * 2 -
- res_lines % wp->y_min_scanlines;
+ res_lines % wp->y_min_scanlines;
- fp_min_disp_buf_needed = mul_u32_fixed16(res_lines +
- extra_lines,
- wp->plane_blocks_per_line);
- min_disp_buf_needed = fixed16_to_u32_round_up(
- fp_min_disp_buf_needed);
+ min_ddb_alloc = mul_round_up_u32_fixed16(res_lines + extra_lines,
+ wp->plane_blocks_per_line);
} else {
- min_disp_buf_needed = DIV_ROUND_UP(res_blocks * 11, 10);
+ min_ddb_alloc = res_blocks +
+ DIV_ROUND_UP(res_blocks, 10);
}
- } else {
- min_disp_buf_needed = res_blocks;
}
- if ((level > 0 && res_lines > 31) ||
- res_blocks >= ddb_allocation ||
- min_disp_buf_needed >= ddb_allocation) {
- /*
- * If there are no valid level 0 watermarks, then we can't
- * support this display configuration.
- */
- if (level) {
- return 0;
- } else {
- struct drm_plane *plane = pstate->plane;
+ if (!skl_wm_has_lines(dev_priv, level))
+ res_lines = 0;
- DRM_DEBUG_KMS("Requested display configuration exceeds system watermark limitations\n");
- DRM_DEBUG_KMS("[PLANE:%d:%s] blocks required = %u/%u, lines required = %u/31\n",
- plane->base.id, plane->name,
- res_blocks, ddb_allocation, res_lines);
- return -EINVAL;
- }
+ if (res_lines > 31) {
+ /* reject it */
+ result->min_ddb_alloc = U16_MAX;
+ return;
}
- /* The number of lines are ignored for the level 0 watermark. */
+ /*
+ * If res_lines is valid, assume we can use this watermark level
+ * for now. We'll come back and disable it after we calculate the
+ * DDB allocation if it turns out we don't actually have enough
+ * blocks to satisfy it.
+ */
result->plane_res_b = res_blocks;
result->plane_res_l = res_lines;
+ /* Bspec says: value >= plane ddb allocation -> invalid, hence the +1 here */
+ result->min_ddb_alloc = max(min_ddb_alloc, res_blocks) + 1;
result->plane_en = true;
-
- return 0;
}
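A minimal sketch of the two gen11+ minimum-DDB rules above, with made-up
numbers (the real y-tiled path then multiplies the padded line count by
plane_blocks_per_line, and every result still gets the Bspec "+1" because a
DDB allocation equal to the watermark value counts as invalid):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int res_blocks = 27, res_lines = 10, y_min_scanlines = 4;
	int extra_lines;

	/* linear surfaces: the blocks themselves plus ~10%, rounded up */
	printf("linear min ddb: %d blocks\n",
	       res_blocks + DIV_ROUND_UP(res_blocks, 10));	/* 30 */

	/* y-tiled: pad to the next tile-row multiple, plus one more row */
	if (res_lines % y_min_scanlines == 0)
		extra_lines = y_min_scanlines;
	else
		extra_lines = y_min_scanlines * 2 -
			      res_lines % y_min_scanlines;
	printf("y-tiled sizing lines: %d\n", res_lines + extra_lines);	/* 16 */

	return 0;
}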
-static int
+static void
skl_compute_wm_levels(const struct intel_crtc_state *cstate,
const struct intel_plane_state *intel_pstate,
- uint16_t ddb_blocks,
const struct skl_wm_params *wm_params,
struct skl_wm_level *levels)
{
@@ -4874,45 +4813,30 @@ skl_compute_wm_levels(const struct intel_crtc_state *cstate,
to_i915(intel_pstate->base.plane->dev);
int level, max_level = ilk_wm_max_level(dev_priv);
struct skl_wm_level *result_prev = &levels[0];
- int ret;
for (level = 0; level <= max_level; level++) {
struct skl_wm_level *result = &levels[level];
- ret = skl_compute_plane_wm(cstate,
- intel_pstate,
- ddb_blocks,
- level,
- wm_params,
- result_prev,
- result);
- if (ret)
- return ret;
+ skl_compute_plane_wm(cstate, intel_pstate, level, wm_params,
+ result_prev, result);
result_prev = result;
}
-
- return 0;
}
-static uint32_t
+static u32
skl_compute_linetime_wm(const struct intel_crtc_state *cstate)
{
struct drm_atomic_state *state = cstate->base.state;
struct drm_i915_private *dev_priv = to_i915(state->dev);
uint_fixed_16_16_t linetime_us;
- uint32_t linetime_wm;
+ u32 linetime_wm;
linetime_us = intel_get_linetime_us(cstate);
-
- if (is_fixed16_zero(linetime_us))
- return 0;
-
linetime_wm = fixed16_to_u32_round_up(mul_u32_fixed16(8, linetime_us));
- /* Display WA #1135: bxt:ALL GLK:ALL */
- if ((IS_BROXTON(dev_priv) || IS_GEMINILAKE(dev_priv)) &&
- dev_priv->ipc_enabled)
+ /* Display WA #1135: BXT:ALL GLK:ALL */
+ if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled)
linetime_wm /= 2;
return linetime_wm;
@@ -4920,14 +4844,13 @@ skl_compute_linetime_wm(const struct intel_crtc_state *cstate)
static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
const struct skl_wm_params *wp,
- struct skl_plane_wm *wm,
- uint16_t ddb_allocation)
+ struct skl_plane_wm *wm)
{
struct drm_device *dev = cstate->base.crtc->dev;
const struct drm_i915_private *dev_priv = to_i915(dev);
- uint16_t trans_min, trans_y_tile_min;
- const uint16_t trans_amount = 10; /* This is configurable amount */
- uint16_t wm0_sel_res_b, trans_offset_b, res_blocks;
+ u16 trans_min, trans_y_tile_min;
+ const u16 trans_amount = 10; /* This is a configurable amount */
+ u16 wm0_sel_res_b, trans_offset_b, res_blocks;
/* Transition WM are not recommended by HW team for GEN9 */
if (INTEL_GEN(dev_priv) <= 9)
@@ -4956,8 +4879,8 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
wm0_sel_res_b = wm->wm[0].plane_res_b - 1;
if (wp->y_tiled) {
- trans_y_tile_min = (uint16_t) mul_round_up_u32_fixed16(2,
- wp->y_tile_minimum);
+ trans_y_tile_min =
+ (u16)mul_round_up_u32_fixed16(2, wp->y_tile_minimum);
res_blocks = max(wm0_sel_res_b, trans_y_tile_min) +
trans_offset_b;
} else {
@@ -4969,12 +4892,13 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
}
- res_blocks += 1;
-
- if (res_blocks < ddb_allocation) {
- wm->trans_wm.plane_res_b = res_blocks;
- wm->trans_wm.plane_en = true;
- }
+ /*
+ * Just assume we can enable the transition watermark. After
+ * computing the DDB we'll come back and disable it if that
+ * assumption turns out to be false.
+ */
+ wm->trans_wm.plane_res_b = res_blocks + 1;
+ wm->trans_wm.plane_en = true;
}
static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
@@ -4982,7 +4906,6 @@ static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
enum plane_id plane_id, int color_plane)
{
struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
- u16 ddb_blocks = skl_ddb_entry_size(&crtc_state->wm.skl.plane_ddb_y[plane_id]);
struct skl_wm_params wm_params;
int ret;
@@ -4991,12 +4914,8 @@ static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- ret = skl_compute_wm_levels(crtc_state, plane_state,
- ddb_blocks, &wm_params, wm->wm);
- if (ret)
- return ret;
-
- skl_compute_transition_wm(crtc_state, &wm_params, wm, ddb_blocks);
+ skl_compute_wm_levels(crtc_state, plane_state, &wm_params, wm->wm);
+ skl_compute_transition_wm(crtc_state, &wm_params, wm);
return 0;
}
@@ -5006,7 +4925,6 @@ static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
enum plane_id plane_id)
{
struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
- u16 ddb_blocks = skl_ddb_entry_size(&crtc_state->wm.skl.plane_ddb_uv[plane_id]);
struct skl_wm_params wm_params;
int ret;
@@ -5018,10 +4936,7 @@ static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- ret = skl_compute_wm_levels(crtc_state, plane_state,
- ddb_blocks, &wm_params, wm->uv_wm);
- if (ret)
- return ret;
+ skl_compute_wm_levels(crtc_state, plane_state, &wm_params, wm->uv_wm);
return 0;
}
@@ -5139,7 +5054,7 @@ static void skl_write_wm_level(struct drm_i915_private *dev_priv,
i915_reg_t reg,
const struct skl_wm_level *level)
{
- uint32_t val = 0;
+ u32 val = 0;
if (level->plane_en) {
val |= PLANE_WM_EN;
@@ -5230,6 +5145,23 @@ static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv,
return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm);
}
+static bool skl_pipe_wm_equals(struct intel_crtc *crtc,
+ const struct skl_pipe_wm *wm1,
+ const struct skl_pipe_wm *wm2)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum plane_id plane_id;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ if (!skl_plane_wm_equals(dev_priv,
+ &wm1->planes[plane_id],
+ &wm2->planes[plane_id]))
+ return false;
+ }
+
+ return wm1->linetime == wm2->linetime;
+}
+
static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
const struct skl_ddb_entry *b)
{
@@ -5251,35 +5183,32 @@ bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
return false;
}
-static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
+static int skl_update_pipe_wm(struct intel_crtc_state *cstate,
const struct skl_pipe_wm *old_pipe_wm,
struct skl_pipe_wm *pipe_wm, /* out */
bool *changed /* out */)
{
- struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate);
+ struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc);
int ret;
- ret = skl_build_pipe_wm(intel_cstate, pipe_wm);
+ ret = skl_build_pipe_wm(cstate, pipe_wm);
if (ret)
return ret;
- if (!memcmp(old_pipe_wm, pipe_wm, sizeof(*pipe_wm)))
- *changed = false;
- else
- *changed = true;
+ *changed = !skl_pipe_wm_equals(crtc, old_pipe_wm, pipe_wm);
return 0;
}
-static uint32_t
-pipes_modified(struct drm_atomic_state *state)
+static u32
+pipes_modified(struct intel_atomic_state *state)
{
- struct drm_crtc *crtc;
- struct drm_crtc_state *cstate;
- uint32_t i, ret = 0;
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *cstate;
+ u32 i, ret = 0;
- for_each_new_crtc_in_state(state, crtc, cstate, i)
- ret |= drm_crtc_mask(crtc);
+ for_each_new_intel_crtc_in_state(state, crtc, cstate, i)
+ ret |= drm_crtc_mask(&crtc->base);
return ret;
}
@@ -5314,11 +5243,10 @@ skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
}
static int
-skl_compute_ddb(struct drm_atomic_state *state)
+skl_compute_ddb(struct intel_atomic_state *state)
{
- const struct drm_i915_private *dev_priv = to_i915(state->dev);
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
+ const struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
struct intel_crtc_state *old_crtc_state;
struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
@@ -5326,7 +5254,7 @@ skl_compute_ddb(struct drm_atomic_state *state)
memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
- for_each_oldnew_intel_crtc_in_state(intel_state, crtc, old_crtc_state,
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
ret = skl_allocate_pipe_ddb(new_crtc_state, ddb);
if (ret)
@@ -5372,15 +5300,13 @@ skl_print_wm_changes(struct intel_atomic_state *state)
}
static int
-skl_ddb_add_affected_pipes(struct drm_atomic_state *state, bool *changed)
+skl_ddb_add_affected_pipes(struct intel_atomic_state *state, bool *changed)
{
- struct drm_device *dev = state->dev;
+ struct drm_device *dev = state->base.dev;
const struct drm_i915_private *dev_priv = to_i915(dev);
- const struct drm_crtc *crtc;
- const struct drm_crtc_state *cstate;
- struct intel_crtc *intel_crtc;
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- uint32_t realloc_pipes = pipes_modified(state);
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *crtc_state;
+ u32 realloc_pipes = pipes_modified(state);
int ret, i;
/*
@@ -5398,7 +5324,7 @@ skl_ddb_add_affected_pipes(struct drm_atomic_state *state, bool *changed)
* since any racing commits that want to update them would need to
* hold _all_ CRTC state mutexes.
*/
- for_each_new_crtc_in_state(state, crtc, cstate, i)
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)
(*changed) = true;
if (!*changed)
@@ -5412,20 +5338,20 @@ skl_ddb_add_affected_pipes(struct drm_atomic_state *state, bool *changed)
*/
if (dev_priv->wm.distrust_bios_wm) {
ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
- state->acquire_ctx);
+ state->base.acquire_ctx);
if (ret)
return ret;
- intel_state->active_pipe_changes = ~0;
+ state->active_pipe_changes = ~0;
/*
- * We usually only initialize intel_state->active_crtcs if we
+ * We usually only initialize state->active_crtcs if we
* we're doing a modeset; make sure this field is always
* initialized during the sanitization process that happens
* on the first commit too.
*/
- if (!intel_state->modeset)
- intel_state->active_crtcs = dev_priv->active_crtcs;
+ if (!state->modeset)
+ state->active_crtcs = dev_priv->active_crtcs;
}
/*
@@ -5441,21 +5367,19 @@ skl_ddb_add_affected_pipes(struct drm_atomic_state *state, bool *changed)
* any other display updates race with this transaction, so we need
* to grab the lock on *all* CRTC's.
*/
- if (intel_state->active_pipe_changes || intel_state->modeset) {
+ if (state->active_pipe_changes || state->modeset) {
realloc_pipes = ~0;
- intel_state->wm_results.dirty_pipes = ~0;
+ state->wm_results.dirty_pipes = ~0;
}
/*
* We're not recomputing for the pipes not included in the commit, so
* make sure we start with the current state.
*/
- for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) {
- struct intel_crtc_state *cstate;
-
- cstate = intel_atomic_get_crtc_state(state, intel_crtc);
- if (IS_ERR(cstate))
- return PTR_ERR(cstate);
+ for_each_intel_crtc_mask(dev, crtc, realloc_pipes) {
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
}
return 0;
@@ -5522,12 +5446,12 @@ static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
}
static int
-skl_compute_wm(struct drm_atomic_state *state)
+skl_compute_wm(struct intel_atomic_state *state)
{
- struct drm_crtc *crtc;
- struct drm_crtc_state *cstate;
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct skl_ddb_values *results = &intel_state->wm_results;
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *cstate;
+ struct intel_crtc_state *old_crtc_state;
+ struct skl_ddb_values *results = &state->wm_results;
struct skl_pipe_wm *pipe_wm;
bool changed = false;
int ret, i;
@@ -5539,47 +5463,35 @@ skl_compute_wm(struct drm_atomic_state *state)
if (ret || !changed)
return ret;
- ret = skl_compute_ddb(state);
- if (ret)
- return ret;
-
/*
* Calculate WM's for all pipes that are part of this transaction.
- * Note that the DDB allocation above may have added more CRTC's that
+ * Note that skl_ddb_add_affected_pipes may have added more CRTC's that
* weren't otherwise being modified (and set bits in dirty_pipes) if
* pipe allocations had to change.
- *
- * FIXME: Now that we're doing this in the atomic check phase, we
- * should allow skl_update_pipe_wm() to return failure in cases where
- * no suitable watermark values can be found.
*/
- for_each_new_crtc_in_state(state, crtc, cstate, i) {
- struct intel_crtc_state *intel_cstate =
- to_intel_crtc_state(cstate);
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ cstate, i) {
const struct skl_pipe_wm *old_pipe_wm =
- &to_intel_crtc_state(crtc->state)->wm.skl.optimal;
+ &old_crtc_state->wm.skl.optimal;
- pipe_wm = &intel_cstate->wm.skl.optimal;
+ pipe_wm = &cstate->wm.skl.optimal;
ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm, &changed);
if (ret)
return ret;
- ret = skl_wm_add_affected_planes(intel_state,
- to_intel_crtc(crtc));
+ ret = skl_wm_add_affected_planes(state, crtc);
if (ret)
return ret;
if (changed)
- results->dirty_pipes |= drm_crtc_mask(crtc);
-
- if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
- /* This pipe's WM's did not change */
- continue;
-
- intel_cstate->update_wm_pre = true;
+ results->dirty_pipes |= drm_crtc_mask(&crtc->base);
}
- skl_print_wm_changes(intel_state);
+ ret = skl_compute_ddb(state);
+ if (ret)
+ return ret;
+
+ skl_print_wm_changes(state);
return 0;
}
@@ -5617,13 +5529,13 @@ static void skl_initial_wm(struct intel_atomic_state *state,
mutex_unlock(&dev_priv->wm.wm_mutex);
}
-static void ilk_compute_wm_config(struct drm_device *dev,
+static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
struct intel_wm_config *config)
{
struct intel_crtc *crtc;
/* Compute the currently _active_ config */
- for_each_intel_crtc(dev, crtc) {
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
if (!wm->pipe_enabled)
@@ -5637,25 +5549,24 @@ static void ilk_compute_wm_config(struct drm_device *dev,
static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = &dev_priv->drm;
struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
struct ilk_wm_maximums max;
struct intel_wm_config config = {};
struct ilk_wm_values results = {};
enum intel_ddb_partitioning partitioning;
- ilk_compute_wm_config(dev, &config);
+ ilk_compute_wm_config(dev_priv, &config);
- ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
- ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
+ ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_1_2, &max);
+ ilk_wm_merge(dev_priv, &config, &max, &lp_wm_1_2);
/* 5/6 split only in single pipe config on IVB+ */
if (INTEL_GEN(dev_priv) >= 7 &&
config.num_pipes_active == 1 && config.sprites_enabled) {
- ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
- ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
+ ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_5_6, &max);
+ ilk_wm_merge(dev_priv, &config, &max, &lp_wm_5_6);
- best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
+ best_lp_wm = ilk_find_best_result(dev_priv, &lp_wm_1_2, &lp_wm_5_6);
} else {
best_lp_wm = &lp_wm_1_2;
}
@@ -5663,7 +5574,7 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
partitioning = (best_lp_wm == &lp_wm_1_2) ?
INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
- ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);
+ ilk_compute_wm_results(dev_priv, best_lp_wm, partitioning, &results);
ilk_write_wm_values(dev_priv, &results);
}
@@ -5694,7 +5605,7 @@ static void ilk_optimize_watermarks(struct intel_atomic_state *state,
mutex_unlock(&dev_priv->wm.wm_mutex);
}
-static inline void skl_wm_level_from_reg_val(uint32_t val,
+static inline void skl_wm_level_from_reg_val(u32 val,
struct skl_wm_level *level)
{
level->plane_en = val & PLANE_WM_EN;
@@ -5703,19 +5614,18 @@ static inline void skl_wm_level_from_reg_val(uint32_t val,
PLANE_WM_LINES_MASK;
}
-void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
+void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
struct skl_pipe_wm *out)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
int level, max_level;
enum plane_id plane_id;
- uint32_t val;
+ u32 val;
max_level = ilk_wm_max_level(dev_priv);
- for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ for_each_plane_id_on_crtc(crtc, plane_id) {
struct skl_plane_wm *wm = &out->planes[plane_id];
for (level = 0; level <= max_level; level++) {
@@ -5735,30 +5645,27 @@ void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
skl_wm_level_from_reg_val(val, &wm->trans_wm);
}
- if (!intel_crtc->active)
+ if (!crtc->active)
return;
out->linetime = I915_READ(PIPE_WM_LINETIME(pipe));
}
-void skl_wm_get_hw_state(struct drm_device *dev)
+void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct skl_ddb_values *hw = &dev_priv->wm.skl_hw;
struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
- struct drm_crtc *crtc;
- struct intel_crtc *intel_crtc;
+ struct intel_crtc *crtc;
struct intel_crtc_state *cstate;
skl_ddb_get_hw_state(dev_priv, ddb);
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- intel_crtc = to_intel_crtc(crtc);
- cstate = to_intel_crtc_state(crtc->state);
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ cstate = to_intel_crtc_state(crtc->base.state);
skl_pipe_wm_get_hw_state(crtc, &cstate->wm.skl.optimal);
- if (intel_crtc->active)
- hw->dirty_pipes |= drm_crtc_mask(crtc);
+ if (crtc->active)
+ hw->dirty_pipes |= drm_crtc_mask(&crtc->base);
}
if (dev_priv->active_crtcs) {
@@ -5767,15 +5674,14 @@ void skl_wm_get_hw_state(struct drm_device *dev)
}
}
-static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
+static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct ilk_wm_values *hw = &dev_priv->wm.hw;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
+ struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->base.state);
struct intel_pipe_wm *active = &cstate->wm.ilk.optimal;
- enum pipe pipe = intel_crtc->pipe;
+ enum pipe pipe = crtc->pipe;
static const i915_reg_t wm0_pipe_reg[] = {
[PIPE_A] = WM0_PIPEA_ILK,
[PIPE_B] = WM0_PIPEB_ILK,
@@ -5788,7 +5694,7 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
memset(active, 0, sizeof(*active));
- active->pipe_enabled = intel_crtc->active;
+ active->pipe_enabled = crtc->active;
if (active->pipe_enabled) {
u32 tmp = hw->wm_pipe[pipe];
@@ -5816,7 +5722,7 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
active->wm[level].enable = true;
}
- intel_crtc->wm.active.ilk = *active;
+ crtc->wm.active.ilk = *active;
}
#define _FW_WM(value, plane) \
@@ -5827,7 +5733,7 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
struct g4x_wm_values *wm)
{
- uint32_t tmp;
+ u32 tmp;
tmp = I915_READ(DSPFW1);
wm->sr.plane = _FW_WM(tmp, SR);
@@ -5854,7 +5760,7 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
struct vlv_wm_values *wm)
{
enum pipe pipe;
- uint32_t tmp;
+ u32 tmp;
for_each_pipe(dev_priv, pipe) {
tmp = I915_READ(VLV_DDL(pipe));
@@ -5926,9 +5832,8 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
#undef _FW_WM
#undef _FW_WM_VLV
-void g4x_wm_get_hw_state(struct drm_device *dev)
+void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct g4x_wm_values *wm = &dev_priv->wm.g4x;
struct intel_crtc *crtc;
@@ -5936,7 +5841,7 @@ void g4x_wm_get_hw_state(struct drm_device *dev)
wm->cxsr = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
- for_each_intel_crtc(dev, crtc) {
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct g4x_wm_state *active = &crtc->wm.active.g4x;
@@ -6067,9 +5972,8 @@ void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->wm.wm_mutex);
}
-void vlv_wm_get_hw_state(struct drm_device *dev)
+void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct vlv_wm_values *wm = &dev_priv->wm.vlv;
struct intel_crtc *crtc;
u32 val;
@@ -6113,7 +6017,7 @@ void vlv_wm_get_hw_state(struct drm_device *dev)
mutex_unlock(&dev_priv->pcu_lock);
}
- for_each_intel_crtc(dev, crtc) {
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct vlv_wm_state *active = &crtc->wm.active.vlv;
@@ -6230,15 +6134,14 @@ static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
*/
}
-void ilk_wm_get_hw_state(struct drm_device *dev)
+void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct ilk_wm_values *hw = &dev_priv->wm.hw;
- struct drm_crtc *crtc;
+ struct intel_crtc *crtc;
ilk_init_lp_watermarks(dev_priv);
- for_each_crtc(dev, crtc)
+ for_each_intel_crtc(&dev_priv->drm, crtc)
ilk_pipe_wm_get_hw_state(crtc);
hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
@@ -6339,10 +6242,6 @@ void intel_init_ipc(struct drm_i915_private *dev_priv)
*/
DEFINE_SPINLOCK(mchdev_lock);
-/* Global for IPS driver to get at the current i915 device. Protected by
- * mchdev_lock. */
-static struct drm_i915_private *i915_mch_dev;
-
bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val)
{
u16 rgvswctl;
@@ -6805,7 +6704,7 @@ void gen6_rps_boost(struct i915_request *rq,
if (!rps->enabled)
return;
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
+ if (i915_request_signaled(rq))
return;
/* Serializes with i915_request_retire() */
@@ -7049,7 +6948,7 @@ static void gen9_enable_rps(struct drm_i915_private *dev_priv)
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
/* Program defaults and thresholds for RPS */
- if (IS_GEN9(dev_priv))
+ if (IS_GEN(dev_priv, 9))
I915_WRITE(GEN6_RC_VIDEO_FREQ,
GEN9_FREQUENCY(dev_priv->gt_pm.rps.rp1_freq));
@@ -7285,9 +7184,9 @@ static void gen6_enable_rc6(struct drm_i915_private *dev_priv)
rc6vids = 0;
ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
- if (IS_GEN6(dev_priv) && ret) {
+ if (IS_GEN(dev_priv, 6) && ret) {
DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
- } else if (IS_GEN6(dev_priv) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
+ } else if (IS_GEN(dev_priv, 6) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
rc6vids &= 0xffff00;
@@ -7412,7 +7311,7 @@ static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
- switch (INTEL_INFO(dev_priv)->sseu.eu_total) {
+ switch (RUNTIME_INFO(dev_priv)->sseu.eu_total) {
case 8:
/* (2 * 4) config */
rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
@@ -7985,16 +7884,17 @@ static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
- unsigned long val;
+ intel_wakeref_t wakeref;
+ unsigned long val = 0;
- if (!IS_GEN5(dev_priv))
+ if (!IS_GEN(dev_priv, 5))
return 0;
- spin_lock_irq(&mchdev_lock);
-
- val = __i915_chipset_val(dev_priv);
-
- spin_unlock_irq(&mchdev_lock);
+ with_intel_runtime_pm(dev_priv, wakeref) {
+ spin_lock_irq(&mchdev_lock);
+ val = __i915_chipset_val(dev_priv);
+ spin_unlock_irq(&mchdev_lock);
+ }
return val;
}
@@ -8071,14 +7971,16 @@ static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
- if (!IS_GEN5(dev_priv))
- return;
+ intel_wakeref_t wakeref;
- spin_lock_irq(&mchdev_lock);
-
- __i915_update_gfx_val(dev_priv);
+ if (!IS_GEN(dev_priv, 5))
+ return;
- spin_unlock_irq(&mchdev_lock);
+ with_intel_runtime_pm(dev_priv, wakeref) {
+ spin_lock_irq(&mchdev_lock);
+ __i915_update_gfx_val(dev_priv);
+ spin_unlock_irq(&mchdev_lock);
+ }
}
static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
@@ -8120,18 +8022,34 @@ static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
- unsigned long val;
+ intel_wakeref_t wakeref;
+ unsigned long val = 0;
- if (!IS_GEN5(dev_priv))
+ if (!IS_GEN(dev_priv, 5))
return 0;
- spin_lock_irq(&mchdev_lock);
+ with_intel_runtime_pm(dev_priv, wakeref) {
+ spin_lock_irq(&mchdev_lock);
+ val = __i915_gfx_val(dev_priv);
+ spin_unlock_irq(&mchdev_lock);
+ }
- val = __i915_gfx_val(dev_priv);
+ return val;
+}
- spin_unlock_irq(&mchdev_lock);
+static struct drm_i915_private *i915_mch_dev;
- return val;
+static struct drm_i915_private *mchdev_get(void)
+{
+ struct drm_i915_private *i915;
+
+ rcu_read_lock();
+ i915 = i915_mch_dev;
+ if (!kref_get_unless_zero(&i915->drm.ref))
+ i915 = NULL;
+ rcu_read_unlock();
+
+ return i915;
}
/**
@@ -8142,23 +8060,24 @@ unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
*/
unsigned long i915_read_mch_val(void)
{
- struct drm_i915_private *dev_priv;
- unsigned long chipset_val, graphics_val, ret = 0;
-
- spin_lock_irq(&mchdev_lock);
- if (!i915_mch_dev)
- goto out_unlock;
- dev_priv = i915_mch_dev;
-
- chipset_val = __i915_chipset_val(dev_priv);
- graphics_val = __i915_gfx_val(dev_priv);
+ struct drm_i915_private *i915;
+ unsigned long chipset_val = 0;
+ unsigned long graphics_val = 0;
+ intel_wakeref_t wakeref;
- ret = chipset_val + graphics_val;
+ i915 = mchdev_get();
+ if (!i915)
+ return 0;
-out_unlock:
- spin_unlock_irq(&mchdev_lock);
+ with_intel_runtime_pm(i915, wakeref) {
+ spin_lock_irq(&mchdev_lock);
+ chipset_val = __i915_chipset_val(i915);
+ graphics_val = __i915_gfx_val(i915);
+ spin_unlock_irq(&mchdev_lock);
+ }
- return ret;
+ drm_dev_put(&i915->drm);
+ return chipset_val + graphics_val;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);
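These mch helpers exist for the intel-ips platform driver, which resolves
them at runtime rather than linking against i915. A rough consumer-side
sketch of that pattern (assumed usage, loosely modelled on intel-ips and
requiring <linux/module.h>; not part of this patch):

unsigned long (*read_mch)(void);
unsigned long mch_val = 0;

read_mch = symbol_get(i915_read_mch_val);
if (read_mch) {
	mch_val = read_mch();
	symbol_put(i915_read_mch_val);
}

With mchdev_get() taking a reference on the drm device, such a caller can no
longer race against teardown; it simply gets 0 back once the device is gone.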
@@ -8169,23 +8088,19 @@ EXPORT_SYMBOL_GPL(i915_read_mch_val);
*/
bool i915_gpu_raise(void)
{
- struct drm_i915_private *dev_priv;
- bool ret = true;
+ struct drm_i915_private *i915;
- spin_lock_irq(&mchdev_lock);
- if (!i915_mch_dev) {
- ret = false;
- goto out_unlock;
- }
- dev_priv = i915_mch_dev;
-
- if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
- dev_priv->ips.max_delay--;
+ i915 = mchdev_get();
+ if (!i915)
+ return false;
-out_unlock:
+ spin_lock_irq(&mchdev_lock);
+ if (i915->ips.max_delay > i915->ips.fmax)
+ i915->ips.max_delay--;
spin_unlock_irq(&mchdev_lock);
- return ret;
+ drm_dev_put(&i915->drm);
+ return true;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);
@@ -8197,23 +8112,19 @@ EXPORT_SYMBOL_GPL(i915_gpu_raise);
*/
bool i915_gpu_lower(void)
{
- struct drm_i915_private *dev_priv;
- bool ret = true;
+ struct drm_i915_private *i915;
- spin_lock_irq(&mchdev_lock);
- if (!i915_mch_dev) {
- ret = false;
- goto out_unlock;
- }
- dev_priv = i915_mch_dev;
-
- if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
- dev_priv->ips.max_delay++;
+ i915 = mchdev_get();
+ if (!i915)
+ return false;
-out_unlock:
+ spin_lock_irq(&mchdev_lock);
+ if (i915->ips.max_delay < i915->ips.min_delay)
+ i915->ips.max_delay++;
spin_unlock_irq(&mchdev_lock);
- return ret;
+ drm_dev_put(&i915->drm);
+ return true;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);
@@ -8224,13 +8135,16 @@ EXPORT_SYMBOL_GPL(i915_gpu_lower);
*/
bool i915_gpu_busy(void)
{
- bool ret = false;
+ struct drm_i915_private *i915;
+ bool ret;
- spin_lock_irq(&mchdev_lock);
- if (i915_mch_dev)
- ret = i915_mch_dev->gt.awake;
- spin_unlock_irq(&mchdev_lock);
+ i915 = mchdev_get();
+ if (!i915)
+ return false;
+ ret = i915->gt.awake;
+
+ drm_dev_put(&i915->drm);
return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);
@@ -8243,24 +8157,19 @@ EXPORT_SYMBOL_GPL(i915_gpu_busy);
*/
bool i915_gpu_turbo_disable(void)
{
- struct drm_i915_private *dev_priv;
- bool ret = true;
-
- spin_lock_irq(&mchdev_lock);
- if (!i915_mch_dev) {
- ret = false;
- goto out_unlock;
- }
- dev_priv = i915_mch_dev;
-
- dev_priv->ips.max_delay = dev_priv->ips.fstart;
+ struct drm_i915_private *i915;
+ bool ret;
- if (!ironlake_set_drps(dev_priv, dev_priv->ips.fstart))
- ret = false;
+ i915 = mchdev_get();
+ if (!i915)
+ return false;
-out_unlock:
+ spin_lock_irq(&mchdev_lock);
+ i915->ips.max_delay = i915->ips.fstart;
+ ret = ironlake_set_drps(i915, i915->ips.fstart);
spin_unlock_irq(&mchdev_lock);
+ drm_dev_put(&i915->drm);
return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
@@ -8289,18 +8198,14 @@ void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
{
/* We only register the i915 ips part with intel-ips once everything is
* set up, to avoid intel-ips sneaking in and reading bogus values. */
- spin_lock_irq(&mchdev_lock);
- i915_mch_dev = dev_priv;
- spin_unlock_irq(&mchdev_lock);
+ rcu_assign_pointer(i915_mch_dev, dev_priv);
ips_ping_for_i915_load();
}
void intel_gpu_ips_teardown(void)
{
- spin_lock_irq(&mchdev_lock);
- i915_mch_dev = NULL;
- spin_unlock_irq(&mchdev_lock);
+ rcu_assign_pointer(i915_mch_dev, NULL);
}
static void intel_init_emon(struct drm_i915_private *dev_priv)
@@ -8410,7 +8315,7 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
intel_freq_opcode(dev_priv, 450));
/* After setting max-softlimit, find the overclock max freq */
- if (IS_GEN6(dev_priv) ||
+ if (IS_GEN(dev_priv, 6) ||
IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
u32 params = 0;
@@ -8639,7 +8544,7 @@ static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
{
- uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
+ u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
/*
* Required for FBC
@@ -8711,7 +8616,7 @@ static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
int pipe;
- uint32_t val;
+ u32 val;
/*
* On Ibex Peak and Cougar Point, we need to disable clock
@@ -8746,7 +8651,7 @@ static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
{
- uint32_t tmp;
+ u32 tmp;
tmp = I915_READ(MCH_SSKPD);
if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
@@ -8756,7 +8661,7 @@ static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
{
- uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
+ u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
@@ -8850,7 +8755,7 @@ static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
{
- uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
+ u32 reg = I915_READ(GEN7_FF_THREAD_MODE);
/*
* WaVSThreadDispatchOverride:ivb,vlv
@@ -8886,7 +8791,7 @@ static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
static void lpt_suspend_hw(struct drm_i915_private *dev_priv)
{
if (HAS_PCH_LPT_LP(dev_priv)) {
- uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
+ u32 val = I915_READ(SOUTH_DSPCLK_GATE_D);
val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
@@ -9124,7 +9029,7 @@ static void hsw_init_clock_gating(struct drm_i915_private *dev_priv)
static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
{
- uint32_t snpcr;
+ u32 snpcr;
I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
@@ -9333,7 +9238,7 @@ static void chv_init_clock_gating(struct drm_i915_private *dev_priv)
static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
{
- uint32_t dspclk_gate;
+ u32 dspclk_gate;
I915_WRITE(RENCLK_GATE_D1, 0);
I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
@@ -9480,9 +9385,9 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.init_clock_gating = ivb_init_clock_gating;
else if (IS_VALLEYVIEW(dev_priv))
dev_priv->display.init_clock_gating = vlv_init_clock_gating;
- else if (IS_GEN6(dev_priv))
+ else if (IS_GEN(dev_priv, 6))
dev_priv->display.init_clock_gating = gen6_init_clock_gating;
- else if (IS_GEN5(dev_priv))
+ else if (IS_GEN(dev_priv, 5))
dev_priv->display.init_clock_gating = ilk_init_clock_gating;
else if (IS_G4X(dev_priv))
dev_priv->display.init_clock_gating = g4x_init_clock_gating;
@@ -9490,11 +9395,11 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.init_clock_gating = i965gm_init_clock_gating;
else if (IS_I965G(dev_priv))
dev_priv->display.init_clock_gating = i965g_init_clock_gating;
- else if (IS_GEN3(dev_priv))
+ else if (IS_GEN(dev_priv, 3))
dev_priv->display.init_clock_gating = gen3_init_clock_gating;
else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
dev_priv->display.init_clock_gating = i85x_init_clock_gating;
- else if (IS_GEN2(dev_priv))
+ else if (IS_GEN(dev_priv, 2))
dev_priv->display.init_clock_gating = i830_init_clock_gating;
else {
MISSING_CASE(INTEL_DEVID(dev_priv));
@@ -9508,7 +9413,7 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
/* For cxsr */
if (IS_PINEVIEW(dev_priv))
i915_pineview_get_mem_freq(dev_priv);
- else if (IS_GEN5(dev_priv))
+ else if (IS_GEN(dev_priv, 5))
i915_ironlake_get_mem_freq(dev_priv);
/* For FIFO watermark updates */
@@ -9520,9 +9425,9 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
} else if (HAS_PCH_SPLIT(dev_priv)) {
ilk_setup_wm_latency(dev_priv);
- if ((IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[1] &&
+ if ((IS_GEN(dev_priv, 5) && dev_priv->wm.pri_latency[1] &&
dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
- (!IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[0] &&
+ (!IS_GEN(dev_priv, 5) && dev_priv->wm.pri_latency[0] &&
dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
dev_priv->display.compute_intermediate_wm =
@@ -9563,12 +9468,12 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
dev_priv->display.update_wm = NULL;
} else
dev_priv->display.update_wm = pineview_update_wm;
- } else if (IS_GEN4(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 4)) {
dev_priv->display.update_wm = i965_update_wm;
- } else if (IS_GEN3(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 3)) {
dev_priv->display.update_wm = i9xx_update_wm;
dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
- } else if (IS_GEN2(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 2)) {
if (INTEL_INFO(dev_priv)->num_pipes == 1) {
dev_priv->display.update_wm = i845_update_wm;
dev_priv->display.get_fifo_size = i845_get_fifo_size;
@@ -9583,7 +9488,7 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
{
- uint32_t flags =
+ u32 flags =
I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
switch (flags) {
@@ -9606,7 +9511,7 @@ static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
static inline int gen7_check_mailbox_status(struct drm_i915_private *dev_priv)
{
- uint32_t flags =
+ u32 flags =
I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
switch (flags) {
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index f71970df9936..84a0fb981561 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -51,7 +51,6 @@
* must be correctly synchronized/cancelled when shutting down the pipe."
*/
-#include <drm/drmP.h>
#include "intel_drv.h"
#include "i915_drv.h"
@@ -71,17 +70,17 @@ static bool psr_global_enabled(u32 debug)
static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
const struct intel_crtc_state *crtc_state)
{
- /* Disable PSR2 by default for all platforms */
- if (i915_modparams.enable_psr == -1)
- return false;
-
/* Cannot enable DSC and PSR2 simultaneously */
WARN_ON(crtc_state->dsc_params.compression_enable &&
crtc_state->has_psr2);
switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
+ case I915_PSR_DEBUG_DISABLE:
case I915_PSR_DEBUG_FORCE_PSR1:
return false;
+ case I915_PSR_DEBUG_DEFAULT:
+ if (i915_modparams.enable_psr <= 0)
+ return false;
default:
return crtc_state->has_psr2;
}
@@ -231,7 +230,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
{
- uint8_t dprx = 0;
+ u8 dprx = 0;
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
&dprx) != 1)
@@ -241,7 +240,7 @@ static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
{
- uint8_t alpm_caps = 0;
+ u8 alpm_caps = 0;
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
&alpm_caps) != 1)
@@ -261,6 +260,32 @@ static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
return val;
}
+static u16 intel_dp_get_su_x_granulartiy(struct intel_dp *intel_dp)
+{
+ u16 val;
+ ssize_t r;
+
+ /*
+ * Return the default X granularity if granularity is not required or
+ * if the DPCD read fails.
+ */
+ if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED))
+ return 4;
+
+ r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &val, 2);
+ if (r != 2)
+ DRM_DEBUG_KMS("Unable to read DP_PSR2_SU_X_GRANULARITY\n");
+
+ /*
+ * Spec says that if the value read is 0 the default granularity should
+ * be used instead.
+ */
+ if (r != 2 || val == 0)
+ val = 4;
+
+ return val;
+}
+
void intel_psr_init_dpcd(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv =
@@ -315,6 +340,8 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
if (dev_priv->psr.sink_psr2_support) {
dev_priv->psr.colorimetry_support =
intel_dp_get_colorimetry_status(intel_dp);
+ dev_priv->psr.su_x_granularity =
+ intel_dp_get_su_x_granulartiy(intel_dp);
}
}
}
@@ -357,7 +384,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 aux_clock_divider, aux_ctl;
int i;
- static const uint8_t aux_msg[] = {
+ static const u8 aux_msg[] = {
[0] = DP_AUX_NATIVE_WRITE << 4,
[1] = DP_SET_POWER >> 8,
[2] = DP_SET_POWER & 0xff,
@@ -394,13 +421,15 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp)
if (dev_priv->psr.psr2_enabled) {
drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
DP_ALPM_ENABLE);
- dpcd_val |= DP_PSR_ENABLE_PSR2;
+ dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
+ } else {
+ if (dev_priv->psr.link_standby)
+ dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
+
+ if (INTEL_GEN(dev_priv) >= 8)
+ dpcd_val |= DP_PSR_CRC_VERIFICATION;
}
- if (dev_priv->psr.link_standby)
- dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
- if (!dev_priv->psr.psr2_enabled && INTEL_GEN(dev_priv) >= 8)
- dpcd_val |= DP_PSR_CRC_VERIFICATION;
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
@@ -474,9 +503,6 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;
- /* FIXME: selective update is probably totally broken because it doesn't
- * mesh at all with our frontbuffer tracking. And the hw alone isn't
- * good enough. */
val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
val |= EDP_Y_COORDINATE_ENABLE;
@@ -525,7 +551,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
psr_max_h = 4096;
psr_max_v = 2304;
- } else if (IS_GEN9(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 9)) {
psr_max_h = 3640;
psr_max_v = 2304;
}
@@ -537,6 +563,18 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
+ /*
+ * HW sends SU blocks of size four scan lines, which means the starting
+ * X coordinate and Y granularity requirements will always be met. We
+ * only need to validate the SU block width is a multiple of
+ * x granularity.
+ */
+ if (crtc_hdisplay % dev_priv->psr.su_x_granularity) {
+ DRM_DEBUG_KMS("PSR2 not enabled, hdisplay(%d) not multiple of %d\n",
+ crtc_hdisplay, dev_priv->psr.su_x_granularity);
+ return false;
+ }
+
return true;
}
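A quick standalone illustration of the new width check (made-up panel
widths; granularity 4 is the DPCD default returned by the helper above):

#include <stdio.h>

int main(void)
{
	int su_x_granularity = 4;
	int widths[] = { 1920, 1366 };	/* hypothetical hdisplay values */

	for (int i = 0; i < 2; i++)
		printf("hdisplay %d: PSR2 %s\n", widths[i],
		       widths[i] % su_x_granularity ? "rejected" : "allowed");

	return 0;
}

1920 divides evenly so PSR2 stays available, while 1366 leaves a remainder
of 2 and would fall back to PSR1.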
@@ -647,17 +685,14 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
hsw_psr_setup_aux(intel_dp);
- if (dev_priv->psr.psr2_enabled) {
+ if (dev_priv->psr.psr2_enabled && (IS_GEN(dev_priv, 9) &&
+ !IS_GEMINILAKE(dev_priv))) {
i915_reg_t reg = gen9_chicken_trans_reg(dev_priv,
cpu_transcoder);
u32 chicken = I915_READ(reg);
- if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv))
- chicken |= (PSR2_VSC_ENABLE_PROG_HEADER
- | PSR2_ADD_VERTICAL_LINE_COUNT);
-
- else
- chicken &= ~VSC_DATA_SEL_SOFTWARE_CONTROL;
+ chicken |= PSR2_VSC_ENABLE_PROG_HEADER |
+ PSR2_ADD_VERTICAL_LINE_COUNT;
I915_WRITE(reg, chicken);
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index fbeaec3994e7..7f841dba87b3 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -29,11 +29,11 @@
#include <linux/log2.h>
-#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem_render_state.h"
+#include "i915_reset.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_workarounds.h"
@@ -43,17 +43,10 @@
*/
#define LEGACY_REQUEST_SIZE 200
-static unsigned int __intel_ring_space(unsigned int head,
- unsigned int tail,
- unsigned int size)
+static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
{
- /*
- * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
- * same cacheline, the Head Pointer must not be greater than the Tail
- * Pointer."
- */
- GEM_BUG_ON(!is_power_of_2(size));
- return (head - tail - CACHELINE_BYTES) & (size - 1);
+ return (i915_ggtt_offset(engine->status_page.vma) +
+ I915_GEM_HWS_INDEX_ADDR);
}
unsigned int intel_ring_update_space(struct intel_ring *ring)
@@ -133,7 +126,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
cmd = MI_FLUSH;
if (mode & EMIT_INVALIDATE) {
cmd |= MI_EXE_FLUSH;
- if (IS_G4X(rq->i915) || IS_GEN5(rq->i915))
+ if (IS_G4X(rq->i915) || IS_GEN(rq->i915, 5))
cmd |= MI_INVALIDATE_ISP;
}
@@ -217,7 +210,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
* really our business. That leaves only stall at scoreboard.
*/
static int
-intel_emit_post_sync_nonzero_flush(struct i915_request *rq)
+gen6_emit_post_sync_nonzero_flush(struct i915_request *rq)
{
u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
u32 *cs;
@@ -257,7 +250,7 @@ gen6_render_ring_flush(struct i915_request *rq, u32 mode)
int ret;
/* Force SNB workarounds for PIPE_CONTROL flushes */
- ret = intel_emit_post_sync_nonzero_flush(rq);
+ ret = gen6_emit_post_sync_nonzero_flush(rq);
if (ret)
return ret;
@@ -300,6 +293,43 @@ gen6_render_ring_flush(struct i915_request *rq, u32 mode)
return 0;
}
+static u32 *gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
+{
+ /* First we do the gen6_emit_post_sync_nonzero_flush w/a */
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
+ *cs++ = 0;
+ *cs++ = 0;
+
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = PIPE_CONTROL_QW_WRITE;
+ *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = 0;
+
+ /* Finally we can flush and with it emit the breadcrumb */
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_DC_FLUSH_ENABLE |
+ PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_CS_STALL);
+ *cs++ = rq->timeline->hwsp_offset | PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = rq->fence.seqno;
+
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
+ *cs++ = intel_hws_seqno_address(rq->engine) | PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = rq->global_seqno;
+
+ *cs++ = MI_USER_INTERRUPT;
+ *cs++ = MI_NOOP;
+
+ rq->tail = intel_ring_offset(rq, cs);
+ assert_ring_tail_valid(rq->ring, rq->tail);
+
+ return cs;
+}
+
static int
gen7_render_ring_cs_stall_wa(struct i915_request *rq)
{
@@ -379,11 +409,111 @@ gen7_render_ring_flush(struct i915_request *rq, u32 mode)
return 0;
}
-static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
+static u32 *gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
+{
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_DC_FLUSH_ENABLE |
+ PIPE_CONTROL_FLUSH_ENABLE |
+ PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_CS_STALL);
+ *cs++ = rq->timeline->hwsp_offset;
+ *cs++ = rq->fence.seqno;
+
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = (PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_CS_STALL);
+ *cs++ = intel_hws_seqno_address(rq->engine);
+ *cs++ = rq->global_seqno;
+
+ *cs++ = MI_USER_INTERRUPT;
+ *cs++ = MI_NOOP;
+
+ rq->tail = intel_ring_offset(rq, cs);
+ assert_ring_tail_valid(rq->ring, rq->tail);
+
+ return cs;
+}
+
+static u32 *gen6_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
+{
+ GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);
+ GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
+
+ *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
+ *cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
+ *cs++ = rq->fence.seqno;
+
+ *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
+ *cs++ = I915_GEM_HWS_INDEX_ADDR | MI_FLUSH_DW_USE_GTT;
+ *cs++ = rq->global_seqno;
+
+ *cs++ = MI_USER_INTERRUPT;
+ *cs++ = MI_NOOP;
+
+ rq->tail = intel_ring_offset(rq, cs);
+ assert_ring_tail_valid(rq->ring, rq->tail);
+
+ return cs;
+}
+
+#define GEN7_XCS_WA 32
+static u32 *gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
+{
+ int i;
+
+ GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);
+ GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
+
+ *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
+ *cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
+ *cs++ = rq->fence.seqno;
+
+ *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
+ *cs++ = I915_GEM_HWS_INDEX_ADDR | MI_FLUSH_DW_USE_GTT;
+ *cs++ = rq->global_seqno;
+
+ for (i = 0; i < GEN7_XCS_WA; i++) {
+ *cs++ = MI_STORE_DWORD_INDEX;
+ *cs++ = I915_GEM_HWS_SEQNO_ADDR;
+ *cs++ = rq->fence.seqno;
+ }
+
+ *cs++ = MI_FLUSH_DW;
+ *cs++ = 0;
+ *cs++ = 0;
+
+ *cs++ = MI_USER_INTERRUPT;
+
+ rq->tail = intel_ring_offset(rq, cs);
+ assert_ring_tail_valid(rq->ring, rq->tail);
+
+ return cs;
+}
+#undef GEN7_XCS_WA
+
+static void set_hwstam(struct intel_engine_cs *engine, u32 mask)
+{
+ /*
+ * Keep the render interrupt unmasked as this papers over
+ * lost interrupts following a reset.
+ */
+ if (engine->class == RENDER_CLASS) {
+ if (INTEL_GEN(engine->i915) >= 6)
+ mask &= ~BIT(0);
+ else
+ mask &= ~I915_USER_INTERRUPT;
+ }
+
+ intel_engine_set_hwsp_writemask(engine, mask);
+}
+
+static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys)
{
struct drm_i915_private *dev_priv = engine->i915;
- struct page *page = virt_to_page(engine->status_page.page_addr);
- phys_addr_t phys = PFN_PHYS(page_to_pfn(page));
u32 addr;
addr = lower_32_bits(phys);
@@ -393,15 +523,30 @@ static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
I915_WRITE(HWS_PGA, addr);
}
-static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
+static struct page *status_page(struct intel_engine_cs *engine)
+{
+ struct drm_i915_gem_object *obj = engine->status_page.vma->obj;
+
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+ return sg_page(obj->mm.pages->sgl);
+}
+
+static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
+{
+ set_hws_pga(engine, PFN_PHYS(page_to_pfn(status_page(engine))));
+ set_hwstam(engine, ~0u);
+}
+
+static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
{
struct drm_i915_private *dev_priv = engine->i915;
- i915_reg_t mmio;
+ i915_reg_t hwsp;
- /* The ring status page addresses are no longer next to the rest of
+ /*
+ * The ring status page addresses are no longer next to the rest of
* the ring registers as of gen7.
*/
- if (IS_GEN7(dev_priv)) {
+ if (IS_GEN(dev_priv, 7)) {
switch (engine->id) {
/*
* No more rings exist on Gen7. Default case is only to shut up
@@ -410,56 +555,55 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
default:
GEM_BUG_ON(engine->id);
case RCS:
- mmio = RENDER_HWS_PGA_GEN7;
+ hwsp = RENDER_HWS_PGA_GEN7;
break;
case BCS:
- mmio = BLT_HWS_PGA_GEN7;
+ hwsp = BLT_HWS_PGA_GEN7;
break;
case VCS:
- mmio = BSD_HWS_PGA_GEN7;
+ hwsp = BSD_HWS_PGA_GEN7;
break;
case VECS:
- mmio = VEBOX_HWS_PGA_GEN7;
+ hwsp = VEBOX_HWS_PGA_GEN7;
break;
}
- } else if (IS_GEN6(dev_priv)) {
- mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
+ } else if (IS_GEN(dev_priv, 6)) {
+ hwsp = RING_HWS_PGA_GEN6(engine->mmio_base);
} else {
- mmio = RING_HWS_PGA(engine->mmio_base);
+ hwsp = RING_HWS_PGA(engine->mmio_base);
}
- if (INTEL_GEN(dev_priv) >= 6) {
- u32 mask = ~0u;
+ I915_WRITE(hwsp, offset);
+ POSTING_READ(hwsp);
+}
- /*
- * Keep the render interrupt unmasked as this papers over
- * lost interrupts following a reset.
- */
- if (engine->id == RCS)
- mask &= ~BIT(0);
+static void flush_cs_tlb(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ i915_reg_t instpm = RING_INSTPM(engine->mmio_base);
- I915_WRITE(RING_HWSTAM(engine->mmio_base), mask);
- }
+ if (!IS_GEN_RANGE(dev_priv, 6, 7))
+ return;
- I915_WRITE(mmio, engine->status_page.ggtt_offset);
- POSTING_READ(mmio);
+ /* ring should be idle before issuing a sync flush */
+ WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
- /* Flush the TLB for this page */
- if (IS_GEN(dev_priv, 6, 7)) {
- i915_reg_t reg = RING_INSTPM(engine->mmio_base);
+ I915_WRITE(instpm,
+ _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
+ INSTPM_SYNC_FLUSH));
+ if (intel_wait_for_register(dev_priv,
+ instpm, INSTPM_SYNC_FLUSH, 0,
+ 1000))
+ DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
+ engine->name);
+}
- /* ring should be idle before issuing a sync flush*/
- WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
+static void ring_setup_status_page(struct intel_engine_cs *engine)
+{
+ set_hwsp(engine, i915_ggtt_offset(engine->status_page.vma));
+ set_hwstam(engine, ~0u);
- I915_WRITE(reg,
- _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
- INSTPM_SYNC_FLUSH));
- if (intel_wait_for_register(dev_priv,
- reg, INSTPM_SYNC_FLUSH, 0,
- 1000))
- DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
- engine->name);
- }
+ flush_cs_tlb(engine);
}
static bool stop_ring(struct intel_engine_cs *engine)
@@ -529,17 +673,10 @@ static int init_ring_common(struct intel_engine_cs *engine)
if (HWS_NEEDS_PHYSICAL(dev_priv))
ring_setup_phys_status_page(engine);
else
- intel_ring_setup_status_page(engine);
+ ring_setup_status_page(engine);
intel_engine_reset_breadcrumbs(engine);
- if (HAS_LEGACY_SEMAPHORES(engine->i915)) {
- I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
- I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
- if (HAS_VEBOX(dev_priv))
- I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
- }
-
/* Enforce ordering by reading HEAD register back */
I915_READ_HEAD(engine);
@@ -593,63 +730,87 @@ static int init_ring_common(struct intel_engine_cs *engine)
}
/* Papering over lost _interrupts_ immediately following the restart */
- intel_engine_wakeup(engine);
+ intel_engine_queue_breadcrumbs(engine);
out:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
return ret;
}
-static struct i915_request *reset_prepare(struct intel_engine_cs *engine)
+static void reset_prepare(struct intel_engine_cs *engine)
{
intel_engine_stop_cs(engine);
-
- if (engine->irq_seqno_barrier)
- engine->irq_seqno_barrier(engine);
-
- return i915_gem_find_active_request(engine);
}
-static void skip_request(struct i915_request *rq)
+static void reset_ring(struct intel_engine_cs *engine, bool stalled)
{
- void *vaddr = rq->ring->vaddr;
+ struct i915_timeline *tl = &engine->timeline;
+ struct i915_request *pos, *rq;
+ unsigned long flags;
u32 head;
- head = rq->infix;
- if (rq->postfix < head) {
- memset32(vaddr + head, MI_NOOP,
- (rq->ring->size - head) / sizeof(u32));
- head = 0;
+ rq = NULL;
+ spin_lock_irqsave(&tl->lock, flags);
+ list_for_each_entry(pos, &tl->requests, link) {
+ if (!i915_request_completed(pos)) {
+ rq = pos;
+ break;
+ }
}
- memset32(vaddr + head, MI_NOOP, (rq->postfix - head) / sizeof(u32));
-}
-
-static void reset_ring(struct intel_engine_cs *engine, struct i915_request *rq)
-{
- GEM_TRACE("%s request global=%d, current=%d\n",
- engine->name, rq ? rq->global_seqno : 0,
- intel_engine_get_seqno(engine));
+ GEM_TRACE("%s seqno=%d, current=%d, stalled? %s\n",
+ engine->name,
+ rq ? rq->global_seqno : 0,
+ intel_engine_get_seqno(engine),
+ yesno(stalled));
/*
- * Try to restore the logical GPU state to match the continuation
- * of the request queue. If we skip the context/PD restore, then
- * the next request may try to execute assuming that its context
- * is valid and loaded on the GPU and so may try to access invalid
- * memory, prompting repeated GPU hangs.
+ * The guilty request will get skipped on a hung engine.
*
- * If the request was guilty, we still restore the logical state
- * in case the next request requires it (e.g. the aliasing ppgtt),
- * but skip over the hung batch.
+ * Users of client default contexts do not rely on logical
+ * state preserved between batches so it is safe to execute
+ * queued requests following the hang. Non-default contexts
+ * rely on preserved state, so skipping a batch loses the
+ * evolution of the state and it needs to be considered corrupted.
+ * Executing more queued batches on top of corrupted state is
+ * risky. But we take the risk by trying to advance through
+ * the queued requests in order to make the client behaviour
+ * more predictable around resets, by not throwing away a random
+ * number of batches it has prepared for execution. Sophisticated
+ * clients can use gem_reset_stats_ioctl and dma fence status
+ * (exported via the sync_file info ioctl on explicit fences) to
+ * observe when they lose the context state and should rebuild
+ * accordingly.
*
- * If the request was innocent, we try to replay the request with
- * the restored context.
+ * The context ban, and ultimately the client ban, mechanism are safety
+ * valves if client submission ends up resulting in nothing more than
+ * subsequent hangs.
*/
+
if (rq) {
- /* If the rq hung, jump to its breadcrumb and skip the batch */
- rq->ring->head = intel_ring_wrap(rq->ring, rq->head);
- if (rq->fence.error == -EIO)
- skip_request(rq);
+ /*
+ * Try to restore the logical GPU state to match the
+ * continuation of the request queue. If we skip the
+ * context/PD restore, then the next request may try to execute
+ * assuming that its context is valid and loaded on the GPU and
+ * so may try to access invalid memory, prompting repeated GPU
+ * hangs.
+ *
+ * If the request was guilty, we still restore the logical
+ * state in case the next request requires it (e.g. the
+ * aliasing ppgtt), but skip over the hung batch.
+ *
+ * If the request was innocent, we try to replay the request
+ * with the restored context.
+ */
+ i915_reset_request(rq, stalled);
+
+ GEM_BUG_ON(rq->ring != engine->buffer);
+ head = rq->head;
+ } else {
+ head = engine->buffer->tail;
}
+ engine->buffer->head = intel_ring_wrap(engine->buffer, head);
+
+ spin_unlock_irqrestore(&tl->lock, flags);
}
static void reset_finish(struct intel_engine_cs *engine)
@@ -679,7 +840,7 @@ static int init_render_ring(struct intel_engine_cs *engine)
return ret;
/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
- if (IS_GEN(dev_priv, 4, 6))
+ if (IS_GEN_RANGE(dev_priv, 4, 6))
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
/* We need to disable the AsyncFlip performance optimisations in order
@@ -688,22 +849,22 @@ static int init_render_ring(struct intel_engine_cs *engine)
*
* WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
*/
- if (IS_GEN(dev_priv, 6, 7))
+ if (IS_GEN_RANGE(dev_priv, 6, 7))
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
/* Required for the hardware to program scanline values for waiting */
/* WaEnableFlushTlbInvalidationMode:snb */
- if (IS_GEN6(dev_priv))
+ if (IS_GEN(dev_priv, 6))
I915_WRITE(GFX_MODE,
_MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
- if (IS_GEN7(dev_priv))
+ if (IS_GEN(dev_priv, 7))
I915_WRITE(GFX_MODE_GEN7,
_MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
_MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
- if (IS_GEN6(dev_priv)) {
+ if (IS_GEN(dev_priv, 6)) {
/* From the Sandybridge PRM, volume 1 part 3, page 24:
* "If this bit is set, STCunit will have LRA as replacement
* policy. [...] This bit must be reset. LRA replacement
@@ -713,7 +874,7 @@ static int init_render_ring(struct intel_engine_cs *engine)
_MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
}
- if (IS_GEN(dev_priv, 6, 7))
+ if (IS_GEN_RANGE(dev_priv, 6, 7))
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
if (INTEL_GEN(dev_priv) >= 6)
@@ -722,33 +883,6 @@ static int init_render_ring(struct intel_engine_cs *engine)
return 0;
}
-static u32 *gen6_signal(struct i915_request *rq, u32 *cs)
-{
- struct drm_i915_private *dev_priv = rq->i915;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int num_rings = 0;
-
- for_each_engine(engine, dev_priv, id) {
- i915_reg_t mbox_reg;
-
- if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK))
- continue;
-
- mbox_reg = rq->engine->semaphore.mbox.signal[engine->hw_id];
- if (i915_mmio_reg_valid(mbox_reg)) {
- *cs++ = MI_LOAD_REGISTER_IMM(1);
- *cs++ = i915_mmio_reg_offset(mbox_reg);
- *cs++ = rq->global_seqno;
- num_rings++;
- }
- }
- if (num_rings & 1)
- *cs++ = MI_NOOP;
-
- return cs;
-}
-
static void cancel_requests(struct intel_engine_cs *engine)
{
struct i915_request *request;
@@ -760,11 +894,10 @@ static void cancel_requests(struct intel_engine_cs *engine)
list_for_each_entry(request, &engine->timeline.requests, link) {
GEM_BUG_ON(!request->global_seqno);
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
- &request->fence.flags))
- continue;
+ if (!i915_request_signaled(request))
+ dma_fence_set_error(&request->fence, -EIO);
- dma_fence_set_error(&request->fence, -EIO);
+ i915_request_mark_complete(request);
}
intel_write_status_page(engine,
@@ -786,94 +919,59 @@ static void i9xx_submit_request(struct i915_request *request)
intel_ring_set_tail(request->ring, request->tail));
}
-static void i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
+static u32 *i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
+ GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);
+ GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
+
+ *cs++ = MI_FLUSH;
+
+ *cs++ = MI_STORE_DWORD_INDEX;
+ *cs++ = I915_GEM_HWS_SEQNO_ADDR;
+ *cs++ = rq->fence.seqno;
+
*cs++ = MI_STORE_DWORD_INDEX;
- *cs++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT;
+ *cs++ = I915_GEM_HWS_INDEX_ADDR;
*cs++ = rq->global_seqno;
+
*cs++ = MI_USER_INTERRUPT;
rq->tail = intel_ring_offset(rq, cs);
assert_ring_tail_valid(rq->ring, rq->tail);
-}
-static const int i9xx_emit_breadcrumb_sz = 4;
-
-static void gen6_sema_emit_breadcrumb(struct i915_request *rq, u32 *cs)
-{
- return i9xx_emit_breadcrumb(rq, rq->engine->semaphore.signal(rq, cs));
+ return cs;
}
-static int
-gen6_ring_sync_to(struct i915_request *rq, struct i915_request *signal)
+#define GEN5_WA_STORES 8 /* must be at least 1! */
+static u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
- u32 dw1 = MI_SEMAPHORE_MBOX |
- MI_SEMAPHORE_COMPARE |
- MI_SEMAPHORE_REGISTER;
- u32 wait_mbox = signal->engine->semaphore.mbox.wait[rq->engine->hw_id];
- u32 *cs;
-
- WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
+ int i;
- cs = intel_ring_begin(rq, 4);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
+ GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);
+ GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
- *cs++ = dw1 | wait_mbox;
- /* Throughout all of the GEM code, seqno passed implies our current
- * seqno is >= the last seqno executed. However for hardware the
- * comparison is strictly greater than.
- */
- *cs++ = signal->global_seqno - 1;
- *cs++ = 0;
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
+ *cs++ = MI_FLUSH;
- return 0;
-}
+ *cs++ = MI_STORE_DWORD_INDEX;
+ *cs++ = I915_GEM_HWS_SEQNO_ADDR;
+ *cs++ = rq->fence.seqno;
+
+ BUILD_BUG_ON(GEN5_WA_STORES < 1);
+ for (i = 0; i < GEN5_WA_STORES; i++) {
+ *cs++ = MI_STORE_DWORD_INDEX;
+ *cs++ = I915_GEM_HWS_INDEX_ADDR;
+ *cs++ = rq->global_seqno;
+ }
-static void
-gen5_seqno_barrier(struct intel_engine_cs *engine)
-{
- /* MI_STORE are internally buffered by the GPU and not flushed
- * either by MI_FLUSH or SyncFlush or any other combination of
- * MI commands.
- *
- * "Only the submission of the store operation is guaranteed.
- * The write result will be complete (coherent) some time later
- * (this is practically a finite period but there is no guaranteed
- * latency)."
- *
- * Empirically, we observe that we need a delay of at least 75us to
- * be sure that the seqno write is visible by the CPU.
- */
- usleep_range(125, 250);
-}
+ *cs++ = MI_USER_INTERRUPT;
+ *cs++ = MI_NOOP;
-static void
-gen6_seqno_barrier(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
+ rq->tail = intel_ring_offset(rq, cs);
+ assert_ring_tail_valid(rq->ring, rq->tail);
- /* Workaround to force correct ordering between irq and seqno writes on
- * ivb (and maybe also on snb) by reading from a CS register (like
- * ACTHD) before reading the status page.
- *
- * Note that this effectively stalls the read by the time it takes to
- * do a memory transaction, which more or less ensures that the write
- * from the GPU has sufficient time to invalidate the CPU cacheline.
- * Alternatively we could delay the interrupt from the CS ring to give
- * the write time to land, but that would incur a delay after every
- * batch i.e. much more frequent than a delay when waiting for the
- * interrupt (with the same net latency).
- *
- * Also note that to prevent whole machine hangs on gen7, we have to
- * take the spinlock to guard against concurrent cacheline access.
- */
- spin_lock_irq(&dev_priv->uncore.lock);
- POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
- spin_unlock_irq(&dev_priv->uncore.lock);
+ return cs;
}
+#undef GEN5_WA_STORES
static void
gen5_irq_enable(struct intel_engine_cs *engine)
@@ -948,6 +1046,10 @@ gen6_irq_enable(struct intel_engine_cs *engine)
I915_WRITE_IMR(engine,
~(engine->irq_enable_mask |
engine->irq_keep_mask));
+
+ /* Flush/delay to ensure the RING_IMR is active before the GT IMR */
+ POSTING_READ_FW(RING_IMR(engine->mmio_base));
+
gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
}
@@ -966,6 +1068,10 @@ hsw_vebox_irq_enable(struct intel_engine_cs *engine)
struct drm_i915_private *dev_priv = engine->i915;
I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
+
+ /* Flush/delay to ensure the RING_IMR is active before the GT IMR */
+ POSTING_READ_FW(RING_IMR(engine->mmio_base));
+
gen6_unmask_pm_irq(dev_priv, engine->irq_enable_mask);
}
@@ -1091,6 +1197,10 @@ int intel_ring_pin(struct intel_ring *ring)
GEM_BUG_ON(ring->vaddr);
+ ret = i915_timeline_pin(ring->timeline);
+ if (ret)
+ return ret;
+
flags = PIN_GLOBAL;
/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
@@ -1107,28 +1217,32 @@ int intel_ring_pin(struct intel_ring *ring)
else
ret = i915_gem_object_set_to_cpu_domain(vma->obj, true);
if (unlikely(ret))
- return ret;
+ goto unpin_timeline;
}
ret = i915_vma_pin(vma, 0, 0, flags);
if (unlikely(ret))
- return ret;
+ goto unpin_timeline;
if (i915_vma_is_map_and_fenceable(vma))
addr = (void __force *)i915_vma_pin_iomap(vma);
else
addr = i915_gem_object_pin_map(vma->obj, map);
- if (IS_ERR(addr))
- goto err;
+ if (IS_ERR(addr)) {
+ ret = PTR_ERR(addr);
+ goto unpin_ring;
+ }
vma->obj->pin_global++;
ring->vaddr = addr;
return 0;
-err:
+unpin_ring:
i915_vma_unpin(vma);
- return PTR_ERR(addr);
+unpin_timeline:
+ i915_timeline_unpin(ring->timeline);
+ return ret;
}
void intel_ring_reset(struct intel_ring *ring, u32 tail)
@@ -1157,6 +1271,8 @@ void intel_ring_unpin(struct intel_ring *ring)
ring->vma->obj->pin_global--;
i915_vma_unpin(ring->vma);
+
+ i915_timeline_unpin(ring->timeline);
}
static struct i915_vma *
@@ -1467,13 +1583,18 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
struct intel_ring *ring;
int err;
- intel_engine_setup_common(engine);
+ err = intel_engine_setup_common(engine);
+ if (err)
+ return err;
- timeline = i915_timeline_create(engine->i915, engine->name);
+ timeline = i915_timeline_create(engine->i915,
+ engine->name,
+ engine->status_page.vma);
if (IS_ERR(timeline)) {
err = PTR_ERR(timeline);
goto err;
}
+ GEM_BUG_ON(timeline->has_initial_breadcrumb);
ring = intel_engine_create_ring(engine, timeline, 32 * PAGE_SIZE);
i915_timeline_put(timeline);
@@ -1493,6 +1614,8 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
if (err)
goto err_unpin;
+ GEM_BUG_ON(ring->timeline->hwsp_ggtt != engine->status_page.vma);
+
return 0;
err_unpin:
@@ -1581,10 +1704,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
struct intel_engine_cs *engine = rq->engine;
enum intel_engine_id id;
const int num_rings =
- /* Use an extended w/a on gen7 if signalling from other rings */
- (HAS_LEGACY_SEMAPHORES(i915) && IS_GEN7(i915)) ?
- INTEL_INFO(i915)->num_rings - 1 :
- 0;
+ IS_HSW_GT1(i915) ? RUNTIME_INFO(i915)->num_rings - 1 : 0;
bool force_restore = false;
int len;
u32 *cs;
@@ -1597,7 +1717,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN;
len = 4;
- if (IS_GEN7(i915))
+ if (IS_GEN(i915, 7))
len += 2 + (num_rings ? 4*num_rings + 6 : 0);
if (flags & MI_FORCE_RESTORE) {
GEM_BUG_ON(flags & MI_RESTORE_INHIBIT);
@@ -1611,7 +1731,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
return PTR_ERR(cs);
/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
- if (IS_GEN7(i915)) {
+ if (IS_GEN(i915, 7)) {
*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
if (num_rings) {
struct intel_engine_cs *signaller;
@@ -1658,7 +1778,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
*/
*cs++ = MI_NOOP;
- if (IS_GEN7(i915)) {
+ if (IS_GEN(i915, 7)) {
if (num_rings) {
struct intel_engine_cs *signaller;
i915_reg_t last_reg = {}; /* keep gcc quiet */
@@ -1828,18 +1948,21 @@ static int ring_request_alloc(struct i915_request *request)
int ret;
GEM_BUG_ON(!request->hw_context->pin_count);
+ GEM_BUG_ON(request->timeline->has_initial_breadcrumb);
- /* Flush enough space to reduce the likelihood of waiting after
+ /*
+ * Flush enough space to reduce the likelihood of waiting after
* we start building the request - in which case we will just
* have to repeat work.
*/
request->reserved_space += LEGACY_REQUEST_SIZE;
- ret = intel_ring_wait_for_space(request->ring, request->reserved_space);
+ ret = switch_context(request);
if (ret)
return ret;
- ret = switch_context(request);
+ /* Unconditionally invalidate GPU caches and TLBs. */
+ ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
if (ret)
return ret;
@@ -1881,22 +2004,6 @@ static noinline int wait_for_space(struct intel_ring *ring, unsigned int bytes)
return 0;
}
-int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes)
-{
- GEM_BUG_ON(bytes > ring->effective_size);
- if (unlikely(bytes > ring->effective_size - ring->emit))
- bytes += ring->size - ring->emit;
-
- if (unlikely(bytes > ring->space)) {
- int ret = wait_for_space(ring, bytes);
- if (unlikely(ret))
- return ret;
- }
-
- GEM_BUG_ON(ring->space < bytes);
- return 0;
-}
-
u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
{
struct intel_ring *ring = rq->ring;
@@ -2129,77 +2236,15 @@ static int gen6_ring_flush(struct i915_request *rq, u32 mode)
return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB);
}
-static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
- struct intel_engine_cs *engine)
-{
- int i;
-
- if (!HAS_LEGACY_SEMAPHORES(dev_priv))
- return;
-
- GEM_BUG_ON(INTEL_GEN(dev_priv) < 6);
- engine->semaphore.sync_to = gen6_ring_sync_to;
- engine->semaphore.signal = gen6_signal;
-
- /*
- * The current semaphore is only applied on pre-gen8
- * platform. And there is no VCS2 ring on the pre-gen8
- * platform. So the semaphore between RCS and VCS2 is
- * initialized as INVALID.
- */
- for (i = 0; i < GEN6_NUM_SEMAPHORES; i++) {
- static const struct {
- u32 wait_mbox;
- i915_reg_t mbox_reg;
- } sem_data[GEN6_NUM_SEMAPHORES][GEN6_NUM_SEMAPHORES] = {
- [RCS_HW] = {
- [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RV, .mbox_reg = GEN6_VRSYNC },
- [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RB, .mbox_reg = GEN6_BRSYNC },
- [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC },
- },
- [VCS_HW] = {
- [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VR, .mbox_reg = GEN6_RVSYNC },
- [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VB, .mbox_reg = GEN6_BVSYNC },
- [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC },
- },
- [BCS_HW] = {
- [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BR, .mbox_reg = GEN6_RBSYNC },
- [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BV, .mbox_reg = GEN6_VBSYNC },
- [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC },
- },
- [VECS_HW] = {
- [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC },
- [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC },
- [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC },
- },
- };
- u32 wait_mbox;
- i915_reg_t mbox_reg;
-
- if (i == engine->hw_id) {
- wait_mbox = MI_SEMAPHORE_SYNC_INVALID;
- mbox_reg = GEN6_NOSYNC;
- } else {
- wait_mbox = sem_data[engine->hw_id][i].wait_mbox;
- mbox_reg = sem_data[engine->hw_id][i].mbox_reg;
- }
-
- engine->semaphore.mbox.wait[i] = wait_mbox;
- engine->semaphore.mbox.signal[i] = mbox_reg;
- }
-}
-
static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
struct intel_engine_cs *engine)
{
if (INTEL_GEN(dev_priv) >= 6) {
engine->irq_enable = gen6_irq_enable;
engine->irq_disable = gen6_irq_disable;
- engine->irq_seqno_barrier = gen6_seqno_barrier;
} else if (INTEL_GEN(dev_priv) >= 5) {
engine->irq_enable = gen5_irq_enable;
engine->irq_disable = gen5_irq_disable;
- engine->irq_seqno_barrier = gen5_seqno_barrier;
} else if (INTEL_GEN(dev_priv) >= 3) {
engine->irq_enable = i9xx_irq_enable;
engine->irq_disable = i9xx_irq_disable;
@@ -2231,7 +2276,6 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
GEM_BUG_ON(INTEL_GEN(dev_priv) >= 8);
intel_ring_init_irq(dev_priv, engine);
- intel_ring_init_semaphores(dev_priv, engine);
engine->init_hw = init_ring_common;
engine->reset.prepare = reset_prepare;
@@ -2241,18 +2285,14 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
engine->context_pin = intel_ring_context_pin;
engine->request_alloc = ring_request_alloc;
- engine->emit_breadcrumb = i9xx_emit_breadcrumb;
- engine->emit_breadcrumb_sz = i9xx_emit_breadcrumb_sz;
- if (HAS_LEGACY_SEMAPHORES(dev_priv)) {
- int num_rings;
-
- engine->emit_breadcrumb = gen6_sema_emit_breadcrumb;
-
- num_rings = INTEL_INFO(dev_priv)->num_rings - 1;
- engine->emit_breadcrumb_sz += num_rings * 3;
- if (num_rings & 1)
- engine->emit_breadcrumb_sz++;
- }
+ /*
+ * Using a global execution timeline; the previous final breadcrumb is
+ * equivalent to our next initial breadcrumb so we can elide
+ * engine->emit_init_breadcrumb().
+ */
+ engine->emit_fini_breadcrumb = i9xx_emit_breadcrumb;
+ if (IS_GEN(dev_priv, 5))
+ engine->emit_fini_breadcrumb = gen5_emit_breadcrumb;
engine->set_default_submission = i9xx_set_default_submission;
@@ -2278,12 +2318,15 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
- if (INTEL_GEN(dev_priv) >= 6) {
+ if (INTEL_GEN(dev_priv) >= 7) {
engine->init_context = intel_rcs_ctx_init;
engine->emit_flush = gen7_render_ring_flush;
- if (IS_GEN6(dev_priv))
- engine->emit_flush = gen6_render_ring_flush;
- } else if (IS_GEN5(dev_priv)) {
+ engine->emit_fini_breadcrumb = gen7_rcs_emit_breadcrumb;
+ } else if (IS_GEN(dev_priv, 6)) {
+ engine->init_context = intel_rcs_ctx_init;
+ engine->emit_flush = gen6_render_ring_flush;
+ engine->emit_fini_breadcrumb = gen6_rcs_emit_breadcrumb;
+ } else if (IS_GEN(dev_priv, 5)) {
engine->emit_flush = gen4_render_ring_flush;
} else {
if (INTEL_GEN(dev_priv) < 4)
@@ -2313,13 +2356,18 @@ int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
if (INTEL_GEN(dev_priv) >= 6) {
/* gen6 bsd needs a special wa for tail updates */
- if (IS_GEN6(dev_priv))
+ if (IS_GEN(dev_priv, 6))
engine->set_default_submission = gen6_bsd_set_default_submission;
engine->emit_flush = gen6_bsd_ring_flush;
engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
+
+ if (IS_GEN(dev_priv, 6))
+ engine->emit_fini_breadcrumb = gen6_xcs_emit_breadcrumb;
+ else
+ engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
} else {
engine->emit_flush = bsd_ring_flush;
- if (IS_GEN5(dev_priv))
+ if (IS_GEN(dev_priv, 5))
engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
else
engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
@@ -2332,11 +2380,18 @@ int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
+ GEM_BUG_ON(INTEL_GEN(dev_priv) < 6);
+
intel_ring_default_vfuncs(dev_priv, engine);
engine->emit_flush = gen6_ring_flush;
engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
+ if (IS_GEN(dev_priv, 6))
+ engine->emit_fini_breadcrumb = gen6_xcs_emit_breadcrumb;
+ else
+ engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
+
return intel_init_ring_buffer(engine);
}
@@ -2344,6 +2399,8 @@ int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
+ GEM_BUG_ON(INTEL_GEN(dev_priv) < 7);
+
intel_ring_default_vfuncs(dev_priv, engine);
engine->emit_flush = gen6_ring_flush;
@@ -2351,5 +2408,7 @@ int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
engine->irq_enable = hsw_vebox_irq_enable;
engine->irq_disable = hsw_vebox_irq_disable;
+ engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
+
return intel_init_ring_buffer(engine);
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index a1a7cc29fdd1..710ffb221775 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -5,6 +5,7 @@
#include <drm/drm_util.h>
#include <linux/hashtable.h>
+#include <linux/irq_work.h>
#include <linux/seqlock.h>
#include "i915_gem_batch_pool.h"
@@ -28,12 +29,11 @@ struct i915_sched_attr;
* workarounds!
*/
#define CACHELINE_BYTES 64
-#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))
+#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(u32))
struct intel_hw_status_page {
struct i915_vma *vma;
- u32 *page_addr;
- u32 ggtt_offset;
+ u32 *addr;
};
#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
@@ -94,12 +94,12 @@ hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
#define I915_MAX_SUBSLICES 8
#define instdone_slice_mask(dev_priv__) \
- (IS_GEN7(dev_priv__) ? \
- 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)
+ (IS_GEN(dev_priv__, 7) ? \
+ 1 : RUNTIME_INFO(dev_priv__)->sseu.slice_mask)
#define instdone_subslice_mask(dev_priv__) \
- (IS_GEN7(dev_priv__) ? \
- 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask[0])
+ (IS_GEN(dev_priv__, 7) ? \
+ 1 : RUNTIME_INFO(dev_priv__)->sseu.subslice_mask[0])
#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
for ((slice__) = 0, (subslice__) = 0; \
@@ -120,13 +120,8 @@ struct intel_instdone {
struct intel_engine_hangcheck {
u64 acthd;
u32 seqno;
- enum intel_engine_hangcheck_action action;
unsigned long action_timestamp;
- int deadlock;
struct intel_instdone instdone;
- struct i915_request *active_request;
- bool stalled:1;
- bool wedged:1;
};
struct intel_ring {
@@ -209,6 +204,7 @@ struct i915_priolist {
struct st_preempt_hang {
struct completion completion;
+ unsigned int count;
bool inject_hang;
};
@@ -299,14 +295,18 @@ struct intel_engine_execlists {
unsigned int port_mask;
/**
- * @queue_priority: Highest pending priority.
+ * @queue_priority_hint: Highest pending priority.
*
* When we add requests into the queue, or adjust the priority of
* executing requests, we compute the maximum priority of those
* pending requests. We can then use this value to determine if
* we need to preempt the executing requests to service the queue.
+ * However, since we may have recorded the priority of an inflight
+ * request we wanted to preempt but which has since completed, at the
+ * time of dequeuing the priority hint may no longer match the highest
+ * available request priority.
*/
- int queue_priority;
+ int queue_priority_hint;
/**
* @queue: queue of requests, in priority lists
@@ -365,9 +365,6 @@ struct intel_engine_cs {
struct drm_i915_gem_object *default_state;
void *pinned_default_state;
- unsigned long irq_posted;
-#define ENGINE_IRQ_BREADCRUMB 0
-
/* Rather than have every client wait upon all user interrupts,
* with the herd waking after every interrupt and each doing the
* heavyweight seqno dance, we delegate the task (of being the
@@ -385,23 +382,14 @@ struct intel_engine_cs {
* the overhead of waking that client is much preferred.
*/
struct intel_breadcrumbs {
- spinlock_t irq_lock; /* protects irq_*; irqsafe */
- struct intel_wait *irq_wait; /* oldest waiter by retirement */
-
- spinlock_t rb_lock; /* protects the rb and wraps irq_lock */
- struct rb_root waiters; /* sorted by retirement, priority */
- struct list_head signals; /* sorted by retirement */
- struct task_struct *signaler; /* used for fence signalling */
+ spinlock_t irq_lock;
+ struct list_head signalers;
- struct timer_list fake_irq; /* used after a missed interrupt */
- struct timer_list hangcheck; /* detect missed interrupts */
+ struct irq_work irq_work; /* for use from inside irq_lock */
- unsigned int hangcheck_interrupts;
unsigned int irq_enabled;
- unsigned int irq_count;
- bool irq_armed : 1;
- I915_SELFTEST_DECLARE(bool mock : 1);
+ bool irq_armed;
} breadcrumbs;
struct {
@@ -449,9 +437,8 @@ struct intel_engine_cs {
int (*init_hw)(struct intel_engine_cs *engine);
struct {
- struct i915_request *(*prepare)(struct intel_engine_cs *engine);
- void (*reset)(struct intel_engine_cs *engine,
- struct i915_request *rq);
+ void (*prepare)(struct intel_engine_cs *engine);
+ void (*reset)(struct intel_engine_cs *engine, bool stalled);
void (*finish)(struct intel_engine_cs *engine);
} reset;
@@ -475,8 +462,10 @@ struct intel_engine_cs {
unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
- void (*emit_breadcrumb)(struct i915_request *rq, u32 *cs);
- int emit_breadcrumb_sz;
+ int (*emit_init_breadcrumb)(struct i915_request *rq);
+ u32 *(*emit_fini_breadcrumb)(struct i915_request *rq,
+ u32 *cs);
+ unsigned int emit_fini_breadcrumb_dw;
/* Pass the request to the hardware queue (e.g. directly into
* the legacy ringbuffer or to the end of an execlist).
@@ -502,69 +491,8 @@ struct intel_engine_cs {
*/
void (*cancel_requests)(struct intel_engine_cs *engine);
- /* Some chipsets are not quite as coherent as advertised and need
- * an expensive kick to force a true read of the up-to-date seqno.
- * However, the up-to-date seqno is not always required and the last
- * seen value is good enough. Note that the seqno will always be
- * monotonic, even if not coherent.
- */
- void (*irq_seqno_barrier)(struct intel_engine_cs *engine);
void (*cleanup)(struct intel_engine_cs *engine);
- /* GEN8 signal/wait table - never trust comments!
- * signal to signal to signal to signal to signal to
- * RCS VCS BCS VECS VCS2
- * --------------------------------------------------------------------
- * RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
- * |-------------------------------------------------------------------
- * VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
- * |-------------------------------------------------------------------
- * BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
- * |-------------------------------------------------------------------
- * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) | NOP (0x90) | VCS2 (0x98) |
- * |-------------------------------------------------------------------
- * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP (0xc0) |
- * |-------------------------------------------------------------------
- *
- * Generalization:
- * f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
- * ie. transpose of g(x, y)
- *
- * sync from sync from sync from sync from sync from
- * RCS VCS BCS VECS VCS2
- * --------------------------------------------------------------------
- * RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
- * |-------------------------------------------------------------------
- * VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
- * |-------------------------------------------------------------------
- * BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
- * |-------------------------------------------------------------------
- * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) | NOP (0x90) | VCS2 (0xb8) |
- * |-------------------------------------------------------------------
- * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) | NOP (0xc0) |
- * |-------------------------------------------------------------------
- *
- * Generalization:
- * g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
- * ie. transpose of f(x, y)
- */
- struct {
-#define GEN6_SEMAPHORE_LAST VECS_HW
-#define GEN6_NUM_SEMAPHORES (GEN6_SEMAPHORE_LAST + 1)
-#define GEN6_SEMAPHORES_MASK GENMASK(GEN6_SEMAPHORE_LAST, 0)
- struct {
- /* our mbox written by others */
- u32 wait[GEN6_NUM_SEMAPHORES];
- /* mboxes this ring signals to */
- i915_reg_t signal[GEN6_NUM_SEMAPHORES];
- } mbox;
-
- /* AKA wait() */
- int (*sync_to)(struct i915_request *rq,
- struct i915_request *signal);
- u32 *(*signal)(struct i915_request *rq, u32 *cs);
- } semaphore;
-
struct intel_engine_execlists execlists;
/* Contexts are pinned whilst they are active on the GPU. The last
@@ -665,7 +593,20 @@ intel_engine_has_preemption(const struct intel_engine_cs *engine)
static inline bool __execlists_need_preempt(int prio, int last)
{
- return prio > max(0, last);
+ /*
+ * Allow preemption of low -> normal -> high, but we do
+ * not allow low priority tasks to preempt other low priority
+ * tasks under the impression that latency for low priority
+ * tasks does not matter (as much as background throughput),
+ * so keep it simple (KISS).
+ *
+ * More naturally we would write
+ * prio >= max(0, last);
+ * except that we wish to prevent triggering preemption at the same
+ * priority level: the task that is running should remain running
+ * to preserve FIFO ordering of dependencies.
+ */
+ return prio > max(I915_PRIORITY_NORMAL - 1, last);
}
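An aside, not part of the diff: assuming I915_PRIORITY_NORMAL is 0 (as elsewhere in the driver), the change only affects requests of equal priority, which now keep running instead of preempting each other. A self-contained sketch:

#include <assert.h>

#define PRIO_NORMAL 0	/* stand-in for I915_PRIORITY_NORMAL, assumed 0 */

static int need_preempt(int prio, int last)
{
	int floor = PRIO_NORMAL - 1;

	return prio > (last > floor ? last : floor);
}

int main(void)
{
	assert(!need_preempt(0, 0));	/* equal priority: keep FIFO order */
	assert(need_preempt(1, 0));	/* strictly higher priority preempts */
	assert(!need_preempt(-1, -2));	/* low never preempts low */
	return 0;
}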
static inline void
@@ -743,7 +684,7 @@ static inline u32
intel_read_status_page(const struct intel_engine_cs *engine, int reg)
{
/* Ensure that the compiler doesn't optimize away the load. */
- return READ_ONCE(engine->status_page.page_addr[reg]);
+ return READ_ONCE(engine->status_page.addr[reg]);
}
static inline void
@@ -756,12 +697,12 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
*/
if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
mb();
- clflush(&engine->status_page.page_addr[reg]);
- engine->status_page.page_addr[reg] = value;
- clflush(&engine->status_page.page_addr[reg]);
+ clflush(&engine->status_page.addr[reg]);
+ engine->status_page.addr[reg] = value;
+ clflush(&engine->status_page.addr[reg]);
mb();
} else {
- WRITE_ONCE(engine->status_page.page_addr[reg], value);
+ WRITE_ONCE(engine->status_page.addr[reg], value);
}
}
@@ -782,11 +723,13 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
* The area from dword 0x30 to 0x3ff is available for driver usage.
*/
#define I915_GEM_HWS_INDEX 0x30
-#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
-#define I915_GEM_HWS_PREEMPT_INDEX 0x32
-#define I915_GEM_HWS_PREEMPT_ADDR (I915_GEM_HWS_PREEMPT_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
-#define I915_GEM_HWS_SCRATCH_INDEX 0x40
-#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
+#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX * sizeof(u32))
+#define I915_GEM_HWS_PREEMPT 0x32
+#define I915_GEM_HWS_PREEMPT_ADDR (I915_GEM_HWS_PREEMPT * sizeof(u32))
+#define I915_GEM_HWS_SEQNO 0x40
+#define I915_GEM_HWS_SEQNO_ADDR (I915_GEM_HWS_SEQNO * sizeof(u32))
+#define I915_GEM_HWS_SCRATCH 0x80
+#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH * sizeof(u32))
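Not part of the patch: a quick sanity check of the new byte offsets, which are now derived by multiplying the dword index by sizeof(u32) instead of shifting by MI_STORE_DWORD_INDEX_SHIFT (the resulting addresses for the pre-existing slots are unchanged).

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* dword index * sizeof(u32) == byte offset into the status page */
	assert(0x30 * sizeof(uint32_t) == 0xc0);	/* I915_GEM_HWS_INDEX_ADDR   */
	assert(0x32 * sizeof(uint32_t) == 0xc8);	/* I915_GEM_HWS_PREEMPT_ADDR */
	assert(0x40 * sizeof(uint32_t) == 0x100);	/* I915_GEM_HWS_SEQNO_ADDR   */
	assert(0x80 * sizeof(uint32_t) == 0x200);	/* I915_GEM_HWS_SCRATCH_ADDR */
	return 0;
}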
#define I915_HWS_CSB_BUF0_INDEX 0x10
#define I915_HWS_CSB_WRITE_INDEX 0x1f
@@ -809,7 +752,6 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);
int __must_check intel_ring_cacheline_align(struct i915_request *rq);
-int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes);
u32 __must_check *intel_ring_begin(struct i915_request *rq, unsigned int n);
static inline void intel_ring_advance(struct i915_request *rq, u32 *cs)
@@ -890,9 +832,21 @@ intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
return tail;
}
-void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);
+static inline unsigned int
+__intel_ring_space(unsigned int head, unsigned int tail, unsigned int size)
+{
+ /*
+ * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
+ * same cacheline, the Head Pointer must not be greater than the Tail
+ * Pointer."
+ */
+ GEM_BUG_ON(!is_power_of_2(size));
+ return (head - tail - CACHELINE_BYTES) & (size - 1);
+}
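An illustrative computation, not from the patch, of how the CACHELINE_BYTES slack in __intel_ring_space() keeps the reported free space one cacheline short so head and tail never share a cacheline:

#include <assert.h>

#define CACHELINE 64u	/* stand-in for CACHELINE_BYTES */

static unsigned int ring_space(unsigned int head, unsigned int tail,
			       unsigned int size)	/* size: power of two */
{
	return (head - tail - CACHELINE) & (size - 1);
}

int main(void)
{
	assert(ring_space(0, 0, 4096) == 4096 - 64);	/* empty ring: one cacheline kept in reserve */
	assert(ring_space(128, 64, 4096) == 0);		/* tail one cacheline behind head: no space */
	return 0;
}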
+
+void intel_engine_write_global_seqno(struct intel_engine_cs *engine, u32 seqno);
-void intel_engine_setup_common(struct intel_engine_cs *engine);
+int intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);
@@ -904,6 +858,8 @@ int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
int intel_engine_stop_cs(struct intel_engine_cs *engine);
void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine);
+void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask);
+
u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);
@@ -948,102 +904,29 @@ static inline bool intel_engine_has_started(struct intel_engine_cs *engine,
void intel_engine_get_instdone(struct intel_engine_cs *engine,
struct intel_instdone *instdone);
-/*
- * Arbitrary size for largest possible 'add request' sequence. The code paths
- * are complex and variable. Empirical measurement shows that the worst case
- * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
- * we need to allocate double the largest single packet within that emission
- * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW).
- */
-#define MIN_SPACE_FOR_ADD_REQUEST 336
-
-static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
-{
- return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
-}
-
-static inline u32 intel_hws_preempt_done_address(struct intel_engine_cs *engine)
-{
- return engine->status_page.ggtt_offset + I915_GEM_HWS_PREEMPT_ADDR;
-}
-
-/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
-int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
-
-static inline void intel_wait_init(struct intel_wait *wait)
-{
- wait->tsk = current;
- wait->request = NULL;
-}
-
-static inline void intel_wait_init_for_seqno(struct intel_wait *wait, u32 seqno)
-{
- wait->tsk = current;
- wait->seqno = seqno;
-}
-
-static inline bool intel_wait_has_seqno(const struct intel_wait *wait)
-{
- return wait->seqno;
-}
-
-static inline bool
-intel_wait_update_seqno(struct intel_wait *wait, u32 seqno)
-{
- wait->seqno = seqno;
- return intel_wait_has_seqno(wait);
-}
-
-static inline bool
-intel_wait_update_request(struct intel_wait *wait,
- const struct i915_request *rq)
-{
- return intel_wait_update_seqno(wait, i915_request_global_seqno(rq));
-}
-
-static inline bool
-intel_wait_check_seqno(const struct intel_wait *wait, u32 seqno)
-{
- return wait->seqno == seqno;
-}
-
-static inline bool
-intel_wait_check_request(const struct intel_wait *wait,
- const struct i915_request *rq)
-{
- return intel_wait_check_seqno(wait, i915_request_global_seqno(rq));
-}
+void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
+void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
-static inline bool intel_wait_complete(const struct intel_wait *wait)
-{
- return RB_EMPTY_NODE(&wait->node);
-}
+void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine);
+void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine);
-bool intel_engine_add_wait(struct intel_engine_cs *engine,
- struct intel_wait *wait);
-void intel_engine_remove_wait(struct intel_engine_cs *engine,
- struct intel_wait *wait);
-bool intel_engine_enable_signaling(struct i915_request *request, bool wakeup);
-void intel_engine_cancel_signaling(struct i915_request *request);
+bool intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine);
+void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
-static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
+static inline void
+intel_engine_queue_breadcrumbs(struct intel_engine_cs *engine)
{
- return READ_ONCE(engine->breadcrumbs.irq_wait);
+ irq_work_queue(&engine->breadcrumbs.irq_work);
}
-unsigned int intel_engine_wakeup(struct intel_engine_cs *engine);
-#define ENGINE_WAKEUP_WAITER BIT(0)
-#define ENGINE_WAKEUP_ASLEEP BIT(1)
-
-void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine);
-void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine);
-
-void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
-void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
+bool intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine);
void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
+void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine,
+ struct drm_printer *p);
+
static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
{
memset(batch, 0, 6 * sizeof(u32));
@@ -1056,7 +939,7 @@ static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
}
static inline u32 *
-gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset)
+gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
{
/* We're using qword write, offset should be aligned to 8 bytes. */
GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
@@ -1066,8 +949,7 @@ gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset)
* following the batch.
*/
*cs++ = GFX_OP_PIPE_CONTROL(6);
- *cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_QW_WRITE;
+ *cs++ = flags | PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_GLOBAL_GTT_IVB;
*cs++ = gtt_offset;
*cs++ = 0;
*cs++ = value;
@@ -1093,7 +975,14 @@ gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset)
return cs;
}
-void intel_engines_sanitize(struct drm_i915_private *i915);
+static inline void intel_engine_reset(struct intel_engine_cs *engine,
+ bool stalled)
+{
+ if (engine->reset.reset)
+ engine->reset.reset(engine, stalled);
+}
+
+void intel_engines_sanitize(struct drm_i915_private *i915, bool force);
bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 4350a5270423..a017a4232c0f 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -29,6 +29,8 @@
#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>
+#include <drm/drm_print.h>
+
#include "i915_drv.h"
#include "intel_drv.h"
@@ -49,6 +51,268 @@
* present for a given platform.
*/
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+
+#include <linux/sort.h>
+
+#define STACKDEPTH 8
+
+static noinline depot_stack_handle_t __save_depot_stack(void)
+{
+ unsigned long entries[STACKDEPTH];
+ struct stack_trace trace = {
+ .entries = entries,
+ .max_entries = ARRAY_SIZE(entries),
+ .skip = 1,
+ };
+
+ save_stack_trace(&trace);
+ if (trace.nr_entries &&
+ trace.entries[trace.nr_entries - 1] == ULONG_MAX)
+ trace.nr_entries--;
+
+ return depot_save_stack(&trace, GFP_NOWAIT | __GFP_NOWARN);
+}
+
+static void __print_depot_stack(depot_stack_handle_t stack,
+ char *buf, int sz, int indent)
+{
+ unsigned long entries[STACKDEPTH];
+ struct stack_trace trace = {
+ .entries = entries,
+ .max_entries = ARRAY_SIZE(entries),
+ };
+
+ depot_fetch_stack(stack, &trace);
+ snprint_stack_trace(buf, sz, &trace, indent);
+}
+
+static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+{
+ struct i915_runtime_pm *rpm = &i915->runtime_pm;
+
+ spin_lock_init(&rpm->debug.lock);
+}
+
+static noinline depot_stack_handle_t
+track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+{
+ struct i915_runtime_pm *rpm = &i915->runtime_pm;
+ depot_stack_handle_t stack, *stacks;
+ unsigned long flags;
+
+ atomic_inc(&rpm->wakeref_count);
+ assert_rpm_wakelock_held(i915);
+
+ if (!HAS_RUNTIME_PM(i915))
+ return -1;
+
+ stack = __save_depot_stack();
+ if (!stack)
+ return -1;
+
+ spin_lock_irqsave(&rpm->debug.lock, flags);
+
+ if (!rpm->debug.count)
+ rpm->debug.last_acquire = stack;
+
+ stacks = krealloc(rpm->debug.owners,
+ (rpm->debug.count + 1) * sizeof(*stacks),
+ GFP_NOWAIT | __GFP_NOWARN);
+ if (stacks) {
+ stacks[rpm->debug.count++] = stack;
+ rpm->debug.owners = stacks;
+ } else {
+ stack = -1;
+ }
+
+ spin_unlock_irqrestore(&rpm->debug.lock, flags);
+
+ return stack;
+}
+
+static void cancel_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
+ depot_stack_handle_t stack)
+{
+ struct i915_runtime_pm *rpm = &i915->runtime_pm;
+ unsigned long flags, n;
+ bool found = false;
+
+ if (unlikely(stack == -1))
+ return;
+
+ spin_lock_irqsave(&rpm->debug.lock, flags);
+ for (n = rpm->debug.count; n--; ) {
+ if (rpm->debug.owners[n] == stack) {
+ memmove(rpm->debug.owners + n,
+ rpm->debug.owners + n + 1,
+ (--rpm->debug.count - n) * sizeof(stack));
+ found = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&rpm->debug.lock, flags);
+
+ if (WARN(!found,
+ "Unmatched wakeref (tracking %lu), count %u\n",
+ rpm->debug.count, atomic_read(&rpm->wakeref_count))) {
+ char *buf;
+
+ buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ __print_depot_stack(stack, buf, PAGE_SIZE, 2);
+ DRM_DEBUG_DRIVER("wakeref %x from\n%s", stack, buf);
+
+ stack = READ_ONCE(rpm->debug.last_release);
+ if (stack) {
+ __print_depot_stack(stack, buf, PAGE_SIZE, 2);
+ DRM_DEBUG_DRIVER("wakeref last released at\n%s", buf);
+ }
+
+ kfree(buf);
+ }
+}
+
+static int cmphandle(const void *_a, const void *_b)
+{
+ const depot_stack_handle_t * const a = _a, * const b = _b;
+
+ if (*a < *b)
+ return -1;
+ else if (*a > *b)
+ return 1;
+ else
+ return 0;
+}
+
+static void
+__print_intel_runtime_pm_wakeref(struct drm_printer *p,
+ const struct intel_runtime_pm_debug *dbg)
+{
+ unsigned long i;
+ char *buf;
+
+ buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ if (dbg->last_acquire) {
+ __print_depot_stack(dbg->last_acquire, buf, PAGE_SIZE, 2);
+ drm_printf(p, "Wakeref last acquired:\n%s", buf);
+ }
+
+ if (dbg->last_release) {
+ __print_depot_stack(dbg->last_release, buf, PAGE_SIZE, 2);
+ drm_printf(p, "Wakeref last released:\n%s", buf);
+ }
+
+ drm_printf(p, "Wakeref count: %lu\n", dbg->count);
+
+ sort(dbg->owners, dbg->count, sizeof(*dbg->owners), cmphandle, NULL);
+
+ for (i = 0; i < dbg->count; i++) {
+ depot_stack_handle_t stack = dbg->owners[i];
+ unsigned long rep;
+
+ rep = 1;
+ while (i + 1 < dbg->count && dbg->owners[i + 1] == stack)
+ rep++, i++;
+ __print_depot_stack(stack, buf, PAGE_SIZE, 2);
+ drm_printf(p, "Wakeref x%lu taken at:\n%s", rep, buf);
+ }
+
+ kfree(buf);
+}
+
+static noinline void
+untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+{
+ struct i915_runtime_pm *rpm = &i915->runtime_pm;
+ struct intel_runtime_pm_debug dbg = {};
+ struct drm_printer p;
+ unsigned long flags;
+
+ assert_rpm_wakelock_held(i915);
+ if (atomic_dec_and_lock_irqsave(&rpm->wakeref_count,
+ &rpm->debug.lock,
+ flags)) {
+ dbg = rpm->debug;
+
+ rpm->debug.owners = NULL;
+ rpm->debug.count = 0;
+ rpm->debug.last_release = __save_depot_stack();
+
+ spin_unlock_irqrestore(&rpm->debug.lock, flags);
+ }
+ if (!dbg.count)
+ return;
+
+ p = drm_debug_printer("i915");
+ __print_intel_runtime_pm_wakeref(&p, &dbg);
+
+ kfree(dbg.owners);
+}
+
+void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
+ struct drm_printer *p)
+{
+ struct intel_runtime_pm_debug dbg = {};
+
+ do {
+ struct i915_runtime_pm *rpm = &i915->runtime_pm;
+ unsigned long alloc = dbg.count;
+ depot_stack_handle_t *s;
+
+ spin_lock_irq(&rpm->debug.lock);
+ dbg.count = rpm->debug.count;
+ if (dbg.count <= alloc) {
+ memcpy(dbg.owners,
+ rpm->debug.owners,
+ dbg.count * sizeof(*s));
+ }
+ dbg.last_acquire = rpm->debug.last_acquire;
+ dbg.last_release = rpm->debug.last_release;
+ spin_unlock_irq(&rpm->debug.lock);
+ if (dbg.count <= alloc)
+ break;
+
+ s = krealloc(dbg.owners, dbg.count * sizeof(*s), GFP_KERNEL);
+ if (!s)
+ goto out;
+
+ dbg.owners = s;
+ } while (1);
+
+ __print_intel_runtime_pm_wakeref(p, &dbg);
+
+out:
+ kfree(dbg.owners);
+}
+
+#else
+
+static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+{
+}
+
+static depot_stack_handle_t
+track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+{
+ atomic_inc(&i915->runtime_pm.wakeref_count);
+ assert_rpm_wakelock_held(i915);
+ return -1;
+}
+
+static void untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+{
+ assert_rpm_wakelock_held(i915);
+ atomic_dec(&i915->runtime_pm.wakeref_count);
+}
+
+#endif
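A hedged usage sketch, not taken from the patch: callers are expected to keep the cookie returned by the tracked acquire and hand it back on release, which is what lets the CONFIG_DRM_I915_DEBUG_RUNTIME_PM code above pair every put with the stack recorded at get time.

/*
 * Illustrative calling pattern only (function names as introduced by
 * this series; the cookie type is intel_wakeref_t):
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_runtime_pm_get(i915);	// records a stack via
 *						// track_intel_runtime_pm_wakeref()
 *	... touch the hardware ...
 *	intel_runtime_pm_put(i915, wakeref);	// matched in
 *						// cancel_intel_runtime_pm_wakeref()
 *
 * An unmatched cookie trips the "Unmatched wakeref" warning emitted by
 * cancel_intel_runtime_pm_wakeref() above.
 */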
+
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
enum i915_power_well_id power_well_id);
@@ -509,7 +773,7 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
* BIOS's own request bits, which are forced-on for these power wells
* when exiting DC5/6.
*/
- if (IS_GEN9(dev_priv) && !IS_GEN9_LP(dev_priv) &&
+ if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
(id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
val |= I915_READ(regs->bios);
@@ -639,10 +903,10 @@ void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
* back on and register state is restored. This is guaranteed by the MMIO write
* to DC_STATE_EN blocking until the state is restored.
*/
-static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
+static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
{
- uint32_t val;
- uint32_t mask;
+ u32 val;
+ u32 mask;
if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
state &= dev_priv->csr.allowed_dc_mask;
@@ -1274,7 +1538,7 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
{
enum dpio_phy phy;
enum pipe pipe;
- uint32_t tmp;
+ u32 tmp;
WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
@@ -1591,18 +1855,19 @@ __intel_display_power_get_domain(struct drm_i915_private *dev_priv,
* Any power domain reference obtained by this function must have a symmetric
* call to intel_display_power_put() to release the reference again.
*/
-void intel_display_power_get(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain)
+intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
-
- intel_runtime_pm_get(dev_priv);
+ intel_wakeref_t wakeref = intel_runtime_pm_get(dev_priv);
mutex_lock(&power_domains->lock);
__intel_display_power_get_domain(dev_priv, domain);
mutex_unlock(&power_domains->lock);
+
+ return wakeref;
}
/**
@@ -1617,13 +1882,16 @@ void intel_display_power_get(struct drm_i915_private *dev_priv,
* Any power domain reference obtained by this function must have a symmetric
* call to intel_display_power_put() to release the reference again.
*/
-bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain)
+intel_wakeref_t
+intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ intel_wakeref_t wakeref;
bool is_enabled;
- if (!intel_runtime_pm_get_if_in_use(dev_priv))
+ wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
+ if (!wakeref)
return false;
mutex_lock(&power_domains->lock);
@@ -1637,23 +1905,16 @@ bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
mutex_unlock(&power_domains->lock);
- if (!is_enabled)
- intel_runtime_pm_put(dev_priv);
+ if (!is_enabled) {
+ intel_runtime_pm_put(dev_priv, wakeref);
+ wakeref = 0;
+ }
- return is_enabled;
+ return wakeref;
}
-/**
- * intel_display_power_put - release a power domain reference
- * @dev_priv: i915 device instance
- * @domain: power domain to reference
- *
- * This function drops the power domain reference obtained by
- * intel_display_power_get() and might power down the corresponding hardware
- * block right away if this is the last reference.
- */
-void intel_display_power_put(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain)
+static void __intel_display_power_put(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
{
struct i915_power_domains *power_domains;
struct i915_power_well *power_well;
@@ -1671,9 +1932,33 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
intel_power_well_put(dev_priv, power_well);
mutex_unlock(&power_domains->lock);
+}
+
+/**
+ * intel_display_power_put_unchecked - release a power domain reference
+ * @dev_priv: i915 device instance
+ * @domain: power domain to reference
+ *
+ * This function drops the power domain reference obtained by
+ * intel_display_power_get() and might power down the corresponding hardware
+ * block right away if this is the last reference.
+ */
+void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ __intel_display_power_put(dev_priv, domain);
+ intel_runtime_pm_put_unchecked(dev_priv);
+}
- intel_runtime_pm_put(dev_priv);
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+void intel_display_power_put(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain,
+ intel_wakeref_t wakeref)
+{
+ __intel_display_power_put(dev_priv, domain);
+ intel_runtime_pm_put(dev_priv, wakeref);
}
+#endif
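The hunk above splits the old put into intel_display_power_put_unchecked() and a wakeref-checked intel_display_power_put(); the sketch below shows the get/put pairing that callers are converted to later in this series. The helper name example_read_pipe_state() is illustrative only; the types and the two power-domain calls are taken from the hunks above.

/*
 * Minimal sketch of the checked pairing: the cookie returned by the get
 * side must be handed back to intel_display_power_put().
 */
static bool example_read_pipe_state(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain)
{
	intel_wakeref_t wakeref;
	bool enabled;

	/* Returns 0 when the domain is not already enabled; nothing to release then. */
	wakeref = intel_display_power_get_if_enabled(dev_priv, domain);
	if (!wakeref)
		return false;

	enabled = true; /* ... read hardware state while the domain is powered ... */

	intel_display_power_put(dev_priv, domain, wakeref);

	return enabled;
}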
#define I830_PIPES_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_PIPE_A) | \
@@ -3043,10 +3328,10 @@ sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
return 1;
}
-static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
- int enable_dc)
+static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
+ int enable_dc)
{
- uint32_t mask;
+ u32 mask;
int requested_dc;
int max_dc;
@@ -3058,7 +3343,7 @@ static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
* suspend/resume, so allow it unconditionally.
*/
mask = DC_STATE_EN_DC9;
- } else if (IS_GEN10(dev_priv) || IS_GEN9_BC(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) {
max_dc = 2;
mask = 0;
} else if (IS_GEN9_LP(dev_priv)) {
@@ -3311,7 +3596,7 @@ static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
static void icl_mbus_init(struct drm_i915_private *dev_priv)
{
- uint32_t val;
+ u32 val;
val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
MBUS_ABOX_BT_CREDIT_POOL2(16) |
@@ -3622,7 +3907,7 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
* current lane status.
*/
if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
- uint32_t status = I915_READ(DPLL(PIPE_A));
+ u32 status = I915_READ(DPLL(PIPE_A));
unsigned int mask;
mask = status & DPLL_PORTB_READY_MASK;
@@ -3653,7 +3938,7 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
}
if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
- uint32_t status = I915_READ(DPIO_PHY_STATUS);
+ u32 status = I915_READ(DPIO_PHY_STATUS);
unsigned int mask;
mask = status & DPLL_PORTD_READY_MASK;
@@ -3712,7 +3997,7 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
/**
* intel_power_domains_init_hw - initialize hardware power domain state
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
* @resume: Called from resume code paths or not
*
* This function initializes the hardware power domain state and enables all
@@ -3726,30 +4011,31 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
* intel_power_domains_enable()) and must be paired with
* intel_power_domains_fini_hw().
*/
-void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
+void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &i915->power_domains;
power_domains->initializing = true;
- if (IS_ICELAKE(dev_priv)) {
- icl_display_core_init(dev_priv, resume);
- } else if (IS_CANNONLAKE(dev_priv)) {
- cnl_display_core_init(dev_priv, resume);
- } else if (IS_GEN9_BC(dev_priv)) {
- skl_display_core_init(dev_priv, resume);
- } else if (IS_GEN9_LP(dev_priv)) {
- bxt_display_core_init(dev_priv, resume);
- } else if (IS_CHERRYVIEW(dev_priv)) {
+ if (IS_ICELAKE(i915)) {
+ icl_display_core_init(i915, resume);
+ } else if (IS_CANNONLAKE(i915)) {
+ cnl_display_core_init(i915, resume);
+ } else if (IS_GEN9_BC(i915)) {
+ skl_display_core_init(i915, resume);
+ } else if (IS_GEN9_LP(i915)) {
+ bxt_display_core_init(i915, resume);
+ } else if (IS_CHERRYVIEW(i915)) {
mutex_lock(&power_domains->lock);
- chv_phy_control_init(dev_priv);
+ chv_phy_control_init(i915);
mutex_unlock(&power_domains->lock);
- } else if (IS_VALLEYVIEW(dev_priv)) {
+ } else if (IS_VALLEYVIEW(i915)) {
mutex_lock(&power_domains->lock);
- vlv_cmnlane_wa(dev_priv);
+ vlv_cmnlane_wa(i915);
mutex_unlock(&power_domains->lock);
- } else if (IS_IVYBRIDGE(dev_priv) || INTEL_GEN(dev_priv) >= 7)
- intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
+ } else if (IS_IVYBRIDGE(i915) || INTEL_GEN(i915) >= 7) {
+ intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
+ }
/*
* Keep all power wells enabled for any dependent HW access during
@@ -3757,18 +4043,20 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
* resources powered until display HW readout is complete. We drop
* this reference in intel_power_domains_enable().
*/
- intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+ power_domains->wakeref =
+ intel_display_power_get(i915, POWER_DOMAIN_INIT);
+
/* Disable power support if the user asked so. */
if (!i915_modparams.disable_power_well)
- intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
- intel_power_domains_sync_hw(dev_priv);
+ intel_display_power_get(i915, POWER_DOMAIN_INIT);
+ intel_power_domains_sync_hw(i915);
power_domains->initializing = false;
}
/**
* intel_power_domains_fini_hw - deinitialize hw power domain state
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*
* De-initializes the display power domain HW state. It also ensures that the
* device stays powered up so that the driver can be reloaded.
@@ -3777,21 +4065,24 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
* intel_power_domains_disable()) and must be paired with
* intel_power_domains_init_hw().
*/
-void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv)
+void intel_power_domains_fini_hw(struct drm_i915_private *i915)
{
- /* Keep the power well enabled, but cancel its rpm wakeref. */
- intel_runtime_pm_put(dev_priv);
+ intel_wakeref_t wakeref __maybe_unused =
+ fetch_and_zero(&i915->power_domains.wakeref);
/* Remove the refcount we took to keep power well support disabled. */
if (!i915_modparams.disable_power_well)
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+ intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
+
+ intel_power_domains_verify_state(i915);
- intel_power_domains_verify_state(dev_priv);
+ /* Keep the power well enabled, but cancel its rpm wakeref. */
+ intel_runtime_pm_put(i915, wakeref);
}
/**
* intel_power_domains_enable - enable toggling of display power wells
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*
* Enable the on-demand enabling/disabling of the display power wells. Note that
* power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
@@ -3801,30 +4092,36 @@ void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv)
* of display HW readout (which will acquire the power references reflecting
* the current HW state).
*/
-void intel_power_domains_enable(struct drm_i915_private *dev_priv)
+void intel_power_domains_enable(struct drm_i915_private *i915)
{
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+ intel_wakeref_t wakeref __maybe_unused =
+ fetch_and_zero(&i915->power_domains.wakeref);
- intel_power_domains_verify_state(dev_priv);
+ intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
+ intel_power_domains_verify_state(i915);
}
/**
* intel_power_domains_disable - disable toggling of display power wells
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*
* Disable the on-demand enabling/disabling of the display power wells. See
* intel_power_domains_enable() for which power wells this call controls.
*/
-void intel_power_domains_disable(struct drm_i915_private *dev_priv)
+void intel_power_domains_disable(struct drm_i915_private *i915)
{
- intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+ struct i915_power_domains *power_domains = &i915->power_domains;
- intel_power_domains_verify_state(dev_priv);
+ WARN_ON(power_domains->wakeref);
+ power_domains->wakeref =
+ intel_display_power_get(i915, POWER_DOMAIN_INIT);
+
+ intel_power_domains_verify_state(i915);
}
/**
* intel_power_domains_suspend - suspend power domain state
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
* @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
*
* This function prepares the hardware power domain state before entering
@@ -3833,12 +4130,14 @@ void intel_power_domains_disable(struct drm_i915_private *dev_priv)
* It must be called with power domains already disabled (after a call to
* intel_power_domains_disable()) and paired with intel_power_domains_resume().
*/
-void intel_power_domains_suspend(struct drm_i915_private *dev_priv,
+void intel_power_domains_suspend(struct drm_i915_private *i915,
enum i915_drm_suspend_mode suspend_mode)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &i915->power_domains;
+ intel_wakeref_t wakeref __maybe_unused =
+ fetch_and_zero(&power_domains->wakeref);
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+ intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
/*
* In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
@@ -3847,10 +4146,10 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv,
* resources as required and also enable deeper system power states
* that would be blocked if the firmware was inactive.
*/
- if (!(dev_priv->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
+ if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
suspend_mode == I915_DRM_SUSPEND_IDLE &&
- dev_priv->csr.dmc_payload != NULL) {
- intel_power_domains_verify_state(dev_priv);
+ i915->csr.dmc_payload) {
+ intel_power_domains_verify_state(i915);
return;
}
@@ -3859,25 +4158,25 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv,
* power wells if power domains must be deinitialized for suspend.
*/
if (!i915_modparams.disable_power_well) {
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
- intel_power_domains_verify_state(dev_priv);
+ intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
+ intel_power_domains_verify_state(i915);
}
- if (IS_ICELAKE(dev_priv))
- icl_display_core_uninit(dev_priv);
- else if (IS_CANNONLAKE(dev_priv))
- cnl_display_core_uninit(dev_priv);
- else if (IS_GEN9_BC(dev_priv))
- skl_display_core_uninit(dev_priv);
- else if (IS_GEN9_LP(dev_priv))
- bxt_display_core_uninit(dev_priv);
+ if (IS_ICELAKE(i915))
+ icl_display_core_uninit(i915);
+ else if (IS_CANNONLAKE(i915))
+ cnl_display_core_uninit(i915);
+ else if (IS_GEN9_BC(i915))
+ skl_display_core_uninit(i915);
+ else if (IS_GEN9_LP(i915))
+ bxt_display_core_uninit(i915);
power_domains->display_core_suspended = true;
}
/**
* intel_power_domains_resume - resume power domain state
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*
* This function resumes the hardware power domain state during system resume.
*
@@ -3885,28 +4184,30 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv,
* intel_power_domains_enable()) and must be paired with
* intel_power_domains_suspend().
*/
-void intel_power_domains_resume(struct drm_i915_private *dev_priv)
+void intel_power_domains_resume(struct drm_i915_private *i915)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &i915->power_domains;
if (power_domains->display_core_suspended) {
- intel_power_domains_init_hw(dev_priv, true);
+ intel_power_domains_init_hw(i915, true);
power_domains->display_core_suspended = false;
} else {
- intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+ WARN_ON(power_domains->wakeref);
+ power_domains->wakeref =
+ intel_display_power_get(i915, POWER_DOMAIN_INIT);
}
- intel_power_domains_verify_state(dev_priv);
+ intel_power_domains_verify_state(i915);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
-static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv)
+static void intel_power_domains_dump_info(struct drm_i915_private *i915)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &i915->power_domains;
struct i915_power_well *power_well;
- for_each_power_well(dev_priv, power_well) {
+ for_each_power_well(i915, power_well) {
enum intel_display_power_domain domain;
DRM_DEBUG_DRIVER("%-25s %d\n",
@@ -3921,7 +4222,7 @@ static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv)
/**
* intel_power_domains_verify_state - verify the HW/SW state for all power wells
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*
* Verify if the reference count of each power well matches its HW enabled
* state and the total refcount of the domains it belongs to. This must be
@@ -3929,22 +4230,21 @@ static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv)
* acquiring reference counts for any power wells in use and disabling the
* ones left on by BIOS but not required by any active output.
*/
-static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
+static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &i915->power_domains;
struct i915_power_well *power_well;
bool dump_domain_info;
mutex_lock(&power_domains->lock);
dump_domain_info = false;
- for_each_power_well(dev_priv, power_well) {
+ for_each_power_well(i915, power_well) {
enum intel_display_power_domain domain;
int domains_count;
bool enabled;
- enabled = power_well->desc->ops->is_enabled(dev_priv,
- power_well);
+ enabled = power_well->desc->ops->is_enabled(i915, power_well);
if ((power_well->count || power_well->desc->always_on) !=
enabled)
DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)",
@@ -3968,7 +4268,7 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
static bool dumped;
if (!dumped) {
- intel_power_domains_dump_info(dev_priv);
+ intel_power_domains_dump_info(i915);
dumped = true;
}
}
@@ -3978,7 +4278,7 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
#else
-static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
+static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
}
@@ -3986,30 +4286,31 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
/**
* intel_runtime_pm_get - grab a runtime pm reference
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*
* This function grabs a device-level runtime pm reference (mostly used for GEM
* code to ensure the GTT or GT is on) and ensures that it is powered up.
*
* Any runtime pm reference obtained by this function must have a symmetric
* call to intel_runtime_pm_put() to release the reference again.
+ *
+ * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
*/
-void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
+intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = i915->drm.pdev;
struct device *kdev = &pdev->dev;
int ret;
ret = pm_runtime_get_sync(kdev);
WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
- atomic_inc(&dev_priv->runtime_pm.wakeref_count);
- assert_rpm_wakelock_held(dev_priv);
+ return track_intel_runtime_pm_wakeref(i915);
}
/**
* intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*
* This function grabs a device-level runtime pm reference if the device is
* already in use and ensures that it is powered up. It is illegal to try
@@ -4018,12 +4319,13 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
* Any runtime pm reference obtained by this function must have a symmetric
* call to intel_runtime_pm_put() to release the reference again.
*
- * Returns: True if the wakeref was acquired, or False otherwise.
+ * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates
+ * as True if the wakeref was acquired, or False otherwise.
*/
-bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
+intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915)
{
if (IS_ENABLED(CONFIG_PM)) {
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = i915->drm.pdev;
struct device *kdev = &pdev->dev;
/*
@@ -4033,18 +4335,15 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
* atm to the late/early system suspend/resume handlers.
*/
if (pm_runtime_get_if_in_use(kdev) <= 0)
- return false;
+ return 0;
}
- atomic_inc(&dev_priv->runtime_pm.wakeref_count);
- assert_rpm_wakelock_held(dev_priv);
-
- return true;
+ return track_intel_runtime_pm_wakeref(i915);
}
/**
* intel_runtime_pm_get_noresume - grab a runtime pm reference
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*
* This function grabs a device-level runtime pm reference (mostly used for GEM
* code to ensure the GTT or GT is on).
@@ -4058,41 +4357,50 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
*
* Any runtime pm reference obtained by this function must have a symmetric
* call to intel_runtime_pm_put() to release the reference again.
+ *
+ * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
*/
-void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
+intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = i915->drm.pdev;
struct device *kdev = &pdev->dev;
- assert_rpm_wakelock_held(dev_priv);
+ assert_rpm_wakelock_held(i915);
pm_runtime_get_noresume(kdev);
- atomic_inc(&dev_priv->runtime_pm.wakeref_count);
+ return track_intel_runtime_pm_wakeref(i915);
}
/**
* intel_runtime_pm_put - release a runtime pm reference
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*
* This function drops the device-level runtime pm reference obtained by
* intel_runtime_pm_get() and might power down the corresponding
* hardware block right away if this is the last reference.
*/
-void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
+void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = i915->drm.pdev;
struct device *kdev = &pdev->dev;
- assert_rpm_wakelock_held(dev_priv);
- atomic_dec(&dev_priv->runtime_pm.wakeref_count);
+ untrack_intel_runtime_pm_wakeref(i915);
pm_runtime_mark_last_busy(kdev);
pm_runtime_put_autosuspend(kdev);
}
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref)
+{
+ cancel_intel_runtime_pm_wakeref(i915, wref);
+ intel_runtime_pm_put_unchecked(i915);
+}
+#endif
+
/**
* intel_runtime_pm_enable - enable runtime pm
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*
* This function enables runtime pm at the end of the driver load sequence.
*
@@ -4100,9 +4408,9 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
* subordinate display power domains. That is done by
* intel_power_domains_enable().
*/
-void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
+void intel_runtime_pm_enable(struct drm_i915_private *i915)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = i915->drm.pdev;
struct device *kdev = &pdev->dev;
/*
@@ -4124,7 +4432,7 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
* so the driver's own RPM reference tracking asserts also work on
* platforms without RPM support.
*/
- if (!HAS_RUNTIME_PM(dev_priv)) {
+ if (!HAS_RUNTIME_PM(i915)) {
int ret;
pm_runtime_dont_use_autosuspend(kdev);
@@ -4142,17 +4450,35 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
pm_runtime_put_autosuspend(kdev);
}
-void intel_runtime_pm_disable(struct drm_i915_private *dev_priv)
+void intel_runtime_pm_disable(struct drm_i915_private *i915)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = i915->drm.pdev;
struct device *kdev = &pdev->dev;
/* Transfer rpm ownership back to core */
- WARN(pm_runtime_get_sync(&dev_priv->drm.pdev->dev) < 0,
+ WARN(pm_runtime_get_sync(kdev) < 0,
"Failed to pass rpm ownership back to core\n");
pm_runtime_dont_use_autosuspend(kdev);
- if (!HAS_RUNTIME_PM(dev_priv))
+ if (!HAS_RUNTIME_PM(i915))
pm_runtime_put(kdev);
}
+
+void intel_runtime_pm_cleanup(struct drm_i915_private *i915)
+{
+ struct i915_runtime_pm *rpm = &i915->runtime_pm;
+ int count;
+
+ count = atomic_fetch_inc(&rpm->wakeref_count); /* balance untrack */
+ WARN(count,
+ "i915->runtime_pm.wakeref_count=%d on cleanup\n",
+ count);
+
+ untrack_intel_runtime_pm_wakeref(i915);
+}
+
+void intel_runtime_pm_init_early(struct drm_i915_private *i915)
+{
+ init_intel_runtime_pm_wakeref(i915);
+}
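With intel_runtime_pm_get() now returning an intel_wakeref_t cookie and intel_runtime_pm_put() consuming it (the unchecked variant remains for legacy callers), a typical acquire/release pair looks like the sketch below. example_with_runtime_pm() is an illustrative name; the two calls are the ones changed in the hunks above.

/* Minimal sketch: runtime PM references are tracked per-cookie in debug builds. */
static void example_with_runtime_pm(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(i915);	/* wakes the device if needed */

	/* ... access the hardware while the wakeref is held ... */

	/* Pass the cookie back so CONFIG_DRM_I915_DEBUG_RUNTIME_PM can verify pairing. */
	intel_runtime_pm_put(i915, wakeref);
}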
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 5805ec1aba12..e7b0884ba5a5 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -29,7 +29,6 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/export.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
@@ -77,7 +76,7 @@ struct intel_sdvo {
i915_reg_t sdvo_reg;
/* Active outputs controlled by this SDVO output */
- uint16_t controlled_output;
+ u16 controlled_output;
/*
* Capabilities of the SDVO device returned by
@@ -92,33 +91,32 @@ struct intel_sdvo {
* For multiple function SDVO device,
* this is for current attached outputs.
*/
- uint16_t attached_output;
+ u16 attached_output;
/*
* Hotplug activation bits for this device
*/
- uint16_t hotplug_active;
+ u16 hotplug_active;
enum port port;
bool has_hdmi_monitor;
bool has_hdmi_audio;
- bool rgb_quant_range_selectable;
/* DDC bus used by this SDVO encoder */
- uint8_t ddc_bus;
+ u8 ddc_bus;
/*
* the sdvo flag gets lost in round trip: dtd->adjusted_mode->dtd
*/
- uint8_t dtd_sdvo_flags;
+ u8 dtd_sdvo_flags;
};
struct intel_sdvo_connector {
struct intel_connector base;
/* Mark the type of connector */
- uint16_t output_flag;
+ u16 output_flag;
/* This contains all current supported TV format */
u8 tv_format_supported[TV_FORMAT_NUM];
@@ -186,7 +184,7 @@ to_intel_sdvo_connector(struct drm_connector *connector)
container_of((conn_state), struct intel_sdvo_connector_state, base.base)
static bool
-intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags);
+intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags);
static bool
intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector *intel_sdvo_connector,
@@ -748,9 +746,9 @@ static bool intel_sdvo_get_input_timing(struct intel_sdvo *intel_sdvo,
static bool
intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector *intel_sdvo_connector,
- uint16_t clock,
- uint16_t width,
- uint16_t height)
+ u16 clock,
+ u16 width,
+ u16 height)
{
struct intel_sdvo_preferred_input_timing_args args;
@@ -793,9 +791,9 @@ static bool intel_sdvo_set_clock_rate_mult(struct intel_sdvo *intel_sdvo, u8 val
static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
const struct drm_display_mode *mode)
{
- uint16_t width, height;
- uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len;
- uint16_t h_sync_offset, v_sync_offset;
+ u16 width, height;
+ u16 h_blank_len, h_sync_len, v_blank_len, v_sync_len;
+ u16 h_sync_offset, v_sync_offset;
int mode_clock;
memset(dtd, 0, sizeof(*dtd));
@@ -900,13 +898,13 @@ static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo)
}
static bool intel_sdvo_set_encode(struct intel_sdvo *intel_sdvo,
- uint8_t mode)
+ u8 mode)
{
return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_ENCODE, &mode, 1);
}
static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo,
- uint8_t mode)
+ u8 mode)
{
return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
}
@@ -915,11 +913,11 @@ static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo,
static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
{
int i, j;
- uint8_t set_buf_index[2];
- uint8_t av_split;
- uint8_t buf_size;
- uint8_t buf[48];
- uint8_t *pos;
+ u8 set_buf_index[2];
+ u8 av_split;
+ u8 buf_size;
+ u8 buf[48];
+ u8 *pos;
intel_sdvo_get_value(encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, &av_split, 1);
@@ -942,11 +940,11 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
#endif
static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
- unsigned if_index, uint8_t tx_rate,
- const uint8_t *data, unsigned length)
+ unsigned int if_index, u8 tx_rate,
+ const u8 *data, unsigned int length)
{
- uint8_t set_buf_index[2] = { if_index, 0 };
- uint8_t hbuf_size, tmp[8];
+ u8 set_buf_index[2] = { if_index, 0 };
+ u8 hbuf_size, tmp[8];
int i;
if (!intel_sdvo_set_value(intel_sdvo,
@@ -981,29 +979,30 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
}
static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
- const struct intel_crtc_state *pipe_config)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
- uint8_t sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
+ const struct drm_display_mode *adjusted_mode =
+ &pipe_config->base.adjusted_mode;
+ u8 sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
union hdmi_infoframe frame;
int ret;
ssize_t len;
ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
- &pipe_config->base.adjusted_mode,
- false);
+ conn_state->connector,
+ adjusted_mode);
if (ret < 0) {
DRM_ERROR("couldn't fill AVI infoframe\n");
return false;
}
- if (intel_sdvo->rgb_quant_range_selectable) {
- if (pipe_config->limited_color_range)
- frame.avi.quantization_range =
- HDMI_QUANTIZATION_RANGE_LIMITED;
- else
- frame.avi.quantization_range =
- HDMI_QUANTIZATION_RANGE_FULL;
- }
+ drm_hdmi_avi_infoframe_quant_range(&frame.avi,
+ conn_state->connector,
+ adjusted_mode,
+ pipe_config->limited_color_range ?
+ HDMI_QUANTIZATION_RANGE_LIMITED :
+ HDMI_QUANTIZATION_RANGE_FULL);
len = hdmi_infoframe_pack(&frame, sdvo_data, sizeof(sdvo_data));
if (len < 0)
@@ -1018,7 +1017,7 @@ static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo,
const struct drm_connector_state *conn_state)
{
struct intel_sdvo_tv_format format;
- uint32_t format_map;
+ u32 format_map;
format_map = 1 << conn_state->tv.mode;
memset(&format, 0, sizeof(format));
@@ -1108,9 +1107,9 @@ static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_state *pipe_config)
pipe_config->clock_set = true;
}
-static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+static int intel_sdvo_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
struct intel_sdvo_connector_state *intel_sdvo_state =
@@ -1135,7 +1134,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
*/
if (IS_TV(intel_sdvo_connector)) {
if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode))
- return false;
+ return -EINVAL;
(void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
intel_sdvo_connector,
@@ -1145,7 +1144,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
} else if (IS_LVDS(intel_sdvo_connector)) {
if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
intel_sdvo_connector->base.panel.fixed_mode))
- return false;
+ return -EINVAL;
(void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
intel_sdvo_connector,
@@ -1154,7 +1153,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
}
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return false;
+ return -EINVAL;
/*
* Make the CRTC code factor in the SDVO pixel multiplier. The
@@ -1194,7 +1193,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
if (intel_sdvo_connector->is_hdmi)
adjusted_mode->picture_aspect_ratio = conn_state->picture_aspect_ratio;
- return true;
+ return 0;
}
#define UPDATE_PROPERTY(input, NAME) \
@@ -1209,7 +1208,7 @@ static void intel_sdvo_update_props(struct intel_sdvo *intel_sdvo,
const struct drm_connector_state *conn_state = &sdvo_state->base.base;
struct intel_sdvo_connector *intel_sdvo_conn =
to_intel_sdvo_connector(conn_state->connector);
- uint16_t val;
+ u16 val;
if (intel_sdvo_conn->left)
UPDATE_PROPERTY(sdvo_state->tv.overscan_h, OVERSCAN_H);
@@ -1316,7 +1315,8 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
intel_sdvo_set_colorimetry(intel_sdvo,
SDVO_COLORIMETRY_RGB256);
- intel_sdvo_set_avi_infoframe(intel_sdvo, crtc_state);
+ intel_sdvo_set_avi_infoframe(intel_sdvo,
+ crtc_state, conn_state);
} else
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);
@@ -1692,10 +1692,10 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in
return true;
}
-static uint16_t intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
+static u16 intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
{
struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev);
- uint16_t hotplug;
+ u16 hotplug;
if (!I915_HAS_HOTPLUG(dev_priv))
return 0;
@@ -1802,8 +1802,6 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
if (intel_sdvo_connector->is_hdmi) {
intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
- intel_sdvo->rgb_quant_range_selectable =
- drm_rgb_quant_range_selectable(edid);
}
} else
status = connector_status_disconnected;
@@ -1828,7 +1826,7 @@ intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo,
static enum drm_connector_status
intel_sdvo_detect(struct drm_connector *connector, bool force)
{
- uint16_t response;
+ u16 response;
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
enum drm_connector_status ret;
@@ -1852,7 +1850,6 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
intel_sdvo->has_hdmi_monitor = false;
intel_sdvo->has_hdmi_audio = false;
- intel_sdvo->rgb_quant_range_selectable = false;
if ((intel_sdvo_connector->output_flag & response) == 0)
ret = connector_status_disconnected;
@@ -1980,7 +1977,7 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
const struct drm_connector_state *conn_state = connector->state;
struct intel_sdvo_sdtv_resolution_request tv_res;
- uint32_t reply = 0, format_map = 0;
+ u32 reply = 0, format_map = 0;
int i;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
@@ -2065,7 +2062,7 @@ static int
intel_sdvo_connector_atomic_get_property(struct drm_connector *connector,
const struct drm_connector_state *state,
struct drm_property *property,
- uint64_t *val)
+ u64 *val)
{
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
const struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state((void *)state);
@@ -2124,7 +2121,7 @@ static int
intel_sdvo_connector_atomic_set_property(struct drm_connector *connector,
struct drm_connector_state *state,
struct drm_property *property,
- uint64_t val)
+ u64 val)
{
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state(state);
@@ -2273,7 +2270,7 @@ static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
static void
intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo)
{
- uint16_t mask = 0;
+ u16 mask = 0;
unsigned int num_bits;
/*
@@ -2674,7 +2671,7 @@ err:
}
static bool
-intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
+intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags)
{
/* SDVO requires that the XXX1 function may not exist unless it also has the XXX0 function. */
@@ -2750,7 +2747,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
{
struct drm_device *dev = intel_sdvo->base.base.dev;
struct intel_sdvo_tv_format format;
- uint32_t format_map, i;
+ u32 format_map, i;
if (!intel_sdvo_set_target_output(intel_sdvo, type))
return false;
@@ -2817,7 +2814,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
struct drm_connector_state *conn_state = connector->state;
struct intel_sdvo_connector_state *sdvo_state =
to_intel_sdvo_connector_state(conn_state);
- uint16_t response, data_value[2];
+ u16 response, data_value[2];
/* when horizontal overscan is supported, add the left/right property */
if (enhancements.overscan_h) {
@@ -2928,7 +2925,7 @@ intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *intel_sdvo,
{
struct drm_device *dev = intel_sdvo->base.base.dev;
struct drm_connector *connector = &intel_sdvo_connector->base.base;
- uint16_t response, data_value[2];
+ u16 response, data_value[2];
ENHANCEMENT(&connector->state->tv, brightness, BRIGHTNESS);
@@ -2942,7 +2939,7 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
{
union {
struct intel_sdvo_enhancements_reply reply;
- uint16_t response;
+ u16 response;
} enhancements;
BUILD_BUG_ON(sizeof(enhancements) != 2);
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 5170a0f5fe7b..b56a1a9ad01d 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -29,7 +29,6 @@
* registers; newer ones are much simpler and we can use the new DRM plane
* support.
*/
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
@@ -322,8 +321,8 @@ skl_program_scaler(struct intel_plane *plane,
&crtc_state->scaler_state.scalers[scaler_id];
int crtc_x = plane_state->base.dst.x1;
int crtc_y = plane_state->base.dst.y1;
- uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
- uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
+ u32 crtc_w = drm_rect_width(&plane_state->base.dst);
+ u32 crtc_h = drm_rect_height(&plane_state->base.dst);
u16 y_hphase, uv_rgb_hphase;
u16 y_vphase, uv_rgb_vphase;
int hscale, vscale;
@@ -478,16 +477,23 @@ skl_program_plane(struct intel_plane *plane,
u32 aux_stride = skl_plane_stride(plane_state, 1);
int crtc_x = plane_state->base.dst.x1;
int crtc_y = plane_state->base.dst.y1;
- uint32_t x = plane_state->color_plane[color_plane].x;
- uint32_t y = plane_state->color_plane[color_plane].y;
- uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
- uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
+ u32 x = plane_state->color_plane[color_plane].x;
+ u32 y = plane_state->color_plane[color_plane].y;
+ u32 src_w = drm_rect_width(&plane_state->base.src) >> 16;
+ u32 src_h = drm_rect_height(&plane_state->base.src) >> 16;
struct intel_plane *linked = plane_state->linked_plane;
const struct drm_framebuffer *fb = plane_state->base.fb;
u8 alpha = plane_state->base.alpha >> 8;
+ u32 plane_color_ctl = 0;
unsigned long irqflags;
u32 keymsk, keymax;
+ plane_ctl |= skl_plane_ctl_crtc(crtc_state);
+
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ plane_color_ctl = plane_state->color_ctl |
+ glk_plane_color_ctl_crtc(crtc_state);
+
/* Sizes are 0 based */
src_w--;
src_h--;
@@ -534,8 +540,7 @@ skl_program_plane(struct intel_plane *plane,
}
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
- I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id),
- plane_state->color_ctl);
+ I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl);
if (fb->format->is_yuv && icl_is_hdr_plane(plane))
icl_program_input_csc(plane, crtc_state, plane_state);
@@ -619,17 +624,19 @@ skl_plane_get_hw_state(struct intel_plane *plane,
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
enum plane_id plane_id = plane->id;
+ intel_wakeref_t wakeref;
bool ret;
power_domain = POWER_DOMAIN_PIPE(plane->pipe);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
return false;
ret = I915_READ(PLANE_CTL(plane->pipe, plane_id)) & PLANE_CTL_ENABLE;
*pipe = plane->pipe;
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, power_domain, wakeref);
return ret;
}
@@ -732,6 +739,11 @@ vlv_update_clrc(const struct intel_plane_state *plane_state)
SP_SH_SIN(sh_sin) | SP_SH_COS(sh_cos));
}
+static u32 vlv_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+ return SP_GAMMA_ENABLE;
+}
+
static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
@@ -740,7 +752,7 @@ static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
u32 sprctl;
- sprctl = SP_ENABLE | SP_GAMMA_ENABLE;
+ sprctl = SP_ENABLE;
switch (fb->format->format) {
case DRM_FORMAT_YUYV:
@@ -807,17 +819,19 @@ vlv_update_plane(struct intel_plane *plane,
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
enum plane_id plane_id = plane->id;
- u32 sprctl = plane_state->ctl;
u32 sprsurf_offset = plane_state->color_plane[0].offset;
u32 linear_offset;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
int crtc_x = plane_state->base.dst.x1;
int crtc_y = plane_state->base.dst.y1;
- uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
- uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
- uint32_t x = plane_state->color_plane[0].x;
- uint32_t y = plane_state->color_plane[0].y;
+ u32 crtc_w = drm_rect_width(&plane_state->base.dst);
+ u32 crtc_h = drm_rect_height(&plane_state->base.dst);
+ u32 x = plane_state->color_plane[0].x;
+ u32 y = plane_state->color_plane[0].y;
unsigned long irqflags;
+ u32 sprctl;
+
+ sprctl = plane_state->ctl | vlv_sprite_ctl_crtc(crtc_state);
/* Sizes are 0 based */
crtc_w--;
@@ -883,21 +897,36 @@ vlv_plane_get_hw_state(struct intel_plane *plane,
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
enum plane_id plane_id = plane->id;
+ intel_wakeref_t wakeref;
bool ret;
power_domain = POWER_DOMAIN_PIPE(plane->pipe);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
return false;
ret = I915_READ(SPCNTR(plane->pipe, plane_id)) & SP_ENABLE;
*pipe = plane->pipe;
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, power_domain, wakeref);
return ret;
}
+static u32 ivb_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ u32 sprctl = 0;
+
+ sprctl |= SPRITE_GAMMA_ENABLE;
+
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ sprctl |= SPRITE_PIPE_CSC_ENABLE;
+
+ return sprctl;
+}
+
static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
@@ -908,14 +937,11 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
u32 sprctl;
- sprctl = SPRITE_ENABLE | SPRITE_GAMMA_ENABLE;
+ sprctl = SPRITE_ENABLE;
if (IS_IVYBRIDGE(dev_priv))
sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- sprctl |= SPRITE_PIPE_CSC_ENABLE;
-
switch (fb->format->format) {
case DRM_FORMAT_XBGR8888:
sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
@@ -967,20 +993,22 @@ ivb_update_plane(struct intel_plane *plane,
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
- u32 sprctl = plane_state->ctl, sprscale = 0;
u32 sprsurf_offset = plane_state->color_plane[0].offset;
u32 linear_offset;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
int crtc_x = plane_state->base.dst.x1;
int crtc_y = plane_state->base.dst.y1;
- uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
- uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
- uint32_t x = plane_state->color_plane[0].x;
- uint32_t y = plane_state->color_plane[0].y;
- uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
- uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
+ u32 crtc_w = drm_rect_width(&plane_state->base.dst);
+ u32 crtc_h = drm_rect_height(&plane_state->base.dst);
+ u32 x = plane_state->color_plane[0].x;
+ u32 y = plane_state->color_plane[0].y;
+ u32 src_w = drm_rect_width(&plane_state->base.src) >> 16;
+ u32 src_h = drm_rect_height(&plane_state->base.src) >> 16;
+ u32 sprctl, sprscale = 0;
unsigned long irqflags;
+ sprctl = plane_state->ctl | ivb_sprite_ctl_crtc(crtc_state);
+
/* Sizes are 0 based */
src_w--;
src_h--;
@@ -1052,17 +1080,19 @@ ivb_plane_get_hw_state(struct intel_plane *plane,
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
+ intel_wakeref_t wakeref;
bool ret;
power_domain = POWER_DOMAIN_PIPE(plane->pipe);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
return false;
ret = I915_READ(SPRCTL(plane->pipe)) & SPRITE_ENABLE;
*pipe = plane->pipe;
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, power_domain, wakeref);
return ret;
}
@@ -1075,6 +1105,11 @@ g4x_sprite_max_stride(struct intel_plane *plane,
return 16384;
}
+static u32 g4x_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+ return DVS_GAMMA_ENABLE;
+}
+
static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
@@ -1085,9 +1120,9 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
u32 dvscntr;
- dvscntr = DVS_ENABLE | DVS_GAMMA_ENABLE;
+ dvscntr = DVS_ENABLE;
- if (IS_GEN6(dev_priv))
+ if (IS_GEN(dev_priv, 6))
dvscntr |= DVS_TRICKLE_FEED_DISABLE;
switch (fb->format->format) {
@@ -1141,20 +1176,22 @@ g4x_update_plane(struct intel_plane *plane,
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
- u32 dvscntr = plane_state->ctl, dvsscale = 0;
u32 dvssurf_offset = plane_state->color_plane[0].offset;
u32 linear_offset;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
int crtc_x = plane_state->base.dst.x1;
int crtc_y = plane_state->base.dst.y1;
- uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
- uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
- uint32_t x = plane_state->color_plane[0].x;
- uint32_t y = plane_state->color_plane[0].y;
- uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
- uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
+ u32 crtc_w = drm_rect_width(&plane_state->base.dst);
+ u32 crtc_h = drm_rect_height(&plane_state->base.dst);
+ u32 x = plane_state->color_plane[0].x;
+ u32 y = plane_state->color_plane[0].y;
+ u32 src_w = drm_rect_width(&plane_state->base.src) >> 16;
+ u32 src_h = drm_rect_height(&plane_state->base.src) >> 16;
+ u32 dvscntr, dvsscale = 0;
unsigned long irqflags;
+ dvscntr = plane_state->ctl | g4x_sprite_ctl_crtc(crtc_state);
+
/* Sizes are 0 based */
src_w--;
src_h--;
@@ -1218,17 +1255,19 @@ g4x_plane_get_hw_state(struct intel_plane *plane,
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
+ intel_wakeref_t wakeref;
bool ret;
power_domain = POWER_DOMAIN_PIPE(plane->pipe);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
return false;
ret = I915_READ(DVSCNTR(plane->pipe)) & DVS_ENABLE;
*pipe = plane->pipe;
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, power_domain, wakeref);
return ret;
}
@@ -1699,7 +1738,7 @@ out:
return ret;
}
-static const uint32_t g4x_plane_formats[] = {
+static const u32 g4x_plane_formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
@@ -1707,13 +1746,13 @@ static const uint32_t g4x_plane_formats[] = {
DRM_FORMAT_VYUY,
};
-static const uint64_t i9xx_plane_format_modifiers[] = {
+static const u64 i9xx_plane_format_modifiers[] = {
I915_FORMAT_MOD_X_TILED,
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
-static const uint32_t snb_plane_formats[] = {
+static const u32 snb_plane_formats[] = {
DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_YUYV,
@@ -1722,7 +1761,7 @@ static const uint32_t snb_plane_formats[] = {
DRM_FORMAT_VYUY,
};
-static const uint32_t vlv_plane_formats[] = {
+static const u32 vlv_plane_formats[] = {
DRM_FORMAT_RGB565,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_ARGB8888,
@@ -1736,7 +1775,7 @@ static const uint32_t vlv_plane_formats[] = {
DRM_FORMAT_VYUY,
};
-static const uint32_t skl_plane_formats[] = {
+static const u32 skl_plane_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
@@ -1751,7 +1790,7 @@ static const uint32_t skl_plane_formats[] = {
DRM_FORMAT_VYUY,
};
-static const uint32_t skl_planar_formats[] = {
+static const u32 skl_planar_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
@@ -1767,7 +1806,7 @@ static const uint32_t skl_planar_formats[] = {
DRM_FORMAT_NV12,
};
-static const uint64_t skl_plane_format_modifiers_noccs[] = {
+static const u64 skl_plane_format_modifiers_noccs[] = {
I915_FORMAT_MOD_Yf_TILED,
I915_FORMAT_MOD_Y_TILED,
I915_FORMAT_MOD_X_TILED,
@@ -1775,7 +1814,7 @@ static const uint64_t skl_plane_format_modifiers_noccs[] = {
DRM_FORMAT_MOD_INVALID
};
-static const uint64_t skl_plane_format_modifiers_ccs[] = {
+static const u64 skl_plane_format_modifiers_ccs[] = {
I915_FORMAT_MOD_Yf_TILED_CCS,
I915_FORMAT_MOD_Y_TILED_CCS,
I915_FORMAT_MOD_Yf_TILED,
@@ -1983,7 +2022,7 @@ static bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
return false;
- if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C)
+ if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C)
return false;
if (plane_id != PLANE_PRIMARY && plane_id != PLANE_SPRITE0)
@@ -2163,7 +2202,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane->check_plane = g4x_sprite_check;
modifiers = i9xx_plane_format_modifiers;
- if (IS_GEN6(dev_priv)) {
+ if (IS_GEN(dev_priv, 6)) {
formats = snb_plane_formats;
num_formats = ARRAY_SIZE(snb_plane_formats);
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 860f306a23ba..3924c4944e1f 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -30,7 +30,6 @@
* Integrated TV-out support for the 915GM and 945GM.
*/
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
@@ -307,7 +306,7 @@ struct tv_mode {
u32 clock;
u16 refresh; /* in millihertz (for precision) */
- u32 oversample;
+ u8 oversample;
u8 hsync_end;
u16 hblank_start, hblank_end, htotal;
bool progressive : 1, trilevel_sync : 1, component_only : 1;
@@ -340,7 +339,6 @@ struct tv_mode {
const struct video_levels *composite_levels, *svideo_levels;
const struct color_conversion *composite_color, *svideo_color;
const u32 *filter_table;
- u16 max_srcw;
};
@@ -379,8 +377,8 @@ static const struct tv_mode tv_modes[] = {
.name = "NTSC-M",
.clock = 108000,
.refresh = 59940,
- .oversample = TV_OVERSAMPLE_8X,
- .component_only = 0,
+ .oversample = 8,
+ .component_only = false,
/* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */
.hsync_end = 64, .hblank_end = 124,
@@ -422,8 +420,8 @@ static const struct tv_mode tv_modes[] = {
.name = "NTSC-443",
.clock = 108000,
.refresh = 59940,
- .oversample = TV_OVERSAMPLE_8X,
- .component_only = 0,
+ .oversample = 8,
+ .component_only = false,
/* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 4.43MHz */
.hsync_end = 64, .hblank_end = 124,
.hblank_start = 836, .htotal = 857,
@@ -464,8 +462,8 @@ static const struct tv_mode tv_modes[] = {
.name = "NTSC-J",
.clock = 108000,
.refresh = 59940,
- .oversample = TV_OVERSAMPLE_8X,
- .component_only = 0,
+ .oversample = 8,
+ .component_only = false,
/* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */
.hsync_end = 64, .hblank_end = 124,
@@ -507,8 +505,8 @@ static const struct tv_mode tv_modes[] = {
.name = "PAL-M",
.clock = 108000,
.refresh = 59940,
- .oversample = TV_OVERSAMPLE_8X,
- .component_only = 0,
+ .oversample = 8,
+ .component_only = false,
/* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */
.hsync_end = 64, .hblank_end = 124,
@@ -551,8 +549,8 @@ static const struct tv_mode tv_modes[] = {
.name = "PAL-N",
.clock = 108000,
.refresh = 50000,
- .oversample = TV_OVERSAMPLE_8X,
- .component_only = 0,
+ .oversample = 8,
+ .component_only = false,
.hsync_end = 64, .hblank_end = 128,
.hblank_start = 844, .htotal = 863,
@@ -596,8 +594,8 @@ static const struct tv_mode tv_modes[] = {
.name = "PAL",
.clock = 108000,
.refresh = 50000,
- .oversample = TV_OVERSAMPLE_8X,
- .component_only = 0,
+ .oversample = 8,
+ .component_only = false,
.hsync_end = 64, .hblank_end = 142,
.hblank_start = 844, .htotal = 863,
@@ -636,10 +634,10 @@ static const struct tv_mode tv_modes[] = {
},
{
.name = "480p",
- .clock = 107520,
+ .clock = 108000,
.refresh = 59940,
- .oversample = TV_OVERSAMPLE_4X,
- .component_only = 1,
+ .oversample = 4,
+ .component_only = true,
.hsync_end = 64, .hblank_end = 122,
.hblank_start = 842, .htotal = 857,
@@ -660,10 +658,10 @@ static const struct tv_mode tv_modes[] = {
},
{
.name = "576p",
- .clock = 107520,
+ .clock = 108000,
.refresh = 50000,
- .oversample = TV_OVERSAMPLE_4X,
- .component_only = 1,
+ .oversample = 4,
+ .component_only = true,
.hsync_end = 64, .hblank_end = 139,
.hblank_start = 859, .htotal = 863,
@@ -684,10 +682,10 @@ static const struct tv_mode tv_modes[] = {
},
{
.name = "720p@60Hz",
- .clock = 148800,
+ .clock = 148500,
.refresh = 60000,
- .oversample = TV_OVERSAMPLE_2X,
- .component_only = 1,
+ .oversample = 2,
+ .component_only = true,
.hsync_end = 80, .hblank_end = 300,
.hblank_start = 1580, .htotal = 1649,
@@ -708,10 +706,10 @@ static const struct tv_mode tv_modes[] = {
},
{
.name = "720p@50Hz",
- .clock = 148800,
+ .clock = 148500,
.refresh = 50000,
- .oversample = TV_OVERSAMPLE_2X,
- .component_only = 1,
+ .oversample = 2,
+ .component_only = true,
.hsync_end = 80, .hblank_end = 300,
.hblank_start = 1580, .htotal = 1979,
@@ -729,14 +727,13 @@ static const struct tv_mode tv_modes[] = {
.burst_ena = false,
.filter_table = filter_table,
- .max_srcw = 800
},
{
.name = "1080i@50Hz",
- .clock = 148800,
+ .clock = 148500,
.refresh = 50000,
- .oversample = TV_OVERSAMPLE_2X,
- .component_only = 1,
+ .oversample = 2,
+ .component_only = true,
.hsync_end = 88, .hblank_end = 235,
.hblank_start = 2155, .htotal = 2639,
@@ -759,10 +756,10 @@ static const struct tv_mode tv_modes[] = {
},
{
.name = "1080i@60Hz",
- .clock = 148800,
+ .clock = 148500,
.refresh = 60000,
- .oversample = TV_OVERSAMPLE_2X,
- .component_only = 1,
+ .oversample = 2,
+ .component_only = true,
.hsync_end = 88, .hblank_end = 235,
.hblank_start = 2155, .htotal = 2199,
@@ -783,8 +780,115 @@ static const struct tv_mode tv_modes[] = {
.filter_table = filter_table,
},
+
+ {
+ .name = "1080p@30Hz",
+ .clock = 148500,
+ .refresh = 30000,
+ .oversample = 2,
+ .component_only = true,
+
+ .hsync_end = 88, .hblank_end = 235,
+ .hblank_start = 2155, .htotal = 2199,
+
+ .progressive = true, .trilevel_sync = true,
+
+ .vsync_start_f1 = 8, .vsync_start_f2 = 8,
+ .vsync_len = 10,
+
+ .veq_ena = false, .veq_start_f1 = 0,
+ .veq_start_f2 = 0, .veq_len = 0,
+
+ .vi_end_f1 = 44, .vi_end_f2 = 44,
+ .nbr_end = 1079,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
+
+ {
+ .name = "1080p@50Hz",
+ .clock = 148500,
+ .refresh = 50000,
+ .oversample = 1,
+ .component_only = true,
+
+ .hsync_end = 88, .hblank_end = 235,
+ .hblank_start = 2155, .htotal = 2639,
+
+ .progressive = true, .trilevel_sync = true,
+
+ .vsync_start_f1 = 8, .vsync_start_f2 = 8,
+ .vsync_len = 10,
+
+ .veq_ena = false, .veq_start_f1 = 0,
+ .veq_start_f2 = 0, .veq_len = 0,
+
+ .vi_end_f1 = 44, .vi_end_f2 = 44,
+ .nbr_end = 1079,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
+
+ {
+ .name = "1080p@60Hz",
+ .clock = 148500,
+ .refresh = 60000,
+ .oversample = 1,
+ .component_only = true,
+
+ .hsync_end = 88, .hblank_end = 235,
+ .hblank_start = 2155, .htotal = 2199,
+
+ .progressive = true, .trilevel_sync = true,
+
+ .vsync_start_f1 = 8, .vsync_start_f2 = 8,
+ .vsync_len = 10,
+
+ .veq_ena = false, .veq_start_f1 = 0,
+ .veq_start_f2 = 0, .veq_len = 0,
+
+ .vi_end_f1 = 44, .vi_end_f2 = 44,
+ .nbr_end = 1079,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
};
+struct intel_tv_connector_state {
+ struct drm_connector_state base;
+
+ /*
+ * May need to override the user margins for
+ * gen3 >1024 wide source vertical centering.
+ */
+ struct {
+ u16 top, bottom;
+ } margins;
+
+ bool bypass_vfilter;
+};
+
+#define to_intel_tv_connector_state(x) container_of(x, struct intel_tv_connector_state, base)
+
+static struct drm_connector_state *
+intel_tv_connector_duplicate_state(struct drm_connector *connector)
+{
+ struct intel_tv_connector_state *state;
+
+ state = kmemdup(connector->state, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_connector_duplicate_state(connector, &state->base);
+ return &state->base;
+}
+
static struct intel_tv *enc_to_tv(struct intel_encoder *encoder)
{
return container_of(encoder, struct intel_tv, base);
@@ -860,45 +964,370 @@ intel_tv_mode_valid(struct drm_connector *connector,
return MODE_CLOCK_RANGE;
}
+static int
+intel_tv_mode_vdisplay(const struct tv_mode *tv_mode)
+{
+ if (tv_mode->progressive)
+ return tv_mode->nbr_end + 1;
+ else
+ return 2 * (tv_mode->nbr_end + 1);
+}
+
+static void
+intel_tv_mode_to_mode(struct drm_display_mode *mode,
+ const struct tv_mode *tv_mode)
+{
+ mode->clock = tv_mode->clock /
+ (tv_mode->oversample >> !tv_mode->progressive);
+
+ /*
+ * tv_mode horizontal timings:
+ *
+ * hsync_end
+ * | hblank_end
+ * | | hblank_start
+ * | | | htotal
+ * | _______ |
+ * ____/ \___
+ * \__/ \
+ */
+ mode->hdisplay =
+ tv_mode->hblank_start - tv_mode->hblank_end;
+ mode->hsync_start = mode->hdisplay +
+ tv_mode->htotal - tv_mode->hblank_start;
+ mode->hsync_end = mode->hsync_start +
+ tv_mode->hsync_end;
+ mode->htotal = tv_mode->htotal + 1;
+
+ /*
+ * tv_mode vertical timings:
+ *
+ * vsync_start
+ * | vsync_end
+ * | | vi_end nbr_end
+ * | | | |
+ * | | _______
+ * \__ ____/ \
+ * \__/
+ */
+ mode->vdisplay = intel_tv_mode_vdisplay(tv_mode);
+ if (tv_mode->progressive) {
+ mode->vsync_start = mode->vdisplay +
+ tv_mode->vsync_start_f1 + 1;
+ mode->vsync_end = mode->vsync_start +
+ tv_mode->vsync_len;
+ mode->vtotal = mode->vdisplay +
+ tv_mode->vi_end_f1 + 1;
+ } else {
+ mode->vsync_start = mode->vdisplay +
+ tv_mode->vsync_start_f1 + 1 +
+ tv_mode->vsync_start_f2 + 1;
+ mode->vsync_end = mode->vsync_start +
+ 2 * tv_mode->vsync_len;
+ mode->vtotal = mode->vdisplay +
+ tv_mode->vi_end_f1 + 1 +
+ tv_mode->vi_end_f2 + 1;
+ }
+
+	/* TV has its own notion of sync and other mode flags, so clear them. */
+ mode->flags = 0;
+
+ mode->vrefresh = 0;
+ mode->vrefresh = drm_mode_vrefresh(mode);
+
+ snprintf(mode->name, sizeof(mode->name),
+ "%dx%d%c (%s)",
+ mode->hdisplay, mode->vdisplay,
+ tv_mode->progressive ? 'p' : 'i',
+ tv_mode->name);
+}
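As a cross-check of the conversion above, here is a standalone sketch (plain userspace C with a local struct that only mirrors the fields used, not the driver's types) showing how the new 1080p@60Hz table entry works out; the result matches the standard CEA 1080p60 timing:

	#include <stdio.h>

	/* Local mirror of the tv_mode fields used by intel_tv_mode_to_mode(),
	 * filled with the 1080p@60Hz values from the table above. */
	struct example_tv_mode {
		int clock, oversample, progressive;
		int hsync_end, hblank_end, hblank_start, htotal;
		int vsync_start_f1, vsync_len, vi_end_f1, nbr_end;
	};

	int main(void)
	{
		struct example_tv_mode tv = {
			.clock = 148500, .oversample = 1, .progressive = 1,
			.hsync_end = 88, .hblank_end = 235,
			.hblank_start = 2155, .htotal = 2199,
			.vsync_start_f1 = 8, .vsync_len = 10,
			.vi_end_f1 = 44, .nbr_end = 1079,
		};
		int clock = tv.clock / (tv.oversample >> !tv.progressive); /* 148500 kHz */
		int hdisplay = tv.hblank_start - tv.hblank_end;            /* 1920 */
		int hsync_start = hdisplay + tv.htotal - tv.hblank_start;  /* 1964 */
		int hsync_end = hsync_start + tv.hsync_end;                 /* 2052 */
		int htotal = tv.htotal + 1;                                  /* 2200 */
		int vdisplay = tv.nbr_end + 1;                               /* 1080, progressive */
		int vtotal = vdisplay + tv.vi_end_f1 + 1;                    /* 1125 */

		printf("%dx%d, hsync %d-%d, htotal %d, vtotal %d, %d kHz, ~%d Hz\n",
		       hdisplay, vdisplay, hsync_start, hsync_end, htotal, vtotal,
		       clock, clock * 1000 / (htotal * vtotal));             /* 60 Hz */
		return 0;
	}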
+
+static void intel_tv_scale_mode_horiz(struct drm_display_mode *mode,
+ int hdisplay, int left_margin,
+ int right_margin)
+{
+ int hsync_start = mode->hsync_start - mode->hdisplay + right_margin;
+ int hsync_end = mode->hsync_end - mode->hdisplay + right_margin;
+ int new_htotal = mode->htotal * hdisplay /
+ (mode->hdisplay - left_margin - right_margin);
+
+ mode->clock = mode->clock * new_htotal / mode->htotal;
+
+ mode->hdisplay = hdisplay;
+ mode->hsync_start = hdisplay + hsync_start * new_htotal / mode->htotal;
+ mode->hsync_end = hdisplay + hsync_end * new_htotal / mode->htotal;
+ mode->htotal = new_htotal;
+}
+
+static void intel_tv_scale_mode_vert(struct drm_display_mode *mode,
+ int vdisplay, int top_margin,
+ int bottom_margin)
+{
+ int vsync_start = mode->vsync_start - mode->vdisplay + bottom_margin;
+ int vsync_end = mode->vsync_end - mode->vdisplay + bottom_margin;
+ int new_vtotal = mode->vtotal * vdisplay /
+ (mode->vdisplay - top_margin - bottom_margin);
+
+ mode->clock = mode->clock * new_vtotal / mode->vtotal;
+
+ mode->vdisplay = vdisplay;
+ mode->vsync_start = vdisplay + vsync_start * new_vtotal / mode->vtotal;
+ mode->vsync_end = vdisplay + vsync_end * new_vtotal / mode->vtotal;
+ mode->vtotal = new_vtotal;
+}
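A quick worked example of the scaling helpers (hypothetical target size, no margins): scaling the 1920x1080 mode above (htotal 2200, vtotal 1125, 148500 kHz) down to a 720-line source with intel_tv_scale_mode_vert() gives new_vtotal = 1125 * 720 / 1080 = 750 and clock = 148500 * 750 / 1125 = 99000 kHz; because the clock is scaled by the same ratio as the total, the refresh rate is preserved: 99000000 / (2200 * 750) = 60 Hz.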
static void
intel_tv_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_display_mode *adjusted_mode =
+ &pipe_config->base.adjusted_mode;
+ struct drm_display_mode mode = {};
+ u32 tv_ctl, hctl1, hctl3, vctl1, vctl2, tmp;
+ struct tv_mode tv_mode = {};
+ int hdisplay = adjusted_mode->crtc_hdisplay;
+ int vdisplay = adjusted_mode->crtc_vdisplay;
+ int xsize, ysize, xpos, ypos;
+
pipe_config->output_types |= BIT(INTEL_OUTPUT_TVOUT);
- pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
+ tv_ctl = I915_READ(TV_CTL);
+ hctl1 = I915_READ(TV_H_CTL_1);
+ hctl3 = I915_READ(TV_H_CTL_3);
+ vctl1 = I915_READ(TV_V_CTL_1);
+ vctl2 = I915_READ(TV_V_CTL_2);
+
+ tv_mode.htotal = (hctl1 & TV_HTOTAL_MASK) >> TV_HTOTAL_SHIFT;
+ tv_mode.hsync_end = (hctl1 & TV_HSYNC_END_MASK) >> TV_HSYNC_END_SHIFT;
+
+ tv_mode.hblank_start = (hctl3 & TV_HBLANK_START_MASK) >> TV_HBLANK_START_SHIFT;
+ tv_mode.hblank_end = (hctl3 & TV_HSYNC_END_MASK) >> TV_HBLANK_END_SHIFT;
+
+ tv_mode.nbr_end = (vctl1 & TV_NBR_END_MASK) >> TV_NBR_END_SHIFT;
+ tv_mode.vi_end_f1 = (vctl1 & TV_VI_END_F1_MASK) >> TV_VI_END_F1_SHIFT;
+ tv_mode.vi_end_f2 = (vctl1 & TV_VI_END_F2_MASK) >> TV_VI_END_F2_SHIFT;
+
+ tv_mode.vsync_len = (vctl2 & TV_VSYNC_LEN_MASK) >> TV_VSYNC_LEN_SHIFT;
+ tv_mode.vsync_start_f1 = (vctl2 & TV_VSYNC_START_F1_MASK) >> TV_VSYNC_START_F1_SHIFT;
+ tv_mode.vsync_start_f2 = (vctl2 & TV_VSYNC_START_F2_MASK) >> TV_VSYNC_START_F2_SHIFT;
+
+ tv_mode.clock = pipe_config->port_clock;
+
+ tv_mode.progressive = tv_ctl & TV_PROGRESSIVE;
+
+ switch (tv_ctl & TV_OVERSAMPLE_MASK) {
+ case TV_OVERSAMPLE_8X:
+ tv_mode.oversample = 8;
+ break;
+ case TV_OVERSAMPLE_4X:
+ tv_mode.oversample = 4;
+ break;
+ case TV_OVERSAMPLE_2X:
+ tv_mode.oversample = 2;
+ break;
+ default:
+ tv_mode.oversample = 1;
+ break;
+ }
+
+ tmp = I915_READ(TV_WIN_POS);
+ xpos = tmp >> 16;
+ ypos = tmp & 0xffff;
+
+ tmp = I915_READ(TV_WIN_SIZE);
+ xsize = tmp >> 16;
+ ysize = tmp & 0xffff;
+
+ intel_tv_mode_to_mode(&mode, &tv_mode);
+
+ DRM_DEBUG_KMS("TV mode:\n");
+ drm_mode_debug_printmodeline(&mode);
+
+ intel_tv_scale_mode_horiz(&mode, hdisplay,
+ xpos, mode.hdisplay - xsize - xpos);
+ intel_tv_scale_mode_vert(&mode, vdisplay,
+ ypos, mode.vdisplay - ysize - ypos);
+
+ adjusted_mode->crtc_clock = mode.clock;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ adjusted_mode->crtc_clock /= 2;
+
+ /* pixel counter doesn't work on i965gm TV output */
+ if (IS_I965GM(dev_priv))
+ adjusted_mode->private_flags |=
+ I915_MODE_FLAG_USE_SCANLINE_COUNTER;
}
-static bool
+static bool intel_tv_source_too_wide(struct drm_i915_private *dev_priv,
+ int hdisplay)
+{
+ return IS_GEN(dev_priv, 3) && hdisplay > 1024;
+}
+
+static bool intel_tv_vert_scaling(const struct drm_display_mode *tv_mode,
+ const struct drm_connector_state *conn_state,
+ int vdisplay)
+{
+ return tv_mode->crtc_vdisplay -
+ conn_state->tv.margins.top -
+ conn_state->tv.margins.bottom !=
+ vdisplay;
+}
+
+static int
intel_tv_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_tv_connector_state *tv_conn_state =
+ to_intel_tv_connector_state(conn_state);
const struct tv_mode *tv_mode = intel_tv_mode_find(conn_state);
struct drm_display_mode *adjusted_mode =
&pipe_config->base.adjusted_mode;
+ int hdisplay = adjusted_mode->crtc_hdisplay;
+ int vdisplay = adjusted_mode->crtc_vdisplay;
if (!tv_mode)
- return false;
+ return -EINVAL;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return false;
+ return -EINVAL;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
- adjusted_mode->crtc_clock = tv_mode->clock;
+
DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
pipe_config->pipe_bpp = 8*3;
- /* TV has it's own notion of sync and other mode flags, so clear them. */
- adjusted_mode->flags = 0;
+ pipe_config->port_clock = tv_mode->clock;
+
+ intel_tv_mode_to_mode(adjusted_mode, tv_mode);
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+ if (intel_tv_source_too_wide(dev_priv, hdisplay) ||
+ !intel_tv_vert_scaling(adjusted_mode, conn_state, vdisplay)) {
+ int extra, top, bottom;
+
+ extra = adjusted_mode->crtc_vdisplay - vdisplay;
+
+ if (extra < 0) {
+ DRM_DEBUG_KMS("No vertical scaling for >1024 pixel wide modes\n");
+ return -EINVAL;
+ }
+
+ /* Need to turn off the vertical filter and center the image */
+
+ /* Attempt to maintain the relative sizes of the margins */
+ top = conn_state->tv.margins.top;
+ bottom = conn_state->tv.margins.bottom;
+
+ if (top + bottom)
+ top = extra * top / (top + bottom);
+ else
+ top = extra / 2;
+ bottom = extra - top;
+
+ tv_conn_state->margins.top = top;
+ tv_conn_state->margins.bottom = bottom;
+
+ tv_conn_state->bypass_vfilter = true;
+
+ if (!tv_mode->progressive) {
+ adjusted_mode->clock /= 2;
+ adjusted_mode->crtc_clock /= 2;
+ adjusted_mode->flags |= DRM_MODE_FLAG_INTERLACE;
+ }
+ } else {
+ tv_conn_state->margins.top = conn_state->tv.margins.top;
+ tv_conn_state->margins.bottom = conn_state->tv.margins.bottom;
+
+ tv_conn_state->bypass_vfilter = false;
+ }
+
+ DRM_DEBUG_KMS("TV mode:\n");
+ drm_mode_debug_printmodeline(adjusted_mode);
/*
- * FIXME: We don't check whether the input mode is actually what we want
- * or whether userspace is doing something stupid.
+ * The pipe scanline counter behaviour looks as follows when
+ * using the TV encoder:
+ *
+ * time ->
+ *
+ * dsl=vtotal-1 | |
+ * || ||
+ * ___| | ___| |
+ * / | / |
+ * / | / |
+ * dsl=0 ___/ |_____/ |
+ * | | | | | |
+ * ^ ^ ^ ^ ^
+ * | | | | pipe vblank/first part of tv vblank
+ * | | | bottom margin
+ * | | active
+ * | top margin
+ * remainder of tv vblank
+ *
+ * When the TV encoder is used the pipe wants to run faster
+	 * than the expected rate. During the active portion the TV
+ * encoder stalls the pipe every few lines to keep it in
+ * check. When the TV encoder reaches the bottom margin the
+ * pipe simply stops. Once we reach the TV vblank the pipe is
+ * no longer stalled and it runs at the max rate (apparently
+ * oversample clock on gen3, cdclk on gen4). Once the pipe
+ * reaches the pipe vtotal the pipe stops for the remainder
+ * of the TV vblank/top margin. The pipe starts up again when
+ * the TV encoder exits the top margin.
+ *
+ * To avoid huge hassles for vblank timestamping we scale
+ * the pipe timings as if the pipe always runs at the average
+ * rate it maintains during the active period. This also
+ * gives us a reasonable guesstimate as to the pixel rate.
+ * Due to the variation in the actual pipe speed the scanline
+ * counter will give us slightly erroneous results during the
+ * TV vblank/margins. But since vtotal was selected such that
+ * it matches the average rate of the pipe during the active
+ * portion the error shouldn't cause any serious grief to
+ * vblank timestamps.
+ *
+ * For posterity here is the empirically derived formula
+ * that gives us the maximum length of the pipe vblank
+ * we can use without causing display corruption. Following
+ * this would allow us to have a ticking scanline counter
+ * everywhere except during the bottom margin (there the
+ * pipe always stops). Ie. this would eliminate the second
+ * flat portion of the above graph. However this would also
+ * complicate vblank timestamping as the pipe vtotal would
+ * no longer match the average rate the pipe runs at during
+ * the active portion. Hence following this formula seems
+	 * more trouble than it's worth.
+ *
+ * if (IS_GEN(dev_priv, 4)) {
+ * num = cdclk * (tv_mode->oversample >> !tv_mode->progressive);
+ * den = tv_mode->clock;
+ * } else {
+ * num = tv_mode->oversample >> !tv_mode->progressive;
+ * den = 1;
+ * }
+ * max_pipe_vblank_len ~=
+ * (num * tv_htotal * (tv_vblank_len + top_margin)) /
+ * (den * pipe_htotal);
*/
+ intel_tv_scale_mode_horiz(adjusted_mode, hdisplay,
+ conn_state->tv.margins.left,
+ conn_state->tv.margins.right);
+ intel_tv_scale_mode_vert(adjusted_mode, vdisplay,
+ tv_conn_state->margins.top,
+ tv_conn_state->margins.bottom);
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+ adjusted_mode->name[0] = '\0';
+
+ /* pixel counter doesn't work on i965gm TV output */
+ if (IS_I965GM(dev_priv))
+ adjusted_mode->private_flags |=
+ I915_MODE_FLAG_USE_SCANLINE_COUNTER;
- return true;
+ return 0;
}
static void
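To illustrate the margin handling in intel_tv_compute_config() with hypothetical numbers: if the TV mode is 1080 lines tall, the requested source is 864 lines, and the user margins are top = 30 and bottom = 10, then extra = 1080 - 864 = 216 and the relative split is kept, top = 216 * 30 / 40 = 162, bottom = 216 - 162 = 54; with no user margins the 216 extra lines are simply split 108/108.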
@@ -987,14 +1416,16 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
struct intel_tv *intel_tv = enc_to_tv(encoder);
+ const struct intel_tv_connector_state *tv_conn_state =
+ to_intel_tv_connector_state(conn_state);
const struct tv_mode *tv_mode = intel_tv_mode_find(conn_state);
- u32 tv_ctl;
+ u32 tv_ctl, tv_filter_ctl;
u32 scctl1, scctl2, scctl3;
int i, j;
const struct video_levels *video_levels;
const struct color_conversion *color_conversion;
bool burst_ena;
- int xpos = 0x0, ypos = 0x0;
+ int xpos, ypos;
unsigned int xsize, ysize;
if (!tv_mode)
@@ -1030,7 +1461,21 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
}
tv_ctl |= TV_ENC_PIPE_SEL(intel_crtc->pipe);
- tv_ctl |= tv_mode->oversample;
+
+ switch (tv_mode->oversample) {
+ case 8:
+ tv_ctl |= TV_OVERSAMPLE_8X;
+ break;
+ case 4:
+ tv_ctl |= TV_OVERSAMPLE_4X;
+ break;
+ case 2:
+ tv_ctl |= TV_OVERSAMPLE_2X;
+ break;
+ default:
+ tv_ctl |= TV_OVERSAMPLE_NONE;
+ break;
+ }
if (tv_mode->progressive)
tv_ctl |= TV_PROGRESSIVE;
@@ -1082,19 +1527,20 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
assert_pipe_disabled(dev_priv, intel_crtc->pipe);
/* Filter ctl must be set before TV_WIN_SIZE */
- I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE);
+ tv_filter_ctl = TV_AUTO_SCALE;
+ if (tv_conn_state->bypass_vfilter)
+ tv_filter_ctl |= TV_V_FILTER_BYPASS;
+ I915_WRITE(TV_FILTER_CTL_1, tv_filter_ctl);
+
xsize = tv_mode->hblank_start - tv_mode->hblank_end;
- if (tv_mode->progressive)
- ysize = tv_mode->nbr_end + 1;
- else
- ysize = 2*tv_mode->nbr_end + 1;
+ ysize = intel_tv_mode_vdisplay(tv_mode);
- xpos += conn_state->tv.margins.left;
- ypos += conn_state->tv.margins.top;
+ xpos = conn_state->tv.margins.left;
+ ypos = tv_conn_state->margins.top;
xsize -= (conn_state->tv.margins.left +
conn_state->tv.margins.right);
- ysize -= (conn_state->tv.margins.top +
- conn_state->tv.margins.bottom);
+ ysize -= (tv_conn_state->margins.top +
+ tv_conn_state->margins.bottom);
I915_WRITE(TV_WIN_POS, (xpos<<16)|ypos);
I915_WRITE(TV_WIN_SIZE, (xsize<<16)|ysize);
@@ -1111,23 +1557,6 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
I915_WRITE(TV_CTL, tv_ctl);
}
-static const struct drm_display_mode reported_modes[] = {
- {
- .name = "NTSC 480i",
- .clock = 107520,
- .hdisplay = 1280,
- .hsync_start = 1368,
- .hsync_end = 1496,
- .htotal = 1712,
-
- .vdisplay = 1024,
- .vsync_start = 1027,
- .vsync_end = 1034,
- .vtotal = 1104,
- .type = DRM_MODE_TYPE_DRIVER,
- },
-};
-
static int
intel_tv_detect_type(struct intel_tv *intel_tv,
struct drm_connector *connector)
@@ -1234,16 +1663,18 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state);
int i;
- if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) ==
- tv_mode->component_only)
+ /* Component supports everything so we can keep the current mode */
+ if (intel_tv->type == DRM_MODE_CONNECTOR_Component)
return;
+ /* If the current mode is fine don't change it */
+ if (!tv_mode->component_only)
+ return;
for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
- tv_mode = tv_modes + i;
+ tv_mode = &tv_modes[i];
- if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) ==
- tv_mode->component_only)
+ if (!tv_mode->component_only)
break;
}
@@ -1255,7 +1686,6 @@ intel_tv_detect(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx,
bool force)
{
- struct drm_display_mode mode;
struct intel_tv *intel_tv = intel_attached_tv(connector);
enum drm_connector_status status;
int type;
@@ -1264,13 +1694,11 @@ intel_tv_detect(struct drm_connector *connector,
connector->base.id, connector->name,
force);
- mode = reported_modes[0];
-
if (force) {
struct intel_load_detect_pipe tmp;
int ret;
- ret = intel_get_load_detect_pipe(connector, &mode, &tmp, ctx);
+ ret = intel_get_load_detect_pipe(connector, NULL, &tmp, ctx);
if (ret < 0)
return ret;
@@ -1294,84 +1722,85 @@ intel_tv_detect(struct drm_connector *connector,
}
static const struct input_res {
- const char *name;
- int w, h;
+ u16 w, h;
} input_res_table[] = {
- {"640x480", 640, 480},
- {"800x600", 800, 600},
- {"1024x768", 1024, 768},
- {"1280x1024", 1280, 1024},
- {"848x480", 848, 480},
- {"1280x720", 1280, 720},
- {"1920x1080", 1920, 1080},
+ { 640, 480 },
+ { 800, 600 },
+ { 1024, 768 },
+ { 1280, 1024 },
+ { 848, 480 },
+ { 1280, 720 },
+ { 1920, 1080 },
};
-/*
- * Chose preferred mode according to line number of TV format
- */
+/* Choose preferred mode according to line number of TV format */
+static bool
+intel_tv_is_preferred_mode(const struct drm_display_mode *mode,
+ const struct tv_mode *tv_mode)
+{
+ int vdisplay = intel_tv_mode_vdisplay(tv_mode);
+
+ /* prefer 480 line modes for all SD TV modes */
+ if (vdisplay <= 576)
+ vdisplay = 480;
+
+ return vdisplay == mode->vdisplay;
+}
+
static void
-intel_tv_choose_preferred_modes(const struct tv_mode *tv_mode,
- struct drm_display_mode *mode_ptr)
+intel_tv_set_mode_type(struct drm_display_mode *mode,
+ const struct tv_mode *tv_mode)
{
- if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480)
- mode_ptr->type |= DRM_MODE_TYPE_PREFERRED;
- else if (tv_mode->nbr_end > 480) {
- if (tv_mode->progressive == true && tv_mode->nbr_end < 720) {
- if (mode_ptr->vdisplay == 720)
- mode_ptr->type |= DRM_MODE_TYPE_PREFERRED;
- } else if (mode_ptr->vdisplay == 1080)
- mode_ptr->type |= DRM_MODE_TYPE_PREFERRED;
- }
+ mode->type = DRM_MODE_TYPE_DRIVER;
+
+ if (intel_tv_is_preferred_mode(mode, tv_mode))
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
}
static int
intel_tv_get_modes(struct drm_connector *connector)
{
- struct drm_display_mode *mode_ptr;
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state);
- int j, count = 0;
- u64 tmp;
+ int i, count = 0;
- for (j = 0; j < ARRAY_SIZE(input_res_table);
- j++) {
- const struct input_res *input = &input_res_table[j];
- unsigned int hactive_s = input->w;
- unsigned int vactive_s = input->h;
+ for (i = 0; i < ARRAY_SIZE(input_res_table); i++) {
+ const struct input_res *input = &input_res_table[i];
+ struct drm_display_mode *mode;
- if (tv_mode->max_srcw && input->w > tv_mode->max_srcw)
+ if (input->w > 1024 &&
+ !tv_mode->progressive &&
+ !tv_mode->component_only)
continue;
- if (input->w > 1024 && (!tv_mode->progressive
- && !tv_mode->component_only))
+ /* no vertical scaling with wide sources on gen3 */
+ if (IS_GEN(dev_priv, 3) && input->w > 1024 &&
+ input->h > intel_tv_mode_vdisplay(tv_mode))
continue;
- mode_ptr = drm_mode_create(connector->dev);
- if (!mode_ptr)
+ mode = drm_mode_create(connector->dev);
+ if (!mode)
continue;
- strlcpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN);
-
- mode_ptr->hdisplay = hactive_s;
- mode_ptr->hsync_start = hactive_s + 1;
- mode_ptr->hsync_end = hactive_s + 64;
- if (mode_ptr->hsync_end <= mode_ptr->hsync_start)
- mode_ptr->hsync_end = mode_ptr->hsync_start + 1;
- mode_ptr->htotal = hactive_s + 96;
-
- mode_ptr->vdisplay = vactive_s;
- mode_ptr->vsync_start = vactive_s + 1;
- mode_ptr->vsync_end = vactive_s + 32;
- if (mode_ptr->vsync_end <= mode_ptr->vsync_start)
- mode_ptr->vsync_end = mode_ptr->vsync_start + 1;
- mode_ptr->vtotal = vactive_s + 33;
-
- tmp = mul_u32_u32(tv_mode->refresh, mode_ptr->vtotal);
- tmp *= mode_ptr->htotal;
- tmp = div_u64(tmp, 1000000);
- mode_ptr->clock = (int) tmp;
-
- mode_ptr->type = DRM_MODE_TYPE_DRIVER;
- intel_tv_choose_preferred_modes(tv_mode, mode_ptr);
- drm_mode_probed_add(connector, mode_ptr);
+
+ /*
+ * We take the TV mode and scale it to look
+ * like it had the expected h/vdisplay. This
+ * provides the most information to userspace
+ * about the actual timings of the mode. We
+ * do ignore the margins though.
+ */
+ intel_tv_mode_to_mode(mode, tv_mode);
+ if (count == 0) {
+ DRM_DEBUG_KMS("TV mode:\n");
+ drm_mode_debug_printmodeline(mode);
+ }
+ intel_tv_scale_mode_horiz(mode, input->w, 0, 0);
+ intel_tv_scale_mode_vert(mode, input->h, 0, 0);
+ intel_tv_set_mode_type(mode, tv_mode);
+
+ drm_mode_set_name(mode);
+
+ drm_mode_probed_add(connector, mode);
count++;
}
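As a concrete example of the scaling done in this loop: starting from the 1080p@60Hz TV mode (1920x1080, htotal 2200, vtotal 1125, 148500 kHz) and the 1280x720 entry of input_res_table, intel_tv_scale_mode_horiz() gives htotal = 2200 * 1280 / 1920 = 1466 and clock = 148500 * 1466 / 2200 = 98955 kHz, and intel_tv_scale_mode_vert() then gives vtotal = 1125 * 720 / 1080 = 750 and clock = 98955 * 750 / 1125 = 65970 kHz, so the probed 1280x720 mode still refreshes at 65970000 / (1466 * 750) = 60 Hz.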
@@ -1384,7 +1813,7 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = {
.destroy = intel_connector_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_duplicate_state = intel_tv_connector_duplicate_state,
};
static int intel_tv_atomic_check(struct drm_connector *connector,
@@ -1531,11 +1960,15 @@ intel_tv_init(struct drm_i915_private *dev_priv)
connector->doublescan_allowed = false;
/* Create TV properties then attach current values */
- for (i = 0; i < ARRAY_SIZE(tv_modes); i++)
+ for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
+ /* 1080p50/1080p60 not supported on gen3 */
+ if (IS_GEN(dev_priv, 3) &&
+ tv_modes[i].oversample == 1)
+ break;
+
tv_format_names[i] = tv_modes[i].name;
- drm_mode_create_tv_properties(dev,
- ARRAY_SIZE(tv_modes),
- tv_format_names);
+ }
+ drm_mode_create_tv_properties(dev, i, tv_format_names);
drm_object_attach_property(&connector->base, dev->mode_config.tv_mode_property,
state->tv.mode);
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index b34c318b238d..e711eb3268bc 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -26,6 +26,7 @@
#include "intel_guc_submission.h"
#include "intel_guc.h"
#include "i915_drv.h"
+#include "i915_reset.h"
static void guc_free_load_err_log(struct intel_guc *guc);
@@ -71,7 +72,7 @@ static int __get_default_guc_log_level(struct drm_i915_private *i915)
{
int guc_log_level;
- if (!HAS_GUC(i915) || !intel_uc_is_using_guc())
+ if (!HAS_GUC(i915) || !intel_uc_is_using_guc(i915))
guc_log_level = GUC_LOG_LEVEL_DISABLED;
else if (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
@@ -112,11 +113,11 @@ static void sanitize_options_early(struct drm_i915_private *i915)
DRM_DEBUG_DRIVER("enable_guc=%d (submission:%s huc:%s)\n",
i915_modparams.enable_guc,
- yesno(intel_uc_is_using_guc_submission()),
- yesno(intel_uc_is_using_huc()));
+ yesno(intel_uc_is_using_guc_submission(i915)),
+ yesno(intel_uc_is_using_huc(i915)));
/* Verify GuC firmware availability */
- if (intel_uc_is_using_guc() && !intel_uc_fw_is_selected(guc_fw)) {
+ if (intel_uc_is_using_guc(i915) && !intel_uc_fw_is_selected(guc_fw)) {
DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
"enable_guc", i915_modparams.enable_guc,
!HAS_GUC(i915) ? "no GuC hardware" :
@@ -124,7 +125,7 @@ static void sanitize_options_early(struct drm_i915_private *i915)
}
/* Verify HuC firmware availability */
- if (intel_uc_is_using_huc() && !intel_uc_fw_is_selected(huc_fw)) {
+ if (intel_uc_is_using_huc(i915) && !intel_uc_fw_is_selected(huc_fw)) {
DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
"enable_guc", i915_modparams.enable_guc,
!HAS_HUC(i915) ? "no HuC hardware" :
@@ -136,7 +137,7 @@ static void sanitize_options_early(struct drm_i915_private *i915)
i915_modparams.guc_log_level =
__get_default_guc_log_level(i915);
- if (i915_modparams.guc_log_level > 0 && !intel_uc_is_using_guc()) {
+ if (i915_modparams.guc_log_level > 0 && !intel_uc_is_using_guc(i915)) {
DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
"guc_log_level", i915_modparams.guc_log_level,
!HAS_GUC(i915) ? "no GuC hardware" :
@@ -354,7 +355,7 @@ int intel_uc_init_hw(struct drm_i915_private *i915)
/* WaEnableuKernelHeaderValidFix:skl */
/* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
- if (IS_GEN9(i915))
+ if (IS_GEN(i915, 9))
attempts = 3;
else
attempts = 1;
diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h
index 25d73ada74ae..870faf9011b9 100644
--- a/drivers/gpu/drm/i915/intel_uc.h
+++ b/drivers/gpu/drm/i915/intel_uc.h
@@ -41,19 +41,19 @@ void intel_uc_fini(struct drm_i915_private *dev_priv);
int intel_uc_suspend(struct drm_i915_private *dev_priv);
int intel_uc_resume(struct drm_i915_private *dev_priv);
-static inline bool intel_uc_is_using_guc(void)
+static inline bool intel_uc_is_using_guc(struct drm_i915_private *i915)
{
GEM_BUG_ON(i915_modparams.enable_guc < 0);
return i915_modparams.enable_guc > 0;
}
-static inline bool intel_uc_is_using_guc_submission(void)
+static inline bool intel_uc_is_using_guc_submission(struct drm_i915_private *i915)
{
GEM_BUG_ON(i915_modparams.enable_guc < 0);
return i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION;
}
-static inline bool intel_uc_is_using_huc(void)
+static inline bool intel_uc_is_using_huc(struct drm_i915_private *i915)
{
GEM_BUG_ON(i915_modparams.enable_guc < 0);
return i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC;
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.c b/drivers/gpu/drm/i915/intel_uc_fw.c
index fd496416087c..becf05ebae4d 100644
--- a/drivers/gpu/drm/i915/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/intel_uc_fw.c
@@ -46,12 +46,17 @@ void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
size_t size;
int err;
+ if (!uc_fw->path) {
+ dev_info(dev_priv->drm.dev,
+ "%s: No firmware was defined for %s!\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ intel_platform_name(INTEL_INFO(dev_priv)->platform));
+ return;
+ }
+
DRM_DEBUG_DRIVER("%s fw fetch %s\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
- if (!uc_fw->path)
- return;
-
uc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING;
DRM_DEBUG_DRIVER("%s fw fetch %s\n",
intel_uc_fw_type_repr(uc_fw->type),
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 9289515108c3..75646a1e0051 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -528,7 +528,7 @@ check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
ret |= vlv_check_for_unclaimed_mmio(dev_priv);
- if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
+ if (IS_GEN_RANGE(dev_priv, 6, 7))
ret |= gen6_check_for_fifo_debug(dev_priv);
return ret;
@@ -556,7 +556,7 @@ static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
dev_priv->uncore.funcs.force_wake_get(dev_priv,
restore_forcewake);
- if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
+ if (IS_GEN_RANGE(dev_priv, 6, 7))
dev_priv->uncore.fifo_count =
fifo_free_entries(dev_priv);
spin_unlock_irq(&dev_priv->uncore.lock);
@@ -1398,7 +1398,7 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
if (INTEL_GEN(dev_priv) <= 5 || intel_vgpu_active(dev_priv))
return;
- if (IS_GEN6(dev_priv)) {
+ if (IS_GEN(dev_priv, 6)) {
dev_priv->uncore.fw_reset = 0;
dev_priv->uncore.fw_set = FORCEWAKE_KERNEL;
dev_priv->uncore.fw_clear = 0;
@@ -1437,7 +1437,7 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
FORCEWAKE_MEDIA_VEBOX_GEN11(i),
FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
}
- } else if (IS_GEN10(dev_priv) || IS_GEN9(dev_priv)) {
+ } else if (IS_GEN_RANGE(dev_priv, 9, 10)) {
dev_priv->uncore.funcs.force_wake_get =
fw_domains_get_with_fallback;
dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
@@ -1503,7 +1503,7 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
FORCEWAKE, FORCEWAKE_ACK);
}
- } else if (IS_GEN6(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 6)) {
dev_priv->uncore.funcs.force_wake_get =
fw_domains_get_with_thread_status;
dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
@@ -1567,13 +1567,13 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
dev_priv->uncore.pmic_bus_access_nb.notifier_call =
i915_pmic_bus_access_notifier;
- if (IS_GEN(dev_priv, 2, 4) || intel_vgpu_active(dev_priv)) {
+ if (IS_GEN_RANGE(dev_priv, 2, 4) || intel_vgpu_active(dev_priv)) {
ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen2);
ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen2);
- } else if (IS_GEN5(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 5)) {
ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen5);
ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen5);
- } else if (IS_GEN(dev_priv, 6, 7)) {
+ } else if (IS_GEN_RANGE(dev_priv, 6, 7)) {
ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen6);
if (IS_VALLEYVIEW(dev_priv)) {
@@ -1582,7 +1582,7 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
} else {
ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
}
- } else if (IS_GEN8(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 8)) {
if (IS_CHERRYVIEW(dev_priv)) {
ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges);
ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
@@ -1592,7 +1592,7 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen8);
ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
}
- } else if (IS_GEN(dev_priv, 9, 10)) {
+ } else if (IS_GEN_RANGE(dev_priv, 9, 10)) {
ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
@@ -1670,6 +1670,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_reg_read *reg = data;
struct reg_whitelist const *entry;
+ intel_wakeref_t wakeref;
unsigned int flags;
int remain;
int ret = 0;
@@ -1695,286 +1696,25 @@ int i915_reg_read_ioctl(struct drm_device *dev,
flags = reg->offset & (entry->size - 1);
- intel_runtime_pm_get(dev_priv);
- if (entry->size == 8 && flags == I915_REG_READ_8B_WA)
- reg->val = I915_READ64_2x32(entry->offset_ldw,
- entry->offset_udw);
- else if (entry->size == 8 && flags == 0)
- reg->val = I915_READ64(entry->offset_ldw);
- else if (entry->size == 4 && flags == 0)
- reg->val = I915_READ(entry->offset_ldw);
- else if (entry->size == 2 && flags == 0)
- reg->val = I915_READ16(entry->offset_ldw);
- else if (entry->size == 1 && flags == 0)
- reg->val = I915_READ8(entry->offset_ldw);
- else
- ret = -EINVAL;
- intel_runtime_pm_put(dev_priv);
-
- return ret;
-}
-
-static void gen3_stop_engine(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- const u32 base = engine->mmio_base;
-
- if (intel_engine_stop_cs(engine))
- DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n", engine->name);
-
- I915_WRITE_FW(RING_HEAD(base), I915_READ_FW(RING_TAIL(base)));
- POSTING_READ_FW(RING_HEAD(base)); /* paranoia */
-
- I915_WRITE_FW(RING_HEAD(base), 0);
- I915_WRITE_FW(RING_TAIL(base), 0);
- POSTING_READ_FW(RING_TAIL(base));
-
- /* The ring must be empty before it is disabled */
- I915_WRITE_FW(RING_CTL(base), 0);
-
- /* Check acts as a post */
- if (I915_READ_FW(RING_HEAD(base)) != 0)
- DRM_DEBUG_DRIVER("%s: ring head not parked\n",
- engine->name);
-}
-
-static void i915_stop_engines(struct drm_i915_private *dev_priv,
- unsigned int engine_mask)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- if (INTEL_GEN(dev_priv) < 3)
- return;
-
- for_each_engine_masked(engine, dev_priv, engine_mask, id)
- gen3_stop_engine(engine);
-}
-
-static bool i915_in_reset(struct pci_dev *pdev)
-{
- u8 gdrst;
-
- pci_read_config_byte(pdev, I915_GDRST, &gdrst);
- return gdrst & GRDOM_RESET_STATUS;
-}
-
-static int i915_do_reset(struct drm_i915_private *dev_priv,
- unsigned int engine_mask,
- unsigned int retry)
-{
- struct pci_dev *pdev = dev_priv->drm.pdev;
- int err;
-
- /* Assert reset for at least 20 usec, and wait for acknowledgement. */
- pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
- usleep_range(50, 200);
- err = wait_for(i915_in_reset(pdev), 500);
-
- /* Clear the reset request. */
- pci_write_config_byte(pdev, I915_GDRST, 0);
- usleep_range(50, 200);
- if (!err)
- err = wait_for(!i915_in_reset(pdev), 500);
-
- return err;
-}
-
-static bool g4x_reset_complete(struct pci_dev *pdev)
-{
- u8 gdrst;
-
- pci_read_config_byte(pdev, I915_GDRST, &gdrst);
- return (gdrst & GRDOM_RESET_ENABLE) == 0;
-}
-
-static int g33_do_reset(struct drm_i915_private *dev_priv,
- unsigned int engine_mask,
- unsigned int retry)
-{
- struct pci_dev *pdev = dev_priv->drm.pdev;
-
- pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
- return wait_for(g4x_reset_complete(pdev), 500);
-}
-
-static int g4x_do_reset(struct drm_i915_private *dev_priv,
- unsigned int engine_mask,
- unsigned int retry)
-{
- struct pci_dev *pdev = dev_priv->drm.pdev;
- int ret;
-
- /* WaVcpClkGateDisableForMediaReset:ctg,elk */
- I915_WRITE(VDECCLK_GATE_D,
- I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
- POSTING_READ(VDECCLK_GATE_D);
-
- pci_write_config_byte(pdev, I915_GDRST,
- GRDOM_MEDIA | GRDOM_RESET_ENABLE);
- ret = wait_for(g4x_reset_complete(pdev), 500);
- if (ret) {
- DRM_DEBUG_DRIVER("Wait for media reset failed\n");
- goto out;
- }
-
- pci_write_config_byte(pdev, I915_GDRST,
- GRDOM_RENDER | GRDOM_RESET_ENABLE);
- ret = wait_for(g4x_reset_complete(pdev), 500);
- if (ret) {
- DRM_DEBUG_DRIVER("Wait for render reset failed\n");
- goto out;
- }
-
-out:
- pci_write_config_byte(pdev, I915_GDRST, 0);
-
- I915_WRITE(VDECCLK_GATE_D,
- I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
- POSTING_READ(VDECCLK_GATE_D);
-
- return ret;
-}
-
-static int ironlake_do_reset(struct drm_i915_private *dev_priv,
- unsigned int engine_mask,
- unsigned int retry)
-{
- int ret;
-
- I915_WRITE(ILK_GDSR, ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
- ret = intel_wait_for_register(dev_priv,
- ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
- 500);
- if (ret) {
- DRM_DEBUG_DRIVER("Wait for render reset failed\n");
- goto out;
- }
-
- I915_WRITE(ILK_GDSR, ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
- ret = intel_wait_for_register(dev_priv,
- ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
- 500);
- if (ret) {
- DRM_DEBUG_DRIVER("Wait for media reset failed\n");
- goto out;
+ with_intel_runtime_pm(dev_priv, wakeref) {
+ if (entry->size == 8 && flags == I915_REG_READ_8B_WA)
+ reg->val = I915_READ64_2x32(entry->offset_ldw,
+ entry->offset_udw);
+ else if (entry->size == 8 && flags == 0)
+ reg->val = I915_READ64(entry->offset_ldw);
+ else if (entry->size == 4 && flags == 0)
+ reg->val = I915_READ(entry->offset_ldw);
+ else if (entry->size == 2 && flags == 0)
+ reg->val = I915_READ16(entry->offset_ldw);
+ else if (entry->size == 1 && flags == 0)
+ reg->val = I915_READ8(entry->offset_ldw);
+ else
+ ret = -EINVAL;
}
-out:
- I915_WRITE(ILK_GDSR, 0);
- POSTING_READ(ILK_GDSR);
return ret;
}
-/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
-static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
- u32 hw_domain_mask)
-{
- int err;
-
- /* GEN6_GDRST is not in the gt power well, no need to check
- * for fifo space for the write or forcewake the chip for
- * the read
- */
- __raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);
-
- /* Wait for the device to ack the reset requests */
- err = __intel_wait_for_register_fw(dev_priv,
- GEN6_GDRST, hw_domain_mask, 0,
- 500, 0,
- NULL);
- if (err)
- DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n",
- hw_domain_mask);
-
- return err;
-}
-
-/**
- * gen6_reset_engines - reset individual engines
- * @dev_priv: i915 device
- * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
- * @retry: the count of of previous attempts to reset.
- *
- * This function will reset the individual engines that are set in engine_mask.
- * If you provide ALL_ENGINES as mask, full global domain reset will be issued.
- *
- * Note: It is responsibility of the caller to handle the difference between
- * asking full domain reset versus reset for all available individual engines.
- *
- * Returns 0 on success, nonzero on error.
- */
-static int gen6_reset_engines(struct drm_i915_private *dev_priv,
- unsigned int engine_mask,
- unsigned int retry)
-{
- struct intel_engine_cs *engine;
- const u32 hw_engine_mask[I915_NUM_ENGINES] = {
- [RCS] = GEN6_GRDOM_RENDER,
- [BCS] = GEN6_GRDOM_BLT,
- [VCS] = GEN6_GRDOM_MEDIA,
- [VCS2] = GEN8_GRDOM_MEDIA2,
- [VECS] = GEN6_GRDOM_VECS,
- };
- u32 hw_mask;
-
- if (engine_mask == ALL_ENGINES) {
- hw_mask = GEN6_GRDOM_FULL;
- } else {
- unsigned int tmp;
-
- hw_mask = 0;
- for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
- hw_mask |= hw_engine_mask[engine->id];
- }
-
- return gen6_hw_domain_reset(dev_priv, hw_mask);
-}
-
-/**
- * gen11_reset_engines - reset individual engines
- * @dev_priv: i915 device
- * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
- *
- * This function will reset the individual engines that are set in engine_mask.
- * If you provide ALL_ENGINES as mask, full global domain reset will be issued.
- *
- * Note: It is responsibility of the caller to handle the difference between
- * asking full domain reset versus reset for all available individual engines.
- *
- * Returns 0 on success, nonzero on error.
- */
-static int gen11_reset_engines(struct drm_i915_private *dev_priv,
- unsigned int engine_mask)
-{
- struct intel_engine_cs *engine;
- const u32 hw_engine_mask[I915_NUM_ENGINES] = {
- [RCS] = GEN11_GRDOM_RENDER,
- [BCS] = GEN11_GRDOM_BLT,
- [VCS] = GEN11_GRDOM_MEDIA,
- [VCS2] = GEN11_GRDOM_MEDIA2,
- [VCS3] = GEN11_GRDOM_MEDIA3,
- [VCS4] = GEN11_GRDOM_MEDIA4,
- [VECS] = GEN11_GRDOM_VECS,
- [VECS2] = GEN11_GRDOM_VECS2,
- };
- u32 hw_mask;
-
- BUILD_BUG_ON(VECS2 + 1 != I915_NUM_ENGINES);
-
- if (engine_mask == ALL_ENGINES) {
- hw_mask = GEN11_GRDOM_FULL;
- } else {
- unsigned int tmp;
-
- hw_mask = 0;
- for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
- hw_mask |= hw_engine_mask[engine->id];
- }
-
- return gen6_hw_domain_reset(dev_priv, hw_mask);
-}
-
/**
* __intel_wait_for_register_fw - wait until register matches expected state
* @dev_priv: the i915 device
@@ -2079,202 +1819,15 @@ int __intel_wait_for_register(struct drm_i915_private *dev_priv,
(reg_value & mask) == value,
slow_timeout_ms * 1000, 10, 1000);
+ /* just trace the final value */
+ trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);
+
if (out_value)
*out_value = reg_value;
return ret;
}
-static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- int ret;
-
- I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
- _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
-
- ret = __intel_wait_for_register_fw(dev_priv,
- RING_RESET_CTL(engine->mmio_base),
- RESET_CTL_READY_TO_RESET,
- RESET_CTL_READY_TO_RESET,
- 700, 0,
- NULL);
- if (ret)
- DRM_ERROR("%s: reset request timeout\n", engine->name);
-
- return ret;
-}
-
-static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
-
- I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
- _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
-}
-
-static int reset_engines(struct drm_i915_private *i915,
- unsigned int engine_mask,
- unsigned int retry)
-{
- if (INTEL_GEN(i915) >= 11)
- return gen11_reset_engines(i915, engine_mask);
- else
- return gen6_reset_engines(i915, engine_mask, retry);
-}
-
-static int gen8_reset_engines(struct drm_i915_private *dev_priv,
- unsigned int engine_mask,
- unsigned int retry)
-{
- struct intel_engine_cs *engine;
- const bool reset_non_ready = retry >= 1;
- unsigned int tmp;
- int ret;
-
- for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
- ret = gen8_engine_reset_prepare(engine);
- if (ret && !reset_non_ready)
- goto skip_reset;
-
- /*
- * If this is not the first failed attempt to prepare,
- * we decide to proceed anyway.
- *
- * By doing so we risk context corruption and with
- * some gens (kbl), possible system hang if reset
- * happens during active bb execution.
- *
- * We rather take context corruption instead of
- * failed reset with a wedged driver/gpu. And
- * active bb execution case should be covered by
- * i915_stop_engines we have before the reset.
- */
- }
-
- ret = reset_engines(dev_priv, engine_mask, retry);
-
-skip_reset:
- for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
- gen8_engine_reset_cancel(engine);
-
- return ret;
-}
-
-typedef int (*reset_func)(struct drm_i915_private *,
- unsigned int engine_mask, unsigned int retry);
-
-static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
-{
- if (!i915_modparams.reset)
- return NULL;
-
- if (INTEL_GEN(dev_priv) >= 8)
- return gen8_reset_engines;
- else if (INTEL_GEN(dev_priv) >= 6)
- return gen6_reset_engines;
- else if (IS_GEN5(dev_priv))
- return ironlake_do_reset;
- else if (IS_G4X(dev_priv))
- return g4x_do_reset;
- else if (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
- return g33_do_reset;
- else if (INTEL_GEN(dev_priv) >= 3)
- return i915_do_reset;
- else
- return NULL;
-}
-
-int intel_gpu_reset(struct drm_i915_private *dev_priv,
- const unsigned int engine_mask)
-{
- reset_func reset = intel_get_gpu_reset(dev_priv);
- unsigned int retry;
- int ret;
-
- GEM_BUG_ON(!engine_mask);
-
- /*
- * We want to perform per-engine reset from atomic context (e.g.
- * softirq), which imposes the constraint that we cannot sleep.
- * However, experience suggests that spending a bit of time waiting
- * for a reset helps in various cases, so for a full-device reset
- * we apply the opposite rule and wait if we want to. As we should
- * always follow up a failed per-engine reset with a full device reset,
- * being a little faster, stricter and more error prone for the
- * atomic case seems an acceptable compromise.
- *
- * Unfortunately this leads to a bimodal routine, when the goal was
- * to have a single reset function that worked for resetting any
- * number of engines simultaneously.
- */
- might_sleep_if(engine_mask == ALL_ENGINES);
-
- /*
- * If the power well sleeps during the reset, the reset
- * request may be dropped and never completes (causing -EIO).
- */
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
- for (retry = 0; retry < 3; retry++) {
-
- /*
- * We stop engines, otherwise we might get failed reset and a
- * dead gpu (on elk). Also as modern gpu as kbl can suffer
- * from system hang if batchbuffer is progressing when
- * the reset is issued, regardless of READY_TO_RESET ack.
- * Thus assume it is best to stop engines on all gens
- * where we have a gpu reset.
- *
- * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
- *
- * WaMediaResetMainRingCleanup:ctg,elk (presumably)
- *
- * FIXME: Wa for more modern gens needs to be validated
- */
- i915_stop_engines(dev_priv, engine_mask);
-
- ret = -ENODEV;
- if (reset) {
- ret = reset(dev_priv, engine_mask, retry);
- GEM_TRACE("engine_mask=%x, ret=%d, retry=%d\n",
- engine_mask, ret, retry);
- }
- if (ret != -ETIMEDOUT || engine_mask != ALL_ENGINES)
- break;
-
- cond_resched();
- }
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
-
- return ret;
-}
-
-bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
-{
- return intel_get_gpu_reset(dev_priv) != NULL;
-}
-
-bool intel_has_reset_engine(struct drm_i915_private *dev_priv)
-{
- return (dev_priv->info.has_reset_engine &&
- i915_modparams.reset >= 2);
-}
-
-int intel_reset_guc(struct drm_i915_private *dev_priv)
-{
- u32 guc_domain = INTEL_GEN(dev_priv) >= 11 ? GEN11_GRDOM_GUC :
- GEN9_GRDOM_GUC;
- int ret;
-
- GEM_BUG_ON(!HAS_GUC(dev_priv));
-
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
- ret = gen6_hw_domain_reset(dev_priv, guc_domain);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
-
- return ret;
-}
-
bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
return check_for_unclaimed_mmio(dev_priv);
@@ -2321,7 +1874,7 @@ intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
} else if (INTEL_GEN(dev_priv) >= 6) {
fw_domains = __gen6_reg_read_fw_domains(offset);
} else {
- WARN_ON(!IS_GEN(dev_priv, 2, 5));
+ WARN_ON(!IS_GEN_RANGE(dev_priv, 2, 5));
fw_domains = 0;
}
@@ -2341,12 +1894,12 @@ intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
fw_domains = __gen11_fwtable_reg_write_fw_domains(offset);
} else if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) {
fw_domains = __fwtable_reg_write_fw_domains(offset);
- } else if (IS_GEN8(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 8)) {
fw_domains = __gen8_reg_write_fw_domains(offset);
- } else if (IS_GEN(dev_priv, 6, 7)) {
+ } else if (IS_GEN_RANGE(dev_priv, 6, 7)) {
fw_domains = FORCEWAKE_RENDER;
} else {
- WARN_ON(!IS_GEN(dev_priv, 2, 5));
+ WARN_ON(!IS_GEN_RANGE(dev_priv, 2, 5));
fw_domains = 0;
}
diff --git a/drivers/gpu/drm/i915/intel_vdsc.c b/drivers/gpu/drm/i915/intel_vdsc.c
index c56ba0e04044..23abf03736e7 100644
--- a/drivers/gpu/drm/i915/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/intel_vdsc.c
@@ -6,7 +6,6 @@
* Manasi Navare <manasi.d.navare@intel.com>
*/
-#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_drv.h"
@@ -1083,6 +1082,6 @@ void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state)
I915_WRITE(dss_ctl2_reg, dss_ctl2_val);
/* Disable Power wells for VDSC/joining */
- intel_display_power_put(dev_priv,
- intel_dsc_power_domain(old_crtc_state));
+ intel_display_power_put_unchecked(dev_priv,
+ intel_dsc_power_domain(old_crtc_state));
}
diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c
index 92cb82dd0c07..f82a415ea2ba 100644
--- a/drivers/gpu/drm/i915/intel_wopcm.c
+++ b/drivers/gpu/drm/i915/intel_wopcm.c
@@ -130,11 +130,11 @@ static inline int check_hw_restriction(struct drm_i915_private *i915,
{
int err = 0;
- if (IS_GEN9(i915))
+ if (IS_GEN(i915, 9))
err = gen9_check_dword_gap(guc_wopcm_base, guc_wopcm_size);
if (!err &&
- (IS_GEN9(i915) || IS_CNL_REVID(i915, CNL_REVID_A0, CNL_REVID_A0)))
+ (IS_GEN(i915, 9) || IS_CNL_REVID(i915, CNL_REVID_A0, CNL_REVID_A0)))
err = gen9_check_huc_fw_fits(guc_wopcm_size, huc_fw_size);
return err;
@@ -163,7 +163,7 @@ int intel_wopcm_init(struct intel_wopcm *wopcm)
u32 guc_wopcm_rsvd;
int err;
- if (!USES_GUC(dev_priv))
+ if (!USES_GUC(i915))
return 0;
GEM_BUG_ON(!wopcm->size);
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index 4f41e326f3f3..15f4a6dee5aa 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -142,7 +142,8 @@ static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
}
static void
-__wa_add(struct i915_wa_list *wal, i915_reg_t reg, u32 mask, u32 val)
+wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
+ u32 val)
{
struct i915_wa wa = {
.reg = reg,
@@ -153,16 +154,32 @@ __wa_add(struct i915_wa_list *wal, i915_reg_t reg, u32 mask, u32 val)
_wa_add(wal, &wa);
}
-#define WA_REG(addr, mask, val) __wa_add(wal, (addr), (mask), (val))
+static void
+wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
+{
+ wa_write_masked_or(wal, reg, val, _MASKED_BIT_ENABLE(val));
+}
+
+static void
+wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
+{
+ wa_write_masked_or(wal, reg, ~0, val);
+}
+
+static void
+wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
+{
+ wa_write_masked_or(wal, reg, val, val);
+}
#define WA_SET_BIT_MASKED(addr, mask) \
- WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
+ wa_write_masked_or(wal, (addr), (mask), _MASKED_BIT_ENABLE(mask))
#define WA_CLR_BIT_MASKED(addr, mask) \
- WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))
+ wa_write_masked_or(wal, (addr), (mask), _MASKED_BIT_DISABLE(mask))
#define WA_SET_FIELD_MASKED(addr, mask, value) \
- WA_REG(addr, (mask), _MASKED_FIELD(mask, value))
+ wa_write_masked_or(wal, (addr), (mask), _MASKED_FIELD((mask), (value)))
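For context on the masked-write helpers above: the usual i915 convention (see the _MASKED_* macros in i915_reg.h) is that bits 31:16 of a masked register act as a write-enable mask for bits 15:0, which is why wa_masked_en() passes the bit both as the verification mask and, via _MASKED_BIT_ENABLE(), as the value. A standalone sketch of that encoding, with the macros restated locally rather than taken verbatim from the driver:

	#include <stdint.h>
	#include <stdio.h>

	/* Local restatement of the masked-register convention: the upper 16 bits
	 * select which of the lower 16 bits the hardware actually updates. */
	#define EXAMPLE_MASKED_FIELD(mask, value)	(((mask) << 16) | (value))
	#define EXAMPLE_MASKED_BIT_ENABLE(a)		EXAMPLE_MASKED_FIELD((a), (a))
	#define EXAMPLE_MASKED_BIT_DISABLE(a)		EXAMPLE_MASKED_FIELD((a), 0)

	int main(void)
	{
		uint32_t bit = 1u << 5;

		/* wa_masked_en(wal, reg, bit) records mask = bit and
		 * val = MASKED_BIT_ENABLE(bit): 0x00200020 for bit 5. */
		printf("enable:  0x%08x\n", (unsigned int)EXAMPLE_MASKED_BIT_ENABLE(bit));
		printf("disable: 0x%08x\n", (unsigned int)EXAMPLE_MASKED_BIT_DISABLE(bit));
		return 0;
	}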
static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine)
{
@@ -366,7 +383,7 @@ static void skl_tune_iz_hashing(struct intel_engine_cs *engine)
* Only consider slices where one, and only one, subslice has 7
* EUs
*/
- if (!is_power_of_2(INTEL_INFO(i915)->sseu.subslice_7eu[i]))
+ if (!is_power_of_2(RUNTIME_INFO(i915)->sseu.subslice_7eu[i]))
continue;
/*
@@ -375,7 +392,7 @@ static void skl_tune_iz_hashing(struct intel_engine_cs *engine)
*
* -> 0 <= ss <= 3;
*/
- ss = ffs(INTEL_INFO(i915)->sseu.subslice_7eu[i]) - 1;
+ ss = ffs(RUNTIME_INFO(i915)->sseu.subslice_7eu[i]) - 1;
vals[i] = 3 - ss;
}
@@ -532,6 +549,12 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine)
if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC);
+
+ /* WaEnableFloatBlendOptimization:icl */
+ wa_write_masked_or(wal,
+ GEN10_CACHE_MODE_SS,
+ 0, /* write-only, so skip validation */
+ _MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE));
}
void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
@@ -603,46 +626,8 @@ int intel_engine_emit_ctx_wa(struct i915_request *rq)
}
static void
-wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
+gen9_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
- struct i915_wa wa = {
- .reg = reg,
- .mask = val,
- .val = _MASKED_BIT_ENABLE(val)
- };
-
- _wa_add(wal, &wa);
-}
-
-static void
-wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
- u32 val)
-{
- struct i915_wa wa = {
- .reg = reg,
- .mask = mask,
- .val = val
- };
-
- _wa_add(wal, &wa);
-}
-
-static void
-wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
-{
- wa_write_masked_or(wal, reg, ~0, val);
-}
-
-static void
-wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
-{
- wa_write_masked_or(wal, reg, val, val);
-}
-
-static void gen9_gt_workarounds_init(struct drm_i915_private *i915)
-{
- struct i915_wa_list *wal = &i915->gt_wa_list;
-
/* WaDisableKillLogic:bxt,skl,kbl */
if (!IS_COFFEELAKE(i915))
wa_write_or(wal,
@@ -666,11 +651,10 @@ static void gen9_gt_workarounds_init(struct drm_i915_private *i915)
BDW_DISABLE_HDC_INVALIDATION);
}
-static void skl_gt_workarounds_init(struct drm_i915_private *i915)
+static void
+skl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
- struct i915_wa_list *wal = &i915->gt_wa_list;
-
- gen9_gt_workarounds_init(i915);
+ gen9_gt_workarounds_init(i915, wal);
/* WaDisableGafsUnitClkGating:skl */
wa_write_or(wal,
@@ -684,11 +668,10 @@ static void skl_gt_workarounds_init(struct drm_i915_private *i915)
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
-static void bxt_gt_workarounds_init(struct drm_i915_private *i915)
+static void
+bxt_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
- struct i915_wa_list *wal = &i915->gt_wa_list;
-
- gen9_gt_workarounds_init(i915);
+ gen9_gt_workarounds_init(i915, wal);
/* WaInPlaceDecompressionHang:bxt */
wa_write_or(wal,
@@ -696,11 +679,10 @@ static void bxt_gt_workarounds_init(struct drm_i915_private *i915)
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
-static void kbl_gt_workarounds_init(struct drm_i915_private *i915)
+static void
+kbl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
- struct i915_wa_list *wal = &i915->gt_wa_list;
-
- gen9_gt_workarounds_init(i915);
+ gen9_gt_workarounds_init(i915, wal);
/* WaDisableDynamicCreditSharing:kbl */
if (IS_KBL_REVID(i915, 0, KBL_REVID_B0))
@@ -719,16 +701,16 @@ static void kbl_gt_workarounds_init(struct drm_i915_private *i915)
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
-static void glk_gt_workarounds_init(struct drm_i915_private *i915)
+static void
+glk_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
- gen9_gt_workarounds_init(i915);
+ gen9_gt_workarounds_init(i915, wal);
}
-static void cfl_gt_workarounds_init(struct drm_i915_private *i915)
+static void
+cfl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
- struct i915_wa_list *wal = &i915->gt_wa_list;
-
- gen9_gt_workarounds_init(i915);
+ gen9_gt_workarounds_init(i915, wal);
/* WaDisableGafsUnitClkGating:cfl */
wa_write_or(wal,
@@ -741,10 +723,10 @@ static void cfl_gt_workarounds_init(struct drm_i915_private *i915)
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
-static void wa_init_mcr(struct drm_i915_private *dev_priv)
+static void
+wa_init_mcr(struct drm_i915_private *dev_priv, struct i915_wa_list *wal)
{
- const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu);
- struct i915_wa_list *wal = &dev_priv->gt_wa_list;
+ const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
u32 mcr_slice_subslice_mask;
/*
@@ -804,11 +786,10 @@ static void wa_init_mcr(struct drm_i915_private *dev_priv)
intel_calculate_mcr_s_ss_select(dev_priv));
}
-static void cnl_gt_workarounds_init(struct drm_i915_private *i915)
+static void
+cnl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
- struct i915_wa_list *wal = &i915->gt_wa_list;
-
- wa_init_mcr(i915);
+ wa_init_mcr(i915, wal);
/* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
@@ -822,11 +803,10 @@ static void cnl_gt_workarounds_init(struct drm_i915_private *i915)
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
-static void icl_gt_workarounds_init(struct drm_i915_private *i915)
+static void
+icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
- struct i915_wa_list *wal = &i915->gt_wa_list;
-
- wa_init_mcr(i915);
+ wa_init_mcr(i915, wal);
/* WaInPlaceDecompressionHang:icl */
wa_write_or(wal,
@@ -879,12 +859,9 @@ static void icl_gt_workarounds_init(struct drm_i915_private *i915)
GAMT_CHKN_DISABLE_L3_COH_PIPE);
}
-void intel_gt_init_workarounds(struct drm_i915_private *i915)
+static void
+gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
- struct i915_wa_list *wal = &i915->gt_wa_list;
-
- wa_init_start(wal, "GT");
-
if (INTEL_GEN(i915) < 8)
return;
else if (IS_BROADWELL(i915))
@@ -892,22 +869,29 @@ void intel_gt_init_workarounds(struct drm_i915_private *i915)
else if (IS_CHERRYVIEW(i915))
return;
else if (IS_SKYLAKE(i915))
- skl_gt_workarounds_init(i915);
+ skl_gt_workarounds_init(i915, wal);
else if (IS_BROXTON(i915))
- bxt_gt_workarounds_init(i915);
+ bxt_gt_workarounds_init(i915, wal);
else if (IS_KABYLAKE(i915))
- kbl_gt_workarounds_init(i915);
+ kbl_gt_workarounds_init(i915, wal);
else if (IS_GEMINILAKE(i915))
- glk_gt_workarounds_init(i915);
+ glk_gt_workarounds_init(i915, wal);
else if (IS_COFFEELAKE(i915))
- cfl_gt_workarounds_init(i915);
+ cfl_gt_workarounds_init(i915, wal);
else if (IS_CANNONLAKE(i915))
- cnl_gt_workarounds_init(i915);
+ cnl_gt_workarounds_init(i915, wal);
else if (IS_ICELAKE(i915))
- icl_gt_workarounds_init(i915);
+ icl_gt_workarounds_init(i915, wal);
else
MISSING_CASE(INTEL_GEN(i915));
+}
+void intel_gt_init_workarounds(struct drm_i915_private *i915)
+{
+ struct i915_wa_list *wal = &i915->gt_wa_list;
+
+ wa_init_start(wal, "GT");
+ gt_init_workarounds(i915, wal);
wa_init_finish(wal);
}
@@ -955,8 +939,6 @@ wa_list_apply(struct drm_i915_private *dev_priv, const struct i915_wa_list *wal)
intel_uncore_forcewake_put__locked(dev_priv, fw);
spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
-
- DRM_DEBUG_DRIVER("Applied %u %s workarounds\n", wal->count, wal->name);
}
void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv)
@@ -1126,14 +1108,12 @@ void intel_engine_apply_whitelist(struct intel_engine_cs *engine)
for (; i < RING_MAX_NONPRIV_SLOTS; i++)
I915_WRITE(RING_FORCE_TO_NONPRIV(base, i),
i915_mmio_reg_offset(RING_NOPID(base)));
-
- DRM_DEBUG_DRIVER("Applied %u %s workarounds\n", wal->count, wal->name);
}
-static void rcs_engine_wa_init(struct intel_engine_cs *engine)
+static void
+rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = engine->i915;
- struct i915_wa_list *wal = &engine->wa_list;
if (IS_ICELAKE(i915)) {
/* This is not an Wa. Enable for better image quality */
@@ -1190,7 +1170,7 @@ static void rcs_engine_wa_init(struct intel_engine_cs *engine)
GEN7_DISABLE_SAMPLER_PREFETCH);
}
- if (IS_GEN9(i915) || IS_CANNONLAKE(i915)) {
+ if (IS_GEN(i915, 9) || IS_CANNONLAKE(i915)) {
/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,cnl */
wa_masked_en(wal,
GEN7_FF_SLICE_CS_CHICKEN1,
@@ -1211,7 +1191,7 @@ static void rcs_engine_wa_init(struct intel_engine_cs *engine)
GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
}
- if (IS_GEN9(i915)) {
+ if (IS_GEN(i915, 9)) {
/* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
wa_masked_en(wal,
GEN9_CSFE_CHICKEN1_RCS,
@@ -1237,10 +1217,10 @@ static void rcs_engine_wa_init(struct intel_engine_cs *engine)
}
}
-static void xcs_engine_wa_init(struct intel_engine_cs *engine)
+static void
+xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = engine->i915;
- struct i915_wa_list *wal = &engine->wa_list;
/* WaKBLVECSSemaphoreWaitPoll:kbl */
if (IS_KBL_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) {
@@ -1250,6 +1230,18 @@ static void xcs_engine_wa_init(struct intel_engine_cs *engine)
}
}
+static void
+engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal)
+{
+ if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 8))
+ return;
+
+ if (engine->id == RCS)
+ rcs_engine_wa_init(engine, wal);
+ else
+ xcs_engine_wa_init(engine, wal);
+}
+
void intel_engine_init_workarounds(struct intel_engine_cs *engine)
{
struct i915_wa_list *wal = &engine->wa_list;
@@ -1258,12 +1250,7 @@ void intel_engine_init_workarounds(struct intel_engine_cs *engine)
return;
wa_init_start(wal, engine->name);
-
- if (engine->id == RCS)
- rcs_engine_wa_init(engine);
- else
- xcs_engine_wa_init(engine);
-
+ engine_init_workarounds(engine, wal);
wa_init_finish(wal);
}
@@ -1273,11 +1260,5 @@ void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-static bool intel_engine_verify_workarounds(struct intel_engine_cs *engine,
- const char *from)
-{
- return wa_list_verify(engine->i915, &engine->wa_list, from);
-}
-
#include "selftests/intel_workarounds.c"
#endif
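
The workarounds refactor above splits table construction from the exported entry points: the per-platform builders now take an explicit struct i915_wa_list *, and intel_gt_init_workarounds()/intel_engine_init_workarounds() only bracket the shared builder with wa_init_start()/wa_init_finish(), which also lets the selftests rebuild the same table. A minimal standalone sketch of that shape (simplified stand-in types and hypothetical helper names, not the driver's real structures, and not part of the patch):

struct wa_list {
	const char *name;
	unsigned int count;
};

/* per-platform builder: appends entries to the caller-supplied list */
static void platform_gt_workarounds(struct wa_list *wal)
{
	wal->count++;	/* stand-in for wa_write()/wa_masked_en() entries */
}

/* exported wrapper: owns start/finish, delegates the table building */
void gt_init_workarounds_sketch(struct wa_list *wal)
{
	wal->name = "GT";
	wal->count = 0;			/* wa_init_start() equivalent */
	platform_gt_workarounds(wal);	/* reusable from a selftest as well */
	/* wa_init_finish() equivalent would trim/validate the table here */
}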
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index 26c065c8d2c0..a9a2fa35876f 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -972,7 +972,6 @@ static int gpu_write(struct i915_vma *vma,
{
struct i915_request *rq;
struct i915_vma *batch;
- int flags = 0;
int err;
GEM_BUG_ON(!intel_engine_can_store_dword(engine));
@@ -981,14 +980,14 @@ static int gpu_write(struct i915_vma *vma,
if (err)
return err;
- rq = i915_request_alloc(engine, ctx);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
-
batch = gpu_write_dw(vma, dword * sizeof(u32), value);
- if (IS_ERR(batch)) {
- err = PTR_ERR(batch);
- goto err_request;
+ if (IS_ERR(batch))
+ return PTR_ERR(batch);
+
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_batch;
}
err = i915_vma_move_to_active(batch, rq, 0);
@@ -996,21 +995,21 @@ static int gpu_write(struct i915_vma *vma,
goto err_request;
i915_gem_object_set_active_reference(batch->obj);
- i915_vma_unpin(batch);
- i915_vma_close(batch);
- err = engine->emit_bb_start(rq,
- batch->node.start, batch->node.size,
- flags);
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
if (err)
goto err_request;
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ err = engine->emit_bb_start(rq,
+ batch->node.start, batch->node.size,
+ 0);
+err_request:
if (err)
i915_request_skip(rq, err);
-
-err_request:
i915_request_add(rq);
+err_batch:
+ i915_vma_unpin(batch);
+ i915_vma_close(batch);
return err;
}
@@ -1450,7 +1449,7 @@ static int igt_ppgtt_pin_update(void *arg)
* huge-gtt-pages.
*/
- if (!HAS_FULL_48BIT_PPGTT(dev_priv)) {
+ if (!ppgtt || !i915_vm_is_48bit(&ppgtt->vm)) {
pr_info("48b PPGTT not supported, skipping\n");
return 0;
}
@@ -1703,7 +1702,6 @@ int i915_gem_huge_page_mock_selftests(void)
};
struct drm_i915_private *dev_priv;
struct i915_hw_ppgtt *ppgtt;
- struct pci_dev *pdev;
int err;
dev_priv = mock_gem_device();
@@ -1713,9 +1711,6 @@ int i915_gem_huge_page_mock_selftests(void)
/* Pretend to be a device which supports the 48b PPGTT */
mkwrite_device_info(dev_priv)->ppgtt = INTEL_PPGTT_FULL_4LVL;
- pdev = dev_priv->drm.pdev;
- dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(39));
-
mutex_lock(&dev_priv->drm.struct_mutex);
ppgtt = i915_ppgtt_create(dev_priv, ERR_PTR(-ENODEV));
if (IS_ERR(ppgtt)) {
@@ -1761,6 +1756,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
};
struct drm_file *file;
struct i915_gem_context *ctx;
+ intel_wakeref_t wakeref;
int err;
if (!HAS_PPGTT(dev_priv)) {
@@ -1776,7 +1772,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
return PTR_ERR(file);
mutex_lock(&dev_priv->drm.struct_mutex);
- intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(dev_priv);
ctx = live_context(dev_priv, file);
if (IS_ERR(ctx)) {
@@ -1790,7 +1786,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
err = i915_subtests(tests, ctx);
out_unlock:
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
mutex_unlock(&dev_priv->drm.struct_mutex);
mock_file_free(dev_priv, file);
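
Throughout these selftest hunks, intel_runtime_pm_get() now returns an intel_wakeref_t cookie that must be handed back to intel_runtime_pm_put(), so every acquire can be paired with its release for wakeref leak tracking. A sketch of the calling convention (not part of the patch; do_pm_work() is a hypothetical placeholder for the work done while awake):

static int do_pm_work_example(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;
	int err;

	wakeref = intel_runtime_pm_get(i915);	/* device held awake from here */
	err = do_pm_work(i915);			/* hypothetical workload */
	intel_runtime_pm_put(i915, wakeref);	/* release with the same cookie */

	return err;
}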
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
new file mode 100644
index 000000000000..337b1f98b923
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -0,0 +1,157 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "../i915_selftest.h"
+
+#include "igt_flush_test.h"
+#include "lib_sw_fence.h"
+
+struct live_active {
+ struct i915_active base;
+ bool retired;
+};
+
+static void __live_active_retire(struct i915_active *base)
+{
+ struct live_active *active = container_of(base, typeof(*active), base);
+
+ active->retired = true;
+}
+
+static int __live_active_setup(struct drm_i915_private *i915,
+ struct live_active *active)
+{
+ struct intel_engine_cs *engine;
+ struct i915_sw_fence *submit;
+ enum intel_engine_id id;
+ unsigned int count = 0;
+ int err = 0;
+
+ submit = heap_fence_create(GFP_KERNEL);
+ if (!submit)
+ return -ENOMEM;
+
+ i915_active_init(i915, &active->base, __live_active_retire);
+ active->retired = false;
+
+ if (!i915_active_acquire(&active->base)) {
+ pr_err("First i915_active_acquire should report being idle\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ for_each_engine(engine, i915, id) {
+ struct i915_request *rq;
+
+ rq = i915_request_alloc(engine, i915->kernel_context);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
+ submit,
+ GFP_KERNEL);
+ if (err >= 0)
+ err = i915_active_ref(&active->base,
+ rq->fence.context, rq);
+ i915_request_add(rq);
+ if (err) {
+ pr_err("Failed to track active ref!\n");
+ break;
+ }
+
+ count++;
+ }
+
+ i915_active_release(&active->base);
+ if (active->retired && count) {
+ pr_err("i915_active retired before submission!\n");
+ err = -EINVAL;
+ }
+ if (active->base.count != count) {
+ pr_err("i915_active not tracking all requests, found %d, expected %d\n",
+ active->base.count, count);
+ err = -EINVAL;
+ }
+
+out:
+ i915_sw_fence_commit(submit);
+ heap_fence_put(submit);
+
+ return err;
+}
+
+static int live_active_wait(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct live_active active;
+ intel_wakeref_t wakeref;
+ int err;
+
+ /* Check that we get a callback when requests retire upon waiting */
+
+ mutex_lock(&i915->drm.struct_mutex);
+ wakeref = intel_runtime_pm_get(i915);
+
+ err = __live_active_setup(i915, &active);
+
+ i915_active_wait(&active.base);
+ if (!active.retired) {
+ pr_err("i915_active not retired after waiting!\n");
+ err = -EINVAL;
+ }
+
+ i915_active_fini(&active.base);
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+
+ intel_runtime_pm_put(i915, wakeref);
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
+static int live_active_retire(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct live_active active;
+ intel_wakeref_t wakeref;
+ int err;
+
+ /* Check that we get a callback when requests are indirectly retired */
+
+ mutex_lock(&i915->drm.struct_mutex);
+ wakeref = intel_runtime_pm_get(i915);
+
+ err = __live_active_setup(i915, &active);
+
+ /* waits for & retires all requests */
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+
+ if (!active.retired) {
+ pr_err("i915_active not retired after flushing!\n");
+ err = -EINVAL;
+ }
+
+ i915_active_fini(&active.base);
+ intel_runtime_pm_put(i915, wakeref);
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
+int i915_active_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_active_wait),
+ SUBTEST(live_active_retire),
+ };
+
+ if (i915_terminally_wedged(&i915->gpu_error))
+ return 0;
+
+ return i915_subtests(tests, i915);
+}
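
For reference, the i915_active calls exercised by the new selftest follow this lifecycle; the snippet below is a condensed sketch of the sequence used in the file above, not part of the patch, and my_retire_cb stands in for a retire callback such as __live_active_retire():

static int active_lifecycle_sketch(struct drm_i915_private *i915,
				   struct i915_active *ref,
				   struct i915_request *rq)
{
	int err;

	i915_active_init(i915, ref, my_retire_cb);	/* install the retire callback */

	if (!i915_active_acquire(ref))			/* true only when previously idle */
		return -EINVAL;

	err = i915_active_ref(ref, rq->fence.context, rq); /* track an in-flight request */
	i915_active_release(ref);			/* drop the acquire reference */

	i915_active_wait(ref);				/* wait for tracked requests, firing retire */
	i915_active_fini(ref);

	return err;
}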
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index d0aa19d17653..e77b7ed449ae 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -16,9 +16,10 @@ static int switch_to_context(struct drm_i915_private *i915,
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
int err = 0;
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
for_each_engine(engine, i915, id) {
struct i915_request *rq;
@@ -32,7 +33,7 @@ static int switch_to_context(struct drm_i915_private *i915,
i915_request_add(rq);
}
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
return err;
}
@@ -65,7 +66,9 @@ static void trash_stolen(struct drm_i915_private *i915)
static void simulate_hibernate(struct drm_i915_private *i915)
{
- intel_runtime_pm_get(i915);
+ intel_wakeref_t wakeref;
+
+ wakeref = intel_runtime_pm_get(i915);
/*
* As a final sting in the tail, invalidate stolen. Under a real S4,
@@ -76,7 +79,7 @@ static void simulate_hibernate(struct drm_i915_private *i915)
*/
trash_stolen(i915);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
}
static int pm_prepare(struct drm_i915_private *i915)
@@ -93,39 +96,39 @@ static int pm_prepare(struct drm_i915_private *i915)
static void pm_suspend(struct drm_i915_private *i915)
{
- intel_runtime_pm_get(i915);
-
- i915_gem_suspend_gtt_mappings(i915);
- i915_gem_suspend_late(i915);
+ intel_wakeref_t wakeref;
- intel_runtime_pm_put(i915);
+ with_intel_runtime_pm(i915, wakeref) {
+ i915_gem_suspend_gtt_mappings(i915);
+ i915_gem_suspend_late(i915);
+ }
}
static void pm_hibernate(struct drm_i915_private *i915)
{
- intel_runtime_pm_get(i915);
+ intel_wakeref_t wakeref;
- i915_gem_suspend_gtt_mappings(i915);
+ with_intel_runtime_pm(i915, wakeref) {
+ i915_gem_suspend_gtt_mappings(i915);
- i915_gem_freeze(i915);
- i915_gem_freeze_late(i915);
-
- intel_runtime_pm_put(i915);
+ i915_gem_freeze(i915);
+ i915_gem_freeze_late(i915);
+ }
}
static void pm_resume(struct drm_i915_private *i915)
{
+ intel_wakeref_t wakeref;
+
/*
* Both suspend and hibernate follow the same wakeup path and assume
* that runtime-pm just works.
*/
- intel_runtime_pm_get(i915);
-
- intel_engines_sanitize(i915);
- i915_gem_sanitize(i915);
- i915_gem_resume(i915);
-
- intel_runtime_pm_put(i915);
+ with_intel_runtime_pm(i915, wakeref) {
+ intel_engines_sanitize(i915, false);
+ i915_gem_sanitize(i915);
+ i915_gem_resume(i915);
+ }
}
static int igt_gem_suspend(void *arg)
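
pm_suspend(), pm_hibernate() and pm_resume() above switch to the scoped with_intel_runtime_pm() form. A sketch of how such a scoped-acquire macro is conventionally built (not part of the patch; it relies on a held wakeref cookie being non-zero, and the driver's own header is authoritative):

#define with_intel_runtime_pm(i915, wf) \
	for ((wf) = intel_runtime_pm_get(i915); (wf); \
	     intel_runtime_pm_put((i915), (wf)), (wf) = 0)

The single-pass for-loop runs the put after the attached statement block completes, which is why the converted callers no longer need explicit put calls on that path.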
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
index f7392c1ffe75..fd89a5a33c1a 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
@@ -279,6 +279,7 @@ static int igt_gem_coherency(void *arg)
struct drm_i915_private *i915 = arg;
const struct igt_coherency_mode *read, *write, *over;
struct drm_i915_gem_object *obj;
+ intel_wakeref_t wakeref;
unsigned long count, n;
u32 *offsets, *values;
int err = 0;
@@ -298,7 +299,7 @@ static int igt_gem_coherency(void *arg)
values = offsets + ncachelines;
mutex_lock(&i915->drm.struct_mutex);
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
for (over = igt_coherency_mode; over->name; over++) {
if (!over->set)
continue;
@@ -376,7 +377,7 @@ static int igt_gem_coherency(void *arg)
}
}
unlock:
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
kfree(offsets);
return err;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index 7d82043aff10..d00d0bb07784 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -24,9 +24,13 @@
#include <linux/prime_numbers.h>
+#include "../i915_reset.h"
#include "../i915_selftest.h"
#include "i915_random.h"
#include "igt_flush_test.h"
+#include "igt_live_test.h"
+#include "igt_reset.h"
+#include "igt_spinner.h"
#include "mock_drm.h"
#include "mock_gem_device.h"
@@ -34,84 +38,6 @@
#define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))
-struct live_test {
- struct drm_i915_private *i915;
- const char *func;
- const char *name;
-
- unsigned int reset_global;
- unsigned int reset_engine[I915_NUM_ENGINES];
-};
-
-static int begin_live_test(struct live_test *t,
- struct drm_i915_private *i915,
- const char *func,
- const char *name)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int err;
-
- t->i915 = i915;
- t->func = func;
- t->name = name;
-
- err = i915_gem_wait_for_idle(i915,
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
- if (err) {
- pr_err("%s(%s): failed to idle before, with err=%d!",
- func, name, err);
- return err;
- }
-
- i915->gpu_error.missed_irq_rings = 0;
- t->reset_global = i915_reset_count(&i915->gpu_error);
-
- for_each_engine(engine, i915, id)
- t->reset_engine[id] =
- i915_reset_engine_count(&i915->gpu_error, engine);
-
- return 0;
-}
-
-static int end_live_test(struct live_test *t)
-{
- struct drm_i915_private *i915 = t->i915;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- return -EIO;
-
- if (t->reset_global != i915_reset_count(&i915->gpu_error)) {
- pr_err("%s(%s): GPU was reset %d times!\n",
- t->func, t->name,
- i915_reset_count(&i915->gpu_error) - t->reset_global);
- return -EIO;
- }
-
- for_each_engine(engine, i915, id) {
- if (t->reset_engine[id] ==
- i915_reset_engine_count(&i915->gpu_error, engine))
- continue;
-
- pr_err("%s(%s): engine '%s' was reset %d times!\n",
- t->func, t->name, engine->name,
- i915_reset_engine_count(&i915->gpu_error, engine) -
- t->reset_engine[id]);
- return -EIO;
- }
-
- if (i915->gpu_error.missed_irq_rings) {
- pr_err("%s(%s): Missed interrupts on engines %lx\n",
- t->func, t->name, i915->gpu_error.missed_irq_rings);
- return -EIO;
- }
-
- return 0;
-}
-
static int live_nop_switch(void *arg)
{
const unsigned int nctx = 1024;
@@ -119,8 +45,9 @@ static int live_nop_switch(void *arg)
struct intel_engine_cs *engine;
struct i915_gem_context **ctx;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
+ struct igt_live_test t;
struct drm_file *file;
- struct live_test t;
unsigned long n;
int err = -ENODEV;
@@ -140,7 +67,7 @@ static int live_nop_switch(void *arg)
return PTR_ERR(file);
mutex_lock(&i915->drm.struct_mutex);
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
@@ -184,7 +111,7 @@ static int live_nop_switch(void *arg)
pr_info("Populated %d contexts on %s in %lluns\n",
nctx, engine->name, ktime_to_ns(times[1] - times[0]));
- err = begin_live_test(&t, i915, __func__, engine->name);
+ err = igt_live_test_begin(&t, i915, __func__, engine->name);
if (err)
goto out_unlock;
@@ -232,7 +159,7 @@ static int live_nop_switch(void *arg)
break;
}
- err = end_live_test(&t);
+ err = igt_live_test_end(&t);
if (err)
goto out_unlock;
@@ -243,7 +170,7 @@ static int live_nop_switch(void *arg)
}
out_unlock:
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
mock_file_free(i915, file);
return err;
@@ -553,10 +480,10 @@ static int igt_ctx_exec(void *arg)
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj = NULL;
unsigned long ncontexts, ndwords, dw;
+ struct igt_live_test t;
struct drm_file *file;
IGT_TIMEOUT(end_time);
LIST_HEAD(objects);
- struct live_test t;
int err = -ENODEV;
/*
@@ -574,7 +501,7 @@ static int igt_ctx_exec(void *arg)
mutex_lock(&i915->drm.struct_mutex);
- err = begin_live_test(&t, i915, __func__, "");
+ err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
goto out_unlock;
@@ -593,6 +520,8 @@ static int igt_ctx_exec(void *arg)
}
for_each_engine(engine, i915, id) {
+ intel_wakeref_t wakeref;
+
if (!engine->context_size)
continue; /* No logical context support in HW */
@@ -607,9 +536,9 @@ static int igt_ctx_exec(void *arg)
}
}
- intel_runtime_pm_get(i915);
- err = gpu_fill(obj, ctx, engine, dw);
- intel_runtime_pm_put(i915);
+ err = 0;
+ with_intel_runtime_pm(i915, wakeref)
+ err = gpu_fill(obj, ctx, engine, dw);
if (err) {
pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
@@ -627,7 +556,7 @@ static int igt_ctx_exec(void *arg)
ncontexts++;
}
pr_info("Submitted %lu contexts (across %u engines), filling %lu dwords\n",
- ncontexts, INTEL_INFO(i915)->num_rings, ndwords);
+ ncontexts, RUNTIME_INFO(i915)->num_rings, ndwords);
dw = 0;
list_for_each_entry(obj, &objects, st_link) {
@@ -642,7 +571,7 @@ static int igt_ctx_exec(void *arg)
}
out_unlock:
- if (end_live_test(&t))
+ if (igt_live_test_end(&t))
err = -EIO;
mutex_unlock(&i915->drm.struct_mutex);
@@ -650,6 +579,469 @@ out_unlock:
return err;
}
+static struct i915_vma *rpcs_query_batch(struct i915_vma *vma)
+{
+ struct drm_i915_gem_object *obj;
+ u32 *cmd;
+ int err;
+
+ if (INTEL_GEN(vma->vm->i915) < 8)
+ return ERR_PTR(-EINVAL);
+
+ obj = i915_gem_object_create_internal(vma->vm->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto err;
+ }
+
+ *cmd++ = MI_STORE_REGISTER_MEM_GEN8;
+ *cmd++ = i915_mmio_reg_offset(GEN8_R_PWR_CLK_STATE);
+ *cmd++ = lower_32_bits(vma->node.start);
+ *cmd++ = upper_32_bits(vma->node.start);
+ *cmd = MI_BATCH_BUFFER_END;
+
+ i915_gem_object_unpin_map(obj);
+
+ err = i915_gem_object_set_to_gtt_domain(obj, false);
+ if (err)
+ goto err;
+
+ vma = i915_vma_instance(obj, vma->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto err;
+
+ return vma;
+
+err:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+}
+
+static int
+emit_rpcs_query(struct drm_i915_gem_object *obj,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ struct i915_request **rq_out)
+{
+ struct i915_request *rq;
+ struct i915_vma *batch;
+ struct i915_vma *vma;
+ int err;
+
+ GEM_BUG_ON(!intel_engine_can_store_dword(engine));
+
+ vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ err = i915_gem_object_set_to_gtt_domain(obj, false);
+ if (err)
+ return err;
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ return err;
+
+ batch = rpcs_query_batch(vma);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto err_vma;
+ }
+
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_batch;
+ }
+
+ err = engine->emit_bb_start(rq, batch->node.start, batch->node.size, 0);
+ if (err)
+ goto err_request;
+
+ err = i915_vma_move_to_active(batch, rq, 0);
+ if (err)
+ goto skip_request;
+
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ if (err)
+ goto skip_request;
+
+ i915_gem_object_set_active_reference(batch->obj);
+ i915_vma_unpin(batch);
+ i915_vma_close(batch);
+
+ i915_vma_unpin(vma);
+
+ *rq_out = i915_request_get(rq);
+
+ i915_request_add(rq);
+
+ return 0;
+
+skip_request:
+ i915_request_skip(rq, err);
+err_request:
+ i915_request_add(rq);
+err_batch:
+ i915_vma_unpin(batch);
+err_vma:
+ i915_vma_unpin(vma);
+
+ return err;
+}
+
+#define TEST_IDLE BIT(0)
+#define TEST_BUSY BIT(1)
+#define TEST_RESET BIT(2)
+
+static int
+__sseu_prepare(struct drm_i915_private *i915,
+ const char *name,
+ unsigned int flags,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ struct igt_spinner **spin_out)
+{
+ int ret = 0;
+
+ if (flags & (TEST_BUSY | TEST_RESET)) {
+ struct igt_spinner *spin;
+ struct i915_request *rq;
+
+ spin = kzalloc(sizeof(*spin), GFP_KERNEL);
+ if (!spin) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = igt_spinner_init(spin, i915);
+ if (ret)
+ return ret;
+
+ rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ igt_spinner_fini(spin);
+ kfree(spin);
+ goto out;
+ }
+
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(spin, rq)) {
+ pr_err("%s: Spinner failed to start!\n", name);
+ igt_spinner_end(spin);
+ igt_spinner_fini(spin);
+ kfree(spin);
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ *spin_out = spin;
+ }
+
+out:
+ return ret;
+}
+
+static int
+__read_slice_count(struct drm_i915_private *i915,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ struct drm_i915_gem_object *obj,
+ struct igt_spinner *spin,
+ u32 *rpcs)
+{
+ struct i915_request *rq = NULL;
+ u32 s_mask, s_shift;
+ unsigned int cnt;
+ u32 *buf, val;
+ long ret;
+
+ ret = emit_rpcs_query(obj, ctx, engine, &rq);
+ if (ret)
+ return ret;
+
+ if (spin)
+ igt_spinner_end(spin);
+
+ ret = i915_request_wait(rq, I915_WAIT_LOCKED, MAX_SCHEDULE_TIMEOUT);
+ i915_request_put(rq);
+ if (ret < 0)
+ return ret;
+
+ buf = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(buf)) {
+ ret = PTR_ERR(buf);
+ return ret;
+ }
+
+ if (INTEL_GEN(i915) >= 11) {
+ s_mask = GEN11_RPCS_S_CNT_MASK;
+ s_shift = GEN11_RPCS_S_CNT_SHIFT;
+ } else {
+ s_mask = GEN8_RPCS_S_CNT_MASK;
+ s_shift = GEN8_RPCS_S_CNT_SHIFT;
+ }
+
+ val = *buf;
+ cnt = (val & s_mask) >> s_shift;
+ *rpcs = val;
+
+ i915_gem_object_unpin_map(obj);
+
+ return cnt;
+}
+
+static int
+__check_rpcs(const char *name, u32 rpcs, int slices, unsigned int expected,
+ const char *prefix, const char *suffix)
+{
+ if (slices == expected)
+ return 0;
+
+ if (slices < 0) {
+ pr_err("%s: %s read slice count failed with %d%s\n",
+ name, prefix, slices, suffix);
+ return slices;
+ }
+
+ pr_err("%s: %s slice count %d is not %u%s\n",
+ name, prefix, slices, expected, suffix);
+
+ pr_info("RPCS=0x%x; %u%sx%u%s\n",
+ rpcs, slices,
+ (rpcs & GEN8_RPCS_S_CNT_ENABLE) ? "*" : "",
+ (rpcs & GEN8_RPCS_SS_CNT_MASK) >> GEN8_RPCS_SS_CNT_SHIFT,
+ (rpcs & GEN8_RPCS_SS_CNT_ENABLE) ? "*" : "");
+
+ return -EINVAL;
+}
+
+static int
+__sseu_finish(struct drm_i915_private *i915,
+ const char *name,
+ unsigned int flags,
+ struct i915_gem_context *ctx,
+ struct i915_gem_context *kctx,
+ struct intel_engine_cs *engine,
+ struct drm_i915_gem_object *obj,
+ unsigned int expected,
+ struct igt_spinner *spin)
+{
+ unsigned int slices =
+ hweight32(intel_device_default_sseu(i915).slice_mask);
+ u32 rpcs = 0;
+ int ret = 0;
+
+ if (flags & TEST_RESET) {
+ ret = i915_reset_engine(engine, "sseu");
+ if (ret)
+ goto out;
+ }
+
+ ret = __read_slice_count(i915, ctx, engine, obj,
+ flags & TEST_RESET ? NULL : spin, &rpcs);
+ ret = __check_rpcs(name, rpcs, ret, expected, "Context", "!");
+ if (ret)
+ goto out;
+
+ ret = __read_slice_count(i915, kctx, engine, obj, NULL, &rpcs);
+ ret = __check_rpcs(name, rpcs, ret, slices, "Kernel context", "!");
+
+out:
+ if (spin)
+ igt_spinner_end(spin);
+
+ if ((flags & TEST_IDLE) && ret == 0) {
+ ret = i915_gem_wait_for_idle(i915,
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret)
+ return ret;
+
+ ret = __read_slice_count(i915, ctx, engine, obj, NULL, &rpcs);
+ ret = __check_rpcs(name, rpcs, ret, expected,
+ "Context", " after idle!");
+ }
+
+ return ret;
+}
+
+static int
+__sseu_test(struct drm_i915_private *i915,
+ const char *name,
+ unsigned int flags,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ struct drm_i915_gem_object *obj,
+ struct intel_sseu sseu)
+{
+ struct igt_spinner *spin = NULL;
+ struct i915_gem_context *kctx;
+ int ret;
+
+ kctx = kernel_context(i915);
+ if (IS_ERR(kctx))
+ return PTR_ERR(kctx);
+
+ ret = __sseu_prepare(i915, name, flags, ctx, engine, &spin);
+ if (ret)
+ goto out;
+
+ ret = __i915_gem_context_reconfigure_sseu(ctx, engine, sseu);
+ if (ret)
+ goto out;
+
+ ret = __sseu_finish(i915, name, flags, ctx, kctx, engine, obj,
+ hweight32(sseu.slice_mask), spin);
+
+out:
+ if (spin) {
+ igt_spinner_end(spin);
+ igt_spinner_fini(spin);
+ kfree(spin);
+ }
+
+ kernel_context_close(kctx);
+
+ return ret;
+}
+
+static int
+__igt_ctx_sseu(struct drm_i915_private *i915,
+ const char *name,
+ unsigned int flags)
+{
+ struct intel_sseu default_sseu = intel_device_default_sseu(i915);
+ struct intel_engine_cs *engine = i915->engine[RCS];
+ struct drm_i915_gem_object *obj;
+ struct i915_gem_context *ctx;
+ struct intel_sseu pg_sseu;
+ intel_wakeref_t wakeref;
+ struct drm_file *file;
+ int ret;
+
+ if (INTEL_GEN(i915) < 9)
+ return 0;
+
+ if (!RUNTIME_INFO(i915)->sseu.has_slice_pg)
+ return 0;
+
+ if (hweight32(default_sseu.slice_mask) < 2)
+ return 0;
+
+ /*
+ * Gen11 VME friendly power-gated configuration with half enabled
+ * sub-slices.
+ */
+ pg_sseu = default_sseu;
+ pg_sseu.slice_mask = 1;
+ pg_sseu.subslice_mask =
+ ~(~0 << (hweight32(default_sseu.subslice_mask) / 2));
+
+ pr_info("SSEU subtest '%s', flags=%x, def_slices=%u, pg_slices=%u\n",
+ name, flags, hweight32(default_sseu.slice_mask),
+ hweight32(pg_sseu.slice_mask));
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ if (flags & TEST_RESET)
+ igt_global_reset_lock(i915);
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ ctx = i915_gem_create_context(i915, file->driver_priv);
+ if (IS_ERR(ctx)) {
+ ret = PTR_ERR(ctx);
+ goto out_unlock;
+ }
+
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ ret = PTR_ERR(obj);
+ goto out_unlock;
+ }
+
+ wakeref = intel_runtime_pm_get(i915);
+
+ /* First set the default mask. */
+ ret = __sseu_test(i915, name, flags, ctx, engine, obj, default_sseu);
+ if (ret)
+ goto out_fail;
+
+ /* Then set a power-gated configuration. */
+ ret = __sseu_test(i915, name, flags, ctx, engine, obj, pg_sseu);
+ if (ret)
+ goto out_fail;
+
+ /* Back to defaults. */
+ ret = __sseu_test(i915, name, flags, ctx, engine, obj, default_sseu);
+ if (ret)
+ goto out_fail;
+
+ /* One last power-gated configuration for the road. */
+ ret = __sseu_test(i915, name, flags, ctx, engine, obj, pg_sseu);
+ if (ret)
+ goto out_fail;
+
+out_fail:
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ ret = -EIO;
+
+ i915_gem_object_put(obj);
+
+ intel_runtime_pm_put(i915, wakeref);
+
+out_unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ if (flags & TEST_RESET)
+ igt_global_reset_unlock(i915);
+
+ mock_file_free(i915, file);
+
+ if (ret)
+ pr_err("%s: Failed with %d!\n", name, ret);
+
+ return ret;
+}
+
+static int igt_ctx_sseu(void *arg)
+{
+ struct {
+ const char *name;
+ unsigned int flags;
+ } *phase, phases[] = {
+ { .name = "basic", .flags = 0 },
+ { .name = "idle", .flags = TEST_IDLE },
+ { .name = "busy", .flags = TEST_BUSY },
+ { .name = "busy-reset", .flags = TEST_BUSY | TEST_RESET },
+ { .name = "busy-idle", .flags = TEST_BUSY | TEST_IDLE },
+ { .name = "reset-idle", .flags = TEST_RESET | TEST_IDLE },
+ };
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0, phase = phases; ret == 0 && i < ARRAY_SIZE(phases);
+ i++, phase++)
+ ret = __igt_ctx_sseu(arg, phase->name, phase->flags);
+
+ return ret;
+}
+
static int igt_ctx_readonly(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -657,11 +1049,11 @@ static int igt_ctx_readonly(void *arg)
struct i915_gem_context *ctx;
struct i915_hw_ppgtt *ppgtt;
unsigned long ndwords, dw;
+ struct igt_live_test t;
struct drm_file *file;
I915_RND_STATE(prng);
IGT_TIMEOUT(end_time);
LIST_HEAD(objects);
- struct live_test t;
int err = -ENODEV;
/*
@@ -676,7 +1068,7 @@ static int igt_ctx_readonly(void *arg)
mutex_lock(&i915->drm.struct_mutex);
- err = begin_live_test(&t, i915, __func__, "");
+ err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
goto out_unlock;
@@ -699,6 +1091,8 @@ static int igt_ctx_readonly(void *arg)
unsigned int id;
for_each_engine(engine, i915, id) {
+ intel_wakeref_t wakeref;
+
if (!intel_engine_can_store_dword(engine))
continue;
@@ -713,9 +1107,9 @@ static int igt_ctx_readonly(void *arg)
i915_gem_object_set_readonly(obj);
}
- intel_runtime_pm_get(i915);
- err = gpu_fill(obj, ctx, engine, dw);
- intel_runtime_pm_put(i915);
+ err = 0;
+ with_intel_runtime_pm(i915, wakeref)
+ err = gpu_fill(obj, ctx, engine, dw);
if (err) {
pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
@@ -732,7 +1126,7 @@ static int igt_ctx_readonly(void *arg)
}
}
pr_info("Submitted %lu dwords (across %u engines)\n",
- ndwords, INTEL_INFO(i915)->num_rings);
+ ndwords, RUNTIME_INFO(i915)->num_rings);
dw = 0;
list_for_each_entry(obj, &objects, st_link) {
@@ -752,7 +1146,7 @@ static int igt_ctx_readonly(void *arg)
}
out_unlock:
- if (end_live_test(&t))
+ if (igt_live_test_end(&t))
err = -EIO;
mutex_unlock(&i915->drm.struct_mutex);
@@ -976,10 +1370,11 @@ static int igt_vm_isolation(void *arg)
struct drm_i915_private *i915 = arg;
struct i915_gem_context *ctx_a, *ctx_b;
struct intel_engine_cs *engine;
+ intel_wakeref_t wakeref;
+ struct igt_live_test t;
struct drm_file *file;
I915_RND_STATE(prng);
unsigned long count;
- struct live_test t;
unsigned int id;
u64 vm_total;
int err;
@@ -998,7 +1393,7 @@ static int igt_vm_isolation(void *arg)
mutex_lock(&i915->drm.struct_mutex);
- err = begin_live_test(&t, i915, __func__, "");
+ err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
goto out_unlock;
@@ -1022,7 +1417,7 @@ static int igt_vm_isolation(void *arg)
GEM_BUG_ON(ctx_b->ppgtt->vm.total != vm_total);
vm_total -= I915_GTT_PAGE_SIZE;
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
count = 0;
for_each_engine(engine, i915, id) {
@@ -1064,12 +1459,12 @@ static int igt_vm_isolation(void *arg)
count += this;
}
pr_info("Checked %lu scratch offsets across %d engines\n",
- count, INTEL_INFO(i915)->num_rings);
+ count, RUNTIME_INFO(i915)->num_rings);
out_rpm:
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
out_unlock:
- if (end_live_test(&t))
+ if (igt_live_test_end(&t))
err = -EIO;
mutex_unlock(&i915->drm.struct_mutex);
@@ -1165,6 +1560,7 @@ static int igt_switch_to_kernel_context(void *arg)
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
int err;
/*
@@ -1175,7 +1571,7 @@ static int igt_switch_to_kernel_context(void *arg)
*/
mutex_lock(&i915->drm.struct_mutex);
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
ctx = kernel_context(i915);
if (IS_ERR(ctx)) {
@@ -1200,7 +1596,7 @@ out_unlock:
if (igt_flush_test(i915, I915_WAIT_LOCKED))
err = -EIO;
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
kernel_context_close(ctx);
@@ -1232,6 +1628,7 @@ int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
SUBTEST(live_nop_switch),
SUBTEST(igt_ctx_exec),
SUBTEST(igt_ctx_readonly),
+ SUBTEST(igt_ctx_sseu),
SUBTEST(igt_vm_isolation),
};
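
In __igt_ctx_sseu() above, the power-gated configuration keeps one slice and half of the default sub-slices; the expression ~(~0 << (n / 2)) simply sets the low n/2 bits of the mask. A standalone worked example (userspace C, not part of the patch):

#include <assert.h>
#include <stdint.h>

/* low (n / 2) bits set, mirroring pg_sseu.subslice_mask above */
static uint32_t half_subslice_mask(unsigned int n_default_subslices)
{
	return ~(~0u << (n_default_subslices / 2));
}

int main(void)
{
	assert(half_subslice_mask(8) == 0x0f);	/* 8 sub-slices -> keep 4 -> 0b1111 */
	assert(half_subslice_mask(6) == 0x07);	/* 6 sub-slices -> keep 3 -> 0b0111 */
	assert(half_subslice_mask(2) == 0x01);	/* 2 sub-slices -> keep 1 */
	return 0;
}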
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 4365979d8222..32dce7176f63 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -29,11 +29,23 @@
#include "mock_drm.h"
#include "mock_gem_device.h"
-static int populate_ggtt(struct drm_i915_private *i915)
+static void quirk_add(struct drm_i915_gem_object *obj,
+ struct list_head *objects)
{
+ /* quirk is only for live tiled objects, use it to declare ownership */
+ GEM_BUG_ON(obj->mm.quirked);
+ obj->mm.quirked = true;
+ list_add(&obj->st_link, objects);
+}
+
+static int populate_ggtt(struct drm_i915_private *i915,
+ struct list_head *objects)
+{
+ unsigned long unbound, bound, count;
struct drm_i915_gem_object *obj;
u64 size;
+ count = 0;
for (size = 0;
size + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
size += I915_GTT_PAGE_SIZE) {
@@ -43,21 +55,36 @@ static int populate_ggtt(struct drm_i915_private *i915)
if (IS_ERR(obj))
return PTR_ERR(obj);
+ quirk_add(obj, objects);
+
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
if (IS_ERR(vma))
return PTR_ERR(vma);
+
+ count++;
}
- if (!list_empty(&i915->mm.unbound_list)) {
- size = 0;
- list_for_each_entry(obj, &i915->mm.unbound_list, mm.link)
- size++;
+ unbound = 0;
+ list_for_each_entry(obj, &i915->mm.unbound_list, mm.link)
+ if (obj->mm.quirked)
+ unbound++;
+ if (unbound) {
+ pr_err("%s: Found %lu objects unbound, expected %u!\n",
+ __func__, unbound, 0);
+ return -EINVAL;
+ }
- pr_err("Found %lld objects unbound!\n", size);
+ bound = 0;
+ list_for_each_entry(obj, &i915->mm.bound_list, mm.link)
+ if (obj->mm.quirked)
+ bound++;
+ if (bound != count) {
+ pr_err("%s: Found %lu objects bound, expected %lu!\n",
+ __func__, bound, count);
return -EINVAL;
}
- if (list_empty(&i915->ggtt.vm.inactive_list)) {
+ if (list_empty(&i915->ggtt.vm.bound_list)) {
pr_err("No objects on the GGTT inactive list!\n");
return -EINVAL;
}
@@ -67,21 +94,26 @@ static int populate_ggtt(struct drm_i915_private *i915)
static void unpin_ggtt(struct drm_i915_private *i915)
{
+ struct i915_ggtt *ggtt = &i915->ggtt;
struct i915_vma *vma;
- list_for_each_entry(vma, &i915->ggtt.vm.inactive_list, vm_link)
- i915_vma_unpin(vma);
+ mutex_lock(&ggtt->vm.mutex);
+ list_for_each_entry(vma, &i915->ggtt.vm.bound_list, vm_link)
+ if (vma->obj->mm.quirked)
+ i915_vma_unpin(vma);
+ mutex_unlock(&ggtt->vm.mutex);
}
-static void cleanup_objects(struct drm_i915_private *i915)
+static void cleanup_objects(struct drm_i915_private *i915,
+ struct list_head *list)
{
struct drm_i915_gem_object *obj, *on;
- list_for_each_entry_safe(obj, on, &i915->mm.unbound_list, mm.link)
- i915_gem_object_put(obj);
-
- list_for_each_entry_safe(obj, on, &i915->mm.bound_list, mm.link)
+ list_for_each_entry_safe(obj, on, list, st_link) {
+ GEM_BUG_ON(!obj->mm.quirked);
+ obj->mm.quirked = false;
i915_gem_object_put(obj);
+ }
mutex_unlock(&i915->drm.struct_mutex);
@@ -94,11 +126,12 @@ static int igt_evict_something(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_ggtt *ggtt = &i915->ggtt;
+ LIST_HEAD(objects);
int err;
/* Fill the GGTT with pinned objects and try to evict one. */
- err = populate_ggtt(i915);
+ err = populate_ggtt(i915, &objects);
if (err)
goto cleanup;
@@ -127,7 +160,7 @@ static int igt_evict_something(void *arg)
}
cleanup:
- cleanup_objects(i915);
+ cleanup_objects(i915, &objects);
return err;
}
@@ -136,13 +169,14 @@ static int igt_overcommit(void *arg)
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
+ LIST_HEAD(objects);
int err;
/* Fill the GGTT with pinned objects and then try to pin one more.
* We expect it to fail.
*/
- err = populate_ggtt(i915);
+ err = populate_ggtt(i915, &objects);
if (err)
goto cleanup;
@@ -152,6 +186,8 @@ static int igt_overcommit(void *arg)
goto cleanup;
}
+ quirk_add(obj, &objects);
+
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
if (!IS_ERR(vma) || PTR_ERR(vma) != -ENOSPC) {
pr_err("Failed to evict+insert, i915_gem_object_ggtt_pin returned err=%d\n", (int)PTR_ERR(vma));
@@ -160,7 +196,7 @@ static int igt_overcommit(void *arg)
}
cleanup:
- cleanup_objects(i915);
+ cleanup_objects(i915, &objects);
return err;
}
@@ -172,11 +208,12 @@ static int igt_evict_for_vma(void *arg)
.start = 0,
.size = 4096,
};
+ LIST_HEAD(objects);
int err;
/* Fill the GGTT with pinned objects and try to evict a range. */
- err = populate_ggtt(i915);
+ err = populate_ggtt(i915, &objects);
if (err)
goto cleanup;
@@ -199,7 +236,7 @@ static int igt_evict_for_vma(void *arg)
}
cleanup:
- cleanup_objects(i915);
+ cleanup_objects(i915, &objects);
return err;
}
@@ -222,6 +259,7 @@ static int igt_evict_for_cache_color(void *arg)
};
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
+ LIST_HEAD(objects);
int err;
/* Currently the use of color_adjust is limited to cache domains within
@@ -237,6 +275,7 @@ static int igt_evict_for_cache_color(void *arg)
goto cleanup;
}
i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+ quirk_add(obj, &objects);
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
I915_GTT_PAGE_SIZE | flags);
@@ -252,6 +291,7 @@ static int igt_evict_for_cache_color(void *arg)
goto cleanup;
}
i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+ quirk_add(obj, &objects);
/* Neighbouring; same colour - should fit */
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
@@ -287,7 +327,7 @@ static int igt_evict_for_cache_color(void *arg)
cleanup:
unpin_ggtt(i915);
- cleanup_objects(i915);
+ cleanup_objects(i915, &objects);
ggtt->vm.mm.color_adjust = NULL;
return err;
}
@@ -296,11 +336,12 @@ static int igt_evict_vm(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_ggtt *ggtt = &i915->ggtt;
+ LIST_HEAD(objects);
int err;
/* Fill the GGTT with pinned objects and try to evict everything. */
- err = populate_ggtt(i915);
+ err = populate_ggtt(i915, &objects);
if (err)
goto cleanup;
@@ -322,7 +363,7 @@ static int igt_evict_vm(void *arg)
}
cleanup:
- cleanup_objects(i915);
+ cleanup_objects(i915, &objects);
return err;
}
@@ -336,6 +377,7 @@ static int igt_evict_contexts(void *arg)
struct drm_mm_node node;
struct reserved *next;
} *reserved = NULL;
+ intel_wakeref_t wakeref;
struct drm_mm_node hole;
unsigned long count;
int err;
@@ -355,7 +397,7 @@ static int igt_evict_contexts(void *arg)
return 0;
mutex_lock(&i915->drm.struct_mutex);
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
/* Reserve a block so that we know we have enough to fit a few rq */
memset(&hole, 0, sizeof(hole));
@@ -400,8 +442,10 @@ static int igt_evict_contexts(void *arg)
struct drm_file *file;
file = mock_file(i915);
- if (IS_ERR(file))
- return PTR_ERR(file);
+ if (IS_ERR(file)) {
+ err = PTR_ERR(file);
+ break;
+ }
count = 0;
mutex_lock(&i915->drm.struct_mutex);
@@ -464,7 +508,7 @@ out_locked:
}
if (drm_mm_node_allocated(&hole))
drm_mm_remove_node(&hole);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
@@ -480,14 +524,17 @@ int i915_gem_evict_mock_selftests(void)
SUBTEST(igt_overcommit),
};
struct drm_i915_private *i915;
- int err;
+ intel_wakeref_t wakeref;
+ int err = 0;
i915 = mock_gem_device();
if (!i915)
return -ENOMEM;
mutex_lock(&i915->drm.struct_mutex);
- err = i915_subtests(tests, i915);
+ with_intel_runtime_pm(i915, wakeref)
+ err = i915_subtests(tests, i915);
+
mutex_unlock(&i915->drm.struct_mutex);
drm_dev_put(&i915->drm);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index a9ed0ecc94e2..3850ef4a5ec8 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -275,6 +275,7 @@ static int lowlevel_hole(struct drm_i915_private *i915,
for (n = 0; n < count; n++) {
u64 addr = hole_start + order[n] * BIT_ULL(size);
+ intel_wakeref_t wakeref;
GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
@@ -293,9 +294,9 @@ static int lowlevel_hole(struct drm_i915_private *i915,
mock_vma.node.size = BIT_ULL(size);
mock_vma.node.start = addr;
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
}
count = n;
@@ -1144,6 +1145,7 @@ static int igt_ggtt_page(void *arg)
struct drm_i915_private *i915 = arg;
struct i915_ggtt *ggtt = &i915->ggtt;
struct drm_i915_gem_object *obj;
+ intel_wakeref_t wakeref;
struct drm_mm_node tmp;
unsigned int *order, n;
int err;
@@ -1169,7 +1171,7 @@ static int igt_ggtt_page(void *arg)
if (err)
goto out_unpin;
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
for (n = 0; n < count; n++) {
u64 offset = tmp.start + n * PAGE_SIZE;
@@ -1216,7 +1218,7 @@ static int igt_ggtt_page(void *arg)
kfree(order);
out_remove:
ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
drm_mm_remove_node(&tmp);
out_unpin:
i915_gem_object_unpin_pages(obj);
@@ -1235,7 +1237,10 @@ static void track_vma_bind(struct i915_vma *vma)
__i915_gem_object_pin_pages(obj);
vma->pages = obj->mm.pages;
- list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+
+ mutex_lock(&vma->vm->mutex);
+ list_move_tail(&vma->vm_link, &vma->vm->bound_list);
+ mutex_unlock(&vma->vm->mutex);
}
static int exercise_mock(struct drm_i915_private *i915,
@@ -1265,27 +1270,35 @@ static int exercise_mock(struct drm_i915_private *i915,
static int igt_mock_fill(void *arg)
{
- return exercise_mock(arg, fill_hole);
+ struct i915_ggtt *ggtt = arg;
+
+ return exercise_mock(ggtt->vm.i915, fill_hole);
}
static int igt_mock_walk(void *arg)
{
- return exercise_mock(arg, walk_hole);
+ struct i915_ggtt *ggtt = arg;
+
+ return exercise_mock(ggtt->vm.i915, walk_hole);
}
static int igt_mock_pot(void *arg)
{
- return exercise_mock(arg, pot_hole);
+ struct i915_ggtt *ggtt = arg;
+
+ return exercise_mock(ggtt->vm.i915, pot_hole);
}
static int igt_mock_drunk(void *arg)
{
- return exercise_mock(arg, drunk_hole);
+ struct i915_ggtt *ggtt = arg;
+
+ return exercise_mock(ggtt->vm.i915, drunk_hole);
}
static int igt_gtt_reserve(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct i915_ggtt *ggtt = arg;
struct drm_i915_gem_object *obj, *on;
LIST_HEAD(objects);
u64 total;
@@ -1298,11 +1311,12 @@ static int igt_gtt_reserve(void *arg)
/* Start by filling the GGTT */
for (total = 0;
- total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
- total += 2*I915_GTT_PAGE_SIZE) {
+ total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
+ total += 2 * I915_GTT_PAGE_SIZE) {
struct i915_vma *vma;
- obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);
+ obj = i915_gem_object_create_internal(ggtt->vm.i915,
+ 2 * PAGE_SIZE);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto out;
@@ -1316,20 +1330,20 @@ static int igt_gtt_reserve(void *arg)
list_add(&obj->st_link, &objects);
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(obj, &ggtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out;
}
- err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
+ err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
obj->base.size,
total,
obj->cache_level,
0);
if (err) {
pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
- total, i915->ggtt.vm.total, err);
+ total, ggtt->vm.total, err);
goto out;
}
track_vma_bind(vma);
@@ -1347,11 +1361,12 @@ static int igt_gtt_reserve(void *arg)
/* Now we start forcing evictions */
for (total = I915_GTT_PAGE_SIZE;
- total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
- total += 2*I915_GTT_PAGE_SIZE) {
+ total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
+ total += 2 * I915_GTT_PAGE_SIZE) {
struct i915_vma *vma;
- obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);
+ obj = i915_gem_object_create_internal(ggtt->vm.i915,
+ 2 * PAGE_SIZE);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto out;
@@ -1365,20 +1380,20 @@ static int igt_gtt_reserve(void *arg)
list_add(&obj->st_link, &objects);
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(obj, &ggtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out;
}
- err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
+ err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
obj->base.size,
total,
obj->cache_level,
0);
if (err) {
pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
- total, i915->ggtt.vm.total, err);
+ total, ggtt->vm.total, err);
goto out;
}
track_vma_bind(vma);
@@ -1399,7 +1414,7 @@ static int igt_gtt_reserve(void *arg)
struct i915_vma *vma;
u64 offset;
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(obj, &ggtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out;
@@ -1411,18 +1426,18 @@ static int igt_gtt_reserve(void *arg)
goto out;
}
- offset = random_offset(0, i915->ggtt.vm.total,
+ offset = random_offset(0, ggtt->vm.total,
2*I915_GTT_PAGE_SIZE,
I915_GTT_MIN_ALIGNMENT);
- err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
+ err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
obj->base.size,
offset,
obj->cache_level,
0);
if (err) {
pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
- total, i915->ggtt.vm.total, err);
+ total, ggtt->vm.total, err);
goto out;
}
track_vma_bind(vma);
@@ -1448,7 +1463,7 @@ out:
static int igt_gtt_insert(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct i915_ggtt *ggtt = arg;
struct drm_i915_gem_object *obj, *on;
struct drm_mm_node tmp = {};
const struct invalid_insert {
@@ -1457,8 +1472,8 @@ static int igt_gtt_insert(void *arg)
u64 start, end;
} invalid_insert[] = {
{
- i915->ggtt.vm.total + I915_GTT_PAGE_SIZE, 0,
- 0, i915->ggtt.vm.total,
+ ggtt->vm.total + I915_GTT_PAGE_SIZE, 0,
+ 0, ggtt->vm.total,
},
{
2*I915_GTT_PAGE_SIZE, 0,
@@ -1488,7 +1503,7 @@ static int igt_gtt_insert(void *arg)
/* Check a couple of obviously invalid requests */
for (ii = invalid_insert; ii->size; ii++) {
- err = i915_gem_gtt_insert(&i915->ggtt.vm, &tmp,
+ err = i915_gem_gtt_insert(&ggtt->vm, &tmp,
ii->size, ii->alignment,
I915_COLOR_UNEVICTABLE,
ii->start, ii->end,
@@ -1503,11 +1518,12 @@ static int igt_gtt_insert(void *arg)
/* Start by filling the GGTT */
for (total = 0;
- total + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
+ total + I915_GTT_PAGE_SIZE <= ggtt->vm.total;
total += I915_GTT_PAGE_SIZE) {
struct i915_vma *vma;
- obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
+ obj = i915_gem_object_create_internal(ggtt->vm.i915,
+ I915_GTT_PAGE_SIZE);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto out;
@@ -1521,15 +1537,15 @@ static int igt_gtt_insert(void *arg)
list_add(&obj->st_link, &objects);
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(obj, &ggtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out;
}
- err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
+ err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
obj->base.size, 0, obj->cache_level,
- 0, i915->ggtt.vm.total,
+ 0, ggtt->vm.total,
0);
if (err == -ENOSPC) {
/* maxed out the GGTT space */
@@ -1538,7 +1554,7 @@ static int igt_gtt_insert(void *arg)
}
if (err) {
pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
- total, i915->ggtt.vm.total, err);
+ total, ggtt->vm.total, err);
goto out;
}
track_vma_bind(vma);
@@ -1550,7 +1566,7 @@ static int igt_gtt_insert(void *arg)
list_for_each_entry(obj, &objects, st_link) {
struct i915_vma *vma;
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(obj, &ggtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out;
@@ -1570,7 +1586,7 @@ static int igt_gtt_insert(void *arg)
struct i915_vma *vma;
u64 offset;
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(obj, &ggtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out;
@@ -1585,13 +1601,13 @@ static int igt_gtt_insert(void *arg)
goto out;
}
- err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
+ err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
obj->base.size, 0, obj->cache_level,
- 0, i915->ggtt.vm.total,
+ 0, ggtt->vm.total,
0);
if (err) {
pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
- total, i915->ggtt.vm.total, err);
+ total, ggtt->vm.total, err);
goto out;
}
track_vma_bind(vma);
@@ -1607,11 +1623,12 @@ static int igt_gtt_insert(void *arg)
/* And then force evictions */
for (total = 0;
- total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
- total += 2*I915_GTT_PAGE_SIZE) {
+ total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
+ total += 2 * I915_GTT_PAGE_SIZE) {
struct i915_vma *vma;
- obj = i915_gem_object_create_internal(i915, 2*I915_GTT_PAGE_SIZE);
+ obj = i915_gem_object_create_internal(ggtt->vm.i915,
+ 2 * I915_GTT_PAGE_SIZE);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto out;
@@ -1625,19 +1642,19 @@ static int igt_gtt_insert(void *arg)
list_add(&obj->st_link, &objects);
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(obj, &ggtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out;
}
- err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
+ err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
obj->base.size, 0, obj->cache_level,
- 0, i915->ggtt.vm.total,
+ 0, ggtt->vm.total,
0);
if (err) {
pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
- total, i915->ggtt.vm.total, err);
+ total, ggtt->vm.total, err);
goto out;
}
track_vma_bind(vma);
@@ -1664,17 +1681,25 @@ int i915_gem_gtt_mock_selftests(void)
SUBTEST(igt_gtt_insert),
};
struct drm_i915_private *i915;
+ struct i915_ggtt ggtt;
int err;
i915 = mock_gem_device();
if (!i915)
return -ENOMEM;
+ mock_init_ggtt(i915, &ggtt);
+
mutex_lock(&i915->drm.struct_mutex);
- err = i915_subtests(tests, i915);
+ err = i915_subtests(tests, &ggtt);
+ mock_device_flush(i915);
mutex_unlock(&i915->drm.struct_mutex);
+ i915_gem_drain_freed_objects(i915);
+
+ mock_fini_ggtt(&ggtt);
drm_dev_put(&i915->drm);
+
return err;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
index c3999dd2021e..395ae878e0f7 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
@@ -238,6 +238,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
u32 *cpu;
GEM_BUG_ON(view.partial.size > nreal);
+ cond_resched();
err = i915_gem_object_set_to_gtt_domain(obj, true);
if (err) {
@@ -307,6 +308,7 @@ static int igt_partial_tiling(void *arg)
const unsigned int nreal = 1 << 12; /* largest tile row x2 */
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj;
+ intel_wakeref_t wakeref;
int tiling;
int err;
@@ -332,7 +334,7 @@ static int igt_partial_tiling(void *arg)
}
mutex_lock(&i915->drm.struct_mutex);
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
if (1) {
IGT_TIMEOUT(end);
@@ -443,7 +445,7 @@ next_tiling: ;
}
out_unlock:
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
i915_gem_object_unpin_pages(obj);
out:
@@ -505,11 +507,13 @@ static void disable_retire_worker(struct drm_i915_private *i915)
mutex_lock(&i915->drm.struct_mutex);
if (!i915->gt.active_requests++) {
- intel_runtime_pm_get(i915);
- i915_gem_unpark(i915);
- intel_runtime_pm_put(i915);
+ intel_wakeref_t wakeref;
+
+ with_intel_runtime_pm(i915, wakeref)
+ i915_gem_unpark(i915);
}
mutex_unlock(&i915->drm.struct_mutex);
+
cancel_delayed_work_sync(&i915->gt.retire_work);
cancel_delayed_work_sync(&i915->gt.idle_work);
}
@@ -577,6 +581,8 @@ static int igt_mmap_offset_exhaustion(void *arg)
/* Now fill with busy dead objects that we expect to reap */
for (loop = 0; loop < 3; loop++) {
+ intel_wakeref_t wakeref;
+
if (i915_terminally_wedged(&i915->gpu_error))
break;
@@ -586,10 +592,10 @@ static int igt_mmap_offset_exhaustion(void *arg)
goto out;
}
+ err = 0;
mutex_lock(&i915->drm.struct_mutex);
- intel_runtime_pm_get(i915);
- err = make_obj_busy(obj);
- intel_runtime_pm_put(i915);
+ with_intel_runtime_pm(i915, wakeref)
+ err = make_obj_busy(obj);
mutex_unlock(&i915->drm.struct_mutex);
if (err) {
pr_err("[loop %d] Failed to busy the object\n", loop);
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index a15713cae3b3..6d766925ad04 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -12,7 +12,9 @@
selftest(sanitycheck, i915_live_sanitycheck) /* keep first (igt selfcheck) */
selftest(uncore, intel_uncore_live_selftests)
selftest(workarounds, intel_workarounds_live_selftests)
+selftest(timelines, i915_timeline_live_selftests)
selftest(requests, i915_request_live_selftests)
+selftest(active, i915_active_live_selftests)
selftest(objects, i915_gem_object_live_selftests)
selftest(dmabuf, i915_gem_dmabuf_live_selftests)
selftest(coherency, i915_gem_coherency_live_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index 1b70208eeea7..88e5ab586337 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -15,8 +15,7 @@ selftest(scatterlist, scatterlist_mock_selftests)
selftest(syncmap, i915_syncmap_mock_selftests)
selftest(uncore, intel_uncore_mock_selftests)
selftest(engine, intel_engine_cs_mock_selftests)
-selftest(breadcrumbs, intel_breadcrumbs_mock_selftests)
-selftest(timelines, i915_gem_timeline_mock_selftests)
+selftest(timelines, i915_timeline_mock_selftests)
selftest(requests, i915_request_mock_selftests)
selftest(objects, i915_gem_object_mock_selftests)
selftest(dmabuf, i915_gem_dmabuf_mock_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_random.c b/drivers/gpu/drm/i915/selftests/i915_random.c
index 1f415ce47018..716a3f19f030 100644
--- a/drivers/gpu/drm/i915/selftests/i915_random.c
+++ b/drivers/gpu/drm/i915/selftests/i915_random.c
@@ -41,18 +41,37 @@ u64 i915_prandom_u64_state(struct rnd_state *rnd)
return x;
}
-void i915_random_reorder(unsigned int *order, unsigned int count,
- struct rnd_state *state)
+void i915_prandom_shuffle(void *arr, size_t elsz, size_t count,
+ struct rnd_state *state)
{
- unsigned int i, j;
+ char stack[128];
+
+ if (WARN_ON(elsz > sizeof(stack) || count > U32_MAX))
+ return;
+
+ if (!elsz || !count)
+ return;
+
+ /* Fisher-Yates shuffle courtesy of Knuth */
+ while (--count) {
+ size_t swp;
+
+ swp = i915_prandom_u32_max_state(count + 1, state);
+ if (swp == count)
+ continue;
- for (i = 0; i < count; i++) {
- BUILD_BUG_ON(sizeof(unsigned int) > sizeof(u32));
- j = i915_prandom_u32_max_state(count, state);
- swap(order[i], order[j]);
+ memcpy(stack, arr + count * elsz, elsz);
+ memcpy(arr + count * elsz, arr + swp * elsz, elsz);
+ memcpy(arr + swp * elsz, stack, elsz);
}
}
+void i915_random_reorder(unsigned int *order, unsigned int count,
+ struct rnd_state *state)
+{
+ i915_prandom_shuffle(order, sizeof(*order), count, state);
+}
+
unsigned int *i915_random_order(unsigned int count, struct rnd_state *state)
{
unsigned int *order, i;
diff --git a/drivers/gpu/drm/i915/selftests/i915_random.h b/drivers/gpu/drm/i915/selftests/i915_random.h
index 7dffedc501ca..8e1ff9c105b6 100644
--- a/drivers/gpu/drm/i915/selftests/i915_random.h
+++ b/drivers/gpu/drm/i915/selftests/i915_random.h
@@ -54,4 +54,7 @@ void i915_random_reorder(unsigned int *order,
unsigned int count,
struct rnd_state *state);
+void i915_prandom_shuffle(void *arr, size_t elsz, size_t count,
+ struct rnd_state *state);
+
#endif /* !__I915_SELFTESTS_RANDOM_H__ */
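
i915_prandom_shuffle() above is a byte-wise Fisher-Yates shuffle, and i915_random_reorder() becomes a thin wrapper over it. A standalone userspace illustration of the same walk (not part of the patch; uses rand() in place of the driver's rnd_state, and the slight modulo bias is irrelevant for an illustration):

#include <stdlib.h>

/* Fisher-Yates: from the tail, swap each slot with a random earlier-or-equal slot */
static void shuffle_uints(unsigned int *arr, size_t count)
{
	if (count < 2)
		return;

	while (--count) {
		size_t swp = (size_t)rand() % (count + 1);	/* 0 .. count inclusive */
		unsigned int tmp;

		if (swp == count)
			continue;

		tmp = arr[count];
		arr[count] = arr[swp];
		arr[swp] = tmp;
	}
}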
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 07e557815308..6733dc5b6b4c 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -25,8 +25,12 @@
#include <linux/prime_numbers.h>
#include "../i915_selftest.h"
+#include "i915_random.h"
+#include "igt_live_test.h"
+#include "lib_sw_fence.h"
#include "mock_context.h"
+#include "mock_drm.h"
#include "mock_gem_device.h"
static int igt_add_request(void *arg)
@@ -246,93 +250,285 @@ err_context_0:
return err;
}
-int i915_request_mock_selftests(void)
+struct smoketest {
+ struct intel_engine_cs *engine;
+ struct i915_gem_context **contexts;
+ atomic_long_t num_waits, num_fences;
+ int ncontexts, max_batch;
+ struct i915_request *(*request_alloc)(struct i915_gem_context *,
+ struct intel_engine_cs *);
+};
+
+static struct i915_request *
+__mock_request_alloc(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
{
- static const struct i915_subtest tests[] = {
- SUBTEST(igt_add_request),
- SUBTEST(igt_wait_request),
- SUBTEST(igt_fence_wait),
- SUBTEST(igt_request_rewind),
- };
- struct drm_i915_private *i915;
- int err;
+ return mock_request(engine, ctx, 0);
+}
- i915 = mock_gem_device();
- if (!i915)
+static struct i915_request *
+__live_request_alloc(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ return i915_request_alloc(engine, ctx);
+}
+
+static int __igt_breadcrumbs_smoketest(void *arg)
+{
+ struct smoketest *t = arg;
+ struct mutex * const BKL = &t->engine->i915->drm.struct_mutex;
+ const unsigned int max_batch = min(t->ncontexts, t->max_batch) - 1;
+ const unsigned int total = 4 * t->ncontexts + 1;
+ unsigned int num_waits = 0, num_fences = 0;
+ struct i915_request **requests;
+ I915_RND_STATE(prng);
+ unsigned int *order;
+ int err = 0;
+
+ /*
+ * A very simple test to catch the most egregious of list handling bugs.
+ *
+ * At its heart, we simply create oodles of requests running across
+ * multiple kthreads and enable signaling on them, for the sole purpose
+ * of stressing our breadcrumb handling. The only inspection we do is
+ * that the fences were marked as signaled.
+ */
+
+ requests = kmalloc_array(total, sizeof(*requests), GFP_KERNEL);
+ if (!requests)
return -ENOMEM;
- err = i915_subtests(tests, i915);
- drm_dev_put(&i915->drm);
+ order = i915_random_order(total, &prng);
+ if (!order) {
+ err = -ENOMEM;
+ goto out_requests;
+ }
- return err;
-}
+ while (!kthread_should_stop()) {
+ struct i915_sw_fence *submit, *wait;
+ unsigned int n, count;
-struct live_test {
- struct drm_i915_private *i915;
- const char *func;
- const char *name;
+ submit = heap_fence_create(GFP_KERNEL);
+ if (!submit) {
+ err = -ENOMEM;
+ break;
+ }
- unsigned int reset_count;
-};
+ wait = heap_fence_create(GFP_KERNEL);
+ if (!wait) {
+ i915_sw_fence_commit(submit);
+ heap_fence_put(submit);
+ err = -ENOMEM;
+ break;
+ }
-static int begin_live_test(struct live_test *t,
- struct drm_i915_private *i915,
- const char *func,
- const char *name)
-{
- int err;
+ i915_random_reorder(order, total, &prng);
+ count = 1 + i915_prandom_u32_max_state(max_batch, &prng);
- t->i915 = i915;
- t->func = func;
- t->name = name;
+ for (n = 0; n < count; n++) {
+ struct i915_gem_context *ctx =
+ t->contexts[order[n] % t->ncontexts];
+ struct i915_request *rq;
- err = i915_gem_wait_for_idle(i915,
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
- if (err) {
- pr_err("%s(%s): failed to idle before, with err=%d!",
- func, name, err);
- return err;
+ mutex_lock(BKL);
+
+ rq = t->request_alloc(ctx, t->engine);
+ if (IS_ERR(rq)) {
+ mutex_unlock(BKL);
+ err = PTR_ERR(rq);
+ count = n;
+ break;
+ }
+
+ err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
+ submit,
+ GFP_KERNEL);
+
+ requests[n] = i915_request_get(rq);
+ i915_request_add(rq);
+
+ mutex_unlock(BKL);
+
+ if (err >= 0)
+ err = i915_sw_fence_await_dma_fence(wait,
+ &rq->fence,
+ 0,
+ GFP_KERNEL);
+
+ if (err < 0) {
+ i915_request_put(rq);
+ count = n;
+ break;
+ }
+ }
+
+ i915_sw_fence_commit(submit);
+ i915_sw_fence_commit(wait);
+
+ if (!wait_event_timeout(wait->wait,
+ i915_sw_fence_done(wait),
+ HZ / 2)) {
+ struct i915_request *rq = requests[count - 1];
+
+ pr_err("waiting for %d fences (last %llx:%lld) on %s timed out!\n",
+ count,
+ rq->fence.context, rq->fence.seqno,
+ t->engine->name);
+ i915_gem_set_wedged(t->engine->i915);
+ GEM_BUG_ON(!i915_request_completed(rq));
+ i915_sw_fence_wait(wait);
+ err = -EIO;
+ }
+
+ for (n = 0; n < count; n++) {
+ struct i915_request *rq = requests[n];
+
+ if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ &rq->fence.flags)) {
+ pr_err("%llu:%llu was not signaled!\n",
+ rq->fence.context, rq->fence.seqno);
+ err = -EINVAL;
+ }
+
+ i915_request_put(rq);
+ }
+
+ heap_fence_put(wait);
+ heap_fence_put(submit);
+
+ if (err < 0)
+ break;
+
+ num_fences += count;
+ num_waits++;
+
+ cond_resched();
}
- i915->gpu_error.missed_irq_rings = 0;
- t->reset_count = i915_reset_count(&i915->gpu_error);
+ atomic_long_add(num_fences, &t->num_fences);
+ atomic_long_add(num_waits, &t->num_waits);
- return 0;
+ kfree(order);
+out_requests:
+ kfree(requests);
+ return err;
}
-static int end_live_test(struct live_test *t)
+static int mock_breadcrumbs_smoketest(void *arg)
{
- struct drm_i915_private *i915 = t->i915;
+ struct drm_i915_private *i915 = arg;
+ struct smoketest t = {
+ .engine = i915->engine[RCS],
+ .ncontexts = 1024,
+ .max_batch = 1024,
+ .request_alloc = __mock_request_alloc
+ };
+ unsigned int ncpus = num_online_cpus();
+ struct task_struct **threads;
+ unsigned int n;
+ int ret = 0;
+
+ /*
+ * Smoketest our breadcrumb/signal handling for requests across multiple
+ * threads. A very simple test to only catch the most egregious of bugs.
+ * See __igt_breadcrumbs_smoketest();
+ */
- i915_retire_requests(i915);
+ threads = kmalloc_array(ncpus, sizeof(*threads), GFP_KERNEL);
+ if (!threads)
+ return -ENOMEM;
- if (wait_for(intel_engines_are_idle(i915), 10)) {
- pr_err("%s(%s): GPU not idle\n", t->func, t->name);
- return -EIO;
+ t.contexts =
+ kmalloc_array(t.ncontexts, sizeof(*t.contexts), GFP_KERNEL);
+ if (!t.contexts) {
+ ret = -ENOMEM;
+ goto out_threads;
}
- if (t->reset_count != i915_reset_count(&i915->gpu_error)) {
- pr_err("%s(%s): GPU was reset %d times!\n",
- t->func, t->name,
- i915_reset_count(&i915->gpu_error) - t->reset_count);
- return -EIO;
+ mutex_lock(&t.engine->i915->drm.struct_mutex);
+ for (n = 0; n < t.ncontexts; n++) {
+ t.contexts[n] = mock_context(t.engine->i915, "mock");
+ if (!t.contexts[n]) {
+ ret = -ENOMEM;
+ goto out_contexts;
+ }
}
+ mutex_unlock(&t.engine->i915->drm.struct_mutex);
+
+ for (n = 0; n < ncpus; n++) {
+ threads[n] = kthread_run(__igt_breadcrumbs_smoketest,
+ &t, "igt/%d", n);
+ if (IS_ERR(threads[n])) {
+ ret = PTR_ERR(threads[n]);
+ ncpus = n;
+ break;
+ }
- if (i915->gpu_error.missed_irq_rings) {
- pr_err("%s(%s): Missed interrupts on engines %lx\n",
- t->func, t->name, i915->gpu_error.missed_irq_rings);
- return -EIO;
+ get_task_struct(threads[n]);
}
- return 0;
+ msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies));
+
+ for (n = 0; n < ncpus; n++) {
+ int err;
+
+ err = kthread_stop(threads[n]);
+ if (err < 0 && !ret)
+ ret = err;
+
+ put_task_struct(threads[n]);
+ }
+ pr_info("Completed %lu waits for %lu fence across %d cpus\n",
+ atomic_long_read(&t.num_waits),
+ atomic_long_read(&t.num_fences),
+ ncpus);
+
+ mutex_lock(&t.engine->i915->drm.struct_mutex);
+out_contexts:
+ for (n = 0; n < t.ncontexts; n++) {
+ if (!t.contexts[n])
+ break;
+ mock_context_close(t.contexts[n]);
+ }
+ mutex_unlock(&t.engine->i915->drm.struct_mutex);
+ kfree(t.contexts);
+out_threads:
+ kfree(threads);
+
+ return ret;
+}
+
+int i915_request_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_add_request),
+ SUBTEST(igt_wait_request),
+ SUBTEST(igt_fence_wait),
+ SUBTEST(igt_request_rewind),
+ SUBTEST(mock_breadcrumbs_smoketest),
+ };
+ struct drm_i915_private *i915;
+ intel_wakeref_t wakeref;
+ int err = 0;
+
+ i915 = mock_gem_device();
+ if (!i915)
+ return -ENOMEM;
+
+ with_intel_runtime_pm(i915, wakeref)
+ err = i915_subtests(tests, i915);
+
+ drm_dev_put(&i915->drm);
+
+ return err;
}
static int live_nop_request(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
- struct live_test t;
+ intel_wakeref_t wakeref;
+ struct igt_live_test t;
unsigned int id;
int err = -ENODEV;
@@ -342,7 +538,7 @@ static int live_nop_request(void *arg)
*/
mutex_lock(&i915->drm.struct_mutex);
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
for_each_engine(engine, i915, id) {
struct i915_request *request = NULL;
@@ -350,7 +546,7 @@ static int live_nop_request(void *arg)
IGT_TIMEOUT(end_time);
ktime_t times[2] = {};
- err = begin_live_test(&t, i915, __func__, engine->name);
+ err = igt_live_test_begin(&t, i915, __func__, engine->name);
if (err)
goto out_unlock;
@@ -392,7 +588,7 @@ static int live_nop_request(void *arg)
break;
}
- err = end_live_test(&t);
+ err = igt_live_test_end(&t);
if (err)
goto out_unlock;
@@ -403,7 +599,7 @@ static int live_nop_request(void *arg)
}
out_unlock:
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -478,7 +674,8 @@ static int live_empty_request(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
- struct live_test t;
+ intel_wakeref_t wakeref;
+ struct igt_live_test t;
struct i915_vma *batch;
unsigned int id;
int err = 0;
@@ -489,7 +686,7 @@ static int live_empty_request(void *arg)
*/
mutex_lock(&i915->drm.struct_mutex);
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
batch = empty_batch(i915);
if (IS_ERR(batch)) {
@@ -503,7 +700,7 @@ static int live_empty_request(void *arg)
unsigned long n, prime;
ktime_t times[2] = {};
- err = begin_live_test(&t, i915, __func__, engine->name);
+ err = igt_live_test_begin(&t, i915, __func__, engine->name);
if (err)
goto out_batch;
@@ -539,7 +736,7 @@ static int live_empty_request(void *arg)
break;
}
- err = end_live_test(&t);
+ err = igt_live_test_end(&t);
if (err)
goto out_batch;
@@ -553,7 +750,7 @@ out_batch:
i915_vma_unpin(batch);
i915_vma_put(batch);
out_unlock:
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -637,8 +834,9 @@ static int live_all_engines(void *arg)
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
struct i915_request *request[I915_NUM_ENGINES];
+ intel_wakeref_t wakeref;
+ struct igt_live_test t;
struct i915_vma *batch;
- struct live_test t;
unsigned int id;
int err;
@@ -648,9 +846,9 @@ static int live_all_engines(void *arg)
*/
mutex_lock(&i915->drm.struct_mutex);
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
- err = begin_live_test(&t, i915, __func__, "");
+ err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
goto out_unlock;
@@ -722,7 +920,7 @@ static int live_all_engines(void *arg)
request[id] = NULL;
}
- err = end_live_test(&t);
+ err = igt_live_test_end(&t);
out_request:
for_each_engine(engine, i915, id)
@@ -731,7 +929,7 @@ out_request:
i915_vma_unpin(batch);
i915_vma_put(batch);
out_unlock:
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -742,7 +940,8 @@ static int live_sequential_engines(void *arg)
struct i915_request *request[I915_NUM_ENGINES] = {};
struct i915_request *prev = NULL;
struct intel_engine_cs *engine;
- struct live_test t;
+ intel_wakeref_t wakeref;
+ struct igt_live_test t;
unsigned int id;
int err;
@@ -753,9 +952,9 @@ static int live_sequential_engines(void *arg)
*/
mutex_lock(&i915->drm.struct_mutex);
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
- err = begin_live_test(&t, i915, __func__, "");
+ err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
goto out_unlock;
@@ -838,7 +1037,7 @@ static int live_sequential_engines(void *arg)
GEM_BUG_ON(!i915_request_completed(request[id]));
}
- err = end_live_test(&t);
+ err = igt_live_test_end(&t);
out_request:
for_each_engine(engine, i915, id) {
@@ -860,11 +1059,183 @@ out_request:
i915_request_put(request[id]);
}
out_unlock:
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
+static int
+max_batches(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+ int ret;
+
+ /*
+ * Before execlists, all contexts share the same ringbuffer. With
+ * execlists, each context/engine has a separate ringbuffer which,
+ * for the purposes of this test, is effectively inexhaustible.
+ *
+ * For the global ringbuffer though, we have to be very careful
+ * that we do not wrap while preventing the execution of requests
+ * with an unsignaled fence.
+ */
+ if (HAS_EXECLISTS(ctx->i915))
+ return INT_MAX;
+
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ } else {
+ int sz;
+
+ ret = rq->ring->size - rq->reserved_space;
+ i915_request_add(rq);
+
+ sz = rq->ring->emit - rq->head;
+ if (sz < 0)
+ sz += rq->ring->size;
+ ret /= sz;
+ ret /= 2; /* leave half spare, in case of emergency! */
+ }
+
+ return ret;
+}
+
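As a rough worked example of the cap computed above (all figures hypothetical, purely for illustration): a 16 KiB legacy ring with 160 bytes of reserved space and nop requests consuming about 256 bytes of ring each gives (16384 - 160) / 256 = 63 requests per ring, halved to 31 so that half the ring stays spare while the submit fence holds those requests back from execution.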
+static int live_breadcrumbs_smoketest(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct smoketest t[I915_NUM_ENGINES];
+ unsigned int ncpus = num_online_cpus();
+ unsigned long num_waits, num_fences;
+ struct intel_engine_cs *engine;
+ struct task_struct **threads;
+ struct igt_live_test live;
+ enum intel_engine_id id;
+ intel_wakeref_t wakeref;
+ struct drm_file *file;
+ unsigned int n;
+ int ret = 0;
+
+ /*
+ * Smoketest our breadcrumb/signal handling for requests across multiple
+ * threads. A very simple test to only catch the most egregious of bugs.
+ * See __igt_breadcrumbs_smoketest();
+ *
+ * On real hardware this time.
+ */
+
+ wakeref = intel_runtime_pm_get(i915);
+
+ file = mock_file(i915);
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+ goto out_rpm;
+ }
+
+ threads = kcalloc(ncpus * I915_NUM_ENGINES,
+ sizeof(*threads),
+ GFP_KERNEL);
+ if (!threads) {
+ ret = -ENOMEM;
+ goto out_file;
+ }
+
+ memset(&t[0], 0, sizeof(t[0]));
+ t[0].request_alloc = __live_request_alloc;
+ t[0].ncontexts = 64;
+ t[0].contexts = kmalloc_array(t[0].ncontexts,
+ sizeof(*t[0].contexts),
+ GFP_KERNEL);
+ if (!t[0].contexts) {
+ ret = -ENOMEM;
+ goto out_threads;
+ }
+
+ mutex_lock(&i915->drm.struct_mutex);
+ for (n = 0; n < t[0].ncontexts; n++) {
+ t[0].contexts[n] = live_context(i915, file);
+ if (!t[0].contexts[n]) {
+ ret = -ENOMEM;
+ goto out_contexts;
+ }
+ }
+
+ ret = igt_live_test_begin(&live, i915, __func__, "");
+ if (ret)
+ goto out_contexts;
+
+ for_each_engine(engine, i915, id) {
+ t[id] = t[0];
+ t[id].engine = engine;
+ t[id].max_batch = max_batches(t[0].contexts[0], engine);
+ if (t[id].max_batch < 0) {
+ ret = t[id].max_batch;
+ mutex_unlock(&i915->drm.struct_mutex);
+ goto out_flush;
+ }
+ /* One ring interleaved between requests from all cpus */
+ t[id].max_batch /= num_online_cpus() + 1;
+ pr_debug("Limiting batches to %d requests on %s\n",
+ t[id].max_batch, engine->name);
+
+ for (n = 0; n < ncpus; n++) {
+ struct task_struct *tsk;
+
+ tsk = kthread_run(__igt_breadcrumbs_smoketest,
+ &t[id], "igt/%d.%d", id, n);
+ if (IS_ERR(tsk)) {
+ ret = PTR_ERR(tsk);
+ mutex_unlock(&i915->drm.struct_mutex);
+ goto out_flush;
+ }
+
+ get_task_struct(tsk);
+ threads[id * ncpus + n] = tsk;
+ }
+ }
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies));
+
+out_flush:
+ num_waits = 0;
+ num_fences = 0;
+ for_each_engine(engine, i915, id) {
+ for (n = 0; n < ncpus; n++) {
+ struct task_struct *tsk = threads[id * ncpus + n];
+ int err;
+
+ if (!tsk)
+ continue;
+
+ err = kthread_stop(tsk);
+ if (err < 0 && !ret)
+ ret = err;
+
+ put_task_struct(tsk);
+ }
+
+ num_waits += atomic_long_read(&t[id].num_waits);
+ num_fences += atomic_long_read(&t[id].num_fences);
+ }
+ pr_info("Completed %lu waits for %lu fences across %d engines and %d cpus\n",
+ num_waits, num_fences, RUNTIME_INFO(i915)->num_rings, ncpus);
+
+ mutex_lock(&i915->drm.struct_mutex);
+ ret = igt_live_test_end(&live) ?: ret;
+out_contexts:
+ mutex_unlock(&i915->drm.struct_mutex);
+ kfree(t[0].contexts);
+out_threads:
+ kfree(threads);
+out_file:
+ mock_file_free(i915, file);
+out_rpm:
+ intel_runtime_pm_put(i915, wakeref);
+
+ return ret;
+}
+
int i915_request_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
@@ -872,6 +1243,7 @@ int i915_request_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_all_engines),
SUBTEST(live_sequential_engines),
SUBTEST(live_empty_request),
+ SUBTEST(live_breadcrumbs_smoketest),
};
if (i915_terminally_wedged(&i915->gpu_error))
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
index 86c54ea37f48..10ef0e636a24 100644
--- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -197,6 +197,49 @@ int i915_live_selftests(struct pci_dev *pdev)
return 0;
}
+static bool apply_subtest_filter(const char *caller, const char *name)
+{
+ char *filter, *sep, *tok;
+ bool result = true;
+
+ filter = kstrdup(i915_selftest.filter, GFP_KERNEL);
+ for (sep = filter; (tok = strsep(&sep, ","));) {
+ bool allow = true;
+ char *sl;
+
+ if (*tok == '!') {
+ allow = false;
+ tok++;
+ }
+
+ if (*tok == '\0')
+ continue;
+
+ sl = strchr(tok, '/');
+ if (sl) {
+ *sl++ = '\0';
+ if (strcmp(tok, caller)) {
+ if (allow)
+ result = false;
+ continue;
+ }
+ tok = sl;
+ }
+
+ if (strcmp(tok, name)) {
+ if (allow)
+ result = false;
+ continue;
+ }
+
+ result = allow;
+ break;
+ }
+ kfree(filter);
+
+ return result;
+}
+
int __i915_subtests(const char *caller,
const struct i915_subtest *st,
unsigned int count,
@@ -209,6 +252,9 @@ int __i915_subtests(const char *caller,
if (signal_pending(current))
return -EINTR;
+ if (!apply_subtest_filter(caller, st->name))
+ continue;
+
pr_debug(DRIVER_NAME ": Running %s/%s\n", caller, st->name);
GEM_TRACE("Running %s/%s\n", caller, st->name);
@@ -244,6 +290,7 @@ bool __igt_timeout(unsigned long timeout, const char *fmt, ...)
module_param_named(st_random_seed, i915_selftest.random_seed, uint, 0400);
module_param_named(st_timeout, i915_selftest.timeout_ms, uint, 0400);
+module_param_named(st_filter, i915_selftest.filter, charp, 0400);
module_param_named_unsafe(mock_selftests, i915_selftest.mock, int, 0400);
MODULE_PARM_DESC(mock_selftests, "Run selftests before loading, using mock hardware (0:disabled [default], 1:run tests then load driver, -1:run tests then exit module)");
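For reference, the new i915.st_filter parameter is a comma-separated list in which a bare token selects a subtest by name, a leading '!' excludes it, and a "caller/name" token scopes the match to one selftest entry point. The standalone userspace sketch below is an editorial illustration, not part of the patch; the filter strings and test names are only examples, and it simply mirrors the apply_subtest_filter() walk above:

#define _GNU_SOURCE
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Same token walk as apply_subtest_filter(), in userspace. */
static bool subtest_runs(const char *filter, const char *caller, const char *name)
{
        char *copy = strdup(filter);
        char *sep = copy, *tok;
        bool result = true;

        while ((tok = strsep(&sep, ","))) {
                bool allow = true;
                char *sl;

                if (*tok == '!') {
                        allow = false;
                        tok++;
                }
                if (*tok == '\0')
                        continue;

                sl = strchr(tok, '/');
                if (sl) {
                        *sl++ = '\0';
                        if (strcmp(tok, caller)) {
                                if (allow)
                                        result = false;
                                continue;
                        }
                        tok = sl;
                }

                if (strcmp(tok, name)) {
                        if (allow)
                                result = false;
                        continue;
                }

                result = allow;
                break;
        }

        free(copy);
        return result;
}

int main(void)
{
        /* i915.st_filter=igt_vma_pin1 runs only that subtest */
        printf("%d\n", subtest_runs("igt_vma_pin1", "i915_vma_mock_selftests", "igt_vma_pin1"));   /* 1 */
        printf("%d\n", subtest_runs("igt_vma_pin1", "i915_vma_mock_selftests", "igt_vma_rotate")); /* 0 */
        /* i915.st_filter=!igt_vma_pin1 skips it but leaves the rest running */
        printf("%d\n", subtest_runs("!igt_vma_pin1", "i915_vma_mock_selftests", "igt_vma_rotate")); /* 1 */
        return 0;
}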
diff --git a/drivers/gpu/drm/i915/selftests/i915_timeline.c b/drivers/gpu/drm/i915/selftests/i915_timeline.c
index 19f1c6a5c8fb..12ea69b1a1e5 100644
--- a/drivers/gpu/drm/i915/selftests/i915_timeline.c
+++ b/drivers/gpu/drm/i915/selftests/i915_timeline.c
@@ -4,12 +4,155 @@
* Copyright © 2017-2018 Intel Corporation
*/
+#include <linux/prime_numbers.h>
+
#include "../i915_selftest.h"
#include "i915_random.h"
+#include "igt_flush_test.h"
#include "mock_gem_device.h"
#include "mock_timeline.h"
+static struct page *hwsp_page(struct i915_timeline *tl)
+{
+ struct drm_i915_gem_object *obj = tl->hwsp_ggtt->obj;
+
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+ return sg_page(obj->mm.pages->sgl);
+}
+
+static unsigned long hwsp_cacheline(struct i915_timeline *tl)
+{
+ unsigned long address = (unsigned long)page_address(hwsp_page(tl));
+
+ return (address + tl->hwsp_offset) / CACHELINE_BYTES;
+}
+
+#define CACHELINES_PER_PAGE (PAGE_SIZE / CACHELINE_BYTES)
+
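For example, with 4 KiB pages and the usual 64-byte CACHELINE_BYTES, CACHELINES_PER_PAGE works out to 64. A timeline whose HWSP page happens to be mapped at kernel address 0xffff888004000000 (a hypothetical value) with hwsp_offset 0x80 therefore keys to cacheline 0xffff888004000000 / 64 + 2; only timelines whose breadcrumb slots genuinely alias would produce the same key, which is exactly what the radix tree below reports as -EEXIST.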
+struct mock_hwsp_freelist {
+ struct drm_i915_private *i915;
+ struct radix_tree_root cachelines;
+ struct i915_timeline **history;
+ unsigned long count, max;
+ struct rnd_state prng;
+};
+
+enum {
+ SHUFFLE = BIT(0),
+};
+
+static void __mock_hwsp_record(struct mock_hwsp_freelist *state,
+ unsigned int idx,
+ struct i915_timeline *tl)
+{
+ tl = xchg(&state->history[idx], tl);
+ if (tl) {
+ radix_tree_delete(&state->cachelines, hwsp_cacheline(tl));
+ i915_timeline_put(tl);
+ }
+}
+
+static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state,
+ unsigned int count,
+ unsigned int flags)
+{
+ struct i915_timeline *tl;
+ unsigned int idx;
+
+ while (count--) {
+ unsigned long cacheline;
+ int err;
+
+ tl = i915_timeline_create(state->i915, "mock", NULL);
+ if (IS_ERR(tl))
+ return PTR_ERR(tl);
+
+ cacheline = hwsp_cacheline(tl);
+ err = radix_tree_insert(&state->cachelines, cacheline, tl);
+ if (err) {
+ if (err == -EEXIST) {
+ pr_err("HWSP cacheline %lu already used; duplicate allocation!\n",
+ cacheline);
+ }
+ i915_timeline_put(tl);
+ return err;
+ }
+
+ idx = state->count++ % state->max;
+ __mock_hwsp_record(state, idx, tl);
+ }
+
+ if (flags & SHUFFLE)
+ i915_prandom_shuffle(state->history,
+ sizeof(*state->history),
+ min(state->count, state->max),
+ &state->prng);
+
+ count = i915_prandom_u32_max_state(min(state->count, state->max),
+ &state->prng);
+ while (count--) {
+ idx = --state->count % state->max;
+ __mock_hwsp_record(state, idx, NULL);
+ }
+
+ return 0;
+}
+
+static int mock_hwsp_freelist(void *arg)
+{
+ struct mock_hwsp_freelist state;
+ const struct {
+ const char *name;
+ unsigned int flags;
+ } phases[] = {
+ { "linear", 0 },
+ { "shuffled", SHUFFLE },
+ { },
+ }, *p;
+ unsigned int na;
+ int err = 0;
+
+ INIT_RADIX_TREE(&state.cachelines, GFP_KERNEL);
+ state.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed);
+
+ state.i915 = mock_gem_device();
+ if (!state.i915)
+ return -ENOMEM;
+
+ /*
+ * Create a bunch of timelines and check that their HWSPs do not overlap.
+ * Free some, and try again.
+ */
+
+ state.max = PAGE_SIZE / sizeof(*state.history);
+ state.count = 0;
+ state.history = kcalloc(state.max, sizeof(*state.history), GFP_KERNEL);
+ if (!state.history) {
+ err = -ENOMEM;
+ goto err_put;
+ }
+
+ mutex_lock(&state.i915->drm.struct_mutex);
+ for (p = phases; p->name; p++) {
+ pr_debug("%s(%s)\n", __func__, p->name);
+ for_each_prime_number_from(na, 1, 2 * CACHELINES_PER_PAGE) {
+ err = __mock_hwsp_timeline(&state, na, p->flags);
+ if (err)
+ goto out;
+ }
+ }
+
+out:
+ for (na = 0; na < state.max; na++)
+ __mock_hwsp_record(&state, na, NULL);
+ mutex_unlock(&state.i915->drm.struct_mutex);
+ kfree(state.history);
+err_put:
+ drm_dev_put(&state.i915->drm);
+ return err;
+}
+
struct __igt_sync {
const char *name;
u32 seqno;
@@ -256,12 +399,331 @@ static int bench_sync(void *arg)
return 0;
}
-int i915_gem_timeline_mock_selftests(void)
+int i915_timeline_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
+ SUBTEST(mock_hwsp_freelist),
SUBTEST(igt_sync),
SUBTEST(bench_sync),
};
return i915_subtests(tests, NULL);
}
+
+static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value)
+{
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ if (INTEL_GEN(rq->i915) >= 8) {
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = addr;
+ *cs++ = 0;
+ *cs++ = value;
+ } else if (INTEL_GEN(rq->i915) >= 4) {
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = 0;
+ *cs++ = addr;
+ *cs++ = value;
+ } else {
+ *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+ *cs++ = addr;
+ *cs++ = value;
+ *cs++ = MI_NOOP;
+ }
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static struct i915_request *
+tl_write(struct i915_timeline *tl, struct intel_engine_cs *engine, u32 value)
+{
+ struct i915_request *rq;
+ int err;
+
+ lockdep_assert_held(&tl->i915->drm.struct_mutex); /* lazy rq refs */
+
+ err = i915_timeline_pin(tl);
+ if (err) {
+ rq = ERR_PTR(err);
+ goto out;
+ }
+
+ rq = i915_request_alloc(engine, engine->i915->kernel_context);
+ if (IS_ERR(rq))
+ goto out_unpin;
+
+ err = emit_ggtt_store_dw(rq, tl->hwsp_offset, value);
+ i915_request_add(rq);
+ if (err)
+ rq = ERR_PTR(err);
+
+out_unpin:
+ i915_timeline_unpin(tl);
+out:
+ if (IS_ERR(rq))
+ pr_err("Failed to write to timeline!\n");
+ return rq;
+}
+
+static struct i915_timeline *
+checked_i915_timeline_create(struct drm_i915_private *i915)
+{
+ struct i915_timeline *tl;
+
+ tl = i915_timeline_create(i915, "live", NULL);
+ if (IS_ERR(tl))
+ return tl;
+
+ if (*tl->hwsp_seqno != tl->seqno) {
+ pr_err("Timeline created with incorrect breadcrumb, found %x, expected %x\n",
+ *tl->hwsp_seqno, tl->seqno);
+ i915_timeline_put(tl);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return tl;
+}
+
+static int live_hwsp_engine(void *arg)
+{
+#define NUM_TIMELINES 4096
+ struct drm_i915_private *i915 = arg;
+ struct i915_timeline **timelines;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ intel_wakeref_t wakeref;
+ unsigned long count, n;
+ int err = 0;
+
+ /*
+ * Create a bunch of timelines and check we can write
+ * independently to each of their breadcrumb slots.
+ */
+
+ timelines = kvmalloc_array(NUM_TIMELINES * I915_NUM_ENGINES,
+ sizeof(*timelines),
+ GFP_KERNEL);
+ if (!timelines)
+ return -ENOMEM;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ wakeref = intel_runtime_pm_get(i915);
+
+ count = 0;
+ for_each_engine(engine, i915, id) {
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ for (n = 0; n < NUM_TIMELINES; n++) {
+ struct i915_timeline *tl;
+ struct i915_request *rq;
+
+ tl = checked_i915_timeline_create(i915);
+ if (IS_ERR(tl)) {
+ err = PTR_ERR(tl);
+ goto out;
+ }
+
+ rq = tl_write(tl, engine, count);
+ if (IS_ERR(rq)) {
+ i915_timeline_put(tl);
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ timelines[count++] = tl;
+ }
+ }
+
+out:
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+
+ for (n = 0; n < count; n++) {
+ struct i915_timeline *tl = timelines[n];
+
+ if (!err && *tl->hwsp_seqno != n) {
+ pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
+ n, *tl->hwsp_seqno);
+ err = -EINVAL;
+ }
+ i915_timeline_put(tl);
+ }
+
+ intel_runtime_pm_put(i915, wakeref);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ kvfree(timelines);
+
+ return err;
+#undef NUM_TIMELINES
+}
+
+static int live_hwsp_alternate(void *arg)
+{
+#define NUM_TIMELINES 4096
+ struct drm_i915_private *i915 = arg;
+ struct i915_timeline **timelines;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ intel_wakeref_t wakeref;
+ unsigned long count, n;
+ int err = 0;
+
+ /*
+ * Create a bunch of timelines and check we can write
+ * independently to each of their breadcrumb slots with adjacent
+ * engines.
+ */
+
+ timelines = kvmalloc_array(NUM_TIMELINES * I915_NUM_ENGINES,
+ sizeof(*timelines),
+ GFP_KERNEL);
+ if (!timelines)
+ return -ENOMEM;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ wakeref = intel_runtime_pm_get(i915);
+
+ count = 0;
+ for (n = 0; n < NUM_TIMELINES; n++) {
+ for_each_engine(engine, i915, id) {
+ struct i915_timeline *tl;
+ struct i915_request *rq;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ tl = checked_i915_timeline_create(i915);
+ if (IS_ERR(tl)) {
+ err = PTR_ERR(tl);
+ goto out;
+ }
+
+ rq = tl_write(tl, engine, count);
+ if (IS_ERR(rq)) {
+ i915_timeline_put(tl);
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ timelines[count++] = tl;
+ }
+ }
+
+out:
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+
+ for (n = 0; n < count; n++) {
+ struct i915_timeline *tl = timelines[n];
+
+ if (!err && *tl->hwsp_seqno != n) {
+ pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
+ n, *tl->hwsp_seqno);
+ err = -EINVAL;
+ }
+ i915_timeline_put(tl);
+ }
+
+ intel_runtime_pm_put(i915, wakeref);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ kvfree(timelines);
+
+ return err;
+#undef NUM_TIMELINES
+}
+
+static int live_hwsp_recycle(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ intel_wakeref_t wakeref;
+ unsigned long count;
+ int err = 0;
+
+ /*
+ * Check seqno writes into one timeline at a time. We expect to
+ * recycle the breadcrumb slot between iterations and want to
+ * confuse neither ourselves nor the GPU.
+ */
+
+ mutex_lock(&i915->drm.struct_mutex);
+ wakeref = intel_runtime_pm_get(i915);
+
+ count = 0;
+ for_each_engine(engine, i915, id) {
+ IGT_TIMEOUT(end_time);
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ do {
+ struct i915_timeline *tl;
+ struct i915_request *rq;
+
+ tl = checked_i915_timeline_create(i915);
+ if (IS_ERR(tl)) {
+ err = PTR_ERR(tl);
+ goto out;
+ }
+
+ rq = tl_write(tl, engine, count);
+ if (IS_ERR(rq)) {
+ i915_timeline_put(tl);
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ if (i915_request_wait(rq,
+ I915_WAIT_LOCKED,
+ HZ / 5) < 0) {
+ pr_err("Wait for timeline writes timed out!\n");
+ i915_timeline_put(tl);
+ err = -EIO;
+ goto out;
+ }
+
+ if (*tl->hwsp_seqno != count) {
+ pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
+ count, *tl->hwsp_seqno);
+ err = -EINVAL;
+ }
+
+ i915_timeline_put(tl);
+ count++;
+
+ if (err)
+ goto out;
+
+ i915_timelines_park(i915); /* Encourage recycling! */
+ } while (!__igt_timeout(end_time, NULL));
+ }
+
+out:
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+ intel_runtime_pm_put(i915, wakeref);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ return err;
+}
+
+int i915_timeline_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_hwsp_recycle),
+ SUBTEST(live_hwsp_engine),
+ SUBTEST(live_hwsp_alternate),
+ };
+
+ return i915_subtests(tests, i915);
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index ffa74290e054..cf1de82741fa 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -28,6 +28,7 @@
#include "mock_gem_device.h"
#include "mock_context.h"
+#include "mock_gtt.h"
static bool assert_vma(struct i915_vma *vma,
struct drm_i915_gem_object *obj,
@@ -141,7 +142,8 @@ static int create_vmas(struct drm_i915_private *i915,
static int igt_vma_create(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct i915_ggtt *ggtt = arg;
+ struct drm_i915_private *i915 = ggtt->vm.i915;
struct drm_i915_gem_object *obj, *on;
struct i915_gem_context *ctx, *cn;
unsigned long num_obj, num_ctx;
@@ -245,7 +247,7 @@ static bool assert_pin_einval(const struct i915_vma *vma,
static int igt_vma_pin1(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct i915_ggtt *ggtt = arg;
const struct pin_mode modes[] = {
#define VALID(sz, fl) { .size = (sz), .flags = (fl), .assert = assert_pin_valid, .string = #sz ", " #fl ", (valid) " }
#define __INVALID(sz, fl, check, eval) { .size = (sz), .flags = (fl), .assert = (check), .string = #sz ", " #fl ", (invalid " #eval ")" }
@@ -256,30 +258,30 @@ static int igt_vma_pin1(void *arg)
VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | 4096),
VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | 8192),
- VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
- VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
- VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.vm.total - 4096)),
-
- VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)),
- INVALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | i915->ggtt.mappable_end),
- VALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.vm.total - 4096)),
- INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | i915->ggtt.vm.total),
+ VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (ggtt->mappable_end - 4096)),
+ VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (ggtt->mappable_end - 4096)),
+ VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (ggtt->vm.total - 4096)),
+
+ VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (ggtt->mappable_end - 4096)),
+ INVALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | ggtt->mappable_end),
+ VALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | (ggtt->vm.total - 4096)),
+ INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | ggtt->vm.total),
INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | round_down(U64_MAX, PAGE_SIZE)),
VALID(4096, PIN_GLOBAL),
VALID(8192, PIN_GLOBAL),
- VALID(i915->ggtt.mappable_end - 4096, PIN_GLOBAL | PIN_MAPPABLE),
- VALID(i915->ggtt.mappable_end, PIN_GLOBAL | PIN_MAPPABLE),
- NOSPACE(i915->ggtt.mappable_end + 4096, PIN_GLOBAL | PIN_MAPPABLE),
- VALID(i915->ggtt.vm.total - 4096, PIN_GLOBAL),
- VALID(i915->ggtt.vm.total, PIN_GLOBAL),
- NOSPACE(i915->ggtt.vm.total + 4096, PIN_GLOBAL),
+ VALID(ggtt->mappable_end - 4096, PIN_GLOBAL | PIN_MAPPABLE),
+ VALID(ggtt->mappable_end, PIN_GLOBAL | PIN_MAPPABLE),
+ NOSPACE(ggtt->mappable_end + 4096, PIN_GLOBAL | PIN_MAPPABLE),
+ VALID(ggtt->vm.total - 4096, PIN_GLOBAL),
+ VALID(ggtt->vm.total, PIN_GLOBAL),
+ NOSPACE(ggtt->vm.total + 4096, PIN_GLOBAL),
NOSPACE(round_down(U64_MAX, PAGE_SIZE), PIN_GLOBAL),
- INVALID(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)),
- INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.vm.total - 4096)),
+ INVALID(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (ggtt->mappable_end - 4096)),
+ INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (ggtt->vm.total - 4096)),
INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (round_down(U64_MAX, PAGE_SIZE) - 4096)),
- VALID(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
+ VALID(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (ggtt->mappable_end - 4096)),
#if !IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
/* Misusing BIAS is a programming error (it is not controllable
@@ -287,10 +289,10 @@ static int igt_vma_pin1(void *arg)
* However, the tests are still quite interesting for checking
* variable start, end and size.
*/
- NOSPACE(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | i915->ggtt.mappable_end),
- NOSPACE(0, PIN_GLOBAL | PIN_OFFSET_BIAS | i915->ggtt.vm.total),
- NOSPACE(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
- NOSPACE(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.vm.total - 4096)),
+ NOSPACE(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | ggtt->mappable_end),
+ NOSPACE(0, PIN_GLOBAL | PIN_OFFSET_BIAS | ggtt->vm.total),
+ NOSPACE(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (ggtt->mappable_end - 4096)),
+ NOSPACE(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (ggtt->vm.total - 4096)),
#endif
{ },
#undef NOSPACE
@@ -306,13 +308,13 @@ static int igt_vma_pin1(void *arg)
* focusing on error handling of boundary conditions.
*/
- GEM_BUG_ON(!drm_mm_clean(&i915->ggtt.vm.mm));
+ GEM_BUG_ON(!drm_mm_clean(&ggtt->vm.mm));
- obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ obj = i915_gem_object_create_internal(ggtt->vm.i915, PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
- vma = checked_vma_instance(obj, &i915->ggtt.vm, NULL);
+ vma = checked_vma_instance(obj, &ggtt->vm, NULL);
if (IS_ERR(vma))
goto out;
@@ -403,8 +405,8 @@ static unsigned int rotated_size(const struct intel_rotation_plane_info *a,
static int igt_vma_rotate(void *arg)
{
- struct drm_i915_private *i915 = arg;
- struct i915_address_space *vm = &i915->ggtt.vm;
+ struct i915_ggtt *ggtt = arg;
+ struct i915_address_space *vm = &ggtt->vm;
struct drm_i915_gem_object *obj;
const struct intel_rotation_plane_info planes[] = {
{ .width = 1, .height = 1, .stride = 1 },
@@ -431,7 +433,7 @@ static int igt_vma_rotate(void *arg)
* that the page layout within the rotated VMA match our expectations.
*/
- obj = i915_gem_object_create_internal(i915, max_pages * PAGE_SIZE);
+ obj = i915_gem_object_create_internal(vm->i915, max_pages * PAGE_SIZE);
if (IS_ERR(obj))
goto out;
@@ -602,8 +604,8 @@ static bool assert_pin(struct i915_vma *vma,
static int igt_vma_partial(void *arg)
{
- struct drm_i915_private *i915 = arg;
- struct i915_address_space *vm = &i915->ggtt.vm;
+ struct i915_ggtt *ggtt = arg;
+ struct i915_address_space *vm = &ggtt->vm;
const unsigned int npages = 1021; /* prime! */
struct drm_i915_gem_object *obj;
const struct phase {
@@ -621,7 +623,7 @@ static int igt_vma_partial(void *arg)
* we are returned the same VMA when we later request the same range.
*/
- obj = i915_gem_object_create_internal(i915, npages*PAGE_SIZE);
+ obj = i915_gem_object_create_internal(vm->i915, npages * PAGE_SIZE);
if (IS_ERR(obj))
goto out;
@@ -670,7 +672,7 @@ static int igt_vma_partial(void *arg)
}
count = 0;
- list_for_each_entry(vma, &obj->vma_list, obj_link)
+ list_for_each_entry(vma, &obj->vma.list, obj_link)
count++;
if (count != nvma) {
pr_err("(%s) All partial vma were not recorded on the obj->vma_list: found %u, expected %u\n",
@@ -699,7 +701,7 @@ static int igt_vma_partial(void *arg)
i915_vma_unpin(vma);
count = 0;
- list_for_each_entry(vma, &obj->vma_list, obj_link)
+ list_for_each_entry(vma, &obj->vma.list, obj_link)
count++;
if (count != nvma) {
pr_err("(%s) allocated an extra full vma!\n", p->name);
@@ -723,17 +725,24 @@ int i915_vma_mock_selftests(void)
SUBTEST(igt_vma_partial),
};
struct drm_i915_private *i915;
+ struct i915_ggtt ggtt;
int err;
i915 = mock_gem_device();
if (!i915)
return -ENOMEM;
+ mock_init_ggtt(i915, &ggtt);
+
mutex_lock(&i915->drm.struct_mutex);
- err = i915_subtests(tests, i915);
+ err = i915_subtests(tests, &ggtt);
+ mock_device_flush(i915);
mutex_unlock(&i915->drm.struct_mutex);
+ i915_gem_drain_freed_objects(i915);
+
+ mock_fini_ggtt(&ggtt);
drm_dev_put(&i915->drm);
+
return err;
}
-
diff --git a/drivers/gpu/drm/i915/selftests/igt_live_test.c b/drivers/gpu/drm/i915/selftests/igt_live_test.c
new file mode 100644
index 000000000000..3e902761cd16
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_live_test.c
@@ -0,0 +1,78 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "../i915_drv.h"
+
+#include "../i915_selftest.h"
+#include "igt_flush_test.h"
+#include "igt_live_test.h"
+
+int igt_live_test_begin(struct igt_live_test *t,
+ struct drm_i915_private *i915,
+ const char *func,
+ const char *name)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err;
+
+ lockdep_assert_held(&i915->drm.struct_mutex);
+
+ t->i915 = i915;
+ t->func = func;
+ t->name = name;
+
+ err = i915_gem_wait_for_idle(i915,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
+ if (err) {
+ pr_err("%s(%s): failed to idle before, with err=%d!",
+ func, name, err);
+ return err;
+ }
+
+ t->reset_global = i915_reset_count(&i915->gpu_error);
+
+ for_each_engine(engine, i915, id)
+ t->reset_engine[id] =
+ i915_reset_engine_count(&i915->gpu_error, engine);
+
+ return 0;
+}
+
+int igt_live_test_end(struct igt_live_test *t)
+{
+ struct drm_i915_private *i915 = t->i915;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ lockdep_assert_held(&i915->drm.struct_mutex);
+
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ return -EIO;
+
+ if (t->reset_global != i915_reset_count(&i915->gpu_error)) {
+ pr_err("%s(%s): GPU was reset %d times!\n",
+ t->func, t->name,
+ i915_reset_count(&i915->gpu_error) - t->reset_global);
+ return -EIO;
+ }
+
+ for_each_engine(engine, i915, id) {
+ if (t->reset_engine[id] ==
+ i915_reset_engine_count(&i915->gpu_error, engine))
+ continue;
+
+ pr_err("%s(%s): engine '%s' was reset %d times!\n",
+ t->func, t->name, engine->name,
+ i915_reset_engine_count(&i915->gpu_error, engine) -
+ t->reset_engine[id]);
+ return -EIO;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/selftests/igt_live_test.h b/drivers/gpu/drm/i915/selftests/igt_live_test.h
new file mode 100644
index 000000000000..c0e9f99d50de
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_live_test.h
@@ -0,0 +1,35 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef IGT_LIVE_TEST_H
+#define IGT_LIVE_TEST_H
+
+#include "../i915_gem.h"
+
+struct drm_i915_private;
+
+struct igt_live_test {
+ struct drm_i915_private *i915;
+ const char *func;
+ const char *name;
+
+ unsigned int reset_global;
+ unsigned int reset_engine[I915_NUM_ENGINES];
+};
+
+/*
+ * Flush the GPU state before and after the test to ensure that no residual
+ * code is running on the GPU that may affect this test. Also compare the
+ * state before and after the test and alert if it unexpectedly changes,
+ * e.g. if the GPU was reset.
+ */
+int igt_live_test_begin(struct igt_live_test *t,
+ struct drm_i915_private *i915,
+ const char *func,
+ const char *name);
+int igt_live_test_end(struct igt_live_test *t);
+
+#endif /* IGT_LIVE_TEST_H */
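Usage follows the conversions made in i915_request.c above; condensed, and with error paths elided for illustration, a live test brackets its body like so:

        struct igt_live_test t;
        int err;

        /* caller already holds i915->drm.struct_mutex */
        err = igt_live_test_begin(&t, i915, __func__, engine->name);
        if (err)
                return err;

        /* ... build, submit and wait upon the requests under test ... */

        /* -EIO if the GPU, or an engine, was reset behind our back */
        return igt_live_test_end(&t);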
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index 8cd34f6e6859..9ebd9225684e 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -68,48 +68,65 @@ static u64 hws_address(const struct i915_vma *hws,
return hws->node.start + seqno_offset(rq->fence.context);
}
-static int emit_recurse_batch(struct igt_spinner *spin,
- struct i915_request *rq,
- u32 arbitration_command)
+static int move_to_active(struct i915_vma *vma,
+ struct i915_request *rq,
+ unsigned int flags)
{
- struct i915_address_space *vm = &rq->gem_context->ppgtt->vm;
+ int err;
+
+ err = i915_vma_move_to_active(vma, rq, flags);
+ if (err)
+ return err;
+
+ if (!i915_gem_object_has_active_reference(vma->obj)) {
+ i915_gem_object_get(vma->obj);
+ i915_gem_object_set_active_reference(vma->obj);
+ }
+
+ return 0;
+}
+
+struct i915_request *
+igt_spinner_create_request(struct igt_spinner *spin,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ u32 arbitration_command)
+{
+ struct i915_address_space *vm = &ctx->ppgtt->vm;
+ struct i915_request *rq = NULL;
struct i915_vma *hws, *vma;
u32 *batch;
int err;
vma = i915_vma_instance(spin->obj, vm, NULL);
if (IS_ERR(vma))
- return PTR_ERR(vma);
+ return ERR_CAST(vma);
hws = i915_vma_instance(spin->hws, vm, NULL);
if (IS_ERR(hws))
- return PTR_ERR(hws);
+ return ERR_CAST(hws);
err = i915_vma_pin(vma, 0, 0, PIN_USER);
if (err)
- return err;
+ return ERR_PTR(err);
err = i915_vma_pin(hws, 0, 0, PIN_USER);
if (err)
goto unpin_vma;
- err = i915_vma_move_to_active(vma, rq, 0);
- if (err)
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
goto unpin_hws;
-
- if (!i915_gem_object_has_active_reference(vma->obj)) {
- i915_gem_object_get(vma->obj);
- i915_gem_object_set_active_reference(vma->obj);
}
- err = i915_vma_move_to_active(hws, rq, 0);
+ err = move_to_active(vma, rq, 0);
if (err)
- goto unpin_hws;
+ goto cancel_rq;
- if (!i915_gem_object_has_active_reference(hws->obj)) {
- i915_gem_object_get(hws->obj);
- i915_gem_object_set_active_reference(hws->obj);
- }
+ err = move_to_active(hws, rq, 0);
+ if (err)
+ goto cancel_rq;
batch = spin->batch;
@@ -127,35 +144,18 @@ static int emit_recurse_batch(struct igt_spinner *spin,
i915_gem_chipset_flush(spin->i915);
- err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
+ err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
+cancel_rq:
+ if (err) {
+ i915_request_skip(rq, err);
+ i915_request_add(rq);
+ }
unpin_hws:
i915_vma_unpin(hws);
unpin_vma:
i915_vma_unpin(vma);
- return err;
-}
-
-struct i915_request *
-igt_spinner_create_request(struct igt_spinner *spin,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
- u32 arbitration_command)
-{
- struct i915_request *rq;
- int err;
-
- rq = i915_request_alloc(engine, ctx);
- if (IS_ERR(rq))
- return rq;
-
- err = emit_recurse_batch(spin, rq, arbitration_command);
- if (err) {
- i915_request_add(rq);
- return ERR_PTR(err);
- }
-
- return rq;
+ return err ? ERR_PTR(err) : rq;
}
static u32
@@ -185,11 +185,6 @@ void igt_spinner_fini(struct igt_spinner *spin)
bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
{
- if (!wait_event_timeout(rq->execute,
- READ_ONCE(rq->global_seqno),
- msecs_to_jiffies(10)))
- return false;
-
return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
rq->fence.seqno),
10) &&
diff --git a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
deleted file mode 100644
index f03b407fdbe2..000000000000
--- a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
+++ /dev/null
@@ -1,470 +0,0 @@
-/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#include "../i915_selftest.h"
-#include "i915_random.h"
-
-#include "mock_gem_device.h"
-#include "mock_engine.h"
-
-static int check_rbtree(struct intel_engine_cs *engine,
- const unsigned long *bitmap,
- const struct intel_wait *waiters,
- const int count)
-{
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
- struct rb_node *rb;
- int n;
-
- if (&b->irq_wait->node != rb_first(&b->waiters)) {
- pr_err("First waiter does not match first element of wait-tree\n");
- return -EINVAL;
- }
-
- n = find_first_bit(bitmap, count);
- for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
- struct intel_wait *w = container_of(rb, typeof(*w), node);
- int idx = w - waiters;
-
- if (!test_bit(idx, bitmap)) {
- pr_err("waiter[%d, seqno=%d] removed but still in wait-tree\n",
- idx, w->seqno);
- return -EINVAL;
- }
-
- if (n != idx) {
- pr_err("waiter[%d, seqno=%d] does not match expected next element in tree [%d]\n",
- idx, w->seqno, n);
- return -EINVAL;
- }
-
- n = find_next_bit(bitmap, count, n + 1);
- }
-
- return 0;
-}
-
-static int check_completion(struct intel_engine_cs *engine,
- const unsigned long *bitmap,
- const struct intel_wait *waiters,
- const int count)
-{
- int n;
-
- for (n = 0; n < count; n++) {
- if (intel_wait_complete(&waiters[n]) != !!test_bit(n, bitmap))
- continue;
-
- pr_err("waiter[%d, seqno=%d] is %s, but expected %s\n",
- n, waiters[n].seqno,
- intel_wait_complete(&waiters[n]) ? "complete" : "active",
- test_bit(n, bitmap) ? "active" : "complete");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int check_rbtree_empty(struct intel_engine_cs *engine)
-{
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
-
- if (b->irq_wait) {
- pr_err("Empty breadcrumbs still has a waiter\n");
- return -EINVAL;
- }
-
- if (!RB_EMPTY_ROOT(&b->waiters)) {
- pr_err("Empty breadcrumbs, but wait-tree not empty\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int igt_random_insert_remove(void *arg)
-{
- const u32 seqno_bias = 0x1000;
- I915_RND_STATE(prng);
- struct intel_engine_cs *engine = arg;
- struct intel_wait *waiters;
- const int count = 4096;
- unsigned int *order;
- unsigned long *bitmap;
- int err = -ENOMEM;
- int n;
-
- mock_engine_reset(engine);
-
- waiters = kvmalloc_array(count, sizeof(*waiters), GFP_KERNEL);
- if (!waiters)
- goto out_engines;
-
- bitmap = kcalloc(DIV_ROUND_UP(count, BITS_PER_LONG), sizeof(*bitmap),
- GFP_KERNEL);
- if (!bitmap)
- goto out_waiters;
-
- order = i915_random_order(count, &prng);
- if (!order)
- goto out_bitmap;
-
- for (n = 0; n < count; n++)
- intel_wait_init_for_seqno(&waiters[n], seqno_bias + n);
-
- err = check_rbtree(engine, bitmap, waiters, count);
- if (err)
- goto out_order;
-
- /* Add and remove waiters into the rbtree in random order. At each
- * step, we verify that the rbtree is correctly ordered.
- */
- for (n = 0; n < count; n++) {
- int i = order[n];
-
- intel_engine_add_wait(engine, &waiters[i]);
- __set_bit(i, bitmap);
-
- err = check_rbtree(engine, bitmap, waiters, count);
- if (err)
- goto out_order;
- }
-
- i915_random_reorder(order, count, &prng);
- for (n = 0; n < count; n++) {
- int i = order[n];
-
- intel_engine_remove_wait(engine, &waiters[i]);
- __clear_bit(i, bitmap);
-
- err = check_rbtree(engine, bitmap, waiters, count);
- if (err)
- goto out_order;
- }
-
- err = check_rbtree_empty(engine);
-out_order:
- kfree(order);
-out_bitmap:
- kfree(bitmap);
-out_waiters:
- kvfree(waiters);
-out_engines:
- mock_engine_flush(engine);
- return err;
-}
-
-static int igt_insert_complete(void *arg)
-{
- const u32 seqno_bias = 0x1000;
- struct intel_engine_cs *engine = arg;
- struct intel_wait *waiters;
- const int count = 4096;
- unsigned long *bitmap;
- int err = -ENOMEM;
- int n, m;
-
- mock_engine_reset(engine);
-
- waiters = kvmalloc_array(count, sizeof(*waiters), GFP_KERNEL);
- if (!waiters)
- goto out_engines;
-
- bitmap = kcalloc(DIV_ROUND_UP(count, BITS_PER_LONG), sizeof(*bitmap),
- GFP_KERNEL);
- if (!bitmap)
- goto out_waiters;
-
- for (n = 0; n < count; n++) {
- intel_wait_init_for_seqno(&waiters[n], n + seqno_bias);
- intel_engine_add_wait(engine, &waiters[n]);
- __set_bit(n, bitmap);
- }
- err = check_rbtree(engine, bitmap, waiters, count);
- if (err)
- goto out_bitmap;
-
- /* On each step, we advance the seqno so that several waiters are then
- * complete (we increase the seqno by increasingly larger values to
- * retire more and more waiters at once). All retired waiters should
- * be woken and removed from the rbtree, and so that we check.
- */
- for (n = 0; n < count; n = m) {
- int seqno = 2 * n;
-
- GEM_BUG_ON(find_first_bit(bitmap, count) != n);
-
- if (intel_wait_complete(&waiters[n])) {
- pr_err("waiter[%d, seqno=%d] completed too early\n",
- n, waiters[n].seqno);
- err = -EINVAL;
- goto out_bitmap;
- }
-
- /* complete the following waiters */
- mock_seqno_advance(engine, seqno + seqno_bias);
- for (m = n; m <= seqno; m++) {
- if (m == count)
- break;
-
- GEM_BUG_ON(!test_bit(m, bitmap));
- __clear_bit(m, bitmap);
- }
-
- intel_engine_remove_wait(engine, &waiters[n]);
- RB_CLEAR_NODE(&waiters[n].node);
-
- err = check_rbtree(engine, bitmap, waiters, count);
- if (err) {
- pr_err("rbtree corrupt after seqno advance to %d\n",
- seqno + seqno_bias);
- goto out_bitmap;
- }
-
- err = check_completion(engine, bitmap, waiters, count);
- if (err) {
- pr_err("completions after seqno advance to %d failed\n",
- seqno + seqno_bias);
- goto out_bitmap;
- }
- }
-
- err = check_rbtree_empty(engine);
-out_bitmap:
- kfree(bitmap);
-out_waiters:
- kvfree(waiters);
-out_engines:
- mock_engine_flush(engine);
- return err;
-}
-
-struct igt_wakeup {
- struct task_struct *tsk;
- atomic_t *ready, *set, *done;
- struct intel_engine_cs *engine;
- unsigned long flags;
-#define STOP 0
-#define IDLE 1
- wait_queue_head_t *wq;
- u32 seqno;
-};
-
-static bool wait_for_ready(struct igt_wakeup *w)
-{
- DEFINE_WAIT(ready);
-
- set_bit(IDLE, &w->flags);
- if (atomic_dec_and_test(w->done))
- wake_up_var(w->done);
-
- if (test_bit(STOP, &w->flags))
- goto out;
-
- for (;;) {
- prepare_to_wait(w->wq, &ready, TASK_INTERRUPTIBLE);
- if (atomic_read(w->ready) == 0)
- break;
-
- schedule();
- }
- finish_wait(w->wq, &ready);
-
-out:
- clear_bit(IDLE, &w->flags);
- if (atomic_dec_and_test(w->set))
- wake_up_var(w->set);
-
- return !test_bit(STOP, &w->flags);
-}
-
-static int igt_wakeup_thread(void *arg)
-{
- struct igt_wakeup *w = arg;
- struct intel_wait wait;
-
- while (wait_for_ready(w)) {
- GEM_BUG_ON(kthread_should_stop());
-
- intel_wait_init_for_seqno(&wait, w->seqno);
- intel_engine_add_wait(w->engine, &wait);
- for (;;) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- if (i915_seqno_passed(intel_engine_get_seqno(w->engine),
- w->seqno))
- break;
-
- if (test_bit(STOP, &w->flags)) /* emergency escape */
- break;
-
- schedule();
- }
- intel_engine_remove_wait(w->engine, &wait);
- __set_current_state(TASK_RUNNING);
- }
-
- return 0;
-}
-
-static void igt_wake_all_sync(atomic_t *ready,
- atomic_t *set,
- atomic_t *done,
- wait_queue_head_t *wq,
- int count)
-{
- atomic_set(set, count);
- atomic_set(ready, 0);
- wake_up_all(wq);
-
- wait_var_event(set, !atomic_read(set));
- atomic_set(ready, count);
- atomic_set(done, count);
-}
-
-static int igt_wakeup(void *arg)
-{
- I915_RND_STATE(prng);
- struct intel_engine_cs *engine = arg;
- struct igt_wakeup *waiters;
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
- const int count = 4096;
- const u32 max_seqno = count / 4;
- atomic_t ready, set, done;
- int err = -ENOMEM;
- int n, step;
-
- mock_engine_reset(engine);
-
- waiters = kvmalloc_array(count, sizeof(*waiters), GFP_KERNEL);
- if (!waiters)
- goto out_engines;
-
- /* Create a large number of threads, each waiting on a random seqno.
- * Multiple waiters will be waiting for the same seqno.
- */
- atomic_set(&ready, count);
- for (n = 0; n < count; n++) {
- waiters[n].wq = &wq;
- waiters[n].ready = &ready;
- waiters[n].set = &set;
- waiters[n].done = &done;
- waiters[n].engine = engine;
- waiters[n].flags = BIT(IDLE);
-
- waiters[n].tsk = kthread_run(igt_wakeup_thread, &waiters[n],
- "i915/igt:%d", n);
- if (IS_ERR(waiters[n].tsk))
- goto out_waiters;
-
- get_task_struct(waiters[n].tsk);
- }
-
- for (step = 1; step <= max_seqno; step <<= 1) {
- u32 seqno;
-
- /* The waiter threads start paused as we assign them a random
- * seqno and reset the engine. Once the engine is reset,
- * we signal that the threads may begin their wait upon their
- * seqno.
- */
- for (n = 0; n < count; n++) {
- GEM_BUG_ON(!test_bit(IDLE, &waiters[n].flags));
- waiters[n].seqno =
- 1 + prandom_u32_state(&prng) % max_seqno;
- }
- mock_seqno_advance(engine, 0);
- igt_wake_all_sync(&ready, &set, &done, &wq, count);
-
- /* Simulate the GPU doing chunks of work, with one or more
- * seqno appearing to finish at the same time. A random number
- * of threads will be waiting upon the update and hopefully be
- * woken.
- */
- for (seqno = 1; seqno <= max_seqno + step; seqno += step) {
- usleep_range(50, 500);
- mock_seqno_advance(engine, seqno);
- }
- GEM_BUG_ON(intel_engine_get_seqno(engine) < 1 + max_seqno);
-
- /* With the seqno now beyond any of the waiting threads, they
- * should all be woken, see that they are complete and signal
- * that they are ready for the next test. We wait until all
- * threads are complete and waiting for us (i.e. not a seqno).
- */
- if (!wait_var_event_timeout(&done,
- !atomic_read(&done), 10 * HZ)) {
- pr_err("Timed out waiting for %d remaining waiters\n",
- atomic_read(&done));
- err = -ETIMEDOUT;
- break;
- }
-
- err = check_rbtree_empty(engine);
- if (err)
- break;
- }
-
-out_waiters:
- for (n = 0; n < count; n++) {
- if (IS_ERR(waiters[n].tsk))
- break;
-
- set_bit(STOP, &waiters[n].flags);
- }
- mock_seqno_advance(engine, INT_MAX); /* wakeup any broken waiters */
- igt_wake_all_sync(&ready, &set, &done, &wq, n);
-
- for (n = 0; n < count; n++) {
- if (IS_ERR(waiters[n].tsk))
- break;
-
- kthread_stop(waiters[n].tsk);
- put_task_struct(waiters[n].tsk);
- }
-
- kvfree(waiters);
-out_engines:
- mock_engine_flush(engine);
- return err;
-}
-
-int intel_breadcrumbs_mock_selftests(void)
-{
- static const struct i915_subtest tests[] = {
- SUBTEST(igt_random_insert_remove),
- SUBTEST(igt_insert_complete),
- SUBTEST(igt_wakeup),
- };
- struct drm_i915_private *i915;
- int err;
-
- i915 = mock_gem_device();
- if (!i915)
- return -ENOMEM;
-
- err = i915_subtests(tests, i915->engine[RCS]);
- drm_dev_put(&i915->drm);
-
- return err;
-}
diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c
index 32cba4cae31a..c5e0a0e98fcb 100644
--- a/drivers/gpu/drm/i915/selftests/intel_guc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_guc.c
@@ -137,12 +137,13 @@ static bool client_doorbell_in_sync(struct intel_guc_client *client)
static int igt_guc_clients(void *args)
{
struct drm_i915_private *dev_priv = args;
+ intel_wakeref_t wakeref;
struct intel_guc *guc;
int err = 0;
GEM_BUG_ON(!HAS_GUC(dev_priv));
mutex_lock(&dev_priv->drm.struct_mutex);
- intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(dev_priv);
guc = &dev_priv->guc;
if (!guc) {
@@ -225,7 +226,7 @@ out:
guc_clients_create(guc);
guc_clients_enable(guc);
unlock:
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
mutex_unlock(&dev_priv->drm.struct_mutex);
return err;
}
@@ -238,13 +239,14 @@ unlock:
static int igt_guc_doorbells(void *arg)
{
struct drm_i915_private *dev_priv = arg;
+ intel_wakeref_t wakeref;
struct intel_guc *guc;
int i, err = 0;
u16 db_id;
GEM_BUG_ON(!HAS_GUC(dev_priv));
mutex_lock(&dev_priv->drm.struct_mutex);
- intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(dev_priv);
guc = &dev_priv->guc;
if (!guc) {
@@ -337,7 +339,7 @@ out:
guc_client_free(clients[i]);
}
unlock:
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
mutex_unlock(&dev_priv->drm.struct_mutex);
return err;
}
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index 40efbed611de..7b6f3bea9ef8 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -103,52 +103,87 @@ static u64 hws_address(const struct i915_vma *hws,
return hws->node.start + offset_in_page(sizeof(u32)*rq->fence.context);
}
-static int emit_recurse_batch(struct hang *h,
- struct i915_request *rq)
+static int move_to_active(struct i915_vma *vma,
+ struct i915_request *rq,
+ unsigned int flags)
+{
+ int err;
+
+ err = i915_vma_move_to_active(vma, rq, flags);
+ if (err)
+ return err;
+
+ if (!i915_gem_object_has_active_reference(vma->obj)) {
+ i915_gem_object_get(vma->obj);
+ i915_gem_object_set_active_reference(vma->obj);
+ }
+
+ return 0;
+}
+
+static struct i915_request *
+hang_create_request(struct hang *h, struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = h->i915;
struct i915_address_space *vm =
- rq->gem_context->ppgtt ?
- &rq->gem_context->ppgtt->vm :
- &i915->ggtt.vm;
+ h->ctx->ppgtt ? &h->ctx->ppgtt->vm : &i915->ggtt.vm;
+ struct i915_request *rq = NULL;
struct i915_vma *hws, *vma;
unsigned int flags;
u32 *batch;
int err;
+ if (i915_gem_object_is_active(h->obj)) {
+ struct drm_i915_gem_object *obj;
+ void *vaddr;
+
+ obj = i915_gem_object_create_internal(h->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vaddr = i915_gem_object_pin_map(obj,
+ i915_coherent_map_type(h->i915));
+ if (IS_ERR(vaddr)) {
+ i915_gem_object_put(obj);
+ return ERR_CAST(vaddr);
+ }
+
+ i915_gem_object_unpin_map(h->obj);
+ i915_gem_object_put(h->obj);
+
+ h->obj = obj;
+ h->batch = vaddr;
+ }
+
vma = i915_vma_instance(h->obj, vm, NULL);
if (IS_ERR(vma))
- return PTR_ERR(vma);
+ return ERR_CAST(vma);
hws = i915_vma_instance(h->hws, vm, NULL);
if (IS_ERR(hws))
- return PTR_ERR(hws);
+ return ERR_CAST(hws);
err = i915_vma_pin(vma, 0, 0, PIN_USER);
if (err)
- return err;
+ return ERR_PTR(err);
err = i915_vma_pin(hws, 0, 0, PIN_USER);
if (err)
goto unpin_vma;
- err = i915_vma_move_to_active(vma, rq, 0);
- if (err)
+ rq = i915_request_alloc(engine, h->ctx);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
goto unpin_hws;
-
- if (!i915_gem_object_has_active_reference(vma->obj)) {
- i915_gem_object_get(vma->obj);
- i915_gem_object_set_active_reference(vma->obj);
}
- err = i915_vma_move_to_active(hws, rq, 0);
+ err = move_to_active(vma, rq, 0);
if (err)
- goto unpin_hws;
+ goto cancel_rq;
- if (!i915_gem_object_has_active_reference(hws->obj)) {
- i915_gem_object_get(hws->obj);
- i915_gem_object_set_active_reference(hws->obj);
- }
+ err = move_to_active(hws, rq, 0);
+ if (err)
+ goto cancel_rq;
batch = h->batch;
if (INTEL_GEN(i915) >= 8) {
@@ -213,52 +248,16 @@ static int emit_recurse_batch(struct hang *h,
err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
+cancel_rq:
+ if (err) {
+ i915_request_skip(rq, err);
+ i915_request_add(rq);
+ }
unpin_hws:
i915_vma_unpin(hws);
unpin_vma:
i915_vma_unpin(vma);
- return err;
-}
-
-static struct i915_request *
-hang_create_request(struct hang *h, struct intel_engine_cs *engine)
-{
- struct i915_request *rq;
- int err;
-
- if (i915_gem_object_is_active(h->obj)) {
- struct drm_i915_gem_object *obj;
- void *vaddr;
-
- obj = i915_gem_object_create_internal(h->i915, PAGE_SIZE);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- vaddr = i915_gem_object_pin_map(obj,
- i915_coherent_map_type(h->i915));
- if (IS_ERR(vaddr)) {
- i915_gem_object_put(obj);
- return ERR_CAST(vaddr);
- }
-
- i915_gem_object_unpin_map(h->obj);
- i915_gem_object_put(h->obj);
-
- h->obj = obj;
- h->batch = vaddr;
- }
-
- rq = i915_request_alloc(engine, h->ctx);
- if (IS_ERR(rq))
- return rq;
-
- err = emit_recurse_batch(h, rq);
- if (err) {
- i915_request_add(rq);
- return ERR_PTR(err);
- }
-
- return rq;
+ return err ? ERR_PTR(err) : rq;
}
static u32 hws_seqno(const struct hang *h, const struct i915_request *rq)
@@ -364,9 +363,7 @@ static int igt_global_reset(void *arg)
/* Check that we can issue a global GPU reset */
igt_global_reset_lock(i915);
- set_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags);
- mutex_lock(&i915->drm.struct_mutex);
reset_count = i915_reset_count(&i915->gpu_error);
i915_reset(i915, ALL_ENGINES, NULL);
@@ -375,9 +372,7 @@ static int igt_global_reset(void *arg)
pr_err("No GPU reset recorded!\n");
err = -EINVAL;
}
- mutex_unlock(&i915->drm.struct_mutex);
- GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags));
igt_global_reset_unlock(i915);
if (i915_terminally_wedged(&i915->gpu_error))
@@ -386,6 +381,29 @@ static int igt_global_reset(void *arg)
return err;
}
+static int igt_wedged_reset(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ intel_wakeref_t wakeref;
+
+ /* Check that we can recover a wedged device with a GPU reset */
+
+ igt_global_reset_lock(i915);
+ wakeref = intel_runtime_pm_get(i915);
+
+ i915_gem_set_wedged(i915);
+
+ mutex_lock(&i915->drm.struct_mutex);
+ GEM_BUG_ON(!i915_terminally_wedged(&i915->gpu_error));
+ i915_reset(i915, ALL_ENGINES, NULL);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ intel_runtime_pm_put(i915, wakeref);
+ igt_global_reset_unlock(i915);
+
+ return i915_terminally_wedged(&i915->gpu_error) ? -EIO : 0;
+}
+
static bool wait_for_idle(struct intel_engine_cs *engine)
{
return wait_for(intel_engine_is_idle(engine), IGT_IDLE_TIMEOUT) == 0;
@@ -431,8 +449,6 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
do {
- u32 seqno = intel_engine_get_seqno(engine);
-
if (active) {
struct i915_request *rq;
@@ -451,7 +467,7 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
if (!wait_until_running(&h, rq)) {
struct drm_printer p = drm_info_printer(i915->drm.dev);
- pr_err("%s: Failed to start request %x, at %x\n",
+ pr_err("%s: Failed to start request %llx, at %x\n",
__func__, rq->fence.seqno, hws_seqno(&h, rq));
intel_engine_dump(engine, &p,
"%s\n", engine->name);
@@ -461,8 +477,6 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
break;
}
- GEM_BUG_ON(!rq->global_seqno);
- seqno = rq->global_seqno - 1;
i915_request_put(rq);
}
@@ -478,16 +492,15 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
break;
}
- reset_engine_count += active;
if (i915_reset_engine_count(&i915->gpu_error, engine) !=
- reset_engine_count) {
- pr_err("%s engine reset %srecorded!\n",
- engine->name, active ? "not " : "");
+ ++reset_engine_count) {
+ pr_err("%s engine reset not recorded!\n",
+ engine->name);
err = -EINVAL;
break;
}
- if (!wait_for_idle(engine)) {
+ if (!i915_reset_flush(i915)) {
struct drm_printer p =
drm_info_printer(i915->drm.dev);
@@ -552,7 +565,7 @@ static int active_request_put(struct i915_request *rq)
return 0;
if (i915_request_wait(rq, 0, 5 * HZ) < 0) {
- GEM_TRACE("%s timed out waiting for completion of fence %llx:%d, seqno %d.\n",
+ GEM_TRACE("%s timed out waiting for completion of fence %llx:%lld, seqno %d.\n",
rq->engine->name,
rq->fence.context,
rq->fence.seqno,
@@ -710,7 +723,6 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
do {
- u32 seqno = intel_engine_get_seqno(engine);
struct i915_request *rq = NULL;
if (flags & TEST_ACTIVE) {
@@ -729,7 +741,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
if (!wait_until_running(&h, rq)) {
struct drm_printer p = drm_info_printer(i915->drm.dev);
- pr_err("%s: Failed to start request %x, at %x\n",
+ pr_err("%s: Failed to start request %llx, at %x\n",
__func__, rq->fence.seqno, hws_seqno(&h, rq));
intel_engine_dump(engine, &p,
"%s\n", engine->name);
@@ -738,9 +750,6 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
err = -EIO;
break;
}
-
- GEM_BUG_ON(!rq->global_seqno);
- seqno = rq->global_seqno - 1;
}
err = i915_reset_engine(engine, NULL);
@@ -777,10 +786,9 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
reported = i915_reset_engine_count(&i915->gpu_error, engine);
reported -= threads[engine->id].resets;
- if (reported != (flags & TEST_ACTIVE ? count : 0)) {
- pr_err("i915_reset_engine(%s:%s): reset %lu times, but reported %lu, expected %lu reported\n",
- engine->name, test_name, count, reported,
- (flags & TEST_ACTIVE ? count : 0));
+ if (reported != count) {
+ pr_err("i915_reset_engine(%s:%s): reset %lu times, but reported %lu\n",
+ engine->name, test_name, count, reported);
if (!err)
err = -EINVAL;
}
@@ -879,20 +887,13 @@ static int igt_reset_engines(void *arg)
return 0;
}
-static u32 fake_hangcheck(struct i915_request *rq, u32 mask)
+static u32 fake_hangcheck(struct drm_i915_private *i915, u32 mask)
{
- struct i915_gpu_error *error = &rq->i915->gpu_error;
- u32 reset_count = i915_reset_count(error);
-
- error->stalled_mask = mask;
-
- /* set_bit() must be after we have setup the backchannel (mask) */
- smp_mb__before_atomic();
- set_bit(I915_RESET_HANDOFF, &error->flags);
+ u32 count = i915_reset_count(&i915->gpu_error);
- wake_up_all(&error->wait_queue);
+ i915_reset(i915, mask, NULL);
- return reset_count;
+ return count;
}
static int igt_reset_wait(void *arg)
@@ -928,7 +929,7 @@ static int igt_reset_wait(void *arg)
if (!wait_until_running(&h, rq)) {
struct drm_printer p = drm_info_printer(i915->drm.dev);
- pr_err("%s: Failed to start request %x, at %x\n",
+ pr_err("%s: Failed to start request %llx, at %x\n",
__func__, rq->fence.seqno, hws_seqno(&h, rq));
intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
@@ -938,7 +939,7 @@ static int igt_reset_wait(void *arg)
goto out_rq;
}
- reset_count = fake_hangcheck(rq, ALL_ENGINES);
+ reset_count = fake_hangcheck(i915, ALL_ENGINES);
timeout = i915_request_wait(rq, I915_WAIT_LOCKED, 10);
if (timeout < 0) {
@@ -948,7 +949,6 @@ static int igt_reset_wait(void *arg)
goto out_rq;
}
- GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags));
if (i915_reset_count(&i915->gpu_error) == reset_count) {
pr_err("No GPU reset recorded!\n");
err = -EINVAL;
@@ -1107,7 +1107,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
if (!wait_until_running(&h, rq)) {
struct drm_printer p = drm_info_printer(i915->drm.dev);
- pr_err("%s: Failed to start request %x, at %x\n",
+ pr_err("%s: Failed to start request %llx, at %x\n",
__func__, rq->fence.seqno, hws_seqno(&h, rq));
intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
@@ -1127,7 +1127,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
wait_for_completion(&arg.completion);
- if (wait_for(waitqueue_active(&rq->execute), 10)) {
+ if (wait_for(!list_empty(&rq->fence.cb_list), 10)) {
struct drm_printer p = drm_info_printer(i915->drm.dev);
pr_err("igt/evict_vma kthread did not wait\n");
@@ -1138,7 +1138,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
}
out_reset:
- fake_hangcheck(rq, intel_engine_flag(rq->engine));
+ fake_hangcheck(rq->i915, intel_engine_flag(rq->engine));
if (tsk) {
struct igt_wedge_me w;
@@ -1302,7 +1302,7 @@ static int igt_reset_queue(void *arg)
if (!wait_until_running(&h, prev)) {
struct drm_printer p = drm_info_printer(i915->drm.dev);
- pr_err("%s(%s): Failed to start request %x, at %x\n",
+ pr_err("%s(%s): Failed to start request %llx, at %x\n",
__func__, engine->name,
prev->fence.seqno, hws_seqno(&h, prev));
intel_engine_dump(engine, &p,
@@ -1317,12 +1317,7 @@ static int igt_reset_queue(void *arg)
goto fini;
}
- reset_count = fake_hangcheck(prev, ENGINE_MASK(id));
-
- i915_reset(i915, ENGINE_MASK(id), NULL);
-
- GEM_BUG_ON(test_bit(I915_RESET_HANDOFF,
- &i915->gpu_error.flags));
+ reset_count = fake_hangcheck(i915, ENGINE_MASK(id));
if (prev->fence.error != -EIO) {
pr_err("GPU reset not recorded on hanging request [fence.error=%d]!\n",
@@ -1413,7 +1408,7 @@ static int igt_handle_error(void *arg)
if (!wait_until_running(&h, rq)) {
struct drm_printer p = drm_info_printer(i915->drm.dev);
- pr_err("%s: Failed to start request %x, at %x\n",
+ pr_err("%s: Failed to start request %llx, at %x\n",
__func__, rq->fence.seqno, hws_seqno(&h, rq));
intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
@@ -1449,10 +1444,203 @@ err_unlock:
return err;
}
+static void __preempt_begin(void)
+{
+ preempt_disable();
+}
+
+static void __preempt_end(void)
+{
+ preempt_enable();
+}
+
+static void __softirq_begin(void)
+{
+ local_bh_disable();
+}
+
+static void __softirq_end(void)
+{
+ local_bh_enable();
+}
+
+static void __hardirq_begin(void)
+{
+ local_irq_disable();
+}
+
+static void __hardirq_end(void)
+{
+ local_irq_enable();
+}
+
+struct atomic_section {
+ const char *name;
+ void (*critical_section_begin)(void);
+ void (*critical_section_end)(void);
+};
+
+static int __igt_atomic_reset_engine(struct intel_engine_cs *engine,
+ const struct atomic_section *p,
+ const char *mode)
+{
+ struct tasklet_struct * const t = &engine->execlists.tasklet;
+ int err;
+
+ GEM_TRACE("i915_reset_engine(%s:%s) under %s\n",
+ engine->name, mode, p->name);
+
+ tasklet_disable_nosync(t);
+ p->critical_section_begin();
+
+ err = i915_reset_engine(engine, NULL);
+
+ p->critical_section_end();
+ tasklet_enable(t);
+
+ if (err)
+ pr_err("i915_reset_engine(%s:%s) failed under %s\n",
+ engine->name, mode, p->name);
+
+ return err;
+}
+
+static int igt_atomic_reset_engine(struct intel_engine_cs *engine,
+ const struct atomic_section *p)
+{
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_request *rq;
+ struct hang h;
+ int err;
+
+ err = __igt_atomic_reset_engine(engine, p, "idle");
+ if (err)
+ return err;
+
+ err = hang_init(&h, i915);
+ if (err)
+ return err;
+
+ rq = hang_create_request(&h, engine);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (wait_until_running(&h, rq)) {
+ err = __igt_atomic_reset_engine(engine, p, "active");
+ } else {
+ pr_err("%s(%s): Failed to start request %llx, at %x\n",
+ __func__, engine->name,
+ rq->fence.seqno, hws_seqno(&h, rq));
+ i915_gem_set_wedged(i915);
+ err = -EIO;
+ }
+
+ if (err == 0) {
+ struct igt_wedge_me w;
+
+ igt_wedge_on_timeout(&w, i915, HZ / 20 /* 50ms timeout */)

+ i915_request_wait(rq,
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
+ if (i915_terminally_wedged(&i915->gpu_error))
+ err = -EIO;
+ }
+
+ i915_request_put(rq);
+out:
+ hang_fini(&h);
+ return err;
+}
+
+static void force_reset(struct drm_i915_private *i915)
+{
+ i915_gem_set_wedged(i915);
+ i915_reset(i915, 0, NULL);
+}
+
+static int igt_atomic_reset(void *arg)
+{
+ static const struct atomic_section phases[] = {
+ { "preempt", __preempt_begin, __preempt_end },
+ { "softirq", __softirq_begin, __softirq_end },
+ { "hardirq", __hardirq_begin, __hardirq_end },
+ { }
+ };
+ struct drm_i915_private *i915 = arg;
+ intel_wakeref_t wakeref;
+ int err = 0;
+
+ /* Check that the resets are usable from atomic context */
+
+ if (USES_GUC_SUBMISSION(i915))
+ return 0; /* guc is dead; long live the guc */
+
+ igt_global_reset_lock(i915);
+ mutex_lock(&i915->drm.struct_mutex);
+ wakeref = intel_runtime_pm_get(i915);
+
+ /* Flush any requests before we get started and check basics */
+ force_reset(i915);
+ if (i915_terminally_wedged(&i915->gpu_error))
+ goto unlock;
+
+ if (intel_has_gpu_reset(i915)) {
+ const typeof(*phases) *p;
+
+ for (p = phases; p->name; p++) {
+ GEM_TRACE("intel_gpu_reset under %s\n", p->name);
+
+ p->critical_section_begin();
+ err = intel_gpu_reset(i915, ALL_ENGINES);
+ p->critical_section_end();
+
+ if (err) {
+ pr_err("intel_gpu_reset failed under %s\n",
+ p->name);
+ goto out;
+ }
+ }
+
+ force_reset(i915);
+ }
+
+ if (intel_has_reset_engine(i915)) {
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, i915, id) {
+ const typeof(*phases) *p;
+
+ for (p = phases; p->name; p++) {
+ err = igt_atomic_reset_engine(engine, p);
+ if (err)
+ goto out;
+ }
+ }
+ }
+
+out:
+ /* As we poke around the guts, do a full reset before continuing. */
+ force_reset(i915);
+
+unlock:
+ intel_runtime_pm_put(i915, wakeref);
+ mutex_unlock(&i915->drm.struct_mutex);
+ igt_global_reset_unlock(i915);
+
+ return err;
+}
+
int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_global_reset), /* attempt to recover GPU first */
+ SUBTEST(igt_wedged_reset),
SUBTEST(igt_hang_sanitycheck),
SUBTEST(igt_reset_idle_engine),
SUBTEST(igt_reset_active_engine),
@@ -1463,7 +1651,9 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_reset_evict_ppgtt),
SUBTEST(igt_reset_evict_fence),
SUBTEST(igt_handle_error),
+ SUBTEST(igt_atomic_reset),
};
+ intel_wakeref_t wakeref;
bool saved_hangcheck;
int err;
@@ -1473,8 +1663,9 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
if (i915_terminally_wedged(&i915->gpu_error))
return -EIO; /* we're long past hope of a successful reset */
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck);
+ drain_delayed_work(&i915->gpu_error.hangcheck_work); /* flush param */
err = i915_subtests(tests, i915);
@@ -1483,7 +1674,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
mutex_unlock(&i915->drm.struct_mutex);
i915_modparams.enable_hangcheck = saved_hangcheck;
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
return err;
}
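
igt_atomic_reset and igt_atomic_reset_engine above drive the reset paths from inside preempt-, softirq- and hardirq-disabled sections by walking a NULL-terminated table of begin/end callbacks. The same idiom in isolation looks roughly like the sketch below; the struct atomic_section and the phase helpers come from the hunk, while run_under_phases() and its body callback are hypothetical names used only for illustration:

static int run_under_phases(const struct atomic_section *phases,
			    int (*body)(void *data), void *data)
{
	const struct atomic_section *p;
	int err;

	for (p = phases; p->name; p++) {	/* table ends with an all-NULL entry */
		p->critical_section_begin();
		err = body(data);		/* must not sleep inside the section */
		p->critical_section_end();
		if (err)
			return err;
	}

	return 0;
}
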
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index ca461e3a5f27..58144e024751 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -4,6 +4,10 @@
* Copyright © 2018 Intel Corporation
*/
+#include <linux/prime_numbers.h>
+
+#include "../i915_reset.h"
+
#include "../i915_selftest.h"
#include "igt_flush_test.h"
#include "igt_spinner.h"
@@ -18,13 +22,14 @@ static int live_sanitycheck(void *arg)
struct i915_gem_context *ctx;
enum intel_engine_id id;
struct igt_spinner spin;
+ intel_wakeref_t wakeref;
int err = -ENOMEM;
if (!HAS_LOGICAL_RING_CONTEXTS(i915))
return 0;
mutex_lock(&i915->drm.struct_mutex);
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
if (igt_spinner_init(&spin, i915))
goto err_unlock;
@@ -65,7 +70,7 @@ err_spin:
igt_spinner_fini(&spin);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -77,13 +82,14 @@ static int live_preempt(void *arg)
struct igt_spinner spin_hi, spin_lo;
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
int err = -ENOMEM;
if (!HAS_LOGICAL_RING_PREEMPTION(i915))
return 0;
mutex_lock(&i915->drm.struct_mutex);
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
if (igt_spinner_init(&spin_hi, i915))
goto err_unlock;
@@ -158,7 +164,7 @@ err_spin_hi:
igt_spinner_fini(&spin_hi);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -171,13 +177,14 @@ static int live_late_preempt(void *arg)
struct intel_engine_cs *engine;
struct i915_sched_attr attr = {};
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
int err = -ENOMEM;
if (!HAS_LOGICAL_RING_PREEMPTION(i915))
return 0;
mutex_lock(&i915->drm.struct_mutex);
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
if (igt_spinner_init(&spin_hi, i915))
goto err_unlock;
@@ -251,7 +258,7 @@ err_spin_hi:
igt_spinner_fini(&spin_hi);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
@@ -263,6 +270,243 @@ err_wedged:
goto err_ctx_lo;
}
+struct preempt_client {
+ struct igt_spinner spin;
+ struct i915_gem_context *ctx;
+};
+
+static int preempt_client_init(struct drm_i915_private *i915,
+ struct preempt_client *c)
+{
+ c->ctx = kernel_context(i915);
+ if (!c->ctx)
+ return -ENOMEM;
+
+ if (igt_spinner_init(&c->spin, i915))
+ goto err_ctx;
+
+ return 0;
+
+err_ctx:
+ kernel_context_close(c->ctx);
+ return -ENOMEM;
+}
+
+static void preempt_client_fini(struct preempt_client *c)
+{
+ igt_spinner_fini(&c->spin);
+ kernel_context_close(c->ctx);
+}
+
+static int live_suppress_self_preempt(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct i915_sched_attr attr = {
+ .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX)
+ };
+ struct preempt_client a, b;
+ enum intel_engine_id id;
+ intel_wakeref_t wakeref;
+ int err = -ENOMEM;
+
+ /*
+ * Verify that if a preemption request does not cause a change in
+ * the current execution order, the preempt-to-idle injection is
+ * skipped and that we do not accidentally apply it after the CS
+ * completion event.
+ */
+
+ if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+ return 0;
+
+ if (USES_GUC_SUBMISSION(i915))
+ return 0; /* presume black box */
+
+ mutex_lock(&i915->drm.struct_mutex);
+ wakeref = intel_runtime_pm_get(i915);
+
+ if (preempt_client_init(i915, &a))
+ goto err_unlock;
+ if (preempt_client_init(i915, &b))
+ goto err_client_a;
+
+ for_each_engine(engine, i915, id) {
+ struct i915_request *rq_a, *rq_b;
+ int depth;
+
+ engine->execlists.preempt_hang.count = 0;
+
+ rq_a = igt_spinner_create_request(&a.spin,
+ a.ctx, engine,
+ MI_NOOP);
+ if (IS_ERR(rq_a)) {
+ err = PTR_ERR(rq_a);
+ goto err_client_b;
+ }
+
+ i915_request_add(rq_a);
+ if (!igt_wait_for_spinner(&a.spin, rq_a)) {
+ pr_err("First client failed to start\n");
+ goto err_wedged;
+ }
+
+ for (depth = 0; depth < 8; depth++) {
+ rq_b = igt_spinner_create_request(&b.spin,
+ b.ctx, engine,
+ MI_NOOP);
+ if (IS_ERR(rq_b)) {
+ err = PTR_ERR(rq_b);
+ goto err_client_b;
+ }
+ i915_request_add(rq_b);
+
+ GEM_BUG_ON(i915_request_completed(rq_a));
+ engine->schedule(rq_a, &attr);
+ igt_spinner_end(&a.spin);
+
+ if (!igt_wait_for_spinner(&b.spin, rq_b)) {
+ pr_err("Second client failed to start\n");
+ goto err_wedged;
+ }
+
+ swap(a, b);
+ rq_a = rq_b;
+ }
+ igt_spinner_end(&a.spin);
+
+ if (engine->execlists.preempt_hang.count) {
+ pr_err("Preemption recorded x%d, depth %d; should have been suppressed!\n",
+ engine->execlists.preempt_hang.count,
+ depth);
+ err = -EINVAL;
+ goto err_client_b;
+ }
+
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ goto err_wedged;
+ }
+
+ err = 0;
+err_client_b:
+ preempt_client_fini(&b);
+err_client_a:
+ preempt_client_fini(&a);
+err_unlock:
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+ intel_runtime_pm_put(i915, wakeref);
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+
+err_wedged:
+ igt_spinner_end(&b.spin);
+ igt_spinner_end(&a.spin);
+ i915_gem_set_wedged(i915);
+ err = -EIO;
+ goto err_client_b;
+}
+
+static int live_chain_preempt(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct preempt_client hi, lo;
+ enum intel_engine_id id;
+ intel_wakeref_t wakeref;
+ int err = -ENOMEM;
+
+ /*
+ * Build a chain AB...BA between two contexts (A, B) and request
+ * preemption of the last request. It should then complete before
+ * the previously submitted spinner in B.
+ */
+
+ if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+ return 0;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ wakeref = intel_runtime_pm_get(i915);
+
+ if (preempt_client_init(i915, &hi))
+ goto err_unlock;
+
+ if (preempt_client_init(i915, &lo))
+ goto err_client_hi;
+
+ for_each_engine(engine, i915, id) {
+ struct i915_sched_attr attr = {
+ .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
+ };
+ int count, i;
+
+ for_each_prime_number_from(count, 1, 32) { /* must fit ring! */
+ struct i915_request *rq;
+
+ rq = igt_spinner_create_request(&hi.spin,
+ hi.ctx, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq))
+ goto err_wedged;
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&hi.spin, rq))
+ goto err_wedged;
+
+ rq = igt_spinner_create_request(&lo.spin,
+ lo.ctx, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq))
+ goto err_wedged;
+ i915_request_add(rq);
+
+ for (i = 0; i < count; i++) {
+ rq = i915_request_alloc(engine, lo.ctx);
+ if (IS_ERR(rq))
+ goto err_wedged;
+ i915_request_add(rq);
+ }
+
+ rq = i915_request_alloc(engine, hi.ctx);
+ if (IS_ERR(rq))
+ goto err_wedged;
+ i915_request_add(rq);
+ engine->schedule(rq, &attr);
+
+ igt_spinner_end(&hi.spin);
+ if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
+ struct drm_printer p =
+ drm_info_printer(i915->drm.dev);
+
+ pr_err("Failed to preempt over chain of %d\n",
+ count);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+ goto err_wedged;
+ }
+ igt_spinner_end(&lo.spin);
+ }
+ }
+
+ err = 0;
+err_client_lo:
+ preempt_client_fini(&lo);
+err_client_hi:
+ preempt_client_fini(&hi);
+err_unlock:
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+ intel_runtime_pm_put(i915, wakeref);
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+
+err_wedged:
+ igt_spinner_end(&hi.spin);
+ igt_spinner_end(&lo.spin);
+ i915_gem_set_wedged(i915);
+ err = -EIO;
+ goto err_client_lo;
+}
+
static int live_preempt_hang(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -270,6 +514,7 @@ static int live_preempt_hang(void *arg)
struct igt_spinner spin_hi, spin_lo;
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
int err = -ENOMEM;
if (!HAS_LOGICAL_RING_PREEMPTION(i915))
@@ -279,7 +524,7 @@ static int live_preempt_hang(void *arg)
return 0;
mutex_lock(&i915->drm.struct_mutex);
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
if (igt_spinner_init(&spin_hi, i915))
goto err_unlock;
@@ -374,7 +619,7 @@ err_spin_hi:
igt_spinner_fini(&spin_hi);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -522,7 +767,7 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
count, flags,
- INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
+ RUNTIME_INFO(smoke->i915)->num_rings, smoke->ncontext);
return 0;
}
@@ -550,7 +795,7 @@ static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
count, flags,
- INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
+ RUNTIME_INFO(smoke->i915)->num_rings, smoke->ncontext);
return 0;
}
@@ -562,6 +807,7 @@ static int live_preempt_smoke(void *arg)
.ncontext = 1024,
};
const unsigned int phase[] = { 0, BATCH };
+ intel_wakeref_t wakeref;
int err = -ENOMEM;
u32 *cs;
int n;
@@ -576,7 +822,7 @@ static int live_preempt_smoke(void *arg)
return -ENOMEM;
mutex_lock(&smoke.i915->drm.struct_mutex);
- intel_runtime_pm_get(smoke.i915);
+ wakeref = intel_runtime_pm_get(smoke.i915);
smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
if (IS_ERR(smoke.batch)) {
@@ -627,7 +873,7 @@ err_ctx:
err_batch:
i915_gem_object_put(smoke.batch);
err_unlock:
- intel_runtime_pm_put(smoke.i915);
+ intel_runtime_pm_put(smoke.i915, wakeref);
mutex_unlock(&smoke.i915->drm.struct_mutex);
kfree(smoke.contexts);
@@ -640,6 +886,8 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_sanitycheck),
SUBTEST(live_preempt),
SUBTEST(live_late_preempt),
+ SUBTEST(live_suppress_self_preempt),
+ SUBTEST(live_chain_preempt),
SUBTEST(live_preempt_hang),
SUBTEST(live_preempt_smoke),
};
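
The new live_suppress_self_preempt and live_chain_preempt subtests above both build on the small preempt_client helper, which bundles a context with a spinner and is torn down in reverse order of setup. A condensed sketch of that discipline, using the helpers added in the hunk (example_preempt_pair() is hypothetical and the comment stands in for the real per-engine loops):

static int example_preempt_pair(struct drm_i915_private *i915)
{
	struct preempt_client hi, lo;
	int err = -ENOMEM;

	if (preempt_client_init(i915, &hi))	/* context + spinner for the winner */
		return err;
	if (preempt_client_init(i915, &lo))	/* context + spinner to be preempted */
		goto err_hi;

	/* ... submit spinners and bump priority via engine->schedule() ... */
	err = 0;

	preempt_client_fini(&lo);
err_hi:
	preempt_client_fini(&hi);
	return err;
}
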
diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
index 67017d5175b8..b15c4f26c593 100644
--- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
@@ -5,6 +5,7 @@
*/
#include "../i915_selftest.h"
+#include "../i915_reset.h"
#include "igt_flush_test.h"
#include "igt_reset.h"
@@ -12,13 +13,59 @@
#include "igt_wedge_me.h"
#include "mock_context.h"
+#define REF_NAME_MAX (INTEL_ENGINE_CS_MAX_NAME + 4)
+struct wa_lists {
+ struct i915_wa_list gt_wa_list;
+ struct {
+ char name[REF_NAME_MAX];
+ struct i915_wa_list wa_list;
+ } engine[I915_NUM_ENGINES];
+};
+
+static void
+reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ memset(lists, 0, sizeof(*lists));
+
+ wa_init_start(&lists->gt_wa_list, "GT_REF");
+ gt_init_workarounds(i915, &lists->gt_wa_list);
+ wa_init_finish(&lists->gt_wa_list);
+
+ for_each_engine(engine, i915, id) {
+ struct i915_wa_list *wal = &lists->engine[id].wa_list;
+ char *name = lists->engine[id].name;
+
+ snprintf(name, REF_NAME_MAX, "%s_REF", engine->name);
+
+ wa_init_start(wal, name);
+ engine_init_workarounds(engine, wal);
+ wa_init_finish(wal);
+ }
+}
+
+static void
+reference_lists_fini(struct drm_i915_private *i915, struct wa_lists *lists)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, i915, id)
+ intel_wa_list_free(&lists->engine[id].wa_list);
+
+ intel_wa_list_free(&lists->gt_wa_list);
+}
+
static struct drm_i915_gem_object *
read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
+ const u32 base = engine->mmio_base;
struct drm_i915_gem_object *result;
+ intel_wakeref_t wakeref;
struct i915_request *rq;
struct i915_vma *vma;
- const u32 base = engine->mmio_base;
u32 srm, *cs;
int err;
int i;
@@ -47,9 +94,9 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
if (err)
goto err_obj;
- intel_runtime_pm_get(engine->i915);
- rq = i915_request_alloc(engine, ctx);
- intel_runtime_pm_put(engine->i915);
+ rq = ERR_PTR(-ENODEV);
+ with_intel_runtime_pm(engine->i915, wakeref)
+ rq = i915_request_alloc(engine, ctx);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_pin;
@@ -167,7 +214,6 @@ out_put:
static int do_device_reset(struct intel_engine_cs *engine)
{
- set_bit(I915_RESET_HANDOFF, &engine->i915->gpu_error.flags);
i915_reset(engine->i915, ENGINE_MASK(engine->id), "live_workarounds");
return 0;
}
@@ -183,20 +229,22 @@ switch_to_scratch_context(struct intel_engine_cs *engine,
{
struct i915_gem_context *ctx;
struct i915_request *rq;
+ intel_wakeref_t wakeref;
int err = 0;
ctx = kernel_context(engine->i915);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- intel_runtime_pm_get(engine->i915);
-
- if (spin)
- rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP);
- else
- rq = i915_request_alloc(engine, ctx);
-
- intel_runtime_pm_put(engine->i915);
+ rq = ERR_PTR(-ENODEV);
+ with_intel_runtime_pm(engine->i915, wakeref) {
+ if (spin)
+ rq = igt_spinner_create_request(spin,
+ ctx, engine,
+ MI_NOOP);
+ else
+ rq = i915_request_alloc(engine, ctx);
+ }
kernel_context_close(ctx);
@@ -228,6 +276,7 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
bool want_spin = reset == do_engine_reset;
struct i915_gem_context *ctx;
struct igt_spinner spin;
+ intel_wakeref_t wakeref;
int err;
pr_info("Checking %d whitelisted registers (RING_NONPRIV) [%s]\n",
@@ -253,9 +302,8 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
if (err)
goto out;
- intel_runtime_pm_get(i915);
- err = reset(engine);
- intel_runtime_pm_put(i915);
+ with_intel_runtime_pm(i915, wakeref)
+ err = reset(engine);
if (want_spin) {
igt_spinner_end(&spin);
@@ -326,16 +374,17 @@ out:
return err;
}
-static bool verify_gt_engine_wa(struct drm_i915_private *i915, const char *str)
+static bool verify_gt_engine_wa(struct drm_i915_private *i915,
+ struct wa_lists *lists, const char *str)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
bool ok = true;
- ok &= intel_gt_verify_workarounds(i915, str);
+ ok &= wa_list_verify(i915, &lists->gt_wa_list, str);
for_each_engine(engine, i915, id)
- ok &= intel_engine_verify_workarounds(engine, str);
+ ok &= wa_list_verify(i915, &lists->engine[id].wa_list, str);
return ok;
}
@@ -344,7 +393,8 @@ static int
live_gpu_reset_gt_engine_workarounds(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct i915_gpu_error *error = &i915->gpu_error;
+ intel_wakeref_t wakeref;
+ struct wa_lists lists;
bool ok;
if (!intel_has_gpu_reset(i915))
@@ -353,19 +403,21 @@ live_gpu_reset_gt_engine_workarounds(void *arg)
pr_info("Verifying after GPU reset...\n");
igt_global_reset_lock(i915);
+ wakeref = intel_runtime_pm_get(i915);
- ok = verify_gt_engine_wa(i915, "before reset");
+ reference_lists_init(i915, &lists);
+
+ ok = verify_gt_engine_wa(i915, &lists, "before reset");
if (!ok)
goto out;
- intel_runtime_pm_get(i915);
- set_bit(I915_RESET_HANDOFF, &error->flags);
i915_reset(i915, ALL_ENGINES, "live_workarounds");
- intel_runtime_pm_put(i915);
- ok = verify_gt_engine_wa(i915, "after reset");
+ ok = verify_gt_engine_wa(i915, &lists, "after reset");
out:
+ reference_lists_fini(i915, &lists);
+ intel_runtime_pm_put(i915, wakeref);
igt_global_reset_unlock(i915);
return ok ? 0 : -ESRCH;
@@ -380,6 +432,8 @@ live_engine_reset_gt_engine_workarounds(void *arg)
struct igt_spinner spin;
enum intel_engine_id id;
struct i915_request *rq;
+ intel_wakeref_t wakeref;
+ struct wa_lists lists;
int ret = 0;
if (!intel_has_reset_engine(i915))
@@ -390,23 +444,24 @@ live_engine_reset_gt_engine_workarounds(void *arg)
return PTR_ERR(ctx);
igt_global_reset_lock(i915);
+ wakeref = intel_runtime_pm_get(i915);
+
+ reference_lists_init(i915, &lists);
for_each_engine(engine, i915, id) {
bool ok;
pr_info("Verifying after %s reset...\n", engine->name);
- ok = verify_gt_engine_wa(i915, "before reset");
+ ok = verify_gt_engine_wa(i915, &lists, "before reset");
if (!ok) {
ret = -ESRCH;
goto err;
}
- intel_runtime_pm_get(i915);
i915_reset_engine(engine, "live_workarounds");
- intel_runtime_pm_put(i915);
- ok = verify_gt_engine_wa(i915, "after idle reset");
+ ok = verify_gt_engine_wa(i915, &lists, "after idle reset");
if (!ok) {
ret = -ESRCH;
goto err;
@@ -416,13 +471,10 @@ live_engine_reset_gt_engine_workarounds(void *arg)
if (ret)
goto err;
- intel_runtime_pm_get(i915);
-
rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
igt_spinner_fini(&spin);
- intel_runtime_pm_put(i915);
goto err;
}
@@ -431,19 +483,16 @@ live_engine_reset_gt_engine_workarounds(void *arg)
if (!igt_wait_for_spinner(&spin, rq)) {
pr_err("Spinner failed to start\n");
igt_spinner_fini(&spin);
- intel_runtime_pm_put(i915);
ret = -ETIMEDOUT;
goto err;
}
i915_reset_engine(engine, "live_workarounds");
- intel_runtime_pm_put(i915);
-
igt_spinner_end(&spin);
igt_spinner_fini(&spin);
- ok = verify_gt_engine_wa(i915, "after busy reset");
+ ok = verify_gt_engine_wa(i915, &lists, "after busy reset");
if (!ok) {
ret = -ESRCH;
goto err;
@@ -451,6 +500,8 @@ live_engine_reset_gt_engine_workarounds(void *arg)
}
err:
+ reference_lists_fini(i915, &lists);
+ intel_runtime_pm_put(i915, wakeref);
igt_global_reset_unlock(i915);
kernel_context_close(ctx);
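
Several of the intel_workarounds.c hunks above replace an explicit runtime-PM get/put bracket with the scoped with_intel_runtime_pm() form, seeding the result with an error value so the caller can tell whether the body ever ran. That idiom reduced to its essentials, using only helpers that appear in the diff (the wrapper name is hypothetical):

static struct i915_request *
request_alloc_with_rpm(struct intel_engine_cs *engine,
		       struct i915_gem_context *ctx)
{
	struct i915_request *rq = ERR_PTR(-ENODEV);	/* default if the body never runs */
	intel_wakeref_t wakeref;

	with_intel_runtime_pm(engine->i915, wakeref)
		rq = i915_request_alloc(engine, ctx);

	return rq;
}
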
diff --git a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
index b26f07b55d86..2bfa72c1654b 100644
--- a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
+++ b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
@@ -76,3 +76,57 @@ void timed_fence_fini(struct timed_fence *tf)
destroy_timer_on_stack(&tf->timer);
i915_sw_fence_fini(&tf->fence);
}
+
+struct heap_fence {
+ struct i915_sw_fence fence;
+ union {
+ struct kref ref;
+ struct rcu_head rcu;
+ };
+};
+
+static int __i915_sw_fence_call
+heap_fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
+ struct heap_fence *h = container_of(fence, typeof(*h), fence);
+
+ switch (state) {
+ case FENCE_COMPLETE:
+ break;
+
+ case FENCE_FREE:
+ heap_fence_put(&h->fence);
+ }
+
+ return NOTIFY_DONE;
+}
+
+struct i915_sw_fence *heap_fence_create(gfp_t gfp)
+{
+ struct heap_fence *h;
+
+ h = kmalloc(sizeof(*h), gfp);
+ if (!h)
+ return NULL;
+
+ i915_sw_fence_init(&h->fence, heap_fence_notify);
+ refcount_set(&h->ref.refcount, 2);
+
+ return &h->fence;
+}
+
+static void heap_fence_release(struct kref *ref)
+{
+ struct heap_fence *h = container_of(ref, typeof(*h), ref);
+
+ i915_sw_fence_fini(&h->fence);
+
+ kfree_rcu(h, rcu);
+}
+
+void heap_fence_put(struct i915_sw_fence *fence)
+{
+ struct heap_fence *h = container_of(fence, typeof(*h), fence);
+
+ kref_put(&h->ref, heap_fence_release);
+}
diff --git a/drivers/gpu/drm/i915/selftests/lib_sw_fence.h b/drivers/gpu/drm/i915/selftests/lib_sw_fence.h
index 474aafb92ae1..1f9927e10f3a 100644
--- a/drivers/gpu/drm/i915/selftests/lib_sw_fence.h
+++ b/drivers/gpu/drm/i915/selftests/lib_sw_fence.h
@@ -39,4 +39,7 @@ struct timed_fence {
void timed_fence_init(struct timed_fence *tf, unsigned long expires);
void timed_fence_fini(struct timed_fence *tf);
+struct i915_sw_fence *heap_fence_create(gfp_t gfp);
+void heap_fence_put(struct i915_sw_fence *fence);
+
#endif /* _LIB_SW_FENCE_H_ */
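
heap_fence_create() above hands back a fence with its reference count preset to two: one reference belongs to the caller, the other is dropped by the FENCE_FREE notification once the fence completes. A hedged usage sketch, assuming the i915_sw_fence commit flow used elsewhere in the selftests (example_heap_fence() is hypothetical):

static int example_heap_fence(void)
{
	struct i915_sw_fence *fence;

	fence = heap_fence_create(GFP_KERNEL);
	if (!fence)
		return -ENOMEM;

	/* ... await other fences or requests on it here ... */

	i915_sw_fence_commit(fence);	/* completion triggers FENCE_FREE, dropping one ref */
	heap_fence_put(fence);		/* drop the caller's reference */

	return 0;
}
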
diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c
index d937bdff26f9..b646cdcdd602 100644
--- a/drivers/gpu/drm/i915/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/selftests/mock_context.c
@@ -45,11 +45,8 @@ mock_context(struct drm_i915_private *i915,
INIT_LIST_HEAD(&ctx->handles_list);
INIT_LIST_HEAD(&ctx->hw_id_link);
- for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
- struct intel_context *ce = &ctx->__engine[n];
-
- ce->gem_context = ctx;
- }
+ for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++)
+ intel_context_init(&ctx->__engine[n], ctx, i915->engine[n]);
ret = i915_gem_context_pin_hw_id(ctx);
if (ret < 0)
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index d0c44c18db42..08f0cab02e0f 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -30,6 +30,52 @@ struct mock_ring {
struct i915_timeline timeline;
};
+static void mock_timeline_pin(struct i915_timeline *tl)
+{
+ tl->pin_count++;
+}
+
+static void mock_timeline_unpin(struct i915_timeline *tl)
+{
+ GEM_BUG_ON(!tl->pin_count);
+ tl->pin_count--;
+}
+
+static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
+{
+ const unsigned long sz = PAGE_SIZE / 2;
+ struct mock_ring *ring;
+
+ ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL);
+ if (!ring)
+ return NULL;
+
+ if (i915_timeline_init(engine->i915,
+ &ring->timeline, engine->name,
+ NULL)) {
+ kfree(ring);
+ return NULL;
+ }
+
+ ring->base.size = sz;
+ ring->base.effective_size = sz;
+ ring->base.vaddr = (void *)(ring + 1);
+ ring->base.timeline = &ring->timeline;
+
+ INIT_LIST_HEAD(&ring->base.request_list);
+ intel_ring_update_space(&ring->base);
+
+ return &ring->base;
+}
+
+static void mock_ring_free(struct intel_ring *base)
+{
+ struct mock_ring *ring = container_of(base, typeof(*ring), base);
+
+ i915_timeline_fini(&ring->timeline);
+ kfree(ring);
+}
+
static struct mock_request *first_request(struct mock_engine *engine)
{
return list_first_entry_or_null(&engine->hw_queue,
@@ -37,24 +83,29 @@ static struct mock_request *first_request(struct mock_engine *engine)
link);
}
-static void advance(struct mock_engine *engine,
- struct mock_request *request)
+static void advance(struct mock_request *request)
{
list_del_init(&request->link);
- mock_seqno_advance(&engine->base, request->base.global_seqno);
+ intel_engine_write_global_seqno(request->base.engine,
+ request->base.global_seqno);
+ i915_request_mark_complete(&request->base);
+ GEM_BUG_ON(!i915_request_completed(&request->base));
+
+ intel_engine_queue_breadcrumbs(request->base.engine);
}
static void hw_delay_complete(struct timer_list *t)
{
struct mock_engine *engine = from_timer(engine, t, hw_delay);
struct mock_request *request;
+ unsigned long flags;
- spin_lock(&engine->hw_lock);
+ spin_lock_irqsave(&engine->hw_lock, flags);
/* Timer fired, first request is complete */
request = first_request(engine);
if (request)
- advance(engine, request);
+ advance(request);
/*
* Also immediately signal any subsequent 0-delay requests, but
@@ -66,20 +117,24 @@ static void hw_delay_complete(struct timer_list *t)
break;
}
- advance(engine, request);
+ advance(request);
}
- spin_unlock(&engine->hw_lock);
+ spin_unlock_irqrestore(&engine->hw_lock, flags);
}
static void mock_context_unpin(struct intel_context *ce)
{
+ mock_timeline_unpin(ce->ring->timeline);
i915_gem_context_put(ce->gem_context);
}
static void mock_context_destroy(struct intel_context *ce)
{
GEM_BUG_ON(ce->pin_count);
+
+ if (ce->ring)
+ mock_ring_free(ce->ring);
}
static const struct intel_context_ops mock_context_ops = {
@@ -92,14 +147,26 @@ mock_context_pin(struct intel_engine_cs *engine,
struct i915_gem_context *ctx)
{
struct intel_context *ce = to_intel_context(ctx, engine);
+ int err = -ENOMEM;
- if (!ce->pin_count++) {
- i915_gem_context_get(ctx);
- ce->ring = engine->buffer;
- ce->ops = &mock_context_ops;
+ if (ce->pin_count++)
+ return ce;
+
+ if (!ce->ring) {
+ ce->ring = mock_ring(engine);
+ if (!ce->ring)
+ goto err;
}
+ mock_timeline_pin(ce->ring->timeline);
+
+ ce->ops = &mock_context_ops;
+ i915_gem_context_get(ctx);
return ce;
+
+err:
+ ce->pin_count = 0;
+ return ERR_PTR(err);
}
static int mock_request_alloc(struct i915_request *request)
@@ -118,9 +185,9 @@ static int mock_emit_flush(struct i915_request *request,
return 0;
}
-static void mock_emit_breadcrumb(struct i915_request *request,
- u32 *flags)
+static u32 *mock_emit_breadcrumb(struct i915_request *request, u32 *cs)
{
+ return cs;
}
static void mock_submit_request(struct i915_request *request)
@@ -128,51 +195,20 @@ static void mock_submit_request(struct i915_request *request)
struct mock_request *mock = container_of(request, typeof(*mock), base);
struct mock_engine *engine =
container_of(request->engine, typeof(*engine), base);
+ unsigned long flags;
i915_request_submit(request);
GEM_BUG_ON(!request->global_seqno);
- spin_lock_irq(&engine->hw_lock);
+ spin_lock_irqsave(&engine->hw_lock, flags);
list_add_tail(&mock->link, &engine->hw_queue);
if (mock->link.prev == &engine->hw_queue) {
if (mock->delay)
mod_timer(&engine->hw_delay, jiffies + mock->delay);
else
- advance(engine, mock);
+ advance(mock);
}
- spin_unlock_irq(&engine->hw_lock);
-}
-
-static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
-{
- const unsigned long sz = PAGE_SIZE / 2;
- struct mock_ring *ring;
-
- BUILD_BUG_ON(MIN_SPACE_FOR_ADD_REQUEST > sz);
-
- ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL);
- if (!ring)
- return NULL;
-
- i915_timeline_init(engine->i915, &ring->timeline, engine->name);
-
- ring->base.size = sz;
- ring->base.effective_size = sz;
- ring->base.vaddr = (void *)(ring + 1);
- ring->base.timeline = &ring->timeline;
-
- INIT_LIST_HEAD(&ring->base.request_list);
- intel_ring_update_space(&ring->base);
-
- return &ring->base;
-}
-
-static void mock_ring_free(struct intel_ring *base)
-{
- struct mock_ring *ring = container_of(base, typeof(*ring), base);
-
- i915_timeline_fini(&ring->timeline);
- kfree(ring);
+ spin_unlock_irqrestore(&engine->hw_lock, flags);
}
struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
@@ -191,39 +227,37 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
engine->base.i915 = i915;
snprintf(engine->base.name, sizeof(engine->base.name), "%s", name);
engine->base.id = id;
- engine->base.status_page.page_addr = (void *)(engine + 1);
+ engine->base.status_page.addr = (void *)(engine + 1);
engine->base.context_pin = mock_context_pin;
engine->base.request_alloc = mock_request_alloc;
engine->base.emit_flush = mock_emit_flush;
- engine->base.emit_breadcrumb = mock_emit_breadcrumb;
+ engine->base.emit_fini_breadcrumb = mock_emit_breadcrumb;
engine->base.submit_request = mock_submit_request;
- i915_timeline_init(i915, &engine->base.timeline, engine->base.name);
+ if (i915_timeline_init(i915,
+ &engine->base.timeline,
+ engine->base.name,
+ NULL))
+ goto err_free;
i915_timeline_set_subclass(&engine->base.timeline, TIMELINE_ENGINE);
intel_engine_init_breadcrumbs(&engine->base);
- engine->base.breadcrumbs.mock = true; /* prevent touching HW for irqs */
/* fake hw queue */
spin_lock_init(&engine->hw_lock);
timer_setup(&engine->hw_delay, hw_delay_complete, 0);
INIT_LIST_HEAD(&engine->hw_queue);
- engine->base.buffer = mock_ring(&engine->base);
- if (!engine->base.buffer)
- goto err_breadcrumbs;
-
if (IS_ERR(intel_context_pin(i915->kernel_context, &engine->base)))
- goto err_ring;
+ goto err_breadcrumbs;
return &engine->base;
-err_ring:
- mock_ring_free(engine->base.buffer);
err_breadcrumbs:
intel_engine_fini_breadcrumbs(&engine->base);
i915_timeline_fini(&engine->base.timeline);
+err_free:
kfree(engine);
return NULL;
}
@@ -237,16 +271,14 @@ void mock_engine_flush(struct intel_engine_cs *engine)
del_timer_sync(&mock->hw_delay);
spin_lock_irq(&mock->hw_lock);
- list_for_each_entry_safe(request, rn, &mock->hw_queue, link) {
- list_del_init(&request->link);
- mock_seqno_advance(&mock->base, request->base.global_seqno);
- }
+ list_for_each_entry_safe(request, rn, &mock->hw_queue, link)
+ advance(request);
spin_unlock_irq(&mock->hw_lock);
}
void mock_engine_reset(struct intel_engine_cs *engine)
{
- intel_write_status_page(engine, I915_GEM_HWS_INDEX, 0);
+ intel_engine_write_global_seqno(engine, 0);
}
void mock_engine_free(struct intel_engine_cs *engine)
@@ -263,8 +295,6 @@ void mock_engine_free(struct intel_engine_cs *engine)
__intel_context_unpin(engine->i915->kernel_context, engine);
- mock_ring_free(engine->buffer);
-
intel_engine_fini_breadcrumbs(engine);
i915_timeline_fini(&engine->timeline);
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.h b/drivers/gpu/drm/i915/selftests/mock_engine.h
index 133d0c21790d..b9cc3a245f16 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.h
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.h
@@ -46,10 +46,4 @@ void mock_engine_flush(struct intel_engine_cs *engine);
void mock_engine_reset(struct intel_engine_cs *engine);
void mock_engine_free(struct intel_engine_cs *engine);
-static inline void mock_seqno_advance(struct intel_engine_cs *engine, u32 seqno)
-{
- intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
- intel_engine_wakeup(engine);
-}
-
#endif /* !__MOCK_ENGINE_H__ */
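
With the mock engine changes above, the ring is no longer a single engine->buffer: it is allocated lazily per context at pin time, paired with a pin of its private timeline, and freed when the context is destroyed. A condensed sketch of that flow, drawn from mock_context_pin() and mock_context_destroy() in the hunk with error handling trimmed:

/* On first pin, give the context its own ring and timeline. */
if (!ce->ring) {
	ce->ring = mock_ring(engine);
	if (!ce->ring)
		return ERR_PTR(-ENOMEM);
}
mock_timeline_pin(ce->ring->timeline);

/* ... and on final destroy, the ring goes away with the context. */
if (ce->ring)
	mock_ring_free(ce->ring);
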
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 43ed8b28aeaa..14ae46fda49f 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -58,8 +58,8 @@ static void mock_device_release(struct drm_device *dev)
i915_gem_contexts_lost(i915);
mutex_unlock(&i915->drm.struct_mutex);
- cancel_delayed_work_sync(&i915->gt.retire_work);
- cancel_delayed_work_sync(&i915->gt.idle_work);
+ drain_delayed_work(&i915->gt.retire_work);
+ drain_delayed_work(&i915->gt.idle_work);
i915_gem_drain_workqueue(i915);
mutex_lock(&i915->drm.struct_mutex);
@@ -68,13 +68,14 @@ static void mock_device_release(struct drm_device *dev)
i915_gem_contexts_fini(i915);
mutex_unlock(&i915->drm.struct_mutex);
+ i915_timelines_fini(i915);
+
drain_workqueue(i915->wq);
i915_gem_drain_freed_objects(i915);
mutex_lock(&i915->drm.struct_mutex);
- mock_fini_ggtt(i915);
+ mock_fini_ggtt(&i915->ggtt);
mutex_unlock(&i915->drm.struct_mutex);
- WARN_ON(!list_empty(&i915->gt.timelines));
destroy_workqueue(i915->wq);
@@ -147,22 +148,24 @@ struct drm_i915_private *mock_gem_device(void)
pdev->class = PCI_BASE_CLASS_DISPLAY << 16;
pdev->dev.release = release_dev;
dev_set_name(&pdev->dev, "mock");
- dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
#if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
/* hack to disable iommu for the fake device; force identity mapping */
pdev->dev.archdata.iommu = (void *)-1;
#endif
+ i915 = (struct drm_i915_private *)(pdev + 1);
+ pci_set_drvdata(pdev, i915);
+
+ intel_runtime_pm_init_early(i915);
+
dev_pm_domain_set(&pdev->dev, &pm_domain);
pm_runtime_enable(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
if (pm_runtime_enabled(&pdev->dev))
WARN_ON(pm_runtime_get_sync(&pdev->dev));
- i915 = (struct drm_i915_private *)(pdev + 1);
- pci_set_drvdata(pdev, i915);
-
err = drm_dev_init(&i915->drm, &mock_driver, &pdev->dev);
if (err) {
pr_err("Failed to initialise mock GEM device: err=%d\n", err);
@@ -186,6 +189,7 @@ struct drm_i915_private *mock_gem_device(void)
init_waitqueue_head(&i915->gpu_error.wait_queue);
init_waitqueue_head(&i915->gpu_error.reset_queue);
+ mutex_init(&i915->gpu_error.wedge_mutex);
i915->wq = alloc_ordered_workqueue("mock", 0);
if (!i915->wq)
@@ -223,13 +227,14 @@ struct drm_i915_private *mock_gem_device(void)
if (!i915->priorities)
goto err_dependencies;
- INIT_LIST_HEAD(&i915->gt.timelines);
+ i915_timelines_init(i915);
+
INIT_LIST_HEAD(&i915->gt.active_rings);
INIT_LIST_HEAD(&i915->gt.closed_vma);
mutex_lock(&i915->drm.struct_mutex);
- mock_init_ggtt(i915);
+ mock_init_ggtt(i915, &i915->ggtt);
mkwrite_device_info(i915)->ring_mask = BIT(0);
i915->kernel_context = mock_context(i915, NULL);
@@ -250,6 +255,7 @@ err_context:
i915_gem_contexts_fini(i915);
err_unlock:
mutex_unlock(&i915->drm.struct_mutex);
+ i915_timelines_fini(i915);
kmem_cache_destroy(i915->priorities);
err_dependencies:
kmem_cache_destroy(i915->dependencies);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index 6ae418c76015..cd83929fde8e 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -70,7 +70,7 @@ mock_ppgtt(struct drm_i915_private *i915,
ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE);
ppgtt->vm.file = ERR_PTR(-ENODEV);
- i915_address_space_init(&ppgtt->vm, i915);
+ i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
ppgtt->vm.clear_range = nop_clear_range;
ppgtt->vm.insert_page = mock_insert_page;
@@ -97,11 +97,12 @@ static void mock_unbind_ggtt(struct i915_vma *vma)
{
}
-void mock_init_ggtt(struct drm_i915_private *i915)
+void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt)
{
- struct i915_ggtt *ggtt = &i915->ggtt;
+ memset(ggtt, 0, sizeof(*ggtt));
ggtt->vm.i915 = i915;
+ ggtt->vm.is_ggtt = true;
ggtt->gmadr = (struct resource) DEFINE_RES_MEM(0, 2048 * PAGE_SIZE);
ggtt->mappable_end = resource_size(&ggtt->gmadr);
@@ -117,14 +118,10 @@ void mock_init_ggtt(struct drm_i915_private *i915)
ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
ggtt->vm.vma_ops.clear_pages = clear_pages;
- i915_address_space_init(&ggtt->vm, i915);
-
- ggtt->vm.is_ggtt = true;
+ i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
}
-void mock_fini_ggtt(struct drm_i915_private *i915)
+void mock_fini_ggtt(struct i915_ggtt *ggtt)
{
- struct i915_ggtt *ggtt = &i915->ggtt;
-
i915_address_space_fini(&ggtt->vm);
}
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.h b/drivers/gpu/drm/i915/selftests/mock_gtt.h
index 9a0a833bb545..40d544bde1d5 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.h
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.h
@@ -25,8 +25,8 @@
#ifndef __MOCK_GTT_H
#define __MOCK_GTT_H
-void mock_init_ggtt(struct drm_i915_private *i915);
-void mock_fini_ggtt(struct drm_i915_private *i915);
+void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt);
+void mock_fini_ggtt(struct i915_ggtt *ggtt);
struct i915_hw_ppgtt *
mock_ppgtt(struct drm_i915_private *i915,
diff --git a/drivers/gpu/drm/i915/selftests/mock_timeline.c b/drivers/gpu/drm/i915/selftests/mock_timeline.c
index dcf3b16f5a07..d2de9ece2118 100644
--- a/drivers/gpu/drm/i915/selftests/mock_timeline.c
+++ b/drivers/gpu/drm/i915/selftests/mock_timeline.c
@@ -10,11 +10,13 @@
void mock_timeline_init(struct i915_timeline *timeline, u64 context)
{
+ timeline->i915 = NULL;
timeline->fence_context = context;
spin_lock_init(&timeline->lock);
- init_request_active(&timeline->last_request, NULL);
+ INIT_ACTIVE_REQUEST(&timeline->barrier);
+ INIT_ACTIVE_REQUEST(&timeline->last_request);
INIT_LIST_HEAD(&timeline->requests);
i915_syncmap_init(&timeline->sync);
@@ -24,5 +26,5 @@ void mock_timeline_init(struct i915_timeline *timeline, u64 context)
void mock_timeline_fini(struct i915_timeline *timeline)
{
- i915_timeline_fini(timeline);
+ i915_syncmap_free(&timeline->sync);
}
diff --git a/drivers/gpu/drm/i915/vlv_dsi.c b/drivers/gpu/drm/i915/vlv_dsi.c
index 361e962a7969..6403728fe778 100644
--- a/drivers/gpu/drm/i915/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/vlv_dsi.c
@@ -23,7 +23,6 @@
* Author: Jani Nikula <jani.nikula@intel.com>
*/
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
@@ -257,9 +256,9 @@ static void band_gap_reset(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->sb_lock);
}
-static bool intel_dsi_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+static int intel_dsi_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
@@ -276,7 +275,7 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
if (fixed_mode) {
intel_fixed_panel_mode(fixed_mode, adjusted_mode);
- if (HAS_GMCH_DISPLAY(dev_priv))
+ if (HAS_GMCH(dev_priv))
intel_gmch_panel_fitting(crtc, pipe_config,
conn_state->scaling_mode);
else
@@ -285,11 +284,16 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
}
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return false;
+ return -EINVAL;
/* DSI uses short packets for sync events, so clear mode flags for DSI */
adjusted_mode->flags = 0;
+ if (intel_dsi->pixel_format == MIPI_DSI_FMT_RGB888)
+ pipe_config->pipe_bpp = 24;
+ else
+ pipe_config->pipe_bpp = 18;
+
if (IS_GEN9_LP(dev_priv)) {
/* Enable Frame time stamp based scanline reporting */
adjusted_mode->private_flags |=
@@ -303,16 +307,16 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
ret = bxt_dsi_pll_compute(encoder, pipe_config);
if (ret)
- return false;
+ return -EINVAL;
} else {
ret = vlv_dsi_pll_compute(encoder, pipe_config);
if (ret)
- return false;
+ return -EINVAL;
}
pipe_config->clock_set = true;
- return true;
+ return 0;
}
static bool glk_dsi_enable_io(struct intel_encoder *encoder)
@@ -674,6 +678,10 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
LANE_CONFIGURATION_DUAL_LINK_B :
LANE_CONFIGURATION_DUAL_LINK_A;
}
+
+ if (intel_dsi->pixel_format != MIPI_DSI_FMT_RGB888)
+ temp |= DITHERING_ENABLE;
+
/* assert ip_tg_enable signal */
I915_WRITE(port_ctrl, temp | DPI_ENABLE);
POSTING_READ(port_ctrl);
@@ -960,13 +968,15 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ intel_wakeref_t wakeref;
enum port port;
bool active = false;
DRM_DEBUG_KMS("\n");
- if (!intel_display_power_get_if_enabled(dev_priv,
- encoder->power_domain))
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ encoder->power_domain);
+ if (!wakeref)
return false;
/*
@@ -1022,7 +1032,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
}
out_put_power:
- intel_display_power_put(dev_priv, encoder->power_domain);
+ intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
return active;
}
@@ -1058,10 +1068,8 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
}
fmt = I915_READ(MIPI_DSI_FUNC_PRG(port)) & VID_MODE_FORMAT_MASK;
- pipe_config->pipe_bpp =
- mipi_dsi_pixel_format_to_bpp(
- pixel_format_from_register_bits(fmt));
- bpp = pipe_config->pipe_bpp;
+ bpp = mipi_dsi_pixel_format_to_bpp(
+ pixel_format_from_register_bits(fmt));
/* Enable Frame time stamp based scanline reporting */
adjusted_mode->private_flags |=
@@ -1199,11 +1207,9 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
if (IS_GEN9_LP(dev_priv)) {
bxt_dsi_get_pipe_config(encoder, pipe_config);
- pclk = bxt_dsi_get_pclk(encoder, pipe_config->pipe_bpp,
- pipe_config);
+ pclk = bxt_dsi_get_pclk(encoder, pipe_config);
} else {
- pclk = vlv_dsi_get_pclk(encoder, pipe_config->pipe_bpp,
- pipe_config);
+ pclk = vlv_dsi_get_pclk(encoder, pipe_config);
}
if (pclk) {
@@ -1575,6 +1581,7 @@ vlv_dsi_get_hw_panel_orientation(struct intel_connector *connector)
enum drm_panel_orientation orientation;
struct intel_plane *plane;
struct intel_crtc *crtc;
+ intel_wakeref_t wakeref;
enum pipe pipe;
u32 val;
@@ -1585,7 +1592,8 @@ vlv_dsi_get_hw_panel_orientation(struct intel_connector *connector)
plane = to_intel_plane(crtc->base.primary);
power_domain = POWER_DOMAIN_PIPE(pipe);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
return DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
val = I915_READ(DSPCNTR(plane->i9xx_plane));
@@ -1597,7 +1605,7 @@ vlv_dsi_get_hw_panel_orientation(struct intel_connector *connector)
else
orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL;
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, power_domain, wakeref);
return orientation;
}
@@ -1625,7 +1633,7 @@ static void intel_dsi_add_properties(struct intel_connector *connector)
u32 allowed_scalers;
allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
- if (!HAS_GMCH_DISPLAY(dev_priv))
+ if (!HAS_GMCH(dev_priv))
allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);
drm_connector_attach_scaling_mode_property(&connector->base,
@@ -1689,6 +1697,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
intel_encoder->post_disable = intel_dsi_post_disable;
intel_encoder->get_hw_state = intel_dsi_get_hw_state;
intel_encoder->get_config = intel_dsi_get_config;
+ intel_encoder->update_pipe = intel_panel_update_backlight;
intel_connector->get_hw_state = intel_connector_get_hw_state;
diff --git a/drivers/gpu/drm/i915/vlv_dsi_pll.c b/drivers/gpu/drm/i915/vlv_dsi_pll.c
index a132a8037ecc..954d5a8c4fa7 100644
--- a/drivers/gpu/drm/i915/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/vlv_dsi_pll.c
@@ -252,20 +252,12 @@ void bxt_dsi_pll_disable(struct intel_encoder *encoder)
DRM_ERROR("Timeout waiting for PLL lock deassertion\n");
}
-static void assert_bpp_mismatch(enum mipi_dsi_pixel_format fmt, int pipe_bpp)
-{
- int bpp = mipi_dsi_pixel_format_to_bpp(fmt);
-
- WARN(bpp != pipe_bpp,
- "bpp match assertion failure (expected %d, current %d)\n",
- bpp, pipe_bpp);
-}
-
-u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
+u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
u32 dsi_clock, pclk;
u32 pll_ctl, pll_div;
u32 m = 0, p = 0, n;
@@ -319,15 +311,12 @@ u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
dsi_clock = (m * refclk) / (p * n);
- /* pixel_format and pipe_bpp should agree */
- assert_bpp_mismatch(intel_dsi->pixel_format, pipe_bpp);
-
- pclk = DIV_ROUND_CLOSEST(dsi_clock * intel_dsi->lane_count, pipe_bpp);
+ pclk = DIV_ROUND_CLOSEST(dsi_clock * intel_dsi->lane_count, bpp);
return pclk;
}
-u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
+u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
u32 pclk;
@@ -335,12 +324,7 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
u32 dsi_ratio;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- /* Divide by zero */
- if (!pipe_bpp) {
- DRM_ERROR("Invalid BPP(0)\n");
- return 0;
- }
+ int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
config->dsi_pll.ctrl = I915_READ(BXT_DSI_PLL_CTL);
@@ -348,10 +332,7 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
dsi_clk = (dsi_ratio * BXT_REF_CLOCK_KHZ) / 2;
- /* pixel_format and pipe_bpp should agree */
- assert_bpp_mismatch(intel_dsi->pixel_format, pipe_bpp);
-
- pclk = DIV_ROUND_CLOSEST(dsi_clk * intel_dsi->lane_count, pipe_bpp);
+ pclk = DIV_ROUND_CLOSEST(dsi_clk * intel_dsi->lane_count, bpp);
DRM_DEBUG_DRIVER("Calculated pclk=%u\n", pclk);
return pclk;
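
The vlv/bxt PLL hunks above derive the pixel clock from the configured pixel format rather than from pipe_bpp (which intel_dsi_compute_config now sets from the same format). As a worked example with purely illustrative numbers:

int bpp = mipi_dsi_pixel_format_to_bpp(MIPI_DSI_FMT_RGB888);	/* 24 */
u32 dsi_clk = 400000;	/* kHz, hypothetical DSI clock */
u32 lane_count = 4;	/* hypothetical lane count */
u32 pclk = DIV_ROUND_CLOSEST(dsi_clk * lane_count, bpp);	/* 1600000 / 24 ~= 66667 kHz */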